max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
src/Tools/FigureOfMerit/FigureOfMerit/BlockCiphers/Scenario2/Constants.py
GaloisInc/hacrypto
34
6621451
# # University of Luxembourg # Laboratory of Algorithmics, Cryptology and Security (LACS) # # FigureOfMerit (FOM) # # Copyright (C) 2015 University of Luxembourg # # Written in 2015 by <NAME> <<EMAIL>> # # This file is part of FigureOfMerit. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __author__ = 'daniel.dinu' # Architectures ARCHITECTURE_AVR = 'AVR' ARCHITECTURE_MSP = 'MSP' ARCHITECTURE_ARM = 'ARM' # FOM 1 weights # AVR FOM1_AVR_CODE_SIZE_WEIGHT = 1 FOM1_AVR_RAM_WEIGHT = 1 FOM1_AVR_EXECUTION_TIME_WEIGHT = 1 # MSP FOM1_MSP_CODE_SIZE_WEIGHT = 1 FOM1_MSP_RAM_WEIGHT = 1 FOM1_MSP_EXECUTION_TIME_WEIGHT = 1 # ARM FOM1_ARM_CODE_SIZE_WEIGHT = 1 FOM1_ARM_RAM_WEIGHT = 1 FOM1_ARM_EXECUTION_TIME_WEIGHT = 1 # FOM 2 weights # AVR FOM2_AVR_CODE_SIZE_WEIGHT = 1 FOM2_AVR_RAM_WEIGHT = 1 FOM2_AVR_EXECUTION_TIME_WEIGHT = 0 # MSP FOM2_MSP_CODE_SIZE_WEIGHT = 1 FOM2_MSP_RAM_WEIGHT = 1 FOM2_MSP_EXECUTION_TIME_WEIGHT = 0 # ARM FOM2_ARM_CODE_SIZE_WEIGHT = 1 FOM2_ARM_RAM_WEIGHT = 1 FOM2_ARM_EXECUTION_TIME_WEIGHT = 0 # FOM 3 weights # AVR FOM3_AVR_CODE_SIZE_WEIGHT = 0 FOM3_AVR_RAM_WEIGHT = 0 FOM3_AVR_EXECUTION_TIME_WEIGHT = 1 # MSP FOM3_MSP_CODE_SIZE_WEIGHT = 0 FOM3_MSP_RAM_WEIGHT = 0 FOM3_MSP_EXECUTION_TIME_WEIGHT = 1 # ARM FOM3_ARM_CODE_SIZE_WEIGHT = 0 FOM3_ARM_RAM_WEIGHT = 0 FOM3_ARM_EXECUTION_TIME_WEIGHT = 1 # Max RAM & ROM AVR_MAX_RAM = 4 * 1024 AVR_MAX_ROM = 128 * 1024 MSP_MAX_RAM = 10 * 1024 MSP_MAX_ROM 
= 48 * 1024 ARM_MAX_RAM = 96 * 1024 ARM_MAX_ROM = 512 * 1024 # Recompute FOM RECOMPUTE_FOM = False # Input CSV_FILE_PATH_FORMAT = 'Input/{0}/{1}_{0}_scenario2.csv' FILE_PREFIX = 'FELICS' CSV_DELIMITER = ',' CSV_QUOTECHAR = '"' CSV_LINETERMINATOR = '\n' CSV_HEADER_LINES = 3 CIPHER_NAME_COLUMN_INDEX = 0 BLOCK_SIZE_COLUMN_INDEX = 1 KEY_SIZE_COLUMN_INDEX = 2 IMPLEMENTATION_VERSION_COLUMN_INDEX = 3 IMPLEMENTATION_TYPE_COLUMN_INDEX = 4 IMPLEMENTATION_COMPILER_OPTIONS_COLUMN_INDEX = 5 CODE_SIZE_E_COLUMN_INDEX = 6 RAM_STACK_E_COLUMN_INDEX = 7 RAM_DATA_COLUMN_INDEX = 8 EXECUTION_TIME_E_COLUMN_INDEX = 9 IDENTITY_CIPHER_NAME = 'IdentityCipher' IMPLEMENTATION_TYPE_ASM = 'ASM' IMPLEMENTATION_TYPE_C = 'C' IMPLEMENTATION_TYPE_C_ASM = 'C+ASM' DEFAULT_IMPLEMENTATION_TYPE = '?' DEFAULT_METRIC_VALUE = 10 ** 10 # Output RESULT_LATEX_FILE_PATH = 'Output/TableScenario2.tex' RESULT_MEDIAWIKI_FILE_PATH = 'Output/TableScenario2.mkw' RESULT_GNUPLOT_NAME_DAT_FILE_PATH = 'Output/scenario2_name.dat' RESULT_GNUPLOT_FOM_DAT_FILE_PATH = 'Output/scenario2_fom.dat' RESULT_STATISTICS_CSV_FILE_PATH = 'Output/scenario2_statistics.csv' RESULT_FILE_ACCESS_MODE = 'w' # Debug levels DEBUG_OFF = 0 DEBUG_ON = 1 DEBUG = 0 # Debug messages CIPHER_IMPLEMENTATION_FOM1_DETAILS = 'FOM 1: {} {} {} [{}] {} {} {} [{}] {} {} {} [{}]' CIPHER_IMPLEMENTATION_FOM2_DETAILS = 'FOM 2: {} {} {} [{}] {} {} {} [{}] {} {} {} [{}]' CIPHER_IMPLEMENTATION_FOM3_DETAILS = 'FOM 3: {} {} {} [{}] {} {} {} [{}] {} {} {} [{}]' CIPHER_SCENARIO_FOM1 = 'FOM 1: {}) [{}] [{} {} {}] [{} {} {}] {}' CIPHER_SCENARIO_FOM2 = 'FOM 2: {}) [{}] [{} {} {}] [{} {} {}] {}' CIPHER_SCENARIO_FOM3 = 'FOM 3: {}) [{}] [{} {} {}] [{} {} {}] {}' SCENARIO_FOM_MIN_VALUES = 'FOM MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM_MAX_VALUES = 'FOM MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM1_MIN_VALUES = 'FOM 1 MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM1_MAX_VALUES = 'FOM 1 MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' 
SCENARIO_FOM2_MIN_VALUES = 'FOM 2 MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM2_MAX_VALUES = 'FOM 2 MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM3_MIN_VALUES = 'FOM 3 MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM3_MAX_VALUES = 'FOM 3 MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' DONE = 'Done!' FOM1_AVR = 'FOM 1 AVR: {} {} {} {}' FOM1_MSP = 'FOM 1 MSP: {} {} {} {}' FOM1_ARM = 'FOM 1 ARM: {} {} {} {}' FOM2_AVR = 'FOM 2 AVR: {} {} {} {}' FOM2_MSP = 'FOM 2 MSP: {} {} {} {}' FOM2_ARM = 'FOM 2 ARM: {} {} {} {}' FOM3_AVR = 'FOM 3 AVR: {} {} {} {}' FOM3_MSP = 'FOM 3 MSP: {} {} {} {}' FOM3_ARM = 'FOM 3 ARM: {} {} {} {}' FOM1_SELECTED_AVR = 'FOM 1 selected AVR: {} {} {}' FOM1_SELECTED_MSP = 'FOM 1 selected MSP: {} {} {}' FOM1_SELECTED_ARM = 'FOM 1 selected ARM: {} {} {}' FOM2_SELECTED_AVR = 'FOM 2 selected AVR: {} {} {}' FOM2_SELECTED_MSP = 'FOM 2 selected MSP: {} {} {}' FOM2_SELECTED_ARM = 'FOM 2 selected ARM: {} {} {}' FOM3_SELECTED_AVR = 'FOM 3 selected AVR: {} {} {}' FOM3_SELECTED_MSP = 'FOM 3 selected MSP: {} {} {}' FOM3_SELECTED_ARM = 'FOM 3 selected ARM: {} {} {}' # LaTeX LATEX_MIN_VALUE = '\\textbf{{{}}}' LATEX_MAX_VALUE = '{}' LATEX_ASM_VALUE = '{}\\tnote{{\\textasteriskcentered}}' # '\\textit{{{}}}' LATEX_C_VALUE = '{}' LATEX_SECTION1_ROW_FORMAT = '\\textbf{{{}}} & {} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\ ' \ '% AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); \n' LATEX_SECTION2_ROW_FORMAT = '\\textbf{{{}}} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\ ' \ '% AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); \n' LATEX_SECTION3_ROW_FORMAT = '\\textbf{{{}}} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\ ' \ '% AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); \n' LATEX_ROUND_FOM = 1 # MediaWiki MEDIAWIKI_MIN_VALUE = '<span style="color: green">\'\'\'{}\'\'\'</span>' MEDIAWIKI_MAX_VALUE = '{}' # '<span style="color: red">\'\'\'{}\'\'\'</span>' MEDIAWIKI_ASM_VALUE = '\'\'{}\'\'' MEDIAWIKI_C_VALUE = '{}' 
MEDIAWIKI_CIPHER_NAME_FORMAT = '[[Lightweight_Block_Ciphers#{}|{}]]' MEDIAWIKI_SECTION1_ROW_FORMAT = '|-\n! {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n' \ '| {}\n ' \ '<!-- AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); -->\n' MEDIAWIKI_SECTION2_ROW_FORMAT = '|-\n! {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n ' \ '<!-- AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); -->\n' MEDIAWIKI_SECTION3_ROW_FORMAT = '|-\n! {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n ' \ '<!-- AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); -->\n' MEDIAWIKI_ROUND_FOM = 1 # Gnuplot dat GNUPLOT_DAT_ROW_FORMAT = '{} {} {} {} {} {} {} {} {} {} {} {} {}\n' GNUPLOT_ROUND_FOM = 1 # Statistics csv IMPLEMENTATION_FULL_NAME_FORMAT = '{}_{}_{}_v{}' STATISTICS_CSV_HEADER_ROW = ['Implementation', 'FOM Position', 'FOM Entries', 'Small code size & RAM Entries', 'Best execution time Entries']
# # University of Luxembourg # Laboratory of Algorithmics, Cryptology and Security (LACS) # # FigureOfMerit (FOM) # # Copyright (C) 2015 University of Luxembourg # # Written in 2015 by <NAME> <<EMAIL>> # # This file is part of FigureOfMerit. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __author__ = 'daniel.dinu' # Architectures ARCHITECTURE_AVR = 'AVR' ARCHITECTURE_MSP = 'MSP' ARCHITECTURE_ARM = 'ARM' # FOM 1 weights # AVR FOM1_AVR_CODE_SIZE_WEIGHT = 1 FOM1_AVR_RAM_WEIGHT = 1 FOM1_AVR_EXECUTION_TIME_WEIGHT = 1 # MSP FOM1_MSP_CODE_SIZE_WEIGHT = 1 FOM1_MSP_RAM_WEIGHT = 1 FOM1_MSP_EXECUTION_TIME_WEIGHT = 1 # ARM FOM1_ARM_CODE_SIZE_WEIGHT = 1 FOM1_ARM_RAM_WEIGHT = 1 FOM1_ARM_EXECUTION_TIME_WEIGHT = 1 # FOM 2 weights # AVR FOM2_AVR_CODE_SIZE_WEIGHT = 1 FOM2_AVR_RAM_WEIGHT = 1 FOM2_AVR_EXECUTION_TIME_WEIGHT = 0 # MSP FOM2_MSP_CODE_SIZE_WEIGHT = 1 FOM2_MSP_RAM_WEIGHT = 1 FOM2_MSP_EXECUTION_TIME_WEIGHT = 0 # ARM FOM2_ARM_CODE_SIZE_WEIGHT = 1 FOM2_ARM_RAM_WEIGHT = 1 FOM2_ARM_EXECUTION_TIME_WEIGHT = 0 # FOM 3 weights # AVR FOM3_AVR_CODE_SIZE_WEIGHT = 0 FOM3_AVR_RAM_WEIGHT = 0 FOM3_AVR_EXECUTION_TIME_WEIGHT = 1 # MSP FOM3_MSP_CODE_SIZE_WEIGHT = 0 FOM3_MSP_RAM_WEIGHT = 0 FOM3_MSP_EXECUTION_TIME_WEIGHT = 1 # ARM FOM3_ARM_CODE_SIZE_WEIGHT = 0 FOM3_ARM_RAM_WEIGHT = 0 FOM3_ARM_EXECUTION_TIME_WEIGHT = 1 # Max RAM & ROM AVR_MAX_RAM = 4 * 1024 AVR_MAX_ROM = 128 * 1024 MSP_MAX_RAM = 10 * 1024 MSP_MAX_ROM 
= 48 * 1024 ARM_MAX_RAM = 96 * 1024 ARM_MAX_ROM = 512 * 1024 # Recompute FOM RECOMPUTE_FOM = False # Input CSV_FILE_PATH_FORMAT = 'Input/{0}/{1}_{0}_scenario2.csv' FILE_PREFIX = 'FELICS' CSV_DELIMITER = ',' CSV_QUOTECHAR = '"' CSV_LINETERMINATOR = '\n' CSV_HEADER_LINES = 3 CIPHER_NAME_COLUMN_INDEX = 0 BLOCK_SIZE_COLUMN_INDEX = 1 KEY_SIZE_COLUMN_INDEX = 2 IMPLEMENTATION_VERSION_COLUMN_INDEX = 3 IMPLEMENTATION_TYPE_COLUMN_INDEX = 4 IMPLEMENTATION_COMPILER_OPTIONS_COLUMN_INDEX = 5 CODE_SIZE_E_COLUMN_INDEX = 6 RAM_STACK_E_COLUMN_INDEX = 7 RAM_DATA_COLUMN_INDEX = 8 EXECUTION_TIME_E_COLUMN_INDEX = 9 IDENTITY_CIPHER_NAME = 'IdentityCipher' IMPLEMENTATION_TYPE_ASM = 'ASM' IMPLEMENTATION_TYPE_C = 'C' IMPLEMENTATION_TYPE_C_ASM = 'C+ASM' DEFAULT_IMPLEMENTATION_TYPE = '?' DEFAULT_METRIC_VALUE = 10 ** 10 # Output RESULT_LATEX_FILE_PATH = 'Output/TableScenario2.tex' RESULT_MEDIAWIKI_FILE_PATH = 'Output/TableScenario2.mkw' RESULT_GNUPLOT_NAME_DAT_FILE_PATH = 'Output/scenario2_name.dat' RESULT_GNUPLOT_FOM_DAT_FILE_PATH = 'Output/scenario2_fom.dat' RESULT_STATISTICS_CSV_FILE_PATH = 'Output/scenario2_statistics.csv' RESULT_FILE_ACCESS_MODE = 'w' # Debug levels DEBUG_OFF = 0 DEBUG_ON = 1 DEBUG = 0 # Debug messages CIPHER_IMPLEMENTATION_FOM1_DETAILS = 'FOM 1: {} {} {} [{}] {} {} {} [{}] {} {} {} [{}]' CIPHER_IMPLEMENTATION_FOM2_DETAILS = 'FOM 2: {} {} {} [{}] {} {} {} [{}] {} {} {} [{}]' CIPHER_IMPLEMENTATION_FOM3_DETAILS = 'FOM 3: {} {} {} [{}] {} {} {} [{}] {} {} {} [{}]' CIPHER_SCENARIO_FOM1 = 'FOM 1: {}) [{}] [{} {} {}] [{} {} {}] {}' CIPHER_SCENARIO_FOM2 = 'FOM 2: {}) [{}] [{} {} {}] [{} {} {}] {}' CIPHER_SCENARIO_FOM3 = 'FOM 3: {}) [{}] [{} {} {}] [{} {} {}] {}' SCENARIO_FOM_MIN_VALUES = 'FOM MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM_MAX_VALUES = 'FOM MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM1_MIN_VALUES = 'FOM 1 MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM1_MAX_VALUES = 'FOM 1 MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' 
SCENARIO_FOM2_MIN_VALUES = 'FOM 2 MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM2_MAX_VALUES = 'FOM 2 MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM3_MIN_VALUES = 'FOM 3 MIN values: [{} {} {}] [{} {} {}] [{} {} {}]' SCENARIO_FOM3_MAX_VALUES = 'FOM 3 MAX values: [{} {} {}] [{} {} {}] [{} {} {}]' DONE = 'Done!' FOM1_AVR = 'FOM 1 AVR: {} {} {} {}' FOM1_MSP = 'FOM 1 MSP: {} {} {} {}' FOM1_ARM = 'FOM 1 ARM: {} {} {} {}' FOM2_AVR = 'FOM 2 AVR: {} {} {} {}' FOM2_MSP = 'FOM 2 MSP: {} {} {} {}' FOM2_ARM = 'FOM 2 ARM: {} {} {} {}' FOM3_AVR = 'FOM 3 AVR: {} {} {} {}' FOM3_MSP = 'FOM 3 MSP: {} {} {} {}' FOM3_ARM = 'FOM 3 ARM: {} {} {} {}' FOM1_SELECTED_AVR = 'FOM 1 selected AVR: {} {} {}' FOM1_SELECTED_MSP = 'FOM 1 selected MSP: {} {} {}' FOM1_SELECTED_ARM = 'FOM 1 selected ARM: {} {} {}' FOM2_SELECTED_AVR = 'FOM 2 selected AVR: {} {} {}' FOM2_SELECTED_MSP = 'FOM 2 selected MSP: {} {} {}' FOM2_SELECTED_ARM = 'FOM 2 selected ARM: {} {} {}' FOM3_SELECTED_AVR = 'FOM 3 selected AVR: {} {} {}' FOM3_SELECTED_MSP = 'FOM 3 selected MSP: {} {} {}' FOM3_SELECTED_ARM = 'FOM 3 selected ARM: {} {} {}' # LaTeX LATEX_MIN_VALUE = '\\textbf{{{}}}' LATEX_MAX_VALUE = '{}' LATEX_ASM_VALUE = '{}\\tnote{{\\textasteriskcentered}}' # '\\textit{{{}}}' LATEX_C_VALUE = '{}' LATEX_SECTION1_ROW_FORMAT = '\\textbf{{{}}} & {} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\ ' \ '% AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); \n' LATEX_SECTION2_ROW_FORMAT = '\\textbf{{{}}} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\ ' \ '% AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); \n' LATEX_SECTION3_ROW_FORMAT = '\\textbf{{{}}} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\ ' \ '% AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); \n' LATEX_ROUND_FOM = 1 # MediaWiki MEDIAWIKI_MIN_VALUE = '<span style="color: green">\'\'\'{}\'\'\'</span>' MEDIAWIKI_MAX_VALUE = '{}' # '<span style="color: red">\'\'\'{}\'\'\'</span>' MEDIAWIKI_ASM_VALUE = '\'\'{}\'\'' MEDIAWIKI_C_VALUE = '{}' 
MEDIAWIKI_CIPHER_NAME_FORMAT = '[[Lightweight_Block_Ciphers#{}|{}]]' MEDIAWIKI_SECTION1_ROW_FORMAT = '|-\n! {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n' \ '| {}\n ' \ '<!-- AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); -->\n' MEDIAWIKI_SECTION2_ROW_FORMAT = '|-\n! {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n ' \ '<!-- AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); -->\n' MEDIAWIKI_SECTION3_ROW_FORMAT = '|-\n! {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n| {}\n ' \ '<!-- AVR: v{} ({}); MSP: v{} ({}); ARM: v{} ({}); -->\n' MEDIAWIKI_ROUND_FOM = 1 # Gnuplot dat GNUPLOT_DAT_ROW_FORMAT = '{} {} {} {} {} {} {} {} {} {} {} {} {}\n' GNUPLOT_ROUND_FOM = 1 # Statistics csv IMPLEMENTATION_FULL_NAME_FORMAT = '{}_{}_{}_v{}' STATISTICS_CSV_HEADER_ROW = ['Implementation', 'FOM Position', 'FOM Entries', 'Small code size & RAM Entries', 'Best execution time Entries']
en
0.756042
# # University of Luxembourg # Laboratory of Algorithmics, Cryptology and Security (LACS) # # FigureOfMerit (FOM) # # Copyright (C) 2015 University of Luxembourg # # Written in 2015 by <NAME> <<EMAIL>> # # This file is part of FigureOfMerit. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Architectures # FOM 1 weights # AVR # MSP # ARM # FOM 2 weights # AVR # MSP # ARM # FOM 3 weights # AVR # MSP # ARM # Max RAM & ROM # Recompute FOM # Input # Output # Debug levels # Debug messages # LaTeX # '\\textit{{{}}}' # MediaWiki # '<span style="color: red">\'\'\'{}\'\'\'</span>' #{}|{}]]' # Gnuplot dat # Statistics csv
1.668509
2
src/settings/workspace.py
struts2spring/sql-editor
9
6621452
''' Created on Feb 26, 2019 @author: xbbntni ''' import json from datetime import date from src.sqlite_executer.ConnectExecuteSqlite import SQLExecuter import os class Project(): def __init__(self, basePath=None, projectDirName=None, projectName=None, natures=list()): self.basePath = basePath self.projectName = projectName self.projectDirName = projectDirName # self.projectPath = os.path.join(basePath, projectDirName) # directory path in system self.natures = natures # java, javascript, python def addNature(self, nature=None): self.natures.append(nature) def getProjectPath(self): return os.path.join(self.basePath, self.projectDirName) def __repr__(self): return f'{{basePath:{self.basePath},projectName:{self.projectName},projectDirName:{self.projectDirName}}}' class Workspace(): def __init__(self, workspacePath=None, projects=list(), active=True): self.workspacePath = workspacePath self.projects = projects self.active = active # self.createdOn = createdOn def addProject(self, project=None): self.projects.append(project) def removeProject(self, projectName=None): for project in self.projects: if project.projectName == projectName: self.projects.remove(project) break def __repr__(self): return f'{{workspacePath:{self.workspacePath},active:{self.active},projects:{self.projects},createdOn :{self.createdOn}}}' # def serialize(obj): # """JSON serializer for objects not serializable by default json code""" # # if isinstance(obj, date): # serial = obj.isoformat() # return serial # # return obj.__dict__ class Setting(): def __init__(self, workspaces=list(), maxWorkspace=10, showWorkspaceSelectionDialog=True): self.workspaces = workspaces self.maxWorkspace = maxWorkspace # maximum number of workspaces self.showWorkspaceSelectionDialog = showWorkspaceSelectionDialog # self.loadSettings() # self.activeWorkspace = self.getActiveWorkspace() def showWorkspaceSelection(self): showDialog = True if any([workspce.active for workspce in self.workspaces]): showDialog = False return 
showDialog def addWorkspace(self, workspace=None): for workspce in self.workspaces: workspce.active = False if len(self.workspaces) > self.maxWorkspace: self.workspaces[0] = workspace else: self.workspaces.append(workspace) self.showWorkspaceSelectionDialog = self.showWorkspaceSelection() def getActiveWorkspace(self): activeWorkspce = None for workspce in self.workspaces: if workspce.active: activeWorkspce = workspce break return activeWorkspce def loadSettings(self): workspace = Workspace(workspacePath=r'C:\Users\xbbntni\eclipse-workspace') project = Project(basePath=r'/docs/work/python_project', projectDirName='sql_editor') project.addNature(nature='python') workspace.addProject(project) project = Project(basePath=r'/docs/new/library', projectDirName='sql-editor') project.addNature(nature='python') workspace.addProject(project) # settings = Setting() self.addWorkspace(workspace) # def write(self): # with open('user.json', 'w') as file: # json.dump(self, file, sort_keys=True, indent=4) def loadJsonSettings(self): self.loadSettings() # with open('dumpFile.txt', 'r') as file: # settingData = json.loads(file) # # setting_data=json.loads(file, object_hook=dict_to_obj) # va=str(settingData) # try: # dataform = str(settingData).strip("'<>()[]\"` ").replace('\'', '\"') # # struct = json.loads(dataform) # struct=json.loads(dataform, object_hook=dict_to_obj) # except Exception as e: # print(e) # print(settingData) # print('hi') def __repr__(self): return f'Setting:{{workspaces:{self.workspaces},maxWorkspace:{self.maxWorkspace},showWorkspaceSelectionDialog:{self.showWorkspaceSelectionDialog},activeWorkspace :{self.activeWorkspace}}}' def convert_to_dict(obj): """ A function takes in a custom object and returns a dictionary representation of the object. This dict representation includes meta data such as the object's module and class names. 
""" serial = None if isinstance(obj, date): serial = obj.isoformat() else: serial = obj.__module__ # Populate the dictionary with object meta data obj_dict = { "__class__": obj.__class__.__name__, "__module__": serial } # Populate the dictionary with object properties obj_dict.update(obj.__dict__) return obj_dict def dict_to_obj(our_dict): """ Function that takes in a dict and returns a custom object associated with the dict. This function makes use of the "__module__" and "__class__" metadata in the dictionary to know which object type to create. """ if "__class__" in our_dict: # Pop ensures we remove metadata from the dict to leave only the instance arguments class_name = our_dict.pop("__class__") # Get the module name from the dict and import it module_name = our_dict.pop("__module__") # We use the built in __import__ function since the module name is not yet known at runtime module = __import__(module_name) obj = None try: # Get the class from the module class_ = getattr(module, class_name) # Use dictionary unpacking to initialize the object obj = class_(**our_dict) except Exception as e: print(e) else: obj = our_dict return obj class SaveSetting(): def __init__(self): pass def save(self): settings = Setting() settings.loadJsonSettings() # settings.write() # print(settings) # settings.loadSettings() # with open('settings.json', 'w') as file: js = json.dump(settings, file, sort_keys=True, indent=4, default=convert_to_dict) with open('settings.json', 'r') as json_file: settingData = json.load(json_file) dataform = settingData.__str__().strip("'<>() ").replace('\'', '\"') dataform = dataform.replace('None', 'null') dataform = dataform.replace('True', 'true') dataform = dataform.replace('False', 'false') print(dataform) settings_reloaded = json.loads(dataform, object_hook=dict_to_obj) print('compltet') sqlExecuter = SQLExecuter() table = 'project_setting' rows = [{'id':None, 'name':'settings', 'value':dataform, 'description':'last updated value'}] 
sqlExecuter.sqlite_insert(table, rows) if __name__ == '__main__': # settings = Setting() # settings.loadJsonSettings() SaveSetting().save() # settings.write() # print(settings) # settings.loadSettings() # # with open('settings.json', 'w') as file: # js = json.dump(settings, file, sort_keys=True, indent=4, default=convert_to_dict) # with open('settings.json', 'r') as json_file: # settingData = json.load(json_file) # dataform = settingData.__str__().strip("'<>() ").replace('\'', '\"') # dataform = dataform.replace('None', 'null') # dataform = dataform.replace('True', 'true') # dataform = dataform.replace('False', 'false') # print(dataform) # settings_reloaded = json.loads(dataform, object_hook=dict_to_obj) # print('compltet') # # sqlExecuter = SQLExecuter() # table = 'project_setting' # rows = [{'id':None, 'name':'settings', 'value':dataform, 'description':'last updated value'}] # sqlExecuter.sqlite_insert(table, rows)
''' Created on Feb 26, 2019 @author: xbbntni ''' import json from datetime import date from src.sqlite_executer.ConnectExecuteSqlite import SQLExecuter import os class Project(): def __init__(self, basePath=None, projectDirName=None, projectName=None, natures=list()): self.basePath = basePath self.projectName = projectName self.projectDirName = projectDirName # self.projectPath = os.path.join(basePath, projectDirName) # directory path in system self.natures = natures # java, javascript, python def addNature(self, nature=None): self.natures.append(nature) def getProjectPath(self): return os.path.join(self.basePath, self.projectDirName) def __repr__(self): return f'{{basePath:{self.basePath},projectName:{self.projectName},projectDirName:{self.projectDirName}}}' class Workspace(): def __init__(self, workspacePath=None, projects=list(), active=True): self.workspacePath = workspacePath self.projects = projects self.active = active # self.createdOn = createdOn def addProject(self, project=None): self.projects.append(project) def removeProject(self, projectName=None): for project in self.projects: if project.projectName == projectName: self.projects.remove(project) break def __repr__(self): return f'{{workspacePath:{self.workspacePath},active:{self.active},projects:{self.projects},createdOn :{self.createdOn}}}' # def serialize(obj): # """JSON serializer for objects not serializable by default json code""" # # if isinstance(obj, date): # serial = obj.isoformat() # return serial # # return obj.__dict__ class Setting(): def __init__(self, workspaces=list(), maxWorkspace=10, showWorkspaceSelectionDialog=True): self.workspaces = workspaces self.maxWorkspace = maxWorkspace # maximum number of workspaces self.showWorkspaceSelectionDialog = showWorkspaceSelectionDialog # self.loadSettings() # self.activeWorkspace = self.getActiveWorkspace() def showWorkspaceSelection(self): showDialog = True if any([workspce.active for workspce in self.workspaces]): showDialog = False return 
showDialog def addWorkspace(self, workspace=None): for workspce in self.workspaces: workspce.active = False if len(self.workspaces) > self.maxWorkspace: self.workspaces[0] = workspace else: self.workspaces.append(workspace) self.showWorkspaceSelectionDialog = self.showWorkspaceSelection() def getActiveWorkspace(self): activeWorkspce = None for workspce in self.workspaces: if workspce.active: activeWorkspce = workspce break return activeWorkspce def loadSettings(self): workspace = Workspace(workspacePath=r'C:\Users\xbbntni\eclipse-workspace') project = Project(basePath=r'/docs/work/python_project', projectDirName='sql_editor') project.addNature(nature='python') workspace.addProject(project) project = Project(basePath=r'/docs/new/library', projectDirName='sql-editor') project.addNature(nature='python') workspace.addProject(project) # settings = Setting() self.addWorkspace(workspace) # def write(self): # with open('user.json', 'w') as file: # json.dump(self, file, sort_keys=True, indent=4) def loadJsonSettings(self): self.loadSettings() # with open('dumpFile.txt', 'r') as file: # settingData = json.loads(file) # # setting_data=json.loads(file, object_hook=dict_to_obj) # va=str(settingData) # try: # dataform = str(settingData).strip("'<>()[]\"` ").replace('\'', '\"') # # struct = json.loads(dataform) # struct=json.loads(dataform, object_hook=dict_to_obj) # except Exception as e: # print(e) # print(settingData) # print('hi') def __repr__(self): return f'Setting:{{workspaces:{self.workspaces},maxWorkspace:{self.maxWorkspace},showWorkspaceSelectionDialog:{self.showWorkspaceSelectionDialog},activeWorkspace :{self.activeWorkspace}}}' def convert_to_dict(obj): """ A function takes in a custom object and returns a dictionary representation of the object. This dict representation includes meta data such as the object's module and class names. 
""" serial = None if isinstance(obj, date): serial = obj.isoformat() else: serial = obj.__module__ # Populate the dictionary with object meta data obj_dict = { "__class__": obj.__class__.__name__, "__module__": serial } # Populate the dictionary with object properties obj_dict.update(obj.__dict__) return obj_dict def dict_to_obj(our_dict): """ Function that takes in a dict and returns a custom object associated with the dict. This function makes use of the "__module__" and "__class__" metadata in the dictionary to know which object type to create. """ if "__class__" in our_dict: # Pop ensures we remove metadata from the dict to leave only the instance arguments class_name = our_dict.pop("__class__") # Get the module name from the dict and import it module_name = our_dict.pop("__module__") # We use the built in __import__ function since the module name is not yet known at runtime module = __import__(module_name) obj = None try: # Get the class from the module class_ = getattr(module, class_name) # Use dictionary unpacking to initialize the object obj = class_(**our_dict) except Exception as e: print(e) else: obj = our_dict return obj class SaveSetting(): def __init__(self): pass def save(self): settings = Setting() settings.loadJsonSettings() # settings.write() # print(settings) # settings.loadSettings() # with open('settings.json', 'w') as file: js = json.dump(settings, file, sort_keys=True, indent=4, default=convert_to_dict) with open('settings.json', 'r') as json_file: settingData = json.load(json_file) dataform = settingData.__str__().strip("'<>() ").replace('\'', '\"') dataform = dataform.replace('None', 'null') dataform = dataform.replace('True', 'true') dataform = dataform.replace('False', 'false') print(dataform) settings_reloaded = json.loads(dataform, object_hook=dict_to_obj) print('compltet') sqlExecuter = SQLExecuter() table = 'project_setting' rows = [{'id':None, 'name':'settings', 'value':dataform, 'description':'last updated value'}] 
sqlExecuter.sqlite_insert(table, rows) if __name__ == '__main__': # settings = Setting() # settings.loadJsonSettings() SaveSetting().save() # settings.write() # print(settings) # settings.loadSettings() # # with open('settings.json', 'w') as file: # js = json.dump(settings, file, sort_keys=True, indent=4, default=convert_to_dict) # with open('settings.json', 'r') as json_file: # settingData = json.load(json_file) # dataform = settingData.__str__().strip("'<>() ").replace('\'', '\"') # dataform = dataform.replace('None', 'null') # dataform = dataform.replace('True', 'true') # dataform = dataform.replace('False', 'false') # print(dataform) # settings_reloaded = json.loads(dataform, object_hook=dict_to_obj) # print('compltet') # # sqlExecuter = SQLExecuter() # table = 'project_setting' # rows = [{'id':None, 'name':'settings', 'value':dataform, 'description':'last updated value'}] # sqlExecuter.sqlite_insert(table, rows)
en
0.526662
Created on Feb 26, 2019 @author: xbbntni # self.projectPath = os.path.join(basePath, projectDirName) # directory path in system # java, javascript, python # self.createdOn = createdOn # def serialize(obj): # """JSON serializer for objects not serializable by default json code""" # # if isinstance(obj, date): # serial = obj.isoformat() # return serial # # return obj.__dict__ # maximum number of workspaces # self.loadSettings() # self.activeWorkspace = self.getActiveWorkspace() # settings = Setting() # def write(self): # with open('user.json', 'w') as file: # json.dump(self, file, sort_keys=True, indent=4) # with open('dumpFile.txt', 'r') as file: # settingData = json.loads(file) # # setting_data=json.loads(file, object_hook=dict_to_obj) # va=str(settingData) # try: # dataform = str(settingData).strip("'<>()[]\"` ").replace('\'', '\"') # # struct = json.loads(dataform) # struct=json.loads(dataform, object_hook=dict_to_obj) # except Exception as e: # print(e) # print(settingData) # print('hi') A function takes in a custom object and returns a dictionary representation of the object. This dict representation includes meta data such as the object's module and class names. # Populate the dictionary with object meta data # Populate the dictionary with object properties Function that takes in a dict and returns a custom object associated with the dict. This function makes use of the "__module__" and "__class__" metadata in the dictionary to know which object type to create. 
# Pop ensures we remove metadata from the dict to leave only the instance arguments # Get the module name from the dict and import it # We use the built in __import__ function since the module name is not yet known at runtime # Get the class from the module # Use dictionary unpacking to initialize the object # settings.write() # print(settings) # settings.loadSettings() # # settings = Setting() # settings.loadJsonSettings() # settings.write() # print(settings) # settings.loadSettings() # # with open('settings.json', 'w') as file: # js = json.dump(settings, file, sort_keys=True, indent=4, default=convert_to_dict) # with open('settings.json', 'r') as json_file: # settingData = json.load(json_file) # dataform = settingData.__str__().strip("'<>() ").replace('\'', '\"') # dataform = dataform.replace('None', 'null') # dataform = dataform.replace('True', 'true') # dataform = dataform.replace('False', 'false') # print(dataform) # settings_reloaded = json.loads(dataform, object_hook=dict_to_obj) # print('compltet') # # sqlExecuter = SQLExecuter() # table = 'project_setting' # rows = [{'id':None, 'name':'settings', 'value':dataform, 'description':'last updated value'}] # sqlExecuter.sqlite_insert(table, rows)
2.535485
3
project/GroundFloor.py
louay-rouabeh/Energy-Management-Live-Dashboard
0
6621453
import dash_core_components as dcc import dash_html_components as html import pandas as pd from dash.dependencies import Output, Input from app import app from dash_extensions import Download from dash_extensions.snippets import send_data_frame ground = pd.read_excel("consumption.xlsx", "Sheet1") content0 = html.Div([html.Button("Download", id="btn"), Download(id="download"), dcc.Graph( figure=dict( data=[ dict( x=ground['Month'], y=ground['consumption'], name='2019', marker=dict( color='rgb(55, 83, 109)' ) ), ], layout=dict( title='consumption ', showlegend=True, legend=dict( x=0, y=1.0 ), margin=dict(l=40, r=0, t=40, b=30) ) ), style={'height': 300}, id='my-graph' )]) @app.callback(Output("download", "data"), [Input("btn", "n_clicks")]) def func(n_clicks): return send_data_frame(ground.to_csv, "groundFloor.csv", index=False)
import dash_core_components as dcc import dash_html_components as html import pandas as pd from dash.dependencies import Output, Input from app import app from dash_extensions import Download from dash_extensions.snippets import send_data_frame ground = pd.read_excel("consumption.xlsx", "Sheet1") content0 = html.Div([html.Button("Download", id="btn"), Download(id="download"), dcc.Graph( figure=dict( data=[ dict( x=ground['Month'], y=ground['consumption'], name='2019', marker=dict( color='rgb(55, 83, 109)' ) ), ], layout=dict( title='consumption ', showlegend=True, legend=dict( x=0, y=1.0 ), margin=dict(l=40, r=0, t=40, b=30) ) ), style={'height': 300}, id='my-graph' )]) @app.callback(Output("download", "data"), [Input("btn", "n_clicks")]) def func(n_clicks): return send_data_frame(ground.to_csv, "groundFloor.csv", index=False)
none
1
2.758678
3
test/py_orchestrator/test_stage.py
abhinav-kumar-thakur/orchestrator
0
6621454
from py_orchestrator.stage import Stage class Adder(Stage): def perform(self, state): return self.configs['a'] + self.configs['b'] class TestStage: def test_run(self): configs = {'a': 1, 'b': 2} assert Adder(configs).perform(None) == 3
from py_orchestrator.stage import Stage class Adder(Stage): def perform(self, state): return self.configs['a'] + self.configs['b'] class TestStage: def test_run(self): configs = {'a': 1, 'b': 2} assert Adder(configs).perform(None) == 3
none
1
2.715396
3
app.py
Healthedata1/pubsub-endpoint
0
6621455
<reponame>Healthedata1/pubsub-endpoint<filename>app.py # A very simple Flask app to get started with using # FHIR Subscriptions # This is a reciever for the FHIR R4 Server URL (https://subscriptions.argo.run/) # with an ednpoint = "http://healthedatainc2.pythonanywhere.com/webhook" # It just saves the subscription notification data to a flat csv file "data.csv" # to initialize the data.csv: # # data = dict( # timestamp = [], #Bundle['timestamp'] # foo = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode'] # status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode'] # topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri'] # event_id = [], # Bundle['entry'][0]['fullUri'] # ) # df = pd.DataFrame(data=data) # df # # file_name = 'data.csv' # df.to_csv(file_name) # print(f'saving {file_name} as csv ...') # my_csv = pd.read_csv(file_name, index_col = 0) # my_csv# # # and display subscription notifications data # the csv file "data.csv" is consantly appended and not created each time from flask import Flask, request, Response, render_template, session import os import logging from datetime import datetime from json import dumps, loads import pandas as pd logging.basicConfig( filename='demo.log', level=logging.DEBUG, format='[%(asctime)s] %(levelname)s in %(module)s %(lineno)d}: %(message)s') app = Flask(__name__) app.config["DEBUG"] = True app.secret_key = 'my_secret_key' file_name = 'data.csv' empty_table = dict( timestamp = [], #Bundle['timestamp'] type = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode'] status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode'] topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri'] event_id = [], # Bundle['entry'][0]['fullUri'] ) #see add_url_rule to conditionally open rest hook.= e.g after subscribing" @app.route('/webhook', methods=['POST']) def respond(): # webhook logic to do something app.logger.info(request.headers) 
app.logger.info(request.json) try: # sometimes is empty bundle_event_id = request.json['entry'][1]['fullUrl'] except IndexError: # if no entry that is OK #app.logger.exception(e) bundle_event_id = None except KeyError: # if no fullUrl that is no good #app.logger.exception(e) return Response(status=400) try: # if these are empty then fail bundle_ts = request.json['timestamp'] params = request.json['entry'][0]['resource']['parameter'] bundle_type = [param['valueCode'] for param in params if param['name']=='type'][0] bundle_status = [param['valueCode'] for param in params if param['name']=='status'][0] bundle_topic = [param['valueUri'] for param in params if param['name']=='topic'][0] except Exception as e: # work on python 3.x #app.logger.exception(e) return Response(status=400) else: df = pd.read_csv(file_name, index_col = 0) my_row = pd.Series( data = [bundle_ts,bundle_type,bundle_status,bundle_topic,bundle_event_id,], index=df.columns, ) #app.logger.info(f'{df.shape[0]} rows') df = df.append(my_row, ignore_index=True) df.to_csv(file_name) #app.logger.info(f'saving {file_name} as csv ...') return Response(status=200) @app.route('/',methods = ['POST', 'GET']) def html_table(): #app.logger.info(f"request.method = {request.method}") if "clear_button" in request.form: #app.logger.info("clear table") df = pd.DataFrame(data=empty_table) df.to_csv(file_name) df = pd.read_csv(file_name, index_col = 0, keep_default_na=False ) #app.logger.info("update table") return render_template('index.html', tables=[df.to_html(classes='data')], titles = df.columns.values,) if __name__ == '__main__': app.run(debug=True)
# A very simple Flask app to get started with using # FHIR Subscriptions # This is a reciever for the FHIR R4 Server URL (https://subscriptions.argo.run/) # with an ednpoint = "http://healthedatainc2.pythonanywhere.com/webhook" # It just saves the subscription notification data to a flat csv file "data.csv" # to initialize the data.csv: # # data = dict( # timestamp = [], #Bundle['timestamp'] # foo = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode'] # status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode'] # topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri'] # event_id = [], # Bundle['entry'][0]['fullUri'] # ) # df = pd.DataFrame(data=data) # df # # file_name = 'data.csv' # df.to_csv(file_name) # print(f'saving {file_name} as csv ...') # my_csv = pd.read_csv(file_name, index_col = 0) # my_csv# # # and display subscription notifications data # the csv file "data.csv" is consantly appended and not created each time from flask import Flask, request, Response, render_template, session import os import logging from datetime import datetime from json import dumps, loads import pandas as pd logging.basicConfig( filename='demo.log', level=logging.DEBUG, format='[%(asctime)s] %(levelname)s in %(module)s %(lineno)d}: %(message)s') app = Flask(__name__) app.config["DEBUG"] = True app.secret_key = 'my_secret_key' file_name = 'data.csv' empty_table = dict( timestamp = [], #Bundle['timestamp'] type = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode'] status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode'] topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri'] event_id = [], # Bundle['entry'][0]['fullUri'] ) #see add_url_rule to conditionally open rest hook.= e.g after subscribing" @app.route('/webhook', methods=['POST']) def respond(): # webhook logic to do something app.logger.info(request.headers) app.logger.info(request.json) try: # sometimes is empty bundle_event_id = 
request.json['entry'][1]['fullUrl'] except IndexError: # if no entry that is OK #app.logger.exception(e) bundle_event_id = None except KeyError: # if no fullUrl that is no good #app.logger.exception(e) return Response(status=400) try: # if these are empty then fail bundle_ts = request.json['timestamp'] params = request.json['entry'][0]['resource']['parameter'] bundle_type = [param['valueCode'] for param in params if param['name']=='type'][0] bundle_status = [param['valueCode'] for param in params if param['name']=='status'][0] bundle_topic = [param['valueUri'] for param in params if param['name']=='topic'][0] except Exception as e: # work on python 3.x #app.logger.exception(e) return Response(status=400) else: df = pd.read_csv(file_name, index_col = 0) my_row = pd.Series( data = [bundle_ts,bundle_type,bundle_status,bundle_topic,bundle_event_id,], index=df.columns, ) #app.logger.info(f'{df.shape[0]} rows') df = df.append(my_row, ignore_index=True) df.to_csv(file_name) #app.logger.info(f'saving {file_name} as csv ...') return Response(status=200) @app.route('/',methods = ['POST', 'GET']) def html_table(): #app.logger.info(f"request.method = {request.method}") if "clear_button" in request.form: #app.logger.info("clear table") df = pd.DataFrame(data=empty_table) df.to_csv(file_name) df = pd.read_csv(file_name, index_col = 0, keep_default_na=False ) #app.logger.info("update table") return render_template('index.html', tables=[df.to_html(classes='data')], titles = df.columns.values,) if __name__ == '__main__': app.run(debug=True)
en
0.529515
# A very simple Flask app to get started with using # FHIR Subscriptions # This is a reciever for the FHIR R4 Server URL (https://subscriptions.argo.run/) # with an ednpoint = "http://healthedatainc2.pythonanywhere.com/webhook" # It just saves the subscription notification data to a flat csv file "data.csv" # to initialize the data.csv: # # data = dict( # timestamp = [], #Bundle['timestamp'] # foo = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode'] # status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode'] # topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri'] # event_id = [], # Bundle['entry'][0]['fullUri'] # ) # df = pd.DataFrame(data=data) # df # # file_name = 'data.csv' # df.to_csv(file_name) # print(f'saving {file_name} as csv ...') # my_csv = pd.read_csv(file_name, index_col = 0) # my_csv# # # and display subscription notifications data # the csv file "data.csv" is consantly appended and not created each time #Bundle['timestamp'] # Bundle['entry'][0]['resource']['parameter'][5]['valueCode'] # Bundle['entry'][0]['resource']['parameter'][4]['valueCode'] # Bundle['entry'][0]['resource']['parameter'][1]['valueUri'] # Bundle['entry'][0]['fullUri'] #see add_url_rule to conditionally open rest hook.= e.g after subscribing" # webhook logic to do something # sometimes is empty # if no entry that is OK #app.logger.exception(e) # if no fullUrl that is no good #app.logger.exception(e) # if these are empty then fail # work on python 3.x #app.logger.exception(e) #app.logger.info(f'{df.shape[0]} rows') #app.logger.info(f'saving {file_name} as csv ...') #app.logger.info(f"request.method = {request.method}") #app.logger.info("clear table") #app.logger.info("update table")
2.498161
2
example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Orthogonal polynomials/laguerre Coefficients of the $n$th order Laguerre polynoimal, $L_n(x)$.py
kuanpern/jupyterlab-snippets-multimenus
0
6621456
special.laguerre(n[, monic])
special.laguerre(n[, monic])
none
1
1.134404
1
website/drawquest/management/commands/initialize_qotd.py
bopopescu/drawquest-web
19
6621457
import datetime from django.core.management.base import BaseCommand from canvas.notifications.actions import Actions from drawquest import knobs, economy from drawquest.apps.quests.models import Quest, ScheduledQuest class Command(BaseCommand): args = '' help = '' def handle(self, *args, **options): if ScheduledQuest.objects.count(): print "You've already got a quest of the day, unless something went very wrong." return try: quest = Quest.objects.all()[0] except IndexError: print "Please create a quest first by visiting http://dq.savnac.com/admin/post_thread" return scheduled_quest = ScheduledQuest.get_or_create(quest) scheduled_quest.set_as_current_quest()
import datetime from django.core.management.base import BaseCommand from canvas.notifications.actions import Actions from drawquest import knobs, economy from drawquest.apps.quests.models import Quest, ScheduledQuest class Command(BaseCommand): args = '' help = '' def handle(self, *args, **options): if ScheduledQuest.objects.count(): print "You've already got a quest of the day, unless something went very wrong." return try: quest = Quest.objects.all()[0] except IndexError: print "Please create a quest first by visiting http://dq.savnac.com/admin/post_thread" return scheduled_quest = ScheduledQuest.get_or_create(quest) scheduled_quest.set_as_current_quest()
none
1
2.241616
2
idgraph/__init__.py
jbn/idgraph
2
6621458
<reponame>jbn/idgraph # -*- coding: utf-8 -*- import argparse import json import os import requests import shlex import sys from IPython.display import Markdown JINJA2_ENABLED = True try: import jinja2 except ImportError: # pragma: no cover JINJA2_ENABLED = False JMESPATH_ENABLED = True try: import jmespath except ImportError: # pragma: no cover JMESPATH_ENABLED = False __author__ = """<NAME>""" __email__ = '<EMAIL>' __version__ = '0.0.1' DEFAULT_ADDR = "localhost:8080" def parse_line_magic_args(line): parser = argparse.ArgumentParser(prog="%%dgraph", description="Cell magic for dgraph", add_help=False) parser.add_argument("--json", dest="as_json", action="store_true", default=False, help="Send command as JSON instead of RDF or GraphQL") parser.add_argument("--addr", dest="addr", default=DEFAULT_ADDR, help="Address of dgraph") parser.add_argument("--mutate", dest="mutate", action="store_true", default=False, help="Do a mutation") parser.add_argument("--alter", dest="alter", action="store_true", default=False, help="Do an alteration") parser.add_argument("--skip", dest="skip", action="store_true", default=False, help="Do not send this cell for execution") parser.add_argument("--into", dest="into", default="", help="store output json into variable") parser.add_argument("--jmespath", dest="jmespath", default="", help="extract response via a jmespath") parser.add_argument("--as-jinja", dest="interpolate_jinja", action="store_true", default=False, help="Interpolate as a jinja2 template") parser.add_argument('--print-jinja', dest='print_jinja', action='store_true', default=False, help="Print interpolated jinja source then bail.") parser.add_argument("--full-resp", dest="full_resp", action="store_true", default=False, help="Report full response instead of data only") args = parser.parse_args(shlex.split(line)) if args.addr == DEFAULT_ADDR: override_addr = os.environ.get('DGRAPH_ADDR') if override_addr: args.addr = override_addr return args def to_markdown_listing(code, 
kind): tmpl = "```{kind}\n{code}\n```\n".format(kind=kind, code=code) return Markdown(data=tmpl) CONTENT_TYPE_MAP = { True: {}, False: { 'alter': 'application/graphql+-', 'mutate': 'application/rdf', 'query': 'application/graphql+-' } } ENDPOINTS = { 'query': "/query", 'mutate': "/mutate?commitNow=true", 'alter': "/alter" } def get_user_ns(): return sys.modules['__main__'] def write_into(name, obj): vars(get_user_ns())[name] = obj JSON_MIME = 'application/json' def load_and_interpolate_jinja2(src, ns): # The FileSystemLoader should operate in the current working directory. # By assumption, extended jinja templates aren't temporary files -- # the user wrote them by hand. They are part of code you would want in # your repository! fs_loader = jinja2.FileSystemLoader(os.getcwd()) tmpl_env = jinja2.Environment(loader=fs_loader, variable_start_string="<<", variable_end_string=">>", block_start_string="<%", block_end_string="%>") # The final template -- the one that may extend a custom template -- # may be in the current directory or in a temporary one. So, it's # passed as a string. tmpl = tmpl_env.from_string(src) return tmpl.render(**ns) def execute_request(args, cell): assert not (args.mutate and args.alter), "It's alter either mutate, not both" kind = 'query' if args.mutate: kind = 'mutate' elif args.alter: kind = 'alter' headers = { 'Content-type': CONTENT_TYPE_MAP[args.as_json].get(kind, JSON_MIME) } url = "http://{}{}".format(args.addr, ENDPOINTS[kind]) return requests.post(url, headers=headers, data=cell.encode('utf8')).json() def dgraph(line, cell): args = parse_line_magic_args(line) if args.skip: return Markdown("Execution skipped. Remove `--skip` flag to execute.") if args.interpolate_jinja: if not JINJA2_ENABLED: print("Please install jinja2", file=sys.stderr) print("$ pip install jinja2", file=sys.stderr) return cell = load_and_interpolate_jinja2(cell, vars(get_user_ns())) # TODO: Find appropriate language override in kind? 
if args.print_jinja: return to_markdown_listing(cell, "text") resp = full_resp = execute_request(args, cell) # If full_resp is not set and there is a data field, limit the returned # response to just that to make things less noisy. If jmespath is set # execute the search and return the result. if args.jmespath != '': if not JMESPATH_ENABLED: print("Please install jmespath", file=sys.stderr) print("$ pip install jmespath", file=sys.stderr) return resp = jmespath.search(args.jmespath, resp) elif not args.full_resp and 'data' in resp: resp = resp['data'] # Mimicking expected behavior with `_` auto-assignment in IPython this # magic assigns the *filtered* result to `_dgraph` and the full result # to `_dgraph_full`. However, if the user specifies an `into` argument # assignment goes to `{into}` and `{into}_full` instead. output_var = args.into if args.into != '' else '_dgraph' write_into(output_var, resp) write_into(output_var + "_full", full_resp) # When an error occurs, this magic writes it to stderr so that it is # clearly a different interaction (should be written to a red # background in Jupyter.) Only the error messages are written. # Full inspection via `_dgraph_full` still possible. if 'errors' in full_resp and not full_resp.get('data'): for error in full_resp['errors']: print(error.get("message"), file=sys.stderr) else: return to_markdown_listing(json.dumps(resp, indent=" "), "json") def load_ipython_extension(ipython): # pragma: no cover ipython.register_magic_function(dgraph, magic_kind='cell')
# -*- coding: utf-8 -*- import argparse import json import os import requests import shlex import sys from IPython.display import Markdown JINJA2_ENABLED = True try: import jinja2 except ImportError: # pragma: no cover JINJA2_ENABLED = False JMESPATH_ENABLED = True try: import jmespath except ImportError: # pragma: no cover JMESPATH_ENABLED = False __author__ = """<NAME>""" __email__ = '<EMAIL>' __version__ = '0.0.1' DEFAULT_ADDR = "localhost:8080" def parse_line_magic_args(line): parser = argparse.ArgumentParser(prog="%%dgraph", description="Cell magic for dgraph", add_help=False) parser.add_argument("--json", dest="as_json", action="store_true", default=False, help="Send command as JSON instead of RDF or GraphQL") parser.add_argument("--addr", dest="addr", default=DEFAULT_ADDR, help="Address of dgraph") parser.add_argument("--mutate", dest="mutate", action="store_true", default=False, help="Do a mutation") parser.add_argument("--alter", dest="alter", action="store_true", default=False, help="Do an alteration") parser.add_argument("--skip", dest="skip", action="store_true", default=False, help="Do not send this cell for execution") parser.add_argument("--into", dest="into", default="", help="store output json into variable") parser.add_argument("--jmespath", dest="jmespath", default="", help="extract response via a jmespath") parser.add_argument("--as-jinja", dest="interpolate_jinja", action="store_true", default=False, help="Interpolate as a jinja2 template") parser.add_argument('--print-jinja', dest='print_jinja', action='store_true', default=False, help="Print interpolated jinja source then bail.") parser.add_argument("--full-resp", dest="full_resp", action="store_true", default=False, help="Report full response instead of data only") args = parser.parse_args(shlex.split(line)) if args.addr == DEFAULT_ADDR: override_addr = os.environ.get('DGRAPH_ADDR') if override_addr: args.addr = override_addr return args def to_markdown_listing(code, kind): tmpl = 
"```{kind}\n{code}\n```\n".format(kind=kind, code=code) return Markdown(data=tmpl) CONTENT_TYPE_MAP = { True: {}, False: { 'alter': 'application/graphql+-', 'mutate': 'application/rdf', 'query': 'application/graphql+-' } } ENDPOINTS = { 'query': "/query", 'mutate': "/mutate?commitNow=true", 'alter': "/alter" } def get_user_ns(): return sys.modules['__main__'] def write_into(name, obj): vars(get_user_ns())[name] = obj JSON_MIME = 'application/json' def load_and_interpolate_jinja2(src, ns): # The FileSystemLoader should operate in the current working directory. # By assumption, extended jinja templates aren't temporary files -- # the user wrote them by hand. They are part of code you would want in # your repository! fs_loader = jinja2.FileSystemLoader(os.getcwd()) tmpl_env = jinja2.Environment(loader=fs_loader, variable_start_string="<<", variable_end_string=">>", block_start_string="<%", block_end_string="%>") # The final template -- the one that may extend a custom template -- # may be in the current directory or in a temporary one. So, it's # passed as a string. tmpl = tmpl_env.from_string(src) return tmpl.render(**ns) def execute_request(args, cell): assert not (args.mutate and args.alter), "It's alter either mutate, not both" kind = 'query' if args.mutate: kind = 'mutate' elif args.alter: kind = 'alter' headers = { 'Content-type': CONTENT_TYPE_MAP[args.as_json].get(kind, JSON_MIME) } url = "http://{}{}".format(args.addr, ENDPOINTS[kind]) return requests.post(url, headers=headers, data=cell.encode('utf8')).json() def dgraph(line, cell): args = parse_line_magic_args(line) if args.skip: return Markdown("Execution skipped. Remove `--skip` flag to execute.") if args.interpolate_jinja: if not JINJA2_ENABLED: print("Please install jinja2", file=sys.stderr) print("$ pip install jinja2", file=sys.stderr) return cell = load_and_interpolate_jinja2(cell, vars(get_user_ns())) # TODO: Find appropriate language override in kind? 
if args.print_jinja: return to_markdown_listing(cell, "text") resp = full_resp = execute_request(args, cell) # If full_resp is not set and there is a data field, limit the returned # response to just that to make things less noisy. If jmespath is set # execute the search and return the result. if args.jmespath != '': if not JMESPATH_ENABLED: print("Please install jmespath", file=sys.stderr) print("$ pip install jmespath", file=sys.stderr) return resp = jmespath.search(args.jmespath, resp) elif not args.full_resp and 'data' in resp: resp = resp['data'] # Mimicking expected behavior with `_` auto-assignment in IPython this # magic assigns the *filtered* result to `_dgraph` and the full result # to `_dgraph_full`. However, if the user specifies an `into` argument # assignment goes to `{into}` and `{into}_full` instead. output_var = args.into if args.into != '' else '_dgraph' write_into(output_var, resp) write_into(output_var + "_full", full_resp) # When an error occurs, this magic writes it to stderr so that it is # clearly a different interaction (should be written to a red # background in Jupyter.) Only the error messages are written. # Full inspection via `_dgraph_full` still possible. if 'errors' in full_resp and not full_resp.get('data'): for error in full_resp['errors']: print(error.get("message"), file=sys.stderr) else: return to_markdown_listing(json.dumps(resp, indent=" "), "json") def load_ipython_extension(ipython): # pragma: no cover ipython.register_magic_function(dgraph, magic_kind='cell')
en
0.878098
# -*- coding: utf-8 -*- # pragma: no cover # pragma: no cover <NAME> # The FileSystemLoader should operate in the current working directory. # By assumption, extended jinja templates aren't temporary files -- # the user wrote them by hand. They are part of code you would want in # your repository! # The final template -- the one that may extend a custom template -- # may be in the current directory or in a temporary one. So, it's # passed as a string. # TODO: Find appropriate language override in kind? # If full_resp is not set and there is a data field, limit the returned # response to just that to make things less noisy. If jmespath is set # execute the search and return the result. # Mimicking expected behavior with `_` auto-assignment in IPython this # magic assigns the *filtered* result to `_dgraph` and the full result # to `_dgraph_full`. However, if the user specifies an `into` argument # assignment goes to `{into}` and `{into}_full` instead. # When an error occurs, this magic writes it to stderr so that it is # clearly a different interaction (should be written to a red # background in Jupyter.) Only the error messages are written. # Full inspection via `_dgraph_full` still possible. # pragma: no cover
2.334485
2
tests/test_charm.py
xavpaice/lma-proxy-operator
0
6621459
# Copyright 2021 Canonical # See LICENSE file for licensing details. # # Learn more about testing at: https://juju.is/docs/sdk/testing import unittest from charm import LMAProxyCharm from ops.model import ActiveStatus, BlockedStatus from ops.testing import Harness class LMAProxyCharmTest(unittest.TestCase): def setUp(self): self.harness = Harness(LMAProxyCharm) self.harness.set_model_info( name="testmodel", uuid="1234567890" ) self.addCleanup(self.harness.cleanup) self.harness.begin() def test_prometheus_target_relation_without_upstream(self): rel_id = self.harness.add_relation("prometheus-target", "target-app") self.harness.add_relation_unit(rel_id, "target-app/0") self.harness.update_relation_data( rel_id, "target-app/0", { "hostname": "scrape_target_0", "port": "1234", }, ) self.harness.add_relation_unit(rel_id, "target-app/1") self.harness.update_relation_data( rel_id, "target-app/1", { "hostname": "scrape_target_1", "port": "1234", }, ) self.assertIn( { "job_name": "juju_testmodel_1234567_target-app/0_prometheus_scrape", "static_configs": { "targets": ["scrape_target_0:1234"], "labels": { "juju_model": "testmodel", "juju_model_uuid": "1234567890", "juju_application": "target-app", "juju_unit": "target-app/0" } } }, self.harness.charm._stored.scrape_jobs ) self.assertIn( { "job_name": "juju_testmodel_1234567_target-app/1_prometheus_scrape", "static_configs": { "targets": ["scrape_target_1:1234"], "labels": { "juju_model": "testmodel", "juju_model_uuid": "1234567890", "juju_application": "target-app", "juju_unit": "target-app/1" } } }, self.harness.charm._stored.scrape_jobs ) self.assertEqual( self.harness.model.unit.status, BlockedStatus("Missing needed upstream relations: upstream-prometheus-scrape") ) def test_prometheus_target_relation_with_upstream(self): self.harness.add_relation("upstream-prometheus-scrape", "lma-prometheus") rel_id = self.harness.add_relation("prometheus-target", "target-app") self.harness.add_relation_unit(rel_id, "target-app/0") 
self.harness.update_relation_data( rel_id, "target-app/0", { "hostname": "scrape_target_0", "port": "1234", }, ) self.harness.add_relation_unit(rel_id, "target-app/1") self.harness.update_relation_data( rel_id, "target-app/1", { "hostname": "scrape_target_1", "port": "1234", }, ) self.assertEqual(len(self.harness.charm._stored.scrape_jobs), 2) self.assertEqual(self.harness.model.unit.status, ActiveStatus())
# Copyright 2021 Canonical # See LICENSE file for licensing details. # # Learn more about testing at: https://juju.is/docs/sdk/testing import unittest from charm import LMAProxyCharm from ops.model import ActiveStatus, BlockedStatus from ops.testing import Harness class LMAProxyCharmTest(unittest.TestCase): def setUp(self): self.harness = Harness(LMAProxyCharm) self.harness.set_model_info( name="testmodel", uuid="1234567890" ) self.addCleanup(self.harness.cleanup) self.harness.begin() def test_prometheus_target_relation_without_upstream(self): rel_id = self.harness.add_relation("prometheus-target", "target-app") self.harness.add_relation_unit(rel_id, "target-app/0") self.harness.update_relation_data( rel_id, "target-app/0", { "hostname": "scrape_target_0", "port": "1234", }, ) self.harness.add_relation_unit(rel_id, "target-app/1") self.harness.update_relation_data( rel_id, "target-app/1", { "hostname": "scrape_target_1", "port": "1234", }, ) self.assertIn( { "job_name": "juju_testmodel_1234567_target-app/0_prometheus_scrape", "static_configs": { "targets": ["scrape_target_0:1234"], "labels": { "juju_model": "testmodel", "juju_model_uuid": "1234567890", "juju_application": "target-app", "juju_unit": "target-app/0" } } }, self.harness.charm._stored.scrape_jobs ) self.assertIn( { "job_name": "juju_testmodel_1234567_target-app/1_prometheus_scrape", "static_configs": { "targets": ["scrape_target_1:1234"], "labels": { "juju_model": "testmodel", "juju_model_uuid": "1234567890", "juju_application": "target-app", "juju_unit": "target-app/1" } } }, self.harness.charm._stored.scrape_jobs ) self.assertEqual( self.harness.model.unit.status, BlockedStatus("Missing needed upstream relations: upstream-prometheus-scrape") ) def test_prometheus_target_relation_with_upstream(self): self.harness.add_relation("upstream-prometheus-scrape", "lma-prometheus") rel_id = self.harness.add_relation("prometheus-target", "target-app") self.harness.add_relation_unit(rel_id, "target-app/0") 
self.harness.update_relation_data( rel_id, "target-app/0", { "hostname": "scrape_target_0", "port": "1234", }, ) self.harness.add_relation_unit(rel_id, "target-app/1") self.harness.update_relation_data( rel_id, "target-app/1", { "hostname": "scrape_target_1", "port": "1234", }, ) self.assertEqual(len(self.harness.charm._stored.scrape_jobs), 2) self.assertEqual(self.harness.model.unit.status, ActiveStatus())
en
0.628941
# Copyright 2021 Canonical # See LICENSE file for licensing details. # # Learn more about testing at: https://juju.is/docs/sdk/testing
1.937857
2
src/robotec_sensor_processing/devices/daily_bms.py
robotec-ua/robotec_sensor_processing
0
6621460
<filename>src/robotec_sensor_processing/devices/daily_bms.py<gh_stars>0 # -*- coding: utf-8 -*- """ Class DailyBMS The class is dedicated to provide a functionality of Daily BMS (Battery Management System) to control battery status. """ from .bms import GenericBMS class DailyBMS(GenericBMS): """ Constructor. Delegates the parameters to the superclass. """ def __init__(self, protocol, message, topic, data_size, capacity, banks): super().__init__(protocol, message, topic, data_size, capacity, banks) """ Calculating the overall value of voltage. The method opens the serial port, reads and verifies data from it, processes it and then closes the port. """ def calculateVoltage(self): # data = bytearray(communicate(self._voltage_command, self._data_size)) # Check if data is correct if len(data) != self._data_size: raise ValueError("Data is not correct!") # Calculate the main voltage voltage = float(((data[4]<<8)|data[5]))/10 return voltage """ Calculating voltages of the banks. The method opens the serial port, reads and verifies data from it, processes it (proessing differs from the method of processing the overall voltage) and then closes the port. 
""" def calculateBanksVoltage(self): voltages = [] # Array of banks' voltages index = 0 # Index of bank array_amount = self._banks / 3 # Every response contains data of 3 banks # data = communicate(self._banks_command, self._data_size * array_amount) # Check if data is correct if len(data) != self._data_size: raise ValueError("Data is not correct!") # Calculate all the voltages for array_index in range(0, array_amount): # Get voltage data array (without unused information) voltage_data = data[array_index * self._data_size + 5 : array_index * self._data_size + 10] # Read data from the given response for data_index in range(0, 3): # Get voltage from the data voltage = (voltage_data[data_index] << 8) voltage = voltage | voltage_data[data_index + 1] # Append a new-found voltage to the array voltage = float(voltage) / 1000 voltages.append(voltage) # Increase the index to save the next bank's voltage index += 1 return voltages """ Method for calculating the overall charge of battery """ def calculateCharge(self, voltage): charge = int((self._message.voltage-26.1)/0.036) return charge """ Create a message to publish """ def createMessage(self): self._message.voltage = calculateVoltage() self._message.charge = calculateCharge(self._message.voltage) self._message.banks_voltage = calculateBanksVoltage() return self._message
<filename>src/robotec_sensor_processing/devices/daily_bms.py<gh_stars>0 # -*- coding: utf-8 -*- """ Class DailyBMS The class is dedicated to provide a functionality of Daily BMS (Battery Management System) to control battery status. """ from .bms import GenericBMS class DailyBMS(GenericBMS): """ Constructor. Delegates the parameters to the superclass. """ def __init__(self, protocol, message, topic, data_size, capacity, banks): super().__init__(protocol, message, topic, data_size, capacity, banks) """ Calculating the overall value of voltage. The method opens the serial port, reads and verifies data from it, processes it and then closes the port. """ def calculateVoltage(self): # data = bytearray(communicate(self._voltage_command, self._data_size)) # Check if data is correct if len(data) != self._data_size: raise ValueError("Data is not correct!") # Calculate the main voltage voltage = float(((data[4]<<8)|data[5]))/10 return voltage """ Calculating voltages of the banks. The method opens the serial port, reads and verifies data from it, processes it (proessing differs from the method of processing the overall voltage) and then closes the port. 
""" def calculateBanksVoltage(self): voltages = [] # Array of banks' voltages index = 0 # Index of bank array_amount = self._banks / 3 # Every response contains data of 3 banks # data = communicate(self._banks_command, self._data_size * array_amount) # Check if data is correct if len(data) != self._data_size: raise ValueError("Data is not correct!") # Calculate all the voltages for array_index in range(0, array_amount): # Get voltage data array (without unused information) voltage_data = data[array_index * self._data_size + 5 : array_index * self._data_size + 10] # Read data from the given response for data_index in range(0, 3): # Get voltage from the data voltage = (voltage_data[data_index] << 8) voltage = voltage | voltage_data[data_index + 1] # Append a new-found voltage to the array voltage = float(voltage) / 1000 voltages.append(voltage) # Increase the index to save the next bank's voltage index += 1 return voltages """ Method for calculating the overall charge of battery """ def calculateCharge(self, voltage): charge = int((self._message.voltage-26.1)/0.036) return charge """ Create a message to publish """ def createMessage(self): self._message.voltage = calculateVoltage() self._message.charge = calculateCharge(self._message.voltage) self._message.banks_voltage = calculateBanksVoltage() return self._message
en
0.833888
# -*- coding: utf-8 -*- Class DailyBMS The class is dedicated to provide a functionality of Daily BMS (Battery Management System) to control battery status. Constructor. Delegates the parameters to the superclass. Calculating the overall value of voltage. The method opens the serial port, reads and verifies data from it, processes it and then closes the port. # # Check if data is correct # Calculate the main voltage Calculating voltages of the banks. The method opens the serial port, reads and verifies data from it, processes it (proessing differs from the method of processing the overall voltage) and then closes the port. # Array of banks' voltages # Index of bank # Every response contains data of 3 banks # # Check if data is correct # Calculate all the voltages # Get voltage data array (without unused information) # Read data from the given response # Get voltage from the data # Append a new-found voltage to the array # Increase the index to save the next bank's voltage Method for calculating the overall charge of battery Create a message to publish
3.257563
3
Codewars/8kyu/fix-the-bugs-syntax-my-first-kata/Python/solution1.py
RevansChen/online-judge
7
6621461
<reponame>RevansChen/online-judge # Python - 2.7.6 def my_first_kata(a, b): if (type(a) != int) or (type(b) != int): return False else: return a % b + b % a
# Python - 2.7.6 def my_first_kata(a, b): if (type(a) != int) or (type(b) != int): return False else: return a % b + b % a
en
0.123735
# Python - 2.7.6
3.501208
4
test/test_dada_fildb.py
loostrum/psrdada_filterbankdb
0
6621462
<filename>test/test_dada_fildb.py import os import unittest import time import multiprocessing as mp import numpy as np from psrdada import Reader, Writer from dada_fildb import dada_fildb from dada_fildb.sigproc import SigprocFile class TestDadaFildb(unittest.TestCase): def setUp(self): """ Set configuration, create filterbank files """ self.nbeam = 2 self.nfreq = 384 self.pagesize = 1024 self.npage = 3 self.dada_key = 'dada' self.buffer = None self.files = [] for beam in range(self.nbeam): self.files.append(self.create_filterbank(beam)) def create_filterbank(self, beam): """ Create a test filterbank file :return: path to file, pointer to file """ fname = f'test_beam{beam:02d}.fil' header = {'rawdatafile': fname, 'source_name': 'FAKE', 'machine_id': 15, 'barycentric': 0, 'telescope_id': 10, 'src_raj': 0., 'src_dej': 0., 'az_start': 0., 'za_start': 0., 'data_type': 1, 'fch1': 1520., 'foff': -1., 'nchans': 384, 'nbeams': self.nbeam, 'ibeam': beam, 'nbits': 8, 'tstart': 55000.0, 'tsamp': 1e-3, 'nifs': 1} filterbank = SigprocFile.new_file(fname, header) # add some data for page in range(self.npage): # data is increasing values in time and freq in each page, multiplied by page and beam index (1-based) data = (np.arange(self.pagesize)[:, None] * np.arange(self.nfreq)[None, :] * (beam + 1) * (page + 1)).astype(np.uint8) filterbank.append_spectra(data, fname) return fname, filterbank def create_ringbuffer(self): """ Create a PSRDADA ringbuffer :return: """ hdr_size = 40960 buffer_size = self.nbeam * self.nfreq * self.pagesize nbuffer = 5 nreader = 1 cmd = f'dada_db -w -a {hdr_size} -b {buffer_size} -k {self.dada_key} -n {nbuffer} -r {nreader}' self.buffer = mp.Process(target=os.system, args=(cmd,)) self.buffer.start() time.sleep(.1) def tearDown(self): """ Remove any remaining buffers and files """ try: self.buffer.terminate() except AttributeError: pass for fname, filterbank in self.files: try: filterbank.fp.close() os.remove(fname) except FileNotFoundError: pass def 
test_dada_fildb(self): """ Write filterbank to buffer and read back the header and data """ # create a buffer self.create_ringbuffer() # start dada_fildb dada_fildb(np.transpose(self.files)[0], self.dada_key, order='FT', pagesize=self.pagesize) # init reader reader = Reader(int(self.dada_key, 16)) # read header header = reader.getHeader() # remove raw header del header['__RAW_HEADER__'] # read data ind = 0 for page in reader: data = np.asarray(page) # calculate expected sum: each point is product of time, freq, page, beam index, mod 255 (i.e. 2**nbit-1) expected_total = (np.arange(self.pagesize)[:, None, None] * np.arange(self.nfreq)[None, :, None] * np.arange(1, self.nbeam + 1)[None, None, :] * (ind + 1)).astype(np.uint8).sum() self.assertEqual(data.sum(), expected_total) ind += 1 # disconnect reader.disconnect() if __name__ == '__main__': unittest.main()
<filename>test/test_dada_fildb.py import os import unittest import time import multiprocessing as mp import numpy as np from psrdada import Reader, Writer from dada_fildb import dada_fildb from dada_fildb.sigproc import SigprocFile class TestDadaFildb(unittest.TestCase): def setUp(self): """ Set configuration, create filterbank files """ self.nbeam = 2 self.nfreq = 384 self.pagesize = 1024 self.npage = 3 self.dada_key = 'dada' self.buffer = None self.files = [] for beam in range(self.nbeam): self.files.append(self.create_filterbank(beam)) def create_filterbank(self, beam): """ Create a test filterbank file :return: path to file, pointer to file """ fname = f'test_beam{beam:02d}.fil' header = {'rawdatafile': fname, 'source_name': 'FAKE', 'machine_id': 15, 'barycentric': 0, 'telescope_id': 10, 'src_raj': 0., 'src_dej': 0., 'az_start': 0., 'za_start': 0., 'data_type': 1, 'fch1': 1520., 'foff': -1., 'nchans': 384, 'nbeams': self.nbeam, 'ibeam': beam, 'nbits': 8, 'tstart': 55000.0, 'tsamp': 1e-3, 'nifs': 1} filterbank = SigprocFile.new_file(fname, header) # add some data for page in range(self.npage): # data is increasing values in time and freq in each page, multiplied by page and beam index (1-based) data = (np.arange(self.pagesize)[:, None] * np.arange(self.nfreq)[None, :] * (beam + 1) * (page + 1)).astype(np.uint8) filterbank.append_spectra(data, fname) return fname, filterbank def create_ringbuffer(self): """ Create a PSRDADA ringbuffer :return: """ hdr_size = 40960 buffer_size = self.nbeam * self.nfreq * self.pagesize nbuffer = 5 nreader = 1 cmd = f'dada_db -w -a {hdr_size} -b {buffer_size} -k {self.dada_key} -n {nbuffer} -r {nreader}' self.buffer = mp.Process(target=os.system, args=(cmd,)) self.buffer.start() time.sleep(.1) def tearDown(self): """ Remove any remaining buffers and files """ try: self.buffer.terminate() except AttributeError: pass for fname, filterbank in self.files: try: filterbank.fp.close() os.remove(fname) except FileNotFoundError: pass def 
test_dada_fildb(self): """ Write filterbank to buffer and read back the header and data """ # create a buffer self.create_ringbuffer() # start dada_fildb dada_fildb(np.transpose(self.files)[0], self.dada_key, order='FT', pagesize=self.pagesize) # init reader reader = Reader(int(self.dada_key, 16)) # read header header = reader.getHeader() # remove raw header del header['__RAW_HEADER__'] # read data ind = 0 for page in reader: data = np.asarray(page) # calculate expected sum: each point is product of time, freq, page, beam index, mod 255 (i.e. 2**nbit-1) expected_total = (np.arange(self.pagesize)[:, None, None] * np.arange(self.nfreq)[None, :, None] * np.arange(1, self.nbeam + 1)[None, None, :] * (ind + 1)).astype(np.uint8).sum() self.assertEqual(data.sum(), expected_total) ind += 1 # disconnect reader.disconnect() if __name__ == '__main__': unittest.main()
en
0.74257
Set configuration, create filterbank files Create a test filterbank file :return: path to file, pointer to file # add some data # data is increasing values in time and freq in each page, multiplied by page and beam index (1-based) Create a PSRDADA ringbuffer :return: Remove any remaining buffers and files Write filterbank to buffer and read back the header and data # create a buffer # start dada_fildb # init reader # read header # remove raw header # read data # calculate expected sum: each point is product of time, freq, page, beam index, mod 255 (i.e. 2**nbit-1) # disconnect
2.365953
2
nova3/engines/skytorrents.py
chr0nu5/qBittorrent-Plugins-Easy-Install
0
6621463
# -*- coding: utf-8 -*- #VERSION: 2.0 #AUTHORS: <NAME> (<EMAIL>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. try: from html.parser import HTMLParser except ImportError: from HTMLParser import HTMLParser # import qBT modules try: from novaprinter import prettyPrinter from helpers import retrieve_url except ImportError: pass class skytorrents(object): """Class used by qBittorrent to search for torrents""" url = 'https://www.skytorrents.lol/' name = 'Sky Torrents LOL' # defines which search categories are supported by this search engine # and their corresponding id. 
Possible categories are: # 'all', 'movies', 'tv', 'music', 'games', 'anime', 'software', 'pictures', # 'books' supported_categories = {'all': 'all'} class SkySearchParser(HTMLParser): """ Parses Template browse page for search results and prints them""" def __init__(self, results, url): self._url = url try: super().__init__() except: # See: http://stackoverflow.com/questions/9698614/ HTMLParser.__init__(self) self.results = results self.engine_url = url self.curr = None self.catch_name = False self.td_counter = 0 def handle_starttag(self, tag, attr): if tag == 'a': self.handle_a(attr) def handle_endtag(self, tag): if tag == 'td': self.handle_td() def handle_a(self, attr): attr = dict(attr) if 'href' in attr: if 'info/' in attr['href']: res = {'desc_link': urljoin(self.engine_url, attr['href']), 'engine_url': self.engine_url} self.catch_name = True self.curr = self.curr or res elif attr['href'].startswith('magnet:'): self.curr['link'] = attr['href'] def handle_td(self): self.td_counter += 1 # we've caught all info, add it to the results # then reset the counters for the next result if self.td_counter > 5: if self.curr['seeds'] or self.curr['leech']: # filter noise self.results.append(self.curr) self.curr = None self.td_counter = 0 def handle_data(self, data): if self.catch_name: self.curr['name'] = data.strip() self.catch_name = False elif self.td_counter == 1: self.curr['size'] = data.strip() elif self.td_counter == 4: try: self.curr['seeds'] = int(data.strip()) except ValueError: self.curr['seeds'] = -1 elif self.td_counter == 5: try: self.curr['leech'] = int(data.strip()) except ValueError: self.curr['leech'] = -1 # DO NOT CHANGE the name and parameters of this function # This function will be the one called by nova2.py def search(self, what, cat='all'): """ Retreive and parse engine search results by category and query. Parameters: :param what: a string with the search tokens, already escaped (e.g. 
"Ubuntu+Linux") :param cat: the name of a search category, see supported_categories. """ results = [] page = 1 parser = self.SkySearchParser(results, self.url) while True: url = str( "{site}?query={query}&page={page}" .format(site=self.url, page=page, query=what)) res = retrieve_url(url) parser.feed(res) if not results: break for each in results: prettyPrinter(each) del results[:] page += 1 parser.close() if __name__ == '__main__': skytorrents().search('red+alert')
# -*- coding: utf-8 -*- #VERSION: 2.0 #AUTHORS: <NAME> (<EMAIL>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. try: from html.parser import HTMLParser except ImportError: from HTMLParser import HTMLParser # import qBT modules try: from novaprinter import prettyPrinter from helpers import retrieve_url except ImportError: pass class skytorrents(object): """Class used by qBittorrent to search for torrents""" url = 'https://www.skytorrents.lol/' name = 'Sky Torrents LOL' # defines which search categories are supported by this search engine # and their corresponding id. 
Possible categories are: # 'all', 'movies', 'tv', 'music', 'games', 'anime', 'software', 'pictures', # 'books' supported_categories = {'all': 'all'} class SkySearchParser(HTMLParser): """ Parses Template browse page for search results and prints them""" def __init__(self, results, url): self._url = url try: super().__init__() except: # See: http://stackoverflow.com/questions/9698614/ HTMLParser.__init__(self) self.results = results self.engine_url = url self.curr = None self.catch_name = False self.td_counter = 0 def handle_starttag(self, tag, attr): if tag == 'a': self.handle_a(attr) def handle_endtag(self, tag): if tag == 'td': self.handle_td() def handle_a(self, attr): attr = dict(attr) if 'href' in attr: if 'info/' in attr['href']: res = {'desc_link': urljoin(self.engine_url, attr['href']), 'engine_url': self.engine_url} self.catch_name = True self.curr = self.curr or res elif attr['href'].startswith('magnet:'): self.curr['link'] = attr['href'] def handle_td(self): self.td_counter += 1 # we've caught all info, add it to the results # then reset the counters for the next result if self.td_counter > 5: if self.curr['seeds'] or self.curr['leech']: # filter noise self.results.append(self.curr) self.curr = None self.td_counter = 0 def handle_data(self, data): if self.catch_name: self.curr['name'] = data.strip() self.catch_name = False elif self.td_counter == 1: self.curr['size'] = data.strip() elif self.td_counter == 4: try: self.curr['seeds'] = int(data.strip()) except ValueError: self.curr['seeds'] = -1 elif self.td_counter == 5: try: self.curr['leech'] = int(data.strip()) except ValueError: self.curr['leech'] = -1 # DO NOT CHANGE the name and parameters of this function # This function will be the one called by nova2.py def search(self, what, cat='all'): """ Retreive and parse engine search results by category and query. Parameters: :param what: a string with the search tokens, already escaped (e.g. 
"Ubuntu+Linux") :param cat: the name of a search category, see supported_categories. """ results = [] page = 1 parser = self.SkySearchParser(results, self.url) while True: url = str( "{site}?query={query}&page={page}" .format(site=self.url, page=page, query=what)) res = retrieve_url(url) parser.feed(res) if not results: break for each in results: prettyPrinter(each) del results[:] page += 1 parser.close() if __name__ == '__main__': skytorrents().search('red+alert')
en
0.81361
# -*- coding: utf-8 -*- #VERSION: 2.0 #AUTHORS: <NAME> (<EMAIL>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # import qBT modules Class used by qBittorrent to search for torrents # defines which search categories are supported by this search engine # and their corresponding id. Possible categories are: # 'all', 'movies', 'tv', 'music', 'games', 'anime', 'software', 'pictures', # 'books' Parses Template browse page for search results and prints them # See: http://stackoverflow.com/questions/9698614/ # we've caught all info, add it to the results # then reset the counters for the next result # filter noise # DO NOT CHANGE the name and parameters of this function # This function will be the one called by nova2.py Retreive and parse engine search results by category and query. Parameters: :param what: a string with the search tokens, already escaped (e.g. "Ubuntu+Linux") :param cat: the name of a search category, see supported_categories.
2.597398
3
__init__.py
stobinaator/virtual-assistant
0
6621464
# -*- coding: utf-8 -*- from __future__ import print_function import requests import json import datetime import pickle import os.path import webbrowser import random import wikipedia import time import os from time import ctime from configparser import ConfigParser import speech_recognition as sr from gtts import gTTS from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from googleapiclient.discovery import build #Read config.ini file config_object = ConfigParser() config_object.read("config.ini") # accessing information from config.ini apis = config_object["APIS"] scopes = config_object["SCOPES"] keys = config_object["KEYS"] with open('greetings.json') as greet_f: data = json.load(greet_f) GREETING_RESPONSES = data['greeting responses'] GREETING_NAMES = data['greeting names'] times = 0 listening = True def listen(): global times global listening r = sr.Recognizer() with sr.Microphone() as source: print("I am listening..." + "\n") audio = r.listen(source) data = "" try: data = r.recognize_google(audio) print("You said: " + data + "\n") times = 0 except sr.UnknownValueError: if times < 4: print("Google Speech Recognition did not understand audio" + "\n") times += 1 else: data = "stop listening" except sr.RequestError as e: print("Request Failed; {0}".format(e)) return data def respond(audioString): print(audioString + "\n") tts = gTTS(text=audioString, lang='en') tts.save("speech.mp3") os.system("mpg321 -q speech.mp3") time.sleep(2) def calendar(): """Shows basic usage of the Google Calendar API. Prints the start and name of the next 10 events on the user's calendar. """ creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. 
try: if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'config.ini', scopes["google_calendar"]) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) service = build('calendar', 'v3', credentials=creds) # Call the Calendar API now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time print('Getting the upcoming 5 events...') events_result = service.events().list(calendarId='primary', timeMin=now, maxResults=5, singleEvents=True, orderBy='startTime').execute() events = events_result.get('items', []) if not events: print('No upcoming events found.') for event in events: start = event['start'].get('dateTime', event['start'].get('date')) print(start, event['summary']) except json.decoder.JSONDecodeError: print("whoops..something went wrong.") def weather(data): listening = True data = data.split(" ") location = str(data[5]) units = "metric" url = apis["weather_api"] + "appid=" + keys["weather_api_key"] + "&q=" + location + "&units=" + units js = requests.get(url).json() if js["cod"] != "404": weather = js["main"] temp = weather["temp"] temp_min = weather["temp_min"] temp_max = weather["temp_max"] temp_feels = weather["feels_like"] #hum = weather["humidity"] desc = js["weather"][0]["description"] resp_string = " The temperature in {0} in Celsius is ".format(location) \ + str(temp) + "°C. But it really feels like " + str(temp_feels) \ + "°C. Minimum is " + str(temp_min) + " and Maximum is " + str(temp_max) \ + "°C. 
The weather description is: " + str(desc) respond(resp_string) else: respond("City not found") def maps(data): listening = True data = data.split(" ") location_url = "https://www.google.com/maps/place/" + str(data[2]) + "," \ + str(data[3]) respond("Hold on Stoyan, I will show you where " + data[2] + "," + data[3] + " is.") webbrowser.open(location_url) def sites(s): listening=True respond("Redirecting to " + s.upper() + "\n") if s == "golem": webbrowser.open("https://www." + s + ".de", new=2) else: webbrowser.open("https://www." + s + ".com", new=2) def search(data): listening = True data = data.split(" ") words = data[1:] sentence = "" for word in words: sentence += word sentence += "+" location_url = "https://www.google.com/search?q=" + sentence respond("Let me Google that for you..." + "\n") webbrowser.open(location_url, new=2) def open(data): listening = True data = data.split(" ") app = str(data[1]) if app == 'Messenger': arg = '/usr/bin/open -a "/Applications/Messenger.app"' os.system(arg) elif app == 'Spotify' or app == 'spotify': arg = '/usr/bin/open -a "/Applications/Spotify.app"' os.system(arg) def choices(): print("Which site would you like to visit?") print("1. Reddit") print("2. BBC") print("3. Golem") print("4. Polygon") def chuck_norris_joke(): listening = True json = requests.get(apis["chuck_api"]).json() print("Here is a joke.." + "\n") respond(json["value"]) def random_number_facts(): listening = True js = requests.get(apis["trivia_api"]) js2 = requests.get(apis["date_api"]) respond(js.content.decode('utf-8')) print("\n") respond(js2.content.decode('utf-8')) def random_advice(): listening = True js = requests.get(apis["advice_api"]).json() print("Here is an advice.." 
+ "\n") respond(js["slip"]["advice"]) # Function to get a person first and last name def get_person(data): try: wordList = data.split(" ") # Split the text into a list of words for i in range(0, len(wordList)): if i + 3 <= len(wordList) - 1 and wordList[i].lower() == 'who' and wordList[i + 1].lower() == 'is': person = wordList[i + 2] + ' ' + wordList[i + 3] wiki = wikipedia.summary(person, sentences=2) respond(wiki) except wikipedia.exceptions.PageError: print("wooops. didn't get that. can you repeat?") def digital_assistant(data): global listening if "who is" in data: listening = True get_person(data) if "how are you" in data: listening = True respond("I am well") if "what time is it" in data: listening = True respond(ctime()) if "where is" in data: maps(data) if "what is the weather in" in data: weather(data) if "search" in data: search(data) if "calendar" in data: calendar() if "open" in data: open(data) if "sites" in data: choices() if "Reddit" in data: sites("reddit") elif "BBC" in data: sites("bbc") elif "golem" in data: sites("golem") elif "polygon" in data: sites("polygon") if "<NAME>" in data: chuck_norris_joke() if "numbers" in data: random_number_facts() if "advice" in data: random_advice() if "who made you" in data or "who created you" in data: listening = True respond("I was built by Stoyan.") if "stop listening" in data or "thank you" in data: listening = False print("Listening stopped") respond("Bye") return listening return listening def main(): global listening global times for key in apis: print(key) time.sleep(2) respond(random.choice(GREETING_RESPONSES) + " , " + random.choice(GREETING_NAMES) + "?") while listening == True: data = listen() listening = digital_assistant(data) if __name__ == '__main__': main()
# -*- coding: utf-8 -*- from __future__ import print_function import requests import json import datetime import pickle import os.path import webbrowser import random import wikipedia import time import os from time import ctime from configparser import ConfigParser import speech_recognition as sr from gtts import gTTS from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from googleapiclient.discovery import build #Read config.ini file config_object = ConfigParser() config_object.read("config.ini") # accessing information from config.ini apis = config_object["APIS"] scopes = config_object["SCOPES"] keys = config_object["KEYS"] with open('greetings.json') as greet_f: data = json.load(greet_f) GREETING_RESPONSES = data['greeting responses'] GREETING_NAMES = data['greeting names'] times = 0 listening = True def listen(): global times global listening r = sr.Recognizer() with sr.Microphone() as source: print("I am listening..." + "\n") audio = r.listen(source) data = "" try: data = r.recognize_google(audio) print("You said: " + data + "\n") times = 0 except sr.UnknownValueError: if times < 4: print("Google Speech Recognition did not understand audio" + "\n") times += 1 else: data = "stop listening" except sr.RequestError as e: print("Request Failed; {0}".format(e)) return data def respond(audioString): print(audioString + "\n") tts = gTTS(text=audioString, lang='en') tts.save("speech.mp3") os.system("mpg321 -q speech.mp3") time.sleep(2) def calendar(): """Shows basic usage of the Google Calendar API. Prints the start and name of the next 10 events on the user's calendar. """ creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. 
try: if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'config.ini', scopes["google_calendar"]) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) service = build('calendar', 'v3', credentials=creds) # Call the Calendar API now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time print('Getting the upcoming 5 events...') events_result = service.events().list(calendarId='primary', timeMin=now, maxResults=5, singleEvents=True, orderBy='startTime').execute() events = events_result.get('items', []) if not events: print('No upcoming events found.') for event in events: start = event['start'].get('dateTime', event['start'].get('date')) print(start, event['summary']) except json.decoder.JSONDecodeError: print("whoops..something went wrong.") def weather(data): listening = True data = data.split(" ") location = str(data[5]) units = "metric" url = apis["weather_api"] + "appid=" + keys["weather_api_key"] + "&q=" + location + "&units=" + units js = requests.get(url).json() if js["cod"] != "404": weather = js["main"] temp = weather["temp"] temp_min = weather["temp_min"] temp_max = weather["temp_max"] temp_feels = weather["feels_like"] #hum = weather["humidity"] desc = js["weather"][0]["description"] resp_string = " The temperature in {0} in Celsius is ".format(location) \ + str(temp) + "°C. But it really feels like " + str(temp_feels) \ + "°C. Minimum is " + str(temp_min) + " and Maximum is " + str(temp_max) \ + "°C. 
The weather description is: " + str(desc) respond(resp_string) else: respond("City not found") def maps(data): listening = True data = data.split(" ") location_url = "https://www.google.com/maps/place/" + str(data[2]) + "," \ + str(data[3]) respond("Hold on Stoyan, I will show you where " + data[2] + "," + data[3] + " is.") webbrowser.open(location_url) def sites(s): listening=True respond("Redirecting to " + s.upper() + "\n") if s == "golem": webbrowser.open("https://www." + s + ".de", new=2) else: webbrowser.open("https://www." + s + ".com", new=2) def search(data): listening = True data = data.split(" ") words = data[1:] sentence = "" for word in words: sentence += word sentence += "+" location_url = "https://www.google.com/search?q=" + sentence respond("Let me Google that for you..." + "\n") webbrowser.open(location_url, new=2) def open(data): listening = True data = data.split(" ") app = str(data[1]) if app == 'Messenger': arg = '/usr/bin/open -a "/Applications/Messenger.app"' os.system(arg) elif app == 'Spotify' or app == 'spotify': arg = '/usr/bin/open -a "/Applications/Spotify.app"' os.system(arg) def choices(): print("Which site would you like to visit?") print("1. Reddit") print("2. BBC") print("3. Golem") print("4. Polygon") def chuck_norris_joke(): listening = True json = requests.get(apis["chuck_api"]).json() print("Here is a joke.." + "\n") respond(json["value"]) def random_number_facts(): listening = True js = requests.get(apis["trivia_api"]) js2 = requests.get(apis["date_api"]) respond(js.content.decode('utf-8')) print("\n") respond(js2.content.decode('utf-8')) def random_advice(): listening = True js = requests.get(apis["advice_api"]).json() print("Here is an advice.." 
+ "\n") respond(js["slip"]["advice"]) # Function to get a person first and last name def get_person(data): try: wordList = data.split(" ") # Split the text into a list of words for i in range(0, len(wordList)): if i + 3 <= len(wordList) - 1 and wordList[i].lower() == 'who' and wordList[i + 1].lower() == 'is': person = wordList[i + 2] + ' ' + wordList[i + 3] wiki = wikipedia.summary(person, sentences=2) respond(wiki) except wikipedia.exceptions.PageError: print("wooops. didn't get that. can you repeat?") def digital_assistant(data): global listening if "who is" in data: listening = True get_person(data) if "how are you" in data: listening = True respond("I am well") if "what time is it" in data: listening = True respond(ctime()) if "where is" in data: maps(data) if "what is the weather in" in data: weather(data) if "search" in data: search(data) if "calendar" in data: calendar() if "open" in data: open(data) if "sites" in data: choices() if "Reddit" in data: sites("reddit") elif "BBC" in data: sites("bbc") elif "golem" in data: sites("golem") elif "polygon" in data: sites("polygon") if "<NAME>" in data: chuck_norris_joke() if "numbers" in data: random_number_facts() if "advice" in data: random_advice() if "who made you" in data or "who created you" in data: listening = True respond("I was built by Stoyan.") if "stop listening" in data or "thank you" in data: listening = False print("Listening stopped") respond("Bye") return listening return listening def main(): global listening global times for key in apis: print(key) time.sleep(2) respond(random.choice(GREETING_RESPONSES) + " , " + random.choice(GREETING_NAMES) + "?") while listening == True: data = listen() listening = digital_assistant(data) if __name__ == '__main__': main()
en
0.799388
# -*- coding: utf-8 -*- #Read config.ini file # accessing information from config.ini Shows basic usage of the Google Calendar API. Prints the start and name of the next 10 events on the user's calendar. # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. # If there are no (valid) credentials available, let the user log in. # Save the credentials for the next run # Call the Calendar API # 'Z' indicates UTC time #hum = weather["humidity"] # Function to get a person first and last name # Split the text into a list of words
3.073953
3
whatismyip.py
MaxBrady/whatismyip
0
6621465
#!/usr/bin/python3 import os ip = os.getenv('REMOTE_ADDR') site_title = "Max's Test Site" print('Content-Type: text/html\n\n') print('') # Print Header print('<html>') print('<head>') print('<title>%s</title>' % site_title) print('</head>') print('<body>') print("<b>Max's IP Page</b>") print('My ip adress is %s' % ip) print('</body>') print('</html>')
#!/usr/bin/python3 import os ip = os.getenv('REMOTE_ADDR') site_title = "Max's Test Site" print('Content-Type: text/html\n\n') print('') # Print Header print('<html>') print('<head>') print('<title>%s</title>' % site_title) print('</head>') print('<body>') print("<b>Max's IP Page</b>") print('My ip adress is %s' % ip) print('</body>') print('</html>')
fr
0.163132
#!/usr/bin/python3 # Print Header
2.411081
2
code/chapter_04/listing_04_10.py
guinslym/python_earth_science_book
80
6621466
import pandas as pd import matplotlib.pyplot as plt my_dataset = pd.read_excel( 'Smith_glass_post_NYT_data.xlsx', sheet_name='Supp_traces') my_dataset1 = my_dataset[my_dataset.Epoch == 'one'] my_dataset2 = my_dataset[my_dataset.Epoch == 'two'] fig = plt.figure() ax1 = fig.add_subplot(2, 1, 1) ax1.scatter(my_dataset1.Zr, my_dataset1.Th, marker='s', color='#c7ddf4', edgecolor='#000000', label="First Epoch") ax1.scatter(my_dataset2.Zr, my_dataset2.Th, marker='o', color='#ff464a', edgecolor='#000000', label="Second Epoch") ax1.set_xlabel("Zr [ppm]") ax1.set_ylabel("Th [ppm]") ax1.legend(loc='upper left', framealpha=1, frameon=True, title="Age < 15 ky", title_fontsize=10) ax2 = fig.add_subplot(2, 1, 2) ax2.scatter(my_dataset1.Zr, my_dataset1.Th, marker='s', color='#c7ddf4', edgecolor='#000000', label="First Epoch") ax2.scatter(my_dataset2.Zr, my_dataset2.Th, marker='o', color='#ff464a', edgecolor='#000000', label="Second Epoch") ax2.set_xlabel("Zr [ppm]") ax2.set_ylabel("Th [ppm]") ax2.legend(frameon=False, loc='lower right', ncol=2, title="Age < 15 ky", title_fontsize=10) fig.tight_layout()
import pandas as pd import matplotlib.pyplot as plt my_dataset = pd.read_excel( 'Smith_glass_post_NYT_data.xlsx', sheet_name='Supp_traces') my_dataset1 = my_dataset[my_dataset.Epoch == 'one'] my_dataset2 = my_dataset[my_dataset.Epoch == 'two'] fig = plt.figure() ax1 = fig.add_subplot(2, 1, 1) ax1.scatter(my_dataset1.Zr, my_dataset1.Th, marker='s', color='#c7ddf4', edgecolor='#000000', label="First Epoch") ax1.scatter(my_dataset2.Zr, my_dataset2.Th, marker='o', color='#ff464a', edgecolor='#000000', label="Second Epoch") ax1.set_xlabel("Zr [ppm]") ax1.set_ylabel("Th [ppm]") ax1.legend(loc='upper left', framealpha=1, frameon=True, title="Age < 15 ky", title_fontsize=10) ax2 = fig.add_subplot(2, 1, 2) ax2.scatter(my_dataset1.Zr, my_dataset1.Th, marker='s', color='#c7ddf4', edgecolor='#000000', label="First Epoch") ax2.scatter(my_dataset2.Zr, my_dataset2.Th, marker='o', color='#ff464a', edgecolor='#000000', label="Second Epoch") ax2.set_xlabel("Zr [ppm]") ax2.set_ylabel("Th [ppm]") ax2.legend(frameon=False, loc='lower right', ncol=2, title="Age < 15 ky", title_fontsize=10) fig.tight_layout()
none
1
2.859349
3
webRoot/patric/static/generateSitemapIndex.py
PATRIC3/patric3_website
6
6621467
<gh_stars>1-10 #!/usr/bin/python import os import sys import datetime # setting default encoding to utf-8 reload(sys) sys.setdefaultencoding("UTF-8") outfile = open("sitemapindex.xml", "wb") print >>outfile, """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">""" # read directory listing for fileName in os.listdir("./sitemaps/"): print >>outfile, """ <sitemap> <loc>http://patricbrc.org/patric/static/sitemaps/%s</loc> <lastmod>%s</lastmod> </sitemap>""" % (fileName, datetime.date.fromtimestamp(os.path.getmtime("./sitemaps/"+fileName))) print >>outfile, "</sitemapindex>"
#!/usr/bin/python import os import sys import datetime # setting default encoding to utf-8 reload(sys) sys.setdefaultencoding("UTF-8") outfile = open("sitemapindex.xml", "wb") print >>outfile, """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">""" # read directory listing for fileName in os.listdir("./sitemaps/"): print >>outfile, """ <sitemap> <loc>http://patricbrc.org/patric/static/sitemaps/%s</loc> <lastmod>%s</lastmod> </sitemap>""" % (fileName, datetime.date.fromtimestamp(os.path.getmtime("./sitemaps/"+fileName))) print >>outfile, "</sitemapindex>"
en
0.181248
#!/usr/bin/python # setting default encoding to utf-8 <?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> # read directory listing <sitemap> <loc>http://patricbrc.org/patric/static/sitemaps/%s</loc> <lastmod>%s</lastmod> </sitemap>
2.538384
3
profiles_api/permissions.py
Chau-Ngoc/profiles_project_api
0
6621468
from rest_framework.permissions import BasePermission, SAFE_METHODS class ProfileUpdatePermission(BasePermission): """Allow user to only update their own profile.""" def has_object_permission(self, request, view, obj) -> bool: """Check if user is trying to update their own profile.""" if request.method in SAFE_METHODS: return True return obj.id == request.user.id class UpdateFeedPermission(BasePermission): """Allow users to only update their own status feeds.""" message = "You don't have permission to modify this feed." def has_object_permission(self, request, view, obj): if request.method in SAFE_METHODS: return True return obj.user.id == request.user.id
from rest_framework.permissions import BasePermission, SAFE_METHODS class ProfileUpdatePermission(BasePermission): """Allow user to only update their own profile.""" def has_object_permission(self, request, view, obj) -> bool: """Check if user is trying to update their own profile.""" if request.method in SAFE_METHODS: return True return obj.id == request.user.id class UpdateFeedPermission(BasePermission): """Allow users to only update their own status feeds.""" message = "You don't have permission to modify this feed." def has_object_permission(self, request, view, obj): if request.method in SAFE_METHODS: return True return obj.user.id == request.user.id
en
0.962413
Allow user to only update their own profile. Check if user is trying to update their own profile. Allow users to only update their own status feeds.
2.768073
3
faimed3d/models/unet.py
KRoszyk/faimed3d
22
6621469
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/06d_models.unet.ipynb (unless otherwise specified). __all__ = ['ConvTranspose3D', 'UnetBlock3D', 'ResizeToOrig', 'DynamicUnet3D'] # Cell # export from fastai.basics import * from ..basics import * from fastai.vision.all import create_body, hook_outputs from torchvision.models.video import r3d_18 from fastai.vision.models.unet import DynamicUnet, _get_sz_change_idxs # Cell import faimed3d from ..layers import * # Cell class ConvTranspose3D(nn.Sequential): "Upsample by 2` from `ni` filters to `nf` (default `ni`), using `nn.ConvTranspose3D`." def __init__(self, ni, nf=None, scale=2, blur=False, act_cls=None, norm_type=None, **kwargs): super().__init__() nf = ifnone(nf, ni) layers = [ConvLayer(ni, nf, ndim=3, act_cls=act_cls, norm_type=norm_type, transpose=True, **kwargs)] # layers[0].weight.data.copy_(icnr_init(layers[0].weight.data)) if blur: layers += [nn.ReplicationPad3d((1,0,1,0,1,0)), nn.AvgPool3d(2, stride=1)] super().__init__(*layers) # Cell class UnetBlock3D(Module): "A quasi-UNet block, using `ConvTranspose3d` for upsampling`." 
@delegates(ConvLayer.__init__) def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation, self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs): self.hook = hook self.up = ConvTranspose3D(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type, **kwargs) self.bn = BatchNorm(x_in_c, ndim=3) ni = up_in_c//2 + x_in_c nf = ni if final_div else ni//2 self.conv1 = ConvLayer(ni, nf, ndim=3, act_cls=act_cls, norm_type=norm_type, **kwargs) self.conv2 = ConvLayer(nf, nf, ndim=3, act_cls=act_cls, norm_type=norm_type, xtra=SelfAttention(nf) if self_attention else None, **kwargs) self.relu = act_cls() apply_init(nn.Sequential(self.conv1, self.conv2), init) def forward(self, up_in): s = self.hook.stored up_out = self.up(up_in) ssh = s.shape[-3:] if ssh != up_out.shape[-3:]: up_out = F.interpolate(up_out, s.shape[-3:], mode='nearest') cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) return self.conv2(self.conv1(cat_x)) # Cell class ResizeToOrig(Module): "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`." def __init__(self, mode='nearest'): self.mode = mode def forward(self, x): if x.orig.shape[-3:] != x.shape[-3:]: x = F.interpolate(x, x.orig.shape[-3:], mode=self.mode) return x # Cell class DynamicUnet3D(SequentialEx): "Create a U-Net from a given architecture." 
def __init__(self, encoder, n_out, img_size, blur=False, blur_final=True, self_attention=False, y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation, init=nn.init.kaiming_normal_, norm_type=None, **kwargs): sizes = model_sizes(encoder, size=img_size) sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes))) self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False) x = dummy_eval(encoder, img_size).detach() ni = x.size(1) middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, ndim = len(img_size), **kwargs), ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, ndim = len(img_size), **kwargs)).eval() x = middle_conv(x) layers = [encoder, middle_conv] for i,idx in enumerate(sz_chg_idxs): not_final = i!=len(sz_chg_idxs)-1 up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1]) do_blur = blur and (not_final or blur_final) sa = self_attention and (i==len(sz_chg_idxs)-3) unet_block = UnetBlock3D(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa, act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval() layers.append(unet_block) x = unet_block(x) ni = x.shape[1] if img_size != sizes[0][-3:]: layers.append(ConvTranspose3D(ni)) layers.append(ResizeToOrig()) if last_cross: layers.append(MergeLayer(dense=True)) ni += in_channels(encoder) layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, ndim = 3, **kwargs)) layers += [ConvLayer(ni, n_out, ks=1, act_cls=None, norm_type=norm_type, ndim = 3, **kwargs)] apply_init(nn.Sequential(layers[3], layers[-2]), init) #apply_init(nn.Sequential(layers[2]), init) if y_range is not None: layers.append(SigmoidRange(*y_range)) super().__init__(*layers) def __del__(self): if hasattr(self, "sfs"): self.sfs.remove()
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/06d_models.unet.ipynb (unless otherwise specified). __all__ = ['ConvTranspose3D', 'UnetBlock3D', 'ResizeToOrig', 'DynamicUnet3D'] # Cell # export from fastai.basics import * from ..basics import * from fastai.vision.all import create_body, hook_outputs from torchvision.models.video import r3d_18 from fastai.vision.models.unet import DynamicUnet, _get_sz_change_idxs # Cell import faimed3d from ..layers import * # Cell class ConvTranspose3D(nn.Sequential): "Upsample by 2` from `ni` filters to `nf` (default `ni`), using `nn.ConvTranspose3D`." def __init__(self, ni, nf=None, scale=2, blur=False, act_cls=None, norm_type=None, **kwargs): super().__init__() nf = ifnone(nf, ni) layers = [ConvLayer(ni, nf, ndim=3, act_cls=act_cls, norm_type=norm_type, transpose=True, **kwargs)] # layers[0].weight.data.copy_(icnr_init(layers[0].weight.data)) if blur: layers += [nn.ReplicationPad3d((1,0,1,0,1,0)), nn.AvgPool3d(2, stride=1)] super().__init__(*layers) # Cell class UnetBlock3D(Module): "A quasi-UNet block, using `ConvTranspose3d` for upsampling`." 
@delegates(ConvLayer.__init__) def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation, self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs): self.hook = hook self.up = ConvTranspose3D(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type, **kwargs) self.bn = BatchNorm(x_in_c, ndim=3) ni = up_in_c//2 + x_in_c nf = ni if final_div else ni//2 self.conv1 = ConvLayer(ni, nf, ndim=3, act_cls=act_cls, norm_type=norm_type, **kwargs) self.conv2 = ConvLayer(nf, nf, ndim=3, act_cls=act_cls, norm_type=norm_type, xtra=SelfAttention(nf) if self_attention else None, **kwargs) self.relu = act_cls() apply_init(nn.Sequential(self.conv1, self.conv2), init) def forward(self, up_in): s = self.hook.stored up_out = self.up(up_in) ssh = s.shape[-3:] if ssh != up_out.shape[-3:]: up_out = F.interpolate(up_out, s.shape[-3:], mode='nearest') cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) return self.conv2(self.conv1(cat_x)) # Cell class ResizeToOrig(Module): "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`." def __init__(self, mode='nearest'): self.mode = mode def forward(self, x): if x.orig.shape[-3:] != x.shape[-3:]: x = F.interpolate(x, x.orig.shape[-3:], mode=self.mode) return x # Cell class DynamicUnet3D(SequentialEx): "Create a U-Net from a given architecture." 
def __init__(self, encoder, n_out, img_size, blur=False, blur_final=True, self_attention=False, y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation, init=nn.init.kaiming_normal_, norm_type=None, **kwargs): sizes = model_sizes(encoder, size=img_size) sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes))) self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False) x = dummy_eval(encoder, img_size).detach() ni = x.size(1) middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, ndim = len(img_size), **kwargs), ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, ndim = len(img_size), **kwargs)).eval() x = middle_conv(x) layers = [encoder, middle_conv] for i,idx in enumerate(sz_chg_idxs): not_final = i!=len(sz_chg_idxs)-1 up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1]) do_blur = blur and (not_final or blur_final) sa = self_attention and (i==len(sz_chg_idxs)-3) unet_block = UnetBlock3D(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa, act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval() layers.append(unet_block) x = unet_block(x) ni = x.shape[1] if img_size != sizes[0][-3:]: layers.append(ConvTranspose3D(ni)) layers.append(ResizeToOrig()) if last_cross: layers.append(MergeLayer(dense=True)) ni += in_channels(encoder) layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, ndim = 3, **kwargs)) layers += [ConvLayer(ni, n_out, ks=1, act_cls=None, norm_type=norm_type, ndim = 3, **kwargs)] apply_init(nn.Sequential(layers[3], layers[-2]), init) #apply_init(nn.Sequential(layers[2]), init) if y_range is not None: layers.append(SigmoidRange(*y_range)) super().__init__(*layers) def __del__(self): if hasattr(self, "sfs"): self.sfs.remove()
en
0.429098
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/06d_models.unet.ipynb (unless otherwise specified). # Cell # export # Cell # Cell # layers[0].weight.data.copy_(icnr_init(layers[0].weight.data)) # Cell # Cell # Cell #apply_init(nn.Sequential(layers[2]), init)
1.809362
2
src/ntopng_constants.py
samuelesabella/Detecting-network-anomalies-using-the-feature-space-latent-representation
1
6621470
<reponame>samuelesabella/Detecting-network-anomalies-using-the-feature-space-latent-representation import numpy as np import logging import copy from collections import defaultdict # ----- ----- NDPI ----- ----- # # ----- ----- ---- ----- ----- # NDPI_DEFAULTS = { "unknown": "unspecified", "ftp_control": "download-filetransfer-filesharing", "pop3": "email", "smtp": "email", "imap": "email", "dns": "network", "ipp": "system", "http": "web", "mdns": "network", "ntp": "system", "netbios": "system", "nfs": "datatransfer", "ssdp": "system", "bgp": "network", "snmp": "network", "xdmcp": "remoteaccess", "smbv1": "system", "syslog": "system", "dhcp": "network", "postgresql": "database", "mysql": "database", "hotmail": "email", "direct_download_link": "download-filetransfer-filesharing", "pops": "email", "applejuice": "download-filetransfer-filesharing", "directconnect": "download-filetransfer-filesharing", "ntop": "network", "coap": "rpc", "vmware": "remoteaccess", "smtps": "email", "facebookzero": "socialnetwork", "ubntac2": "network", "kontiki": "media", "openft": "download-filetransfer-filesharing", "fasttrack": "download-filetransfer-filesharing", "gnutella": "download-filetransfer-filesharing", "edonkey": "download-filetransfer-filesharing", "bittorrent": "download-filetransfer-filesharing", "skypecall": "voip", "signal": "chat", "memcached": "network", "smbv23": "system", "mining": "mining", "nestlogsink": "cloud", "modbus": "network", "whatsappcall": "voip", "datasaver": "web", "xbox": "game", "qq": "chat", "tiktok": "socialnetwork", "rtsp": "media", "imaps": "email", "icecast": "media", "pplive": "media", "ppstream": "video", "zattoo": "video", "shoutcast": "music", "sopcast": "video", "tvants": "video", "tvuplayer": "video", "http_download": "download-filetransfer-filesharing", "qqlive": "video", "thunder": "download-filetransfer-filesharing", "soulseek": "download-filetransfer-filesharing", "ps_vue": "video", "irc": "chat", "ayiya": "network", 
"unencrypted_jabber": "web", "msn": "web", "oscar": "chat", "yahoo": "web", "battlefield": "game", "googleplus": "socialnetwork", "vrrp": "network", "steam": "game", "halflife2": "game", "worldofwarcraft": "game", "telnet": "remoteaccess", "stun": "network", "ipsec": "vpn", "gre": "network", "icmp": "network", "igmp": "network", "egp": "network", "sctp": "network", "ospf": "network", "ip_in_ip": "network", "rtp": "media", "rdp": "remoteaccess", "vnc": "remoteaccess", "pcanywhere": "remoteaccess", "tls": "web", "ssh": "remoteaccess", "usenet": "web", "mgcp": "voip", "iax": "voip", "tftp": "datatransfer", "afp": "datatransfer", "stealthnet": "download-filetransfer-filesharing", "aimini": "download-filetransfer-filesharing", "sip": "voip", "truphone": "voip", "icmpv6": "network", "dhcpv6": "network", "armagetron": "game", "crossfire": "rpc", "dofus": "game", "fiesta": "game", "florensia": "game", "guildwars": "game", "http_activesync": "cloud", "kerberos": "network", "ldap": "system", "maplestory": "game", "mssql-tds": "database", "pptp": "vpn", "warcraft3": "game", "worldofkungfu": "game", "slack": "collaborative", "facebook": "socialnetwork", "twitter": "socialnetwork", "dropbox": "cloud", "gmail": "email", "googlemaps": "web", "youtube": "media", "skype": "voip", "google": "web", "dce_rpc": "rpc", "netflow": "network", "sflow": "network", "http_connect": "web", "http_proxy": "web", "citrix": "network", "netflix": "video", "lastfm": "music", "waze": "web", "youtubeupload": "media", "hulu": "streaming", "checkmk": "datatransfer", "ajp": "web", "apple": "web", "webex": "voip", "whatsapp": "chat", "appleicloud": "web", "viber": "voip", "appleitunes": "streaming", "radius": "network", "windowsupdate": "softwareupdate", "teamviewer": "remoteaccess", "tuenti": "voip", "lotusnotes": "collaborative", "sap": "network", "gtp": "network", "upnp": "network", "llmnr": "network", "remotescan": "network", "spotify": "music", "messenger": "voip", "h323": "voip", "openvpn": "vpn", 
"noe": "voip", "ciscovpn": "vpn", "teamspeak": "voip", "tor": "vpn", "ciscoskinny": "voip", "rtcp": "voip", "rsync": "datatransfer", "oracle": "database", "corba": "rpc", "ubuntuone": "cloud", "whois-das": "network", "collectd": "system", "socks": "web", "nintendo": "game", "rtmp": "media", "ftp_data": "download-filetransfer-filesharing", "wikipedia": "web", "zeromq": "rpc", "amazon": "web", "ebay": "shopping", "cnn": "web", "megaco": "voip", "redis": "database", "pando_media_booster": "web", "vhua": "voip", "telegram": "chat", "vevo": "music", "pandora": "streaming", "quic": "web", "zoom": "video", "eaq": "network", "ookla": "network", "amqp": "rpc", "kakaotalk": "chat", "kakaotalk_voice": "voip", "twitch": "video", "doh_dot": "network", "wechat": "chat", "mpeg_ts": "media", "snapchat": "socialnetwork", "sina(weibo)": "socialnetwork", "googlehangoutduo": "voip", "iflix": "video", "github": "collaborative", "bjnp": "system", "free_205": "voip", "wireguard": "vpn", "smpp": "download-filetransfer-filesharing", "dnscrypt": "network", "tinc": "vpn", "deezer": "music", "instagram": "socialnetwork", "microsoft": "cloud", "starcraft": "game", "teredo": "network", "hotspotshield": "vpn", "imo": "voip", "googledrive": "cloud", "ocs": "media", "office365": "collaborative", "cloudflare": "web", "ms_onedrive": "cloud", "mqtt": "rpc", "rx": "rpc", "applestore": "softwareupdate", "opendns": "web", "git": "collaborative", "drda": "database", "playstore": "softwareupdate", "someip": "rpc", "fix": "rpc", "playstation": "game", "pastebin": "download-filetransfer-filesharing", "linkedin": "socialnetwork", "soundcloud": "music", "csgo": "game", "lisp": "cloud", "diameter": "network", "applepush": "cloud", "googleservices": "web", "amazonvideo": "cloud", "googledocs": "collaborative", "whatsappfiles": "download-filetransfer-filesharing", "targus dataspeed": "network", "dnp3": "network", "iec60870": "network", "bloomberg": "network", "capwap": "network", "zabbix": "network", "s7comm": 
"network" } class key_dependent_dict(defaultdict): """https://www.reddit.com/r/Python/comments/27crqg/making_defaultdict_create_defaults_that_are_a/ """ def __init__(self, f_of_x, basedict): super().__init__(None, basedict) # base class doesn't get a factory self.f_of_x = f_of_x # save f(x) def __missing__(self, key): # called when a default needed ret = self.f_of_x(key) # calculate default value self[key] = ret # and install it in the dict return ret def ndpi_value2cat(x): logging.warning(f"unknown L7 protocol {x}") return "unmapped ndpi" NDPI_VALUE2CAT = key_dependent_dict(ndpi_value2cat, NDPI_DEFAULTS) NDPI_FLOWS_COMPLETE = set({f"ndpi_flows:num_flows__{x}" for x in NDPI_VALUE2CAT.values()}) NDPI_BYTES_RCVD_COMPLETE = set({f"ndpi:bytes_rcvd__{x}" for x in NDPI_VALUE2CAT.values()}) NDPI_BYTES_SENT_COMPLETE = set({f"ndpi:bytes_sent__{x}" for x in NDPI_VALUE2CAT.values()}) NDPI_COMPLETE = NDPI_FLOWS_COMPLETE | NDPI_BYTES_RCVD_COMPLETE | NDPI_BYTES_SENT_COMPLETE # ----- ----- FEATURES ----- ----- # # ----- ----- -------- ----- ----- # BASIC_LEVEL_LOW = set({ "traffic:bytes_rcvd", "traffic:bytes_sent", "echo_packets:packets_rcvd", "echo_packets:packets_sent", "echo_reply_packets:packets_rcvd", "echo_reply_packets:packets_sent", "tcp_packets:packets_rcvd", "tcp_packets:packets_sent", "tcp_rx_stats:lost_packets", "tcp_rx_stats:out_of_order_packets", "tcp_rx_stats:retransmission_packets", "tcp_tx_stats:lost_packets", "tcp_tx_stats:out_of_order_packets", "tcp_tx_stats:retransmission_packets", "udp_pkts:packets_rcvd", "udp_pkts:packets_sent", "udp_sent_unicast:bytes_sent_non_unicast", "udp_sent_unicast:bytes_sent_unicast"}) SIX_IS_THE_MAGIK_NUM = set({ "active_flows:flows_as_client", "active_flows:flows_as_server", "contacts:num_as_client", "contacts:num_as_server", "unreachable_flows:flows_as_client", "unreachable_flows:flows_as_server", "misbehaving_flows:flows_as_client", "misbehaving_flows:flows_as_server", "tcp_rx_stats:lost_packets", 
"tcp_rx_stats:out_of_order_packets", "tcp_rx_stats:retransmission_packets", "tcp_tx_stats:lost_packets", "tcp_tx_stats:out_of_order_packets", "tcp_tx_stats:retransmission_packets", }) BASIC_LEVEL_HIGH = set({ "active_flows:flows_as_client", "active_flows:flows_as_server", "total_flows:flows_as_client", "total_flows:flows_as_server", "misbehaving_flows:flows_as_client", "misbehaving_flows:flows_as_server", "unreachable_flows:flows_as_client", "unreachable_flows:flows_as_server", "host_unreachable_flows:flows_as_client", "host_unreachable_flows:flows_as_server", "dns_qry_rcvd_rsp_sent:queries_packets", "dns_qry_rcvd_rsp_sent:replies_error_packets", "dns_qry_rcvd_rsp_sent:replies_ok_packets", "dns_qry_sent_rsp_rcvd:queries_packets", "dns_qry_sent_rsp_rcvd:replies_error_packets", "dns_qry_sent_rsp_rcvd:replies_ok_packets", "contacts:num_as_client", "contacts:num_as_server", }) # To prevent new feature coming with new versions of ntopng BASIC_FEATURES = BASIC_LEVEL_LOW | BASIC_LEVEL_HIGH FEATURES_COMPLETE = copy.deepcopy(BASIC_FEATURES) FEATURES_COMPLETE |= NDPI_FLOWS_COMPLETE | NDPI_BYTES_RCVD_COMPLETE | NDPI_BYTES_SENT_COMPLETE NON_DECREASING = ["dns_qry_sent_rsp_rcvd:", "dns_qry_rcvd_rsp_sent:", "echo_packets:", "echo_reply_packets:", "host_unreachable_flows:", "misbehaving_flows:", "ndpi:", "tcp_packets:", "tcp_rx_stats:", "tcp_tx_stats:", "total_flows:", "traffic:", "udp_pkts:", "udp_sent_unicast:", "unreachable_flows:"] NON_DECREASING = list(filter(lambda x: any([f in x for f in NON_DECREASING]), FEATURES_COMPLETE)) # ----- ----- FEATURES LEVELS ----- ----- # # ----- ----- --------------- ----- ----- # FEATURE_LEVELS = { "MAGIK": SIX_IS_THE_MAGIK_NUM, "NF_BL": NDPI_FLOWS_COMPLETE | BASIC_FEATURES, "BL": BASIC_FEATURES, "NF_BLH": NDPI_FLOWS_COMPLETE | BASIC_LEVEL_HIGH, "NF_BLMISC": NDPI_FLOWS_COMPLETE | set({ "active_flows:flows_as_server", "active_flows:flows_as_client", "traffic:bytes_rcvd", "traffic:bytes_sent", "contacts:num_as_client", 
"contacts:num_as_server", "dns_qry_sent_rsp_rcvd:replies_error_packets", "dns_qry_sent_rsp_rcvd:replies_ok_packets", }), "BLL": BASIC_LEVEL_LOW }
import numpy as np import logging import copy from collections import defaultdict # ----- ----- NDPI ----- ----- # # ----- ----- ---- ----- ----- # NDPI_DEFAULTS = { "unknown": "unspecified", "ftp_control": "download-filetransfer-filesharing", "pop3": "email", "smtp": "email", "imap": "email", "dns": "network", "ipp": "system", "http": "web", "mdns": "network", "ntp": "system", "netbios": "system", "nfs": "datatransfer", "ssdp": "system", "bgp": "network", "snmp": "network", "xdmcp": "remoteaccess", "smbv1": "system", "syslog": "system", "dhcp": "network", "postgresql": "database", "mysql": "database", "hotmail": "email", "direct_download_link": "download-filetransfer-filesharing", "pops": "email", "applejuice": "download-filetransfer-filesharing", "directconnect": "download-filetransfer-filesharing", "ntop": "network", "coap": "rpc", "vmware": "remoteaccess", "smtps": "email", "facebookzero": "socialnetwork", "ubntac2": "network", "kontiki": "media", "openft": "download-filetransfer-filesharing", "fasttrack": "download-filetransfer-filesharing", "gnutella": "download-filetransfer-filesharing", "edonkey": "download-filetransfer-filesharing", "bittorrent": "download-filetransfer-filesharing", "skypecall": "voip", "signal": "chat", "memcached": "network", "smbv23": "system", "mining": "mining", "nestlogsink": "cloud", "modbus": "network", "whatsappcall": "voip", "datasaver": "web", "xbox": "game", "qq": "chat", "tiktok": "socialnetwork", "rtsp": "media", "imaps": "email", "icecast": "media", "pplive": "media", "ppstream": "video", "zattoo": "video", "shoutcast": "music", "sopcast": "video", "tvants": "video", "tvuplayer": "video", "http_download": "download-filetransfer-filesharing", "qqlive": "video", "thunder": "download-filetransfer-filesharing", "soulseek": "download-filetransfer-filesharing", "ps_vue": "video", "irc": "chat", "ayiya": "network", "unencrypted_jabber": "web", "msn": "web", "oscar": "chat", "yahoo": "web", "battlefield": "game", "googleplus": 
"socialnetwork", "vrrp": "network", "steam": "game", "halflife2": "game", "worldofwarcraft": "game", "telnet": "remoteaccess", "stun": "network", "ipsec": "vpn", "gre": "network", "icmp": "network", "igmp": "network", "egp": "network", "sctp": "network", "ospf": "network", "ip_in_ip": "network", "rtp": "media", "rdp": "remoteaccess", "vnc": "remoteaccess", "pcanywhere": "remoteaccess", "tls": "web", "ssh": "remoteaccess", "usenet": "web", "mgcp": "voip", "iax": "voip", "tftp": "datatransfer", "afp": "datatransfer", "stealthnet": "download-filetransfer-filesharing", "aimini": "download-filetransfer-filesharing", "sip": "voip", "truphone": "voip", "icmpv6": "network", "dhcpv6": "network", "armagetron": "game", "crossfire": "rpc", "dofus": "game", "fiesta": "game", "florensia": "game", "guildwars": "game", "http_activesync": "cloud", "kerberos": "network", "ldap": "system", "maplestory": "game", "mssql-tds": "database", "pptp": "vpn", "warcraft3": "game", "worldofkungfu": "game", "slack": "collaborative", "facebook": "socialnetwork", "twitter": "socialnetwork", "dropbox": "cloud", "gmail": "email", "googlemaps": "web", "youtube": "media", "skype": "voip", "google": "web", "dce_rpc": "rpc", "netflow": "network", "sflow": "network", "http_connect": "web", "http_proxy": "web", "citrix": "network", "netflix": "video", "lastfm": "music", "waze": "web", "youtubeupload": "media", "hulu": "streaming", "checkmk": "datatransfer", "ajp": "web", "apple": "web", "webex": "voip", "whatsapp": "chat", "appleicloud": "web", "viber": "voip", "appleitunes": "streaming", "radius": "network", "windowsupdate": "softwareupdate", "teamviewer": "remoteaccess", "tuenti": "voip", "lotusnotes": "collaborative", "sap": "network", "gtp": "network", "upnp": "network", "llmnr": "network", "remotescan": "network", "spotify": "music", "messenger": "voip", "h323": "voip", "openvpn": "vpn", "noe": "voip", "ciscovpn": "vpn", "teamspeak": "voip", "tor": "vpn", "ciscoskinny": "voip", "rtcp": "voip", 
"rsync": "datatransfer", "oracle": "database", "corba": "rpc", "ubuntuone": "cloud", "whois-das": "network", "collectd": "system", "socks": "web", "nintendo": "game", "rtmp": "media", "ftp_data": "download-filetransfer-filesharing", "wikipedia": "web", "zeromq": "rpc", "amazon": "web", "ebay": "shopping", "cnn": "web", "megaco": "voip", "redis": "database", "pando_media_booster": "web", "vhua": "voip", "telegram": "chat", "vevo": "music", "pandora": "streaming", "quic": "web", "zoom": "video", "eaq": "network", "ookla": "network", "amqp": "rpc", "kakaotalk": "chat", "kakaotalk_voice": "voip", "twitch": "video", "doh_dot": "network", "wechat": "chat", "mpeg_ts": "media", "snapchat": "socialnetwork", "sina(weibo)": "socialnetwork", "googlehangoutduo": "voip", "iflix": "video", "github": "collaborative", "bjnp": "system", "free_205": "voip", "wireguard": "vpn", "smpp": "download-filetransfer-filesharing", "dnscrypt": "network", "tinc": "vpn", "deezer": "music", "instagram": "socialnetwork", "microsoft": "cloud", "starcraft": "game", "teredo": "network", "hotspotshield": "vpn", "imo": "voip", "googledrive": "cloud", "ocs": "media", "office365": "collaborative", "cloudflare": "web", "ms_onedrive": "cloud", "mqtt": "rpc", "rx": "rpc", "applestore": "softwareupdate", "opendns": "web", "git": "collaborative", "drda": "database", "playstore": "softwareupdate", "someip": "rpc", "fix": "rpc", "playstation": "game", "pastebin": "download-filetransfer-filesharing", "linkedin": "socialnetwork", "soundcloud": "music", "csgo": "game", "lisp": "cloud", "diameter": "network", "applepush": "cloud", "googleservices": "web", "amazonvideo": "cloud", "googledocs": "collaborative", "whatsappfiles": "download-filetransfer-filesharing", "targus dataspeed": "network", "dnp3": "network", "iec60870": "network", "bloomberg": "network", "capwap": "network", "zabbix": "network", "s7comm": "network" } class key_dependent_dict(defaultdict): 
"""https://www.reddit.com/r/Python/comments/27crqg/making_defaultdict_create_defaults_that_are_a/ """ def __init__(self, f_of_x, basedict): super().__init__(None, basedict) # base class doesn't get a factory self.f_of_x = f_of_x # save f(x) def __missing__(self, key): # called when a default needed ret = self.f_of_x(key) # calculate default value self[key] = ret # and install it in the dict return ret def ndpi_value2cat(x): logging.warning(f"unknown L7 protocol {x}") return "unmapped ndpi" NDPI_VALUE2CAT = key_dependent_dict(ndpi_value2cat, NDPI_DEFAULTS) NDPI_FLOWS_COMPLETE = set({f"ndpi_flows:num_flows__{x}" for x in NDPI_VALUE2CAT.values()}) NDPI_BYTES_RCVD_COMPLETE = set({f"ndpi:bytes_rcvd__{x}" for x in NDPI_VALUE2CAT.values()}) NDPI_BYTES_SENT_COMPLETE = set({f"ndpi:bytes_sent__{x}" for x in NDPI_VALUE2CAT.values()}) NDPI_COMPLETE = NDPI_FLOWS_COMPLETE | NDPI_BYTES_RCVD_COMPLETE | NDPI_BYTES_SENT_COMPLETE # ----- ----- FEATURES ----- ----- # # ----- ----- -------- ----- ----- # BASIC_LEVEL_LOW = set({ "traffic:bytes_rcvd", "traffic:bytes_sent", "echo_packets:packets_rcvd", "echo_packets:packets_sent", "echo_reply_packets:packets_rcvd", "echo_reply_packets:packets_sent", "tcp_packets:packets_rcvd", "tcp_packets:packets_sent", "tcp_rx_stats:lost_packets", "tcp_rx_stats:out_of_order_packets", "tcp_rx_stats:retransmission_packets", "tcp_tx_stats:lost_packets", "tcp_tx_stats:out_of_order_packets", "tcp_tx_stats:retransmission_packets", "udp_pkts:packets_rcvd", "udp_pkts:packets_sent", "udp_sent_unicast:bytes_sent_non_unicast", "udp_sent_unicast:bytes_sent_unicast"}) SIX_IS_THE_MAGIK_NUM = set({ "active_flows:flows_as_client", "active_flows:flows_as_server", "contacts:num_as_client", "contacts:num_as_server", "unreachable_flows:flows_as_client", "unreachable_flows:flows_as_server", "misbehaving_flows:flows_as_client", "misbehaving_flows:flows_as_server", "tcp_rx_stats:lost_packets", "tcp_rx_stats:out_of_order_packets", "tcp_rx_stats:retransmission_packets", 
"tcp_tx_stats:lost_packets", "tcp_tx_stats:out_of_order_packets", "tcp_tx_stats:retransmission_packets", }) BASIC_LEVEL_HIGH = set({ "active_flows:flows_as_client", "active_flows:flows_as_server", "total_flows:flows_as_client", "total_flows:flows_as_server", "misbehaving_flows:flows_as_client", "misbehaving_flows:flows_as_server", "unreachable_flows:flows_as_client", "unreachable_flows:flows_as_server", "host_unreachable_flows:flows_as_client", "host_unreachable_flows:flows_as_server", "dns_qry_rcvd_rsp_sent:queries_packets", "dns_qry_rcvd_rsp_sent:replies_error_packets", "dns_qry_rcvd_rsp_sent:replies_ok_packets", "dns_qry_sent_rsp_rcvd:queries_packets", "dns_qry_sent_rsp_rcvd:replies_error_packets", "dns_qry_sent_rsp_rcvd:replies_ok_packets", "contacts:num_as_client", "contacts:num_as_server", }) # To prevent new feature coming with new versions of ntopng BASIC_FEATURES = BASIC_LEVEL_LOW | BASIC_LEVEL_HIGH FEATURES_COMPLETE = copy.deepcopy(BASIC_FEATURES) FEATURES_COMPLETE |= NDPI_FLOWS_COMPLETE | NDPI_BYTES_RCVD_COMPLETE | NDPI_BYTES_SENT_COMPLETE NON_DECREASING = ["dns_qry_sent_rsp_rcvd:", "dns_qry_rcvd_rsp_sent:", "echo_packets:", "echo_reply_packets:", "host_unreachable_flows:", "misbehaving_flows:", "ndpi:", "tcp_packets:", "tcp_rx_stats:", "tcp_tx_stats:", "total_flows:", "traffic:", "udp_pkts:", "udp_sent_unicast:", "unreachable_flows:"] NON_DECREASING = list(filter(lambda x: any([f in x for f in NON_DECREASING]), FEATURES_COMPLETE)) # ----- ----- FEATURES LEVELS ----- ----- # # ----- ----- --------------- ----- ----- # FEATURE_LEVELS = { "MAGIK": SIX_IS_THE_MAGIK_NUM, "NF_BL": NDPI_FLOWS_COMPLETE | BASIC_FEATURES, "BL": BASIC_FEATURES, "NF_BLH": NDPI_FLOWS_COMPLETE | BASIC_LEVEL_HIGH, "NF_BLMISC": NDPI_FLOWS_COMPLETE | set({ "active_flows:flows_as_server", "active_flows:flows_as_client", "traffic:bytes_rcvd", "traffic:bytes_sent", "contacts:num_as_client", "contacts:num_as_server", "dns_qry_sent_rsp_rcvd:replies_error_packets", 
"dns_qry_sent_rsp_rcvd:replies_ok_packets", }), "BLL": BASIC_LEVEL_LOW }
en
0.40538
# ----- ----- NDPI ----- ----- # # ----- ----- ---- ----- ----- # https://www.reddit.com/r/Python/comments/27crqg/making_defaultdict_create_defaults_that_are_a/ # base class doesn't get a factory # save f(x) # called when a default needed # calculate default value # and install it in the dict # ----- ----- FEATURES ----- ----- # # ----- ----- -------- ----- ----- # # To prevent new feature coming with new versions of ntopng # ----- ----- FEATURES LEVELS ----- ----- # # ----- ----- --------------- ----- ----- #
1.716633
2
Bangu/Semana 5/Exemplo013/main.py
profoswaldo/Unisuam_2022-1
2
6621471
# numero1 = 6 # numero2 = 10 # numero3 = 4 numeros = [6, 10, 4] # print(numeros[0]) # print(numeros[1]) # print(numeros[2]) for i in range(3): print(numeros[i])
# numero1 = 6 # numero2 = 10 # numero3 = 4 numeros = [6, 10, 4] # print(numeros[0]) # print(numeros[1]) # print(numeros[2]) for i in range(3): print(numeros[i])
it
0.411001
# numero1 = 6 # numero2 = 10 # numero3 = 4 # print(numeros[0]) # print(numeros[1]) # print(numeros[2])
3.899752
4
openff/cli/get_conformer_energies.py
openforcefield/openff-cli
0
6621472
<reponame>openforcefield/openff-cli<gh_stars>0 import argparse from typing import List from openforcefield.topology import Molecule from openforcefield.utils.toolkits import ToolkitRegistry from simtk import unit from openff.cli.core import ( _build_simulation, _get_conformer_data, _get_forcefield, _get_rms_two_conformers, _minimize_conformer, make_registry, ) from openff.cli.utils.utils import _enforce_dependency_version def get_conformer_energies( molecule: str, registry: ToolkitRegistry, forcefield: str, constrained: bool = False, ) -> List[Molecule]: _enforce_dependency_version("openforcefield", "0.7.0") file_format = molecule.split(".")[-1] loaded_molecules = registry.call( "from_file", molecule, file_format=file_format, ) if type(loaded_molecules) is not list: loaded_molecules = [loaded_molecules] mols = [loaded_molecules[0]] for mol in loaded_molecules[1:]: if mol == mols[-1]: for conformer in mol.conformers: mols[-1].add_conformer(conformer) else: mols.append(molecule) n_molecules = len(mols) n_conformers = sum([mol.n_conformers for mol in mols]) print( f"{n_molecules} unique molecule(s) loaded, with {n_conformers} total conformers" ) ff = _get_forcefield(forcefield, constrained) mols_with_charges = [] for mol in mols: if mol.partial_charges is not None: mols_with_charges.append(mol) # This is duplicated from generate_conformers minimized_mols = [] for mol in mols: if mol in mols_with_charges: mol_with_charge = [mol] else: mol_with_charge = [] simulation, partial_charges = _build_simulation( molecule=mol, forcefield=ff, mols_with_charge=mol_with_charge, ) mol._partial_charges = partial_charges mol.properties["minimized against: "] = forcefield conformer_property_keys = [ "original conformer energies (kcal/mol)", "minimized conformer energies (kcal/mol)", "RMSD of minimized conformers (angstrom)", ] for prop in conformer_property_keys: mol.properties[prop] = mol.n_conformers * [None] for i, conformer in enumerate(mol.conformers): 
simulation.context.setPositions(conformer) pre_energy, pre_positions = _get_conformer_data(simulation) mol.properties["original conformer energies (kcal/mol)"][i] = pre_energy simulation = _minimize_conformer(simulation, conformer) min_energy, min_positions = _get_conformer_data(simulation) mol.properties["minimized conformer energies (kcal/mol)"][i] = min_energy mol.conformers[i] = min_positions rms = _get_rms_two_conformers(mol, pre_positions, min_positions) mol.properties["RMSD of minimized conformers (angstrom)"][i] = rms minimized_mols.append(mol) return minimized_mols def _print_mol_data(mols): pre_key = "original conformer energies (kcal/mol)" min_key = "minimized conformer energies (kcal/mol)" rmsd_key = "RMSD of minimized conformers (angstrom)" for mol_idx, mol in enumerate(mols): forcefield = mol.properties["minimized against: "] print(f"Conformer energies of mol {mol.name}, minimized against {forcefield}") print( "Conformer Initial PE Minimized PE " "RMS between initial and minimized conformer" ) for conformer_idx in range(mol.n_conformers): pre_energy = mol.properties[pre_key][conformer_idx] min_energy = mol.properties[min_key][conformer_idx] rmsd = mol.properties[rmsd_key][conformer_idx] print( "%5d / %5d : %8.3f kcal/mol %8.3f kcal/mol %8.3f Angstroms" % ( conformer_idx + 1, mol.n_conformers, pre_energy / unit.kilocalories_per_mole, min_energy / unit.kilocalories_per_mole, rmsd, ) ) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Evaluate conformer energies with OpenMM" ) parser._action_groups.pop() required_args = parser.add_argument_group("required arguments") optional_args = parser.add_argument_group("optional arguments") required_args.add_argument( "-t", "--toolkit", type=str, required=True, help="Name of the underlying cheminformatics toolkit to use. Accepted" " values are openeye and rdkit", ) required_args.add_argument( "-f", "--forcefield", type=str, required=True, help="Name of the force field to use, i.e. 
openff-1.0.0", ) required_args.add_argument( "-m", "--molecule", type=str, required=True, help="Path to an input file containing a molecule(s), single or multi-conformers", ) optional_args.add_argument( "--constrained", type=bool, default=False, help="Whether or not to use a constrained version of the force field", ) args = parser.parse_args() registry = make_registry(args.toolkit) mols = get_conformer_energies( molecule=args.molecule, registry=registry, forcefield=args.forcefield, constrained=args.constrained, ) _print_mol_data(mols)
import argparse from typing import List from openforcefield.topology import Molecule from openforcefield.utils.toolkits import ToolkitRegistry from simtk import unit from openff.cli.core import ( _build_simulation, _get_conformer_data, _get_forcefield, _get_rms_two_conformers, _minimize_conformer, make_registry, ) from openff.cli.utils.utils import _enforce_dependency_version def get_conformer_energies( molecule: str, registry: ToolkitRegistry, forcefield: str, constrained: bool = False, ) -> List[Molecule]: _enforce_dependency_version("openforcefield", "0.7.0") file_format = molecule.split(".")[-1] loaded_molecules = registry.call( "from_file", molecule, file_format=file_format, ) if type(loaded_molecules) is not list: loaded_molecules = [loaded_molecules] mols = [loaded_molecules[0]] for mol in loaded_molecules[1:]: if mol == mols[-1]: for conformer in mol.conformers: mols[-1].add_conformer(conformer) else: mols.append(molecule) n_molecules = len(mols) n_conformers = sum([mol.n_conformers for mol in mols]) print( f"{n_molecules} unique molecule(s) loaded, with {n_conformers} total conformers" ) ff = _get_forcefield(forcefield, constrained) mols_with_charges = [] for mol in mols: if mol.partial_charges is not None: mols_with_charges.append(mol) # This is duplicated from generate_conformers minimized_mols = [] for mol in mols: if mol in mols_with_charges: mol_with_charge = [mol] else: mol_with_charge = [] simulation, partial_charges = _build_simulation( molecule=mol, forcefield=ff, mols_with_charge=mol_with_charge, ) mol._partial_charges = partial_charges mol.properties["minimized against: "] = forcefield conformer_property_keys = [ "original conformer energies (kcal/mol)", "minimized conformer energies (kcal/mol)", "RMSD of minimized conformers (angstrom)", ] for prop in conformer_property_keys: mol.properties[prop] = mol.n_conformers * [None] for i, conformer in enumerate(mol.conformers): simulation.context.setPositions(conformer) pre_energy, pre_positions = 
_get_conformer_data(simulation) mol.properties["original conformer energies (kcal/mol)"][i] = pre_energy simulation = _minimize_conformer(simulation, conformer) min_energy, min_positions = _get_conformer_data(simulation) mol.properties["minimized conformer energies (kcal/mol)"][i] = min_energy mol.conformers[i] = min_positions rms = _get_rms_two_conformers(mol, pre_positions, min_positions) mol.properties["RMSD of minimized conformers (angstrom)"][i] = rms minimized_mols.append(mol) return minimized_mols def _print_mol_data(mols): pre_key = "original conformer energies (kcal/mol)" min_key = "minimized conformer energies (kcal/mol)" rmsd_key = "RMSD of minimized conformers (angstrom)" for mol_idx, mol in enumerate(mols): forcefield = mol.properties["minimized against: "] print(f"Conformer energies of mol {mol.name}, minimized against {forcefield}") print( "Conformer Initial PE Minimized PE " "RMS between initial and minimized conformer" ) for conformer_idx in range(mol.n_conformers): pre_energy = mol.properties[pre_key][conformer_idx] min_energy = mol.properties[min_key][conformer_idx] rmsd = mol.properties[rmsd_key][conformer_idx] print( "%5d / %5d : %8.3f kcal/mol %8.3f kcal/mol %8.3f Angstroms" % ( conformer_idx + 1, mol.n_conformers, pre_energy / unit.kilocalories_per_mole, min_energy / unit.kilocalories_per_mole, rmsd, ) ) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Evaluate conformer energies with OpenMM" ) parser._action_groups.pop() required_args = parser.add_argument_group("required arguments") optional_args = parser.add_argument_group("optional arguments") required_args.add_argument( "-t", "--toolkit", type=str, required=True, help="Name of the underlying cheminformatics toolkit to use. Accepted" " values are openeye and rdkit", ) required_args.add_argument( "-f", "--forcefield", type=str, required=True, help="Name of the force field to use, i.e. 
openff-1.0.0", ) required_args.add_argument( "-m", "--molecule", type=str, required=True, help="Path to an input file containing a molecule(s), single or multi-conformers", ) optional_args.add_argument( "--constrained", type=bool, default=False, help="Whether or not to use a constrained version of the force field", ) args = parser.parse_args() registry = make_registry(args.toolkit) mols = get_conformer_energies( molecule=args.molecule, registry=registry, forcefield=args.forcefield, constrained=args.constrained, ) _print_mol_data(mols)
en
0.892721
# This is duplicated from generate_conformers
2.39362
2
hello_genomics/main.py
chkra/calc_batch_correct
0
6621473
<filename>hello_genomics/main.py<gh_stars>0 #!/usr/bin/env python # coding: utf-8 ''' Combat batch correction app for FASTGenomics ''' import json import pathlib import random import csv import jinja2 import logging import enum import pandas as pd import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn import decomposition from scipy.spatial.distance import pdist from collections import defaultdict from fastgenomics import io as fg_io from hello_genomics import logging_config import combat as cb # initialize logging logging_config.configure_logging(level=logging.INFO) logger = logging.getLogger('hello_genomics_calc') # set paths to jinja2-templates for summary.md etc. TEMPLATE_PATH = pathlib.Path(__file__).parent.parent / 'templates' class Columns(str, enum.Enum): # @todo: this is horrible on so many levels ... CELLS = 'cellId*Ganzzahl' GENES = 'entrezId*Ganzzahl' EXPR = 'expressionValue*Zahl' # BATCH = '_generated_batch*Text' BATCH = 'batch' def get_data(): logger.info('Loading genes and cell annotation matrice') # @todo: tidy up # genes_path = fg_io.get_input_path('genes_data_input') expre_path = fg_io.get_input_path('expression_input') cells_meta = fg_io.get_input_path('cells_meta_input') # combat requires full matrix input - unstack input file # combat expects matrix of shape [genes x cells], so index columns accordingly # @todo: check if this truly makes sense # @todo: the Columns.enum-Trick sucks, this should be some global definition # @todo: will blow up for large data files - X = pd.read_csv(expre_path , sep='\t') print(X.head(10)) data = X.set_index([Columns.GENES, Columns.CELLS])\ .unstack() \ .fillna(0) # @todo this sucks as well - won't hurt to select this column, but # @todo I'd rather have a global data scheme # .loc[:, Columns.EXPR] pheno = pd.read_csv(cells_meta, sep='\t') return data, pheno def get_test_data(): # @todo: how to test? 
logger.info('Loading genes and cell annotation matrice') genes_path = fg_io.get_input_path('test_genes_data_input') cells_meta = fg_io.get_input_path('test_cells_meta_input') #genes_path = './bladder-expr.txt' #cells_meta = './bladder-pheno.txt' data = pd.read_csv(genes_path, sep='\t') pheno = pd.read_csv(cells_meta, sep='\t') return data, pheno def check_batch_distribution(X, batch_anno, axis, title=''): pca = decomposition.PCA(n_components=2) pca.fit(X) X_trans = pca.transform(X) all_batch_reps = [] labels = set(batch_anno) colors = cm.spectral(np.linspace(0, 1, len(labels))) for val, col in zip(labels, colors): Z = X_trans[np.ix_((batch_anno == val))] rep = np.mean(Z, axis=0) all_batch_reps.append(rep) axis.scatter(Z[:, 0], Z[:, 1], label=val, marker='o', c=col, edgecolor='none') axis.add_artist(plt.Circle(rep, 5, color=col)) axis.set_title(title) axis.legend(numpoints=1) all_batch_reps = np.array(all_batch_reps) return np.sum(pdist(all_batch_reps)) def make_output(data, corr, pheno, parameters): f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 4)) total_batch_dist = check_batch_distribution(data.values.T, pheno[Columns.BATCH], ax1, 'Before Batch Correction') total_batch_dist_corr = check_batch_distribution(corr.values.T, pheno[Columns.BATCH], ax2, 'After Batch Correction') logger.info('Batch center distance before correction: ' + str(total_batch_dist)) logger.info('Batch center distance after correction: ' + str(total_batch_dist_corr)) corr_ratio = total_batch_dist / total_batch_dist_corr if corr_ratio >= 1: logger.info('Batch completed without errors. 
Reduced batch center distance by ratio of ' + str(np.round(corr_ratio, 2))) else: logger.error('Batch correction modified data in invald way!') logger.error('Batch center ratio is less than 1:' + str(np.round(corr_ratio, 2))) doc_img_path = fg_io.get_output_path('batch_corr_img') logger.info('Plotting PCA embedding of data for documentation.') # plt.savefig(doc_img_path, bbox_inches='tight') logger.info("Storing matrix of batch-corrected gene expressions.") output_path = fg_io.get_output_path('batch_corr_matrix') corr.to_csv(output_path) results = {'num_batches': len(set(pheno[Columns.BATCH])), 'ctr_dist_before': total_batch_dist, 'ctr_dist_before': total_batch_dist_corr, 'ctr_ratio': corr_ratio} logger.debug("Loading Jinja2 summary template") with open(TEMPLATE_PATH / 'summary.md.j2') as temp: template_str = temp.read() logger.debug("Rendering template") template = jinja2.Template(template_str) summary = template.render(results=results, parameters=parameters) logger.info("Writing summary") summary_path = fg_io.get_summary_path() with summary_path.open('w') as f_sum: f_sum.write(summary) def main(): ''' main routine of batch correction with combat ''' try: logger.info('Loading parameters') parameters = fg_io.get_parameters() random.seed(4711) parameters['random_seed'] = 4711 # data, pheno = get_data() #data, pheno = get_test_data() logger.info('Received data matrix of shape (genes x cells) = ' + str(data.shape)) logger.info('Found the following batches: ' + str(set(pheno[Columns.BATCH]))) logger.info('Calling combat for batch correction.') corr = cb.combat(data, pheno[Columns.BATCH]) make_output(data, corr, pheno, parameters) logger.info('Done.') except Exception as inst: logger.error(type(inst)) logger.error(inst) raise inst if __name__ == '__main__': main()
<filename>hello_genomics/main.py<gh_stars>0 #!/usr/bin/env python # coding: utf-8 ''' Combat batch correction app for FASTGenomics ''' import json import pathlib import random import csv import jinja2 import logging import enum import pandas as pd import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn import decomposition from scipy.spatial.distance import pdist from collections import defaultdict from fastgenomics import io as fg_io from hello_genomics import logging_config import combat as cb # initialize logging logging_config.configure_logging(level=logging.INFO) logger = logging.getLogger('hello_genomics_calc') # set paths to jinja2-templates for summary.md etc. TEMPLATE_PATH = pathlib.Path(__file__).parent.parent / 'templates' class Columns(str, enum.Enum): # @todo: this is horrible on so many levels ... CELLS = 'cellId*Ganzzahl' GENES = 'entrezId*Ganzzahl' EXPR = 'expressionValue*Zahl' # BATCH = '_generated_batch*Text' BATCH = 'batch' def get_data(): logger.info('Loading genes and cell annotation matrice') # @todo: tidy up # genes_path = fg_io.get_input_path('genes_data_input') expre_path = fg_io.get_input_path('expression_input') cells_meta = fg_io.get_input_path('cells_meta_input') # combat requires full matrix input - unstack input file # combat expects matrix of shape [genes x cells], so index columns accordingly # @todo: check if this truly makes sense # @todo: the Columns.enum-Trick sucks, this should be some global definition # @todo: will blow up for large data files - X = pd.read_csv(expre_path , sep='\t') print(X.head(10)) data = X.set_index([Columns.GENES, Columns.CELLS])\ .unstack() \ .fillna(0) # @todo this sucks as well - won't hurt to select this column, but # @todo I'd rather have a global data scheme # .loc[:, Columns.EXPR] pheno = pd.read_csv(cells_meta, sep='\t') return data, pheno def get_test_data(): # @todo: how to test? 
logger.info('Loading genes and cell annotation matrice') genes_path = fg_io.get_input_path('test_genes_data_input') cells_meta = fg_io.get_input_path('test_cells_meta_input') #genes_path = './bladder-expr.txt' #cells_meta = './bladder-pheno.txt' data = pd.read_csv(genes_path, sep='\t') pheno = pd.read_csv(cells_meta, sep='\t') return data, pheno def check_batch_distribution(X, batch_anno, axis, title=''): pca = decomposition.PCA(n_components=2) pca.fit(X) X_trans = pca.transform(X) all_batch_reps = [] labels = set(batch_anno) colors = cm.spectral(np.linspace(0, 1, len(labels))) for val, col in zip(labels, colors): Z = X_trans[np.ix_((batch_anno == val))] rep = np.mean(Z, axis=0) all_batch_reps.append(rep) axis.scatter(Z[:, 0], Z[:, 1], label=val, marker='o', c=col, edgecolor='none') axis.add_artist(plt.Circle(rep, 5, color=col)) axis.set_title(title) axis.legend(numpoints=1) all_batch_reps = np.array(all_batch_reps) return np.sum(pdist(all_batch_reps)) def make_output(data, corr, pheno, parameters): f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 4)) total_batch_dist = check_batch_distribution(data.values.T, pheno[Columns.BATCH], ax1, 'Before Batch Correction') total_batch_dist_corr = check_batch_distribution(corr.values.T, pheno[Columns.BATCH], ax2, 'After Batch Correction') logger.info('Batch center distance before correction: ' + str(total_batch_dist)) logger.info('Batch center distance after correction: ' + str(total_batch_dist_corr)) corr_ratio = total_batch_dist / total_batch_dist_corr if corr_ratio >= 1: logger.info('Batch completed without errors. 
Reduced batch center distance by ratio of ' + str(np.round(corr_ratio, 2))) else: logger.error('Batch correction modified data in invald way!') logger.error('Batch center ratio is less than 1:' + str(np.round(corr_ratio, 2))) doc_img_path = fg_io.get_output_path('batch_corr_img') logger.info('Plotting PCA embedding of data for documentation.') # plt.savefig(doc_img_path, bbox_inches='tight') logger.info("Storing matrix of batch-corrected gene expressions.") output_path = fg_io.get_output_path('batch_corr_matrix') corr.to_csv(output_path) results = {'num_batches': len(set(pheno[Columns.BATCH])), 'ctr_dist_before': total_batch_dist, 'ctr_dist_before': total_batch_dist_corr, 'ctr_ratio': corr_ratio} logger.debug("Loading Jinja2 summary template") with open(TEMPLATE_PATH / 'summary.md.j2') as temp: template_str = temp.read() logger.debug("Rendering template") template = jinja2.Template(template_str) summary = template.render(results=results, parameters=parameters) logger.info("Writing summary") summary_path = fg_io.get_summary_path() with summary_path.open('w') as f_sum: f_sum.write(summary) def main(): ''' main routine of batch correction with combat ''' try: logger.info('Loading parameters') parameters = fg_io.get_parameters() random.seed(4711) parameters['random_seed'] = 4711 # data, pheno = get_data() #data, pheno = get_test_data() logger.info('Received data matrix of shape (genes x cells) = ' + str(data.shape)) logger.info('Found the following batches: ' + str(set(pheno[Columns.BATCH]))) logger.info('Calling combat for batch correction.') corr = cb.combat(data, pheno[Columns.BATCH]) make_output(data, corr, pheno, parameters) logger.info('Done.') except Exception as inst: logger.error(type(inst)) logger.error(inst) raise inst if __name__ == '__main__': main()
en
0.602794
#!/usr/bin/env python # coding: utf-8 Combat batch correction app for FASTGenomics # initialize logging # set paths to jinja2-templates for summary.md etc. # @todo: this is horrible on so many levels ... # BATCH = '_generated_batch*Text' # @todo: tidy up # genes_path = fg_io.get_input_path('genes_data_input') # combat requires full matrix input - unstack input file # combat expects matrix of shape [genes x cells], so index columns accordingly # @todo: check if this truly makes sense # @todo: the Columns.enum-Trick sucks, this should be some global definition # @todo: will blow up for large data files - # @todo this sucks as well - won't hurt to select this column, but # @todo I'd rather have a global data scheme # .loc[:, Columns.EXPR] # @todo: how to test? #genes_path = './bladder-expr.txt' #cells_meta = './bladder-pheno.txt' # plt.savefig(doc_img_path, bbox_inches='tight') main routine of batch correction with combat # data, pheno = get_data() #data, pheno = get_test_data()
2.174983
2
user.py
Emmanuel687/PassWord-Locker-App
0
6621474
class User: """ creates new user instances """ pass user_array = [] def __init__(self,fullName,email,mobileNumber): self.fullName = fullName self.email = email self.mobileNumber = mobileNumber def saveUserDetails(self): User.user_array.append(self) @classmethod def display_users(cls): return cls.user_array pass
class User: """ creates new user instances """ pass user_array = [] def __init__(self,fullName,email,mobileNumber): self.fullName = fullName self.email = email self.mobileNumber = mobileNumber def saveUserDetails(self): User.user_array.append(self) @classmethod def display_users(cls): return cls.user_array pass
en
0.711728
creates new user instances
3.11604
3
pt/shared.py
fstraw/django-pt
0
6621475
DEPARTMENTS = ( ('Dept 1', 'Dept 1'), ('Dept 2', 'Dept 2'), ('Dept 3', 'Dept 3'), ('Dept 4', 'Dept 4'), ('Dept 5', 'Dept 5'), ) EMPLOYEES = ( ('Employee 1', 'Employee 1'), ('Employee 2', 'Employee 2'), ('Employee 3', 'Employee 3'), ('Employee 4', 'Employee 4'), ('Employee 5', 'Employee 5'), ) PROJECT_MANAGERS = ( ('Manager 1', 'Manager 1'), ('Manager 2', 'Manager 2'), ('Manager 3', 'Manager 3'), ('Manager 4', 'Manager 4'), ('Manager 5', 'Manager 5'), ) NEPA_PLANNERS = ( ('Planner 1', 'Planner 1'), ('Planner 2', 'Planner 2'), ('Planner 3', 'Planner 3'), ('Planner 4', 'Planner 4'), ('Planner 5', 'Planner 5'), ) CLIENTS = ( ('Client 1', 'Client 1'), ('Client 2', 'Client 2'), ('Client 3', 'Client 3'), ('Client 4', 'Client 4'), ('Client 5', 'Client 5'), ) ENVIRONMENTAL_DOCUMENTS = ( ('DocType 1', 'DocType 1'), ('DocType 2', 'DocType 2'), ('DocType 3', 'DocType 3'), ('DocType 4', 'DocType 4'), ('DocType 5', 'DocType 5'), ) AIR_DOCUMENTS = ( ('PM25 Exemption', 'PM25 Exemption'), ('PM25 LOD', 'PM25 LOD'), ('GEPA Memorandum', 'GEPA Memorandum'), ('Air Assessment', 'Air Assessment'), ('Air Memorandum', 'Air Memorandum'), ) NOISE_DOCUMENTS = ( ('Assessment', 'Assessment'), ('Addendum', 'Addendum'), ('TypeIII', 'Type III'), ('Memorandum', 'Memorandum'), ) ECOLOGY_DOCUMENTS = ( ('Assessment', 'Assessment'), ('Addendum', 'Addendum'), ('AOE', 'AOE'), ) AQUATICS_DOCUMENTS = ( ('Assessment', 'Assessment'), ('Addendum', 'Addendum'), ) ARCH_DOCUMENTS = ( ('Short Form', 'Short Form'), ('Phase I', 'Phase I'), ('Phase II', 'Phase II'), ('Phase III', 'Phase III'), ) HISTORY_DOCUMENTS = ( ('HRSR', 'HRSR'), ('Memorandum', 'Memorandum'), ('AOE', 'AOE'), ) COUNTIES = {'Appling':5, 'Atkinson':4, 'Bacon':5, 'Baker':4, 'Baldwin':2, 'Banks':1, 'Barrow':1, 'Bartow':6, '<NAME>':4, 'Berrien':4, 'Bibb':3, 'Bleckley':2, 'Brantley':5, 'Brooks':4, 'Bryan':5, 'Bulloch':5, 'Burke':2, 'Butts':3, 'Calhoun':4, 'Camden':5, 'Candler':5, 'Carroll':6, 'Catoosa':6, 'Charlton':5,'Chatham':5, 'Chattahoochee':3, 
'Chattooga':6, 'Cherokee':6, 'Clarke':1, 'Clay':4, 'Clayton':7, 'Clinch':4, 'Cobb':7, 'Coffee':4, 'Colquitt':4, 'Columbia':2, 'Cook':4, 'Coweta':3, 'Crawford':3, 'Crisp':4, 'Dade':6, 'Dawson':1, 'Decatur':4, 'DeKalb':7, 'Dodge':2, 'Dooly':3, 'Dougherty':4, 'Douglas':7, 'Early':4, 'Echols':4, 'Effingham':5, 'Elbert':1, 'Emanuel':2, 'Evans':5, 'Fannin':6, 'Fayette':3, 'Floyd':6, 'Forsyth':1, 'Franklin':1, 'Fulton':7, 'Gilmer':6, 'Glascock':2, 'Glynn':5, 'Gordon':6, 'Grady':4, 'Greene':2,'Gwinnett':1, 'Habersham':1, 'Hall':1, 'Hancock':2, 'Haralson':6, 'Harris':3, 'Hart':1, 'Heard':3, 'Henry':3, 'Houston':3, 'Irwin':4, 'Jackson':1, 'Jasper':2, '<NAME>':5, 'Jefferson':2, 'Jenkins':2, 'Johnson':2, 'Jones':3, 'Lamar':3, 'Lanier':4, 'Laurens':2, 'Lee':4, 'Liberty':5, 'Lincoln':2, 'Long':5, 'Lowndes':4, 'Lumpkin':1, 'McDuffie':2, 'McIntosh':5, 'Macon':3, 'Madison':1, 'Marion':3, 'Meriwether':3, 'Miller':4, 'Mitchell':4, 'Monroe':3, 'Montgomery':5, 'Morgan':2, 'Murray':6, 'Muscogee':3, 'Newton':2, 'Oconee':1, 'Oglethorpe':2, 'Paulding':6, 'Peach':3, 'Pickens':6, 'Pierce':5, 'Pike':3, 'Polk':6, 'Pulaski':3, 'Putnam':2, 'Quitman':4, 'Rabun':1, 'Randolph':4, 'Richmond':2, 'Rockdale':7, 'Schley':3, 'Screven':2, 'Seminole':4, 'Spalding':3, 'Stephens':1, 'Stewart':3, 'Sumter':3, 'Talbot':3, 'Taliaferro':2, 'Tattnall':5, 'Taylor':3, 'Telfair':5, 'Terrell':4, 'Thomas':4, 'Tift':4, 'Toombs':5, 'Towns':1, 'Treutlen':2, 'Troup':3,'Turner':4, 'Twiggs':3, 'Union':1, 'Upson':3, 'Walker':6, 'Walton':1, 'Ware':5, 'Warren':2, 'Washington':2, 'Wayne':5, 'Webster':3, 'Wheeler':5, 'White':1, 'Whitfield':6, 'Wilcox':4, 'Wilkes':2, 'Wilkinson':2, 'Worth':4, } COUNTY_NAMES = [(c, c) for c in sorted(COUNTIES.iterkeys())]
DEPARTMENTS = ( ('Dept 1', 'Dept 1'), ('Dept 2', 'Dept 2'), ('Dept 3', 'Dept 3'), ('Dept 4', 'Dept 4'), ('Dept 5', 'Dept 5'), ) EMPLOYEES = ( ('Employee 1', 'Employee 1'), ('Employee 2', 'Employee 2'), ('Employee 3', 'Employee 3'), ('Employee 4', 'Employee 4'), ('Employee 5', 'Employee 5'), ) PROJECT_MANAGERS = ( ('Manager 1', 'Manager 1'), ('Manager 2', 'Manager 2'), ('Manager 3', 'Manager 3'), ('Manager 4', 'Manager 4'), ('Manager 5', 'Manager 5'), ) NEPA_PLANNERS = ( ('Planner 1', 'Planner 1'), ('Planner 2', 'Planner 2'), ('Planner 3', 'Planner 3'), ('Planner 4', 'Planner 4'), ('Planner 5', 'Planner 5'), ) CLIENTS = ( ('Client 1', 'Client 1'), ('Client 2', 'Client 2'), ('Client 3', 'Client 3'), ('Client 4', 'Client 4'), ('Client 5', 'Client 5'), ) ENVIRONMENTAL_DOCUMENTS = ( ('DocType 1', 'DocType 1'), ('DocType 2', 'DocType 2'), ('DocType 3', 'DocType 3'), ('DocType 4', 'DocType 4'), ('DocType 5', 'DocType 5'), ) AIR_DOCUMENTS = ( ('PM25 Exemption', 'PM25 Exemption'), ('PM25 LOD', 'PM25 LOD'), ('GEPA Memorandum', 'GEPA Memorandum'), ('Air Assessment', 'Air Assessment'), ('Air Memorandum', 'Air Memorandum'), ) NOISE_DOCUMENTS = ( ('Assessment', 'Assessment'), ('Addendum', 'Addendum'), ('TypeIII', 'Type III'), ('Memorandum', 'Memorandum'), ) ECOLOGY_DOCUMENTS = ( ('Assessment', 'Assessment'), ('Addendum', 'Addendum'), ('AOE', 'AOE'), ) AQUATICS_DOCUMENTS = ( ('Assessment', 'Assessment'), ('Addendum', 'Addendum'), ) ARCH_DOCUMENTS = ( ('Short Form', 'Short Form'), ('Phase I', 'Phase I'), ('Phase II', 'Phase II'), ('Phase III', 'Phase III'), ) HISTORY_DOCUMENTS = ( ('HRSR', 'HRSR'), ('Memorandum', 'Memorandum'), ('AOE', 'AOE'), ) COUNTIES = {'Appling':5, 'Atkinson':4, 'Bacon':5, 'Baker':4, 'Baldwin':2, 'Banks':1, 'Barrow':1, 'Bartow':6, '<NAME>':4, 'Berrien':4, 'Bibb':3, 'Bleckley':2, 'Brantley':5, 'Brooks':4, 'Bryan':5, 'Bulloch':5, 'Burke':2, 'Butts':3, 'Calhoun':4, 'Camden':5, 'Candler':5, 'Carroll':6, 'Catoosa':6, 'Charlton':5,'Chatham':5, 'Chattahoochee':3, 
'Chattooga':6, 'Cherokee':6, 'Clarke':1, 'Clay':4, 'Clayton':7, 'Clinch':4, 'Cobb':7, 'Coffee':4, 'Colquitt':4, 'Columbia':2, 'Cook':4, 'Coweta':3, 'Crawford':3, 'Crisp':4, 'Dade':6, 'Dawson':1, 'Decatur':4, 'DeKalb':7, 'Dodge':2, 'Dooly':3, 'Dougherty':4, 'Douglas':7, 'Early':4, 'Echols':4, 'Effingham':5, 'Elbert':1, 'Emanuel':2, 'Evans':5, 'Fannin':6, 'Fayette':3, 'Floyd':6, 'Forsyth':1, 'Franklin':1, 'Fulton':7, 'Gilmer':6, 'Glascock':2, 'Glynn':5, 'Gordon':6, 'Grady':4, 'Greene':2,'Gwinnett':1, 'Habersham':1, 'Hall':1, 'Hancock':2, 'Haralson':6, 'Harris':3, 'Hart':1, 'Heard':3, 'Henry':3, 'Houston':3, 'Irwin':4, 'Jackson':1, 'Jasper':2, '<NAME>':5, 'Jefferson':2, 'Jenkins':2, 'Johnson':2, 'Jones':3, 'Lamar':3, 'Lanier':4, 'Laurens':2, 'Lee':4, 'Liberty':5, 'Lincoln':2, 'Long':5, 'Lowndes':4, 'Lumpkin':1, 'McDuffie':2, 'McIntosh':5, 'Macon':3, 'Madison':1, 'Marion':3, 'Meriwether':3, 'Miller':4, 'Mitchell':4, 'Monroe':3, 'Montgomery':5, 'Morgan':2, 'Murray':6, 'Muscogee':3, 'Newton':2, 'Oconee':1, 'Oglethorpe':2, 'Paulding':6, 'Peach':3, 'Pickens':6, 'Pierce':5, 'Pike':3, 'Polk':6, 'Pulaski':3, 'Putnam':2, 'Quitman':4, 'Rabun':1, 'Randolph':4, 'Richmond':2, 'Rockdale':7, 'Schley':3, 'Screven':2, 'Seminole':4, 'Spalding':3, 'Stephens':1, 'Stewart':3, 'Sumter':3, 'Talbot':3, 'Taliaferro':2, 'Tattnall':5, 'Taylor':3, 'Telfair':5, 'Terrell':4, 'Thomas':4, 'Tift':4, 'Toombs':5, 'Towns':1, 'Treutlen':2, 'Troup':3,'Turner':4, 'Twiggs':3, 'Union':1, 'Upson':3, 'Walker':6, 'Walton':1, 'Ware':5, 'Warren':2, 'Washington':2, 'Wayne':5, 'Webster':3, 'Wheeler':5, 'White':1, 'Whitfield':6, 'Wilcox':4, 'Wilkes':2, 'Wilkinson':2, 'Worth':4, } COUNTY_NAMES = [(c, c) for c in sorted(COUNTIES.iterkeys())]
none
1
1.535332
2
chat/utils.py
lorenzoconcas/CheChat
0
6621476
# contiene funzioni utili per le view e per il db def json_element(dataora, contenuto, sent, mittente): # trasforma i dati del messaggio in una linea json da spedire al client return '{"time":"' + dataora.strftime("%Y-%m-%d %H:%M:%S") + '", "content":"' \ + contenuto + '", "sent":"' + str(sent) + '", "sender":"' + mittente + '"}'
# contiene funzioni utili per le view e per il db def json_element(dataora, contenuto, sent, mittente): # trasforma i dati del messaggio in una linea json da spedire al client return '{"time":"' + dataora.strftime("%Y-%m-%d %H:%M:%S") + '", "content":"' \ + contenuto + '", "sent":"' + str(sent) + '", "sender":"' + mittente + '"}'
it
0.970949
# contiene funzioni utili per le view e per il db # trasforma i dati del messaggio in una linea json da spedire al client
2.128399
2
linkedList/removeNthNodeFromEndOfList.py
G-MontaG/leetcode
1
6621477
<reponame>G-MontaG/leetcode # https://leetcode.com/problems/remove-nth-node-from-end-of-list/ class Solution: def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode: fast = slow = head for _ in range(n): fast = fast.next if not fast: return head.next while fast.next: fast = fast.next slow = slow.next slow.next = slow.next.next return head
# https://leetcode.com/problems/remove-nth-node-from-end-of-list/ class Solution: def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode: fast = slow = head for _ in range(n): fast = fast.next if not fast: return head.next while fast.next: fast = fast.next slow = slow.next slow.next = slow.next.next return head
en
0.625186
# https://leetcode.com/problems/remove-nth-node-from-end-of-list/
3.55531
4
gh.py
mnzr/json-scraper
0
6621478
<reponame>mnzr/json-scraper<filename>gh.py #!/usr/bin/env python """Script to get Github info using API""" import requests # Add any number of usernames here users = ['google', 'facebook', 'apache'] url = "https://api.github.com/users/" # Which JSON properties we want to catch properties = ['login', 'id', 'html_url', 'public_repos', 'created_at'] for user in users: # make API url with Github username current_page = url + user # take response from the url response = requests.get(current_page) # conversion to human readable format json = response.json() for prop in properties: print(prop, ':', json[prop]) print('')
#!/usr/bin/env python """Script to get Github info using API""" import requests # Add any number of usernames here users = ['google', 'facebook', 'apache'] url = "https://api.github.com/users/" # Which JSON properties we want to catch properties = ['login', 'id', 'html_url', 'public_repos', 'created_at'] for user in users: # make API url with Github username current_page = url + user # take response from the url response = requests.get(current_page) # conversion to human readable format json = response.json() for prop in properties: print(prop, ':', json[prop]) print('')
en
0.736748
#!/usr/bin/env python Script to get Github info using API # Add any number of usernames here # Which JSON properties we want to catch # make API url with Github username # take response from the url # conversion to human readable format
3.587639
4
crackers/MonoSubstitutionCracker.py
qpwoeirut/ClassicalCipherCracker
1
6621479
<filename>crackers/MonoSubstitutionCracker.py import random import string from typing import Type from ciphers.Cipher import Cipher from ciphers.MonoSubstitutionCipher import MonoSubstitutionCipher from crackers.ClimbingCracker import ClimbingCracker from crackers.SubstitutionCracker import SubstitutionCracker class MonoSubstitutionCracker(ClimbingCracker, SubstitutionCracker): def __init__(self, cipher: Type[Cipher] = MonoSubstitutionCipher, alphabet=string.ascii_uppercase, restart_threshold=200, iterations=5000): super(ClimbingCracker).__init__(alphabet=alphabet) super(MonoSubstitutionCracker, self).__init__(restart_threshold=restart_threshold, iterations=iterations) self.cipher = cipher def decrypt(self, key, ciphertext: str) -> str: return self.cipher(key).decrypt(ciphertext) # TODO maybe make keys lists or tuples instead? def generate_random_key(self) -> str: letters = self.alphabet random.shuffle(list(letters)) return ''.join(letters) def mutate_key(self, key: str) -> str: i1 = random.randint(0, len(key) - 1) i2 = random.randint(0, len(key) - 1) while i1 == i2: i2 = random.randint(0, len(key) - 1) if i1 > i2: i1, i2 = i2, i1 return key[:i1] + key[i2] + key[i1 + 1:i2] + key[i1] + key[i2 + 1:]
<filename>crackers/MonoSubstitutionCracker.py import random import string from typing import Type from ciphers.Cipher import Cipher from ciphers.MonoSubstitutionCipher import MonoSubstitutionCipher from crackers.ClimbingCracker import ClimbingCracker from crackers.SubstitutionCracker import SubstitutionCracker class MonoSubstitutionCracker(ClimbingCracker, SubstitutionCracker): def __init__(self, cipher: Type[Cipher] = MonoSubstitutionCipher, alphabet=string.ascii_uppercase, restart_threshold=200, iterations=5000): super(ClimbingCracker).__init__(alphabet=alphabet) super(MonoSubstitutionCracker, self).__init__(restart_threshold=restart_threshold, iterations=iterations) self.cipher = cipher def decrypt(self, key, ciphertext: str) -> str: return self.cipher(key).decrypt(ciphertext) # TODO maybe make keys lists or tuples instead? def generate_random_key(self) -> str: letters = self.alphabet random.shuffle(list(letters)) return ''.join(letters) def mutate_key(self, key: str) -> str: i1 = random.randint(0, len(key) - 1) i2 = random.randint(0, len(key) - 1) while i1 == i2: i2 = random.randint(0, len(key) - 1) if i1 > i2: i1, i2 = i2, i1 return key[:i1] + key[i2] + key[i1 + 1:i2] + key[i1] + key[i2 + 1:]
en
0.888641
# TODO maybe make keys lists or tuples instead?
3.141931
3
tests/examples/bankfile_test.py
kalaspuff/stockholm
15
6621480
from typing import List from stockholm import Money, get_currency from stockholm.exceptions import CurrencyMismatchError example_content = """ 000000000000001 388461894717 OLDSCHOOLFINTECHSOLUTIONS004711000003 471846827769 9173689 9999000192000000272947 USD - 3336282671946 471846827769 557274901 1000824618000272944 USD 020000047284999 484761849926 4444205 37000116301000072944 USD - 3336282671947 990000000000001X00000399300001132919USD END """ def get_transaction_amounts(content: str) -> List[Money]: amounts: List[Money] = [] total_amount: Money = Money(0) transaction_count: int = 0 transaction_count_validation: int = 0 for line_num, line in enumerate(content.split("\n"), 1): if not line.strip(): continue try: if line.startswith("00"): transaction_count = int(line[64 : (64 + 6)]) elif line.startswith("99"): total_amount = Money( line[25 : (25 + 11)], from_sub_units=True, currency=get_currency(line[36 : (36 + 3)]) ) transaction_count_validation = int(line[16 : (16 + 6)]) break elif line.startswith("4"): amount = Money(line[27 : (27 + 9)], from_sub_units=True, currency=get_currency(line[50 : (50 + 3)])) amounts.append((amount)) except Exception as exc: raise Exception( f"Invalid content format – [line number = {line_num}, line length = {len(line)}, exception = ({type(exc)} – '{exc}')]" ) if transaction_count != transaction_count_validation: raise Exception( f"Transaction count does not match – ['00': count = {transaction_count} | '99': count = {transaction_count_validation}]" ) if transaction_count != len(amounts): raise Exception( f"Transaction count does not match – ['00': count = {transaction_count} | '4X': count = {len(amounts)}]" ) if not transaction_count: return amounts total_amount_sum: Money = Money(0) try: total_amount_sum = Money.sum(amounts) except CurrencyMismatchError: currency_codes_string = "', '".join({str(a.currency_code) for a in amounts}) raise Exception( f"Multiple currency codes within the same content block – ['00': currency = 
'{total_amount.currency}' | '4X': currencies = ('{currency_codes_string}')]" ) if total_amount.currency != total_amount_sum.currency: raise Exception( f"Currency codes does not match – ['00': currency = '{total_amount.currency}' | '4X': currency = '{total_amount_sum.currency}']" ) if not total_amount.currency: raise Exception( f"Currency codes missing – ['00': currency = '{total_amount.currency}' | '4X': currency = '{total_amount_sum.currency}']" ) if total_amount != total_amount_sum: diff = abs(total_amount - total_amount_sum) raise Exception( f"Sums of amounts differ with {diff} – ['00': amount = {total_amount} | '4X': amount = {total_amount_sum}]" ) return amounts amounts = get_transaction_amounts(example_content)
from typing import List from stockholm import Money, get_currency from stockholm.exceptions import CurrencyMismatchError example_content = """ 000000000000001 388461894717 OLDSCHOOLFINTECHSOLUTIONS004711000003 471846827769 9173689 9999000192000000272947 USD - 3336282671946 471846827769 557274901 1000824618000272944 USD 020000047284999 484761849926 4444205 37000116301000072944 USD - 3336282671947 990000000000001X00000399300001132919USD END """ def get_transaction_amounts(content: str) -> List[Money]: amounts: List[Money] = [] total_amount: Money = Money(0) transaction_count: int = 0 transaction_count_validation: int = 0 for line_num, line in enumerate(content.split("\n"), 1): if not line.strip(): continue try: if line.startswith("00"): transaction_count = int(line[64 : (64 + 6)]) elif line.startswith("99"): total_amount = Money( line[25 : (25 + 11)], from_sub_units=True, currency=get_currency(line[36 : (36 + 3)]) ) transaction_count_validation = int(line[16 : (16 + 6)]) break elif line.startswith("4"): amount = Money(line[27 : (27 + 9)], from_sub_units=True, currency=get_currency(line[50 : (50 + 3)])) amounts.append((amount)) except Exception as exc: raise Exception( f"Invalid content format – [line number = {line_num}, line length = {len(line)}, exception = ({type(exc)} – '{exc}')]" ) if transaction_count != transaction_count_validation: raise Exception( f"Transaction count does not match – ['00': count = {transaction_count} | '99': count = {transaction_count_validation}]" ) if transaction_count != len(amounts): raise Exception( f"Transaction count does not match – ['00': count = {transaction_count} | '4X': count = {len(amounts)}]" ) if not transaction_count: return amounts total_amount_sum: Money = Money(0) try: total_amount_sum = Money.sum(amounts) except CurrencyMismatchError: currency_codes_string = "', '".join({str(a.currency_code) for a in amounts}) raise Exception( f"Multiple currency codes within the same content block – ['00': currency = 
'{total_amount.currency}' | '4X': currencies = ('{currency_codes_string}')]" ) if total_amount.currency != total_amount_sum.currency: raise Exception( f"Currency codes does not match – ['00': currency = '{total_amount.currency}' | '4X': currency = '{total_amount_sum.currency}']" ) if not total_amount.currency: raise Exception( f"Currency codes missing – ['00': currency = '{total_amount.currency}' | '4X': currency = '{total_amount_sum.currency}']" ) if total_amount != total_amount_sum: diff = abs(total_amount - total_amount_sum) raise Exception( f"Sums of amounts differ with {diff} – ['00': amount = {total_amount} | '4X': amount = {total_amount_sum}]" ) return amounts amounts = get_transaction_amounts(example_content)
en
0.182762
000000000000001 388461894717 OLDSCHOOLFINTECHSOLUTIONS004711000003 471846827769 9173689 9999000192000000272947 USD - 3336282671946 471846827769 557274901 1000824618000272944 USD 020000047284999 484761849926 4444205 37000116301000072944 USD - 3336282671947 990000000000001X00000399300001132919USD END
3.141073
3
dqc/rule/basic/rows_count_mismatch_and_another.py
Indexical-Metrics-Measure-Advisory/watchmen-dqc
1
6621481
from pandas import DataFrame from watchmen_boot.storage.model.data_source import DataSource from dqc.model.analysis.monitor_rule import MonitorRule from dqc.rule.utils.date_utils import get_date_range from dqc.rule.utils.topic_utils import data_is_empty, table_not_exist, init_topic_rule_result from dqc.sdk.admin.admin_sdk import get_topic_by_id from dqc.sdk.common.common_sdk import get_datasource_by_id from dqc.service.query.index import query_topic_data_count_by_datetime def init(): def rows_count_mismatch_and_another(df: DataFrame, topic, rule: MonitorRule): execute_result = init_topic_rule_result(rule, topic) if table_not_exist(df) or data_is_empty(df): return None start_date, end_date = get_date_range(rule.params.statisticalInterval) topic_id = rule.params.topicId another_topic = get_topic_by_id(topic_id) data_source: DataSource = get_datasource_by_id(another_topic.dataSourceId) current_count = len(df.index) prior_count = query_topic_data_count_by_datetime(another_topic, start_date, end_date, data_source) if current_count != prior_count: execute_result.topicResult.result = True else: execute_result.topicResult.result = False return execute_result return rows_count_mismatch_and_another
from pandas import DataFrame from watchmen_boot.storage.model.data_source import DataSource from dqc.model.analysis.monitor_rule import MonitorRule from dqc.rule.utils.date_utils import get_date_range from dqc.rule.utils.topic_utils import data_is_empty, table_not_exist, init_topic_rule_result from dqc.sdk.admin.admin_sdk import get_topic_by_id from dqc.sdk.common.common_sdk import get_datasource_by_id from dqc.service.query.index import query_topic_data_count_by_datetime def init(): def rows_count_mismatch_and_another(df: DataFrame, topic, rule: MonitorRule): execute_result = init_topic_rule_result(rule, topic) if table_not_exist(df) or data_is_empty(df): return None start_date, end_date = get_date_range(rule.params.statisticalInterval) topic_id = rule.params.topicId another_topic = get_topic_by_id(topic_id) data_source: DataSource = get_datasource_by_id(another_topic.dataSourceId) current_count = len(df.index) prior_count = query_topic_data_count_by_datetime(another_topic, start_date, end_date, data_source) if current_count != prior_count: execute_result.topicResult.result = True else: execute_result.topicResult.result = False return execute_result return rows_count_mismatch_and_another
none
1
2.281343
2
install/core/python/tank/util/system_settings.py
JoanAzpeitia/lp_sg
0
6621482
# Copyright (c) 2017 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. """ System settings management. """ import urllib class SystemSettings(object): """ Handles loading the system settings. """ @property def http_proxy(self): """ Retrieves the operating system http proxy. First, the method scans the environment for variables named http_proxy, in case insensitive way. If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred. When the method cannot find such environment variables: - for Mac OS X, it will look for proxy information from Mac OS X System Configuration, - for Windows, it will look for proxy information from Windows Systems Registry. .. note:: There is a restriction when looking for proxy information from Mac OS X System Configuration or Windows Systems Registry: in these cases, the Toolkit does not support the use of proxies which require authentication (username and password). """ # Get the dictionary of scheme to proxy server URL mappings; for example: # {"http": "http://foo:bar@172.16.17.32:80", "https": "http://172.16.17.32:443"} # "getproxies" scans the environment for variables named <scheme>_proxy, in case insensitive way. # When it cannot find it, for Mac OS X it looks for proxy information from Mac OSX System Configuration, # and for Windows it looks for proxy information from Windows Systems Registry. # If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred. 
# Note the following restriction: "getproxies" does not support the use of proxies which # require authentication (user and password) when looking for proxy information from # Mac OSX System Configuration or Windows Systems Registry. system_proxies = urllib.getproxies() # Get the http proxy when it exists in the dictionary. proxy = system_proxies.get("http") if proxy: # Remove any spurious "http://" from the http proxy string. proxy = proxy.replace("http://", "", 1) return proxy
# Copyright (c) 2017 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. """ System settings management. """ import urllib class SystemSettings(object): """ Handles loading the system settings. """ @property def http_proxy(self): """ Retrieves the operating system http proxy. First, the method scans the environment for variables named http_proxy, in case insensitive way. If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred. When the method cannot find such environment variables: - for Mac OS X, it will look for proxy information from Mac OS X System Configuration, - for Windows, it will look for proxy information from Windows Systems Registry. .. note:: There is a restriction when looking for proxy information from Mac OS X System Configuration or Windows Systems Registry: in these cases, the Toolkit does not support the use of proxies which require authentication (username and password). """ # Get the dictionary of scheme to proxy server URL mappings; for example: # {"http": "http://foo:bar@172.16.17.32:80", "https": "http://172.16.17.32:443"} # "getproxies" scans the environment for variables named <scheme>_proxy, in case insensitive way. # When it cannot find it, for Mac OS X it looks for proxy information from Mac OSX System Configuration, # and for Windows it looks for proxy information from Windows Systems Registry. # If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred. 
# Note the following restriction: "getproxies" does not support the use of proxies which # require authentication (user and password) when looking for proxy information from # Mac OSX System Configuration or Windows Systems Registry. system_proxies = urllib.getproxies() # Get the http proxy when it exists in the dictionary. proxy = system_proxies.get("http") if proxy: # Remove any spurious "http://" from the http proxy string. proxy = proxy.replace("http://", "", 1) return proxy
en
0.811014
# Copyright (c) 2017 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. System settings management. Handles loading the system settings. Retrieves the operating system http proxy. First, the method scans the environment for variables named http_proxy, in case insensitive way. If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred. When the method cannot find such environment variables: - for Mac OS X, it will look for proxy information from Mac OS X System Configuration, - for Windows, it will look for proxy information from Windows Systems Registry. .. note:: There is a restriction when looking for proxy information from Mac OS X System Configuration or Windows Systems Registry: in these cases, the Toolkit does not support the use of proxies which require authentication (username and password). # Get the dictionary of scheme to proxy server URL mappings; for example: # {"http": "http://foo:bar@172.16.17.32:80", "https": "http://172.16.17.32:443"} # "getproxies" scans the environment for variables named <scheme>_proxy, in case insensitive way. # When it cannot find it, for Mac OS X it looks for proxy information from Mac OSX System Configuration, # and for Windows it looks for proxy information from Windows Systems Registry. # If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred. # Note the following restriction: "getproxies" does not support the use of proxies which # require authentication (user and password) when looking for proxy information from # Mac OSX System Configuration or Windows Systems Registry. 
# Get the http proxy when it exists in the dictionary. # Remove any spurious "http://" from the http proxy string.
1.975871
2
Scrapy/Scrapy_project/settings.py
PPjaisri/Senior-project
0
6621483
# Scrapy settings for Scrapy_project project # # For simplicity, this file contains only settings considered important or # commonly used. You can find more settings consulting the documentation: # # https://docs.scrapy.org/en/latest/topics/settings.html # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html # https://docs.scrapy.org/en/latest/topics/spider-middleware.html BOT_NAME = 'Scrapy_project' SPIDER_MODULES = ['Scrapy_project.spiders'] NEWSPIDER_MODULE = 'Scrapy_project.spiders' # Crawl responsibly by identifying yourself (and your website) on the user-agent # USER_AGENT = 'Scrapy_project (+https://lab107.kasetsart-university.org/)' USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36 Edg/95.0.1020.44' # Obey robots.txt rules ROBOTSTXT_OBEY = False # Configure maximum concurrent requests performed by Scrapy (default: 16) CONCURRENT_REQUESTS = 1 # CONCURRENT_ITEMS = 300 # Configure a delay for requests for the same website (default: 0) # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay # See also autothrottle settings and docs DOWNLOAD_DELAY = 2 # The download delay setting will honor only one of: #CONCURRENT_REQUESTS_PER_DOMAIN = 16 #CONCURRENT_REQUESTS_PER_IP = 16 # Disable cookies (enabled by default) COOKIES_ENABLED = False # Disable Telnet Console (enabled by default) #TELNETCONSOLE_ENABLED = False # Override the default request headers: # DEFAULT_REQUEST_HEADERS = { # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # 'Accept-Language': 'en', # } SELENIUM_DRIVER_NAME = 'msedgedriver.exe' SELENIUM_DRIVER_EXECUTABLE_PATH = 'Scrapy\Scrapy_project\msedgedriver.exe' SELENIUM_DRIVER_ARGUMENTS = ['-headless'] # Enable or disable spider middlewares # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html SPIDER_MIDDLEWARES = { # 'Scrapy_project.middlewares.ScrapyProjectSpiderMiddleware': 543, 
'scrapy_selenium.SeleniumMiddleware': 800 } # Enable or disable downloader middlewares # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html # DOWNLOADER_MIDDLEWARES = { # 'Scrapy_project.middlewares.ScrapyProjectDownloaderMiddleware': 543, # } # Enable or disable extensions # See https://docs.scrapy.org/en/latest/topics/extensions.html # EXTENSIONS = { # 'scrapy.extensions.telnet.TelnetConsole': None, # } # Configure item pipelines # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html # ITEM_PIPELINES = { # 'Scrapy_project.pipelines.ScrapyProjectPipeline': 300, # } # Enable and configure the AutoThrottle extension (disabled by default) # See https://docs.scrapy.org/en/latest/topics/autothrottle.html #AUTOTHROTTLE_ENABLED = True # The initial download delay # AUTOTHROTTLE_START_DELAY = 5 # The maximum download delay to be set in case of high latencies # AUTOTHROTTLE_MAX_DELAY = 60 # The average number of requests Scrapy should be sending in parallel to # each remote server #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 # Enable showing throttling stats for every response received: #AUTOTHROTTLE_DEBUG = False # Enable and configure HTTP caching (disabled by default) # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings #HTTPCACHE_ENABLED = True #HTTPCACHE_EXPIRATION_SECS = 0 #HTTPCACHE_DIR = 'httpcache' #HTTPCACHE_IGNORE_HTTP_CODES = [] #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' FEED_EXPORT_ENCODING = 'utf-8' DUPEFILTER_CLASS = 'scrapy.dupefilters.BaseDupeFilter' RETRY_TIMES = 0 # LOG_LEVEL = 'INFO' LOG_LEVEL = 'DEBUG' # LOG_LEVEL = None # Selenium driver from shutil import which SELENIUM_DRIVER_NAME = 'edge' SELENIUM_DRIVER_EXECUTABLE_PATH = which('edgedriver') SELENIUM_DRIVER_ARGUMENTS=['--headless'] DOWNLOADER_MIDDLEWARES = { 'scrapy_selenium.SeleniumMiddleware': 800 }
# Scrapy settings for Scrapy_project project # # For simplicity, this file contains only settings considered important or # commonly used. You can find more settings consulting the documentation: # # https://docs.scrapy.org/en/latest/topics/settings.html # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html # https://docs.scrapy.org/en/latest/topics/spider-middleware.html BOT_NAME = 'Scrapy_project' SPIDER_MODULES = ['Scrapy_project.spiders'] NEWSPIDER_MODULE = 'Scrapy_project.spiders' # Crawl responsibly by identifying yourself (and your website) on the user-agent # USER_AGENT = 'Scrapy_project (+https://lab107.kasetsart-university.org/)' USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36 Edg/95.0.1020.44' # Obey robots.txt rules ROBOTSTXT_OBEY = False # Configure maximum concurrent requests performed by Scrapy (default: 16) CONCURRENT_REQUESTS = 1 # CONCURRENT_ITEMS = 300 # Configure a delay for requests for the same website (default: 0) # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay # See also autothrottle settings and docs DOWNLOAD_DELAY = 2 # The download delay setting will honor only one of: #CONCURRENT_REQUESTS_PER_DOMAIN = 16 #CONCURRENT_REQUESTS_PER_IP = 16 # Disable cookies (enabled by default) COOKIES_ENABLED = False # Disable Telnet Console (enabled by default) #TELNETCONSOLE_ENABLED = False # Override the default request headers: # DEFAULT_REQUEST_HEADERS = { # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # 'Accept-Language': 'en', # } SELENIUM_DRIVER_NAME = 'msedgedriver.exe' SELENIUM_DRIVER_EXECUTABLE_PATH = 'Scrapy\Scrapy_project\msedgedriver.exe' SELENIUM_DRIVER_ARGUMENTS = ['-headless'] # Enable or disable spider middlewares # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html SPIDER_MIDDLEWARES = { # 'Scrapy_project.middlewares.ScrapyProjectSpiderMiddleware': 543, 
'scrapy_selenium.SeleniumMiddleware': 800 } # Enable or disable downloader middlewares # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html # DOWNLOADER_MIDDLEWARES = { # 'Scrapy_project.middlewares.ScrapyProjectDownloaderMiddleware': 543, # } # Enable or disable extensions # See https://docs.scrapy.org/en/latest/topics/extensions.html # EXTENSIONS = { # 'scrapy.extensions.telnet.TelnetConsole': None, # } # Configure item pipelines # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html # ITEM_PIPELINES = { # 'Scrapy_project.pipelines.ScrapyProjectPipeline': 300, # } # Enable and configure the AutoThrottle extension (disabled by default) # See https://docs.scrapy.org/en/latest/topics/autothrottle.html #AUTOTHROTTLE_ENABLED = True # The initial download delay # AUTOTHROTTLE_START_DELAY = 5 # The maximum download delay to be set in case of high latencies # AUTOTHROTTLE_MAX_DELAY = 60 # The average number of requests Scrapy should be sending in parallel to # each remote server #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 # Enable showing throttling stats for every response received: #AUTOTHROTTLE_DEBUG = False # Enable and configure HTTP caching (disabled by default) # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings #HTTPCACHE_ENABLED = True #HTTPCACHE_EXPIRATION_SECS = 0 #HTTPCACHE_DIR = 'httpcache' #HTTPCACHE_IGNORE_HTTP_CODES = [] #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' FEED_EXPORT_ENCODING = 'utf-8' DUPEFILTER_CLASS = 'scrapy.dupefilters.BaseDupeFilter' RETRY_TIMES = 0 # LOG_LEVEL = 'INFO' LOG_LEVEL = 'DEBUG' # LOG_LEVEL = None # Selenium driver from shutil import which SELENIUM_DRIVER_NAME = 'edge' SELENIUM_DRIVER_EXECUTABLE_PATH = which('edgedriver') SELENIUM_DRIVER_ARGUMENTS=['--headless'] DOWNLOADER_MIDDLEWARES = { 'scrapy_selenium.SeleniumMiddleware': 800 }
en
0.642504
# Scrapy settings for Scrapy_project project # # For simplicity, this file contains only settings considered important or # commonly used. You can find more settings consulting the documentation: # # https://docs.scrapy.org/en/latest/topics/settings.html # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html # https://docs.scrapy.org/en/latest/topics/spider-middleware.html # Crawl responsibly by identifying yourself (and your website) on the user-agent # USER_AGENT = 'Scrapy_project (+https://lab107.kasetsart-university.org/)' # Obey robots.txt rules # Configure maximum concurrent requests performed by Scrapy (default: 16) # CONCURRENT_ITEMS = 300 # Configure a delay for requests for the same website (default: 0) # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay # See also autothrottle settings and docs # The download delay setting will honor only one of: #CONCURRENT_REQUESTS_PER_DOMAIN = 16 #CONCURRENT_REQUESTS_PER_IP = 16 # Disable cookies (enabled by default) # Disable Telnet Console (enabled by default) #TELNETCONSOLE_ENABLED = False # Override the default request headers: # DEFAULT_REQUEST_HEADERS = { # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # 'Accept-Language': 'en', # } # Enable or disable spider middlewares # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html # 'Scrapy_project.middlewares.ScrapyProjectSpiderMiddleware': 543, # Enable or disable downloader middlewares # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html # DOWNLOADER_MIDDLEWARES = { # 'Scrapy_project.middlewares.ScrapyProjectDownloaderMiddleware': 543, # } # Enable or disable extensions # See https://docs.scrapy.org/en/latest/topics/extensions.html # EXTENSIONS = { # 'scrapy.extensions.telnet.TelnetConsole': None, # } # Configure item pipelines # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html # ITEM_PIPELINES = { # 
'Scrapy_project.pipelines.ScrapyProjectPipeline': 300, # } # Enable and configure the AutoThrottle extension (disabled by default) # See https://docs.scrapy.org/en/latest/topics/autothrottle.html #AUTOTHROTTLE_ENABLED = True # The initial download delay # AUTOTHROTTLE_START_DELAY = 5 # The maximum download delay to be set in case of high latencies # AUTOTHROTTLE_MAX_DELAY = 60 # The average number of requests Scrapy should be sending in parallel to # each remote server #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 # Enable showing throttling stats for every response received: #AUTOTHROTTLE_DEBUG = False # Enable and configure HTTP caching (disabled by default) # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings #HTTPCACHE_ENABLED = True #HTTPCACHE_EXPIRATION_SECS = 0 #HTTPCACHE_DIR = 'httpcache' #HTTPCACHE_IGNORE_HTTP_CODES = [] #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' # LOG_LEVEL = 'INFO' # LOG_LEVEL = None # Selenium driver
1.676047
2
pyTCPClientServer/cli_envelop_thread_after_accept.py
a2gs/pythonStudy
0
6621484
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Social Network for Programmers and Developers # from: https://morioh.com/p/1d5fd6c04b58 import socket HEADER = 64 PORT = 5050 FORMAT = 'utf-8' DISCONNECT_MESSAGE = "!DISCONNECT" #SERVER = "192.168.1.26" SERVER = "127.0.1.1" ADDR = (SERVER, PORT) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(ADDR) def send(msg): message = msg.encode(FORMAT) msg_length = len(message) send_length = str(msg_length).encode(FORMAT) send_length += b' ' * (HEADER - len(send_length)) client.send(send_length) client.send(message) print(client.recv(2048).decode(FORMAT)) send("Hello World!") input() send("Hello Everyone!") input() send("Hello Tim!") send(DISCONNECT_MESSAGE)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Social Network for Programmers and Developers # from: https://morioh.com/p/1d5fd6c04b58 import socket HEADER = 64 PORT = 5050 FORMAT = 'utf-8' DISCONNECT_MESSAGE = "!DISCONNECT" #SERVER = "192.168.1.26" SERVER = "127.0.1.1" ADDR = (SERVER, PORT) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(ADDR) def send(msg): message = msg.encode(FORMAT) msg_length = len(message) send_length = str(msg_length).encode(FORMAT) send_length += b' ' * (HEADER - len(send_length)) client.send(send_length) client.send(message) print(client.recv(2048).decode(FORMAT)) send("Hello World!") input() send("Hello Everyone!") input() send("Hello Tim!") send(DISCONNECT_MESSAGE)
en
0.64769
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Social Network for Programmers and Developers # from: https://morioh.com/p/1d5fd6c04b58 #SERVER = "192.168.1.26"
3.248038
3
coffeeBot.py
nickmachnik/CoffeeBuddyBot
0
6621485
<filename>coffeeBot.py #!/usr/bin/env python import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import json import datetime import time def main(): with open("./config.json", 'r') as fin: config = json.load(fin) config["round"] = config["round"] % (len(config["recipients"]) - 1) bot = CoffeeBot(config) bot._send_mails() # bot._send_test("<EMAIL>") # bot._send_test("<EMAIL>") config["round"] += 1 with open("./config.json", 'w') as fout: json.dump(config, fout, indent=2, sort_keys=True) class CoffeeBot: def __init__(self, config): self.recipients = config["recipients"] self.sender_account = config["sender_account"] self.sender_password = config["sender_password"] self.sender_username = config["sender_username"] self.smtp_port = config["smtp_port"] self.smtp_server = config["smtp_server"] self.round = config["round"] year, week_num, day_of_week = datetime.date.today().isocalendar() self.subject = config["subject"] + " Week {} {}".format(week_num, year) def _send_test(self, recipient): server = smtplib.SMTP(self.smtp_server, self.smtp_port) server.starttls() server.login(self.sender_username, self.sender_password) message = MIMEMultipart('alternative') message['From'] = self.sender_account message['To'] = recipient message['Subject'] = self.subject body = "This is a coffee buddy bot test mail." 
message.attach(MIMEText(body, 'html')) text = message.as_string() server.sendmail(self.sender_account, recipient, text) server.quit() def _send_mails(self): server = smtplib.SMTP(self.smtp_server, self.smtp_port) server.starttls() server.login(self.sender_username, self.sender_password) for a, b in self._make_pairs(): print(a[0], b[0]) if a == "BREAK": self._send_break_message(server, b) elif b == "BREAK": self._send_break_message(server, a) else: self._send_buddy_message(server, a, b) self._send_buddy_message(server, b, a) time.sleep(3) server.quit() def _send_buddy_message(self, server, recipient, buddy): message = MIMEMultipart('alternative') message['From'] = self.sender_account message['To'] = recipient[1] message['Subject'] = self.subject body = "Your coffee body this week is {} ({})".format(*buddy) message.attach(MIMEText(body, 'html')) text = message.as_string() server.sendmail(self.sender_account, recipient, text) def _send_break_message(self, server, recipient): message = MIMEMultipart('alternative') message['From'] = self.sender_account message['To'] = recipient[1] message['Subject'] = self.subject body = "You have a break from coffee buddies this week :)" message.attach(MIMEText(body, 'html')) text = message.as_string() server.sendmail(self.sender_account, recipient, text) def _make_pairs(self): hs = len(self.recipients) // 2 fixed, moving = self.recipients[-1], self.recipients[:-1] current_rotation = moving[self.round:] + moving[:self.round] side_a, side_b = current_rotation[:hs], current_rotation[hs:] side_b = side_b[::-1] + [fixed] for a, b in zip(side_a, side_b): yield(a, b) if __name__ == '__main__': main()
<filename>coffeeBot.py #!/usr/bin/env python import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import json import datetime import time def main(): with open("./config.json", 'r') as fin: config = json.load(fin) config["round"] = config["round"] % (len(config["recipients"]) - 1) bot = CoffeeBot(config) bot._send_mails() # bot._send_test("<EMAIL>") # bot._send_test("<EMAIL>") config["round"] += 1 with open("./config.json", 'w') as fout: json.dump(config, fout, indent=2, sort_keys=True) class CoffeeBot: def __init__(self, config): self.recipients = config["recipients"] self.sender_account = config["sender_account"] self.sender_password = config["sender_password"] self.sender_username = config["sender_username"] self.smtp_port = config["smtp_port"] self.smtp_server = config["smtp_server"] self.round = config["round"] year, week_num, day_of_week = datetime.date.today().isocalendar() self.subject = config["subject"] + " Week {} {}".format(week_num, year) def _send_test(self, recipient): server = smtplib.SMTP(self.smtp_server, self.smtp_port) server.starttls() server.login(self.sender_username, self.sender_password) message = MIMEMultipart('alternative') message['From'] = self.sender_account message['To'] = recipient message['Subject'] = self.subject body = "This is a coffee buddy bot test mail." 
message.attach(MIMEText(body, 'html')) text = message.as_string() server.sendmail(self.sender_account, recipient, text) server.quit() def _send_mails(self): server = smtplib.SMTP(self.smtp_server, self.smtp_port) server.starttls() server.login(self.sender_username, self.sender_password) for a, b in self._make_pairs(): print(a[0], b[0]) if a == "BREAK": self._send_break_message(server, b) elif b == "BREAK": self._send_break_message(server, a) else: self._send_buddy_message(server, a, b) self._send_buddy_message(server, b, a) time.sleep(3) server.quit() def _send_buddy_message(self, server, recipient, buddy): message = MIMEMultipart('alternative') message['From'] = self.sender_account message['To'] = recipient[1] message['Subject'] = self.subject body = "Your coffee body this week is {} ({})".format(*buddy) message.attach(MIMEText(body, 'html')) text = message.as_string() server.sendmail(self.sender_account, recipient, text) def _send_break_message(self, server, recipient): message = MIMEMultipart('alternative') message['From'] = self.sender_account message['To'] = recipient[1] message['Subject'] = self.subject body = "You have a break from coffee buddies this week :)" message.attach(MIMEText(body, 'html')) text = message.as_string() server.sendmail(self.sender_account, recipient, text) def _make_pairs(self): hs = len(self.recipients) // 2 fixed, moving = self.recipients[-1], self.recipients[:-1] current_rotation = moving[self.round:] + moving[:self.round] side_a, side_b = current_rotation[:hs], current_rotation[hs:] side_b = side_b[::-1] + [fixed] for a, b in zip(side_a, side_b): yield(a, b) if __name__ == '__main__': main()
ja
0.145505
#!/usr/bin/env python # bot._send_test("<EMAIL>") # bot._send_test("<EMAIL>")
2.837836
3
SBR/Driver.py
Bobtron/ArbFinder
0
6621486
<reponame>Bobtron/ArbFinder<gh_stars>0 from pysbr import * import pendulum from SBR.League import League from pprint import pprint start = pendulum.now().add(days=1)#.strftime('%Y-%m-%d') end = pendulum.now().add(days=8)#.strftime('%Y-%m-%d') sb = Sportsbook() # sb_ids = sb.ids(['Pinnacle', 'Bovada', 'Bookmaker', 'BetOnline', 'Heritage Sports', 'BetOnline', 'GTbets', 'YouWager', # 'Intertops', 'JustBet', 'WagerWeb', 'SportsBetting']) print(sb.names) sb_ids = list(sb.names.keys()) epl = EPL() epl_league = League(epl) epl_league.add_events(epl.market_ids(['moneyline']), sb_ids, start, end) epl_league.pprint_events() # pp = pprint.PrettyPrinter(indent=4) # # print(end) # # nfl = NFL() # epl = EPL() # sb = Sportsbook() # # print(nfl.market_names) # # print(sb.names) # # events = EventsByDateRange([epl.league_id], start, end) # # current_line = CurrentLines(events.ids(), epl.market_ids(['moneyline']), sb.ids(['Pinnacle', 'Bovada'])) # # pp.pprint(events.list()) # # print() # # pp.pprint(current_line.list(events))
from pysbr import * import pendulum from SBR.League import League from pprint import pprint start = pendulum.now().add(days=1)#.strftime('%Y-%m-%d') end = pendulum.now().add(days=8)#.strftime('%Y-%m-%d') sb = Sportsbook() # sb_ids = sb.ids(['Pinnacle', 'Bovada', 'Bookmaker', 'BetOnline', 'Heritage Sports', 'BetOnline', 'GTbets', 'YouWager', # 'Intertops', 'JustBet', 'WagerWeb', 'SportsBetting']) print(sb.names) sb_ids = list(sb.names.keys()) epl = EPL() epl_league = League(epl) epl_league.add_events(epl.market_ids(['moneyline']), sb_ids, start, end) epl_league.pprint_events() # pp = pprint.PrettyPrinter(indent=4) # # print(end) # # nfl = NFL() # epl = EPL() # sb = Sportsbook() # # print(nfl.market_names) # # print(sb.names) # # events = EventsByDateRange([epl.league_id], start, end) # # current_line = CurrentLines(events.ids(), epl.market_ids(['moneyline']), sb.ids(['Pinnacle', 'Bovada'])) # # pp.pprint(events.list()) # # print() # # pp.pprint(current_line.list(events))
en
0.330259
#.strftime('%Y-%m-%d') #.strftime('%Y-%m-%d') # sb_ids = sb.ids(['Pinnacle', 'Bovada', 'Bookmaker', 'BetOnline', 'Heritage Sports', 'BetOnline', 'GTbets', 'YouWager', # 'Intertops', 'JustBet', 'WagerWeb', 'SportsBetting']) # pp = pprint.PrettyPrinter(indent=4) # # print(end) # # nfl = NFL() # epl = EPL() # sb = Sportsbook() # # print(nfl.market_names) # # print(sb.names) # # events = EventsByDateRange([epl.league_id], start, end) # # current_line = CurrentLines(events.ids(), epl.market_ids(['moneyline']), sb.ids(['Pinnacle', 'Bovada'])) # # pp.pprint(events.list()) # # print() # # pp.pprint(current_line.list(events))
2.484794
2
structures/migrations/0001_initial.py
buahaha/aa-structures
0
6621487
# Generated by Django 2.2.5 on 2019-11-21 03:27 import multiselectfield.db.fields import django.core.validators import django.db.models.deletion from django.db import migrations, models import structures.models class Migration(migrations.Migration): initial = True dependencies = [ ("eveonline", "0010_alliance_ticker"), ("authentication", "0016_ownershiprecord"), ] operations = [ migrations.CreateModel( name="General", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ], options={ "permissions": ( ("basic_access", "Can access this app and view"), ("view_alliance_structures", "Can view alliance structures"), ("view_all_structures", "Can view all structures"), ("add_structure_owner", "Can add new structure owner"), ), "default_permissions": (), "managed": False, }, ), migrations.CreateModel( name="EveConstellation", fields=[ ( "id", models.IntegerField( help_text="Eve Online region ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ], ), migrations.CreateModel( name="EveEntity", fields=[ ( "id", models.IntegerField( primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ( "category", models.IntegerField( choices=[ (1, "character"), (2, "corporation"), (3, "alliance"), (4, "faction"), (5, "other"), ] ), ), ( "name", models.CharField( blank=True, default=None, max_length=255, null=True ), ), ], ), migrations.CreateModel( name="EveGroup", fields=[ ( "id", models.IntegerField( help_text="Eve Online group ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ], ), migrations.CreateModel( name="EveRegion", fields=[ ( "id", models.IntegerField( help_text="Eve Online region ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), 
("name", models.CharField(max_length=100)), ], ), migrations.CreateModel( name="EveSolarSystem", fields=[ ( "id", models.IntegerField( help_text="Eve Online solar system ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ("security_status", models.FloatField()), ( "eve_constellation", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveConstellation", ), ), ], ), migrations.CreateModel( name="EveType", fields=[ ( "id", models.IntegerField( help_text="Eve Online type ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ( "eve_group", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveGroup", ), ), ], ), migrations.CreateModel( name="Owner", fields=[ ( "corporation", models.OneToOneField( help_text="Corporation owning structures", on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to="eveonline.EveCorporationInfo", ), ), ( "structures_last_sync", models.DateTimeField( blank=True, default=None, help_text="when the last sync happened", null=True, ), ), ( "structures_last_error", models.IntegerField( choices=[ (0, "No error"), (1, "Invalid token"), (2, "Expired token"), (3, "Insufficient permissions"), (4, "No character set for fetching alliance contacts"), (5, "ESI API is currently unavailable"), (6, "Operaton mode does not match with current setting"), (99, "Unknown error"), ], default=0, help_text="error that occurred at the last sync atttempt (if any)", ), ), ( "notifications_last_sync", models.DateTimeField( blank=True, default=None, help_text="when the last sync happened", null=True, ), ), ( "notifications_last_error", models.IntegerField( choices=[ (0, "No error"), (1, "Invalid token"), (2, "Expired token"), (3, "Insufficient permissions"), (4, "No character set for fetching alliance 
contacts"), (5, "ESI API is currently unavailable"), (6, "Operaton mode does not match with current setting"), (99, "Unknown error"), ], default=0, help_text="error that occurred at the last sync atttempt (if any)", ), ), ( "character", models.ForeignKey( blank=True, default=None, help_text="character used for syncing structures", null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="authentication.CharacterOwnership", ), ), ], ), migrations.CreateModel( name="Structure", fields=[ ( "id", models.BigIntegerField( help_text="The Item ID of the structure", primary_key=True, serialize=False, ), ), ( "name", models.CharField( help_text="The full name of the structure", max_length=255 ), ), ( "position_x", models.FloatField( blank=True, default=None, help_text="x position of the structure in the solar system", null=True, ), ), ( "position_y", models.FloatField( blank=True, default=None, help_text="y position of the structure in the solar system", null=True, ), ), ( "position_z", models.FloatField( blank=True, default=None, help_text="z position of the structure in the solar system", null=True, ), ), ( "fuel_expires", models.DateTimeField( blank=True, default=None, help_text="Date on which the structure will run out of fuel", null=True, ), ), ( "next_reinforce_hour", models.IntegerField( blank=True, default=None, help_text="The requested change to reinforce_hour that will take effect at the time shown by next_reinforce_apply", null=True, validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(23), ], ), ), ( "next_reinforce_weekday", models.IntegerField( blank=True, default=None, help_text="The date and time when the structure’s newly requested reinforcement times (e.g. 
next_reinforce_hour and next_reinforce_day) will take effect", null=True, validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6), ], ), ), ( "next_reinforce_apply", models.DateTimeField( blank=True, default=None, help_text="The requested change to reinforce_weekday that will take effect at the time shown by next_reinforce_apply", null=True, ), ), ( "reinforce_hour", models.IntegerField( help_text="The hour of day that determines the four hour window when the structure will randomly exit its reinforcement periods and become vulnerable to attack against its armor and/or hull. The structure will become vulnerable at a random time that is +/- 2 hours centered on the value of this property", validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(23), ], ), ), ( "reinforce_weekday", models.IntegerField( blank=True, default=None, help_text="The day of the week when the structure exits its final reinforcement period and becomes vulnerable to attack against its hull. 
Monday is 0 and Sunday is 6", null=True, validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6), ], ), ), ( "state", models.IntegerField( choices=[ (0, "N/A"), (1, "anchor_vulnerable"), (2, "anchoring"), (3, "armor_reinforce"), (4, "armor_vulnerable"), (5, "deploy_vulnerable"), (6, "fitting_invulnerable"), (7, "hull_reinforce"), (8, "hull_vulnerable"), (9, "online_deprecated"), (10, "onlining_vulnerable"), (11, "shield_vulnerable"), (12, "unanchored"), (13, "unknown"), ], help_text="Current state of the structure", ), ), ( "state_timer_start", models.DateTimeField( blank=True, default=None, help_text="Date at which the structure will move to it’s next state", null=True, ), ), ( "state_timer_end", models.DateTimeField( blank=True, default=None, help_text="Date at which the structure entered it’s current state", null=True, ), ), ( "unanchors_at", models.DateTimeField( blank=True, default=None, help_text="Date at which the structure will unanchor", null=True, ), ), ( "last_updated", models.DateTimeField( blank=True, default=None, help_text="date this structure was last updated from the EVE server", null=True, ), ), ( "eve_solar_system", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveSolarSystem", ), ), ( "eve_type", models.ForeignKey( help_text="type of the structure", on_delete=django.db.models.deletion.CASCADE, to="structures.EveType", ), ), ( "owner", models.ForeignKey( help_text="Corporation that owns the structure", on_delete=django.db.models.deletion.CASCADE, to="structures.Owner", ), ), ], ), migrations.CreateModel( name="Webhook", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "name", models.CharField( help_text="short name to identify this webhook", max_length=64, unique=True, ), ), ( "webhook_type", models.IntegerField( choices=[(1, "Discord Webhook")], default=1, help_text="type of this webhook", ), ), ( 
"url", models.CharField( help_text="URL of this webhook, e.g. https://discordapp.com/api/webhooks/123456/abcdef", max_length=255, unique=True, ), ), ( "notes", models.TextField( blank=True, default=None, help_text="you can add notes about this webhook here if you want", null=True, ), ), ( "notification_types", multiselectfield.db.fields.MultiSelectField( choices=[ (401, "MoonminingAutomaticFracture"), (402, "MoonminingExtractionCancelled"), (403, "MoonminingExtractionFinished"), (404, "MoonminingExtractionStarted"), (405, "MoonminingLaserFired"), (513, "OwnershipTransferred"), (501, "StructureAnchoring"), (502, "StructureDestroyed"), (503, "StructureFuelAlert"), (504, "StructureLostArmor"), (505, "StructureLostShields"), (506, "StructureOnline"), (507, "StructureServicesOffline"), (508, "StructureUnanchoring"), (509, "StructureUnderAttack"), (510, "StructureWentHighPower"), (511, "StructureWentLowPower"), ], default=structures.models.get_default_notification_types, help_text="only notifications which selected types are sent to this webhook", max_length=67, ), ), ( "is_active", models.BooleanField( default=True, help_text="whether notifications are currently sent to this webhook", ), ), ( "is_default", models.BooleanField( default=False, help_text="whether newly added owners have this automatically webhook preset", ), ), ], ), migrations.AddField( model_name="owner", name="webhooks", field=models.ManyToManyField( blank=True, default=None, help_text="notifications are sent to these webhooks. 
", to="structures.Webhook", ), ), migrations.CreateModel( name="EveMoon", fields=[ ( "id", models.IntegerField( help_text="Eve Online item ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ( "position_x", models.FloatField( blank=True, default=None, help_text="x position of the structure in the solar system", null=True, ), ), ( "position_y", models.FloatField( blank=True, default=None, help_text="y position of the structure in the solar system", null=True, ), ), ( "position_z", models.FloatField( blank=True, default=None, help_text="z position of the structure in the solar system", null=True, ), ), ( "eve_solar_system", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveSolarSystem", ), ), ], ), migrations.AddField( model_name="eveconstellation", name="eve_region", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveRegion" ), ), migrations.CreateModel( name="StructureService", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "name", models.CharField(help_text="Name of the service", max_length=64), ), ( "state", models.IntegerField( choices=[(1, "offline"), (2, "online")], help_text="Current state of this service", ), ), ( "structure", models.ForeignKey( help_text="Structure this service is installed to", on_delete=django.db.models.deletion.CASCADE, to="structures.Structure", ), ), ], options={ "unique_together": {("structure", "name")}, }, ), migrations.CreateModel( name="Notification", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "notification_id", models.BigIntegerField( validators=[django.core.validators.MinValueValidator(0)] ), ), ("timestamp", models.DateTimeField()), ( "notification_type", models.IntegerField( choices=[ (401, "MoonminingAutomaticFracture"), 
(402, "MoonminingExtractionCancelled"), (403, "MoonminingExtractionFinished"), (404, "MoonminingExtractionStarted"), (405, "MoonminingLaserFired"), (513, "OwnershipTransferred"), (501, "StructureAnchoring"), (502, "StructureDestroyed"), (503, "StructureFuelAlert"), (504, "StructureLostArmor"), (505, "StructureLostShields"), (506, "StructureOnline"), (507, "StructureServicesOffline"), (508, "StructureUnanchoring"), (509, "StructureUnderAttack"), (510, "StructureWentHighPower"), (511, "StructureWentLowPower"), ] ), ), ("text", models.TextField(blank=True, default=None, null=True)), ("is_read", models.BooleanField(blank=True, default=None, null=True)), ("is_sent", models.BooleanField(blank=True, default=False)), ("last_updated", models.DateTimeField()), ( "owner", models.ForeignKey( help_text="Corporation that received this notification", on_delete=django.db.models.deletion.CASCADE, to="structures.Owner", ), ), ( "sender", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveEntity", ), ), ], options={ "unique_together": {("notification_id", "owner")}, }, ), ]
# Generated by Django 2.2.5 on 2019-11-21 03:27 import multiselectfield.db.fields import django.core.validators import django.db.models.deletion from django.db import migrations, models import structures.models class Migration(migrations.Migration): initial = True dependencies = [ ("eveonline", "0010_alliance_ticker"), ("authentication", "0016_ownershiprecord"), ] operations = [ migrations.CreateModel( name="General", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ], options={ "permissions": ( ("basic_access", "Can access this app and view"), ("view_alliance_structures", "Can view alliance structures"), ("view_all_structures", "Can view all structures"), ("add_structure_owner", "Can add new structure owner"), ), "default_permissions": (), "managed": False, }, ), migrations.CreateModel( name="EveConstellation", fields=[ ( "id", models.IntegerField( help_text="Eve Online region ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ], ), migrations.CreateModel( name="EveEntity", fields=[ ( "id", models.IntegerField( primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ( "category", models.IntegerField( choices=[ (1, "character"), (2, "corporation"), (3, "alliance"), (4, "faction"), (5, "other"), ] ), ), ( "name", models.CharField( blank=True, default=None, max_length=255, null=True ), ), ], ), migrations.CreateModel( name="EveGroup", fields=[ ( "id", models.IntegerField( help_text="Eve Online group ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ], ), migrations.CreateModel( name="EveRegion", fields=[ ( "id", models.IntegerField( help_text="Eve Online region ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), 
("name", models.CharField(max_length=100)), ], ), migrations.CreateModel( name="EveSolarSystem", fields=[ ( "id", models.IntegerField( help_text="Eve Online solar system ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ("security_status", models.FloatField()), ( "eve_constellation", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveConstellation", ), ), ], ), migrations.CreateModel( name="EveType", fields=[ ( "id", models.IntegerField( help_text="Eve Online type ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ( "eve_group", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveGroup", ), ), ], ), migrations.CreateModel( name="Owner", fields=[ ( "corporation", models.OneToOneField( help_text="Corporation owning structures", on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to="eveonline.EveCorporationInfo", ), ), ( "structures_last_sync", models.DateTimeField( blank=True, default=None, help_text="when the last sync happened", null=True, ), ), ( "structures_last_error", models.IntegerField( choices=[ (0, "No error"), (1, "Invalid token"), (2, "Expired token"), (3, "Insufficient permissions"), (4, "No character set for fetching alliance contacts"), (5, "ESI API is currently unavailable"), (6, "Operaton mode does not match with current setting"), (99, "Unknown error"), ], default=0, help_text="error that occurred at the last sync atttempt (if any)", ), ), ( "notifications_last_sync", models.DateTimeField( blank=True, default=None, help_text="when the last sync happened", null=True, ), ), ( "notifications_last_error", models.IntegerField( choices=[ (0, "No error"), (1, "Invalid token"), (2, "Expired token"), (3, "Insufficient permissions"), (4, "No character set for fetching alliance 
contacts"), (5, "ESI API is currently unavailable"), (6, "Operaton mode does not match with current setting"), (99, "Unknown error"), ], default=0, help_text="error that occurred at the last sync atttempt (if any)", ), ), ( "character", models.ForeignKey( blank=True, default=None, help_text="character used for syncing structures", null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="authentication.CharacterOwnership", ), ), ], ), migrations.CreateModel( name="Structure", fields=[ ( "id", models.BigIntegerField( help_text="The Item ID of the structure", primary_key=True, serialize=False, ), ), ( "name", models.CharField( help_text="The full name of the structure", max_length=255 ), ), ( "position_x", models.FloatField( blank=True, default=None, help_text="x position of the structure in the solar system", null=True, ), ), ( "position_y", models.FloatField( blank=True, default=None, help_text="y position of the structure in the solar system", null=True, ), ), ( "position_z", models.FloatField( blank=True, default=None, help_text="z position of the structure in the solar system", null=True, ), ), ( "fuel_expires", models.DateTimeField( blank=True, default=None, help_text="Date on which the structure will run out of fuel", null=True, ), ), ( "next_reinforce_hour", models.IntegerField( blank=True, default=None, help_text="The requested change to reinforce_hour that will take effect at the time shown by next_reinforce_apply", null=True, validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(23), ], ), ), ( "next_reinforce_weekday", models.IntegerField( blank=True, default=None, help_text="The date and time when the structure’s newly requested reinforcement times (e.g. 
next_reinforce_hour and next_reinforce_day) will take effect", null=True, validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6), ], ), ), ( "next_reinforce_apply", models.DateTimeField( blank=True, default=None, help_text="The requested change to reinforce_weekday that will take effect at the time shown by next_reinforce_apply", null=True, ), ), ( "reinforce_hour", models.IntegerField( help_text="The hour of day that determines the four hour window when the structure will randomly exit its reinforcement periods and become vulnerable to attack against its armor and/or hull. The structure will become vulnerable at a random time that is +/- 2 hours centered on the value of this property", validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(23), ], ), ), ( "reinforce_weekday", models.IntegerField( blank=True, default=None, help_text="The day of the week when the structure exits its final reinforcement period and becomes vulnerable to attack against its hull. 
Monday is 0 and Sunday is 6", null=True, validators=[ django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6), ], ), ), ( "state", models.IntegerField( choices=[ (0, "N/A"), (1, "anchor_vulnerable"), (2, "anchoring"), (3, "armor_reinforce"), (4, "armor_vulnerable"), (5, "deploy_vulnerable"), (6, "fitting_invulnerable"), (7, "hull_reinforce"), (8, "hull_vulnerable"), (9, "online_deprecated"), (10, "onlining_vulnerable"), (11, "shield_vulnerable"), (12, "unanchored"), (13, "unknown"), ], help_text="Current state of the structure", ), ), ( "state_timer_start", models.DateTimeField( blank=True, default=None, help_text="Date at which the structure will move to it’s next state", null=True, ), ), ( "state_timer_end", models.DateTimeField( blank=True, default=None, help_text="Date at which the structure entered it’s current state", null=True, ), ), ( "unanchors_at", models.DateTimeField( blank=True, default=None, help_text="Date at which the structure will unanchor", null=True, ), ), ( "last_updated", models.DateTimeField( blank=True, default=None, help_text="date this structure was last updated from the EVE server", null=True, ), ), ( "eve_solar_system", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveSolarSystem", ), ), ( "eve_type", models.ForeignKey( help_text="type of the structure", on_delete=django.db.models.deletion.CASCADE, to="structures.EveType", ), ), ( "owner", models.ForeignKey( help_text="Corporation that owns the structure", on_delete=django.db.models.deletion.CASCADE, to="structures.Owner", ), ), ], ), migrations.CreateModel( name="Webhook", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "name", models.CharField( help_text="short name to identify this webhook", max_length=64, unique=True, ), ), ( "webhook_type", models.IntegerField( choices=[(1, "Discord Webhook")], default=1, help_text="type of this webhook", ), ), ( 
"url", models.CharField( help_text="URL of this webhook, e.g. https://discordapp.com/api/webhooks/123456/abcdef", max_length=255, unique=True, ), ), ( "notes", models.TextField( blank=True, default=None, help_text="you can add notes about this webhook here if you want", null=True, ), ), ( "notification_types", multiselectfield.db.fields.MultiSelectField( choices=[ (401, "MoonminingAutomaticFracture"), (402, "MoonminingExtractionCancelled"), (403, "MoonminingExtractionFinished"), (404, "MoonminingExtractionStarted"), (405, "MoonminingLaserFired"), (513, "OwnershipTransferred"), (501, "StructureAnchoring"), (502, "StructureDestroyed"), (503, "StructureFuelAlert"), (504, "StructureLostArmor"), (505, "StructureLostShields"), (506, "StructureOnline"), (507, "StructureServicesOffline"), (508, "StructureUnanchoring"), (509, "StructureUnderAttack"), (510, "StructureWentHighPower"), (511, "StructureWentLowPower"), ], default=structures.models.get_default_notification_types, help_text="only notifications which selected types are sent to this webhook", max_length=67, ), ), ( "is_active", models.BooleanField( default=True, help_text="whether notifications are currently sent to this webhook", ), ), ( "is_default", models.BooleanField( default=False, help_text="whether newly added owners have this automatically webhook preset", ), ), ], ), migrations.AddField( model_name="owner", name="webhooks", field=models.ManyToManyField( blank=True, default=None, help_text="notifications are sent to these webhooks. 
", to="structures.Webhook", ), ), migrations.CreateModel( name="EveMoon", fields=[ ( "id", models.IntegerField( help_text="Eve Online item ID", primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(0)], ), ), ("name", models.CharField(max_length=100)), ( "position_x", models.FloatField( blank=True, default=None, help_text="x position of the structure in the solar system", null=True, ), ), ( "position_y", models.FloatField( blank=True, default=None, help_text="y position of the structure in the solar system", null=True, ), ), ( "position_z", models.FloatField( blank=True, default=None, help_text="z position of the structure in the solar system", null=True, ), ), ( "eve_solar_system", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveSolarSystem", ), ), ], ), migrations.AddField( model_name="eveconstellation", name="eve_region", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveRegion" ), ), migrations.CreateModel( name="StructureService", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "name", models.CharField(help_text="Name of the service", max_length=64), ), ( "state", models.IntegerField( choices=[(1, "offline"), (2, "online")], help_text="Current state of this service", ), ), ( "structure", models.ForeignKey( help_text="Structure this service is installed to", on_delete=django.db.models.deletion.CASCADE, to="structures.Structure", ), ), ], options={ "unique_together": {("structure", "name")}, }, ), migrations.CreateModel( name="Notification", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "notification_id", models.BigIntegerField( validators=[django.core.validators.MinValueValidator(0)] ), ), ("timestamp", models.DateTimeField()), ( "notification_type", models.IntegerField( choices=[ (401, "MoonminingAutomaticFracture"), 
(402, "MoonminingExtractionCancelled"), (403, "MoonminingExtractionFinished"), (404, "MoonminingExtractionStarted"), (405, "MoonminingLaserFired"), (513, "OwnershipTransferred"), (501, "StructureAnchoring"), (502, "StructureDestroyed"), (503, "StructureFuelAlert"), (504, "StructureLostArmor"), (505, "StructureLostShields"), (506, "StructureOnline"), (507, "StructureServicesOffline"), (508, "StructureUnanchoring"), (509, "StructureUnderAttack"), (510, "StructureWentHighPower"), (511, "StructureWentLowPower"), ] ), ), ("text", models.TextField(blank=True, default=None, null=True)), ("is_read", models.BooleanField(blank=True, default=None, null=True)), ("is_sent", models.BooleanField(blank=True, default=False)), ("last_updated", models.DateTimeField()), ( "owner", models.ForeignKey( help_text="Corporation that received this notification", on_delete=django.db.models.deletion.CASCADE, to="structures.Owner", ), ), ( "sender", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="structures.EveEntity", ), ), ], options={ "unique_together": {("notification_id", "owner")}, }, ), ]
en
0.755631
# Generated by Django 2.2.5 on 2019-11-21 03:27
1.773986
2
p252.py
brandonpelfrey/project-euler
0
6621488
<filename>p252.py def random_points(max_n): S = {0: 290797} for n in xrange(2*max_n): S[n+1] = (S[n] ** 2) % 50515093 T = {} for key, val in S.items(): T[key] = (val % 2000) - 1000 return map(lambda k: (T[2*k-1], T[2*k]), xrange(1, max_n+1) ) n_points = 500 points = random_points(n_points)
<filename>p252.py def random_points(max_n): S = {0: 290797} for n in xrange(2*max_n): S[n+1] = (S[n] ** 2) % 50515093 T = {} for key, val in S.items(): T[key] = (val % 2000) - 1000 return map(lambda k: (T[2*k-1], T[2*k]), xrange(1, max_n+1) ) n_points = 500 points = random_points(n_points)
none
1
2.776737
3
skutil/grid_search.py
tgsmith61591/pynorm
38
6621489
<gh_stars>10-100 from __future__ import division, absolute_import, print_function import sklearn from .base import overrides from .utils.fixes import (_validate_X, _validate_y, _check_param_grid, _as_numpy, _CVScoreTuple) __all__ = [ 'GridSearchCV', 'RandomizedSearchCV' ] # deprecation in sklearn 0.18 if sklearn.__version__ >= '0.18': import sklearn.model_selection as ms class GridSearchCV(ms.GridSearchCV): """Exhaustive search over specified parameter values for an estimator. This class is a skutil fix of the sklearn 0.18 GridSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.18 structures (i.e. when ``as_df`` is True in many transformers, predicting on a column vector from a pd.DataFrame will cause issues in sklearn). Parameters ---------- estimator : estimator object. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. 
This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a ``(Stratified)KFold``, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, ``StratifiedKFold`` is used. In all other cases, ``KFold`` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=True If ``'False'``, the ``cv_results_`` attribute will not include training scores. Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance, the following table +------------+-----------+------------+-----------------+---+---------+ |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....| +============+===========+============+=================+===+=========+ | 'poly' | -- | 2 | 0.8 |...| 2 | +------------+-----------+------------+-----------------+---+---------+ | 'poly' | -- | 3 | 0.7 |...| 4 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.1 | -- | 0.8 |...| 3 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.2 | -- | 0.9 |...| 1 | +------------+-----------+------------+-----------------+---+---------+ will be represented by a ``cv_results_`` dict of: { 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], mask = [False False False False]...) 'param_gamma': masked_array(data = [-- -- 0.1 0.2], mask = [ True True False False]...), 'param_degree': masked_array(data = [2.0 3.0 -- --], mask = [False False True True]...), 'split0_test_score' : [0.8, 0.7, 0.8, 0.9], 'split1_test_score' : [0.82, 0.5, 0.7, 0.78], 'mean_test_score' : [0.81, 0.60, 0.75, 0.82], 'std_test_score' : [0.02, 0.01, 0.03, 0.03], 'rank_test_score' : [2, 4, 3, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel': 'poly', 'degree': 2}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. 
estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). """ @overrides(ms.GridSearchCV) def fit(self, X, y=None, groups=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape=(n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None) Target relative to X for classification or regression; None for unsupervised learning. groups : array-like, shape=(n_samples,), optional (default=None) Group labels for the samples used while splitting the dataset into train/test set. """ return super(GridSearchCV, self).fit(X, _as_numpy(y), groups) class RandomizedSearchCV(ms.RandomizedSearchCV): """Randomized search on hyper parameters. This class is a skutil fix of the sklearn 0.18 RandomizedSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.18 structures (i.e. when ``as_df`` is True in many transformers, predicting on a column vector from a pd.DataFrame will cause issues in sklearn). Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. 
This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. 
Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a ``(Stratified)KFold``, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, ``StratifiedKFold`` is used. In all other cases, ``KFold`` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=True If ``'False'``, the ``cv_results_`` attribute will not include training scores. Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the following table: +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``cv_results_`` dict of: { 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'split0_test_score' : [0.8, 0.9, 0.7], 'split1_test_score' : [0.82, 0.5, 0.7], 'mean_test_score' : [0.81, 0.7, 0.7], 'std_test_score' : [0.02, 0.2, 0.], 'rank_test_score' : [3, 1, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. 
best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). """ @overrides(ms.RandomizedSearchCV) def fit(self, X, y=None, groups=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape=(n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None) Target relative to X for classification or regression; None for unsupervised learning. groups : array-like, shape=(n_samples,), optional (default=None) Group labels for the samples used while splitting the dataset into train/test set. """ return super(RandomizedSearchCV, self).fit(X, _as_numpy(y), groups) else: """ sklearn deprecates the GridSearch and cross validation API we know and love in 0.18, thus, we only define these methods if we're using < 0.18. Otherwise, we'll use their default. These are defined in skutil.utils.fixes """ from .utils import fixes class GridSearchCV(fixes._SK17GridSearchCV): """Exhaustive search over specified parameter values for an estimator. This class is a skutil fix of the sklearn 0.17 GridSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.17 structures. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. 
Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. 
For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, `sklearn.model_selection.StratifiedKFold` is used. In all other cases, `sklearn.model_selection.KFold` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. """ pass class RandomizedSearchCV(fixes._SK17RandomizedSearchCV): """Randomized search on hyper parameters. This class is a skutil fix of the sklearn 0.17 RandomizedSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.17 structures. Parameters ---------- estimator : estimator object. 
A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. 
- An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, `sklearn.model_selection.StratifiedKFold` is used. In all other cases, `sklearn.model_selection.KFold` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. """ pass
from __future__ import division, absolute_import, print_function

import sklearn

from .base import overrides
from .utils.fixes import (_validate_X, _validate_y, _check_param_grid,
                          _as_numpy, _CVScoreTuple)

__all__ = [
    'GridSearchCV',
    'RandomizedSearchCV'
]


def _version_tuple(version_string):
    """Return the leading numeric components of a version string as a tuple.

    Lexicographic comparison of version strings is incorrect in general
    (``'0.9' >= '0.18'`` is True even though release 0.9 predates 0.18),
    so the release check below compares integer tuples instead. Any
    non-numeric suffix in a component (``'dev0'``, ``'rc1'``, ...) is
    ignored, as is everything after it.

    Parameters
    ----------
    version_string : str
        A dotted version string, e.g. ``'0.18.1'`` or ``'0.18rc2'``.

    Returns
    -------
    tuple of int
        The parsed numeric components, e.g. ``(0, 18, 1)``.
    """
    components = []
    for token in version_string.split('.'):
        digits = ''
        for character in token:
            if character.isdigit():
                digits += character
            else:
                break
        if not digits:
            # Stop at the first component with no leading digits
            # (e.g. the 'dev0' in '0.19.dev0' has already been handled
            # by the digit scan above; a bare alpha token ends parsing).
            break
        components.append(int(digits))
    return tuple(components)


# sklearn deprecated the grid_search / cross_validation API we know and
# love in 0.18 and moved it to sklearn.model_selection. Pick the matching
# implementation for the installed sklearn release. NOTE: the version is
# compared as an integer tuple, not as a string, to avoid lexicographic
# ordering bugs.
if _version_tuple(sklearn.__version__) >= (0, 18):
    import sklearn.model_selection as ms

    class GridSearchCV(ms.GridSearchCV):
        """Exhaustive search over specified parameter values for an estimator.

        This class is a skutil fix of the sklearn 0.18 GridSearchCV module,
        and allows use with SelectiveMixins and other skutil classes that
        don't interact so kindly with other sklearn 0.18 structures (i.e.
        when ``as_df`` is True in many transformers, predicting on a column
        vector from a pd.DataFrame will cause issues in sklearn).

        Parameters
        ----------
        estimator : estimator object.
            An object implementing the scikit-learn estimator interface.
            Either the estimator needs to provide a ``score`` function,
            or ``scoring`` must be passed.

        param_grid : dict or list of dictionaries
            Parameter names (string) mapped to lists of settings to try,
            or a list of such dicts, in which case the grids spanned by
            each dict in the list are explored.

        scoring : string, callable or None, default=None
            A string (see model evaluation documentation) or a scorer
            callable with signature ``scorer(estimator, X, y)``. If
            ``None``, the ``score`` method of the estimator is used.

        fit_params : dict, optional
            Parameters to pass to the fit method.

        n_jobs : int, default=1
            Number of jobs to run in parallel.

        pre_dispatch : int, or string, optional
            Controls the number of jobs dispatched during parallel
            execution (``None``, an int, or an expression such as
            ``'2*n_jobs'``).

        iid : boolean, default=True
            If True, the loss minimized is the total loss per sample,
            not the mean loss across the folds.

        cv : int, cross-validation generator or an iterable, optional
            Cross-validation splitting strategy (None for 3-fold, an
            integer fold count, a CV generator, or an iterable of
            train/test splits).

        refit : boolean, default=True
            Refit the best estimator on the entire dataset. If False,
            predictions cannot be made with this instance after fitting.

        verbose : integer
            Controls the verbosity: the higher, the more messages.

        error_score : 'raise' (default) or numeric
            Value to assign to the score if an error occurs in estimator
            fitting; ``'raise'`` re-raises instead.

        return_train_score : boolean, default=True
            If False, ``cv_results_`` will not include training scores.

        See :class:`sklearn.model_selection.GridSearchCV` for the full
        upstream description of these parameters and of the fitted
        attributes (``cv_results_``, ``best_estimator_``, ``best_score_``,
        ``best_params_``, ``best_index_``, ``scorer_``, ``n_splits_``).
        """

        @overrides(ms.GridSearchCV)
        def fit(self, X, y=None, groups=None):
            """Run fit with all sets of parameters.

            The target is coerced to a numpy array via ``_as_numpy``
            before being handed to sklearn, so pandas column vectors
            behave as expected.

            Parameters
            ----------
            X : array-like, shape=(n_samples, n_features)
                Training vector, where n_samples is the number of samples
                and n_features is the number of features.

            y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None)
                Target relative to X for classification or regression;
                None for unsupervised learning.

            groups : array-like, shape=(n_samples,), optional (default=None)
                Group labels for the samples used while splitting the
                dataset into train/test set.
            """
            return super(GridSearchCV, self).fit(X, _as_numpy(y), groups)

    class RandomizedSearchCV(ms.RandomizedSearchCV):
        """Randomized search on hyper parameters.

        This class is a skutil fix of the sklearn 0.18 RandomizedSearchCV
        module, and allows use with SelectiveMixins and other skutil
        classes that don't interact so kindly with other sklearn 0.18
        structures (i.e. when ``as_df`` is True in many transformers,
        predicting on a column vector from a pd.DataFrame will cause
        issues in sklearn).

        Parameters
        ----------
        estimator : estimator object.
            An object of that type is instantiated for each grid point.
            This is assumed to implement the scikit-learn estimator
            interface. Either the estimator needs to provide a ``score``
            function, or ``scoring`` must be passed.

        param_distributions : dict
            Parameter names (string) mapped to distributions or lists of
            parameters to try. Distributions must provide an ``rvs``
            method for sampling (such as those from
            scipy.stats.distributions); lists are sampled uniformly.

        n_iter : int, default=10
            Number of parameter settings that are sampled. n_iter trades
            off runtime vs quality of the solution.

        scoring : string, callable or None, default=None
            A string (see model evaluation documentation) or a scorer
            callable with signature ``scorer(estimator, X, y)``. If
            ``None``, the ``score`` method of the estimator is used.

        fit_params : dict, optional
            Parameters to pass to the fit method.

        n_jobs : int, default=1
            Number of jobs to run in parallel.

        pre_dispatch : int, or string, optional
            Controls the number of jobs dispatched during parallel
            execution (``None``, an int, or an expression such as
            ``'2*n_jobs'``).

        iid : boolean, default=True
            If True, the loss minimized is the total loss per sample,
            not the mean loss across the folds.

        cv : int, cross-validation generator or an iterable, optional
            Cross-validation splitting strategy (None for 3-fold, an
            integer fold count, a CV generator, or an iterable of
            train/test splits).

        refit : boolean, default=True
            Refit the best estimator on the entire dataset. If False,
            predictions cannot be made with this instance after fitting.

        verbose : integer
            Controls the verbosity: the higher, the more messages.

        random_state : int or RandomState
            Pseudo random number generator state used for random uniform
            sampling from lists of possible values instead of scipy.stats
            distributions.

        error_score : 'raise' (default) or numeric
            Value to assign to the score if an error occurs in estimator
            fitting; ``'raise'`` re-raises instead.

        return_train_score : boolean, default=True
            If False, ``cv_results_`` will not include training scores.

        See :class:`sklearn.model_selection.RandomizedSearchCV` for the
        full upstream description of these parameters and of the fitted
        attributes (``cv_results_``, ``best_estimator_``, ``best_score_``,
        ``best_params_``, ``best_index_``, ``scorer_``, ``n_splits_``).
        """

        @overrides(ms.RandomizedSearchCV)
        def fit(self, X, y=None, groups=None):
            """Run fit on the estimator with randomly drawn parameters.

            The target is coerced to a numpy array via ``_as_numpy``
            before being handed to sklearn, so pandas column vectors
            behave as expected.

            Parameters
            ----------
            X : array-like, shape=(n_samples, n_features)
                Training vector, where n_samples is the number of samples
                and n_features is the number of features.

            y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None)
                Target relative to X for classification or regression;
                None for unsupervised learning.

            groups : array-like, shape=(n_samples,), optional (default=None)
                Group labels for the samples used while splitting the
                dataset into train/test set.
            """
            return super(RandomizedSearchCV, self).fit(X, _as_numpy(y), groups)

else:
    # sklearn deprecates the GridSearch and cross validation API we know
    # and love in 0.18, thus, we only define these methods if we're using
    # < 0.18. Otherwise, we'll use their default. These are defined in
    # skutil.utils.fixes.
    from .utils import fixes

    class GridSearchCV(fixes._SK17GridSearchCV):
        """Exhaustive search over specified parameter values for an estimator.

        This class is a skutil fix of the sklearn 0.17 GridSearchCV
        module, and allows use with SelectiveMixins and other skutil
        classes that don't interact so kindly with other sklearn 0.17
        structures.

        Accepts the sklearn 0.17 ``GridSearchCV`` parameters (estimator,
        param_grid, scoring, fit_params, n_jobs, pre_dispatch, iid, cv,
        refit, verbose, error_score) and exposes the 0.17 fitted
        attributes (``grid_scores_``, ``best_estimator_``,
        ``best_score_``, ``best_params_``, ``scorer_``). See the sklearn
        0.17 ``grid_search.GridSearchCV`` documentation for details.
        """
        pass

    class RandomizedSearchCV(fixes._SK17RandomizedSearchCV):
        """Randomized search on hyper parameters.

        This class is a skutil fix of the sklearn 0.17 RandomizedSearchCV
        module, and allows use with SelectiveMixins and other skutil
        classes that don't interact so kindly with other sklearn 0.17
        structures.

        Accepts the sklearn 0.17 ``RandomizedSearchCV`` parameters
        (estimator, param_distributions, n_iter, scoring, fit_params,
        n_jobs, pre_dispatch, iid, cv, refit, verbose, random_state,
        error_score) and exposes the 0.17 fitted attributes
        (``grid_scores_``, ``best_estimator_``, ``best_score_``,
        ``best_params_``). See the sklearn 0.17
        ``grid_search.RandomizedSearchCV`` documentation for details.
        """
        pass
en
0.701465
# deprecation in sklearn 0.18 Exhaustive search over specified parameter values for an estimator. This class is a skutil fix of the sklearn 0.18 GridSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.18 structures (i.e. when ``as_df`` is True in many transformers, predicting on a column vector from a pd.DataFrame will cause issues in sklearn). Parameters ---------- estimator : estimator object. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a ``(Stratified)KFold``, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, ``StratifiedKFold`` is used. In all other cases, ``KFold`` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=True If ``'False'``, the ``cv_results_`` attribute will not include training scores. Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance, the following table +------------+-----------+------------+-----------------+---+---------+ |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....| +============+===========+============+=================+===+=========+ | 'poly' | -- | 2 | 0.8 |...| 2 | +------------+-----------+------------+-----------------+---+---------+ | 'poly' | -- | 3 | 0.7 |...| 4 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.1 | -- | 0.8 |...| 3 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.2 | -- | 0.9 |...| 1 | +------------+-----------+------------+-----------------+---+---------+ will be represented by a ``cv_results_`` dict of: { 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], mask = [False False False False]...) 'param_gamma': masked_array(data = [-- -- 0.1 0.2], mask = [ True True False False]...), 'param_degree': masked_array(data = [2.0 3.0 -- --], mask = [False False True True]...), 'split0_test_score' : [0.8, 0.7, 0.8, 0.9], 'split1_test_score' : [0.82, 0.5, 0.7, 0.78], 'mean_test_score' : [0.81, 0.60, 0.75, 0.82], 'std_test_score' : [0.02, 0.01, 0.03, 0.03], 'rank_test_score' : [2, 4, 3, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel': 'poly', 'degree': 2}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. 
estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Run fit with all sets of parameters. Parameters ---------- X : array-like, shape=(n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None) Target relative to X for classification or regression; None for unsupervised learning. groups : array-like, shape=(n_samples,), optional (default=None) Group labels for the samples used while splitting the dataset into train/test set. Randomized search on hyper parameters. This class is a skutil fix of the sklearn 0.18 RandomizedSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.18 structures (i.e. when ``as_df`` is True in many transformers, predicting on a column vector from a pd.DataFrame will cause issues in sklearn). Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a ``(Stratified)KFold``, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. 
For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, ``StratifiedKFold`` is used. In all other cases, ``KFold`` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=True If ``'False'``, the ``cv_results_`` attribute will not include training scores. Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the following table: +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``cv_results_`` dict of: { 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'split0_test_score' : [0.8, 0.9, 0.7], 'split1_test_score' : [0.82, 0.5, 0.7], 'mean_test_score' : [0.81, 0.7, 0.7], 'std_test_score' : [0.02, 0.2, 0.], 'rank_test_score' : [3, 1, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. 
best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape=(n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None) Target relative to X for classification or regression; None for unsupervised learning. groups : array-like, shape=(n_samples,), optional (default=None) Group labels for the samples used while splitting the dataset into train/test set. sklearn deprecates the GridSearch and cross validation API we know and love in 0.18, thus, we only define these methods if we're using < 0.18. Otherwise, we'll use their default. These are defined in skutil.utils.fixes Exhaustive search over specified parameter values for an estimator. This class is a skutil fix of the sklearn 0.17 GridSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.17 structures. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, `sklearn.model_selection.StratifiedKFold` is used. 
In all other cases, `sklearn.model_selection.KFold` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. Randomized search on hyper parameters. This class is a skutil fix of the sklearn 0.17 RandomizedSearchCV module, and allows use with SelectiveMixins and other skutil classes that don't interact so kindly with other sklearn 0.17 structures. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. 
For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, `sklearn.model_selection.StratifiedKFold` is used. In all other cases, `sklearn.model_selection.KFold` is used. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data.
2.273548
2
Driver.py
Thigos/Detector-de-Carros-OpenCV
2
6621490
<filename>Driver.py import os import cv2 import pyautogui import mahotas #Local do video video_original = cv2.VideoCapture('teste.wmv') #O local aonde os arquivos estão diret = os.path.dirname(os.path.abspath(__file__)) while True: #Frame do video ret, frame = video_original.read() #Converte para escalas de cinza gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #Suaviza a imagem suavizador = cv2.GaussianBlur(gray, (7, 7), 0) #Utiliza o método otsu para transformar a imagem em binário 0,1 (preto ou branco) otsu = mahotas.thresholding.otsu(suavizador) #Copia a imagem em escalas de cinza binar = gray.copy() #Calcula áreas em que há um pico de intensidade e transforma em branco (255) ou preto (0) binar[binar > otsu] = 255 binar[binar < 255] = 0 binar = cv2.bitwise_not(binar) #Pega todos os arquivos que estão na pasta ML_RECORT for nome in os.listdir(diret + '\\ML_RECORT'): #Lê o arquivo carro = cv2.imread(diret + '\\ML_RECORT\\' + str(nome)) #Converte a imagem para escalas de cinza carroGray = cv2.cvtColor(carro, cv2.COLOR_BGR2GRAY) #Pega o w (largura) e h (altura) da imagem w, h = carroGray.shape[::-1] #Compara a imagem modelo (carroGray) com a imagem de entrada (binar) res = cv2.matchTemplate(binar, carroGray, cv2.TM_CCOEFF) limit = 200000000 #Recebe as informações da comparação min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) #Verificar se o valor da comparação é maior ou igual a 200000000 if (max_val >= limit): #Forma um retângulo e uma palavra para identificar o carro cv2.rectangle(frame, max_loc, (max_loc[0] + w, max_loc[1] + h), (0, 0, 255), 2) fonte = cv2.FONT_HERSHEY_SIMPLEX texto = "CARRO" #Os frames 136 e 236 são os pontos em que o carro está mais perto do outro #Então, o programa avisará para frear o carro e apertará o 's' que é o freio do carro no jogo if(nome == "frame136.png" or nome == "frame236.png"): texto = "FREAR" #pyautogui.press('s') cv2.putText(frame, texto, (max_loc[0], max_loc[1] - 5), fonte, 0.5, (0, 0, 255), 1, cv2.LINE_AA) #É preciso 
que o for pare, assim não terá conflitos de frames break #Mostra o video cv2.imshow("Driver", frame) #Aperte 'q' para sair key = cv2.waitKey(1) if key == ord('q'): break cv2.destroyAllWindows()
<filename>Driver.py import os import cv2 import pyautogui import mahotas #Local do video video_original = cv2.VideoCapture('teste.wmv') #O local aonde os arquivos estão diret = os.path.dirname(os.path.abspath(__file__)) while True: #Frame do video ret, frame = video_original.read() #Converte para escalas de cinza gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #Suaviza a imagem suavizador = cv2.GaussianBlur(gray, (7, 7), 0) #Utiliza o método otsu para transformar a imagem em binário 0,1 (preto ou branco) otsu = mahotas.thresholding.otsu(suavizador) #Copia a imagem em escalas de cinza binar = gray.copy() #Calcula áreas em que há um pico de intensidade e transforma em branco (255) ou preto (0) binar[binar > otsu] = 255 binar[binar < 255] = 0 binar = cv2.bitwise_not(binar) #Pega todos os arquivos que estão na pasta ML_RECORT for nome in os.listdir(diret + '\\ML_RECORT'): #Lê o arquivo carro = cv2.imread(diret + '\\ML_RECORT\\' + str(nome)) #Converte a imagem para escalas de cinza carroGray = cv2.cvtColor(carro, cv2.COLOR_BGR2GRAY) #Pega o w (largura) e h (altura) da imagem w, h = carroGray.shape[::-1] #Compara a imagem modelo (carroGray) com a imagem de entrada (binar) res = cv2.matchTemplate(binar, carroGray, cv2.TM_CCOEFF) limit = 200000000 #Recebe as informações da comparação min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) #Verificar se o valor da comparação é maior ou igual a 200000000 if (max_val >= limit): #Forma um retângulo e uma palavra para identificar o carro cv2.rectangle(frame, max_loc, (max_loc[0] + w, max_loc[1] + h), (0, 0, 255), 2) fonte = cv2.FONT_HERSHEY_SIMPLEX texto = "CARRO" #Os frames 136 e 236 são os pontos em que o carro está mais perto do outro #Então, o programa avisará para frear o carro e apertará o 's' que é o freio do carro no jogo if(nome == "frame136.png" or nome == "frame236.png"): texto = "FREAR" #pyautogui.press('s') cv2.putText(frame, texto, (max_loc[0], max_loc[1] - 5), fonte, 0.5, (0, 0, 255), 1, cv2.LINE_AA) #É preciso 
que o for pare, assim não terá conflitos de frames break #Mostra o video cv2.imshow("Driver", frame) #Aperte 'q' para sair key = cv2.waitKey(1) if key == ord('q'): break cv2.destroyAllWindows()
pt
0.988454
#Local do video #O local aonde os arquivos estão #Frame do video #Converte para escalas de cinza #Suaviza a imagem #Utiliza o método otsu para transformar a imagem em binário 0,1 (preto ou branco) #Copia a imagem em escalas de cinza #Calcula áreas em que há um pico de intensidade e transforma em branco (255) ou preto (0) #Pega todos os arquivos que estão na pasta ML_RECORT #Lê o arquivo #Converte a imagem para escalas de cinza #Pega o w (largura) e h (altura) da imagem #Compara a imagem modelo (carroGray) com a imagem de entrada (binar) #Recebe as informações da comparação #Verificar se o valor da comparação é maior ou igual a 200000000 #Forma um retângulo e uma palavra para identificar o carro #Os frames 136 e 236 são os pontos em que o carro está mais perto do outro #Então, o programa avisará para frear o carro e apertará o 's' que é o freio do carro no jogo #pyautogui.press('s') #É preciso que o for pare, assim não terá conflitos de frames #Mostra o video #Aperte 'q' para sair
2.979019
3
Py exercises/UnderstandingScopingAndNestedFunctions.py
arvindkarir/python-pandas-code
0
6621491
#something to understanding scoping and nested functions def f(x): def g(): x = 'abc' print 'function x=', x def h(): z = x print 'hz =', z x = x + 1 print 'x plus 1 =', x h() g() print 'gx=', x return g x = 3 z = f(x) print 'zx=', x
#something to understanding scoping and nested functions def f(x): def g(): x = 'abc' print 'function x=', x def h(): z = x print 'hz =', z x = x + 1 print 'x plus 1 =', x h() g() print 'gx=', x return g x = 3 z = f(x) print 'zx=', x
en
0.839422
#something to understanding scoping and nested functions
4.000284
4
DNA2VEC/model.py
NaiveTom/all_model
0
6621492
<gh_stars>0 from keras import initializers from tensorflow.keras.layers import Layer, InputSpec from keras import backend as K from keras.layers import * from keras.models import * from keras.optimizers import Adam from keras.regularizers import l1, l2 import keras import numpy as np MAX_LEN_en = 3000 MAX_LEN_pr = 2000 NB_WORDS = 4097 EMBEDDING_DIM = 100 embedding_matrix = np.load('embedding_matrix.npy') #################### # ACGT #################### embedding_matrix_one_hot = np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) class AttLayer_1(Layer): def __init__(self, attention_dim): # self.init = initializers.get('normal') self.init = initializers.RandomNormal(seed=10) self.supports_masking = True self.attention_dim = attention_dim super(AttLayer_1, self).__init__() def build(self, input_shape): assert len(input_shape) == 3 self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name='W_1') self.b = K.variable(self.init((self.attention_dim, )), name='b_1') self.u = K.variable(self.init((self.attention_dim, 1)), name='u_1') self._trainable_weights = [self.W, self.b, self.u] super(AttLayer_1, self).build(input_shape) def compute_mask(self, inputs, mask=None): return mask def call(self, x, mask=None): # size of x :[batch_size, sel_len, attention_dim] # size of u :[batch_size, attention_dim] # uit = tanh(xW+b) uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b)) ait = K.dot(uit, self.u) ait = K.squeeze(ait, -1) ait = K.exp(ait) if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano ait *= K.cast(mask, K.floatx()) ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx()) ait = K.expand_dims(ait) weighted_input = x * ait output = K.sum(weighted_input, axis=1) return output def compute_output_shape(self, input_shape): return (input_shape[0], input_shape[-1]) class AttLayer_2(Layer): def __init__(self, attention_dim): # self.init = initializers.get('normal') self.init = 
initializers.RandomNormal(seed=10) self.supports_masking = True self.attention_dim = attention_dim super(AttLayer_2, self).__init__() def build(self, input_shape): assert len(input_shape) == 3 self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name='W_2') self.b = K.variable(self.init((self.attention_dim, )), name='b_2') self.u = K.variable(self.init((self.attention_dim, 1)), name='u_2') self._trainable_weights = [self.W, self.b, self.u] super(AttLayer_2, self).build(input_shape) def compute_mask(self, inputs, mask=None): return mask def call(self, x, mask=None): # size of x :[batch_size, sel_len, attention_dim] # size of u :[batch_size, attention_dim] # uit = tanh(xW+b) uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b)) ait = K.dot(uit, self.u) ait = K.squeeze(ait, -1) ait = K.exp(ait) if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano ait *= K.cast(mask, K.floatx()) ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx()) ait = K.expand_dims(ait) weighted_input = x * ait output = K.sum(weighted_input, axis=1) return output def compute_output_shape(self, input_shape): return (input_shape[0], input_shape[-1]) def get_model(): #################### # 输入部分 #################### enhancers = Input(shape=(MAX_LEN_en,)) promoters = Input(shape=(MAX_LEN_pr,)) #################### # embedding 部分 #################### emb_en = Embedding(NB_WORDS, EMBEDDING_DIM, weights=[ embedding_matrix], trainable=True)(enhancers) emb_pr = Embedding(NB_WORDS, EMBEDDING_DIM, weights=[ embedding_matrix], trainable=True)(promoters) #################### # enhancer 输入部分 #################### enhancer_conv_layer = Convolution1D(filters=64, kernel_size=60, padding='same', # 'same' kernel_initializer='he_normal', ) enhancer_max_pool_layer = MaxPooling1D(pool_size=30, strides=30) # Build enhancer branch enhancer_branch = Sequential() enhancer_branch.add(enhancer_conv_layer) enhancer_branch.add(Activation("relu")) 
enhancer_branch.add(enhancer_max_pool_layer) enhancer_branch.add(BatchNormalization()) enhancer_branch.add(Dropout(0.5)) enhancer_out = enhancer_branch(emb_en) #################### # promoter 输入部分 #################### promoter_conv_layer = Convolution1D(filters=64, kernel_size=40, padding='same', kernel_initializer='he_normal', ) promoter_max_pool_layer = MaxPooling1D(pool_size=20, strides=20) # promoter_length_slim = 2039 # n_kernels_slim = 200 # filter_length_slim = 20 # Build promoter branch promoter_branch = Sequential() promoter_branch.add(promoter_conv_layer) promoter_branch.add(Activation("relu")) promoter_branch.add(promoter_max_pool_layer) promoter_branch.add(BatchNormalization()) promoter_branch.add(Dropout(0.5)) promoter_out = promoter_branch(emb_pr) # enhancer_conv_layer = Conv1D(filters = 32,kernel_size = 40,padding = "valid",activation='relu')(emb_en) # enhancer_max_pool_layer = MaxPooling1D(pool_size = 30, strides = 30)(enhancer_conv_layer) # promoter_conv_layer = Conv1D(filters = 32,kernel_size = 40,padding = "valid",activation='relu')(emb_pr) # promoter_max_pool_layer = MaxPooling1D(pool_size = 20, strides = 20)(promoter_conv_layer) #################### # 合并部分 #################### l_gru_1 = Bidirectional(GRU(50, return_sequences=True), name='gru1')(enhancer_out) l_gru_2 = Bidirectional(GRU(50, return_sequences=True), name='gru2')(promoter_out) l_att_1 = AttLayer_1(50)(l_gru_1) l_att_2 = AttLayer_2(50)(l_gru_2) # 测试一下到底是因为什么崩溃的 # l_att_1 = l_gru_1 # l_att_2 = l_gru_2 subtract_layer = Subtract()([l_att_1, l_att_2]) multiply_layer = Multiply()([l_att_1, l_att_2]) merge_layer=Concatenate(axis=1)([l_att_1, l_att_2, subtract_layer, multiply_layer]) # merge_layer = Concatenate(axis=1)([l_att_1, l_att_2]) bn = BatchNormalization()(merge_layer) dt = Dropout(0.5)(bn) # l_gru = Bidirectional(LSTM(50))(dt) # l_att = AttLayer(50)(l_gru) # bn2 = BatchNormalization()(l_gru) # dt2 = Dropout(0.5)(bn2) # dt = BatchNormalization()(dt) # dt = Dropout(0.5)(dt) 
#################### # dense 部分 #################### dt = Dense(512, kernel_initializer='glorot_uniform')(dt) dt = BatchNormalization()(dt) dt = Activation('relu')(dt) dt = Dropout(0.5)(dt) preds = Dense(1, activation='sigmoid')(dt) model = Model([enhancers, promoters], preds) adam = keras.optimizers.Adam(lr=5e-6, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) return model
from keras import initializers from tensorflow.keras.layers import Layer, InputSpec from keras import backend as K from keras.layers import * from keras.models import * from keras.optimizers import Adam from keras.regularizers import l1, l2 import keras import numpy as np MAX_LEN_en = 3000 MAX_LEN_pr = 2000 NB_WORDS = 4097 EMBEDDING_DIM = 100 embedding_matrix = np.load('embedding_matrix.npy') #################### # ACGT #################### embedding_matrix_one_hot = np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) class AttLayer_1(Layer): def __init__(self, attention_dim): # self.init = initializers.get('normal') self.init = initializers.RandomNormal(seed=10) self.supports_masking = True self.attention_dim = attention_dim super(AttLayer_1, self).__init__() def build(self, input_shape): assert len(input_shape) == 3 self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name='W_1') self.b = K.variable(self.init((self.attention_dim, )), name='b_1') self.u = K.variable(self.init((self.attention_dim, 1)), name='u_1') self._trainable_weights = [self.W, self.b, self.u] super(AttLayer_1, self).build(input_shape) def compute_mask(self, inputs, mask=None): return mask def call(self, x, mask=None): # size of x :[batch_size, sel_len, attention_dim] # size of u :[batch_size, attention_dim] # uit = tanh(xW+b) uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b)) ait = K.dot(uit, self.u) ait = K.squeeze(ait, -1) ait = K.exp(ait) if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano ait *= K.cast(mask, K.floatx()) ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx()) ait = K.expand_dims(ait) weighted_input = x * ait output = K.sum(weighted_input, axis=1) return output def compute_output_shape(self, input_shape): return (input_shape[0], input_shape[-1]) class AttLayer_2(Layer): def __init__(self, attention_dim): # self.init = initializers.get('normal') self.init = 
initializers.RandomNormal(seed=10) self.supports_masking = True self.attention_dim = attention_dim super(AttLayer_2, self).__init__() def build(self, input_shape): assert len(input_shape) == 3 self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name='W_2') self.b = K.variable(self.init((self.attention_dim, )), name='b_2') self.u = K.variable(self.init((self.attention_dim, 1)), name='u_2') self._trainable_weights = [self.W, self.b, self.u] super(AttLayer_2, self).build(input_shape) def compute_mask(self, inputs, mask=None): return mask def call(self, x, mask=None): # size of x :[batch_size, sel_len, attention_dim] # size of u :[batch_size, attention_dim] # uit = tanh(xW+b) uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b)) ait = K.dot(uit, self.u) ait = K.squeeze(ait, -1) ait = K.exp(ait) if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano ait *= K.cast(mask, K.floatx()) ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx()) ait = K.expand_dims(ait) weighted_input = x * ait output = K.sum(weighted_input, axis=1) return output def compute_output_shape(self, input_shape): return (input_shape[0], input_shape[-1]) def get_model(): #################### # 输入部分 #################### enhancers = Input(shape=(MAX_LEN_en,)) promoters = Input(shape=(MAX_LEN_pr,)) #################### # embedding 部分 #################### emb_en = Embedding(NB_WORDS, EMBEDDING_DIM, weights=[ embedding_matrix], trainable=True)(enhancers) emb_pr = Embedding(NB_WORDS, EMBEDDING_DIM, weights=[ embedding_matrix], trainable=True)(promoters) #################### # enhancer 输入部分 #################### enhancer_conv_layer = Convolution1D(filters=64, kernel_size=60, padding='same', # 'same' kernel_initializer='he_normal', ) enhancer_max_pool_layer = MaxPooling1D(pool_size=30, strides=30) # Build enhancer branch enhancer_branch = Sequential() enhancer_branch.add(enhancer_conv_layer) enhancer_branch.add(Activation("relu")) 
enhancer_branch.add(enhancer_max_pool_layer) enhancer_branch.add(BatchNormalization()) enhancer_branch.add(Dropout(0.5)) enhancer_out = enhancer_branch(emb_en) #################### # promoter 输入部分 #################### promoter_conv_layer = Convolution1D(filters=64, kernel_size=40, padding='same', kernel_initializer='he_normal', ) promoter_max_pool_layer = MaxPooling1D(pool_size=20, strides=20) # promoter_length_slim = 2039 # n_kernels_slim = 200 # filter_length_slim = 20 # Build promoter branch promoter_branch = Sequential() promoter_branch.add(promoter_conv_layer) promoter_branch.add(Activation("relu")) promoter_branch.add(promoter_max_pool_layer) promoter_branch.add(BatchNormalization()) promoter_branch.add(Dropout(0.5)) promoter_out = promoter_branch(emb_pr) # enhancer_conv_layer = Conv1D(filters = 32,kernel_size = 40,padding = "valid",activation='relu')(emb_en) # enhancer_max_pool_layer = MaxPooling1D(pool_size = 30, strides = 30)(enhancer_conv_layer) # promoter_conv_layer = Conv1D(filters = 32,kernel_size = 40,padding = "valid",activation='relu')(emb_pr) # promoter_max_pool_layer = MaxPooling1D(pool_size = 20, strides = 20)(promoter_conv_layer) #################### # 合并部分 #################### l_gru_1 = Bidirectional(GRU(50, return_sequences=True), name='gru1')(enhancer_out) l_gru_2 = Bidirectional(GRU(50, return_sequences=True), name='gru2')(promoter_out) l_att_1 = AttLayer_1(50)(l_gru_1) l_att_2 = AttLayer_2(50)(l_gru_2) # 测试一下到底是因为什么崩溃的 # l_att_1 = l_gru_1 # l_att_2 = l_gru_2 subtract_layer = Subtract()([l_att_1, l_att_2]) multiply_layer = Multiply()([l_att_1, l_att_2]) merge_layer=Concatenate(axis=1)([l_att_1, l_att_2, subtract_layer, multiply_layer]) # merge_layer = Concatenate(axis=1)([l_att_1, l_att_2]) bn = BatchNormalization()(merge_layer) dt = Dropout(0.5)(bn) # l_gru = Bidirectional(LSTM(50))(dt) # l_att = AttLayer(50)(l_gru) # bn2 = BatchNormalization()(l_gru) # dt2 = Dropout(0.5)(bn2) # dt = BatchNormalization()(dt) # dt = Dropout(0.5)(dt) 
#################### # dense 部分 #################### dt = Dense(512, kernel_initializer='glorot_uniform')(dt) dt = BatchNormalization()(dt) dt = Activation('relu')(dt) dt = Dropout(0.5)(dt) preds = Dense(1, activation='sigmoid')(dt) model = Model([enhancers, promoters], preds) adam = keras.optimizers.Adam(lr=5e-6, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) return model
en
0.325064
#################### # ACGT #################### # self.init = initializers.get('normal') # size of x :[batch_size, sel_len, attention_dim] # size of u :[batch_size, attention_dim] # uit = tanh(xW+b) # Cast the mask to floatX to avoid float64 upcasting in theano # self.init = initializers.get('normal') # size of x :[batch_size, sel_len, attention_dim] # size of u :[batch_size, attention_dim] # uit = tanh(xW+b) # Cast the mask to floatX to avoid float64 upcasting in theano #################### # 输入部分 #################### #################### # embedding 部分 #################### #################### # enhancer 输入部分 #################### # 'same' # Build enhancer branch #################### # promoter 输入部分 #################### # promoter_length_slim = 2039 # n_kernels_slim = 200 # filter_length_slim = 20 # Build promoter branch # enhancer_conv_layer = Conv1D(filters = 32,kernel_size = 40,padding = "valid",activation='relu')(emb_en) # enhancer_max_pool_layer = MaxPooling1D(pool_size = 30, strides = 30)(enhancer_conv_layer) # promoter_conv_layer = Conv1D(filters = 32,kernel_size = 40,padding = "valid",activation='relu')(emb_pr) # promoter_max_pool_layer = MaxPooling1D(pool_size = 20, strides = 20)(promoter_conv_layer) #################### # 合并部分 #################### # 测试一下到底是因为什么崩溃的 # l_att_1 = l_gru_1 # l_att_2 = l_gru_2 # merge_layer = Concatenate(axis=1)([l_att_1, l_att_2]) # l_gru = Bidirectional(LSTM(50))(dt) # l_att = AttLayer(50)(l_gru) # bn2 = BatchNormalization()(l_gru) # dt2 = Dropout(0.5)(bn2) # dt = BatchNormalization()(dt) # dt = Dropout(0.5)(dt) #################### # dense 部分 ####################
2.463606
2
kong_admin/sync/apis.py
peterayeni/django-kong-admin
2
6621493
<filename>kong_admin/sync/apis.py # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function from kong.exceptions import ConflictError from kong_admin.models import APIReference, PluginConfigurationReference, PluginConfigurationField from .base import KongProxySyncEngine class APISyncEngine(KongProxySyncEngine): def plugins(self): return PluginConfigurationSyncEngine() def get_proxy_class(self): return APIReference def on_retrieve_all(self, client): apis = list(client.apis.iterate()) for api_struct in apis: yield api_struct def is_published(self, client, kong_id, parent_kong_id=None): try: result = client.apis.retrieve(str(kong_id)) except ValueError: return False return result is not None def on_publish(self, client, obj): try: api_struct = client.apis.add_or_update( api_id=obj.kong_id, target_url=obj.target_url, name=obj.name, public_dns=obj.public_dns, path=obj.path, strip_path=obj.strip_path) except ConflictError: api_struct = client.apis.update( name_or_id=(obj.name or obj.public_dns), target_url=obj.target_url, name=obj.name, public_dns=obj.public_dns, path=obj.path, strip_path=obj.strip_path) name = api_struct['name'] if obj.name != name: obj.name = name self.get_proxy_class().objects.filter(id=obj.id).update(name=obj.name) return api_struct['id'] def on_withdraw_by_id(self, client, kong_id, parent_kong_id=None): client.apis.delete(str(kong_id)) class PluginConfigurationSyncEngine(KongProxySyncEngine): def get_proxy_class(self): return PluginConfigurationReference def on_retrieve_all(self, client): apis = list(client.apis.iterate()) for api_struct in apis: api_kong_id = api_struct.get('id', None) assert api_kong_id is not None plugin_configurations = client.apis.plugins(api_kong_id).list(size=100).get('data', None) assert plugin_configurations is not None for plugin_configuration_struct in plugin_configurations: yield plugin_configuration_struct def is_published(self, client, kong_id, parent_kong_id=None): try: result = 
client.apis.plugins(str(parent_kong_id)).retrieve(str(kong_id)) except ValueError: return False return result is not None def get_parent_object(self, obj): return obj.api def get_parent_key(self): return 'api_id' def on_publish(self, client, obj): fields = {} for field in PluginConfigurationField.objects.filter(configuration=obj): fields[field.property] = field.value api_kong_id = obj.api.kong_id consumer_kong_id = obj.consumer.kong_id if obj.consumer is not None else None try: plugin_configuration_struct = client.apis.plugins(str(api_kong_id)).create_or_update( plugin_configuration_id=obj.kong_id, plugin_name=obj.name, enabled=obj.enabled, consumer_id=consumer_kong_id, **fields) except ConflictError: plugin_configuration_struct = client.apis.plugins(str(api_kong_id)).update( plugin_name=obj.name, enabled=obj.enabled, consumer_id=consumer_kong_id, **fields) return plugin_configuration_struct['id'] def on_withdraw_by_id(self, client, kong_id, parent_kong_id=None): assert kong_id is not None assert parent_kong_id is not None client.apis.plugins(parent_kong_id).delete(kong_id)
<filename>kong_admin/sync/apis.py # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function from kong.exceptions import ConflictError from kong_admin.models import APIReference, PluginConfigurationReference, PluginConfigurationField from .base import KongProxySyncEngine class APISyncEngine(KongProxySyncEngine): def plugins(self): return PluginConfigurationSyncEngine() def get_proxy_class(self): return APIReference def on_retrieve_all(self, client): apis = list(client.apis.iterate()) for api_struct in apis: yield api_struct def is_published(self, client, kong_id, parent_kong_id=None): try: result = client.apis.retrieve(str(kong_id)) except ValueError: return False return result is not None def on_publish(self, client, obj): try: api_struct = client.apis.add_or_update( api_id=obj.kong_id, target_url=obj.target_url, name=obj.name, public_dns=obj.public_dns, path=obj.path, strip_path=obj.strip_path) except ConflictError: api_struct = client.apis.update( name_or_id=(obj.name or obj.public_dns), target_url=obj.target_url, name=obj.name, public_dns=obj.public_dns, path=obj.path, strip_path=obj.strip_path) name = api_struct['name'] if obj.name != name: obj.name = name self.get_proxy_class().objects.filter(id=obj.id).update(name=obj.name) return api_struct['id'] def on_withdraw_by_id(self, client, kong_id, parent_kong_id=None): client.apis.delete(str(kong_id)) class PluginConfigurationSyncEngine(KongProxySyncEngine): def get_proxy_class(self): return PluginConfigurationReference def on_retrieve_all(self, client): apis = list(client.apis.iterate()) for api_struct in apis: api_kong_id = api_struct.get('id', None) assert api_kong_id is not None plugin_configurations = client.apis.plugins(api_kong_id).list(size=100).get('data', None) assert plugin_configurations is not None for plugin_configuration_struct in plugin_configurations: yield plugin_configuration_struct def is_published(self, client, kong_id, parent_kong_id=None): try: result = 
client.apis.plugins(str(parent_kong_id)).retrieve(str(kong_id)) except ValueError: return False return result is not None def get_parent_object(self, obj): return obj.api def get_parent_key(self): return 'api_id' def on_publish(self, client, obj): fields = {} for field in PluginConfigurationField.objects.filter(configuration=obj): fields[field.property] = field.value api_kong_id = obj.api.kong_id consumer_kong_id = obj.consumer.kong_id if obj.consumer is not None else None try: plugin_configuration_struct = client.apis.plugins(str(api_kong_id)).create_or_update( plugin_configuration_id=obj.kong_id, plugin_name=obj.name, enabled=obj.enabled, consumer_id=consumer_kong_id, **fields) except ConflictError: plugin_configuration_struct = client.apis.plugins(str(api_kong_id)).update( plugin_name=obj.name, enabled=obj.enabled, consumer_id=consumer_kong_id, **fields) return plugin_configuration_struct['id'] def on_withdraw_by_id(self, client, kong_id, parent_kong_id=None): assert kong_id is not None assert parent_kong_id is not None client.apis.plugins(parent_kong_id).delete(kong_id)
en
0.769321
# -*- coding: utf-8 -*-
1.907779
2
second/pytorch/models/__init__.py
yukke42/SECOND
0
6621494
<gh_stars>0 from . import net_multi_head
from . import net_multi_head
none
1
1.033595
1
Application/system/events_monitor.py
Unicorn-Dev/Minimal.io
0
6621495
import sys from Application.system.game_functions import * import Application.system.game_functions as gf settings = None screen = None stats = None def set_global_var(setts, scr, statistics): global settings global screen global stats settings = setts screen = scr stats = statistics class Monitor: def __init__(self, *handlers): self.__handlers = [handler() for handler in handlers] for i, handler in enumerate(self.__handlers): if i + 1 < len(self.__handlers): handler.set_next(self.__handlers[i + 1]) def add_handler(self, handler): self.__handlers.append(handler()) self.__handlers[len(self.__handlers) - 2].set_next( self.__handlers[len(self.__handlers) - 1]) def monitor(self, request, params): self.__handlers[0].handle(request, params) class EventsMonitor(Monitor): def __init__(self): super().__init__( CloseHandler, DownHandler, UpHandler, ButtonHandler, ) class BaseHandler: def __init__(self): self.__next = None def set_next(self, handler): self.__next = handler def handle(self, request, params): pass def handle_next(self, request, params): if self.__next: self.__next.handle(request, params) class CloseHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.QUIT or \ (request.type == pygame.KEYDOWN and request.key == pygame.K_q)\ or (len(params) == 5 and params[4].text == 'Quit'): sys.exit() else: self.handle_next(request, params) class DownHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.KEYDOWN: mon = Monitor() mon.add_handler(ChooseGameTypeHandler) mon.add_handler(PauseHandler) mon.add_handler(BackToMainMenuHandler) mon.add_handler(GameActiveHandler) mon.add_handler(ChangeGunHandler) mon.add_handler(FirstMoveHandler) mon.add_handler(SecondMoveHandler) mon.monitor(request, params) else: self.handle_next(request, params) class UpHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.KEYUP: mon = Monitor(FirstMoveHandler, SecondMoveHandler) mon.monitor(request, params) else: 
self.handle_next(request, params) class ButtonHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.MOUSEBUTTONDOWN: mon = Monitor() mon.add_handler(GameNotActiveHandler) mon.add_handler(ProcessButtonsHandler) mon.monitor(request, params) else: self.handle_next(request, params) class ProcessButtonsHandler(BaseHandler): def handle(self, request, params): params.append(None) for button in params[0]: params[4] = button mon = Monitor() mon.add_handler(ButtonClickedHandler) mon.add_handler(ButtonContinueHandler) mon.add_handler(ButtonChooseGameTypeHandler) mon.add_handler(OnePlayerModeHandler) mon.add_handler(TwoPlayersModeHandler) mon.add_handler(ButtonBackToMainMenuHandler) mon.add_handler(CloseHandler) mon.monitor(request, params) self.handle_next(request, params) class ButtonClickedHandler(BaseHandler): def handle(self, request, params): button_clicked = params[4].rect.collidepoint(pygame.mouse.get_pos()) if button_clicked: self.handle_next(request, params) class ChooseGameTypeHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_p and not stats.game_active: stats.choosing_game_type = True else: self.handle_next(request, params) class ButtonChooseGameTypeHandler(BaseHandler): def handle(self, request, params): if len(params) == 5 and (params[4].text == 'Play' or params[4].text == 'Restart'): stats.choosing_game_type = True else: self.handle_next(request, params) class ButtonContinueHandler(BaseHandler): def handle(self, request, params): if params[4].text == 'Continue': gf.set_pause(False) else: self.handle_next(request, params) class OnePlayerModeHandler(BaseHandler): def handle(self, request, params): if params[4].text == 'One player': stats.single_player = True params[1].clear() params[1].append(Hero()) gf.start_game(params[1], params[2], params[3]) else: self.handle_next(request, params) class TwoPlayersModeHandler(BaseHandler): def handle(self, request, params): if params[4].text == 'Two players': 
stats.single_player = False params[1].clear() params[1].append(Hero()) params[1].append(Hero()) gf.start_game(params[1], params[2], params[3]) else: self.handle_next(request, params) class PauseHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_ESCAPE and stats.game_active != stats.pause: gf.set_pause(not stats.pause) else: self.handle_next(request, params) class BackToMainMenuHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_ESCAPE and stats.choosing_game_type: stats.choosing_game_type = False else: self.handle_next(request, params) class ButtonBackToMainMenuHandler(BaseHandler): def handle(self, request, params): if len(params) == 5 and params[4].text == 'Back': stats.choosing_game_type = False else: self.handle_next(request, params) class GameActiveHandler(BaseHandler): def handle(self, request, params): if stats.game_active: self.handle_next(request, params) class GameNotActiveHandler(BaseHandler): def handle(self, request, params): if not stats.game_active: self.handle_next(request, params) class ChangeGunHandler(BaseHandler): def handle(self, request, params): for hero in params[1]: if request.key == pygame.K_n: hero.change_bullets("Bullet") if request.key == pygame.K_f: hero.change_bullets("FastBullet") if request.key == pygame.K_b: hero.change_bullets("BigBullet") if request.key == pygame.K_EQUALS: hero.bullet_type = None self.handle_next(request, params) class FirstMoveHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_UP: params[1][0].speed_y = -params[1][0].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_DOWN: params[1][0].speed_y = params[1][0].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_LEFT: params[1][0].speed_x = -params[1][0].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_RIGHT: params[1][0].speed_x = params[1][0].speed \ if request.type == pygame.KEYDOWN 
else 0 self.handle_next(request, params) class SecondMoveHandler(BaseHandler): def handle(self, request, params): if not stats.single_player: if request.key == pygame.K_w: params[1][1].speed_y = -params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_s: params[1][1].speed_y = params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_a: params[1][1].speed_x = -params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_d: params[1][1].speed_x = params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 self.handle_next(request, params)
import sys from Application.system.game_functions import * import Application.system.game_functions as gf settings = None screen = None stats = None def set_global_var(setts, scr, statistics): global settings global screen global stats settings = setts screen = scr stats = statistics class Monitor: def __init__(self, *handlers): self.__handlers = [handler() for handler in handlers] for i, handler in enumerate(self.__handlers): if i + 1 < len(self.__handlers): handler.set_next(self.__handlers[i + 1]) def add_handler(self, handler): self.__handlers.append(handler()) self.__handlers[len(self.__handlers) - 2].set_next( self.__handlers[len(self.__handlers) - 1]) def monitor(self, request, params): self.__handlers[0].handle(request, params) class EventsMonitor(Monitor): def __init__(self): super().__init__( CloseHandler, DownHandler, UpHandler, ButtonHandler, ) class BaseHandler: def __init__(self): self.__next = None def set_next(self, handler): self.__next = handler def handle(self, request, params): pass def handle_next(self, request, params): if self.__next: self.__next.handle(request, params) class CloseHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.QUIT or \ (request.type == pygame.KEYDOWN and request.key == pygame.K_q)\ or (len(params) == 5 and params[4].text == 'Quit'): sys.exit() else: self.handle_next(request, params) class DownHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.KEYDOWN: mon = Monitor() mon.add_handler(ChooseGameTypeHandler) mon.add_handler(PauseHandler) mon.add_handler(BackToMainMenuHandler) mon.add_handler(GameActiveHandler) mon.add_handler(ChangeGunHandler) mon.add_handler(FirstMoveHandler) mon.add_handler(SecondMoveHandler) mon.monitor(request, params) else: self.handle_next(request, params) class UpHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.KEYUP: mon = Monitor(FirstMoveHandler, SecondMoveHandler) mon.monitor(request, params) else: 
self.handle_next(request, params) class ButtonHandler(BaseHandler): def handle(self, request, params): if request.type == pygame.MOUSEBUTTONDOWN: mon = Monitor() mon.add_handler(GameNotActiveHandler) mon.add_handler(ProcessButtonsHandler) mon.monitor(request, params) else: self.handle_next(request, params) class ProcessButtonsHandler(BaseHandler): def handle(self, request, params): params.append(None) for button in params[0]: params[4] = button mon = Monitor() mon.add_handler(ButtonClickedHandler) mon.add_handler(ButtonContinueHandler) mon.add_handler(ButtonChooseGameTypeHandler) mon.add_handler(OnePlayerModeHandler) mon.add_handler(TwoPlayersModeHandler) mon.add_handler(ButtonBackToMainMenuHandler) mon.add_handler(CloseHandler) mon.monitor(request, params) self.handle_next(request, params) class ButtonClickedHandler(BaseHandler): def handle(self, request, params): button_clicked = params[4].rect.collidepoint(pygame.mouse.get_pos()) if button_clicked: self.handle_next(request, params) class ChooseGameTypeHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_p and not stats.game_active: stats.choosing_game_type = True else: self.handle_next(request, params) class ButtonChooseGameTypeHandler(BaseHandler): def handle(self, request, params): if len(params) == 5 and (params[4].text == 'Play' or params[4].text == 'Restart'): stats.choosing_game_type = True else: self.handle_next(request, params) class ButtonContinueHandler(BaseHandler): def handle(self, request, params): if params[4].text == 'Continue': gf.set_pause(False) else: self.handle_next(request, params) class OnePlayerModeHandler(BaseHandler): def handle(self, request, params): if params[4].text == 'One player': stats.single_player = True params[1].clear() params[1].append(Hero()) gf.start_game(params[1], params[2], params[3]) else: self.handle_next(request, params) class TwoPlayersModeHandler(BaseHandler): def handle(self, request, params): if params[4].text == 'Two players': 
stats.single_player = False params[1].clear() params[1].append(Hero()) params[1].append(Hero()) gf.start_game(params[1], params[2], params[3]) else: self.handle_next(request, params) class PauseHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_ESCAPE and stats.game_active != stats.pause: gf.set_pause(not stats.pause) else: self.handle_next(request, params) class BackToMainMenuHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_ESCAPE and stats.choosing_game_type: stats.choosing_game_type = False else: self.handle_next(request, params) class ButtonBackToMainMenuHandler(BaseHandler): def handle(self, request, params): if len(params) == 5 and params[4].text == 'Back': stats.choosing_game_type = False else: self.handle_next(request, params) class GameActiveHandler(BaseHandler): def handle(self, request, params): if stats.game_active: self.handle_next(request, params) class GameNotActiveHandler(BaseHandler): def handle(self, request, params): if not stats.game_active: self.handle_next(request, params) class ChangeGunHandler(BaseHandler): def handle(self, request, params): for hero in params[1]: if request.key == pygame.K_n: hero.change_bullets("Bullet") if request.key == pygame.K_f: hero.change_bullets("FastBullet") if request.key == pygame.K_b: hero.change_bullets("BigBullet") if request.key == pygame.K_EQUALS: hero.bullet_type = None self.handle_next(request, params) class FirstMoveHandler(BaseHandler): def handle(self, request, params): if request.key == pygame.K_UP: params[1][0].speed_y = -params[1][0].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_DOWN: params[1][0].speed_y = params[1][0].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_LEFT: params[1][0].speed_x = -params[1][0].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_RIGHT: params[1][0].speed_x = params[1][0].speed \ if request.type == pygame.KEYDOWN 
else 0 self.handle_next(request, params) class SecondMoveHandler(BaseHandler): def handle(self, request, params): if not stats.single_player: if request.key == pygame.K_w: params[1][1].speed_y = -params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_s: params[1][1].speed_y = params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_a: params[1][1].speed_x = -params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 elif request.key == pygame.K_d: params[1][1].speed_x = params[1][1].speed \ if request.type == pygame.KEYDOWN else 0 self.handle_next(request, params)
none
1
2.454015
2
python/examples/synchronous-scraper.py
Fe-Nick-S/experiments
2
6621496
<filename>python/examples/synchronous-scraper.py from urllib.request import Request, urlopen from time import time SITES = [ "http://news.ycombinator.com/", "https://www.yahoo.com/", "http://salmonofcapistrano.com/", "https://mail.ru/" ] def find_size(url): req = Request(url) with urlopen(req) as response: page = response.read() return len(page) def main(): for site in SITES: start_time = time() size = find_size(site) print("Read {:8d} chars from {} in {:6.3f} secs".format(size, site, time() - start_time)) if __name__ == '__main__': start_time = time() print("Start executing...") main() print("Ran in {:6.3f} secs".format(time() - start_time)) print("End executing...")
<filename>python/examples/synchronous-scraper.py from urllib.request import Request, urlopen from time import time SITES = [ "http://news.ycombinator.com/", "https://www.yahoo.com/", "http://salmonofcapistrano.com/", "https://mail.ru/" ] def find_size(url): req = Request(url) with urlopen(req) as response: page = response.read() return len(page) def main(): for site in SITES: start_time = time() size = find_size(site) print("Read {:8d} chars from {} in {:6.3f} secs".format(size, site, time() - start_time)) if __name__ == '__main__': start_time = time() print("Start executing...") main() print("Ran in {:6.3f} secs".format(time() - start_time)) print("End executing...")
none
1
3.251456
3
scripts/experiments/performance.py
jjbrophy47/tree_deletion
1
6621497
""" This experiment tests predictive performance. """ import os import sys import time import argparse import resource from datetime import datetime import numpy as np import pandas as pd from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import StratifiedKFold from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier here = os.path.abspath(os.path.dirname(__file__)) sys.path.insert(0, here + '/../../') sys.path.insert(0, here + '/../') import dare from utility import data_util from utility import exp_util from utility import print_util def _get_model(args): """ Return the appropriate model. """ if args.model in ['dare']: model = dare.Forest(criterion=args.criterion, max_depth=args.max_depth, n_estimators=args.n_estimators, max_features=args.max_features, topd=args.topd, k=args.k, verbose=args.verbose, random_state=args.rs) elif args.model == 'extra_trees': model = ExtraTreesClassifier(n_estimators=args.n_estimators, max_depth=args.max_depth, max_features=args.max_features, criterion=args.criterion, random_state=args.rs) elif args.model == 'extra_trees_k1': model = ExtraTreesClassifier(n_estimators=args.n_estimators, max_depth=args.max_depth, max_features=1, criterion=args.criterion, random_state=args.rs) elif args.model == 'sklearn': model = RandomForestClassifier(n_estimators=args.n_estimators, max_depth=args.max_depth, max_features=args.max_features, criterion=args.criterion, random_state=args.rs, bootstrap=args.bootstrap) else: raise ValueError('model {} unknown!'.format(args.model)) return model def _get_model_dict(args, params): """ Return the appropriate model. 
""" if args.model == 'dare': model = dare.Forest(criterion=args.criterion, max_depth=params['max_depth'], n_estimators=params['n_estimators'], max_features=args.max_features, topd=args.topd, k=params['k'], verbose=args.verbose, random_state=args.rs) elif args.model == 'extra_trees': model = ExtraTreesClassifier(n_estimators=params['n_estimators'], max_depth=params['max_depth'], max_features=args.max_features, criterion=args.criterion, random_state=args.rs) elif args.model == 'extra_trees_k1': model = ExtraTreesClassifier(n_estimators=params['n_estimators'], max_depth=params['max_depth'], max_features=1, criterion=args.criterion, random_state=args.rs) elif args.model == 'sklearn': model = RandomForestClassifier(n_estimators=params['n_estimators'], max_depth=params['max_depth'], max_features=args.max_features, criterion=args.criterion, random_state=args.rs, bootstrap=args.bootstrap) else: raise ValueError('model {} unknown!'.format(args.model)) return model def _get_best_params(gs, param_grid, keys, logger, tol=1e-3): """ Chooses the set of hyperparameters whose `mean_fit_score` is within `tol` of the best `mean_fit_score` and has the lowest `mean_fit_time`. 
""" pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100) cols = ['mean_fit_time', 'mean_test_score', 'rank_test_score'] cols += ['param_{}'.format(param) for param in keys] df = pd.DataFrame(gs.cv_results_) logger.info('gridsearch results:') logger.info(df[cols].sort_values('rank_test_score')) # filter the parameters with the highest performances logger.info('tolerance: {}'.format(args.tol)) df = df[df['mean_test_score'].max() - df['mean_test_score'] <= tol] best_df = df.sort_values('mean_fit_time').reset_index().loc[0] best_ndx = best_df['index'] best_params = best_df['params'] logger.info('best_index: {}, best_params: {}'.format(best_ndx, best_params)) return best_params def performance(args, out_dir, logger): begin = time.time() # obtain data X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir) # dataset statistics logger.info('train instances: {:,}'.format(X_train.shape[0])) logger.info('test instances: {:,}'.format(X_test.shape[0])) logger.info('attributes: {:,}'.format(X_train.shape[1])) logger.info('split criterion: {}'.format(args.criterion)) # tune on a fraction of the training data if not args.no_tune: if args.tune_frac < 1.0: sss = StratifiedShuffleSplit(n_splits=1, test_size=2, train_size=args.tune_frac, random_state=args.rs) tune_indices, _ = list(sss.split(X_train, y_train))[0] X_train_sub, y_train_sub = X_train[tune_indices], y_train[tune_indices] logger.info('tune instances: {:,}'.format(X_train_sub.shape[0])) else: X_train_sub, y_train_sub = X_train, y_train else: X_train_sub, y_train_sub = X_train, y_train # hyperparameter values n_estimators = [10, 50, 100, 250] max_depth = [1, 3, 5, 10, 20] # set hyperparameter grid param_grid = {'max_depth': max_depth, 'n_estimators': n_estimators} # add additional parameter for DaRE if args.model == 'dare': param_grid['k'] = [5, 10, 25, 50] # get hyperparameter names keys = list(param_grid.keys()) # test model 
logger.info('\n{}'.format(args.model.capitalize())) start = time.time() model = _get_model(args) # tune hyperparameters if not args.no_tune: logger.info('param_grid: {}'.format(param_grid)) # cross-validation skf = StratifiedKFold(n_splits=args.cv, shuffle=True, random_state=args.rs) gs = GridSearchCV(model, param_grid, scoring=args.scoring, cv=skf, verbose=args.verbose, refit=False) gs = gs.fit(X_train_sub, y_train_sub) best_params = _get_best_params(gs, param_grid, keys, logger, args.tol) model = _get_model_dict(args, best_params) # record time it takes to tune the model tune_time = time.time() - start # train best model start = time.time() model = model.fit(X_train, y_train) train_time = time.time() - start logger.info('train time: {:.3f}s'.format(train_time)) n_nodes, n_random, n_greedy = model.trees_[0].get_node_statistics() print('[Tree 0] no. nodes: {:,}, no. random: {:,}, no. greedy: {:,}'.format(n_nodes, n_random, n_greedy)) print('[Tree 0] memory usage: {:,} bytes'.format(model.trees_[0].get_memory_usage())) print('[Forest] memory usage: {:,} bytes'.format(model.get_memory_usage())) print('max_rss: {:,}'.format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) exit(0) # evaluate auc, acc, ap = exp_util.performance(model, X_test, y_test, name=args.model, logger=logger) # save results result = model.get_params() result['model'] = args.model result['bootstrap'] = args.bootstrap result['auc'] = auc result['acc'] = acc result['ap'] = ap result['train_time'] = train_time result['tune_train_time'] = tune_time + train_time result['max_rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss np.save(os.path.join(out_dir, 'results.npy'), result) logger.info('total time: {:.3f}s'.format(time.time() - begin)) logger.info('max_rss: {:,}'.format(result['max_rss'])) def main(args): # create output dir out_dir = os.path.join(args.out_dir, args.dataset, args.criterion) # add tuning to filepath if args.no_tune: out_dir = os.path.join(out_dir, 'no_tune', 
'rs_{}'.format(args.rs)) else: out_dir = os.path.join(out_dir, 'tuned', 'rs_{}'.format(args.rs)) # create filename if args.model == 'sklearn': out_dir = os.path.join(out_dir, args.model) if args.bootstrap: out_dir = os.path.join(out_dir, 'bootstrap') elif args.model == 'dare': assert args.topd == 0 out_dir = os.path.join(out_dir, args.model) elif args.model in ['extra_trees', 'extra_trees_k1', 'borat']: out_dir = os.path.join(out_dir, args.model) else: raise ValueError('model {} unknown!'.format(args.model)) # create output directory and clear any previous contents os.makedirs(out_dir, exist_ok=True) print_util.clear_dir(out_dir) # create logger logger = print_util.get_logger(os.path.join(out_dir, 'log.txt')) logger.info(args) logger.info(datetime.now()) # write everything printed to stdout to this log file logfile, stdout, stderr = print_util.stdout_stderr_to_log(os.path.join(out_dir, 'log+.txt')) # run experiment performance(args, out_dir, logger) # restore original stdout and stderr settings print_util.reset_stdout_stderr(logfile, stdout, stderr) if __name__ == '__main__': parser = argparse.ArgumentParser() # I/O settings parser.add_argument('--data_dir', type=str, default='data', help='data directory.') parser.add_argument('--out_dir', type=str, default='output/performance/', help='output directory.') parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.') # experiment settings parser.add_argument('--rs', type=int, default=1, help='random state.') parser.add_argument('--model', type=str, default='dare', help='type of model.') parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.') parser.add_argument('--topd', type=int, default=0, help='0 for exact, 1000 for random.') parser.add_argument('--k', type=int, default=25, help='no. 
of candidate thresholds to sample.') parser.add_argument('--bootstrap', action='store_true', default=False, help='use bootstrapping with sklearn.') # tuning settings parser.add_argument('--no_tune', action='store_true', default=False, help='do not tune.') parser.add_argument('--tune_frac', type=float, default=1.0, help='fraction of training to use for tuning.') parser.add_argument('--cv', type=int, default=5, help='number of cross-validation folds for tuning.') parser.add_argument('--scoring', type=str, default='roc_auc', help='metric for tuning.') parser.add_argument('--tol', type=float, default=1e-3, help='allowable accuracy difference from the best.') # tree/forest hyperparameters parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.') parser.add_argument('--max_features', type=str, default='sqrt', help='maximum no. features to sample.') parser.add_argument('--max_depth', type=int, default=20, help='maximum depth of the tree.') # display settings parser.add_argument('--verbose', type=int, default=2, help='verbosity level.') args = parser.parse_args() main(args)
""" This experiment tests predictive performance. """ import os import sys import time import argparse import resource from datetime import datetime import numpy as np import pandas as pd from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import StratifiedKFold from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier here = os.path.abspath(os.path.dirname(__file__)) sys.path.insert(0, here + '/../../') sys.path.insert(0, here + '/../') import dare from utility import data_util from utility import exp_util from utility import print_util def _get_model(args): """ Return the appropriate model. """ if args.model in ['dare']: model = dare.Forest(criterion=args.criterion, max_depth=args.max_depth, n_estimators=args.n_estimators, max_features=args.max_features, topd=args.topd, k=args.k, verbose=args.verbose, random_state=args.rs) elif args.model == 'extra_trees': model = ExtraTreesClassifier(n_estimators=args.n_estimators, max_depth=args.max_depth, max_features=args.max_features, criterion=args.criterion, random_state=args.rs) elif args.model == 'extra_trees_k1': model = ExtraTreesClassifier(n_estimators=args.n_estimators, max_depth=args.max_depth, max_features=1, criterion=args.criterion, random_state=args.rs) elif args.model == 'sklearn': model = RandomForestClassifier(n_estimators=args.n_estimators, max_depth=args.max_depth, max_features=args.max_features, criterion=args.criterion, random_state=args.rs, bootstrap=args.bootstrap) else: raise ValueError('model {} unknown!'.format(args.model)) return model def _get_model_dict(args, params): """ Return the appropriate model. 
""" if args.model == 'dare': model = dare.Forest(criterion=args.criterion, max_depth=params['max_depth'], n_estimators=params['n_estimators'], max_features=args.max_features, topd=args.topd, k=params['k'], verbose=args.verbose, random_state=args.rs) elif args.model == 'extra_trees': model = ExtraTreesClassifier(n_estimators=params['n_estimators'], max_depth=params['max_depth'], max_features=args.max_features, criterion=args.criterion, random_state=args.rs) elif args.model == 'extra_trees_k1': model = ExtraTreesClassifier(n_estimators=params['n_estimators'], max_depth=params['max_depth'], max_features=1, criterion=args.criterion, random_state=args.rs) elif args.model == 'sklearn': model = RandomForestClassifier(n_estimators=params['n_estimators'], max_depth=params['max_depth'], max_features=args.max_features, criterion=args.criterion, random_state=args.rs, bootstrap=args.bootstrap) else: raise ValueError('model {} unknown!'.format(args.model)) return model def _get_best_params(gs, param_grid, keys, logger, tol=1e-3): """ Chooses the set of hyperparameters whose `mean_fit_score` is within `tol` of the best `mean_fit_score` and has the lowest `mean_fit_time`. 
""" pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100) cols = ['mean_fit_time', 'mean_test_score', 'rank_test_score'] cols += ['param_{}'.format(param) for param in keys] df = pd.DataFrame(gs.cv_results_) logger.info('gridsearch results:') logger.info(df[cols].sort_values('rank_test_score')) # filter the parameters with the highest performances logger.info('tolerance: {}'.format(args.tol)) df = df[df['mean_test_score'].max() - df['mean_test_score'] <= tol] best_df = df.sort_values('mean_fit_time').reset_index().loc[0] best_ndx = best_df['index'] best_params = best_df['params'] logger.info('best_index: {}, best_params: {}'.format(best_ndx, best_params)) return best_params def performance(args, out_dir, logger): begin = time.time() # obtain data X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir) # dataset statistics logger.info('train instances: {:,}'.format(X_train.shape[0])) logger.info('test instances: {:,}'.format(X_test.shape[0])) logger.info('attributes: {:,}'.format(X_train.shape[1])) logger.info('split criterion: {}'.format(args.criterion)) # tune on a fraction of the training data if not args.no_tune: if args.tune_frac < 1.0: sss = StratifiedShuffleSplit(n_splits=1, test_size=2, train_size=args.tune_frac, random_state=args.rs) tune_indices, _ = list(sss.split(X_train, y_train))[0] X_train_sub, y_train_sub = X_train[tune_indices], y_train[tune_indices] logger.info('tune instances: {:,}'.format(X_train_sub.shape[0])) else: X_train_sub, y_train_sub = X_train, y_train else: X_train_sub, y_train_sub = X_train, y_train # hyperparameter values n_estimators = [10, 50, 100, 250] max_depth = [1, 3, 5, 10, 20] # set hyperparameter grid param_grid = {'max_depth': max_depth, 'n_estimators': n_estimators} # add additional parameter for DaRE if args.model == 'dare': param_grid['k'] = [5, 10, 25, 50] # get hyperparameter names keys = list(param_grid.keys()) # test model 
logger.info('\n{}'.format(args.model.capitalize())) start = time.time() model = _get_model(args) # tune hyperparameters if not args.no_tune: logger.info('param_grid: {}'.format(param_grid)) # cross-validation skf = StratifiedKFold(n_splits=args.cv, shuffle=True, random_state=args.rs) gs = GridSearchCV(model, param_grid, scoring=args.scoring, cv=skf, verbose=args.verbose, refit=False) gs = gs.fit(X_train_sub, y_train_sub) best_params = _get_best_params(gs, param_grid, keys, logger, args.tol) model = _get_model_dict(args, best_params) # record time it takes to tune the model tune_time = time.time() - start # train best model start = time.time() model = model.fit(X_train, y_train) train_time = time.time() - start logger.info('train time: {:.3f}s'.format(train_time)) n_nodes, n_random, n_greedy = model.trees_[0].get_node_statistics() print('[Tree 0] no. nodes: {:,}, no. random: {:,}, no. greedy: {:,}'.format(n_nodes, n_random, n_greedy)) print('[Tree 0] memory usage: {:,} bytes'.format(model.trees_[0].get_memory_usage())) print('[Forest] memory usage: {:,} bytes'.format(model.get_memory_usage())) print('max_rss: {:,}'.format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) exit(0) # evaluate auc, acc, ap = exp_util.performance(model, X_test, y_test, name=args.model, logger=logger) # save results result = model.get_params() result['model'] = args.model result['bootstrap'] = args.bootstrap result['auc'] = auc result['acc'] = acc result['ap'] = ap result['train_time'] = train_time result['tune_train_time'] = tune_time + train_time result['max_rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss np.save(os.path.join(out_dir, 'results.npy'), result) logger.info('total time: {:.3f}s'.format(time.time() - begin)) logger.info('max_rss: {:,}'.format(result['max_rss'])) def main(args): # create output dir out_dir = os.path.join(args.out_dir, args.dataset, args.criterion) # add tuning to filepath if args.no_tune: out_dir = os.path.join(out_dir, 'no_tune', 
'rs_{}'.format(args.rs)) else: out_dir = os.path.join(out_dir, 'tuned', 'rs_{}'.format(args.rs)) # create filename if args.model == 'sklearn': out_dir = os.path.join(out_dir, args.model) if args.bootstrap: out_dir = os.path.join(out_dir, 'bootstrap') elif args.model == 'dare': assert args.topd == 0 out_dir = os.path.join(out_dir, args.model) elif args.model in ['extra_trees', 'extra_trees_k1', 'borat']: out_dir = os.path.join(out_dir, args.model) else: raise ValueError('model {} unknown!'.format(args.model)) # create output directory and clear any previous contents os.makedirs(out_dir, exist_ok=True) print_util.clear_dir(out_dir) # create logger logger = print_util.get_logger(os.path.join(out_dir, 'log.txt')) logger.info(args) logger.info(datetime.now()) # write everything printed to stdout to this log file logfile, stdout, stderr = print_util.stdout_stderr_to_log(os.path.join(out_dir, 'log+.txt')) # run experiment performance(args, out_dir, logger) # restore original stdout and stderr settings print_util.reset_stdout_stderr(logfile, stdout, stderr) if __name__ == '__main__': parser = argparse.ArgumentParser() # I/O settings parser.add_argument('--data_dir', type=str, default='data', help='data directory.') parser.add_argument('--out_dir', type=str, default='output/performance/', help='output directory.') parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.') # experiment settings parser.add_argument('--rs', type=int, default=1, help='random state.') parser.add_argument('--model', type=str, default='dare', help='type of model.') parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.') parser.add_argument('--topd', type=int, default=0, help='0 for exact, 1000 for random.') parser.add_argument('--k', type=int, default=25, help='no. 
of candidate thresholds to sample.') parser.add_argument('--bootstrap', action='store_true', default=False, help='use bootstrapping with sklearn.') # tuning settings parser.add_argument('--no_tune', action='store_true', default=False, help='do not tune.') parser.add_argument('--tune_frac', type=float, default=1.0, help='fraction of training to use for tuning.') parser.add_argument('--cv', type=int, default=5, help='number of cross-validation folds for tuning.') parser.add_argument('--scoring', type=str, default='roc_auc', help='metric for tuning.') parser.add_argument('--tol', type=float, default=1e-3, help='allowable accuracy difference from the best.') # tree/forest hyperparameters parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.') parser.add_argument('--max_features', type=str, default='sqrt', help='maximum no. features to sample.') parser.add_argument('--max_depth', type=int, default=20, help='maximum depth of the tree.') # display settings parser.add_argument('--verbose', type=int, default=2, help='verbosity level.') args = parser.parse_args() main(args)
en
0.650919
This experiment tests predictive performance. Return the appropriate model. Return the appropriate model. Chooses the set of hyperparameters whose `mean_fit_score` is within `tol` of the best `mean_fit_score` and has the lowest `mean_fit_time`. # filter the parameters with the highest performances # obtain data # dataset statistics # tune on a fraction of the training data # hyperparameter values # set hyperparameter grid # add additional parameter for DaRE # get hyperparameter names # test model # tune hyperparameters # cross-validation # record time it takes to tune the model # train best model # evaluate # save results # create output dir # add tuning to filepath # create filename # create output directory and clear any previous contents # create logger # write everything printed to stdout to this log file # run experiment # restore original stdout and stderr settings # I/O settings # experiment settings # tuning settings # tree/forest hyperparameters # display settings
2.723881
3
lib/disco/task.py
pooya/disco
786
6621498
""" :mod:`disco.task` -- Disco Tasks ================================ This module defines objects for interfacing with :term:`tasks <task>` assigned by the master. """ import os, time from disco.compat import basestring, integer_types from disco import dPickle from disco.util import hexhash def jobdata(*objs): """ :return: :ref:`jobdata` needed for instantiating the :class:`disco.job.Job` on the node. """ return dPickle.dumps(objs, -1) class Task(object): """ Encapsulates the information specific to a particular :term:`task` coming from the master. .. attribute:: host The name of the host this task is running on. .. attribute:: jobname The name of the :term:`job` this task is part of. .. attribute:: master The name of the master host for this task. .. attribute:: mode The phase which this task is part of. Currently either :term:`map` or :term:`reduce`. .. attribute:: taskid The id of this task, assigned by the master. .. attribute:: uid A unique id for this particular task instance. """ def __init__(self, host='', jobfile='', jobname='', master=None, disco_port=None, put_port=None, ddfs_data='', disco_data='', stage=None, group=None, grouping=None, taskid=-1): from disco.job import JobPack from disco.ddfs import DDFS self.host = host self.jobfile = jobfile self.jobname = jobname self.jobpack = JobPack.load(open(jobfile, 'rb')) self.jobobjs = dPickle.loads(self.jobpack.jobdata) self.master = master self.disco_port = disco_port self.put_port = put_port self.ddfs_data = ddfs_data self.disco_data = disco_data self.stage = stage self.group = '{0[0]}-{0[1]}'.format(group) self.group_label, self.group_host = group self.grouping = grouping self.taskid = taskid self.outputs = {} self.uid = '{0}:{1}-{2}-{3}-{4}'.format(self.stage, DDFS.safe_name(self.group), self.taskid, hexhash(str((time.time())).encode()), os.getpid()) @property def jobpath(self): return os.path.join(self.host, hexhash(self.jobname), self.jobname) @property def taskpath(self): return 
os.path.join(hexhash(self.uid.encode()), self.uid) def makedirs(self): from disco.fileutils import ensure_path ensure_path(self.taskpath) def output_filename(self, label): if not isinstance(label, integer_types): raise ValueError("Output label ({0} : {1}) must be an integer or None".format(label, type(label))) return '{0}-{1}-{2}'.format(self.stage, self.group, label) def output_path(self, label): return self.path(self.output_filename(label)) def output(self, label=None, typ='disco'): if label is None: return self.path(self.uid), typ, 0 return self.output_path(label), 'part', label def path(self, name): """ :return: The *name* joined to the :attr:`taskpath`. """ return os.path.join(self.taskpath, name) def url(self, name, scheme='disco'): return '{0}://{1}/disco/{2}/{3}/{4}'.format(scheme, self.host, self.jobpath, self.taskpath, name) def get(self, key): """ Gets an out-of-band result for the task with the key *key*. Given the semantics of OOB results, this means that only the reduce phase can access results produced in the preceding map phase. """ from disco.util import load_oob return load_oob(self.master, self.jobname, key) def put(self, key, value): """ Stores an out-of-band result *value* (bytes) with the key *key*. Key must be unique in this job. Maximum key length is 256 characters. Only characters in the set ``[a-zA-Z_\-:0-9@]`` are allowed in the key. """ from disco.ddfs import DDFS from disco.util import save_oob from disco.error import DiscoError if DDFS.safe_name(key) != key: raise DiscoError("OOB key contains invalid characters ({0})".format(key)) save_oob(self.master, self.jobname, key, value)
""" :mod:`disco.task` -- Disco Tasks ================================ This module defines objects for interfacing with :term:`tasks <task>` assigned by the master. """ import os, time from disco.compat import basestring, integer_types from disco import dPickle from disco.util import hexhash def jobdata(*objs): """ :return: :ref:`jobdata` needed for instantiating the :class:`disco.job.Job` on the node. """ return dPickle.dumps(objs, -1) class Task(object): """ Encapsulates the information specific to a particular :term:`task` coming from the master. .. attribute:: host The name of the host this task is running on. .. attribute:: jobname The name of the :term:`job` this task is part of. .. attribute:: master The name of the master host for this task. .. attribute:: mode The phase which this task is part of. Currently either :term:`map` or :term:`reduce`. .. attribute:: taskid The id of this task, assigned by the master. .. attribute:: uid A unique id for this particular task instance. """ def __init__(self, host='', jobfile='', jobname='', master=None, disco_port=None, put_port=None, ddfs_data='', disco_data='', stage=None, group=None, grouping=None, taskid=-1): from disco.job import JobPack from disco.ddfs import DDFS self.host = host self.jobfile = jobfile self.jobname = jobname self.jobpack = JobPack.load(open(jobfile, 'rb')) self.jobobjs = dPickle.loads(self.jobpack.jobdata) self.master = master self.disco_port = disco_port self.put_port = put_port self.ddfs_data = ddfs_data self.disco_data = disco_data self.stage = stage self.group = '{0[0]}-{0[1]}'.format(group) self.group_label, self.group_host = group self.grouping = grouping self.taskid = taskid self.outputs = {} self.uid = '{0}:{1}-{2}-{3}-{4}'.format(self.stage, DDFS.safe_name(self.group), self.taskid, hexhash(str((time.time())).encode()), os.getpid()) @property def jobpath(self): return os.path.join(self.host, hexhash(self.jobname), self.jobname) @property def taskpath(self): return 
os.path.join(hexhash(self.uid.encode()), self.uid) def makedirs(self): from disco.fileutils import ensure_path ensure_path(self.taskpath) def output_filename(self, label): if not isinstance(label, integer_types): raise ValueError("Output label ({0} : {1}) must be an integer or None".format(label, type(label))) return '{0}-{1}-{2}'.format(self.stage, self.group, label) def output_path(self, label): return self.path(self.output_filename(label)) def output(self, label=None, typ='disco'): if label is None: return self.path(self.uid), typ, 0 return self.output_path(label), 'part', label def path(self, name): """ :return: The *name* joined to the :attr:`taskpath`. """ return os.path.join(self.taskpath, name) def url(self, name, scheme='disco'): return '{0}://{1}/disco/{2}/{3}/{4}'.format(scheme, self.host, self.jobpath, self.taskpath, name) def get(self, key): """ Gets an out-of-band result for the task with the key *key*. Given the semantics of OOB results, this means that only the reduce phase can access results produced in the preceding map phase. """ from disco.util import load_oob return load_oob(self.master, self.jobname, key) def put(self, key, value): """ Stores an out-of-band result *value* (bytes) with the key *key*. Key must be unique in this job. Maximum key length is 256 characters. Only characters in the set ``[a-zA-Z_\-:0-9@]`` are allowed in the key. """ from disco.ddfs import DDFS from disco.util import save_oob from disco.error import DiscoError if DDFS.safe_name(key) != key: raise DiscoError("OOB key contains invalid characters ({0})".format(key)) save_oob(self.master, self.jobname, key, value)
en
0.805024
:mod:`disco.task` -- Disco Tasks ================================ This module defines objects for interfacing with :term:`tasks <task>` assigned by the master. :return: :ref:`jobdata` needed for instantiating the :class:`disco.job.Job` on the node. Encapsulates the information specific to a particular :term:`task` coming from the master. .. attribute:: host The name of the host this task is running on. .. attribute:: jobname The name of the :term:`job` this task is part of. .. attribute:: master The name of the master host for this task. .. attribute:: mode The phase which this task is part of. Currently either :term:`map` or :term:`reduce`. .. attribute:: taskid The id of this task, assigned by the master. .. attribute:: uid A unique id for this particular task instance. :return: The *name* joined to the :attr:`taskpath`. Gets an out-of-band result for the task with the key *key*. Given the semantics of OOB results, this means that only the reduce phase can access results produced in the preceding map phase. Stores an out-of-band result *value* (bytes) with the key *key*. Key must be unique in this job. Maximum key length is 256 characters. Only characters in the set ``[a-zA-Z_\-:0-9@]`` are allowed in the key.
2.423995
2
textcnn/model.py
bhsimon0810/text-classification
1
6621499
import tensorflow as tf import numpy as np class TextCNN(object): """ A CNN for text classification. Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer. """ def __init__( self, sequence_length, num_class, vocab_size, embedding_size, filter_sizes, num_filter, pretrained_embeddings, l2_reg_lambda=0.0): # placeholders self.inputs = tf.compat.v1.placeholder(tf.int32, [None, sequence_length], name="inputs") self.labels = tf.compat.v1.placeholder(tf.int32, [None], name="labels") self.dropout_keep_prob = tf.compat.v1.placeholder(tf.float32, name="dropout_keep_prob") # embedding layer with tf.device('/cpu:0'), tf.compat.v1.variable_scope("embedding"): self.embedding_matrix = tf.compat.v1.get_variable( name="embedding_matrix", shape=[vocab_size, embedding_size], initializer=tf.constant_initializer(pretrained_embeddings), dtype=tf.float32) # with tf.device('/cpu:0'), tf.name_scope("embedding"): # self.embedding_matrix = tf.Variable( # tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), # name="W") self.embedded_inputs = tf.expand_dims(tf.nn.embedding_lookup(self.embedding_matrix, self.inputs), -1) # create a convolution + maxpool layer for each filter size pooled_outputs = [] for i, filter_size in enumerate(filter_sizes): with tf.compat.v1.name_scope("conv-maxpool-%s" % filter_size): # Convolution Layer filter_shape = [filter_size, embedding_size, 1, num_filter] W = tf.compat.v1.Variable(tf.random.truncated_normal(filter_shape, stddev=0.1), name="W") b = tf.compat.v1.Variable(tf.constant(0.1, shape=[num_filter]), name="b") conv = tf.nn.conv2d( self.embedded_inputs, W, strides=[1, 1, 1, 1], padding="VALID", name="conv") # Apply nonlinearity h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") # Maxpooling over the outputs pooled = tf.nn.max_pool2d( h, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool") pooled_outputs.append(pooled) # combine all the pooled features num_filters_total = 
num_filter * len(filter_sizes) self.h_pool = tf.concat(pooled_outputs, 3) self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total]) # add dropout with tf.compat.v1.name_scope("dropout"): self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob) # final (unnormalized) scores and predictions with tf.compat.v1.name_scope("output"): W = tf.compat.v1.get_variable( "W", shape=[num_filters_total, num_class], initializer=tf.contrib.layers.xavier_initializer()) b = tf.Variable(tf.constant(0.1, shape=[num_class]), name="b") self.logits = tf.compat.v1.nn.xw_plus_b(self.h_drop, W, b, name="logits") self.predictions = tf.argmax(self.logits, 1, name="predictions") # calculate mean cross-entropy loss with tf.name_scope("loss"): labels = tf.one_hot(self.labels, depth=num_class) losses = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels, logits=self.logits) l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()]) * l2_reg_lambda self.loss = tf.reduce_mean(losses) + l2_reg # accuracy with tf.name_scope("accuracy"): correct_predictions = tf.equal(self.predictions, tf.argmax(labels, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
import tensorflow as tf import numpy as np class TextCNN(object): """ A CNN for text classification. Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer. """ def __init__( self, sequence_length, num_class, vocab_size, embedding_size, filter_sizes, num_filter, pretrained_embeddings, l2_reg_lambda=0.0): # placeholders self.inputs = tf.compat.v1.placeholder(tf.int32, [None, sequence_length], name="inputs") self.labels = tf.compat.v1.placeholder(tf.int32, [None], name="labels") self.dropout_keep_prob = tf.compat.v1.placeholder(tf.float32, name="dropout_keep_prob") # embedding layer with tf.device('/cpu:0'), tf.compat.v1.variable_scope("embedding"): self.embedding_matrix = tf.compat.v1.get_variable( name="embedding_matrix", shape=[vocab_size, embedding_size], initializer=tf.constant_initializer(pretrained_embeddings), dtype=tf.float32) # with tf.device('/cpu:0'), tf.name_scope("embedding"): # self.embedding_matrix = tf.Variable( # tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), # name="W") self.embedded_inputs = tf.expand_dims(tf.nn.embedding_lookup(self.embedding_matrix, self.inputs), -1) # create a convolution + maxpool layer for each filter size pooled_outputs = [] for i, filter_size in enumerate(filter_sizes): with tf.compat.v1.name_scope("conv-maxpool-%s" % filter_size): # Convolution Layer filter_shape = [filter_size, embedding_size, 1, num_filter] W = tf.compat.v1.Variable(tf.random.truncated_normal(filter_shape, stddev=0.1), name="W") b = tf.compat.v1.Variable(tf.constant(0.1, shape=[num_filter]), name="b") conv = tf.nn.conv2d( self.embedded_inputs, W, strides=[1, 1, 1, 1], padding="VALID", name="conv") # Apply nonlinearity h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") # Maxpooling over the outputs pooled = tf.nn.max_pool2d( h, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool") pooled_outputs.append(pooled) # combine all the pooled features num_filters_total = 
num_filter * len(filter_sizes) self.h_pool = tf.concat(pooled_outputs, 3) self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total]) # add dropout with tf.compat.v1.name_scope("dropout"): self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob) # final (unnormalized) scores and predictions with tf.compat.v1.name_scope("output"): W = tf.compat.v1.get_variable( "W", shape=[num_filters_total, num_class], initializer=tf.contrib.layers.xavier_initializer()) b = tf.Variable(tf.constant(0.1, shape=[num_class]), name="b") self.logits = tf.compat.v1.nn.xw_plus_b(self.h_drop, W, b, name="logits") self.predictions = tf.argmax(self.logits, 1, name="predictions") # calculate mean cross-entropy loss with tf.name_scope("loss"): labels = tf.one_hot(self.labels, depth=num_class) losses = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels, logits=self.logits) l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()]) * l2_reg_lambda self.loss = tf.reduce_mean(losses) + l2_reg # accuracy with tf.name_scope("accuracy"): correct_predictions = tf.equal(self.predictions, tf.argmax(labels, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
en
0.546746
A CNN for text classification. Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer. # placeholders # embedding layer # with tf.device('/cpu:0'), tf.name_scope("embedding"): # self.embedding_matrix = tf.Variable( # tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), # name="W") # create a convolution + maxpool layer for each filter size # Convolution Layer # Apply nonlinearity # Maxpooling over the outputs # combine all the pooled features # add dropout # final (unnormalized) scores and predictions # calculate mean cross-entropy loss # accuracy
3.196596
3
benchmark_primes.py
VadymTkachuk/prime-numbers
0
6621500
#!/usr/bin/python # -*- coding: utf-8 -*- # # Author: <NAME> # Date: 02.03.2019 # License: MIT # Purpose: This is a module for speed testing prime's module functions. import timeit import primes def run_test(): SETUP_CODE = ''' import primes import random import numpy as np # n = random.randint(10000000, 100000000)''' # Making names of is_prime#() functions and testing them. # a_prime = 99999989 is a prime number close to 100 000 000. It takes too long in some cases # 99991 a_prime = 30011 #9973 # With less a_prime numbers speed results may differ from function number! print("Testing 12 functions with " + str(a_prime) + ".") for n in range (1, 13): TEST_CODE = "primes.is_prime" + str(n) + "(" + str(a_prime) + ")" # Making funcs names. times = timeit.repeat(setup=SETUP_CODE, stmt=TEST_CODE, repeat=3, number=10) print('Is_primes' + str(n) + '() minimum time is: {}'.format(min(times))) def benchmark_matrix(): def fill_matrix(): a_prime = 17 dimension_const = 100 arr = [[[0 for _ in range(dimension_const)] for _ in range(dimension_const)] for _ in range(dimension_const)] for i in range(1,dimension_const): for j in range(1, dimension_const): for k in range(1, dimension_const): arr[i][j][k] = primes.is_prime1(a_prime) SETUP_CODE = '''''' TEST_CODE = fill_matrix times = timeit.repeat(setup=SETUP_CODE, stmt=TEST_CODE, repeat=3, number=10) print('\nFill_matrix minimum time is: {}'.format(min(times))) if __name__ == '__main__': run_test() benchmark_matrix()
#!/usr/bin/python # -*- coding: utf-8 -*- # # Author: <NAME> # Date: 02.03.2019 # License: MIT # Purpose: This is a module for speed testing prime's module functions. import timeit import primes def run_test(): SETUP_CODE = ''' import primes import random import numpy as np # n = random.randint(10000000, 100000000)''' # Making names of is_prime#() functions and testing them. # a_prime = 99999989 is a prime number close to 100 000 000. It takes too long in some cases # 99991 a_prime = 30011 #9973 # With less a_prime numbers speed results may differ from function number! print("Testing 12 functions with " + str(a_prime) + ".") for n in range (1, 13): TEST_CODE = "primes.is_prime" + str(n) + "(" + str(a_prime) + ")" # Making funcs names. times = timeit.repeat(setup=SETUP_CODE, stmt=TEST_CODE, repeat=3, number=10) print('Is_primes' + str(n) + '() minimum time is: {}'.format(min(times))) def benchmark_matrix(): def fill_matrix(): a_prime = 17 dimension_const = 100 arr = [[[0 for _ in range(dimension_const)] for _ in range(dimension_const)] for _ in range(dimension_const)] for i in range(1,dimension_const): for j in range(1, dimension_const): for k in range(1, dimension_const): arr[i][j][k] = primes.is_prime1(a_prime) SETUP_CODE = '''''' TEST_CODE = fill_matrix times = timeit.repeat(setup=SETUP_CODE, stmt=TEST_CODE, repeat=3, number=10) print('\nFill_matrix minimum time is: {}'.format(min(times))) if __name__ == '__main__': run_test() benchmark_matrix()
en
0.623819
#!/usr/bin/python # -*- coding: utf-8 -*- # # Author: <NAME> # Date: 02.03.2019 # License: MIT # Purpose: This is a module for speed testing prime's module functions. import primes import random import numpy as np # n = random.randint(10000000, 100000000) # Making names of is_prime#() functions and testing them. # a_prime = 99999989 is a prime number close to 100 000 000. It takes too long in some cases # 99991 #9973 # With less a_prime numbers speed results may differ from function number! # Making funcs names.
3.311122
3
iguanas/rule_scoring/rule_scorer.py
Aditya-Kapadiya/Iguanas
0
6621501
<reponame>Aditya-Kapadiya/Iguanas<gh_stars>0 """ Generates scores for each rule in a set. Scaling functions can also be applied to the scores. """ from iguanas.rule_scoring.rule_scoring_methods import PerformanceScorer, LogRegScorer,\ RandomForestScorer from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType from iguanas.rule_scoring.rule_score_scalers import MinMaxScaler, ConstantScaler from typing import Union class RuleScorer: """ Generates rule scores using the rule binary columns and the target column. Parameters ---------- scoring_class : Union[PerformanceScorer, LogRegScorer, RandomForestScorer] The instantiated scoring class - this defines the method for generating the scores. Scoring classes are available in the `rule_scoring_methods` module. scaling_class : Union[MinMaxScaler, ConstantScaler], optional The instantiated scaling class - this defines the method for scaling the raw scores from the scoring class. Scaling classes are available in the `rule_score_scalers` module. Defaults to None. Attributes ---------- rule_scores : Dict[str, int] Contains the generated score (values) for each rule (keys). """ def __init__(self, scoring_class: Union[PerformanceScorer, LogRegScorer, RandomForestScorer], scaling_class=None): self.scoring_class = scoring_class self.scaling_class = scaling_class def fit(self, X_rules: PandasDataFrameType, y: PandasSeriesType, sample_weight=None) -> None: """ Generates rule scores using the rule binary columns and the binary target column. Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. y : PandasSeriesType The binary target column. sample_weight : PandasSeriesType, optional Row-wise weights to apply in the `scoring_class`. Defaults to None. 
""" self.rule_scores = self.scoring_class.fit( X_rules=X_rules, y=y, sample_weight=sample_weight) if self.scaling_class is not None: self.rule_scores = self.scaling_class.fit( rule_scores=self.rule_scores) def transform(self, X_rules: PandasDataFrameType) -> PandasDataFrameType: """ Transforms the rule binary columns to show the generated scores applied to the dataset (i.e. replaces the 1 in `X_rules` with the generated score). Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. Returns ------- PandasDataFrameType The generated scores applied to the dataset. """ X_scores = self.rule_scores * X_rules return X_scores def fit_transform(self, X_rules: PandasDataFrameType, y: PandasSeriesType, sample_weight=None) -> PandasDataFrameType: """ Generates rule scores using the rule binary columns and the binary target column, then transforms the rule binary columns to show the generated scores applied to the dataset (i.e. replaces the 1 in `X_rules` with the generated score). Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. y : PandasSeriesType The binary target column. sample_weight : PandasSeriesType, optional Row-wise weights to apply in the `scoring_class`. Defaults to None. Returns ------- PandasDataFrameType The generated scores applied to the dataset. """ self.fit(X_rules=X_rules, y=y, sample_weight=sample_weight) X_scores = self.transform(X_rules=X_rules) return X_scores
""" Generates scores for each rule in a set. Scaling functions can also be applied to the scores. """ from iguanas.rule_scoring.rule_scoring_methods import PerformanceScorer, LogRegScorer,\ RandomForestScorer from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType from iguanas.rule_scoring.rule_score_scalers import MinMaxScaler, ConstantScaler from typing import Union class RuleScorer: """ Generates rule scores using the rule binary columns and the target column. Parameters ---------- scoring_class : Union[PerformanceScorer, LogRegScorer, RandomForestScorer] The instantiated scoring class - this defines the method for generating the scores. Scoring classes are available in the `rule_scoring_methods` module. scaling_class : Union[MinMaxScaler, ConstantScaler], optional The instantiated scaling class - this defines the method for scaling the raw scores from the scoring class. Scaling classes are available in the `rule_score_scalers` module. Defaults to None. Attributes ---------- rule_scores : Dict[str, int] Contains the generated score (values) for each rule (keys). """ def __init__(self, scoring_class: Union[PerformanceScorer, LogRegScorer, RandomForestScorer], scaling_class=None): self.scoring_class = scoring_class self.scaling_class = scaling_class def fit(self, X_rules: PandasDataFrameType, y: PandasSeriesType, sample_weight=None) -> None: """ Generates rule scores using the rule binary columns and the binary target column. Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. y : PandasSeriesType The binary target column. sample_weight : PandasSeriesType, optional Row-wise weights to apply in the `scoring_class`. Defaults to None. 
""" self.rule_scores = self.scoring_class.fit( X_rules=X_rules, y=y, sample_weight=sample_weight) if self.scaling_class is not None: self.rule_scores = self.scaling_class.fit( rule_scores=self.rule_scores) def transform(self, X_rules: PandasDataFrameType) -> PandasDataFrameType: """ Transforms the rule binary columns to show the generated scores applied to the dataset (i.e. replaces the 1 in `X_rules` with the generated score). Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. Returns ------- PandasDataFrameType The generated scores applied to the dataset. """ X_scores = self.rule_scores * X_rules return X_scores def fit_transform(self, X_rules: PandasDataFrameType, y: PandasSeriesType, sample_weight=None) -> PandasDataFrameType: """ Generates rule scores using the rule binary columns and the binary target column, then transforms the rule binary columns to show the generated scores applied to the dataset (i.e. replaces the 1 in `X_rules` with the generated score). Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. y : PandasSeriesType The binary target column. sample_weight : PandasSeriesType, optional Row-wise weights to apply in the `scoring_class`. Defaults to None. Returns ------- PandasDataFrameType The generated scores applied to the dataset. """ self.fit(X_rules=X_rules, y=y, sample_weight=sample_weight) X_scores = self.transform(X_rules=X_rules) return X_scores
en
0.602884
Generates scores for each rule in a set. Scaling functions can also be applied to the scores. Generates rule scores using the rule binary columns and the target column. Parameters ---------- scoring_class : Union[PerformanceScorer, LogRegScorer, RandomForestScorer] The instantiated scoring class - this defines the method for generating the scores. Scoring classes are available in the `rule_scoring_methods` module. scaling_class : Union[MinMaxScaler, ConstantScaler], optional The instantiated scaling class - this defines the method for scaling the raw scores from the scoring class. Scaling classes are available in the `rule_score_scalers` module. Defaults to None. Attributes ---------- rule_scores : Dict[str, int] Contains the generated score (values) for each rule (keys). Generates rule scores using the rule binary columns and the binary target column. Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. y : PandasSeriesType The binary target column. sample_weight : PandasSeriesType, optional Row-wise weights to apply in the `scoring_class`. Defaults to None. Transforms the rule binary columns to show the generated scores applied to the dataset (i.e. replaces the 1 in `X_rules` with the generated score). Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. Returns ------- PandasDataFrameType The generated scores applied to the dataset. Generates rule scores using the rule binary columns and the binary target column, then transforms the rule binary columns to show the generated scores applied to the dataset (i.e. replaces the 1 in `X_rules` with the generated score). Parameters ---------- X_rules : PandasDataFrameType The rule binary columns. y : PandasSeriesType The binary target column. sample_weight : PandasSeriesType, optional Row-wise weights to apply in the `scoring_class`. Defaults to None. Returns ------- PandasDataFrameType The generated scores applied to the dataset.
3.289587
3
examples/deposit.py
kaurifinance/python-sdk
0
6621502
<reponame>kaurifinance/python-sdk from kauripay.processing import KauriPay api_key = '' api_secret = '' host = '' pay = KauriPay(api_key=api_key, api_secret=api_secret, host=host) def generate_crypto_deposit_address(cryptocurrency='BTC', deposit_email='<EMAIL>', callback_url='https://my_host.com/for_callbacks') -> str: """ Generates a new deposit address for chosen cryptocurrency with custom callback_url for notifications :param cryptocurrency: cryptocurrency name :param deposit_email: user's email :param callback_url: url for order's status notifications :return: str type new deposit address """ result = pay.generate_crypto_deposit_address(currency=cryptocurrency, deposit_email=deposit_email, callback_url=callback_url) address = result.get('address') return address def generate_crypto_deposit_address_with_payment_method(cryptocurrency='USDT', payment_method='ERC20', deposit_email='<EMAIL>', callback_url='https://my_host.com/for_callbacks') -> str: """ Generates a new deposit address for chosen cryptocurrency with custom callback_url for notifications :param cryptocurrency: cryptocurrency name :param deposit_email: user's email :param payment_method: you must specify this param if several blockchains are available for chosen currency. E.g. 
if currency == 'USDT' payment_method can be 'ERC20', 'TRC20, 'BEP20' :param callback_url: url for order's status notifications :return: str type new deposit address """ result = pay.generate_crypto_deposit_address(currency=cryptocurrency, deposit_email=deposit_email, payment_method=payment_method, callback_url=callback_url) address = result.get('address') return address def generate_crypto_address_with_currency_convert(base_cryptocurrency='BTC', deposit_email='<EMAIL>', currency_convert_to='USDT') -> str: """ Generates a new deposit address for chosen cryptocurrency with auto convert to desired currency :param base_cryptocurrency: cryptocurrency that will be sent to the deposit address and will be converted :param deposit_email: user's email :param currency_convert_to: cryptocurrency into which the funds will be converted :return: str type new deposit address """ result = pay.generate_crypto_deposit_address(currency=base_cryptocurrency, deposit_email=deposit_email, currency_convert_to=currency_convert_to) address = result.get('address') return address def generate_payment_link_with_amount_to_spend(currency='UAH', deposit_email='<EMAIL>', amount_to_spend='1000') -> str: """ Generates URL for deposit of chosen fiat currency. Chosen amount will be charged. Chosen amount_to_spend minus the fee will be deposited to account. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_spend: amount to deposit - the fee will be subtracted from it :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_spend=amount_to_spend) return result_url def generate_payment_link_with_amount_to_receive(currency='UAH', deposit_email='<EMAIL>', amount_to_receive='1000') -> str: """ Generates URL for deposit of chosen fiat currency. Chosen amount_to_receive plus the fee will be charged from the card. 
Chosen amount_to_receive will be deposited to account. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_receive=amount_to_receive) return result_url def generate_payment_link_with_redirect_urls(currency='UAH', deposit_email='<EMAIL>', amount_to_receive='1000', fail_url='https://my_host.com/fail_paid_url/', success_url='https://my_host.com/success_paid_url/', processing_url='https://my_host.com/processing_url/', ) -> str: """ Generates payment URL for deposit of chosen currency. Custom redirect URLs for fail and success cases are used. The general case processing_url is retained, to be used if the client doesn't want or can't use redirects for separate fail and success cases. 
:param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :param fail_url: redirect URL for when deposit fails :param success_url: redirect URL for when deposit succeeds :param processing_url: general case redirect URL to be used if the client doesn't want or can't use redirects for separate fail and success cases :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_receive=amount_to_receive, fail_url=fail_url, success_url=success_url, processing_url=processing_url) return result_url def generate_payment_link_with_custom_callback_url(currency='UAH', deposit_email='<EMAIL>', amount_to_receive='1000', callback_url='https://my_host.com/callback_url/', ) -> str: """ Generates payment URL for deposit of chosen currency. Custom callback URL is used for notifications :param currency: currency to deposit :param deposit_email: email of the user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :param callback_url: url for order's status notifications :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_receive=amount_to_receive, callback_url=callback_url) return result_url
from kauripay.processing import KauriPay api_key = '' api_secret = '' host = '' pay = KauriPay(api_key=api_key, api_secret=api_secret, host=host) def generate_crypto_deposit_address(cryptocurrency='BTC', deposit_email='<EMAIL>', callback_url='https://my_host.com/for_callbacks') -> str: """ Generates a new deposit address for chosen cryptocurrency with custom callback_url for notifications :param cryptocurrency: cryptocurrency name :param deposit_email: user's email :param callback_url: url for order's status notifications :return: str type new deposit address """ result = pay.generate_crypto_deposit_address(currency=cryptocurrency, deposit_email=deposit_email, callback_url=callback_url) address = result.get('address') return address def generate_crypto_deposit_address_with_payment_method(cryptocurrency='USDT', payment_method='ERC20', deposit_email='<EMAIL>', callback_url='https://my_host.com/for_callbacks') -> str: """ Generates a new deposit address for chosen cryptocurrency with custom callback_url for notifications :param cryptocurrency: cryptocurrency name :param deposit_email: user's email :param payment_method: you must specify this param if several blockchains are available for chosen currency. E.g. 
if currency == 'USDT' payment_method can be 'ERC20', 'TRC20, 'BEP20' :param callback_url: url for order's status notifications :return: str type new deposit address """ result = pay.generate_crypto_deposit_address(currency=cryptocurrency, deposit_email=deposit_email, payment_method=payment_method, callback_url=callback_url) address = result.get('address') return address def generate_crypto_address_with_currency_convert(base_cryptocurrency='BTC', deposit_email='<EMAIL>', currency_convert_to='USDT') -> str: """ Generates a new deposit address for chosen cryptocurrency with auto convert to desired currency :param base_cryptocurrency: cryptocurrency that will be sent to the deposit address and will be converted :param deposit_email: user's email :param currency_convert_to: cryptocurrency into which the funds will be converted :return: str type new deposit address """ result = pay.generate_crypto_deposit_address(currency=base_cryptocurrency, deposit_email=deposit_email, currency_convert_to=currency_convert_to) address = result.get('address') return address def generate_payment_link_with_amount_to_spend(currency='UAH', deposit_email='<EMAIL>', amount_to_spend='1000') -> str: """ Generates URL for deposit of chosen fiat currency. Chosen amount will be charged. Chosen amount_to_spend minus the fee will be deposited to account. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_spend: amount to deposit - the fee will be subtracted from it :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_spend=amount_to_spend) return result_url def generate_payment_link_with_amount_to_receive(currency='UAH', deposit_email='<EMAIL>', amount_to_receive='1000') -> str: """ Generates URL for deposit of chosen fiat currency. Chosen amount_to_receive plus the fee will be charged from the card. 
Chosen amount_to_receive will be deposited to account. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_receive=amount_to_receive) return result_url def generate_payment_link_with_redirect_urls(currency='UAH', deposit_email='<EMAIL>', amount_to_receive='1000', fail_url='https://my_host.com/fail_paid_url/', success_url='https://my_host.com/success_paid_url/', processing_url='https://my_host.com/processing_url/', ) -> str: """ Generates payment URL for deposit of chosen currency. Custom redirect URLs for fail and success cases are used. The general case processing_url is retained, to be used if the client doesn't want or can't use redirects for separate fail and success cases. 
:param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :param fail_url: redirect URL for when deposit fails :param success_url: redirect URL for when deposit succeeds :param processing_url: general case redirect URL to be used if the client doesn't want or can't use redirects for separate fail and success cases :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_receive=amount_to_receive, fail_url=fail_url, success_url=success_url, processing_url=processing_url) return result_url def generate_payment_link_with_custom_callback_url(currency='UAH', deposit_email='<EMAIL>', amount_to_receive='1000', callback_url='https://my_host.com/callback_url/', ) -> str: """ Generates payment URL for deposit of chosen currency. Custom callback URL is used for notifications :param currency: currency to deposit :param deposit_email: email of the user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :param callback_url: url for order's status notifications :return: URL for deposit payment page """ result_url = pay.generate_fiat_deposit_address(currency=currency, deposit_email=deposit_email, amount_to_receive=amount_to_receive, callback_url=callback_url) return result_url
en
0.832188
Generates a new deposit address for chosen cryptocurrency with custom callback_url for notifications :param cryptocurrency: cryptocurrency name :param deposit_email: user's email :param callback_url: url for order's status notifications :return: str type new deposit address Generates a new deposit address for chosen cryptocurrency with custom callback_url for notifications :param cryptocurrency: cryptocurrency name :param deposit_email: user's email :param payment_method: you must specify this param if several blockchains are available for chosen currency. E.g. if currency == 'USDT' payment_method can be 'ERC20', 'TRC20, 'BEP20' :param callback_url: url for order's status notifications :return: str type new deposit address Generates a new deposit address for chosen cryptocurrency with auto convert to desired currency :param base_cryptocurrency: cryptocurrency that will be sent to the deposit address and will be converted :param deposit_email: user's email :param currency_convert_to: cryptocurrency into which the funds will be converted :return: str type new deposit address Generates URL for deposit of chosen fiat currency. Chosen amount will be charged. Chosen amount_to_spend minus the fee will be deposited to account. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_spend: amount to deposit - the fee will be subtracted from it :return: URL for deposit payment page Generates URL for deposit of chosen fiat currency. Chosen amount_to_receive plus the fee will be charged from the card. Chosen amount_to_receive will be deposited to account. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :return: URL for deposit payment page Generates payment URL for deposit of chosen currency. 
Custom redirect URLs for fail and success cases are used. The general case processing_url is retained, to be used if the client doesn't want or can't use redirects for separate fail and success cases. :param currency: currency to deposit :param deposit_email: email of user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :param fail_url: redirect URL for when deposit fails :param success_url: redirect URL for when deposit succeeds :param processing_url: general case redirect URL to be used if the client doesn't want or can't use redirects for separate fail and success cases :return: URL for deposit payment page Generates payment URL for deposit of chosen currency. Custom callback URL is used for notifications :param currency: currency to deposit :param deposit_email: email of the user to whose account the deposit is made :param amount_to_receive: amount to deposit - the fee will NOT be subtracted from it, but charged separately from the card :param callback_url: url for order's status notifications :return: URL for deposit payment page
2.768093
3
services/arguments/arguments_service_base.py
ktodorov/eval-historical-texts
9
6621503
import argparse from typing import List, Dict from entities.custom_argument_parser import CustomArgumentParser from enums.evaluation_type import EvaluationType from enums.language import Language from enums.output_format import OutputFormat from enums.challenge import Challenge from enums.configuration import Configuration from enums.metric_type import MetricType from enums.experiment_type import ExperimentType class ArgumentsServiceBase: def __init__(self, raise_errors_on_invalid_args: bool = True): self._raise_errors_on_invalid_args = raise_errors_on_invalid_args self._arguments: argparse.Namespace = {} self._parse_arguments() def print_arguments(self): """Prints the arguments which the program was initialized with """ print(f'Arguments initialized: {self._arguments}') def get_configuration_name(self) -> str: result = f'{str(self.challenge)}-{str(self.language)}' if self.checkpoint_name is not None: result += f'-{str(self.checkpoint_name)}' return result def _parse_arguments(self): parser = CustomArgumentParser( raise_errors_on_invalid_args=self._raise_errors_on_invalid_args) self._add_base_arguments(parser) self._add_specific_arguments(parser) self._arguments: Dict[str, object] = vars(parser.parse_args()) self._validate_arguments(parser) def _add_specific_arguments(self, parser: argparse.ArgumentParser): pass def _add_base_arguments(self, parser: argparse.ArgumentParser): parser.add_argument('--epochs', default=500, type=int, help='max number of epochs') parser.add_argument('--eval-freq', default=50, type=int, help='evaluate every x batches') parser.add_argument('--batch-size', default=8, type=int, help='size of batches') parser.add_argument('--max-training-minutes', default=24 * 60, type=int, help='max mins of training before save-and-kill') parser.add_argument("--device", type=str, default='cuda', help="Device to be used. Pick from cpu/cuda. 
If default none is used automatic check will be done") parser.add_argument("--seed", type=int, default=42, metavar="S", help="random seed (default: 42)") parser.add_argument("--evaluate", action='store_true', help="run in evaluation mode") parser.add_argument("--patience", type=int, default=30, help="how long will the model wait for improvement before stopping training") parser.add_argument("--language", type=Language, choices=list(Language), default=Language.English, help="which language to train on") parser.add_argument("--shuffle", action='store_false', help="shuffle datasets while training") parser.add_argument("--learning-rate", type=float, default=1e-5, help="learning rate for training models") parser.add_argument("--weight-decay", type=float, default=1e-8, help="weight decay for optimizer. Default is `1e-8`") parser.add_argument("--momentum", type=float, default=0, help="momentum for optimizer. Default is `0`") parser.add_argument("--checkpoint-name", type=str, default=None, help="name that can be used to distinguish checkpoints") parser.add_argument("--resume-training", action='store_true', help="resume training using saved checkpoints") parser.add_argument("--resume-checkpoint-name", type=str, default=None, help="Checkpoint name that will be used to resume training from. If None is given, then current checkpoint name will be used. Default is `None`") parser.add_argument("--skip-best-metrics-on-resume", action='store_true', help="Whether to skip loading saved metrics and continuing from last best checkpoint. Default is `False`") parser.add_argument("--data-folder", type=str, default='data', help='folder where data will be taken from') parser.add_argument("--output-folder", type=str, default='results', help='folder where results and checkpoints will be saved') parser.add_argument('--checkpoint-folder', type=str, default=None, help='folder where checkpoints will be saved/loaded. 
If it is not provided, the output folder will be used') parser.add_argument('--evaluation-type', type=EvaluationType, choices=list(EvaluationType), nargs='*', help='what type of evaluations should be performed') parser.add_argument('--output-eval-format', type=OutputFormat, choices=list(OutputFormat), help='what the format of the output after evaluation will be') parser.add_argument("--challenge", type=Challenge, choices=list(Challenge), help='Optional challenge that the model is being trained for. If given, data and output results will be put into a specific folder') parser.add_argument('--configuration', type=Configuration, choices=list(Configuration), default=Configuration.KBert, help='Which configuration of model to load and use. Default is kbert') parser.add_argument('--metric-types', type=MetricType, choices=list(MetricType), default=MetricType.JaccardSimilarity, nargs='*', help='What metrics should be calculated. Default is only Jaccard similarity') parser.add_argument('--joint-model', action='store_true', help='If a joint model should be used instead of a single one') parser.add_argument('--joint-model-amount', type=int, default=2, help='How many models should be trained jointly') parser.add_argument('--enable-external-logging', action='store_true', help='Should logging to external service be enabled') parser.add_argument('--train-dataset-limit-size', type=int, default=None, help='Limit the train dataset. By default no limit is done.') parser.add_argument('--validation-dataset-limit-size', type=int, default=None, help='Limit the validation dataset. By default no limit is done.') parser.add_argument('--skip-validation', action='store_true', help='Whether validation should be skipped, meaning no validation dataset is loaded and no evaluation is done while training. 
By default is false') parser.add_argument('--run-experiments', action='store_true', help='Whether to run experiments instead of training or evaluation') parser.add_argument('--experiment-types', type=ExperimentType, choices=list(ExperimentType), default=None, nargs='*', help='What types of experiments should be run') parser.add_argument('--reset-training-on-early-stop', action='store_true', help='Whether resetting of training should be done if early stopping is activated and the first epoch has not yet been finished') parser.add_argument('--resets-limit', type=int, default=1, help='How many times should the training be reset during first epoch if early stopping is activated. Default is 1') parser.add_argument('--training-reset-epoch-limit', type=int, default=1, help='Until which epoch the training reset should be performed. Default is 1') parser.add_argument('--save-checkpoint-on-crash', action='store_true', help='If this is set to true, then in the event of an exception or crash of the program, the model\'s checkpoint will be saved to the file system. Default is `False`') parser.add_argument('--save-checkpoint-on-finish', action='store_true', help='If this is set to true, then when the model has converged, its checkpoint will be saved to the file system. Keep in mind that this will not be the best model checkpoint as the stopping will occur after some amount of iterations without any improvement. 
Default is `False`') def _validate_arguments(self, parser: argparse.ArgumentParser): pass def _get_argument(self, key: str) -> object: """Returns an argument value from the list of registered arguments :param key: key of the argument :type key: str :raises LookupError: if no argument is found, lookup error will be raised :return: the argument value :rtype: object """ if key not in self._arguments.keys(): raise LookupError(f'{key} not found in arguments') return self._arguments[key] @property def epochs(self) -> int: return self._get_argument('epochs') @property def eval_freq(self) -> int: return self._get_argument('eval_freq') @property def batch_size(self) -> int: return self._get_argument('batch_size') @property def max_training_minutes(self) -> int: return self._get_argument('max_training_minutes') @property def device(self) -> str: return self._get_argument('device') @property def seed(self) -> int: return self._get_argument('seed') @property def evaluate(self) -> bool: return self._get_argument('evaluate') @property def patience(self) -> int: return self._get_argument('patience') @property def language(self) -> Language: return self._get_argument('language') @property def shuffle(self) -> bool: return self._get_argument('shuffle') @property def learning_rate(self) -> float: return self._get_argument('learning_rate') @property def momentum(self) -> float: return self._get_argument('momentum') @property def weight_decay(self) -> float: return self._get_argument('weight_decay') @property def checkpoint_name(self) -> str: return self._get_argument('checkpoint_name') @property def resume_training(self) -> bool: return self._get_argument('resume_training') @property def resume_checkpoint_name(self) -> str: return self._get_argument('resume_checkpoint_name') @property def skip_best_metrics_on_resume(self) -> bool: return self._get_argument('skip_best_metrics_on_resume') @property def data_folder(self) -> str: return self._get_argument('data_folder') @property def 
output_folder(self) -> str: return self._get_argument('output_folder') @property def checkpoint_folder(self) -> str: return self._get_argument('checkpoint_folder') @property def evaluation_type(self) -> List[EvaluationType]: return self._get_argument('evaluation_type') @property def output_eval_format(self) -> OutputFormat: return self._get_argument('output_eval_format') @property def challenge(self) -> Challenge: return self._get_argument('challenge') @property def configuration(self) -> Configuration: return self._get_argument('configuration') @property def metric_types(self) -> List[MetricType]: return self._get_argument('metric_types') @property def train_dataset_limit_size(self) -> int: return self._get_argument('train_dataset_limit_size') @property def validation_dataset_limit_size(self) -> int: return self._get_argument('validation_dataset_limit_size') @property def joint_model(self) -> bool: return self._get_argument('joint_model') @property def joint_model_amount(self) -> int: return self._get_argument('joint_model_amount') @property def enable_external_logging(self) -> bool: return self._get_argument('enable_external_logging') @property def skip_validation(self) -> bool: return self._get_argument('skip_validation') @property def run_experiments(self) -> bool: return self._get_argument('run_experiments') @property def experiment_types(self) -> List[ExperimentType]: return self._get_argument('experiment_types') @property def reset_training_on_early_stop(self) -> bool: return self._get_argument('reset_training_on_early_stop') @property def resets_limit(self) -> int: return self._get_argument('resets_limit') @property def training_reset_epoch_limit(self) -> int: return self._get_argument('training_reset_epoch_limit') @property def save_checkpoint_on_crash(self) -> bool: return self._get_argument('save_checkpoint_on_crash') @property def save_checkpoint_on_finish(self) -> bool: return self._get_argument('save_checkpoint_on_finish')
import argparse from typing import List, Dict from entities.custom_argument_parser import CustomArgumentParser from enums.evaluation_type import EvaluationType from enums.language import Language from enums.output_format import OutputFormat from enums.challenge import Challenge from enums.configuration import Configuration from enums.metric_type import MetricType from enums.experiment_type import ExperimentType class ArgumentsServiceBase: def __init__(self, raise_errors_on_invalid_args: bool = True): self._raise_errors_on_invalid_args = raise_errors_on_invalid_args self._arguments: argparse.Namespace = {} self._parse_arguments() def print_arguments(self): """Prints the arguments which the program was initialized with """ print(f'Arguments initialized: {self._arguments}') def get_configuration_name(self) -> str: result = f'{str(self.challenge)}-{str(self.language)}' if self.checkpoint_name is not None: result += f'-{str(self.checkpoint_name)}' return result def _parse_arguments(self): parser = CustomArgumentParser( raise_errors_on_invalid_args=self._raise_errors_on_invalid_args) self._add_base_arguments(parser) self._add_specific_arguments(parser) self._arguments: Dict[str, object] = vars(parser.parse_args()) self._validate_arguments(parser) def _add_specific_arguments(self, parser: argparse.ArgumentParser): pass def _add_base_arguments(self, parser: argparse.ArgumentParser): parser.add_argument('--epochs', default=500, type=int, help='max number of epochs') parser.add_argument('--eval-freq', default=50, type=int, help='evaluate every x batches') parser.add_argument('--batch-size', default=8, type=int, help='size of batches') parser.add_argument('--max-training-minutes', default=24 * 60, type=int, help='max mins of training before save-and-kill') parser.add_argument("--device", type=str, default='cuda', help="Device to be used. Pick from cpu/cuda. 
If default none is used automatic check will be done") parser.add_argument("--seed", type=int, default=42, metavar="S", help="random seed (default: 42)") parser.add_argument("--evaluate", action='store_true', help="run in evaluation mode") parser.add_argument("--patience", type=int, default=30, help="how long will the model wait for improvement before stopping training") parser.add_argument("--language", type=Language, choices=list(Language), default=Language.English, help="which language to train on") parser.add_argument("--shuffle", action='store_false', help="shuffle datasets while training") parser.add_argument("--learning-rate", type=float, default=1e-5, help="learning rate for training models") parser.add_argument("--weight-decay", type=float, default=1e-8, help="weight decay for optimizer. Default is `1e-8`") parser.add_argument("--momentum", type=float, default=0, help="momentum for optimizer. Default is `0`") parser.add_argument("--checkpoint-name", type=str, default=None, help="name that can be used to distinguish checkpoints") parser.add_argument("--resume-training", action='store_true', help="resume training using saved checkpoints") parser.add_argument("--resume-checkpoint-name", type=str, default=None, help="Checkpoint name that will be used to resume training from. If None is given, then current checkpoint name will be used. Default is `None`") parser.add_argument("--skip-best-metrics-on-resume", action='store_true', help="Whether to skip loading saved metrics and continuing from last best checkpoint. Default is `False`") parser.add_argument("--data-folder", type=str, default='data', help='folder where data will be taken from') parser.add_argument("--output-folder", type=str, default='results', help='folder where results and checkpoints will be saved') parser.add_argument('--checkpoint-folder', type=str, default=None, help='folder where checkpoints will be saved/loaded. 
If it is not provided, the output folder will be used') parser.add_argument('--evaluation-type', type=EvaluationType, choices=list(EvaluationType), nargs='*', help='what type of evaluations should be performed') parser.add_argument('--output-eval-format', type=OutputFormat, choices=list(OutputFormat), help='what the format of the output after evaluation will be') parser.add_argument("--challenge", type=Challenge, choices=list(Challenge), help='Optional challenge that the model is being trained for. If given, data and output results will be put into a specific folder') parser.add_argument('--configuration', type=Configuration, choices=list(Configuration), default=Configuration.KBert, help='Which configuration of model to load and use. Default is kbert') parser.add_argument('--metric-types', type=MetricType, choices=list(MetricType), default=MetricType.JaccardSimilarity, nargs='*', help='What metrics should be calculated. Default is only Jaccard similarity') parser.add_argument('--joint-model', action='store_true', help='If a joint model should be used instead of a single one') parser.add_argument('--joint-model-amount', type=int, default=2, help='How many models should be trained jointly') parser.add_argument('--enable-external-logging', action='store_true', help='Should logging to external service be enabled') parser.add_argument('--train-dataset-limit-size', type=int, default=None, help='Limit the train dataset. By default no limit is done.') parser.add_argument('--validation-dataset-limit-size', type=int, default=None, help='Limit the validation dataset. By default no limit is done.') parser.add_argument('--skip-validation', action='store_true', help='Whether validation should be skipped, meaning no validation dataset is loaded and no evaluation is done while training. 
By default is false') parser.add_argument('--run-experiments', action='store_true', help='Whether to run experiments instead of training or evaluation') parser.add_argument('--experiment-types', type=ExperimentType, choices=list(ExperimentType), default=None, nargs='*', help='What types of experiments should be run') parser.add_argument('--reset-training-on-early-stop', action='store_true', help='Whether resetting of training should be done if early stopping is activated and the first epoch has not yet been finished') parser.add_argument('--resets-limit', type=int, default=1, help='How many times should the training be reset during first epoch if early stopping is activated. Default is 1') parser.add_argument('--training-reset-epoch-limit', type=int, default=1, help='Until which epoch the training reset should be performed. Default is 1') parser.add_argument('--save-checkpoint-on-crash', action='store_true', help='If this is set to true, then in the event of an exception or crash of the program, the model\'s checkpoint will be saved to the file system. Default is `False`') parser.add_argument('--save-checkpoint-on-finish', action='store_true', help='If this is set to true, then when the model has converged, its checkpoint will be saved to the file system. Keep in mind that this will not be the best model checkpoint as the stopping will occur after some amount of iterations without any improvement. 
Default is `False`') def _validate_arguments(self, parser: argparse.ArgumentParser): pass def _get_argument(self, key: str) -> object: """Returns an argument value from the list of registered arguments :param key: key of the argument :type key: str :raises LookupError: if no argument is found, lookup error will be raised :return: the argument value :rtype: object """ if key not in self._arguments.keys(): raise LookupError(f'{key} not found in arguments') return self._arguments[key] @property def epochs(self) -> int: return self._get_argument('epochs') @property def eval_freq(self) -> int: return self._get_argument('eval_freq') @property def batch_size(self) -> int: return self._get_argument('batch_size') @property def max_training_minutes(self) -> int: return self._get_argument('max_training_minutes') @property def device(self) -> str: return self._get_argument('device') @property def seed(self) -> int: return self._get_argument('seed') @property def evaluate(self) -> bool: return self._get_argument('evaluate') @property def patience(self) -> int: return self._get_argument('patience') @property def language(self) -> Language: return self._get_argument('language') @property def shuffle(self) -> bool: return self._get_argument('shuffle') @property def learning_rate(self) -> float: return self._get_argument('learning_rate') @property def momentum(self) -> float: return self._get_argument('momentum') @property def weight_decay(self) -> float: return self._get_argument('weight_decay') @property def checkpoint_name(self) -> str: return self._get_argument('checkpoint_name') @property def resume_training(self) -> bool: return self._get_argument('resume_training') @property def resume_checkpoint_name(self) -> str: return self._get_argument('resume_checkpoint_name') @property def skip_best_metrics_on_resume(self) -> bool: return self._get_argument('skip_best_metrics_on_resume') @property def data_folder(self) -> str: return self._get_argument('data_folder') @property def 
output_folder(self) -> str: return self._get_argument('output_folder') @property def checkpoint_folder(self) -> str: return self._get_argument('checkpoint_folder') @property def evaluation_type(self) -> List[EvaluationType]: return self._get_argument('evaluation_type') @property def output_eval_format(self) -> OutputFormat: return self._get_argument('output_eval_format') @property def challenge(self) -> Challenge: return self._get_argument('challenge') @property def configuration(self) -> Configuration: return self._get_argument('configuration') @property def metric_types(self) -> List[MetricType]: return self._get_argument('metric_types') @property def train_dataset_limit_size(self) -> int: return self._get_argument('train_dataset_limit_size') @property def validation_dataset_limit_size(self) -> int: return self._get_argument('validation_dataset_limit_size') @property def joint_model(self) -> bool: return self._get_argument('joint_model') @property def joint_model_amount(self) -> int: return self._get_argument('joint_model_amount') @property def enable_external_logging(self) -> bool: return self._get_argument('enable_external_logging') @property def skip_validation(self) -> bool: return self._get_argument('skip_validation') @property def run_experiments(self) -> bool: return self._get_argument('run_experiments') @property def experiment_types(self) -> List[ExperimentType]: return self._get_argument('experiment_types') @property def reset_training_on_early_stop(self) -> bool: return self._get_argument('reset_training_on_early_stop') @property def resets_limit(self) -> int: return self._get_argument('resets_limit') @property def training_reset_epoch_limit(self) -> int: return self._get_argument('training_reset_epoch_limit') @property def save_checkpoint_on_crash(self) -> bool: return self._get_argument('save_checkpoint_on_crash') @property def save_checkpoint_on_finish(self) -> bool: return self._get_argument('save_checkpoint_on_finish')
en
0.619166
Prints the arguments which the program was initialized with Returns an argument value from the list of registered arguments :param key: key of the argument :type key: str :raises LookupError: if no argument is found, lookup error will be raised :return: the argument value :rtype: object
2.3475
2
algorithm/deep_learning/gradient3.py
kake777/python_sample
0
6621504
#勾配のグラフ import numpy as np import matplotlib.pylab as plt from mpl_toolkits.mplot3d import Axes3D from common_function import numerical_gradient, function_2 def tangent_line(f, x): d = numerical_gradient(f, x) print(d) y = f(x) - d*x return lambda t: d*t + y if __name__ == '__main__': x0 = np.arange(-2, 2.5, 0.25) x1 = np.arange(-2, 2.5, 0.25) X, Y = np.meshgrid(x0, x1) X = X.flatten() Y = Y.flatten() grad = numerical_gradient(function_2, np.array([X, Y]).T).T plt.figure() plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666") plt.xlim([-2, 2]) plt.ylim([-2, 2]) plt.xlabel('x0') plt.ylabel('x1') plt.grid() plt.draw() plt.show()
#勾配のグラフ import numpy as np import matplotlib.pylab as plt from mpl_toolkits.mplot3d import Axes3D from common_function import numerical_gradient, function_2 def tangent_line(f, x): d = numerical_gradient(f, x) print(d) y = f(x) - d*x return lambda t: d*t + y if __name__ == '__main__': x0 = np.arange(-2, 2.5, 0.25) x1 = np.arange(-2, 2.5, 0.25) X, Y = np.meshgrid(x0, x1) X = X.flatten() Y = Y.flatten() grad = numerical_gradient(function_2, np.array([X, Y]).T).T plt.figure() plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666") plt.xlim([-2, 2]) plt.ylim([-2, 2]) plt.xlabel('x0') plt.ylabel('x1') plt.grid() plt.draw() plt.show()
none
1
3.316455
3
pun/first/config.py
Unviray/pun
2
6621505
# Parent path to search for punfile.py PARENT_LEN = 2
# Parent path to search for punfile.py PARENT_LEN = 2
en
0.732154
# Parent path to search for punfile.py
1.15729
1
drf_auth/exceptions.py
TheArtling/django-drf-auth
0
6621506
"""Exceptions for the drf_auth app.""" class AccessTokenException(Exception): # pragma: nocover """ Raised when we could not get an app access token from Facebook. This can happen when our settings are wrong. Facebook will return a 400 response, but we don't want to show this to the user, because it is our own fault. We will raise this exception instead. """ def __init__(self, message=None): if message is None: message = ( 'Could not obtain access token from Facebook. Check your' ' settings for FACEBOOK_APP_ID and FACEBOOK_APP_SECRET') super(AccessTokenException, self).__init__(message) class FacebookLoginException(Exception): # pragma: nocover """ Raised when, despite all checks, we could authenticate the facebook_user. This should never happen. """ def __init__(self, message=None): if message is None: message = ( 'Unable to authenticate facebook_user. This should never' ' happen.') super(FacebookLoginException, self).__init__(message)
"""Exceptions for the drf_auth app.""" class AccessTokenException(Exception): # pragma: nocover """ Raised when we could not get an app access token from Facebook. This can happen when our settings are wrong. Facebook will return a 400 response, but we don't want to show this to the user, because it is our own fault. We will raise this exception instead. """ def __init__(self, message=None): if message is None: message = ( 'Could not obtain access token from Facebook. Check your' ' settings for FACEBOOK_APP_ID and FACEBOOK_APP_SECRET') super(AccessTokenException, self).__init__(message) class FacebookLoginException(Exception): # pragma: nocover """ Raised when, despite all checks, we could authenticate the facebook_user. This should never happen. """ def __init__(self, message=None): if message is None: message = ( 'Unable to authenticate facebook_user. This should never' ' happen.') super(FacebookLoginException, self).__init__(message)
en
0.868615
Exceptions for the drf_auth app. # pragma: nocover Raised when we could not get an app access token from Facebook. This can happen when our settings are wrong. Facebook will return a 400 response, but we don't want to show this to the user, because it is our own fault. We will raise this exception instead. # pragma: nocover Raised when, despite all checks, we could authenticate the facebook_user. This should never happen.
3.073008
3
lyapy/systems/system.py
vdorobantu/lyapy
36
6621507
<reponame>vdorobantu/lyapy<filename>lyapy/systems/system.py<gh_stars>10-100 """Base class for dynamical systems of the form x_dot = f(t, x).""" from scipy.integrate import solve_ivp class System: """Base class for dynamical systems of the form x_dot = f(t, x). Override dx. Let n be number of states. """ def dx(self, t, x): """Evaluate state derivative at a time and state. Outputs a numpy array (n,). Inputs: Time, t: float State, x: numpy array (n,) """ pass def simulate(self, x_0, t_eval, rtol=1e-6, atol=1e-6): """Simulate closed-loop system using Runge-Kutta 4,5. Solution is evaluated at N time steps. Outputs times and corresponding solutions as numpy array (N,) * numpy array (N, n). Inputs: Initial condition, x_0: numpy array (n,) Solution times, t_eval: numpy array (N,) RK45 relative tolerance, rtol: float RK45 absolute tolerance, atol: float """ t_span = [t_eval[0], t_eval[-1]] sol = solve_ivp(self.dx, t_span, x_0, t_eval=t_eval, rtol=rtol, atol=atol) return sol.t, sol.y.T
"""Base class for dynamical systems of the form x_dot = f(t, x).""" from scipy.integrate import solve_ivp class System: """Base class for dynamical systems of the form x_dot = f(t, x). Override dx. Let n be number of states. """ def dx(self, t, x): """Evaluate state derivative at a time and state. Outputs a numpy array (n,). Inputs: Time, t: float State, x: numpy array (n,) """ pass def simulate(self, x_0, t_eval, rtol=1e-6, atol=1e-6): """Simulate closed-loop system using Runge-Kutta 4,5. Solution is evaluated at N time steps. Outputs times and corresponding solutions as numpy array (N,) * numpy array (N, n). Inputs: Initial condition, x_0: numpy array (n,) Solution times, t_eval: numpy array (N,) RK45 relative tolerance, rtol: float RK45 absolute tolerance, atol: float """ t_span = [t_eval[0], t_eval[-1]] sol = solve_ivp(self.dx, t_span, x_0, t_eval=t_eval, rtol=rtol, atol=atol) return sol.t, sol.y.T
en
0.634002
Base class for dynamical systems of the form x_dot = f(t, x). Base class for dynamical systems of the form x_dot = f(t, x). Override dx. Let n be number of states. Evaluate state derivative at a time and state. Outputs a numpy array (n,). Inputs: Time, t: float State, x: numpy array (n,) Simulate closed-loop system using Runge-Kutta 4,5. Solution is evaluated at N time steps. Outputs times and corresponding solutions as numpy array (N,) * numpy array (N, n). Inputs: Initial condition, x_0: numpy array (n,) Solution times, t_eval: numpy array (N,) RK45 relative tolerance, rtol: float RK45 absolute tolerance, atol: float
3.518088
4
terkey.py
djunekz/terkey
0
6621508
import os from time import sleep a ='\033[92m' b ='\033[91m' c ='\033[0m' def setup(): try: os.mkdir('/data/data/com.termux/files/home/.termux') except: pass key = "extra-keys = [['ESC','/','-','HOME','UP','END','PGUP'],['TAB','CTRL','ALT','LEFT','DOWN','RIGHT','PGDN']]" open('/data/data/com.termux/files/home/.termux/termux.properties','w').write(key) os.system('termux-reload-settings') def banner(): os.system('clear') print(a+'Tombol Shortcut buat para newbie'.center(40)) print(b+'Djunekz'.center(40)) print("".join([i for i in "\n"*3])) if __name__=='__main__': banner() from threading import Thread as td t = td(target=setup) t.start() while t.is_alive(): for i in "-\|/-\|/": print('\rPlease wait '+i+' ',end="",flush=True) sleep(0.1) banner() print(c+'Silahkan lihat-lihat script lainnya '+a+'https://github.com/djunekz'+c+' jika ada yang mau di bicarakan terkait tool ini, bisnis atau sekedar bertanya kabar. \nTerimakasih ^_^') # ini cuma shortcut buat bantu para newbie # D J U N E K Z
import os from time import sleep a ='\033[92m' b ='\033[91m' c ='\033[0m' def setup(): try: os.mkdir('/data/data/com.termux/files/home/.termux') except: pass key = "extra-keys = [['ESC','/','-','HOME','UP','END','PGUP'],['TAB','CTRL','ALT','LEFT','DOWN','RIGHT','PGDN']]" open('/data/data/com.termux/files/home/.termux/termux.properties','w').write(key) os.system('termux-reload-settings') def banner(): os.system('clear') print(a+'Tombol Shortcut buat para newbie'.center(40)) print(b+'Djunekz'.center(40)) print("".join([i for i in "\n"*3])) if __name__=='__main__': banner() from threading import Thread as td t = td(target=setup) t.start() while t.is_alive(): for i in "-\|/-\|/": print('\rPlease wait '+i+' ',end="",flush=True) sleep(0.1) banner() print(c+'Silahkan lihat-lihat script lainnya '+a+'https://github.com/djunekz'+c+' jika ada yang mau di bicarakan terkait tool ini, bisnis atau sekedar bertanya kabar. \nTerimakasih ^_^') # ini cuma shortcut buat bantu para newbie # D J U N E K Z
id
0.511568
# ini cuma shortcut buat bantu para newbie # D J U N E K Z
2.011941
2
src/easy/real-fake/solutions/python/solution.py
rdtsc/codeeval-solutions
0
6621509
#!/usr/bin/env python3 import sys for line in sys.stdin: digits = [int(c) for c in line if c.isdigit()] tally = sum(n * 2 for n in digits[::2]) + sum(digits[1::2]) print('Fake' if tally % 10 else 'Real')
#!/usr/bin/env python3 import sys for line in sys.stdin: digits = [int(c) for c in line if c.isdigit()] tally = sum(n * 2 for n in digits[::2]) + sum(digits[1::2]) print('Fake' if tally % 10 else 'Real')
fr
0.221828
#!/usr/bin/env python3
3.334594
3
poly/app.py
entangle2giraffe/scikitlearn_demo
0
6621510
import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error from sklearn.preprocessing import PolynomialFeatures import sys # Import the local module # The module import pandas and slicing dataset # for specific country of the input of variable 'coa' from util import country, des, instance # Read the table coa = input("Country:") # Store the variable in cdf cdf = country(coa) # Separate 80% for train dataset and rest as test dataset msk = np.random.rand(len(cdf)) < 0.8 train = cdf[msk] test = cdf[~msk] # Import Linear Regression model regr = LinearRegression() # Prompt the user for degree degree = input("Degree(only int):") deg_int = int(degree) # Train dataset # Convert List -> Array train_x = np.asanyarray(train[["Year"]]) train_y = np.asanyarray(train[["Value"]]) # Test dataset test_x = np.asanyarray(test[["Year"]]) test_y = np.asanyarray(test[["Value"]]) # Transform x # Polynomial poly = PolynomialFeatures(deg_int) train_x_poly = poly.fit_transform(train_x) # Learning train_y_ = regr.fit(train_x_poly, train_y) des(coa, deg_int) print(f"Coefficient: ", regr.coef_) print(f"Intercept: ", regr.intercept_) # Initialize x dimension XX = np.arange(1950, 2011, 60/7) def f(deg, arr): const = regr.intercept_[0] if deg <= 0: return 0 else: yy = regr.coef_[0][deg] * np.power(arr, deg) + f(deg - 1, arr) return yy + const func = f(deg_int, XX) plt.plot(XX, func, '-r') plt.xlabel("Year") plt.ylabel("Value") plt.show() test_x_poly = poly.fit_transform(test_x) test_y_ = regr.predict(test_x_poly) # Plot plt.show() # Accuracy print("MAE: ", mean_absolute_error(test_y_, test_y)) print("MSE: ", mean_squared_error(test_y_, test_y)) print("R2: ", r2_score(test_y, test_y))
import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error from sklearn.preprocessing import PolynomialFeatures import sys # Import the local module # The module import pandas and slicing dataset # for specific country of the input of variable 'coa' from util import country, des, instance # Read the table coa = input("Country:") # Store the variable in cdf cdf = country(coa) # Separate 80% for train dataset and rest as test dataset msk = np.random.rand(len(cdf)) < 0.8 train = cdf[msk] test = cdf[~msk] # Import Linear Regression model regr = LinearRegression() # Prompt the user for degree degree = input("Degree(only int):") deg_int = int(degree) # Train dataset # Convert List -> Array train_x = np.asanyarray(train[["Year"]]) train_y = np.asanyarray(train[["Value"]]) # Test dataset test_x = np.asanyarray(test[["Year"]]) test_y = np.asanyarray(test[["Value"]]) # Transform x # Polynomial poly = PolynomialFeatures(deg_int) train_x_poly = poly.fit_transform(train_x) # Learning train_y_ = regr.fit(train_x_poly, train_y) des(coa, deg_int) print(f"Coefficient: ", regr.coef_) print(f"Intercept: ", regr.intercept_) # Initialize x dimension XX = np.arange(1950, 2011, 60/7) def f(deg, arr): const = regr.intercept_[0] if deg <= 0: return 0 else: yy = regr.coef_[0][deg] * np.power(arr, deg) + f(deg - 1, arr) return yy + const func = f(deg_int, XX) plt.plot(XX, func, '-r') plt.xlabel("Year") plt.ylabel("Value") plt.show() test_x_poly = poly.fit_transform(test_x) test_y_ = regr.predict(test_x_poly) # Plot plt.show() # Accuracy print("MAE: ", mean_absolute_error(test_y_, test_y)) print("MSE: ", mean_squared_error(test_y_, test_y)) print("R2: ", r2_score(test_y, test_y))
en
0.578102
# Import the local module # The module import pandas and slicing dataset # for specific country of the input of variable 'coa' # Read the table # Store the variable in cdf # Separate 80% for train dataset and rest as test dataset # Import Linear Regression model # Prompt the user for degree # Train dataset # Convert List -> Array # Test dataset # Transform x # Polynomial # Learning # Initialize x dimension # Plot # Accuracy
3.706198
4
pyds8k/exceptions.py
27149chen/pyds8k
7
6621511
<filename>pyds8k/exceptions.py ############################################################################## # Copyright 2019 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """ Exception definitions. """ from pyds8k.utils import get_subclasses, \ get_response_parser_class from pyds8k import messages class BaseRestError(Exception): pass class InvalidArgumentError(Exception): def __init__(self, reason): self.reason = reason def __str__(self): return messages.INVALID_ARGUMENT.format( self.reason ) class OperationNotAllowed(Exception): """ The operation performed on the resource is not allowed. """ def __init__(self, operation_name, resource_name=''): self.operation_name = operation_name self.resource_name = resource_name def __str__(self): return messages.OPERATION_NOT_ALLOWED.format( self.operation_name, self.resource_name ) class URLNotSpecifiedError(Exception): """ The URL is not specified. """ def __str__(self): return messages.URL_NOT_SPECIFIED class URLMissingError(Exception): """ The URL is missing. """ def __str__(self): return messages.URL_MISSING class IDMissingError(Exception): """ The id field is missing or None. """ def __str__(self): return messages.ID_MISSING class ResponseBodyMissingError(Exception): """ The response body is missing. 
""" def __str__(self): return messages.RESPONSE_BODY_MISSING class URLParseError(Exception): """ Can not get the URL """ def __str__(self): return messages.CAN_NOT_GET_URL class RepresentationParseError(Exception): """ Can not get the representation """ def __str__(self): return messages.CAN_NOT_GET_REPRESENTATION class FieldReadOnly(Exception): """ Field is read only. """ def __init__(self, field_name): self.field_name = field_name def __str__(self): return messages.FIELD_READONLY.format(self.field_name) class ConnectionError(Exception): """ Could not open a connection to the API service. """ pass class Timeout(Exception): """ The request timed out. """ def __init__(self, url): self.url = url def __str__(self): return messages.REQUEST_TIMED_OUT.format(self.url) class ClientException(Exception): """ The base exception class for all HTTP client or server errors. """ def __init__(self, code, message=None, detail='', origin_data=None): self.code = code self.message = message self.detail = detail self.error_data = origin_data if self.message and self.detail: self.details = '[{}] {}'.format(self.message, self.detail) elif self.message or self.detail: self.details = self.message or self.detail else: self.details = '' def __str__(self): return "HTTP {0} {1}. {2}".format( self.code, self.reason_phrase, self.details ) class ClientError(ClientException): """ HTTP 4xx - Client Error """ status_code = '4xx' reason_phrase = "Client Error" class ServerError(ClientException): """ HTTP 5xx - Server Error """ status_code = '5xx' reason_phrase = "Server Error" class BadRequest(ClientError): """ HTTP 400 - Bad request: you sent some malformed data. """ status_code = '400' reason_phrase = "Bad Request" class Unauthorized(ClientError): """ HTTP 401 - Unauthorized: bad credentials. """ status_code = '401' reason_phrase = "Unauthorized" class Forbidden(ClientError): """ HTTP 403 - Forbidden: your credentials don't give you access to this resource. 
""" status_code = '403' reason_phrase = "Forbidden" class NotFound(ClientError): """ HTTP 404 - Not found """ status_code = '404' reason_phrase = "Not Found" class MethodNotAllowed(ClientError): """ HTTP 405 - Method Not Allowed """ status_code = '405' reason_phrase = "Method Not Allowed" class Conflict(ClientError): """ HTTP 409 - Conflict """ status_code = '409' reason_phrase = "Conflict" class UnsupportedMediaType(ClientError): """ HTTP 415 - Unsupported Media Type """ status_code = '415' reason_phrase = "Unsupported Media Type" class InternalServerError(ServerError): """ HTTP 500 - Internal Server Error: The server encountered an unexpected condition which prevented it from fulfilling the request. """ status_code = '500' reason_phrase = "Internal Server Error" class ServiceUnavailable(ServerError): """ HTTP 503 - Service Unavailable """ status_code = '503' reason_phrase = "Service Unavailable" class GatewayTimeout(ServerError): """ HTTP 504 - Gateway Timeout """ status_code = '504' reason_phrase = "Gateway Timeout" _error_dict = dict((c.status_code, c) for c in get_subclasses(ClientException)) def raise_error(response, body, service_type=''): """ Return an instance of an ClientException or subclass based on an requests response. """ ResponseParser = get_response_parser_class(service_type) cls = _error_dict.get(str(response.status_code), ClientException) if body: res_p = ResponseParser(body) message = res_p.get_error_code() details = res_p.get_error_msg() data = res_p.get_status_body() return cls(code=response.status_code, message=message, detail=details, origin_data=data ) else: return cls(code=response.status_code, message=response.reason, origin_data=body )
<filename>pyds8k/exceptions.py ############################################################################## # Copyright 2019 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """ Exception definitions. """ from pyds8k.utils import get_subclasses, \ get_response_parser_class from pyds8k import messages class BaseRestError(Exception): pass class InvalidArgumentError(Exception): def __init__(self, reason): self.reason = reason def __str__(self): return messages.INVALID_ARGUMENT.format( self.reason ) class OperationNotAllowed(Exception): """ The operation performed on the resource is not allowed. """ def __init__(self, operation_name, resource_name=''): self.operation_name = operation_name self.resource_name = resource_name def __str__(self): return messages.OPERATION_NOT_ALLOWED.format( self.operation_name, self.resource_name ) class URLNotSpecifiedError(Exception): """ The URL is not specified. """ def __str__(self): return messages.URL_NOT_SPECIFIED class URLMissingError(Exception): """ The URL is missing. """ def __str__(self): return messages.URL_MISSING class IDMissingError(Exception): """ The id field is missing or None. """ def __str__(self): return messages.ID_MISSING class ResponseBodyMissingError(Exception): """ The response body is missing. 
""" def __str__(self): return messages.RESPONSE_BODY_MISSING class URLParseError(Exception): """ Can not get the URL """ def __str__(self): return messages.CAN_NOT_GET_URL class RepresentationParseError(Exception): """ Can not get the representation """ def __str__(self): return messages.CAN_NOT_GET_REPRESENTATION class FieldReadOnly(Exception): """ Field is read only. """ def __init__(self, field_name): self.field_name = field_name def __str__(self): return messages.FIELD_READONLY.format(self.field_name) class ConnectionError(Exception): """ Could not open a connection to the API service. """ pass class Timeout(Exception): """ The request timed out. """ def __init__(self, url): self.url = url def __str__(self): return messages.REQUEST_TIMED_OUT.format(self.url) class ClientException(Exception): """ The base exception class for all HTTP client or server errors. """ def __init__(self, code, message=None, detail='', origin_data=None): self.code = code self.message = message self.detail = detail self.error_data = origin_data if self.message and self.detail: self.details = '[{}] {}'.format(self.message, self.detail) elif self.message or self.detail: self.details = self.message or self.detail else: self.details = '' def __str__(self): return "HTTP {0} {1}. {2}".format( self.code, self.reason_phrase, self.details ) class ClientError(ClientException): """ HTTP 4xx - Client Error """ status_code = '4xx' reason_phrase = "Client Error" class ServerError(ClientException): """ HTTP 5xx - Server Error """ status_code = '5xx' reason_phrase = "Server Error" class BadRequest(ClientError): """ HTTP 400 - Bad request: you sent some malformed data. """ status_code = '400' reason_phrase = "Bad Request" class Unauthorized(ClientError): """ HTTP 401 - Unauthorized: bad credentials. """ status_code = '401' reason_phrase = "Unauthorized" class Forbidden(ClientError): """ HTTP 403 - Forbidden: your credentials don't give you access to this resource. 
""" status_code = '403' reason_phrase = "Forbidden" class NotFound(ClientError): """ HTTP 404 - Not found """ status_code = '404' reason_phrase = "Not Found" class MethodNotAllowed(ClientError): """ HTTP 405 - Method Not Allowed """ status_code = '405' reason_phrase = "Method Not Allowed" class Conflict(ClientError): """ HTTP 409 - Conflict """ status_code = '409' reason_phrase = "Conflict" class UnsupportedMediaType(ClientError): """ HTTP 415 - Unsupported Media Type """ status_code = '415' reason_phrase = "Unsupported Media Type" class InternalServerError(ServerError): """ HTTP 500 - Internal Server Error: The server encountered an unexpected condition which prevented it from fulfilling the request. """ status_code = '500' reason_phrase = "Internal Server Error" class ServiceUnavailable(ServerError): """ HTTP 503 - Service Unavailable """ status_code = '503' reason_phrase = "Service Unavailable" class GatewayTimeout(ServerError): """ HTTP 504 - Gateway Timeout """ status_code = '504' reason_phrase = "Gateway Timeout" _error_dict = dict((c.status_code, c) for c in get_subclasses(ClientException)) def raise_error(response, body, service_type=''): """ Return an instance of an ClientException or subclass based on an requests response. """ ResponseParser = get_response_parser_class(service_type) cls = _error_dict.get(str(response.status_code), ClientException) if body: res_p = ResponseParser(body) message = res_p.get_error_code() details = res_p.get_error_msg() data = res_p.get_status_body() return cls(code=response.status_code, message=message, detail=details, origin_data=data ) else: return cls(code=response.status_code, message=response.reason, origin_data=body )
en
0.750318
############################################################################## # Copyright 2019 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## Exception definitions. The operation performed on the resource is not allowed. The URL is not specified. The URL is missing. The id field is missing or None. The response body is missing. Can not get the URL Can not get the representation Field is read only. Could not open a connection to the API service. The request timed out. The base exception class for all HTTP client or server errors. HTTP 4xx - Client Error HTTP 5xx - Server Error HTTP 400 - Bad request: you sent some malformed data. HTTP 401 - Unauthorized: bad credentials. HTTP 403 - Forbidden: your credentials don't give you access to this resource. HTTP 404 - Not found HTTP 405 - Method Not Allowed HTTP 409 - Conflict HTTP 415 - Unsupported Media Type HTTP 500 - Internal Server Error: The server encountered an unexpected condition which prevented it from fulfilling the request. HTTP 503 - Service Unavailable HTTP 504 - Gateway Timeout Return an instance of an ClientException or subclass based on an requests response.
2.332659
2
pyjac/loopy_utils/loopy_utils.py
stgeke/pyJac-v2
9
6621512
<reponame>stgeke/pyJac-v2<filename>pyjac/loopy_utils/loopy_utils.py from __future__ import print_function import logging import os import stat import re import six from string import Template # package imports import loopy as lp from loopy.target.c.c_execution import CPlusPlusCompiler import numpy as np import warnings try: import pyopencl as cl from pyopencl.tools import clear_first_arg_caches except ImportError: cl = None pass # local imports from pyjac import utils from pyjac.core.enum_types import (RateSpecialization, JacobianType, JacobianFormat, KernelType) from pyjac.core import array_creator as arc from pyjac.core.exceptions import (MissingPlatformError, MissingDeviceError, BrokenPlatformError) from pyjac.loopy_utils.loopy_edit_script import substitute as codefix from pyjac.schemas import build_and_validate edit_script = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'loopy_edit_script.py') adept_edit_script = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'adept_edit_script.py') def load_platform(codegen): """ Loads a code-generation platform from a file, and returns the corresponding :class:`loopy_options` Parameters ---------- codegen: str The user-specified code-generation platform yaml file Returns ------- :class:`loopy_options` The loaded platform Raises ------ :class:`cerberus.ValidationError`: A validation error if the supplied codegen platform doesn't comply with the :doc:`../schemas/codegen_platform.yaml` """ platform = build_and_validate('codegen_platform.yaml', codegen)['platform'] width = platform.get('width', None) depth = platform.get('depth', None) # TODO: implement memory limits loading here # optional params get passed as kwargs kwargs = {} if 'order' in platform and platform['order'] is not None: kwargs['order'] = platform['order'] if 'atomic_doubles' in platform: kwargs['use_atomic_doubles'] = platform['atomic_doubles'] if 'atomic_ints' in platform: kwargs['use_atomic_ints'] = platform['atomic_ints'] return 
loopy_options(width=width, depth=depth, lang=platform['lang'], platform=platform['name'], **kwargs) class loopy_options(object): """ Loopy Objects class Attributes ---------- width : int If not None, the SIMD lane/SIMT block width. Cannot be specified along with depth depth : int If not None, the SIMD lane/SIMT block depth. Cannot be specified along with width ilp : bool If True, use the ILP tag on the species loop. Cannot be specified along with unr unr : int If not None, the unroll length to apply to the species loop. Cannot be specified along with ilp order : {'C', 'F'} The memory layout of the arrays, C (row major) or Fortran (column major) lang : ['opencl', 'c', 'cuda'] One of the supported languages rate_spec : RateSpecialization Controls the level to which Arrenhius rate evaluations are specialized rate_spec_kernels : bool If True, break different Arrenhius rate specializations into different kernels rop_net_kernels : bool If True, break different ROP values (fwd / back / pdep) into different kernels platform : {'CPU', 'GPU', or other vendor specific name} The OpenCL platform to run on. * If 'CPU' or 'GPU', the first available matching platform will be used * If a vendor specific string, it will be passed to pyopencl to get the platform use_atomic_doubles : bool [True] Use atomic updates where necessary for proper deep-vectorization If not, a sequential deep-vectorization (with only one thread/lane active) will be used use_atomic_ints : bool [True] Use atomic integer operations for the driver kernel. jac_type: :class:`JacobianType` [JacobianType.full] The type of Jacobian kernel (full or approximate) to generate jac_format: :class:`JacobianFormat` [JacobianFormat.full] The format of Jacobian kernel (full or sparse) to generate is_simd: bool [None] If supplied, override the user-specified flag :param:`explicit_simd`, used for testing. 
unique_pointers: bool [False] If specified, this indicates that the pointers passed to the generated pyJac methods will be unique (i.e., distinct per OpenMP thread / OpenCL work-group). This option is most useful for coupling to external codes an that have already been parallelized. explicit_simd: bool [False] Attempt to utilize explict-SIMD instructions in OpenCL """ def __init__(self, width=None, depth=None, ilp=False, unr=None, lang='opencl', order='C', rate_spec=RateSpecialization.fixed, rate_spec_kernels=False, rop_net_kernels=False, platform='', kernel_type=KernelType.jacobian, auto_diff=False, use_atomic_doubles=True, use_atomic_ints=True, jac_type=JacobianType.exact, jac_format=JacobianFormat.full, device=None, device_type=None, is_simd=None, unique_pointers=False, explicit_simd=None): self.width = width self.depth = depth if not utils.can_vectorize_lang[lang]: assert not (width or depth), ( "Can't use a vectorized form with unvectorizable language," " {}".format(lang)) assert not (width and depth), ( 'Cannot use deep and wide vectorizations simulataneously') self.ilp = ilp self.unr = unr utils.check_lang(lang) self.lang = lang utils.check_order(order) self.order = order self.rate_spec = utils.to_enum(rate_spec, RateSpecialization) self.rate_spec_kernels = rate_spec_kernels self.rop_net_kernels = rop_net_kernels self.platform = platform self.device_type = device_type self.device = device self.auto_diff = auto_diff self.use_atomic_doubles = use_atomic_doubles self.use_atomic_ints = use_atomic_ints self.jac_format = utils.to_enum(jac_format, JacobianFormat) self.jac_type = utils.to_enum(jac_type, JacobianType) self._is_simd = is_simd self.explicit_simd = explicit_simd self.explicit_simd_warned = False if self.lang != 'opencl' and self.explicit_simd: logger = logging.getLogger(__name__) logger.warn('explicit-SIMD flag has no effect on non-OpenCL targets.') self.kernel_type = utils.to_enum(kernel_type, KernelType) self.unique_pointers = unique_pointers if 
self._is_simd or self.explicit_simd: assert width or depth, ( 'Cannot use explicit SIMD types without vectorization') # need to find the first platform that has the device of the correct # type if self.lang == 'opencl' and not self.platform_is_pyopencl \ and cl is not None: self.device_type = cl.device_type.ALL check_name = None if self.platform_name.lower() == 'cpu': self.device_type = cl.device_type.CPU elif self.platform_name.lower() == 'gpu': self.device_type = cl.device_type.GPU elif self.platform_name.lower() == 'accelerator': self.device_type = cl.device_type.ACCELERATOR else: check_name = self.platform self.platform = None platforms = cl.get_platforms() for p in platforms: try: cl.Context( dev_type=self.device_type, properties=[(cl.context_properties.PLATFORM, p)]) if not check_name or check_name.lower() in p.get_info( cl.platform_info.NAME).lower(): self.platform = p break except cl.cffi_cl.RuntimeError: pass if not self.platform: raise MissingPlatformError(platform) if not isinstance(self.device, cl.Device) and ( self.device_type is not None): # finally a matching device self.device = self.platform.get_devices( device_type=self.device_type) if not self.device: raise MissingDeviceError(self.device_type, self.platform) self.device = self.device[0] self.device_type = self.device.get_info(cl.device_info.TYPE) elif self.lang == 'opencl': self.device_type = 'CL_DEVICE_TYPE_ALL' # check for broken vectorizations self.raise_on_broken() @property def limit_int_overflow(self): """ Deals with issue of integer overflow in array indexing """ return self.lang == 'c' or self.lang == 'opencl' and \ ('intel' in self.platform_name.lower() or 'portable' in self.platform_name.lower()) def raise_on_broken(self): # Currently, NVIDIA w/ neither deep nor wide-vectorizations ( # i.e. 
a "parallel" implementation) breaks sometimes on OpenCL if self.lang == 'opencl' and cl is not None: if not (self.width or self.depth) \ and self.device_type == cl.device_type.GPU: if 'nvidia' in self.platform_name.lower(): raise BrokenPlatformError(self) # otherwise, simply warn logger = logging.getLogger(__name__) logger.warn('Some GPU implementation(s)--NVIDIA--give incorrect' 'values sporadically without either a deep or wide' 'vectorization. Use at your own risk.') if self.width and not self.is_simd and \ self.device_type == cl.device_type.CPU: logger = logging.getLogger(__name__) if 'intel' in self.platform_name.lower(): logger.error('Intel OpenCL is currently broken for wide, ' 'non-explicit-SIMD vectorizations on the CPU. ' 'Use the --explicit_simd flag.') raise BrokenPlatformError(self) if not self.explicit_simd and self._is_simd is None: # only warn if user didn't supply logger.warn('You may wish to use the --explicit_simd flag to ' 'utilize explicit-vector data-types (and avoid ' 'implicit vectorization, which may yield sub-optimal' ' results).') if 'portable' in self.platform_name.lower() and self.unique_pointers: logger = logging.getLogger(__name__) logger.error('Portable OpenCL is currently broken for ' 'unique_pointers.') raise BrokenPlatformError(self) @property def is_simd(self): """ Utility to determine whether to tell Loopy to apply explicit-simd vectorization or not Returns ------- is_simd: bool True if we should attempt to explicitly vectorize the data / arrays """ # priority to test-specification if self._is_simd is not None: return self._is_simd if not (self.width or self.depth): return False # currently SIMD is enabled only wide-CPU vectorizations ( # deep-vectorizations will require further loopy upgrades) if not self.width: if self.explicit_simd: logger = logging.getLogger(__name__) logger.warn('Explicit-SIMD deep-vectorization currently not ' 'implemented, ignoring user-specified SIMD flag') return False if self.explicit_simd is not None: 
# user specified return self.explicit_simd if not cl: if self.explicit_simd is None and not self.explicit_simd_warned: logger = logging.getLogger(__name__) logger.warn('Cannot determine whether to use explicit-SIMD ' 'instructions as PyOpenCL was not found. Either ' 'install PyOpenCL or use the "--explicit_simd" ' 'command line argument. Assuming not SIMD.') self.explicit_simd_warned = True return self.explicit_simd if self.lang == 'opencl': return self.device_type != cl.device_type.GPU return True @property def pre_split(self): """ It is sometimes advantageous to 'pre-split' the outer loop into an inner (vector) iname and an outer (parallel) iname, particularly when using explicit-SIMD w/ loopy (and avoid having to figure out how to simplify floor-div's of the problem size in loopy) If this property is True, utilize a pre-split. """ return self.width and arc.array_splitter._have_split_static(self) @property def initial_condition_dimsize(self): """ Return the necessary IC dimension size based on this :class:`loopy_options` """ ws = arc.work_size.name if not self.pre_split and self.width: return '{}*{}'.format(ws, self.width) return ws @property def initial_condition_loopsize(self): """ Return the necessary loop bound for the global index of inner kernel loops based on this :class:`loopy_options` """ if self.unique_pointers: return self.vector_width if self.vector_width else 1 if not self.pre_split and self.width: return '{}*{}'.format(arc.work_size.name, self.width) return arc.work_size.name @property def vector_width(self): """ Returns the vector width for this :class:`loopy_options` or None if unvectorized """ if not (self.width or self.depth): return None return self.width if self.width else self.depth @property def has_scatter(self): """ Utility to determine whether the target supports scatter writes Currently, only Intel's OpenCL implementation does not (CPU-only 16.1.1) and if attempted, it breaks the auto-vectorization Parameters ---------- None Returns 
------- has_scatter: bool Whether the target supports scatter operations or not """ return not (self.lang == 'opencl' and 'intel' in self.platform_name.lower()) @property def platform_is_pyopencl(self): """ Return true, IFF :attr:`platform` is an instance of a :class:`pyopencl.Platform` """ return self.platform and cl is not None and isinstance( self.platform, cl.Platform) @property def platform_name(self): """ Returns the suppled OpenCL platform name, or None if not available """ if self.platform_is_pyopencl: return self.platform.name return self.platform def get_device_list(): """ Returns the available pyopencl devices Parameters ---------- None Returns ------- devices : list of :class:`pyopencl.Device` The devices recognized by pyopencl """ device_list = [] for p in cl.get_platforms(): device_list.append(p.get_devices()) # don't need multiple gpu's etc. return [x[0] for x in device_list if x] def get_context(device='0'): """ Simple method to generate a pyopencl context Parameters ---------- device : str or :class:`pyopencl.Device` The pyopencl string (or device class) denoting the device to use, defaults to '0' Returns ------- ctx : :class:`pyopencl.Context` The running context """ # os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1' if isinstance(device, str): os.environ['PYOPENCL_CTX'] = device ctx = cl.create_some_context(interactive=False) else: ctx = cl.Context(devices=[device]) return ctx def get_header(knl, codegen_result=None): """ Returns header definition code for a :class:`loopy.LoopKernel` Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate a header definition for codegen_result : :class:`loopy.CodeGenerationResult` If supplied, the pre-generated code-gen result for this kernel (speeds up header generation) Returns ------- Generated device header code Notes ----- The kernel's Target and name should be set for proper functioning """ return str(lp.generate_header(knl, codegen_result=codegen_result)[0]) def __set_editor(knl, script): 
# set the edit script as the 'editor' os.environ['EDITOR'] = script # turn on code editing edit_knl = lp.set_options(knl, edit_code=True) return edit_knl def set_editor(knl): """ Returns a copy of knl set up for various automated bug-fixes Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate code for Returns ------- edit_knl : :class:`loopy.LoopKernel` The kernel set up for editing """ return __set_editor(knl, edit_script) def set_adept_editor(knl, base_kernels, problem_size=8192, independent_variable=None, dependent_variable=None, output=None, do_not_set=[]): """ Returns a copy of knl set up for various automated bug-fixes Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate code for base_kernels : :class:`loopy.LoopKernel` The kernel :param:`knl` and all dependencies required for Jacobian evaluation. These kernels, should be generated for a problem_size of 1 to facilitate indexing in the wrapped kernel problem_size : int The size of the testing problem independent_variable : :class:`array_creator.creator` The independent variables to compute the Jacobian with respect to dependent_variable : :class:`array_creator.creator` The dependent variables to find the Jacobian of output : :class:`array_creator.creator` The array to store the column-major Jacobian in, ordered by thermo-chemical condition do_not_set : list of :class:`array_creator.creator` Other variables that are computed in this kernel (and hence shouldn't) be set Returns ------- edit_knl : :class:`loopy.LoopKernel` The kernel set up for editing """ # load template with open(adept_edit_script + '.in', 'r') as file: src = file.read() def __get_size_and_stringify(variable): sizes = variable.shape indicies = ['ad_j', 'i'] out_str = variable.name + '[{index}]' from pyjac.core.array_creator import creator if isinstance(variable, creator): if variable.order == 'C': # last index varies fastest, so stride of 'i' is 1 sizes = reversed(sizes) indicies = 
reversed(indicies) elif isinstance(variable, lp.kernel.data.ArrayBase): # find index of test_size strides = [x.stride for x in variable.dim_tags] sizes = variable.shape # if first stride is not problem_size, this is 'C' ordered # hence reverse indicies if strides[0] != problem_size: sizes = reversed(sizes) indicies = reversed(indicies) if len(variable.shape) == 1: return 1, out_str.format(index='ad_j') if len(variable.shape) > 2: assert variable.name == 'jac' size = np.product([x for x in sizes if x != problem_size]) # can't operate on this return None, None out_index = '' offset = 1 out_size = None for size, index in zip(sizes, indicies): if out_index: out_index += ' + ' if str(size) == arc.work_size.name: # per work-size = 1 in this context as we're operating per-thread pass elif size != problem_size: assert out_size is None, ( 'Cannot determine variable size!') out_size = size out_index += '{} * {}'.format(index, offset) offset *= size return out_size, out_str.format(index=out_index) # find the dimension / string representation of the independent # and dependent variables indep_size, indep = __get_size_and_stringify(independent_variable) dep_size, dep = __get_size_and_stringify(dependent_variable) # initializers init_template = Template(""" std::vector<adouble> ad_${name} (${size}); """) set_template = Template(""" for (int i = 0; i < ${size}; ++i) { ad_${name}[i].set_value(${indexed}); } """) zero_template = Template(""" for(int i = 0; i < ${size}; ++i) { ad_${name}[i].set_value(0.0); } """) # get set of written vars written_vars = knl.get_written_variables() for k in base_kernels: written_vars |= k.get_written_variables() initializers = [] for arg in knl.args: if arg.name != dependent_variable.name \ and not isinstance(arg, lp.ValueArg): size, indexed = __get_size_and_stringify(arg) if size is not None: # add initializer initializers.append(init_template.substitute( name=arg.name, size=size, )) if indexed is not None and arg.name not in written_vars: 
initializers.append(set_template.substitute( name=arg.name, indexed=indexed, size=size )) else: initializers.append(zero_template.substitute( name=arg.name, size=size )) dep_set_template = Template(""" for (int i = 0; i < ${size}; ++i) { ${indexed} = ad_${name}[i].value(); } """) setters = [] for var in [dependent_variable] + do_not_set: size, ind = __get_size_and_stringify(var) setters.append(dep_set_template.substitute( indexed=ind, name=var.name, size=size)) setters = '\n'.join(setters) jac_size = dep_size * indep_size # find the output name jac_base_offset = '&' + output.name + \ '[ad_j * {dep_size} * {indep_size}]'.format( dep_size=dep_size, indep_size=indep_size) # get header defn header = get_header(knl) header = header[:header.index(';')] # replace the "const" on the jacobian header = re.sub(r'double\s*const(?=[^,]+{name})'.format(name=output.name), 'double', header) # and function call kernel_calls = [] for k in base_kernels: arg_list = [arg.name for arg in k.args] for i, arg in enumerate(arg_list): name = arg[:] if arg != output.name: name = 'ad_' + name if arg != 'j': name = '&' + name + '[0]' arg_list[i] = name kernel_calls.append('ad_{name}({args});'.format( name=k.name, args=', '.join(arg_list))) # fill in template with open(adept_edit_script, 'w') as file: file.write(utils.subs_at_indent( src, problem_size=problem_size, ad_indep_name='ad_' + independent_variable.name, # indep=indep, # indep_name=independent_variable.name, indep_size=indep_size, ad_dep_name='ad_' + dependent_variable.name, # dep=dep, # dep_name=dependent_variable.name, dep_size=dep_size, jac_base_offset=jac_base_offset, # jac_size=jac_size, jac_name=output.name, function_defn=header, kernel_calls='\n'.join(kernel_calls), initializers='\n'.join(initializers), base_kernels='\n'.join([get_code(x) for x in base_kernels]), setters=setters )) # and make it executable st = os.stat(adept_edit_script) os.chmod(adept_edit_script, st.st_mode | stat.S_IEXEC) return __set_editor(knl, 
adept_edit_script) def get_code(knl, opts=None): """ Returns the device code for a :class:`loopy.LoopKernel` or fixes alreay generated code Parameters ---------- knl : :class:`loopy.LoopKernel` or str The kernel to generate code for. If knl is a string, it is assumed to be pregenerated code, and only the editor script must be called opts: :class:`loopy_options` The options used in created the kernel -- used to detect platform specific fixes. Ignored if not supplied Returns ------- code: str Generated device code Notes ----- The kernel's Target and name should be set for proper functioning """ if isinstance(knl, str): code = knl else: code, _ = lp.generate_code(knl) extra_subs = {} if opts is None: # ignore pass elif opts.lang == 'opencl' and ( 'intel' in opts.platform_name.lower() and ((opts.order == 'C' and opts.width) or ( opts.order == 'F' and opts.depth) or ( opts.order == 'F' and opts.width))): # If True, this is a finite-difference Jacobian on an Intel OpenCL platform # Hence we have to tell the codefixer about the intel bug # https://software.intel.com/en-us/forums/opencl/topic/748841 extra_subs[r'__kernel void __attribute__ \(\(reqd_work_group_size\(\d+, 1, 1' r'\)\)\) species_rates_kernel'] = r'void species_rates_kernel' return codefix('stdin', text_in=code, extra_subs=extra_subs) def not_is_close(arr1, arr2, **kwargs): """ A utility method that returns the result of: numpy.where(numpy.logical_not(numpy.isclose(arr1, arr2, **kwargs))) Since I use if often in testing Parameters ---------- arr1: :class:`np.ndarray` Array to compare arr2: :class:`np.ndarray` Reference answer **kwargs: dict Keyword args for :func:`numpy.isclose` Returns ------- inds: tuple of :class:`numpy.ndarray` result of: `numpy.where(numpy.logical_not(numpy.isclose(arr1, arr2, **kwargs)))` """ return np.where(np.logical_not(np.isclose(arr1, arr2, **kwargs))) class kernel_call(object): """ A wrapper for the various parameters (e.g. args, masks, etc.) 
for calling / executing a loopy kernel """ def __init__(self, name, ref_answer, compare_axis=1, compare_mask=None, out_mask=None, input_mask=[], strict_name_match=False, chain=None, check=True, post_process=None, allow_skip=False, other_compare=None, atol=1e-8, rtol=1e-5, equal_nan=False, ref_ans_compare_mask=None, tiling=True, **input_args): """ The initializer for the :class:`kernel_call` object Parameters ---------- name : str The kernel name, used for matching ref_answer : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` The reference answer to compare to compare_axis : int, optional An axis to apply the compare_mask along, unused if compare_mask is None compare_mask : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` An optional list of indexes to compare, useful when the kernel only computes partial results. Should match length of ref_answer ref_ans_compare_mask : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` Same as the compare_mask, but for the reference answer. Necessary for some kernel tests, as the reference answer is not the same size as the output, which causes issues for split arrays. If not supplied, the regular :param:`compare_mask` will be used tiling: bool, [True] If True (default), the elements in the :param:`compare_mask` should be combined, e.g., if two arrays [[1, 2] and [3, 4]] are supplied to :param:`compare_mask` with tiling turned on, four resulting indicies will be compared -- [1, 3], [1, 4], [2, 3], and [2, 4]. If tiling is turned of, the compare mask will be treated as a list of indicies, e.g., (for the previous example) -- [1, 3] and [2, 4]. out_mask : int, optional The index(ices) of the returned array to aggregate. Should match length of ref_answer input_mask : list of str or function, optional An optional list of input arguements to filter out If a function is passed, the expected signature is along the lines of: def fcn(self, arg_name): ... 
and returns True iff this arg_name should be used strict_name_match : bool, optional If true, only kernels exactly matching this name will be excecuted Defaut is False chain : function, optional If not None, a function of signature similar to: def fcn(self, out_values): .... is expected. This function should take the output values from a previous kernel call, and place in the input args for this kernel call as necessary post_process : function, optional If not None, a function of signature similar to: def fcn(self, out_values): .... is expected. This function should take the output values from this kernel call, and process them as expected to compare to results. Currently used only in comparison of reaction rates to Cantera (to deal w/ falloff etc.) check : bool If False, do not check result (useful when chaining to check only the last result) Default is True allow_skip : bool If True, allow this kernel call to be check results without actually executing a kernel (checks the last kernel that was executed). This is useful for selectively turning off kernels (e.g. if there are no reverse reactions) other_compare : Callable, optional If supplied, a function that compares output values not checked in by this kernel call. This is useful in the case of NaN's resulting from derivatives of (e.g.,) log(0), to ensure our arrays are spitting out very large (but finite) numbers rtol : float [Default 1e-5] The relative tolerance for comparison to reference answers. For Jacobian correctness testing this may have to be loosened atol : float [Default 1e-8] The absolute tolerance for comparison to reference answers. 
equal_nan : bool [False] If supplied, whether to consider NaN's equal for reference testing input_args : dict of `numpy.array`s The arguements to supply to the kernel Returns ------- out_ref : list of :class:`numpy.ndarray` The value(s) of the evaluated :class:`loopy.LoopKernel` """ self.name = name self.ref_answer = ref_answer if isinstance(ref_answer, list): num_check = len(ref_answer) else: num_check = 1 self.ref_answer = [ref_answer] self.compare_axis = compare_axis if compare_mask is not None: self.compare_mask = compare_mask else: self.compare_mask = [None for i in range(num_check)] if ref_ans_compare_mask is not None: self.ref_ans_compare_mask = ref_ans_compare_mask else: self.ref_ans_compare_mask = [None for i in range(num_check)] self.out_mask = out_mask self.input_mask = input_mask self.input_args = input_args self.strict_name_match = strict_name_match self.kernel_args = None self.chain = chain self.post_process = post_process self.check = check self.current_order = None self.allow_skip = allow_skip self.other_compare = other_compare self.tiling = tiling # pull any rtol / atol from env / test config as specified by user from pyjac.utils import get_env_val rtol = float(get_env_val('rtol', rtol)) atol = float(get_env_val('atol', atol)) self.rtol = rtol self.atol = atol self.equal_nan = equal_nan self.do_not_copy = set() def is_my_kernel(self, knl): """ Tests whether this kernel should be run with this call Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to call """ if self.strict_name_match: return self.name == knl.name return True def set_state(self, array_splitter, order='F', namestore=None, jac_format=JacobianFormat.full): """ Updates the kernel arguements, and and compare axis to the order given If the 'arg' is a function, it will be called to get the correct answer Parameters ---------- array_splitter: :class:`pyjac.core.array_creator.array_splitter` The array splitter of the owning 
:class:`kernek_utils.kernel_gen.kernel.kernel_generator`, used to operate on numpy arrays if necessary order : {'C', 'F'} The memory layout of the arrays, C (row major) or Fortran (column major) namestore : :class:`NameStore` Must be supplied if :param:`jac_format` is of type :class:`JacobianFormat.sparse`, in order to pull row / column indicies for conversion to / from sparse matricies jac_format: :class:`JacobianFormat` [JacobianFormat.full] If sparse, we are testing a sparse matrix (and :param:`namestore` must be supplied) """ self.current_order = order # filter out bad input args_copy = self.input_args.copy() if self.input_mask is not None: if six.callable(self.input_mask): args_copy = {x: args_copy[x] for x in args_copy if self.input_mask(self, x)} else: args_copy = {x: args_copy[x] for x in args_copy if x not in self.input_mask} for key in args_copy: if six.callable(args_copy[key]): # it's a function args_copy[key] = args_copy[key](order) self.kernel_args = args_copy self.transformed_ref_ans = [np.array(ans, order=order, copy=True) for ans in self.ref_answer] self.jac_format = jac_format if jac_format == JacobianFormat.sparse: from pyjac.tests.test_utils import sparsify # need to convert the jacobian arg to a sparse representation # the easiest way to deal with this is to convert the kernel argument # to the sparse dimensions # Then afterwards we can use the row / col inds as an intermediate # index in the comparison step self.kernel_args['jac'] = np.array(self.kernel_args['jac'][ :, namestore.flat_jac_row_inds.initializer, namestore.flat_jac_col_inds.initializer], order=order, copy=True) # save for comparable self.row_inds = namestore.jac_row_inds.initializer self.col_inds = namestore.jac_col_inds.initializer # sparsify transformed answer self.transformed_ref_ans = [ sparsify(array, self.col_inds, self.row_inds, self.current_order) if array.ndim >= 3 else array for array in self.transformed_ref_ans] # and finally feed through the array splitter 
self.current_split = array_splitter self.kernel_args = array_splitter.split_numpy_arrays(self.kernel_args) self.transformed_ref_ans = array_splitter.split_numpy_arrays( self.transformed_ref_ans) def __call__(self, knl, queue): """ Calls the kernel, filtering input / output args as required Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to call queue : :class:`pyopencl.Queue` The command queue Returns ------- out : list of :class:`numpy.ndarray` The (potentially filtered) output variables """ if isinstance(knl.target, lp.PyOpenCLTarget): evt, out = knl(queue, out_host=True, **self.kernel_args) elif isinstance(knl.target, lp.CTarget): evt, out = knl(**{ k: v.copy(order=self.current_order) if ( isinstance(v, np.ndarray) and k not in self.do_not_copy) else v for k, v in self.kernel_args.items()}) else: raise NotImplementedError if self.out_mask is not None: return [out[ind] for ind in self.out_mask] else: return [out[0]] def _get_comparable(self, variable, index, is_answer=False): """ Selects the data to compare from the supplied variable depending on the compare mask / axes supplied """ mask = self.ref_ans_compare_mask[index] if is_answer \ else self.compare_mask[index] if mask is None and is_answer: # use the regular compare mask, as the reference answer specific one # was not supplied mask = self.compare_mask[index] # if no mask if mask is None: return variable if six.callable(mask): # see if it's a supplied callable return mask(self, variable, index, is_answer=is_answer) from pyjac.tests.test_utils import select_elements return select_elements(variable, mask, self.compare_axis, tiling=self.tiling) def compare(self, output_variables): """ Compare the output variables to the given reference answer Parameters ---------- output_variables : :class:`numpy.ndarray` or :class:`numpy.ndarray` The output variables to test Returns ------- match : bool True IFF the masked output variables match the supplied reference answer for this :class:`kernel_call` """ 
        def _check_mask(mask):
            # check that the mask is one of:
            # 1. a list of length equal to the size of the number of outputs
            # 2. a list of indicies (indicated by the compare axis set to -1)
            # 3. a callable function / object that can figure out extracting
            #    the comparable entries on it's own
            assert (isinstance(mask, list)
                    and len(mask) == len(output_variables)) or \
                not self.tiling or six.callable(mask), (
                'Compare mask does not match output variables!')

        _check_mask(self.compare_mask)
        _check_mask(self.ref_ans_compare_mask)

        allclear = True
        for i in range(len(output_variables)):
            # copy so mask extraction / squeezing can't mutate stored arrays
            outv = output_variables[i].copy()
            ref_answer = self.transformed_ref_ans[i].copy()
            if self.compare_mask[i] is not None:
                outv = self._get_comparable(outv, i)
                if outv.shape != ref_answer.shape:
                    # apply the same transformation to the answer
                    ref_answer = self._get_comparable(
                        ref_answer, i, is_answer=True)
            else:
                outv = outv.squeeze()
                ref_answer = ref_answer.squeeze()
            allclear = allclear and np.allclose(
                outv, ref_answer, rtol=self.rtol, atol=self.atol,
                equal_nan=self.equal_nan)
            # allow a user-supplied comparison of entries this mask skipped
            if self.other_compare is not None:
                allclear = allclear and self.other_compare(
                    self, output_variables[i].copy(),
                    self.transformed_ref_ans[i].copy(),
                    self.compare_mask[i])

        return allclear


def populate(knl, kernel_calls, device='0', editor=None):
    """
    This method runs the supplied :class:`loopy.LoopKernel` (or list thereof),
    and is often used by :function:`auto_run`

    Parameters
    ----------
    knl : :class:`loopy.LoopKernel` or list of :class:`loopy.LoopKernel`
        The kernel to test, if a list of kernels they will be successively
        applied and the end result compared
    kernel_calls : :class:`kernel_call` or list thereof
        The masks / ref_answers, etc.
to use in testing device : str The pyopencl string denoting the device to use, defaults to '0' editor : callable If not none, a callable function or object that takes a :class:`loopy.LoopKernel` as the sole arguement, and returns the kernel with editing turned on (for used with auto-differentiation) If not specified, the default (opencl) editor will be invoked Returns ------- out_ref : list of :class:`numpy.ndarray` The value(s) of the evaluated :class:`loopy.LoopKernel` """ assert len(knl), 'No kernels supplied!' # create context ctx = None if any(isinstance(k.target, lp.PyOpenCLTarget) for k in knl): ctx = get_context(device) if editor is None: editor = set_editor def __inner(queue=None): output = [] kc_ind = 0 oob = False while not oob: # handle weirdness between list / non-list input try: kc = kernel_calls[kc_ind] kc_ind += 1 except IndexError: oob = True break # reached end of list except TypeError: # not a list oob = True # break on next run kc = kernel_calls # create the outputs if kc.out_mask is not None: out_ref = [None for i in kc.out_mask] else: out_ref = [None] found = False # run kernels for k in knl: # test that we want to run this one if kc.is_my_kernel(k): found = True # set the editor to avoid intel bugs test_knl = editor(k) if isinstance(test_knl.target, lp.PyOpenCLTarget): # recreate with device test_knl = test_knl.copy( target=lp.PyOpenCLTarget(device=device)) # check for chaining if kc.chain: kc.chain(kc, output) # run! out = kc(test_knl, queue) if kc.post_process: kc.post_process(kc, out) # output mapping if all(x is None for x in out_ref): # if the outputs are none, we init to zeros # and avoid copying zeros over later data! 
out_ref = [np.zeros_like(x) for x in out] for ind in range(len(out)): # get indicies that are non-zero (already in there) # or non infinity/nan # try w/o finite check (I'm paranoid, don't want to mask) # any bad data copy_inds = np.where(np.logical_not(out[ind] == 0)) # copy_inds = np.where(np.logical_not( # np.logical_or(np.isinf(out[ind]), # out[ind] == 0, np.isnan(out[ind]))), # ) out_ref[ind][copy_inds] = out[ind][copy_inds] output.append(out_ref) assert found or kc.allow_skip, ( 'No kernels could be found to match kernel call {}'.format( kc.name)) return output if ctx is not None: with cl.CommandQueue(ctx) as queue: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=cl.CompilerWarning) output = __inner(queue) queue.flush() # release context clear_first_arg_caches() del ctx else: output = __inner() return output def auto_run(knl, kernel_calls, device='0'): """ This method tests the supplied :class:`loopy.LoopKernel` (or list thereof) against a reference answer Parameters ---------- knl : :class:`loopy.LoopKernel` or list of :class:`loopy.LoopKernel` The kernel to test, if a list of kernels they will be successively applied and the end result compared kernel_calls : :class:`kernel_call` The masks / ref_answers, etc. 
to use in testing device : str The pyopencl string denoting the device to use, defaults to '0' input_args : dict of `numpy.array`s The arguements to supply to the kernel Returns ------- result : bool True if all tests pass """ # run kernel # check lists if not isinstance(knl, list): knl = [knl] out = populate(knl, kernel_calls, device=device) try: result = True for i, kc in enumerate(kernel_calls): if kc.check: ind = i if kc.allow_skip and all(x is None for x in out[i]): # find the last one for which we have data ind = next(ind for ind in reversed(range(i)) if not any(x is None for x in out[ind])) result = result and kc.compare(out[ind]) return result except TypeError as e: if str(e) == "'kernel_call' object is not iterable": # if not iterable return kernel_calls.compare(out[0]) raise e def get_target(lang, device=None, compiler=None): """ Parameters ---------- lang : str One of the supported languages, {'c', 'cuda', 'opencl'} device : :class:`pyopencl.Device` If supplied, and lang is 'opencl', passed to the :class:`loopy.PyOpenCLTarget` compiler: str If supplied, the C-compiler to use Returns ------- The correct loopy target type """ utils.check_lang(lang) # set target if lang == 'opencl': if cl is not None: return lp.PyOpenCLTarget(device=device) return lp.OpenCLTarget() elif lang == 'c': return lp.ExecutableCTarget(compiler=compiler) elif lang == 'cuda': return lp.CudaTarget() elif lang == 'ispc': return lp.ISPCTarget() class AdeptCompiler(CPlusPlusCompiler): def __init__(self, *args, **kwargs): from ..siteconf import ADEPT_INC_DIR, ADEPT_LIB_DIR, ADEPT_LIBNAME from ..siteconf import CXXFLAGS defaults = kwargs.copy() defaults['libraries'] = ADEPT_LIBNAME if 'cflags' not in defaults: defaults['cflags'] = [] if CXXFLAGS: defaults['cflags'] = [x for x in CXXFLAGS if x not in defaults['cflags'] and x.strip()] if ADEPT_LIB_DIR: defaults['library_dirs'] = ADEPT_LIB_DIR if ADEPT_INC_DIR: defaults['cflags'] = ['-I{}'.format(x) for x in ADEPT_INC_DIR] # update to use any 
user specified info defaults.update(kwargs) # get toolchain from pyjac.libgen import get_toolchain toolchain = get_toolchain('c', executable=False, **defaults) # and create super(AdeptCompiler, self).__init__(toolchain=toolchain) def build(self, *args, **kwargs): """override from CPlusPlusCompiler to load Adept into ctypes and avoid missing symbol errors""" from ctypes.util import find_library from ctypes import CDLL, RTLD_GLOBAL CDLL(find_library('adept'), mode=RTLD_GLOBAL) return super(AdeptCompiler, self).build(*args, **kwargs)
"""Utilities for creating, editing, and executing :mod:`loopy` kernels.

Provides the :class:`loopy_options` code-generation configuration object,
helpers for obtaining pyopencl contexts/devices, and editor hooks used to
patch generated kernel code (including Adept-based auto-differentiation).
"""
from __future__ import print_function

import logging
import os
import stat
import re
import six
from string import Template

# package imports
import loopy as lp
from loopy.target.c.c_execution import CPlusPlusCompiler
import numpy as np
import warnings
try:
    import pyopencl as cl
    from pyopencl.tools import clear_first_arg_caches
except ImportError:
    # pyopencl is optional; `cl is None` is checked before any OpenCL use
    cl = None
    pass

# local imports
from pyjac import utils
from pyjac.core.enum_types import (RateSpecialization, JacobianType,
                                   JacobianFormat, KernelType)
from pyjac.core import array_creator as arc
from pyjac.core.exceptions import (MissingPlatformError, MissingDeviceError,
                                   BrokenPlatformError)
from pyjac.loopy_utils.loopy_edit_script import substitute as codefix
from pyjac.schemas import build_and_validate

# paths to the code-editing scripts that live next to this module
edit_script = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'loopy_edit_script.py')
adept_edit_script = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'adept_edit_script.py')


def load_platform(codegen):
    """
    Loads a code-generation platform from a file, and returns the
    corresponding :class:`loopy_options`

    Parameters
    ----------
    codegen: str
        The user-specified code-generation platform yaml file

    Returns
    -------
    :class:`loopy_options`
        The loaded platform

    Raises
    ------
    :class:`cerberus.ValidationError`:
        A validation error if the supplied codegen platform doesn't comply
        with the :doc:`../schemas/codegen_platform.yaml`
    """
    platform = build_and_validate('codegen_platform.yaml', codegen)['platform']
    width = platform.get('width', None)
    depth = platform.get('depth', None)
    # TODO: implement memory limits loading here
    # optional params get passed as kwargs
    kwargs = {}
    if 'order' in platform and platform['order'] is not None:
        kwargs['order'] = platform['order']
    if 'atomic_doubles' in platform:
        kwargs['use_atomic_doubles'] = platform['atomic_doubles']
    if 'atomic_ints' in platform:
        kwargs['use_atomic_ints'] = platform['atomic_ints']
    return loopy_options(width=width, depth=depth, lang=platform['lang'],
platform=platform['name'], **kwargs) class loopy_options(object): """ Loopy Objects class Attributes ---------- width : int If not None, the SIMD lane/SIMT block width. Cannot be specified along with depth depth : int If not None, the SIMD lane/SIMT block depth. Cannot be specified along with width ilp : bool If True, use the ILP tag on the species loop. Cannot be specified along with unr unr : int If not None, the unroll length to apply to the species loop. Cannot be specified along with ilp order : {'C', 'F'} The memory layout of the arrays, C (row major) or Fortran (column major) lang : ['opencl', 'c', 'cuda'] One of the supported languages rate_spec : RateSpecialization Controls the level to which Arrenhius rate evaluations are specialized rate_spec_kernels : bool If True, break different Arrenhius rate specializations into different kernels rop_net_kernels : bool If True, break different ROP values (fwd / back / pdep) into different kernels platform : {'CPU', 'GPU', or other vendor specific name} The OpenCL platform to run on. * If 'CPU' or 'GPU', the first available matching platform will be used * If a vendor specific string, it will be passed to pyopencl to get the platform use_atomic_doubles : bool [True] Use atomic updates where necessary for proper deep-vectorization If not, a sequential deep-vectorization (with only one thread/lane active) will be used use_atomic_ints : bool [True] Use atomic integer operations for the driver kernel. jac_type: :class:`JacobianType` [JacobianType.full] The type of Jacobian kernel (full or approximate) to generate jac_format: :class:`JacobianFormat` [JacobianFormat.full] The format of Jacobian kernel (full or sparse) to generate is_simd: bool [None] If supplied, override the user-specified flag :param:`explicit_simd`, used for testing. unique_pointers: bool [False] If specified, this indicates that the pointers passed to the generated pyJac methods will be unique (i.e., distinct per OpenMP thread / OpenCL work-group). 
This option is most useful for coupling to external codes an that have already been parallelized. explicit_simd: bool [False] Attempt to utilize explict-SIMD instructions in OpenCL """ def __init__(self, width=None, depth=None, ilp=False, unr=None, lang='opencl', order='C', rate_spec=RateSpecialization.fixed, rate_spec_kernels=False, rop_net_kernels=False, platform='', kernel_type=KernelType.jacobian, auto_diff=False, use_atomic_doubles=True, use_atomic_ints=True, jac_type=JacobianType.exact, jac_format=JacobianFormat.full, device=None, device_type=None, is_simd=None, unique_pointers=False, explicit_simd=None): self.width = width self.depth = depth if not utils.can_vectorize_lang[lang]: assert not (width or depth), ( "Can't use a vectorized form with unvectorizable language," " {}".format(lang)) assert not (width and depth), ( 'Cannot use deep and wide vectorizations simulataneously') self.ilp = ilp self.unr = unr utils.check_lang(lang) self.lang = lang utils.check_order(order) self.order = order self.rate_spec = utils.to_enum(rate_spec, RateSpecialization) self.rate_spec_kernels = rate_spec_kernels self.rop_net_kernels = rop_net_kernels self.platform = platform self.device_type = device_type self.device = device self.auto_diff = auto_diff self.use_atomic_doubles = use_atomic_doubles self.use_atomic_ints = use_atomic_ints self.jac_format = utils.to_enum(jac_format, JacobianFormat) self.jac_type = utils.to_enum(jac_type, JacobianType) self._is_simd = is_simd self.explicit_simd = explicit_simd self.explicit_simd_warned = False if self.lang != 'opencl' and self.explicit_simd: logger = logging.getLogger(__name__) logger.warn('explicit-SIMD flag has no effect on non-OpenCL targets.') self.kernel_type = utils.to_enum(kernel_type, KernelType) self.unique_pointers = unique_pointers if self._is_simd or self.explicit_simd: assert width or depth, ( 'Cannot use explicit SIMD types without vectorization') # need to find the first platform that has the device of the correct # 
type if self.lang == 'opencl' and not self.platform_is_pyopencl \ and cl is not None: self.device_type = cl.device_type.ALL check_name = None if self.platform_name.lower() == 'cpu': self.device_type = cl.device_type.CPU elif self.platform_name.lower() == 'gpu': self.device_type = cl.device_type.GPU elif self.platform_name.lower() == 'accelerator': self.device_type = cl.device_type.ACCELERATOR else: check_name = self.platform self.platform = None platforms = cl.get_platforms() for p in platforms: try: cl.Context( dev_type=self.device_type, properties=[(cl.context_properties.PLATFORM, p)]) if not check_name or check_name.lower() in p.get_info( cl.platform_info.NAME).lower(): self.platform = p break except cl.cffi_cl.RuntimeError: pass if not self.platform: raise MissingPlatformError(platform) if not isinstance(self.device, cl.Device) and ( self.device_type is not None): # finally a matching device self.device = self.platform.get_devices( device_type=self.device_type) if not self.device: raise MissingDeviceError(self.device_type, self.platform) self.device = self.device[0] self.device_type = self.device.get_info(cl.device_info.TYPE) elif self.lang == 'opencl': self.device_type = 'CL_DEVICE_TYPE_ALL' # check for broken vectorizations self.raise_on_broken() @property def limit_int_overflow(self): """ Deals with issue of integer overflow in array indexing """ return self.lang == 'c' or self.lang == 'opencl' and \ ('intel' in self.platform_name.lower() or 'portable' in self.platform_name.lower()) def raise_on_broken(self): # Currently, NVIDIA w/ neither deep nor wide-vectorizations ( # i.e. 
a "parallel" implementation) breaks sometimes on OpenCL if self.lang == 'opencl' and cl is not None: if not (self.width or self.depth) \ and self.device_type == cl.device_type.GPU: if 'nvidia' in self.platform_name.lower(): raise BrokenPlatformError(self) # otherwise, simply warn logger = logging.getLogger(__name__) logger.warn('Some GPU implementation(s)--NVIDIA--give incorrect' 'values sporadically without either a deep or wide' 'vectorization. Use at your own risk.') if self.width and not self.is_simd and \ self.device_type == cl.device_type.CPU: logger = logging.getLogger(__name__) if 'intel' in self.platform_name.lower(): logger.error('Intel OpenCL is currently broken for wide, ' 'non-explicit-SIMD vectorizations on the CPU. ' 'Use the --explicit_simd flag.') raise BrokenPlatformError(self) if not self.explicit_simd and self._is_simd is None: # only warn if user didn't supply logger.warn('You may wish to use the --explicit_simd flag to ' 'utilize explicit-vector data-types (and avoid ' 'implicit vectorization, which may yield sub-optimal' ' results).') if 'portable' in self.platform_name.lower() and self.unique_pointers: logger = logging.getLogger(__name__) logger.error('Portable OpenCL is currently broken for ' 'unique_pointers.') raise BrokenPlatformError(self) @property def is_simd(self): """ Utility to determine whether to tell Loopy to apply explicit-simd vectorization or not Returns ------- is_simd: bool True if we should attempt to explicitly vectorize the data / arrays """ # priority to test-specification if self._is_simd is not None: return self._is_simd if not (self.width or self.depth): return False # currently SIMD is enabled only wide-CPU vectorizations ( # deep-vectorizations will require further loopy upgrades) if not self.width: if self.explicit_simd: logger = logging.getLogger(__name__) logger.warn('Explicit-SIMD deep-vectorization currently not ' 'implemented, ignoring user-specified SIMD flag') return False if self.explicit_simd is not None: 
# user specified return self.explicit_simd if not cl: if self.explicit_simd is None and not self.explicit_simd_warned: logger = logging.getLogger(__name__) logger.warn('Cannot determine whether to use explicit-SIMD ' 'instructions as PyOpenCL was not found. Either ' 'install PyOpenCL or use the "--explicit_simd" ' 'command line argument. Assuming not SIMD.') self.explicit_simd_warned = True return self.explicit_simd if self.lang == 'opencl': return self.device_type != cl.device_type.GPU return True @property def pre_split(self): """ It is sometimes advantageous to 'pre-split' the outer loop into an inner (vector) iname and an outer (parallel) iname, particularly when using explicit-SIMD w/ loopy (and avoid having to figure out how to simplify floor-div's of the problem size in loopy) If this property is True, utilize a pre-split. """ return self.width and arc.array_splitter._have_split_static(self) @property def initial_condition_dimsize(self): """ Return the necessary IC dimension size based on this :class:`loopy_options` """ ws = arc.work_size.name if not self.pre_split and self.width: return '{}*{}'.format(ws, self.width) return ws @property def initial_condition_loopsize(self): """ Return the necessary loop bound for the global index of inner kernel loops based on this :class:`loopy_options` """ if self.unique_pointers: return self.vector_width if self.vector_width else 1 if not self.pre_split and self.width: return '{}*{}'.format(arc.work_size.name, self.width) return arc.work_size.name @property def vector_width(self): """ Returns the vector width for this :class:`loopy_options` or None if unvectorized """ if not (self.width or self.depth): return None return self.width if self.width else self.depth @property def has_scatter(self): """ Utility to determine whether the target supports scatter writes Currently, only Intel's OpenCL implementation does not (CPU-only 16.1.1) and if attempted, it breaks the auto-vectorization Parameters ---------- None Returns 
        -------
        has_scatter: bool
            Whether the target supports scatter operations or not
        """
        return not (self.lang == 'opencl'
                    and 'intel' in self.platform_name.lower())

    @property
    def platform_is_pyopencl(self):
        """
        Return true, IFF :attr:`platform` is an instance of a
        :class:`pyopencl.Platform`
        """
        return self.platform and cl is not None and isinstance(
            self.platform, cl.Platform)

    @property
    def platform_name(self):
        """
        Returns the supplied OpenCL platform name, or None if not available
        """
        if self.platform_is_pyopencl:
            # a live pyopencl platform object -- use its queried name
            return self.platform.name
        return self.platform


def get_device_list():
    """
    Returns the available pyopencl devices

    Parameters
    ----------
    None

    Returns
    -------
    devices : list of :class:`pyopencl.Device`
        The devices recognized by pyopencl
    """
    device_list = []
    for p in cl.get_platforms():
        device_list.append(p.get_devices())
    # don't need multiple gpu's etc.
    return [x[0] for x in device_list if x]


def get_context(device='0'):
    """
    Simple method to generate a pyopencl context

    Parameters
    ----------
    device : str or :class:`pyopencl.Device`
        The pyopencl string (or device class) denoting the device to use,
        defaults to '0'

    Returns
    -------
    ctx : :class:`pyopencl.Context`
        The running context
    """
    # os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
    if isinstance(device, str):
        # select the device non-interactively via the environment
        os.environ['PYOPENCL_CTX'] = device
        ctx = cl.create_some_context(interactive=False)
    else:
        ctx = cl.Context(devices=[device])
    return ctx


def get_header(knl, codegen_result=None):
    """
    Returns header definition code for a :class:`loopy.LoopKernel`

    Parameters
    ----------
    knl : :class:`loopy.LoopKernel`
        The kernel to generate a header definition for
    codegen_result : :class:`loopy.CodeGenerationResult`
        If supplied, the pre-generated code-gen result for this kernel
        (speeds up header generation)

    Returns
    -------
    Generated device header code

    Notes
    -----
    The kernel's Target and name should be set for proper functioning
    """
    return str(lp.generate_header(knl, codegen_result=codegen_result)[0])


def __set_editor(knl, script):
# set the edit script as the 'editor' os.environ['EDITOR'] = script # turn on code editing edit_knl = lp.set_options(knl, edit_code=True) return edit_knl def set_editor(knl): """ Returns a copy of knl set up for various automated bug-fixes Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate code for Returns ------- edit_knl : :class:`loopy.LoopKernel` The kernel set up for editing """ return __set_editor(knl, edit_script) def set_adept_editor(knl, base_kernels, problem_size=8192, independent_variable=None, dependent_variable=None, output=None, do_not_set=[]): """ Returns a copy of knl set up for various automated bug-fixes Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate code for base_kernels : :class:`loopy.LoopKernel` The kernel :param:`knl` and all dependencies required for Jacobian evaluation. These kernels, should be generated for a problem_size of 1 to facilitate indexing in the wrapped kernel problem_size : int The size of the testing problem independent_variable : :class:`array_creator.creator` The independent variables to compute the Jacobian with respect to dependent_variable : :class:`array_creator.creator` The dependent variables to find the Jacobian of output : :class:`array_creator.creator` The array to store the column-major Jacobian in, ordered by thermo-chemical condition do_not_set : list of :class:`array_creator.creator` Other variables that are computed in this kernel (and hence shouldn't) be set Returns ------- edit_knl : :class:`loopy.LoopKernel` The kernel set up for editing """ # load template with open(adept_edit_script + '.in', 'r') as file: src = file.read() def __get_size_and_stringify(variable): sizes = variable.shape indicies = ['ad_j', 'i'] out_str = variable.name + '[{index}]' from pyjac.core.array_creator import creator if isinstance(variable, creator): if variable.order == 'C': # last index varies fastest, so stride of 'i' is 1 sizes = reversed(sizes) indicies = 
reversed(indicies) elif isinstance(variable, lp.kernel.data.ArrayBase): # find index of test_size strides = [x.stride for x in variable.dim_tags] sizes = variable.shape # if first stride is not problem_size, this is 'C' ordered # hence reverse indicies if strides[0] != problem_size: sizes = reversed(sizes) indicies = reversed(indicies) if len(variable.shape) == 1: return 1, out_str.format(index='ad_j') if len(variable.shape) > 2: assert variable.name == 'jac' size = np.product([x for x in sizes if x != problem_size]) # can't operate on this return None, None out_index = '' offset = 1 out_size = None for size, index in zip(sizes, indicies): if out_index: out_index += ' + ' if str(size) == arc.work_size.name: # per work-size = 1 in this context as we're operating per-thread pass elif size != problem_size: assert out_size is None, ( 'Cannot determine variable size!') out_size = size out_index += '{} * {}'.format(index, offset) offset *= size return out_size, out_str.format(index=out_index) # find the dimension / string representation of the independent # and dependent variables indep_size, indep = __get_size_and_stringify(independent_variable) dep_size, dep = __get_size_and_stringify(dependent_variable) # initializers init_template = Template(""" std::vector<adouble> ad_${name} (${size}); """) set_template = Template(""" for (int i = 0; i < ${size}; ++i) { ad_${name}[i].set_value(${indexed}); } """) zero_template = Template(""" for(int i = 0; i < ${size}; ++i) { ad_${name}[i].set_value(0.0); } """) # get set of written vars written_vars = knl.get_written_variables() for k in base_kernels: written_vars |= k.get_written_variables() initializers = [] for arg in knl.args: if arg.name != dependent_variable.name \ and not isinstance(arg, lp.ValueArg): size, indexed = __get_size_and_stringify(arg) if size is not None: # add initializer initializers.append(init_template.substitute( name=arg.name, size=size, )) if indexed is not None and arg.name not in written_vars: 
initializers.append(set_template.substitute( name=arg.name, indexed=indexed, size=size )) else: initializers.append(zero_template.substitute( name=arg.name, size=size )) dep_set_template = Template(""" for (int i = 0; i < ${size}; ++i) { ${indexed} = ad_${name}[i].value(); } """) setters = [] for var in [dependent_variable] + do_not_set: size, ind = __get_size_and_stringify(var) setters.append(dep_set_template.substitute( indexed=ind, name=var.name, size=size)) setters = '\n'.join(setters) jac_size = dep_size * indep_size # find the output name jac_base_offset = '&' + output.name + \ '[ad_j * {dep_size} * {indep_size}]'.format( dep_size=dep_size, indep_size=indep_size) # get header defn header = get_header(knl) header = header[:header.index(';')] # replace the "const" on the jacobian header = re.sub(r'double\s*const(?=[^,]+{name})'.format(name=output.name), 'double', header) # and function call kernel_calls = [] for k in base_kernels: arg_list = [arg.name for arg in k.args] for i, arg in enumerate(arg_list): name = arg[:] if arg != output.name: name = 'ad_' + name if arg != 'j': name = '&' + name + '[0]' arg_list[i] = name kernel_calls.append('ad_{name}({args});'.format( name=k.name, args=', '.join(arg_list))) # fill in template with open(adept_edit_script, 'w') as file: file.write(utils.subs_at_indent( src, problem_size=problem_size, ad_indep_name='ad_' + independent_variable.name, # indep=indep, # indep_name=independent_variable.name, indep_size=indep_size, ad_dep_name='ad_' + dependent_variable.name, # dep=dep, # dep_name=dependent_variable.name, dep_size=dep_size, jac_base_offset=jac_base_offset, # jac_size=jac_size, jac_name=output.name, function_defn=header, kernel_calls='\n'.join(kernel_calls), initializers='\n'.join(initializers), base_kernels='\n'.join([get_code(x) for x in base_kernels]), setters=setters )) # and make it executable st = os.stat(adept_edit_script) os.chmod(adept_edit_script, st.st_mode | stat.S_IEXEC) return __set_editor(knl, 
adept_edit_script)


def get_code(knl, opts=None):
    """
    Returns the device code for a :class:`loopy.LoopKernel` or fixes
    already generated code

    Parameters
    ----------
    knl : :class:`loopy.LoopKernel` or str
        The kernel to generate code for.  If knl is a string, it is assumed
        to be pregenerated code, and only the editor script must be called
    opts: :class:`loopy_options`
        The options used in created the kernel -- used to detect platform
        specific fixes.  Ignored if not supplied

    Returns
    -------
    code: str
        Generated device code

    Notes
    -----
    The kernel's Target and name should be set for proper functioning
    """
    if isinstance(knl, str):
        code = knl
    else:
        code, _ = lp.generate_code(knl)
    extra_subs = {}
    if opts is None:
        # ignore
        pass
    elif opts.lang == 'opencl' and (
            'intel' in opts.platform_name.lower()
            and ((opts.order == 'C' and opts.width) or (
                opts.order == 'F' and opts.depth) or (
                opts.order == 'F' and opts.width))):
        # If True, this is a finite-difference Jacobian on an Intel OpenCL
        # platform.
        # Hence we have to tell the codefixer about the intel bug
        # https://software.intel.com/en-us/forums/opencl/topic/748841
        extra_subs[r'__kernel void __attribute__ \(\(reqd_work_group_size'
                   r'\(\d+, 1, 1\)\)\) species_rates_kernel'] = \
            r'void species_rates_kernel'
    return codefix('stdin', text_in=code, extra_subs=extra_subs)


def not_is_close(arr1, arr2, **kwargs):
    """
    A utility method that returns the result of:

        numpy.where(numpy.logical_not(numpy.isclose(arr1, arr2, **kwargs)))

    Since I use if often in testing

    Parameters
    ----------
    arr1: :class:`np.ndarray`
        Array to compare
    arr2: :class:`np.ndarray`
        Reference answer
    **kwargs: dict
        Keyword args for :func:`numpy.isclose`

    Returns
    -------
    inds: tuple of :class:`numpy.ndarray`
        result of:
        `numpy.where(numpy.logical_not(numpy.isclose(arr1, arr2, **kwargs)))`
    """
    return np.where(np.logical_not(np.isclose(arr1, arr2, **kwargs)))


class kernel_call(object):
    """
    A wrapper for the various parameters (e.g. args, masks, etc.)
for calling / executing a loopy kernel """ def __init__(self, name, ref_answer, compare_axis=1, compare_mask=None, out_mask=None, input_mask=[], strict_name_match=False, chain=None, check=True, post_process=None, allow_skip=False, other_compare=None, atol=1e-8, rtol=1e-5, equal_nan=False, ref_ans_compare_mask=None, tiling=True, **input_args): """ The initializer for the :class:`kernel_call` object Parameters ---------- name : str The kernel name, used for matching ref_answer : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` The reference answer to compare to compare_axis : int, optional An axis to apply the compare_mask along, unused if compare_mask is None compare_mask : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` An optional list of indexes to compare, useful when the kernel only computes partial results. Should match length of ref_answer ref_ans_compare_mask : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` Same as the compare_mask, but for the reference answer. Necessary for some kernel tests, as the reference answer is not the same size as the output, which causes issues for split arrays. If not supplied, the regular :param:`compare_mask` will be used tiling: bool, [True] If True (default), the elements in the :param:`compare_mask` should be combined, e.g., if two arrays [[1, 2] and [3, 4]] are supplied to :param:`compare_mask` with tiling turned on, four resulting indicies will be compared -- [1, 3], [1, 4], [2, 3], and [2, 4]. If tiling is turned of, the compare mask will be treated as a list of indicies, e.g., (for the previous example) -- [1, 3] and [2, 4]. out_mask : int, optional The index(ices) of the returned array to aggregate. Should match length of ref_answer input_mask : list of str or function, optional An optional list of input arguements to filter out If a function is passed, the expected signature is along the lines of: def fcn(self, arg_name): ... 
and returns True iff this arg_name should be used strict_name_match : bool, optional If true, only kernels exactly matching this name will be excecuted Defaut is False chain : function, optional If not None, a function of signature similar to: def fcn(self, out_values): .... is expected. This function should take the output values from a previous kernel call, and place in the input args for this kernel call as necessary post_process : function, optional If not None, a function of signature similar to: def fcn(self, out_values): .... is expected. This function should take the output values from this kernel call, and process them as expected to compare to results. Currently used only in comparison of reaction rates to Cantera (to deal w/ falloff etc.) check : bool If False, do not check result (useful when chaining to check only the last result) Default is True allow_skip : bool If True, allow this kernel call to be check results without actually executing a kernel (checks the last kernel that was executed). This is useful for selectively turning off kernels (e.g. if there are no reverse reactions) other_compare : Callable, optional If supplied, a function that compares output values not checked in by this kernel call. This is useful in the case of NaN's resulting from derivatives of (e.g.,) log(0), to ensure our arrays are spitting out very large (but finite) numbers rtol : float [Default 1e-5] The relative tolerance for comparison to reference answers. For Jacobian correctness testing this may have to be loosened atol : float [Default 1e-8] The absolute tolerance for comparison to reference answers. 
equal_nan : bool [False] If supplied, whether to consider NaN's equal for reference testing input_args : dict of `numpy.array`s The arguements to supply to the kernel Returns ------- out_ref : list of :class:`numpy.ndarray` The value(s) of the evaluated :class:`loopy.LoopKernel` """ self.name = name self.ref_answer = ref_answer if isinstance(ref_answer, list): num_check = len(ref_answer) else: num_check = 1 self.ref_answer = [ref_answer] self.compare_axis = compare_axis if compare_mask is not None: self.compare_mask = compare_mask else: self.compare_mask = [None for i in range(num_check)] if ref_ans_compare_mask is not None: self.ref_ans_compare_mask = ref_ans_compare_mask else: self.ref_ans_compare_mask = [None for i in range(num_check)] self.out_mask = out_mask self.input_mask = input_mask self.input_args = input_args self.strict_name_match = strict_name_match self.kernel_args = None self.chain = chain self.post_process = post_process self.check = check self.current_order = None self.allow_skip = allow_skip self.other_compare = other_compare self.tiling = tiling # pull any rtol / atol from env / test config as specified by user from pyjac.utils import get_env_val rtol = float(get_env_val('rtol', rtol)) atol = float(get_env_val('atol', atol)) self.rtol = rtol self.atol = atol self.equal_nan = equal_nan self.do_not_copy = set() def is_my_kernel(self, knl): """ Tests whether this kernel should be run with this call Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to call """ if self.strict_name_match: return self.name == knl.name return True def set_state(self, array_splitter, order='F', namestore=None, jac_format=JacobianFormat.full): """ Updates the kernel arguements, and and compare axis to the order given If the 'arg' is a function, it will be called to get the correct answer Parameters ---------- array_splitter: :class:`pyjac.core.array_creator.array_splitter` The array splitter of the owning 
:class:`kernek_utils.kernel_gen.kernel.kernel_generator`, used to operate on numpy arrays if necessary order : {'C', 'F'} The memory layout of the arrays, C (row major) or Fortran (column major) namestore : :class:`NameStore` Must be supplied if :param:`jac_format` is of type :class:`JacobianFormat.sparse`, in order to pull row / column indicies for conversion to / from sparse matricies jac_format: :class:`JacobianFormat` [JacobianFormat.full] If sparse, we are testing a sparse matrix (and :param:`namestore` must be supplied) """ self.current_order = order # filter out bad input args_copy = self.input_args.copy() if self.input_mask is not None: if six.callable(self.input_mask): args_copy = {x: args_copy[x] for x in args_copy if self.input_mask(self, x)} else: args_copy = {x: args_copy[x] for x in args_copy if x not in self.input_mask} for key in args_copy: if six.callable(args_copy[key]): # it's a function args_copy[key] = args_copy[key](order) self.kernel_args = args_copy self.transformed_ref_ans = [np.array(ans, order=order, copy=True) for ans in self.ref_answer] self.jac_format = jac_format if jac_format == JacobianFormat.sparse: from pyjac.tests.test_utils import sparsify # need to convert the jacobian arg to a sparse representation # the easiest way to deal with this is to convert the kernel argument # to the sparse dimensions # Then afterwards we can use the row / col inds as an intermediate # index in the comparison step self.kernel_args['jac'] = np.array(self.kernel_args['jac'][ :, namestore.flat_jac_row_inds.initializer, namestore.flat_jac_col_inds.initializer], order=order, copy=True) # save for comparable self.row_inds = namestore.jac_row_inds.initializer self.col_inds = namestore.jac_col_inds.initializer # sparsify transformed answer self.transformed_ref_ans = [ sparsify(array, self.col_inds, self.row_inds, self.current_order) if array.ndim >= 3 else array for array in self.transformed_ref_ans] # and finally feed through the array splitter 
self.current_split = array_splitter self.kernel_args = array_splitter.split_numpy_arrays(self.kernel_args) self.transformed_ref_ans = array_splitter.split_numpy_arrays( self.transformed_ref_ans) def __call__(self, knl, queue): """ Calls the kernel, filtering input / output args as required Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to call queue : :class:`pyopencl.Queue` The command queue Returns ------- out : list of :class:`numpy.ndarray` The (potentially filtered) output variables """ if isinstance(knl.target, lp.PyOpenCLTarget): evt, out = knl(queue, out_host=True, **self.kernel_args) elif isinstance(knl.target, lp.CTarget): evt, out = knl(**{ k: v.copy(order=self.current_order) if ( isinstance(v, np.ndarray) and k not in self.do_not_copy) else v for k, v in self.kernel_args.items()}) else: raise NotImplementedError if self.out_mask is not None: return [out[ind] for ind in self.out_mask] else: return [out[0]] def _get_comparable(self, variable, index, is_answer=False): """ Selects the data to compare from the supplied variable depending on the compare mask / axes supplied """ mask = self.ref_ans_compare_mask[index] if is_answer \ else self.compare_mask[index] if mask is None and is_answer: # use the regular compare mask, as the reference answer specific one # was not supplied mask = self.compare_mask[index] # if no mask if mask is None: return variable if six.callable(mask): # see if it's a supplied callable return mask(self, variable, index, is_answer=is_answer) from pyjac.tests.test_utils import select_elements return select_elements(variable, mask, self.compare_axis, tiling=self.tiling) def compare(self, output_variables): """ Compare the output variables to the given reference answer Parameters ---------- output_variables : :class:`numpy.ndarray` or :class:`numpy.ndarray` The output variables to test Returns ------- match : bool True IFF the masked output variables match the supplied reference answer for this :class:`kernel_call` """ 
def _check_mask(mask): # check that the mask is one of: # 1. a list of length equal to the size of the number of outputs # 2. a list of indicies (indicated by the compare axis set to -1) # 3. a callable function / object that can figure out extracting the # comparable entries on it's own assert (isinstance(mask, list) and len(mask) == len(output_variables)) or \ not self.tiling or six.callable(mask), ( 'Compare mask does not match output variables!') _check_mask(self.compare_mask) _check_mask(self.ref_ans_compare_mask) allclear = True for i in range(len(output_variables)): outv = output_variables[i].copy() ref_answer = self.transformed_ref_ans[i].copy() if self.compare_mask[i] is not None: outv = self._get_comparable(outv, i) if outv.shape != ref_answer.shape: # apply the same transformation to the answer ref_answer = self._get_comparable(ref_answer, i, is_answer=True) else: outv = outv.squeeze() ref_answer = ref_answer.squeeze() allclear = allclear and np.allclose(outv, ref_answer, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan) if self.other_compare is not None: allclear = allclear and self.other_compare( self, output_variables[i].copy(), self.transformed_ref_ans[i].copy(), self.compare_mask[i]) return allclear def populate(knl, kernel_calls, device='0', editor=None): """ This method runs the supplied :class:`loopy.LoopKernel` (or list thereof), and is often used by :function:`auto_run` Parameters ---------- knl : :class:`loopy.LoopKernel` or list of :class:`loopy.LoopKernel` The kernel to test, if a list of kernels they will be successively applied and the end result compared kernel_calls : :class:`kernel_call` or list thereof The masks / ref_answers, etc. 
to use in testing device : str The pyopencl string denoting the device to use, defaults to '0' editor : callable If not none, a callable function or object that takes a :class:`loopy.LoopKernel` as the sole arguement, and returns the kernel with editing turned on (for used with auto-differentiation) If not specified, the default (opencl) editor will be invoked Returns ------- out_ref : list of :class:`numpy.ndarray` The value(s) of the evaluated :class:`loopy.LoopKernel` """ assert len(knl), 'No kernels supplied!' # create context ctx = None if any(isinstance(k.target, lp.PyOpenCLTarget) for k in knl): ctx = get_context(device) if editor is None: editor = set_editor def __inner(queue=None): output = [] kc_ind = 0 oob = False while not oob: # handle weirdness between list / non-list input try: kc = kernel_calls[kc_ind] kc_ind += 1 except IndexError: oob = True break # reached end of list except TypeError: # not a list oob = True # break on next run kc = kernel_calls # create the outputs if kc.out_mask is not None: out_ref = [None for i in kc.out_mask] else: out_ref = [None] found = False # run kernels for k in knl: # test that we want to run this one if kc.is_my_kernel(k): found = True # set the editor to avoid intel bugs test_knl = editor(k) if isinstance(test_knl.target, lp.PyOpenCLTarget): # recreate with device test_knl = test_knl.copy( target=lp.PyOpenCLTarget(device=device)) # check for chaining if kc.chain: kc.chain(kc, output) # run! out = kc(test_knl, queue) if kc.post_process: kc.post_process(kc, out) # output mapping if all(x is None for x in out_ref): # if the outputs are none, we init to zeros # and avoid copying zeros over later data! 
out_ref = [np.zeros_like(x) for x in out] for ind in range(len(out)): # get indicies that are non-zero (already in there) # or non infinity/nan # try w/o finite check (I'm paranoid, don't want to mask) # any bad data copy_inds = np.where(np.logical_not(out[ind] == 0)) # copy_inds = np.where(np.logical_not( # np.logical_or(np.isinf(out[ind]), # out[ind] == 0, np.isnan(out[ind]))), # ) out_ref[ind][copy_inds] = out[ind][copy_inds] output.append(out_ref) assert found or kc.allow_skip, ( 'No kernels could be found to match kernel call {}'.format( kc.name)) return output if ctx is not None: with cl.CommandQueue(ctx) as queue: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=cl.CompilerWarning) output = __inner(queue) queue.flush() # release context clear_first_arg_caches() del ctx else: output = __inner() return output def auto_run(knl, kernel_calls, device='0'): """ This method tests the supplied :class:`loopy.LoopKernel` (or list thereof) against a reference answer Parameters ---------- knl : :class:`loopy.LoopKernel` or list of :class:`loopy.LoopKernel` The kernel to test, if a list of kernels they will be successively applied and the end result compared kernel_calls : :class:`kernel_call` The masks / ref_answers, etc. 
to use in testing device : str The pyopencl string denoting the device to use, defaults to '0' input_args : dict of `numpy.array`s The arguements to supply to the kernel Returns ------- result : bool True if all tests pass """ # run kernel # check lists if not isinstance(knl, list): knl = [knl] out = populate(knl, kernel_calls, device=device) try: result = True for i, kc in enumerate(kernel_calls): if kc.check: ind = i if kc.allow_skip and all(x is None for x in out[i]): # find the last one for which we have data ind = next(ind for ind in reversed(range(i)) if not any(x is None for x in out[ind])) result = result and kc.compare(out[ind]) return result except TypeError as e: if str(e) == "'kernel_call' object is not iterable": # if not iterable return kernel_calls.compare(out[0]) raise e def get_target(lang, device=None, compiler=None): """ Parameters ---------- lang : str One of the supported languages, {'c', 'cuda', 'opencl'} device : :class:`pyopencl.Device` If supplied, and lang is 'opencl', passed to the :class:`loopy.PyOpenCLTarget` compiler: str If supplied, the C-compiler to use Returns ------- The correct loopy target type """ utils.check_lang(lang) # set target if lang == 'opencl': if cl is not None: return lp.PyOpenCLTarget(device=device) return lp.OpenCLTarget() elif lang == 'c': return lp.ExecutableCTarget(compiler=compiler) elif lang == 'cuda': return lp.CudaTarget() elif lang == 'ispc': return lp.ISPCTarget() class AdeptCompiler(CPlusPlusCompiler): def __init__(self, *args, **kwargs): from ..siteconf import ADEPT_INC_DIR, ADEPT_LIB_DIR, ADEPT_LIBNAME from ..siteconf import CXXFLAGS defaults = kwargs.copy() defaults['libraries'] = ADEPT_LIBNAME if 'cflags' not in defaults: defaults['cflags'] = [] if CXXFLAGS: defaults['cflags'] = [x for x in CXXFLAGS if x not in defaults['cflags'] and x.strip()] if ADEPT_LIB_DIR: defaults['library_dirs'] = ADEPT_LIB_DIR if ADEPT_INC_DIR: defaults['cflags'] = ['-I{}'.format(x) for x in ADEPT_INC_DIR] # update to use any 
user specified info defaults.update(kwargs) # get toolchain from pyjac.libgen import get_toolchain toolchain = get_toolchain('c', executable=False, **defaults) # and create super(AdeptCompiler, self).__init__(toolchain=toolchain) def build(self, *args, **kwargs): """override from CPlusPlusCompiler to load Adept into ctypes and avoid missing symbol errors""" from ctypes.util import find_library from ctypes import CDLL, RTLD_GLOBAL CDLL(find_library('adept'), mode=RTLD_GLOBAL) return super(AdeptCompiler, self).build(*args, **kwargs)
en
0.651991
# package imports # local imports Loads a code-generation platform from a file, and returns the corresponding :class:`loopy_options` Parameters ---------- codegen: str The user-specified code-generation platform yaml file Returns ------- :class:`loopy_options` The loaded platform Raises ------ :class:`cerberus.ValidationError`: A validation error if the supplied codegen platform doesn't comply with the :doc:`../schemas/codegen_platform.yaml` # TODO: implement memory limits loading here # optional params get passed as kwargs Loopy Objects class Attributes ---------- width : int If not None, the SIMD lane/SIMT block width. Cannot be specified along with depth depth : int If not None, the SIMD lane/SIMT block depth. Cannot be specified along with width ilp : bool If True, use the ILP tag on the species loop. Cannot be specified along with unr unr : int If not None, the unroll length to apply to the species loop. Cannot be specified along with ilp order : {'C', 'F'} The memory layout of the arrays, C (row major) or Fortran (column major) lang : ['opencl', 'c', 'cuda'] One of the supported languages rate_spec : RateSpecialization Controls the level to which Arrenhius rate evaluations are specialized rate_spec_kernels : bool If True, break different Arrenhius rate specializations into different kernels rop_net_kernels : bool If True, break different ROP values (fwd / back / pdep) into different kernels platform : {'CPU', 'GPU', or other vendor specific name} The OpenCL platform to run on. * If 'CPU' or 'GPU', the first available matching platform will be used * If a vendor specific string, it will be passed to pyopencl to get the platform use_atomic_doubles : bool [True] Use atomic updates where necessary for proper deep-vectorization If not, a sequential deep-vectorization (with only one thread/lane active) will be used use_atomic_ints : bool [True] Use atomic integer operations for the driver kernel. 
jac_type: :class:`JacobianType` [JacobianType.full] The type of Jacobian kernel (full or approximate) to generate jac_format: :class:`JacobianFormat` [JacobianFormat.full] The format of Jacobian kernel (full or sparse) to generate is_simd: bool [None] If supplied, override the user-specified flag :param:`explicit_simd`, used for testing. unique_pointers: bool [False] If specified, this indicates that the pointers passed to the generated pyJac methods will be unique (i.e., distinct per OpenMP thread / OpenCL work-group). This option is most useful for coupling to external codes an that have already been parallelized. explicit_simd: bool [False] Attempt to utilize explict-SIMD instructions in OpenCL # need to find the first platform that has the device of the correct # type # finally a matching device # check for broken vectorizations Deals with issue of integer overflow in array indexing # Currently, NVIDIA w/ neither deep nor wide-vectorizations ( # i.e. a "parallel" implementation) breaks sometimes on OpenCL # otherwise, simply warn # only warn if user didn't supply Utility to determine whether to tell Loopy to apply explicit-simd vectorization or not Returns ------- is_simd: bool True if we should attempt to explicitly vectorize the data / arrays # priority to test-specification # currently SIMD is enabled only wide-CPU vectorizations ( # deep-vectorizations will require further loopy upgrades) # user specified It is sometimes advantageous to 'pre-split' the outer loop into an inner (vector) iname and an outer (parallel) iname, particularly when using explicit-SIMD w/ loopy (and avoid having to figure out how to simplify floor-div's of the problem size in loopy) If this property is True, utilize a pre-split. 
Return the necessary IC dimension size based on this :class:`loopy_options` Return the necessary loop bound for the global index of inner kernel loops based on this :class:`loopy_options` Returns the vector width for this :class:`loopy_options` or None if unvectorized Utility to determine whether the target supports scatter writes Currently, only Intel's OpenCL implementation does not (CPU-only 16.1.1) and if attempted, it breaks the auto-vectorization Parameters ---------- None Returns ------- has_scatter: bool Whether the target supports scatter operations or not Return true, IFF :attr:`platform` is an instance of a :class:`pyopencl.Platform` Returns the suppled OpenCL platform name, or None if not available Returns the available pyopencl devices Parameters ---------- None Returns ------- devices : list of :class:`pyopencl.Device` The devices recognized by pyopencl # don't need multiple gpu's etc. Simple method to generate a pyopencl context Parameters ---------- device : str or :class:`pyopencl.Device` The pyopencl string (or device class) denoting the device to use, defaults to '0' Returns ------- ctx : :class:`pyopencl.Context` The running context # os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1' Returns header definition code for a :class:`loopy.LoopKernel` Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate a header definition for codegen_result : :class:`loopy.CodeGenerationResult` If supplied, the pre-generated code-gen result for this kernel (speeds up header generation) Returns ------- Generated device header code Notes ----- The kernel's Target and name should be set for proper functioning # set the edit script as the 'editor' # turn on code editing Returns a copy of knl set up for various automated bug-fixes Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate code for Returns ------- edit_knl : :class:`loopy.LoopKernel` The kernel set up for editing Returns a copy of knl set up for various automated 
bug-fixes Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to generate code for base_kernels : :class:`loopy.LoopKernel` The kernel :param:`knl` and all dependencies required for Jacobian evaluation. These kernels, should be generated for a problem_size of 1 to facilitate indexing in the wrapped kernel problem_size : int The size of the testing problem independent_variable : :class:`array_creator.creator` The independent variables to compute the Jacobian with respect to dependent_variable : :class:`array_creator.creator` The dependent variables to find the Jacobian of output : :class:`array_creator.creator` The array to store the column-major Jacobian in, ordered by thermo-chemical condition do_not_set : list of :class:`array_creator.creator` Other variables that are computed in this kernel (and hence shouldn't) be set Returns ------- edit_knl : :class:`loopy.LoopKernel` The kernel set up for editing # load template # last index varies fastest, so stride of 'i' is 1 # find index of test_size # if first stride is not problem_size, this is 'C' ordered # hence reverse indicies # can't operate on this # per work-size = 1 in this context as we're operating per-thread # find the dimension / string representation of the independent # and dependent variables # initializers std::vector<adouble> ad_${name} (${size}); for (int i = 0; i < ${size}; ++i) { ad_${name}[i].set_value(${indexed}); } for(int i = 0; i < ${size}; ++i) { ad_${name}[i].set_value(0.0); } # get set of written vars # add initializer for (int i = 0; i < ${size}; ++i) { ${indexed} = ad_${name}[i].value(); } # find the output name # get header defn # replace the "const" on the jacobian # and function call # fill in template # indep=indep, # indep_name=independent_variable.name, # dep=dep, # dep_name=dependent_variable.name, # jac_size=jac_size, # and make it executable Returns the device code for a :class:`loopy.LoopKernel` or fixes alreay generated code Parameters ---------- knl : 
:class:`loopy.LoopKernel` or str The kernel to generate code for. If knl is a string, it is assumed to be pregenerated code, and only the editor script must be called opts: :class:`loopy_options` The options used in created the kernel -- used to detect platform specific fixes. Ignored if not supplied Returns ------- code: str Generated device code Notes ----- The kernel's Target and name should be set for proper functioning # ignore # If True, this is a finite-difference Jacobian on an Intel OpenCL platform # Hence we have to tell the codefixer about the intel bug # https://software.intel.com/en-us/forums/opencl/topic/748841 A utility method that returns the result of: numpy.where(numpy.logical_not(numpy.isclose(arr1, arr2, **kwargs))) Since I use if often in testing Parameters ---------- arr1: :class:`np.ndarray` Array to compare arr2: :class:`np.ndarray` Reference answer **kwargs: dict Keyword args for :func:`numpy.isclose` Returns ------- inds: tuple of :class:`numpy.ndarray` result of: `numpy.where(numpy.logical_not(numpy.isclose(arr1, arr2, **kwargs)))` A wrapper for the various parameters (e.g. args, masks, etc.) for calling / executing a loopy kernel The initializer for the :class:`kernel_call` object Parameters ---------- name : str The kernel name, used for matching ref_answer : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` The reference answer to compare to compare_axis : int, optional An axis to apply the compare_mask along, unused if compare_mask is None compare_mask : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` An optional list of indexes to compare, useful when the kernel only computes partial results. Should match length of ref_answer ref_ans_compare_mask : :class:`numpy.ndarray` or list of :class:`numpy.ndarray` Same as the compare_mask, but for the reference answer. Necessary for some kernel tests, as the reference answer is not the same size as the output, which causes issues for split arrays. 
If not supplied, the regular :param:`compare_mask` will be used tiling: bool, [True] If True (default), the elements in the :param:`compare_mask` should be combined, e.g., if two arrays [[1, 2] and [3, 4]] are supplied to :param:`compare_mask` with tiling turned on, four resulting indicies will be compared -- [1, 3], [1, 4], [2, 3], and [2, 4]. If tiling is turned of, the compare mask will be treated as a list of indicies, e.g., (for the previous example) -- [1, 3] and [2, 4]. out_mask : int, optional The index(ices) of the returned array to aggregate. Should match length of ref_answer input_mask : list of str or function, optional An optional list of input arguements to filter out If a function is passed, the expected signature is along the lines of: def fcn(self, arg_name): ... and returns True iff this arg_name should be used strict_name_match : bool, optional If true, only kernels exactly matching this name will be excecuted Defaut is False chain : function, optional If not None, a function of signature similar to: def fcn(self, out_values): .... is expected. This function should take the output values from a previous kernel call, and place in the input args for this kernel call as necessary post_process : function, optional If not None, a function of signature similar to: def fcn(self, out_values): .... is expected. This function should take the output values from this kernel call, and process them as expected to compare to results. Currently used only in comparison of reaction rates to Cantera (to deal w/ falloff etc.) check : bool If False, do not check result (useful when chaining to check only the last result) Default is True allow_skip : bool If True, allow this kernel call to be check results without actually executing a kernel (checks the last kernel that was executed). This is useful for selectively turning off kernels (e.g. 
if there are no reverse reactions) other_compare : Callable, optional If supplied, a function that compares output values not checked in by this kernel call. This is useful in the case of NaN's resulting from derivatives of (e.g.,) log(0), to ensure our arrays are spitting out very large (but finite) numbers rtol : float [Default 1e-5] The relative tolerance for comparison to reference answers. For Jacobian correctness testing this may have to be loosened atol : float [Default 1e-8] The absolute tolerance for comparison to reference answers. equal_nan : bool [False] If supplied, whether to consider NaN's equal for reference testing input_args : dict of `numpy.array`s The arguements to supply to the kernel Returns ------- out_ref : list of :class:`numpy.ndarray` The value(s) of the evaluated :class:`loopy.LoopKernel` # pull any rtol / atol from env / test config as specified by user Tests whether this kernel should be run with this call Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to call Updates the kernel arguements, and and compare axis to the order given If the 'arg' is a function, it will be called to get the correct answer Parameters ---------- array_splitter: :class:`pyjac.core.array_creator.array_splitter` The array splitter of the owning :class:`kernek_utils.kernel_gen.kernel.kernel_generator`, used to operate on numpy arrays if necessary order : {'C', 'F'} The memory layout of the arrays, C (row major) or Fortran (column major) namestore : :class:`NameStore` Must be supplied if :param:`jac_format` is of type :class:`JacobianFormat.sparse`, in order to pull row / column indicies for conversion to / from sparse matricies jac_format: :class:`JacobianFormat` [JacobianFormat.full] If sparse, we are testing a sparse matrix (and :param:`namestore` must be supplied) # filter out bad input # it's a function # need to convert the jacobian arg to a sparse representation # the easiest way to deal with this is to convert the kernel argument # to the 
sparse dimensions # Then afterwards we can use the row / col inds as an intermediate # index in the comparison step # save for comparable # sparsify transformed answer # and finally feed through the array splitter Calls the kernel, filtering input / output args as required Parameters ---------- knl : :class:`loopy.LoopKernel` The kernel to call queue : :class:`pyopencl.Queue` The command queue Returns ------- out : list of :class:`numpy.ndarray` The (potentially filtered) output variables Selects the data to compare from the supplied variable depending on the compare mask / axes supplied # use the regular compare mask, as the reference answer specific one # was not supplied # if no mask # see if it's a supplied callable Compare the output variables to the given reference answer Parameters ---------- output_variables : :class:`numpy.ndarray` or :class:`numpy.ndarray` The output variables to test Returns ------- match : bool True IFF the masked output variables match the supplied reference answer for this :class:`kernel_call` # check that the mask is one of: # 1. a list of length equal to the size of the number of outputs # 2. a list of indicies (indicated by the compare axis set to -1) # 3. a callable function / object that can figure out extracting the # comparable entries on it's own # apply the same transformation to the answer This method runs the supplied :class:`loopy.LoopKernel` (or list thereof), and is often used by :function:`auto_run` Parameters ---------- knl : :class:`loopy.LoopKernel` or list of :class:`loopy.LoopKernel` The kernel to test, if a list of kernels they will be successively applied and the end result compared kernel_calls : :class:`kernel_call` or list thereof The masks / ref_answers, etc. 
to use in testing device : str The pyopencl string denoting the device to use, defaults to '0' editor : callable If not none, a callable function or object that takes a :class:`loopy.LoopKernel` as the sole arguement, and returns the kernel with editing turned on (for used with auto-differentiation) If not specified, the default (opencl) editor will be invoked Returns ------- out_ref : list of :class:`numpy.ndarray` The value(s) of the evaluated :class:`loopy.LoopKernel` # create context # handle weirdness between list / non-list input # reached end of list # not a list # break on next run # create the outputs # run kernels # test that we want to run this one # set the editor to avoid intel bugs # recreate with device # check for chaining # run! # output mapping # if the outputs are none, we init to zeros # and avoid copying zeros over later data! # get indicies that are non-zero (already in there) # or non infinity/nan # try w/o finite check (I'm paranoid, don't want to mask) # any bad data # copy_inds = np.where(np.logical_not( # np.logical_or(np.isinf(out[ind]), # out[ind] == 0, np.isnan(out[ind]))), # ) # release context This method tests the supplied :class:`loopy.LoopKernel` (or list thereof) against a reference answer Parameters ---------- knl : :class:`loopy.LoopKernel` or list of :class:`loopy.LoopKernel` The kernel to test, if a list of kernels they will be successively applied and the end result compared kernel_calls : :class:`kernel_call` The masks / ref_answers, etc. 
to use in testing device : str The pyopencl string denoting the device to use, defaults to '0' input_args : dict of `numpy.array`s The arguements to supply to the kernel Returns ------- result : bool True if all tests pass # run kernel # check lists # find the last one for which we have data # if not iterable Parameters ---------- lang : str One of the supported languages, {'c', 'cuda', 'opencl'} device : :class:`pyopencl.Device` If supplied, and lang is 'opencl', passed to the :class:`loopy.PyOpenCLTarget` compiler: str If supplied, the C-compiler to use Returns ------- The correct loopy target type # set target # update to use any user specified info # get toolchain # and create override from CPlusPlusCompiler to load Adept into ctypes and avoid missing symbol errors
1.928156
2
fbssdc/bpy.py
Eijebong/binjs-ref
391
6621513
<filename>fbssdc/bpy.py #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import format import idl import opt import os import strings import subprocess import tempfile import tycheck import types import argparse import json import sys import shutil def encode_dir(dict_file, binjs_encode, in_path, out_path, skip_errors=True, copy_source=True): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] string_dict = strings.read_dict(dict_file, with_signature=True) in_path = os.path.abspath(in_path) out_path = os.path.abspath(out_path) ignored_out_directory = tempfile.TemporaryDirectory() for root, _, sources in os.walk(in_path): # 1. Prepare destination directory suffix = os.path.relpath(root, in_path) dest_root = os.path.join(out_path, suffix) print ('Encoding from {root} to {dest_root}'.format(root=root, dest_root=dest_root)) os.makedirs(dest_root, exist_ok=True) for source in sources: source_path = os.path.join(root, source) if not source[-3:] == '.js': print ('...skipping {}'.format(source_path)) continue # 2. Extract AST print ('Preprocessing {}'.format(source_path)) process = subprocess.run([binjs_encode, '--quiet', '--show-ast', '--in', source_path, '--out', ignored_out_directory.name], capture_output=True) try: proggy = json.loads(process.stdout.decode('utf-8')) # 3. Encode dest_path = os.path.join(dest_root,source[:-3] + '.binjs') print ('Encoding {source_path} => {dest_path}'.format(source_path=source_path, dest_path=dest_path)) dest_file = open(dest_path, 'wb') format.write(types, string_dict, ty_script, proggy, dest_file) # 4. 
Copy source file if copy_source: shutil.copy(source_path, dest_root) except: if skip_errors: print ('...does not parse') else: raise def encode(dict_file, in_file, out_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] string_dict = strings.read_dict(dict_file, with_signature=True) proggy = json.loads(in_file.read()) format.write(types, string_dict, ty_script, proggy, out_file) def decode(dict_file, in_file, out_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] string_dict = strings.read_dict(dict_file, with_signature=True) proggy = format.read(types, string_dict, ty_script, in_file) json.dump(proggy, out_file) def optimize(in_file, out_file): proggy = json.loads(in_file.read()) proggy = opt.optimize(proggy) json.dump(proggy, out_file) def make_dict(in_files, out_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] sources = [] for in_file in in_files: proggy = json.loads(in_file.read()) tycheck.FloatFixer(types).rewrite(ty_script, proggy) tycheck.TypeChecker(types).check_any(ty_script, proggy) sources.append((ty_script, proggy)) string_dict = strings.prepare_dict(types, sources) strings.write_dict(out_file, string_dict, with_signature=True) def pretty_json(in_file): json.dump(sys.stdout, json.loads(in_file.read()), indent=2) def type_check(in_files): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] for in_file in in_files: proggy = json.loads(in_file.read()) print(f'checking {in_file.name}... ', end='') try: tycheck.TypeChecker(types).check_any(ty_script, proggy) except Exception as e: # FIXME: Make this a specific exception type, do error recovery, etc. 
print(f'NG, {e!s}') continue print('OK') def fix_types(in_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] proggy = json.loads(in_file.read()) tycheck.FloatFixer(types).rewrite(ty_script, proggy) json.dump(proggy, sys.stdout) def main(): # We get waaay past the 1000 limit even on relatively simple examples. sys.setrecursionlimit(10000) parser = argparse.ArgumentParser() parser.set_defaults(func=lambda args: print('use --help to see commands')) subs = parser.add_subparsers(title='subcommands') encode_dir_parser = subs.add_parser('encode-dir', help='Encode a full directory (from .js).', description='''Caveats: (1) dictionary misses are not supported yet (2) not all file sections are compressed natively yet; the output should be compressed with Brotli ''') encode_dir_parser.add_argument('binjs_encode', help='path to the binjs_encode binary') encode_dir_parser.add_argument('dictionary', type=argparse.FileType('rb'), help='the dictionary file to read from') encode_dir_parser.add_argument('indir', help='the path from which to read *.js files') encode_dir_parser.add_argument('outdir', help='the path to which to write *.binjs files') encode_dir_parser.add_argument('--ignore-errors', nargs='?', const=True, default=False, help='if specified, skip files that cannot be encoded'), encode_dir_parser.add_argument('--copy-source-files', nargs='?', const=True, default=False, help='if specified, copy .js source files to the target directory after encoding them'), encode_dir_parser.set_defaults(func=lambda args: encode_dir(args.dictionary, args.binjs_encode, args.indir, args.outdir, args.skip_errors, args.copy_source_files)) encode_parser = subs.add_parser('encode-ast', help='AST JSON to binary.', description='''Caveats: (1) dictionary misses are not supported yet (2) not all file sections are compressed natively yet; the output should be compressed with Brotli ''') encode_parser.add_argument('dictionary', type=argparse.FileType('rb'), help='the dictionary 
file to read from') encode_parser.add_argument('input', type=argparse.FileType('r'), help='the AST JSON file read from') encode_parser.add_argument('output', type=argparse.FileType('wb'), help='the binary file to write to') encode_parser.set_defaults(func=lambda args: encode(args.dictionary, args.input, args.output)) decode_parser = subs.add_parser('decode-ast', help='Binary to AST JSON.', description=''' Caveat: dictionary identity is not checked yet; use the same dictionary as encoding. ''') decode_parser.add_argument('dictionary', type=argparse.FileType('rb'), help='the dictionary file to read from') decode_parser.add_argument('input', type=argparse.FileType('rb'), help='the binary file read from') decode_parser.add_argument('output', type=argparse.FileType('w'), help='the JSON file to write AST to') decode_parser.set_defaults(func=lambda args: decode(args.dictionary, args.input, args.output)) opt_parser = subs.add_parser('optimize-ast', help='Adds laziness annotations to an AST.') opt_parser.add_argument('input', type=argparse.FileType('r'), help='the AST JSON file read from') opt_parser.add_argument('output', type=argparse.FileType('w'), help='the AST JSON file to write to') opt_parser.set_defaults(func=lambda args: optimize(args.input, args.output)) make_dict_parser = subs.add_parser('make-dict', help='Makes a string dictionary from AST JSON files.') make_dict_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help='the AST JSON files to read from') make_dict_parser.add_argument('output', type=argparse.FileType('wb'), help='the binary file to write to') make_dict_parser.set_defaults(func=lambda args: make_dict(args.input, args.output)) pretty_json_parser = subs.add_parser('pretty-json', help='Pretty-prints JSON, which is useful for AST diffs.') pretty_json_parser.add_argument('input', type=argparse.FileType('r'), help='the JSON file to read from') pretty_json_parser.set_defaults(func=lambda args: pretty_json(args.input)) type_check_parser = 
subs.add_parser('type-check', help='Checks AST JSON conforms to ES6 IDL.') type_check_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help='the AST JSON file to read from') type_check_parser.set_defaults(func=lambda args: type_check(args.input)) fix_types_parser = subs.add_parser('fix-types', help='Repairs AST JSON which has ints for doubles.') fix_types_parser.add_argument('input', type=argparse.FileType('r'), help='the AST JSON file to read from') fix_types_parser.set_defaults(func=lambda args: fix_types(args.input)) args = parser.parse_args() args.func(args) if __name__ == '__main__': main()
<filename>fbssdc/bpy.py #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import format import idl import opt import os import strings import subprocess import tempfile import tycheck import types import argparse import json import sys import shutil def encode_dir(dict_file, binjs_encode, in_path, out_path, skip_errors=True, copy_source=True): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] string_dict = strings.read_dict(dict_file, with_signature=True) in_path = os.path.abspath(in_path) out_path = os.path.abspath(out_path) ignored_out_directory = tempfile.TemporaryDirectory() for root, _, sources in os.walk(in_path): # 1. Prepare destination directory suffix = os.path.relpath(root, in_path) dest_root = os.path.join(out_path, suffix) print ('Encoding from {root} to {dest_root}'.format(root=root, dest_root=dest_root)) os.makedirs(dest_root, exist_ok=True) for source in sources: source_path = os.path.join(root, source) if not source[-3:] == '.js': print ('...skipping {}'.format(source_path)) continue # 2. Extract AST print ('Preprocessing {}'.format(source_path)) process = subprocess.run([binjs_encode, '--quiet', '--show-ast', '--in', source_path, '--out', ignored_out_directory.name], capture_output=True) try: proggy = json.loads(process.stdout.decode('utf-8')) # 3. Encode dest_path = os.path.join(dest_root,source[:-3] + '.binjs') print ('Encoding {source_path} => {dest_path}'.format(source_path=source_path, dest_path=dest_path)) dest_file = open(dest_path, 'wb') format.write(types, string_dict, ty_script, proggy, dest_file) # 4. 
Copy source file if copy_source: shutil.copy(source_path, dest_root) except: if skip_errors: print ('...does not parse') else: raise def encode(dict_file, in_file, out_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] string_dict = strings.read_dict(dict_file, with_signature=True) proggy = json.loads(in_file.read()) format.write(types, string_dict, ty_script, proggy, out_file) def decode(dict_file, in_file, out_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] string_dict = strings.read_dict(dict_file, with_signature=True) proggy = format.read(types, string_dict, ty_script, in_file) json.dump(proggy, out_file) def optimize(in_file, out_file): proggy = json.loads(in_file.read()) proggy = opt.optimize(proggy) json.dump(proggy, out_file) def make_dict(in_files, out_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] sources = [] for in_file in in_files: proggy = json.loads(in_file.read()) tycheck.FloatFixer(types).rewrite(ty_script, proggy) tycheck.TypeChecker(types).check_any(ty_script, proggy) sources.append((ty_script, proggy)) string_dict = strings.prepare_dict(types, sources) strings.write_dict(out_file, string_dict, with_signature=True) def pretty_json(in_file): json.dump(sys.stdout, json.loads(in_file.read()), indent=2) def type_check(in_files): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] for in_file in in_files: proggy = json.loads(in_file.read()) print(f'checking {in_file.name}... ', end='') try: tycheck.TypeChecker(types).check_any(ty_script, proggy) except Exception as e: # FIXME: Make this a specific exception type, do error recovery, etc. 
print(f'NG, {e!s}') continue print('OK') def fix_types(in_file): types = idl.parse_es6_idl() ty_script = types.interfaces['Script'] proggy = json.loads(in_file.read()) tycheck.FloatFixer(types).rewrite(ty_script, proggy) json.dump(proggy, sys.stdout) def main(): # We get waaay past the 1000 limit even on relatively simple examples. sys.setrecursionlimit(10000) parser = argparse.ArgumentParser() parser.set_defaults(func=lambda args: print('use --help to see commands')) subs = parser.add_subparsers(title='subcommands') encode_dir_parser = subs.add_parser('encode-dir', help='Encode a full directory (from .js).', description='''Caveats: (1) dictionary misses are not supported yet (2) not all file sections are compressed natively yet; the output should be compressed with Brotli ''') encode_dir_parser.add_argument('binjs_encode', help='path to the binjs_encode binary') encode_dir_parser.add_argument('dictionary', type=argparse.FileType('rb'), help='the dictionary file to read from') encode_dir_parser.add_argument('indir', help='the path from which to read *.js files') encode_dir_parser.add_argument('outdir', help='the path to which to write *.binjs files') encode_dir_parser.add_argument('--ignore-errors', nargs='?', const=True, default=False, help='if specified, skip files that cannot be encoded'), encode_dir_parser.add_argument('--copy-source-files', nargs='?', const=True, default=False, help='if specified, copy .js source files to the target directory after encoding them'), encode_dir_parser.set_defaults(func=lambda args: encode_dir(args.dictionary, args.binjs_encode, args.indir, args.outdir, args.skip_errors, args.copy_source_files)) encode_parser = subs.add_parser('encode-ast', help='AST JSON to binary.', description='''Caveats: (1) dictionary misses are not supported yet (2) not all file sections are compressed natively yet; the output should be compressed with Brotli ''') encode_parser.add_argument('dictionary', type=argparse.FileType('rb'), help='the dictionary 
file to read from') encode_parser.add_argument('input', type=argparse.FileType('r'), help='the AST JSON file read from') encode_parser.add_argument('output', type=argparse.FileType('wb'), help='the binary file to write to') encode_parser.set_defaults(func=lambda args: encode(args.dictionary, args.input, args.output)) decode_parser = subs.add_parser('decode-ast', help='Binary to AST JSON.', description=''' Caveat: dictionary identity is not checked yet; use the same dictionary as encoding. ''') decode_parser.add_argument('dictionary', type=argparse.FileType('rb'), help='the dictionary file to read from') decode_parser.add_argument('input', type=argparse.FileType('rb'), help='the binary file read from') decode_parser.add_argument('output', type=argparse.FileType('w'), help='the JSON file to write AST to') decode_parser.set_defaults(func=lambda args: decode(args.dictionary, args.input, args.output)) opt_parser = subs.add_parser('optimize-ast', help='Adds laziness annotations to an AST.') opt_parser.add_argument('input', type=argparse.FileType('r'), help='the AST JSON file read from') opt_parser.add_argument('output', type=argparse.FileType('w'), help='the AST JSON file to write to') opt_parser.set_defaults(func=lambda args: optimize(args.input, args.output)) make_dict_parser = subs.add_parser('make-dict', help='Makes a string dictionary from AST JSON files.') make_dict_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help='the AST JSON files to read from') make_dict_parser.add_argument('output', type=argparse.FileType('wb'), help='the binary file to write to') make_dict_parser.set_defaults(func=lambda args: make_dict(args.input, args.output)) pretty_json_parser = subs.add_parser('pretty-json', help='Pretty-prints JSON, which is useful for AST diffs.') pretty_json_parser.add_argument('input', type=argparse.FileType('r'), help='the JSON file to read from') pretty_json_parser.set_defaults(func=lambda args: pretty_json(args.input)) type_check_parser = 
subs.add_parser('type-check', help='Checks AST JSON conforms to ES6 IDL.') type_check_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help='the AST JSON file to read from') type_check_parser.set_defaults(func=lambda args: type_check(args.input)) fix_types_parser = subs.add_parser('fix-types', help='Repairs AST JSON which has ints for doubles.') fix_types_parser.add_argument('input', type=argparse.FileType('r'), help='the AST JSON file to read from') fix_types_parser.set_defaults(func=lambda args: fix_types(args.input)) args = parser.parse_args() args.func(args) if __name__ == '__main__': main()
en
0.847826
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # 1. Prepare destination directory # 2. Extract AST # 3. Encode # 4. Copy source file # FIXME: Make this a specific exception type, do error recovery, etc. # We get waaay past the 1000 limit even on relatively simple examples. Caveats: (1) dictionary misses are not supported yet (2) not all file sections are compressed natively yet; the output should be compressed with Brotli Caveats: (1) dictionary misses are not supported yet (2) not all file sections are compressed natively yet; the output should be compressed with Brotli Caveat: dictionary identity is not checked yet; use the same dictionary as encoding.
2.086848
2
lab03/book_registry/test_script.py
Boris-Barboris/rsoi
0
6621514
#!/usr/bin/python3 from app_book_registry.oauthclient import * from app_book_registry.book_registry_client import * from app_book_registry.rsoi_common import AlreadyExists import json import requests import logging import http.client http.client.HTTPConnection.debuglevel = 1 logging.basicConfig() logging.getLogger().setLevel(logging.DEBUG) requests_log = logging.getLogger("requests.packages.urllib3") requests_log.setLevel(logging.DEBUG) requests_log.propagate = True pp = PasswordPlugin('admin', 'admin') client = OAuthClient('http://127.0.0.1:39000', pp, 'debug_client', 'mysecret', 'localhost') print('\nVerification using password plugin\n') print('\n\n' + repr(client.verify()) + '\n') print('\nissuing tokens...\n') tokens = client.get_token() print('\n\ntoken:\n') print('\n' + repr(tokens) + '\n') ll = BookRegistryClient('http://localhost:39003', client) print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n') print('\nadding test print...\n') test_print = { 'title': 'TestTitle', 'authors': '<NAME>', 'page_count': 250, 'year': 1967, } try: print('\n\n' + repr(ll.add_print('38721837', test_print)) + '\n') except AlreadyExists: print('\nALREADY EXISTS\n') print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n') print('\npatching test print...\n') test_print['year'] = 1971 test_print['authors'] = '<NAME>.' print('\n\n' + repr(ll.update_print('38721837', test_print)) + '\n') print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n') print('\ndeleting test print...\n') print('\n\n' + repr(ll.delete_print('38721837')) + '\n') print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n')
#!/usr/bin/python3 from app_book_registry.oauthclient import * from app_book_registry.book_registry_client import * from app_book_registry.rsoi_common import AlreadyExists import json import requests import logging import http.client http.client.HTTPConnection.debuglevel = 1 logging.basicConfig() logging.getLogger().setLevel(logging.DEBUG) requests_log = logging.getLogger("requests.packages.urllib3") requests_log.setLevel(logging.DEBUG) requests_log.propagate = True pp = PasswordPlugin('admin', 'admin') client = OAuthClient('http://127.0.0.1:39000', pp, 'debug_client', 'mysecret', 'localhost') print('\nVerification using password plugin\n') print('\n\n' + repr(client.verify()) + '\n') print('\nissuing tokens...\n') tokens = client.get_token() print('\n\ntoken:\n') print('\n' + repr(tokens) + '\n') ll = BookRegistryClient('http://localhost:39003', client) print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n') print('\nadding test print...\n') test_print = { 'title': 'TestTitle', 'authors': '<NAME>', 'page_count': 250, 'year': 1967, } try: print('\n\n' + repr(ll.add_print('38721837', test_print)) + '\n') except AlreadyExists: print('\nALREADY EXISTS\n') print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n') print('\npatching test print...\n') test_print['year'] = 1971 test_print['authors'] = '<NAME>.' print('\n\n' + repr(ll.update_print('38721837', test_print)) + '\n') print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n') print('\ndeleting test print...\n') print('\n\n' + repr(ll.delete_print('38721837')) + '\n') print('\nlist prints...\n') print('\n\n' + repr(ll.list_prints()) + '\n')
fr
0.386793
#!/usr/bin/python3
2.238985
2
Controllers/UI.py
DeerChen/LRCPlayer
0
6621515
''' Description: Author: Senkita Date: 2021-04-16 18:53:58 LastEditors: Senkita LastEditTime: 2021-04-16 20:48:53 ''' from pathlib import Path from typing import List from .Player import Player import os from random import shuffle class UI: def __init__(self) -> None: self.lrc_dir = Path('./lrc') self.lrc_files = self.__list_lrc_files() def __list_lrc_files(self) -> List[str]: lrc_files = [] for file in self.lrc_dir.iterdir(): if file.suffix == '.lrc': lrc_files.append(file.stem) return lrc_files def __choose(self) -> None: print('顺序播放输入A,随机播放输入B,退出请按Ctrl+C') print('请输入操作,并以回车结束:', end='') choice = input() if choice in ('A', 'a'): os.system('cls') Player(self.lrc_files).play() elif choice in ('B', 'b'): os.system('cls') lrc_files = self.lrc_files shuffle(lrc_files) Player(lrc_files).play() else: print('输入有误,请重新输入') self.__choose() def display(self) -> None: print('歌词保存在"lrc/"目录下,现有歌词{}份:'.format(len(self.lrc_files))) count = 0 for file in self.lrc_files: count += 1 print('{}. {}'.format(count, file)) self.__choose()
''' Description: Author: Senkita Date: 2021-04-16 18:53:58 LastEditors: Senkita LastEditTime: 2021-04-16 20:48:53 ''' from pathlib import Path from typing import List from .Player import Player import os from random import shuffle class UI: def __init__(self) -> None: self.lrc_dir = Path('./lrc') self.lrc_files = self.__list_lrc_files() def __list_lrc_files(self) -> List[str]: lrc_files = [] for file in self.lrc_dir.iterdir(): if file.suffix == '.lrc': lrc_files.append(file.stem) return lrc_files def __choose(self) -> None: print('顺序播放输入A,随机播放输入B,退出请按Ctrl+C') print('请输入操作,并以回车结束:', end='') choice = input() if choice in ('A', 'a'): os.system('cls') Player(self.lrc_files).play() elif choice in ('B', 'b'): os.system('cls') lrc_files = self.lrc_files shuffle(lrc_files) Player(lrc_files).play() else: print('输入有误,请重新输入') self.__choose() def display(self) -> None: print('歌词保存在"lrc/"目录下,现有歌词{}份:'.format(len(self.lrc_files))) count = 0 for file in self.lrc_files: count += 1 print('{}. {}'.format(count, file)) self.__choose()
en
0.44344
Description: Author: Senkita Date: 2021-04-16 18:53:58 LastEditors: Senkita LastEditTime: 2021-04-16 20:48:53
3.295915
3
cloudnet-package/trainer/utils.py
Windact/cloud_detection
0
6621516
<gh_stars>0 from tensorflow.keras import backend as K from tensorflow import keras smooth = 0.0000001 def jacc_coef(y_true, y_pred): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return 1 - ((intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth)) class ADAMLearningRateTracker(keras.callbacks.Callback): """It prints out the last used learning rate after each epoch (useful for resuming a training) original code: https://github.com/keras-team/keras/issues/7874#issuecomment-329347949 """ def __init__(self, end_lr): super(ADAMLearningRateTracker, self).__init__() self.end_lr = end_lr def on_epoch_end(self, epoch, logs={}): # works only when decay in optimizer is zero optimizer = self.model.optimizer # t = K.cast(optimizer.iterations, K.floatx()) + 1 # lr_t = K.eval(optimizer.lr * (K.sqrt(1. - K.pow(optimizer.beta_2, t)) / # (1. - K.pow(optimizer.beta_1, t)))) # print('\n***The last Actual Learning rate in this epoch is:', lr_t,'***\n') print('\n***The last Basic Learning rate in this epoch is:', K.eval(optimizer.lr), '***\n') # stops the training if the basic lr is less than or equal to end_learning_rate if K.eval(optimizer.lr) <= self.end_lr: print("training is finished") self.model.stop_training = True
from tensorflow.keras import backend as K from tensorflow import keras smooth = 0.0000001 def jacc_coef(y_true, y_pred): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return 1 - ((intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth)) class ADAMLearningRateTracker(keras.callbacks.Callback): """It prints out the last used learning rate after each epoch (useful for resuming a training) original code: https://github.com/keras-team/keras/issues/7874#issuecomment-329347949 """ def __init__(self, end_lr): super(ADAMLearningRateTracker, self).__init__() self.end_lr = end_lr def on_epoch_end(self, epoch, logs={}): # works only when decay in optimizer is zero optimizer = self.model.optimizer # t = K.cast(optimizer.iterations, K.floatx()) + 1 # lr_t = K.eval(optimizer.lr * (K.sqrt(1. - K.pow(optimizer.beta_2, t)) / # (1. - K.pow(optimizer.beta_1, t)))) # print('\n***The last Actual Learning rate in this epoch is:', lr_t,'***\n') print('\n***The last Basic Learning rate in this epoch is:', K.eval(optimizer.lr), '***\n') # stops the training if the basic lr is less than or equal to end_learning_rate if K.eval(optimizer.lr) <= self.end_lr: print("training is finished") self.model.stop_training = True
en
0.57163
It prints out the last used learning rate after each epoch (useful for resuming a training) original code: https://github.com/keras-team/keras/issues/7874#issuecomment-329347949 # works only when decay in optimizer is zero # t = K.cast(optimizer.iterations, K.floatx()) + 1 # lr_t = K.eval(optimizer.lr * (K.sqrt(1. - K.pow(optimizer.beta_2, t)) / # (1. - K.pow(optimizer.beta_1, t)))) # print('\n***The last Actual Learning rate in this epoch is:', lr_t,'***\n') # stops the training if the basic lr is less than or equal to end_learning_rate
2.929758
3
Module6_HW/hw6_1_4.py
vladspirin/python_core
0
6621517
from random import randint list_1 = [] list_2 = [] numbers = [randint(-5, 5) for i in range(-5, 5)] for i in numbers: print(i) if i < 0: list_1.append(i) if i > 0: list_2.append(i) print(list_1) print(list_2)
from random import randint list_1 = [] list_2 = [] numbers = [randint(-5, 5) for i in range(-5, 5)] for i in numbers: print(i) if i < 0: list_1.append(i) if i > 0: list_2.append(i) print(list_1) print(list_2)
none
1
3.626039
4
export.py
jessedp/tut
4
6621518
import gevent import os import contextlib import shutil import socket import tempfile import logging from tqdm import tqdm import ffmpeg from tinydb import TinyDB, Query from config import built_ins from recording import Recording logger = logging.getLogger(__name__) def copy(id_list, args): total = len(id_list) if total == 0: print(f"Nothing to process, exiting...") return elif total == 1: print(f"Processing {total} recording") else: print(f"Processing {total} recordings") print("-"*50) for id in id_list: # TODO: put a X of Y somewhere near here _copy(id, args) print() print("FINISHED") def _copy(obj_id, args): # TODO: Whoops, now used this twice (search.py too) path = built_ins['db']['recordings'] rec_db = TinyDB(path) shows = Query() # shortcut for later shows_qry = shows.data # TODO: deal with pieces of the same recording (TMSID?) marked "finished" # ie, 2 portions (non-full) of the an episode # + just skip them (do this!) # + differentiate on recorded at the same time # - look at recording/show data to see what it *should* be? # - overwrite previous portions obj = rec_db.get( (shows_qry.object_id == int(obj_id)) & (shows_qry.video_details.state == 'finished') ) if obj is None: print( f'ERROR: Unable to find recording with object_id == "{obj_id}", ' f'skipping...') return rec = Recording(obj['data']) watch = rec.watch() if watch.error is not None: print(rec.get_description()) print("ERROR: Recording no longer exists, skipping!") return out_file = rec.get_out_path('mp4') # TODO: this could make weird dirs? 
os.makedirs(os.path.dirname(out_file), exist_ok=True) # Display what we're working on if built_ins['log_level'] <= logging.INFO: rec.print() watch.dump_info() else: print(rec.get_description()) print(" " * 2 + f"writing to: {out_file}") if not args.clobber and os.path.exists(out_file): print("File exists, skipping") return total_duration = float(ffmpeg.probe( watch.playlist_url)['format']['duration']) if built_ins['dry_run']: # maybe do a dry run writing to a temp path and deleting so the time # is roughly the same? print("DRY RUN: The recording wasn't saved.") else: with show_progress(total_duration) as socket_filename: try: copier = ( ffmpeg # this is a m3u8 playlist .input(watch.playlist_url) .output(out_file, codec='copy', preset='ultrafast', loglevel='info') .overwrite_output() .global_args( '-progress', 'unix://{}'.format(socket_filename) ) ) copier.run(capture_stdout=True, capture_stderr=True) except KeyboardInterrupt: os.remove(out_file) raise KeyboardInterrupt except ffmpeg.Error as e: logger.error(e) # TODO: all of this should probably be somewhere else... @contextlib.contextmanager def _tmpdir_scope(): tmpdir = tempfile.mkdtemp() try: yield tmpdir finally: shutil.rmtree(tmpdir) def _do_watch_progress(filename, sock, handler): """Function to run in a separate gevent greenlet to read progress events from a unix-domain socket.""" connection, client_address = sock.accept() data = b'' try: while True: more_data = connection.recv(16) if not more_data: break data += more_data lines = data.split(b'\n') for line in lines[:-1]: line = line.decode() parts = line.split('=') key = parts[0] if len(parts) > 0 else None value = parts[1] if len(parts) > 1 else None handler(key, value) data = lines[-1] finally: connection.close() @contextlib.contextmanager def _watch_progress(handler): """Context manager for creating a unix-domain socket and listen for ffmpeg progress events. 
The socket filename is yielded from the context manager and the socket is closed when the context manager is exited. Args: handler: a function to be called when progress events are received; receives a ``key`` argument and ``value`` argument. (The example ``show_progress`` below uses tqdm) Yields: socket_filename: the name of the socket file. """ with _tmpdir_scope() as tmpdir: socket_filename = os.path.join(tmpdir, 'sock') sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) with contextlib.closing(sock): sock.bind(socket_filename) sock.listen(1) child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler) try: yield socket_filename except Exception: gevent.kill(child) raise @contextlib.contextmanager def show_progress(total_duration): """Create a unix-domain socket to watch progress and render tqdm progress bar.""" with tqdm(total=round(total_duration, 2)) as bar: def handler(key, value): if key == 'out_time_ms': time = round(float(value) / 1000000., 2) bar.update(time - bar.n) elif key == 'progress' and value == 'end': bar.update(bar.total - bar.n) with _watch_progress(handler) as socket_filename: yield socket_filename
import gevent import os import contextlib import shutil import socket import tempfile import logging from tqdm import tqdm import ffmpeg from tinydb import TinyDB, Query from config import built_ins from recording import Recording logger = logging.getLogger(__name__) def copy(id_list, args): total = len(id_list) if total == 0: print(f"Nothing to process, exiting...") return elif total == 1: print(f"Processing {total} recording") else: print(f"Processing {total} recordings") print("-"*50) for id in id_list: # TODO: put a X of Y somewhere near here _copy(id, args) print() print("FINISHED") def _copy(obj_id, args): # TODO: Whoops, now used this twice (search.py too) path = built_ins['db']['recordings'] rec_db = TinyDB(path) shows = Query() # shortcut for later shows_qry = shows.data # TODO: deal with pieces of the same recording (TMSID?) marked "finished" # ie, 2 portions (non-full) of the an episode # + just skip them (do this!) # + differentiate on recorded at the same time # - look at recording/show data to see what it *should* be? # - overwrite previous portions obj = rec_db.get( (shows_qry.object_id == int(obj_id)) & (shows_qry.video_details.state == 'finished') ) if obj is None: print( f'ERROR: Unable to find recording with object_id == "{obj_id}", ' f'skipping...') return rec = Recording(obj['data']) watch = rec.watch() if watch.error is not None: print(rec.get_description()) print("ERROR: Recording no longer exists, skipping!") return out_file = rec.get_out_path('mp4') # TODO: this could make weird dirs? 
os.makedirs(os.path.dirname(out_file), exist_ok=True) # Display what we're working on if built_ins['log_level'] <= logging.INFO: rec.print() watch.dump_info() else: print(rec.get_description()) print(" " * 2 + f"writing to: {out_file}") if not args.clobber and os.path.exists(out_file): print("File exists, skipping") return total_duration = float(ffmpeg.probe( watch.playlist_url)['format']['duration']) if built_ins['dry_run']: # maybe do a dry run writing to a temp path and deleting so the time # is roughly the same? print("DRY RUN: The recording wasn't saved.") else: with show_progress(total_duration) as socket_filename: try: copier = ( ffmpeg # this is a m3u8 playlist .input(watch.playlist_url) .output(out_file, codec='copy', preset='ultrafast', loglevel='info') .overwrite_output() .global_args( '-progress', 'unix://{}'.format(socket_filename) ) ) copier.run(capture_stdout=True, capture_stderr=True) except KeyboardInterrupt: os.remove(out_file) raise KeyboardInterrupt except ffmpeg.Error as e: logger.error(e) # TODO: all of this should probably be somewhere else... @contextlib.contextmanager def _tmpdir_scope(): tmpdir = tempfile.mkdtemp() try: yield tmpdir finally: shutil.rmtree(tmpdir) def _do_watch_progress(filename, sock, handler): """Function to run in a separate gevent greenlet to read progress events from a unix-domain socket.""" connection, client_address = sock.accept() data = b'' try: while True: more_data = connection.recv(16) if not more_data: break data += more_data lines = data.split(b'\n') for line in lines[:-1]: line = line.decode() parts = line.split('=') key = parts[0] if len(parts) > 0 else None value = parts[1] if len(parts) > 1 else None handler(key, value) data = lines[-1] finally: connection.close() @contextlib.contextmanager def _watch_progress(handler): """Context manager for creating a unix-domain socket and listen for ffmpeg progress events. 
The socket filename is yielded from the context manager and the socket is closed when the context manager is exited. Args: handler: a function to be called when progress events are received; receives a ``key`` argument and ``value`` argument. (The example ``show_progress`` below uses tqdm) Yields: socket_filename: the name of the socket file. """ with _tmpdir_scope() as tmpdir: socket_filename = os.path.join(tmpdir, 'sock') sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) with contextlib.closing(sock): sock.bind(socket_filename) sock.listen(1) child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler) try: yield socket_filename except Exception: gevent.kill(child) raise @contextlib.contextmanager def show_progress(total_duration): """Create a unix-domain socket to watch progress and render tqdm progress bar.""" with tqdm(total=round(total_duration, 2)) as bar: def handler(key, value): if key == 'out_time_ms': time = round(float(value) / 1000000., 2) bar.update(time - bar.n) elif key == 'progress' and value == 'end': bar.update(bar.total - bar.n) with _watch_progress(handler) as socket_filename: yield socket_filename
en
0.896116
# TODO: put a X of Y somewhere near here # TODO: Whoops, now used this twice (search.py too) # shortcut for later # TODO: deal with pieces of the same recording (TMSID?) marked "finished" # ie, 2 portions (non-full) of the an episode # + just skip them (do this!) # + differentiate on recorded at the same time # - look at recording/show data to see what it *should* be? # - overwrite previous portions # TODO: this could make weird dirs? # Display what we're working on # maybe do a dry run writing to a temp path and deleting so the time # is roughly the same? # this is a m3u8 playlist # TODO: all of this should probably be somewhere else... Function to run in a separate gevent greenlet to read progress events from a unix-domain socket. Context manager for creating a unix-domain socket and listen for ffmpeg progress events. The socket filename is yielded from the context manager and the socket is closed when the context manager is exited. Args: handler: a function to be called when progress events are received; receives a ``key`` argument and ``value`` argument. (The example ``show_progress`` below uses tqdm) Yields: socket_filename: the name of the socket file. Create a unix-domain socket to watch progress and render tqdm progress bar.
2.525855
3
src/isle/memoize.py
chelseajohn/isle
2
6621519
r"""!\file \brief Utilities for memoizing function results. Memoization decorators let us compute expensive functions once and store the result, so that if the function is called again with the same arguments the result is just returned. """ import functools import inspect import typing import weakref from dataclasses import dataclass from logging import getLogger class MemoizeMethod: r"""! Decorator that memoizes the result of a method call based on given arguments. \warning This decorator works only on bound methods not free functions. \note Classes using this decorator on their methods must support hashing. The most recent result of calling the decorated method is cached and returned on subsequent calls if the specified arguments match those of the cached call. This can help avoid repeating expensive calculations in a way that is transparent to the user. It is important that all relevant arguments are checked though, so as not to re-use results erroneously. <B>Example: Memoize by one argument</B><br> ```{.py} class C: @MemoizeMethod(lambda a: a) def f(self, a, b): print(f"evaluate f({a}, {b})") c1 = C() c1.f(0, 1) # evaluated c1.f(0, 1) # re-used c1.f(0, 2) # re-used; careful, this might be wrong! 
c1.f(1, 1) # evaluated, a has changed c1.f(1, 1) # re-used c1.f(0, 1) # evaluated, only the most recent result is stored c2 = C() c2.f(0, 1) # evaluated, c1 and c2 do not share memoized results c1.f(0, 1) # re-used from earlier call to c1.f ``` <B>Example: Memoize by two arguments</B><br> ```{.py} class C: @MemoizeMethod(lambda a, b: (a, b)) def f(self, a, b): print(f"evaluate f({a}, {b})") c1 = C() c1.f(0, 1) # evaluated c1.f(0, 1) # re-used c1.f(0, 2) # evaluated (b changed) c1.f(1, 2) # evaluated (a changed) c1.f(1, 2) # re-used c1.f(0, 2) # evaluated, only the most recent result is stored ``` <B>Example: Memoize by derived properties</B><br> ```{.py} class C: @MemoizeMethod(lambda s: len(s)) def f(self, s, x): print(f"evaluate f({s}, {x})") return len(s) + x c1 = C() c1.f("foo", 1) # evaluated c1.f("foo", 1) # re-used c1.f("bar", 1) # re-used, only len(s) matters c1.f("bar", 2) # re-used c1.f("foobar", 2) # evaluated (len(s) changed) c1.f("bazbar", 2) # re-used c1.f("foo", 2) # evaluated, only the most recent result is stored ``` \note The efficiency of this decorator depends on the equality operator (`==`). If it is slow for the arguments by which the result is memoized, function calls can still be expensive. """ # # *** Implementation notes *** # # Using the decorator syntax like in the examples applies the MemoizeMethod decorator to # the function at *class* creation time, i.e. before it is bound to an instance. # This means that all instances of a class share the same instance of MemoizedMethod # for each of their decorated methods. # It is thus necessary to distinguish the different instances and store arguments # and return values for each one separately. # # This is achieved by keeping weak references to all instances on which the # decorated method get called in the attribute _instanceData. # This happens when the method gets called and not when the instance is created. # @dataclass class MemoizedData: """! 
Store values of arguments and the return value of the memoized function. """ # Stored values of arguments as processed by argumentKey. # Can be `_Empty`, meaning that the method has not been called yet. argvals: typing.Any # Return value. result: typing.Any def __init__(self, argumentKeyFn): r"""! \param argumentKeyFn Callable which is applied to the arguments of the decorated method (except for `self`) before comparing to the cache. """ self.argumentKeyFn = argumentKeyFn self._argumentKeyFnParams = inspect.signature(self.argumentKeyFn).parameters self._instanceData = weakref.WeakKeyDictionary() def __call__(self, method): """! Apply decorator to a method. """ memo = self # Using 'self' inside of wrapper is confusing; 'memo' is the instance of Memoize. methodSignature = self._getMethodSignature(method) @functools.wraps(method) def wrapper(instance, *args, **kwargs): memoized = memo._getOrInsertInstanceData(instance) actualArguments = _bindArguments(methodSignature, instance, *args, **kwargs) argumentKey = memo.argumentKeyFn(**memo._keyArguments(actualArguments)) if memoized.argvals is not _Empty: # Has the function been called before? if memoized.argvals == argumentKey: # The previous call was equivalent to the current one. return memoized.result memoized.argvals = argumentKey memoized.result = method(instance, *args, **kwargs) return memoized.result return wrapper def _keyArguments(self, allArguments): """! Return the arguments for the key functions based on all arguments passed to method. """ return {name: allArguments[name] for name in self._argumentKeyFnParams} def _getMethodSignature(self, method): """! Return and verify the signature of `method`. 
""" methodSig = inspect.signature(method) for name in self._argumentKeyFnParams: if name not in methodSig.parameters: getLogger(__name__).error("Argument name '%s' used in key function not in signature" " of function %s\n signature: %s", name, method, methodSig) raise TypeError(f"Argument name {name} if not part of function signature") return methodSig def _getOrInsertInstanceData(self, instance): """! Return the MemoizationData object for given instance. Creates a new one if `instance` has not been seen before. """ try: return self._instanceData[instance] except KeyError: getLogger(__name__).debug("Inserting new instance for memoization: %s\n in memoization object %s", instance, self) self._instanceData[instance] = self.MemoizedData(_Empty, None) return self._instanceData[instance] def _bindArguments(signature, *args, **kwargs): r"""! Bind the given arguments including default values to a function signature and return an ordered mapping from argument names to values. """ boundArguments = signature.bind(*args, **kwargs) boundArguments.apply_defaults() return boundArguments.arguments class _EmptyType: """! Indicates that some variable has not been set yet. """ def __repr__(self): return "Empty" def __bool__(self): return False def __reduce__(self): return "Empty" _Empty = _EmptyType()
r"""!\file \brief Utilities for memoizing function results. Memoization decorators let us compute expensive functions once and store the result, so that if the function is called again with the same arguments the result is just returned. """ import functools import inspect import typing import weakref from dataclasses import dataclass from logging import getLogger class MemoizeMethod: r"""! Decorator that memoizes the result of a method call based on given arguments. \warning This decorator works only on bound methods not free functions. \note Classes using this decorator on their methods must support hashing. The most recent result of calling the decorated method is cached and returned on subsequent calls if the specified arguments match those of the cached call. This can help avoid repeating expensive calculations in a way that is transparent to the user. It is important that all relevant arguments are checked though, so as not to re-use results erroneously. <B>Example: Memoize by one argument</B><br> ```{.py} class C: @MemoizeMethod(lambda a: a) def f(self, a, b): print(f"evaluate f({a}, {b})") c1 = C() c1.f(0, 1) # evaluated c1.f(0, 1) # re-used c1.f(0, 2) # re-used; careful, this might be wrong! 
c1.f(1, 1) # evaluated, a has changed c1.f(1, 1) # re-used c1.f(0, 1) # evaluated, only the most recent result is stored c2 = C() c2.f(0, 1) # evaluated, c1 and c2 do not share memoized results c1.f(0, 1) # re-used from earlier call to c1.f ``` <B>Example: Memoize by two arguments</B><br> ```{.py} class C: @MemoizeMethod(lambda a, b: (a, b)) def f(self, a, b): print(f"evaluate f({a}, {b})") c1 = C() c1.f(0, 1) # evaluated c1.f(0, 1) # re-used c1.f(0, 2) # evaluated (b changed) c1.f(1, 2) # evaluated (a changed) c1.f(1, 2) # re-used c1.f(0, 2) # evaluated, only the most recent result is stored ``` <B>Example: Memoize by derived properties</B><br> ```{.py} class C: @MemoizeMethod(lambda s: len(s)) def f(self, s, x): print(f"evaluate f({s}, {x})") return len(s) + x c1 = C() c1.f("foo", 1) # evaluated c1.f("foo", 1) # re-used c1.f("bar", 1) # re-used, only len(s) matters c1.f("bar", 2) # re-used c1.f("foobar", 2) # evaluated (len(s) changed) c1.f("bazbar", 2) # re-used c1.f("foo", 2) # evaluated, only the most recent result is stored ``` \note The efficiency of this decorator depends on the equality operator (`==`). If it is slow for the arguments by which the result is memoized, function calls can still be expensive. """ # # *** Implementation notes *** # # Using the decorator syntax like in the examples applies the MemoizeMethod decorator to # the function at *class* creation time, i.e. before it is bound to an instance. # This means that all instances of a class share the same instance of MemoizedMethod # for each of their decorated methods. # It is thus necessary to distinguish the different instances and store arguments # and return values for each one separately. # # This is achieved by keeping weak references to all instances on which the # decorated method get called in the attribute _instanceData. # This happens when the method gets called and not when the instance is created. # @dataclass class MemoizedData: """! 
Store values of arguments and the return value of the memoized function. """ # Stored values of arguments as processed by argumentKey. # Can be `_Empty`, meaning that the method has not been called yet. argvals: typing.Any # Return value. result: typing.Any def __init__(self, argumentKeyFn): r"""! \param argumentKeyFn Callable which is applied to the arguments of the decorated method (except for `self`) before comparing to the cache. """ self.argumentKeyFn = argumentKeyFn self._argumentKeyFnParams = inspect.signature(self.argumentKeyFn).parameters self._instanceData = weakref.WeakKeyDictionary() def __call__(self, method): """! Apply decorator to a method. """ memo = self # Using 'self' inside of wrapper is confusing; 'memo' is the instance of Memoize. methodSignature = self._getMethodSignature(method) @functools.wraps(method) def wrapper(instance, *args, **kwargs): memoized = memo._getOrInsertInstanceData(instance) actualArguments = _bindArguments(methodSignature, instance, *args, **kwargs) argumentKey = memo.argumentKeyFn(**memo._keyArguments(actualArguments)) if memoized.argvals is not _Empty: # Has the function been called before? if memoized.argvals == argumentKey: # The previous call was equivalent to the current one. return memoized.result memoized.argvals = argumentKey memoized.result = method(instance, *args, **kwargs) return memoized.result return wrapper def _keyArguments(self, allArguments): """! Return the arguments for the key functions based on all arguments passed to method. """ return {name: allArguments[name] for name in self._argumentKeyFnParams} def _getMethodSignature(self, method): """! Return and verify the signature of `method`. 
""" methodSig = inspect.signature(method) for name in self._argumentKeyFnParams: if name not in methodSig.parameters: getLogger(__name__).error("Argument name '%s' used in key function not in signature" " of function %s\n signature: %s", name, method, methodSig) raise TypeError(f"Argument name {name} if not part of function signature") return methodSig def _getOrInsertInstanceData(self, instance): """! Return the MemoizationData object for given instance. Creates a new one if `instance` has not been seen before. """ try: return self._instanceData[instance] except KeyError: getLogger(__name__).debug("Inserting new instance for memoization: %s\n in memoization object %s", instance, self) self._instanceData[instance] = self.MemoizedData(_Empty, None) return self._instanceData[instance] def _bindArguments(signature, *args, **kwargs): r"""! Bind the given arguments including default values to a function signature and return an ordered mapping from argument names to values. """ boundArguments = signature.bind(*args, **kwargs) boundArguments.apply_defaults() return boundArguments.arguments class _EmptyType: """! Indicates that some variable has not been set yet. """ def __repr__(self): return "Empty" def __bool__(self): return False def __reduce__(self): return "Empty" _Empty = _EmptyType()
en
0.816139
!\file \brief Utilities for memoizing function results. Memoization decorators let us compute expensive functions once and store the result, so that if the function is called again with the same arguments the result is just returned. ! Decorator that memoizes the result of a method call based on given arguments. \warning This decorator works only on bound methods not free functions. \note Classes using this decorator on their methods must support hashing. The most recent result of calling the decorated method is cached and returned on subsequent calls if the specified arguments match those of the cached call. This can help avoid repeating expensive calculations in a way that is transparent to the user. It is important that all relevant arguments are checked though, so as not to re-use results erroneously. <B>Example: Memoize by one argument</B><br> ```{.py} class C: @MemoizeMethod(lambda a: a) def f(self, a, b): print(f"evaluate f({a}, {b})") c1 = C() c1.f(0, 1) # evaluated c1.f(0, 1) # re-used c1.f(0, 2) # re-used; careful, this might be wrong! 
c1.f(1, 1) # evaluated, a has changed c1.f(1, 1) # re-used c1.f(0, 1) # evaluated, only the most recent result is stored c2 = C() c2.f(0, 1) # evaluated, c1 and c2 do not share memoized results c1.f(0, 1) # re-used from earlier call to c1.f ``` <B>Example: Memoize by two arguments</B><br> ```{.py} class C: @MemoizeMethod(lambda a, b: (a, b)) def f(self, a, b): print(f"evaluate f({a}, {b})") c1 = C() c1.f(0, 1) # evaluated c1.f(0, 1) # re-used c1.f(0, 2) # evaluated (b changed) c1.f(1, 2) # evaluated (a changed) c1.f(1, 2) # re-used c1.f(0, 2) # evaluated, only the most recent result is stored ``` <B>Example: Memoize by derived properties</B><br> ```{.py} class C: @MemoizeMethod(lambda s: len(s)) def f(self, s, x): print(f"evaluate f({s}, {x})") return len(s) + x c1 = C() c1.f("foo", 1) # evaluated c1.f("foo", 1) # re-used c1.f("bar", 1) # re-used, only len(s) matters c1.f("bar", 2) # re-used c1.f("foobar", 2) # evaluated (len(s) changed) c1.f("bazbar", 2) # re-used c1.f("foo", 2) # evaluated, only the most recent result is stored ``` \note The efficiency of this decorator depends on the equality operator (`==`). If it is slow for the arguments by which the result is memoized, function calls can still be expensive. # # *** Implementation notes *** # # Using the decorator syntax like in the examples applies the MemoizeMethod decorator to # the function at *class* creation time, i.e. before it is bound to an instance. # This means that all instances of a class share the same instance of MemoizedMethod # for each of their decorated methods. # It is thus necessary to distinguish the different instances and store arguments # and return values for each one separately. # # This is achieved by keeping weak references to all instances on which the # decorated method get called in the attribute _instanceData. # This happens when the method gets called and not when the instance is created. # ! Store values of arguments and the return value of the memoized function. 
# Stored values of arguments as processed by argumentKey. # Can be `_Empty`, meaning that the method has not been called yet. # Return value. ! \param argumentKeyFn Callable which is applied to the arguments of the decorated method (except for `self`) before comparing to the cache. ! Apply decorator to a method. # Using 'self' inside of wrapper is confusing; 'memo' is the instance of Memoize. # Has the function been called before? # The previous call was equivalent to the current one. ! Return the arguments for the key functions based on all arguments passed to method. ! Return and verify the signature of `method`. ! Return the MemoizationData object for given instance. Creates a new one if `instance` has not been seen before. ! Bind the given arguments including default values to a function signature and return an ordered mapping from argument names to values. ! Indicates that some variable has not been set yet.
4.267141
4
src/lander/__init__.py
lsst-sqre/lander
2
6621520
"""HTML landing page generator for LSST PDF documentation deployed from Git to LSST the Docs. https://github.com/lsst-sqre/lander """ from pkg_resources import DistributionNotFound, get_distribution try: __version__ = get_distribution("lander").version except DistributionNotFound: # package is not installed pass
"""HTML landing page generator for LSST PDF documentation deployed from Git to LSST the Docs. https://github.com/lsst-sqre/lander """ from pkg_resources import DistributionNotFound, get_distribution try: __version__ = get_distribution("lander").version except DistributionNotFound: # package is not installed pass
en
0.77478
HTML landing page generator for LSST PDF documentation deployed from Git to LSST the Docs. https://github.com/lsst-sqre/lander # package is not installed
1.408901
1
examples/some_activity/tests.py
klausfmh/pypeman
6
6621521
<gh_stars>1-10 from pypeman.test import PypeTestCase # from pypeman.message import Message class MyChanTest(PypeTestCase): def test1_great_channel(self): """ Test example """ self.get_channel("periodic")
from pypeman.test import PypeTestCase # from pypeman.message import Message class MyChanTest(PypeTestCase): def test1_great_channel(self): """ Test example """ self.get_channel("periodic")
en
0.314678
# from pypeman.message import Message Test example
2.465531
2
src/Accunniscila/frontend/views.py
GiorgioBelli/Accunniscila
2
6621522
<filename>src/Accunniscila/frontend/views.py from django.shortcuts import render import Orders.views as order_views from Menu.models import Pizza from Utilities.views import EmptyAPIView, NoAuthAPIView, AuthAPIView # Create your views here. class IndexView(EmptyAPIView): def get(self,request): return render(request, 'frontend/homepage.html') def post(self,request): return self.get(request) class CreaOrdine(AuthAPIView): def get(self,request): if(not self.authenticated(request)): return render(request, "core/login.html",{"target_page": self.getRequestUrl(request)}) return render(request, 'frontend/creaOrdine.html') class Menu(EmptyAPIView): def get(self,request): return render(request, 'frontend/menu.html') def post(self,request): return self.get(request) class SignUp(NoAuthAPIView): def get(self,request): return render(request, 'core/registration.html') def post(self,request): return self.get(request) class Login(NoAuthAPIView): def get(self,request): return render(request, 'core/login.html',{"target_page": self.getRootUrl(request)}) def post(self,request): return self.get(request) class Orders(AuthAPIView): def get(self,request): if(not self.authenticated(request)): return render(request, "core/login.html",{"target_page": self.getRequestUrl(request)}) return render(request, 'frontend/ordini.html') def post(self,request): return self.get(request) class OrderDetails(AuthAPIView): def get(self,request,order_id): if(not self.authenticated(request)): return render(request, "core/login.html",{"target_page": self.getRequestUrl(request)}) return render(request, 'frontend/dettaglioOrdine.html',{"order_id": order_id}) def post(self,request,order_id): return self.get(request,order_id) class About(NoAuthAPIView): def get(self,request): return render(request, 'frontend/about.html') class NotFound(NoAuthAPIView): def get(self,request): return render(request, 'frontend/404.html')
<filename>src/Accunniscila/frontend/views.py from django.shortcuts import render import Orders.views as order_views from Menu.models import Pizza from Utilities.views import EmptyAPIView, NoAuthAPIView, AuthAPIView # Create your views here. class IndexView(EmptyAPIView): def get(self,request): return render(request, 'frontend/homepage.html') def post(self,request): return self.get(request) class CreaOrdine(AuthAPIView): def get(self,request): if(not self.authenticated(request)): return render(request, "core/login.html",{"target_page": self.getRequestUrl(request)}) return render(request, 'frontend/creaOrdine.html') class Menu(EmptyAPIView): def get(self,request): return render(request, 'frontend/menu.html') def post(self,request): return self.get(request) class SignUp(NoAuthAPIView): def get(self,request): return render(request, 'core/registration.html') def post(self,request): return self.get(request) class Login(NoAuthAPIView): def get(self,request): return render(request, 'core/login.html',{"target_page": self.getRootUrl(request)}) def post(self,request): return self.get(request) class Orders(AuthAPIView): def get(self,request): if(not self.authenticated(request)): return render(request, "core/login.html",{"target_page": self.getRequestUrl(request)}) return render(request, 'frontend/ordini.html') def post(self,request): return self.get(request) class OrderDetails(AuthAPIView): def get(self,request,order_id): if(not self.authenticated(request)): return render(request, "core/login.html",{"target_page": self.getRequestUrl(request)}) return render(request, 'frontend/dettaglioOrdine.html',{"order_id": order_id}) def post(self,request,order_id): return self.get(request,order_id) class About(NoAuthAPIView): def get(self,request): return render(request, 'frontend/about.html') class NotFound(NoAuthAPIView): def get(self,request): return render(request, 'frontend/404.html')
en
0.968116
# Create your views here.
2.249313
2
vnet_manager/config/validate.py
ppartarr/vnet-manager
0
6621523
from ipaddress import IPv4Interface, IPv6Interface, ip_interface from re import fullmatch from logging import getLogger from os.path import isdir, isfile, join from copy import deepcopy from vnet_manager.utils.mac import random_mac_generator from vnet_manager.conf import settings logger = getLogger(__name__) class ValidateConfig: """ Validates the config generated by get_config() and updates some values if missing """ def __init__(self, config): """ :param dict config: The config generated by get_config() """ self._all_ok = True self._validators_ran = 0 self._new_config = deepcopy(config) self.default_message = ". Please check your settings" self.config = config def __str__(self): return "VNet config validator, current_state: {}, amount of validators run: {}".format( "OK" if self._all_ok else "NOT OK", self._validators_ran ) @property def config_validation_successful(self): """ This property can be called to see if any unrecoverable errors in the config have been found """ return self._all_ok @property def updated_config(self): """ This property contains a updated config dict, with all values that have been fixed by this validator """ return self._new_config @property def validators_ran(self): """ Return the amount of validators that have been run """ return self._validators_ran def validate(self): """ Run all validation functions """ self._all_ok = True self.validate_provider_config() self.validate_switch_config() self.validate_machine_config() if "veths" in self.config: self.validate_veth_config() def validate_provider_config(self): """ Validates the provider part of the config """ self._validators_ran += 1 if "providers" not in self.config: logger.error( "Providers dict not found in config, this usually means the default config is not correct{}".format(self.default_message) ) self._all_ok = False elif not isinstance(self.config["providers"], dict): logger.error("Providers is not a dict, this means the default config is corrupt{}".format(self.default_message)) 
self._all_ok = False else: for name, values in self.config["providers"].items(): if "supported_operating_systems" not in values: logger.error("No supported operating systems found for provider {}{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["supported_operating_systems"], list): logger.error("supported_operating_systems for provider {} is not a list{}".format(name, self.default_message)) self._all_ok = False if "dns-nameserver" not in values or not isinstance(values["dns-nameserver"], str): logger.warning("DNS nameserver not correctly set for provider {}. Defaulting to 8.8.8.8".format(name)) self._new_config["providers"][name]["dns-nameserver"] = "8.8.8.8" if "required_host_packages" not in values or not isinstance(values["required_host_packages"], list): logger.warning("Required host packages not correctly set for provider {}. Defaulting to empty list".format(name)) self._new_config["providers"][name]["required_host_packages"] = list() if "guest_packages" not in values or not isinstance(values["guest_packages"], list): logger.warning("Guest packages not correctly set for provider {}. 
Defaulting to empty list".format(name)) self._new_config["providers"][name]["guest_packages"] = list() if "base_image" not in values: logger.error("No base_image found for provider {}{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["base_image"], dict): logger.error("'base_image' for provider {} is not a dict{}".format(name, self.default_message)) self._all_ok = False # Validate the base image else: self.validate_base_image_parameters(name) def validate_base_image_parameters(self, provider): """ Validates the provider base image parameters for a particiar provider Assumes the base_image dict exists for that provider :param str provider: The provider base image parameters to verify """ base_image_config = self.config["providers"][provider]["base_image"] if "os" not in base_image_config: logger.error("Provider {} is missing OS in the base image config{}".format(provider, self.default_message)) self._all_ok = False elif not isinstance(base_image_config["os"], str): logger.error("Provider {} OS for base image config is not a string{}".format(provider, self.default_message)) self._all_ok = False if "server" not in base_image_config: logger.error("Provider {} is missing server in the base image config{}".format(provider, self.default_message)) self._all_ok = False elif not isinstance(base_image_config["server"], str): logger.error("Provider {} server for base image config is not a string{}".format(provider, self.default_message)) self._all_ok = False if "protocol" not in base_image_config: logger.error("Provider {} is missing protocol in the base image config{}".format(provider, self.default_message)) self._all_ok = False elif not isinstance(base_image_config["protocol"], str): logger.error("Provider {} protocol for base image config is not a string{}".format(provider, self.default_message)) self._all_ok = False def validate_switch_config(self): """ Validates the switch part of the config """ self._validators_ran += 1 if "switches" 
not in self.config: logger.error("Config item 'switches' missing{}".format(self.default_message)) self._all_ok = False elif not isinstance(self.config["switches"], int): logger.error( "Config item 'switches: {}' does not seem to be an integer{}".format(self.config["switches"], self.default_message) ) self._all_ok = False def validate_machine_config(self): # TODO: Refactor # pylint: disable=too-many-branches """ Validates the machines part of the config """ self._validators_ran += 1 if "machines" not in self.config: logger.error("Config item 'machines' missing{}".format(self.default_message)) self._all_ok = False elif not isinstance(self.config["machines"], dict): logger.error("Machines config is not a dict, this means the user config is incorrect{}".format(self.default_message)) self._all_ok = False else: for name, values in self.config["machines"].items(): if "type" not in values: logger.error("Type not found for machine {}{}".format(name, self.default_message)) self._all_ok = False elif values["type"] not in settings.SUPPORTED_MACHINE_TYPES: logger.error( "Type {} for machine {} unsupported. I only support the following types: {}{}".format( values["type"], name, settings.SUPPORTED_MACHINE_TYPES, self.default_message ) ) self._all_ok = False # Files if "files" in values: if not isinstance(values["files"], dict): logger.error("Files directive for machine {} is not a dict{}".format(name, self.default_message)) self._all_ok = False else: # Check the files self.validate_machine_files_parameters(name) # Interfaces if "interfaces" not in values: logger.error("Machine {} does not appear to have any interfaces{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["interfaces"], dict): logger.error( "The interfaces for machine {} are not given as a dict, this usually means a typo in the config{}".format( name, self.default_message ) ) self._all_ok = False else: self.validate_interface_config(name) # VLANs? 
if "vlans" not in values: logger.debug("Machine {} does not appear to have any VLAN interfaces, that's okay".format(name)) elif not isinstance(values["vlans"], dict): logger.error( "Machine {} has a VLAN config but it does not " "appear to be a dict, this usually means a typo in the config{}".format(name, self.default_message) ) self._all_ok = False else: self.validate_vlan_config(name) def validate_vlan_config(self, machine): """ Validates the VLAN config of a particular machine :param machine: str: the machine to validate the VLAN config for """ vlans = self.config["machines"][machine]["vlans"] for name, values in vlans.items(): if "id" not in values: logger.error("VLAN {} on machine {} is missing it's vlan id{}".format(name, machine, self.default_message)) self._all_ok = False else: try: self._new_config["machines"][machine]["vlans"][name]["id"] = int(values["id"]) except ValueError: logger.error( "Unable to cast VLAN {} with ID {} from machine {} to a integer{}".format( name, values["id"], machine, self.default_message ) ) self._all_ok = False if "link" not in values: logger.error("VLAN {} on machine {} is missing it's link attribute{}".format(name, machine, self.default_message)) self._all_ok = False elif not isinstance(values["link"], str): logger.error( "Link {} for VLAN {} on machine {}, does not seem to be a string{}".format( values["link"], name, machine, self.default_message ) ) self._all_ok = False # This check requires a valid interface config, so we only do it if the previous checks have been successful elif self._all_ok and values["link"] not in self.config["machines"][machine]["interfaces"]: logger.error( "Link {} for VLAN {} on machine {} does not correspond to any interfaces on the same machine{}".format( values["link"], name, machine, self.default_message ) ) self._all_ok = False if "addresses" not in values: logger.debug("VLAN {} on machine {} does not have any addresses, that's okay".format(name, machine)) elif not 
isinstance(values["addresses"], list): logger.error( "Addresses on VLAN {} for machine {}, does not seem to be a list{}".format(name, machine, self.default_message) ) self._all_ok = False else: for address in values["addresses"]: try: ip_interface(address) except ValueError as e: logger.error( "Address {} for VLAN {} on machine {} does not seem to be a valid address, got parse error {}".format( address, name, machine, e ) ) self._all_ok = False def validate_machine_files_parameters(self, machine): """ Validates the files config of a particular machine Assumes the files dict exists for that machine :param str machine: The machine to validates the files config for """ files = self.config["machines"][machine]["files"] for host_file in files.keys(): # First check if the user gave a relative dir from the config dir if isdir(join(self.config["config_dir"], host_file)) or isfile(join(self.config["config_dir"], host_file)): logger.debug( "Updating relative host_file path {} to full path {}".format(host_file, join(self.config["config_dir"], host_file)) ) self._new_config["machines"][machine]["files"][join(self.config["config_dir"], host_file)] = self._new_config["machines"][ machine ]["files"].pop(host_file) # Check for absolute paths elif not isdir(host_file) or not isfile(host_file): logger.error( "Host file {} for machine {} does not seem to be a dir or a file{}".format(host_file, machine, self.default_message) ) self._all_ok = False def validate_interface_config(self, machine): """ Validates the interface config of a particular machine Assumes the interfaces dict exists for that machine :param str machine: the machine to validate the interfaces config for """ interfaces = self.config["machines"][machine]["interfaces"] for int_name, int_vals in interfaces.items(): if "ipv4" not in int_vals: logger.debug( "No IPv4 found for interface {} on machine {}. 
That's okay, no IPv4 will be configured".format(int_name, machine) ) else: # Validate the given IP try: IPv4Interface(int_vals["ipv4"]) except ValueError as e: logger.error("Unable to parse IPv4 address {} for machine {}. Parse error: {}".format(int_vals["ipv4"], machine, e)) self._all_ok = False if "ipv6" not in int_vals: logger.debug( "No IPv6 found for interface {} on machine {}, that's okay no IPv6 address will be configured".format(int_name, machine) ) else: # Validate the given IP try: IPv6Interface(int_vals["ipv6"]) except ValueError as e: logger.error("Unable to parse IPv6 address {} for machine {}. Parse error: {}".format(int_vals["ipv6"], machine, e)) self._all_ok = False if "mac" not in int_vals: logger.debug("MAC not found for interface {} on machine {}, generating a random one".format(int_name, machine)) self._new_config["machines"][machine]["interfaces"][int_name]["mac"] = random_mac_generator() # From: https://stackoverflow.com/a/7629690/8632038 elif not fullmatch(r"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$", int_vals["mac"]): logger.error( "MAC {} for interface {} on machine {}, does not seem to be valid{}".format( int_vals["mac"], int_name, machine, self.default_message ) ) self._all_ok = False if "bridge" not in int_vals: logger.error("bridge keyword missing on interface {} for machine {}{}".format(int_name, machine, self.default_message)) self._all_ok = False elif not isinstance(int_vals["bridge"], int) or int_vals["bridge"] > self.config["switches"] - 1: logger.error( "Invalid bridge number detected for interface {} on machine {}. 
" "The bridge keyword should correspond to the interface number of the vnet bridge to connect to " "(starting at iface number 0)".format(int_name, machine) ) self._all_ok = False def validate_veth_config(self): """ Validates the veth config if present """ if "veths" not in self.config: logger.warning("Tried to validate veth config, but no veth config present, skipping...") return if not isinstance(self.config["veths"], dict): logger.error("Config item: 'veths' does not seem to be a dict {}".format(self.default_message)) self._all_ok = False return for name, values in self.config["veths"].items(): if not isinstance(name, str): logger.error("veth interface name: {} does not seem to be a string{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values, dict): logger.error("veth interface {} data does not seem to be a dict{}".format(name, self.default_message)) self._all_ok = False else: if "bridge" not in values: logger.error("veth interface {} is missing the bridge parameter{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["bridge"], str): logger.error("veth interface {} bridge parameter does not seem to be a str{}".format(name, self.default_message)) self._all_ok = False if "peer" not in values: logger.debug("veth interface {} does not have a peer, that's ok, assuming it's peer is defined elsewhere".format(name)) elif not isinstance(values["peer"], str): logger.error("veth interface {} peer parameter does not seem to be a string{}".format(name, self.default_message)) self._all_ok = False if "stp" not in values: logger.debug("veth interface {} as no STP parameter, that's okay".format(name)) elif not isinstance(values["stp"], bool): logger.error("veth interface {} stp parameter does not seem to be a boolean{}".format(name, self.default_message)) self._all_ok = False
from ipaddress import IPv4Interface, IPv6Interface, ip_interface from re import fullmatch from logging import getLogger from os.path import isdir, isfile, join from copy import deepcopy from vnet_manager.utils.mac import random_mac_generator from vnet_manager.conf import settings logger = getLogger(__name__) class ValidateConfig: """ Validates the config generated by get_config() and updates some values if missing """ def __init__(self, config): """ :param dict config: The config generated by get_config() """ self._all_ok = True self._validators_ran = 0 self._new_config = deepcopy(config) self.default_message = ". Please check your settings" self.config = config def __str__(self): return "VNet config validator, current_state: {}, amount of validators run: {}".format( "OK" if self._all_ok else "NOT OK", self._validators_ran ) @property def config_validation_successful(self): """ This property can be called to see if any unrecoverable errors in the config have been found """ return self._all_ok @property def updated_config(self): """ This property contains a updated config dict, with all values that have been fixed by this validator """ return self._new_config @property def validators_ran(self): """ Return the amount of validators that have been run """ return self._validators_ran def validate(self): """ Run all validation functions """ self._all_ok = True self.validate_provider_config() self.validate_switch_config() self.validate_machine_config() if "veths" in self.config: self.validate_veth_config() def validate_provider_config(self): """ Validates the provider part of the config """ self._validators_ran += 1 if "providers" not in self.config: logger.error( "Providers dict not found in config, this usually means the default config is not correct{}".format(self.default_message) ) self._all_ok = False elif not isinstance(self.config["providers"], dict): logger.error("Providers is not a dict, this means the default config is corrupt{}".format(self.default_message)) 
self._all_ok = False else: for name, values in self.config["providers"].items(): if "supported_operating_systems" not in values: logger.error("No supported operating systems found for provider {}{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["supported_operating_systems"], list): logger.error("supported_operating_systems for provider {} is not a list{}".format(name, self.default_message)) self._all_ok = False if "dns-nameserver" not in values or not isinstance(values["dns-nameserver"], str): logger.warning("DNS nameserver not correctly set for provider {}. Defaulting to 8.8.8.8".format(name)) self._new_config["providers"][name]["dns-nameserver"] = "8.8.8.8" if "required_host_packages" not in values or not isinstance(values["required_host_packages"], list): logger.warning("Required host packages not correctly set for provider {}. Defaulting to empty list".format(name)) self._new_config["providers"][name]["required_host_packages"] = list() if "guest_packages" not in values or not isinstance(values["guest_packages"], list): logger.warning("Guest packages not correctly set for provider {}. 
Defaulting to empty list".format(name)) self._new_config["providers"][name]["guest_packages"] = list() if "base_image" not in values: logger.error("No base_image found for provider {}{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["base_image"], dict): logger.error("'base_image' for provider {} is not a dict{}".format(name, self.default_message)) self._all_ok = False # Validate the base image else: self.validate_base_image_parameters(name) def validate_base_image_parameters(self, provider): """ Validates the provider base image parameters for a particiar provider Assumes the base_image dict exists for that provider :param str provider: The provider base image parameters to verify """ base_image_config = self.config["providers"][provider]["base_image"] if "os" not in base_image_config: logger.error("Provider {} is missing OS in the base image config{}".format(provider, self.default_message)) self._all_ok = False elif not isinstance(base_image_config["os"], str): logger.error("Provider {} OS for base image config is not a string{}".format(provider, self.default_message)) self._all_ok = False if "server" not in base_image_config: logger.error("Provider {} is missing server in the base image config{}".format(provider, self.default_message)) self._all_ok = False elif not isinstance(base_image_config["server"], str): logger.error("Provider {} server for base image config is not a string{}".format(provider, self.default_message)) self._all_ok = False if "protocol" not in base_image_config: logger.error("Provider {} is missing protocol in the base image config{}".format(provider, self.default_message)) self._all_ok = False elif not isinstance(base_image_config["protocol"], str): logger.error("Provider {} protocol for base image config is not a string{}".format(provider, self.default_message)) self._all_ok = False def validate_switch_config(self): """ Validates the switch part of the config """ self._validators_ran += 1 if "switches" 
not in self.config: logger.error("Config item 'switches' missing{}".format(self.default_message)) self._all_ok = False elif not isinstance(self.config["switches"], int): logger.error( "Config item 'switches: {}' does not seem to be an integer{}".format(self.config["switches"], self.default_message) ) self._all_ok = False def validate_machine_config(self): # TODO: Refactor # pylint: disable=too-many-branches """ Validates the machines part of the config """ self._validators_ran += 1 if "machines" not in self.config: logger.error("Config item 'machines' missing{}".format(self.default_message)) self._all_ok = False elif not isinstance(self.config["machines"], dict): logger.error("Machines config is not a dict, this means the user config is incorrect{}".format(self.default_message)) self._all_ok = False else: for name, values in self.config["machines"].items(): if "type" not in values: logger.error("Type not found for machine {}{}".format(name, self.default_message)) self._all_ok = False elif values["type"] not in settings.SUPPORTED_MACHINE_TYPES: logger.error( "Type {} for machine {} unsupported. I only support the following types: {}{}".format( values["type"], name, settings.SUPPORTED_MACHINE_TYPES, self.default_message ) ) self._all_ok = False # Files if "files" in values: if not isinstance(values["files"], dict): logger.error("Files directive for machine {} is not a dict{}".format(name, self.default_message)) self._all_ok = False else: # Check the files self.validate_machine_files_parameters(name) # Interfaces if "interfaces" not in values: logger.error("Machine {} does not appear to have any interfaces{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["interfaces"], dict): logger.error( "The interfaces for machine {} are not given as a dict, this usually means a typo in the config{}".format( name, self.default_message ) ) self._all_ok = False else: self.validate_interface_config(name) # VLANs? 
if "vlans" not in values: logger.debug("Machine {} does not appear to have any VLAN interfaces, that's okay".format(name)) elif not isinstance(values["vlans"], dict): logger.error( "Machine {} has a VLAN config but it does not " "appear to be a dict, this usually means a typo in the config{}".format(name, self.default_message) ) self._all_ok = False else: self.validate_vlan_config(name) def validate_vlan_config(self, machine): """ Validates the VLAN config of a particular machine :param machine: str: the machine to validate the VLAN config for """ vlans = self.config["machines"][machine]["vlans"] for name, values in vlans.items(): if "id" not in values: logger.error("VLAN {} on machine {} is missing it's vlan id{}".format(name, machine, self.default_message)) self._all_ok = False else: try: self._new_config["machines"][machine]["vlans"][name]["id"] = int(values["id"]) except ValueError: logger.error( "Unable to cast VLAN {} with ID {} from machine {} to a integer{}".format( name, values["id"], machine, self.default_message ) ) self._all_ok = False if "link" not in values: logger.error("VLAN {} on machine {} is missing it's link attribute{}".format(name, machine, self.default_message)) self._all_ok = False elif not isinstance(values["link"], str): logger.error( "Link {} for VLAN {} on machine {}, does not seem to be a string{}".format( values["link"], name, machine, self.default_message ) ) self._all_ok = False # This check requires a valid interface config, so we only do it if the previous checks have been successful elif self._all_ok and values["link"] not in self.config["machines"][machine]["interfaces"]: logger.error( "Link {} for VLAN {} on machine {} does not correspond to any interfaces on the same machine{}".format( values["link"], name, machine, self.default_message ) ) self._all_ok = False if "addresses" not in values: logger.debug("VLAN {} on machine {} does not have any addresses, that's okay".format(name, machine)) elif not 
isinstance(values["addresses"], list): logger.error( "Addresses on VLAN {} for machine {}, does not seem to be a list{}".format(name, machine, self.default_message) ) self._all_ok = False else: for address in values["addresses"]: try: ip_interface(address) except ValueError as e: logger.error( "Address {} for VLAN {} on machine {} does not seem to be a valid address, got parse error {}".format( address, name, machine, e ) ) self._all_ok = False def validate_machine_files_parameters(self, machine): """ Validates the files config of a particular machine Assumes the files dict exists for that machine :param str machine: The machine to validates the files config for """ files = self.config["machines"][machine]["files"] for host_file in files.keys(): # First check if the user gave a relative dir from the config dir if isdir(join(self.config["config_dir"], host_file)) or isfile(join(self.config["config_dir"], host_file)): logger.debug( "Updating relative host_file path {} to full path {}".format(host_file, join(self.config["config_dir"], host_file)) ) self._new_config["machines"][machine]["files"][join(self.config["config_dir"], host_file)] = self._new_config["machines"][ machine ]["files"].pop(host_file) # Check for absolute paths elif not isdir(host_file) or not isfile(host_file): logger.error( "Host file {} for machine {} does not seem to be a dir or a file{}".format(host_file, machine, self.default_message) ) self._all_ok = False def validate_interface_config(self, machine): """ Validates the interface config of a particular machine Assumes the interfaces dict exists for that machine :param str machine: the machine to validate the interfaces config for """ interfaces = self.config["machines"][machine]["interfaces"] for int_name, int_vals in interfaces.items(): if "ipv4" not in int_vals: logger.debug( "No IPv4 found for interface {} on machine {}. 
That's okay, no IPv4 will be configured".format(int_name, machine) ) else: # Validate the given IP try: IPv4Interface(int_vals["ipv4"]) except ValueError as e: logger.error("Unable to parse IPv4 address {} for machine {}. Parse error: {}".format(int_vals["ipv4"], machine, e)) self._all_ok = False if "ipv6" not in int_vals: logger.debug( "No IPv6 found for interface {} on machine {}, that's okay no IPv6 address will be configured".format(int_name, machine) ) else: # Validate the given IP try: IPv6Interface(int_vals["ipv6"]) except ValueError as e: logger.error("Unable to parse IPv6 address {} for machine {}. Parse error: {}".format(int_vals["ipv6"], machine, e)) self._all_ok = False if "mac" not in int_vals: logger.debug("MAC not found for interface {} on machine {}, generating a random one".format(int_name, machine)) self._new_config["machines"][machine]["interfaces"][int_name]["mac"] = random_mac_generator() # From: https://stackoverflow.com/a/7629690/8632038 elif not fullmatch(r"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$", int_vals["mac"]): logger.error( "MAC {} for interface {} on machine {}, does not seem to be valid{}".format( int_vals["mac"], int_name, machine, self.default_message ) ) self._all_ok = False if "bridge" not in int_vals: logger.error("bridge keyword missing on interface {} for machine {}{}".format(int_name, machine, self.default_message)) self._all_ok = False elif not isinstance(int_vals["bridge"], int) or int_vals["bridge"] > self.config["switches"] - 1: logger.error( "Invalid bridge number detected for interface {} on machine {}. 
" "The bridge keyword should correspond to the interface number of the vnet bridge to connect to " "(starting at iface number 0)".format(int_name, machine) ) self._all_ok = False def validate_veth_config(self): """ Validates the veth config if present """ if "veths" not in self.config: logger.warning("Tried to validate veth config, but no veth config present, skipping...") return if not isinstance(self.config["veths"], dict): logger.error("Config item: 'veths' does not seem to be a dict {}".format(self.default_message)) self._all_ok = False return for name, values in self.config["veths"].items(): if not isinstance(name, str): logger.error("veth interface name: {} does not seem to be a string{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values, dict): logger.error("veth interface {} data does not seem to be a dict{}".format(name, self.default_message)) self._all_ok = False else: if "bridge" not in values: logger.error("veth interface {} is missing the bridge parameter{}".format(name, self.default_message)) self._all_ok = False elif not isinstance(values["bridge"], str): logger.error("veth interface {} bridge parameter does not seem to be a str{}".format(name, self.default_message)) self._all_ok = False if "peer" not in values: logger.debug("veth interface {} does not have a peer, that's ok, assuming it's peer is defined elsewhere".format(name)) elif not isinstance(values["peer"], str): logger.error("veth interface {} peer parameter does not seem to be a string{}".format(name, self.default_message)) self._all_ok = False if "stp" not in values: logger.debug("veth interface {} as no STP parameter, that's okay".format(name)) elif not isinstance(values["stp"], bool): logger.error("veth interface {} stp parameter does not seem to be a boolean{}".format(name, self.default_message)) self._all_ok = False
en
0.686238
Validates the config generated by get_config() and updates some values if missing :param dict config: The config generated by get_config() This property can be called to see if any unrecoverable errors in the config have been found This property contains a updated config dict, with all values that have been fixed by this validator Return the amount of validators that have been run Run all validation functions Validates the provider part of the config # Validate the base image Validates the provider base image parameters for a particiar provider Assumes the base_image dict exists for that provider :param str provider: The provider base image parameters to verify Validates the switch part of the config # TODO: Refactor # pylint: disable=too-many-branches Validates the machines part of the config # Files # Check the files # Interfaces # VLANs? Validates the VLAN config of a particular machine :param machine: str: the machine to validate the VLAN config for # This check requires a valid interface config, so we only do it if the previous checks have been successful Validates the files config of a particular machine Assumes the files dict exists for that machine :param str machine: The machine to validates the files config for # First check if the user gave a relative dir from the config dir # Check for absolute paths Validates the interface config of a particular machine Assumes the interfaces dict exists for that machine :param str machine: the machine to validate the interfaces config for # Validate the given IP # Validate the given IP # From: https://stackoverflow.com/a/7629690/8632038 Validates the veth config if present
2.341737
2
sequences.py
chapman-cs510-2017f/cw-03-kris_ehsan_evan
0
6621524
<filename>sequences.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys def fibonacci(n): count = 0 while True: try: if count >= 1: n = int(input("enter a positve integer: ")) my_list = [] a,b = 0,1 for i in range(n): a,b = b,a+b my_list.append(a) #print(my_list) return my_list except Exception: print("ERROR:invalid input recieved for fibonacci(n), expect positive integer") count=count+1 continue #fibonacci(10.3333333) #fibonacci("Dw") #fibonacci(wadkvsa) #fibonacci(100)
<filename>sequences.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys def fibonacci(n): count = 0 while True: try: if count >= 1: n = int(input("enter a positve integer: ")) my_list = [] a,b = 0,1 for i in range(n): a,b = b,a+b my_list.append(a) #print(my_list) return my_list except Exception: print("ERROR:invalid input recieved for fibonacci(n), expect positive integer") count=count+1 continue #fibonacci(10.3333333) #fibonacci("Dw") #fibonacci(wadkvsa) #fibonacci(100)
en
0.068134
#!/usr/bin/env python3 # -*- coding: utf-8 -*- #print(my_list) #fibonacci(10.3333333) #fibonacci("Dw") #fibonacci(wadkvsa) #fibonacci(100)
4.135221
4
service_info_cms/templatetags/si_cms_language_tags.py
hanaahajj/Serviceinfo_hanaa
0
6621525
from django import template from django.conf import settings register = template.Library() @register.simple_tag(takes_context=True) def service_language_code(context, service): request = context['request'] return settings.SERVICE_LANGUAGE_CODES[service.lower()][request.LANGUAGE_CODE] @register.simple_tag def menu_language_name(language_code): return settings.MENU_LANGUAGE_NAMES[language_code]
from django import template from django.conf import settings register = template.Library() @register.simple_tag(takes_context=True) def service_language_code(context, service): request = context['request'] return settings.SERVICE_LANGUAGE_CODES[service.lower()][request.LANGUAGE_CODE] @register.simple_tag def menu_language_name(language_code): return settings.MENU_LANGUAGE_NAMES[language_code]
none
1
1.831308
2
tckdb/backend/app/schemas/trans.py
TCKDB/TCKDB
2
6621526
""" TCKDB backend app schemas energy transfer (trans) module """ from enum import Enum from typing import Dict, Optional, Tuple, Union from pydantic import BaseModel, Field, validator class TransModelEnum(str, Enum): """ The supported Trans models """ single_exponential_down = 'Single Exponential Down' class TransBase(BaseModel): """ A TransBase class (shared properties) """ model: TransModelEnum = Field(..., title='The energy transfer model, ' 'currently only "Single Exponential Down" is supported') parameters: Dict[str, Union[Tuple[Union[float], str], Union[float]]] = \ Field(..., title='The energy transfer model parameters') reviewer_flags: Optional[Dict[str, str]] = Field(None, title='Reviewer flags') class Config: extra = "forbid" @validator('reviewer_flags', always=True) def check_reviewer_flags(cls, value): """Trans.reviewer_flags validator""" return value or dict() @validator('parameters', always=True) def check_parameters(cls, value, values): """Trans.parameters validator""" if 'model' in values and values['model'] == TransModelEnum.single_exponential_down: if 'alpha0' not in value: raise ValueError(f"The 'alpha0' parameter is required for a " f"Single Exponential Down energy transfer model") if 'T0' not in value: raise ValueError(f"The 'T0' parameter is required for a " f"Single Exponential Down energy transfer model") if 'n' not in value: raise ValueError(f"The 'n' parameter is required for a " f"Single Exponential Down energy transfer model") for key, val in value.items(): if key not in ['alpha0', 'T0', 'n']: raise ValueError(f"Got an unexpected key for the Single Exponential Down energy transfer model: " f"'{key}'. 
Allowed keys are 'alpha0', 'T0', 'n'.") if key == 'n': if not isinstance(val, (float, int)): raise ValueError(f"The 'n' parameter of the Single Exponential Down energy transfer model must " f"be dimensionless, got {val} in {value} which is a {type(val)}") else: if not isinstance(val, tuple): raise ValueError(f"The 'alpha0' and 'T0' parameters of the Single Exponential Down energy " f"transfer model must be dimensionless, got {val} in {value} " f"which is a {type(val)}") return value class TransCreate(TransBase): """Create a Trans item: Properties to receive on item creation""" model: str parameters: Dict[str, Union[Tuple[Union[float, int], str], Union[float, int]]] reviewer_flags: Optional[Dict[str, str]] = None class TransUpdate(TransBase): """Update a Trans item: Properties to receive on item update""" model: str parameters: Dict[str, Union[Tuple[Union[float, int], str], Union[float, int]]] reviewer_flags: Optional[Dict[str, str]] = None class TransInDBBase(TransBase): """Properties shared by models stored in DB""" id: int model: str parameters: Dict[str, Union[Tuple[Union[float, int], str], Union[float, int]]] reviewer_flags: Optional[Dict[str, str]] = None class Config: orm_mode = True class Trans(TransInDBBase): """Properties to return to client""" pass class TransInDB(TransInDBBase): """Properties stored in DB""" pass
""" TCKDB backend app schemas energy transfer (trans) module """ from enum import Enum from typing import Dict, Optional, Tuple, Union from pydantic import BaseModel, Field, validator class TransModelEnum(str, Enum): """ The supported Trans models """ single_exponential_down = 'Single Exponential Down' class TransBase(BaseModel): """ A TransBase class (shared properties) """ model: TransModelEnum = Field(..., title='The energy transfer model, ' 'currently only "Single Exponential Down" is supported') parameters: Dict[str, Union[Tuple[Union[float], str], Union[float]]] = \ Field(..., title='The energy transfer model parameters') reviewer_flags: Optional[Dict[str, str]] = Field(None, title='Reviewer flags') class Config: extra = "forbid" @validator('reviewer_flags', always=True) def check_reviewer_flags(cls, value): """Trans.reviewer_flags validator""" return value or dict() @validator('parameters', always=True) def check_parameters(cls, value, values): """Trans.parameters validator""" if 'model' in values and values['model'] == TransModelEnum.single_exponential_down: if 'alpha0' not in value: raise ValueError(f"The 'alpha0' parameter is required for a " f"Single Exponential Down energy transfer model") if 'T0' not in value: raise ValueError(f"The 'T0' parameter is required for a " f"Single Exponential Down energy transfer model") if 'n' not in value: raise ValueError(f"The 'n' parameter is required for a " f"Single Exponential Down energy transfer model") for key, val in value.items(): if key not in ['alpha0', 'T0', 'n']: raise ValueError(f"Got an unexpected key for the Single Exponential Down energy transfer model: " f"'{key}'. 
Allowed keys are 'alpha0', 'T0', 'n'.") if key == 'n': if not isinstance(val, (float, int)): raise ValueError(f"The 'n' parameter of the Single Exponential Down energy transfer model must " f"be dimensionless, got {val} in {value} which is a {type(val)}") else: if not isinstance(val, tuple): raise ValueError(f"The 'alpha0' and 'T0' parameters of the Single Exponential Down energy " f"transfer model must be dimensionless, got {val} in {value} " f"which is a {type(val)}") return value class TransCreate(TransBase): """Create a Trans item: Properties to receive on item creation""" model: str parameters: Dict[str, Union[Tuple[Union[float, int], str], Union[float, int]]] reviewer_flags: Optional[Dict[str, str]] = None class TransUpdate(TransBase): """Update a Trans item: Properties to receive on item update""" model: str parameters: Dict[str, Union[Tuple[Union[float, int], str], Union[float, int]]] reviewer_flags: Optional[Dict[str, str]] = None class TransInDBBase(TransBase): """Properties shared by models stored in DB""" id: int model: str parameters: Dict[str, Union[Tuple[Union[float, int], str], Union[float, int]]] reviewer_flags: Optional[Dict[str, str]] = None class Config: orm_mode = True class Trans(TransInDBBase): """Properties to return to client""" pass class TransInDB(TransInDBBase): """Properties stored in DB""" pass
en
0.716901
TCKDB backend app schemas energy transfer (trans) module The supported Trans models A TransBase class (shared properties) Trans.reviewer_flags validator Trans.parameters validator Create a Trans item: Properties to receive on item creation Update a Trans item: Properties to receive on item update Properties shared by models stored in DB Properties to return to client Properties stored in DB
2.42363
2
Python/Ex049.py
EspagueteTV/Meus-Estudos-CursoEmVideo
0
6621527
n = int(input('Informe um número para ver a sua tabuada: ')) for i in range(0, 11): print('{} x {:2} = {}'.format(n, i, n * i))
n = int(input('Informe um número para ver a sua tabuada: ')) for i in range(0, 11): print('{} x {:2} = {}'.format(n, i, n * i))
none
1
3.937096
4
sensor_correction/apps/train.py
cheind/rgbd-correction
15
6621528
__author__ = '<NAME>' __copyright__ = 'Copyright 2017, Profactor GmbH' __license__ = 'BSD' import glob import os import numpy as np import matplotlib.pyplot as plt from sensor_correction.utils import sensor_unproject from sensor_correction.gp_cpu import GPRegressor def select_data(temps, poses, all_depths_ir, all_depths_rgb, Kinv, xy, target='rgb'): sel_xyzt = [] sel_deltas = [] for p in poses: if target == 'rgb': depth_target = all_depths_rgb[(p, temps[0])] elif target == 'ir': depth_target = all_depths_ir[(p, temps[0])] d_target = depth_target[xy[:,1], xy[:,0]] for t in temps: depth_ir = all_depths_ir[(p, t)] # Actual d_ir = depth_ir[xy[:,1], xy[:,0]] xyz = sensor_unproject(xy, d_ir, Kinv) xyzt = np.empty((xyz.shape[0], 4), dtype=np.float32) xyzt[:, :3] = xyz xyzt[:, 3] = t delta = d_target - d_ir mask = d_ir > 0. """ plt.imshow(depth_rgb - depth_ir) plt.plot(xy[:,0][mask], xy[:,1][mask], 'k+') plt.colorbar() plt.show() """ sel_xyzt.append(xyzt[mask]) sel_deltas.append(delta[mask]) sel_xyzt = np.concatenate(sel_xyzt) sel_deltas = np.concatenate(sel_deltas) return sel_xyzt, sel_deltas if __name__ == '__main__': np.random.seed(1) import argparse parser = argparse.ArgumentParser(description='Train Gaussian Process for depth correction.') parser.add_argument('depth', type=str, help='Preprocessed depth data') parser.add_argument('intrinsics', type=str, help='Camera intrinsics') parser.add_argument('--output', type=str, help='Result regressor filename', default='gpr.pkl') parser.add_argument('--target', type=str, help='Target depth to train for, RGB or IR.', default='rgb') args = parser.parse_args() # Load depth data data = np.load(args.depth) temps = data['temps'] poses = data['poses'] all_depths_ir = data['depth_ir'][()] all_depths_rgb = data['depth_rgb'][()] h, w = all_depths_ir[(poses[0], temps[0])].shape # Load intrinsics K = np.loadtxt(args.intrinsics).reshape(3,3) Kinv = np.linalg.inv(K) # Create train and test data x = np.linspace(0, w-1, 8, dtype=np.int32) y = 
np.linspace(0, h-1, 8, dtype=np.int32) xx, yy = np.meshgrid(x, y) xy_train = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1))) train_xyzt, train_deltae = select_data( temps[::2], poses, all_depths_ir, all_depths_rgb, Kinv, xy_train, target=args.target.lower()) xy_test = np.random.uniform(0, [w-1,h-1], size=(10,2)).astype(np.int32) test_xyzt, test_deltae = select_data( temps[::2], poses[::2], all_depths_ir, all_depths_rgb, Kinv, xy_test, target=args.target.lower()) r = GPRegressor() r.fit(train_xyzt, train_deltae, length_scale=[0.5, 0.5, 0.5, 10], signal_std=1., noise_std=0.002, optimize=True, normalize=True, repeat=2) ypred = r.predict(test_xyzt) d = ypred - test_deltae rmse = np.sqrt(np.mean(np.square(d))) print('RMSE {:e}'.format(rmse)) print('Optimized length scale {}'.format(r.length_scale)) print('Optimized signal std {}'.format(r.signal_std)) print('Optimized noise std {}'.format(r.noise_std)) r.save(args.output)
__author__ = '<NAME>' __copyright__ = 'Copyright 2017, Profactor GmbH' __license__ = 'BSD' import glob import os import numpy as np import matplotlib.pyplot as plt from sensor_correction.utils import sensor_unproject from sensor_correction.gp_cpu import GPRegressor def select_data(temps, poses, all_depths_ir, all_depths_rgb, Kinv, xy, target='rgb'): sel_xyzt = [] sel_deltas = [] for p in poses: if target == 'rgb': depth_target = all_depths_rgb[(p, temps[0])] elif target == 'ir': depth_target = all_depths_ir[(p, temps[0])] d_target = depth_target[xy[:,1], xy[:,0]] for t in temps: depth_ir = all_depths_ir[(p, t)] # Actual d_ir = depth_ir[xy[:,1], xy[:,0]] xyz = sensor_unproject(xy, d_ir, Kinv) xyzt = np.empty((xyz.shape[0], 4), dtype=np.float32) xyzt[:, :3] = xyz xyzt[:, 3] = t delta = d_target - d_ir mask = d_ir > 0. """ plt.imshow(depth_rgb - depth_ir) plt.plot(xy[:,0][mask], xy[:,1][mask], 'k+') plt.colorbar() plt.show() """ sel_xyzt.append(xyzt[mask]) sel_deltas.append(delta[mask]) sel_xyzt = np.concatenate(sel_xyzt) sel_deltas = np.concatenate(sel_deltas) return sel_xyzt, sel_deltas if __name__ == '__main__': np.random.seed(1) import argparse parser = argparse.ArgumentParser(description='Train Gaussian Process for depth correction.') parser.add_argument('depth', type=str, help='Preprocessed depth data') parser.add_argument('intrinsics', type=str, help='Camera intrinsics') parser.add_argument('--output', type=str, help='Result regressor filename', default='gpr.pkl') parser.add_argument('--target', type=str, help='Target depth to train for, RGB or IR.', default='rgb') args = parser.parse_args() # Load depth data data = np.load(args.depth) temps = data['temps'] poses = data['poses'] all_depths_ir = data['depth_ir'][()] all_depths_rgb = data['depth_rgb'][()] h, w = all_depths_ir[(poses[0], temps[0])].shape # Load intrinsics K = np.loadtxt(args.intrinsics).reshape(3,3) Kinv = np.linalg.inv(K) # Create train and test data x = np.linspace(0, w-1, 8, dtype=np.int32) y = 
np.linspace(0, h-1, 8, dtype=np.int32) xx, yy = np.meshgrid(x, y) xy_train = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1))) train_xyzt, train_deltae = select_data( temps[::2], poses, all_depths_ir, all_depths_rgb, Kinv, xy_train, target=args.target.lower()) xy_test = np.random.uniform(0, [w-1,h-1], size=(10,2)).astype(np.int32) test_xyzt, test_deltae = select_data( temps[::2], poses[::2], all_depths_ir, all_depths_rgb, Kinv, xy_test, target=args.target.lower()) r = GPRegressor() r.fit(train_xyzt, train_deltae, length_scale=[0.5, 0.5, 0.5, 10], signal_std=1., noise_std=0.002, optimize=True, normalize=True, repeat=2) ypred = r.predict(test_xyzt) d = ypred - test_deltae rmse = np.sqrt(np.mean(np.square(d))) print('RMSE {:e}'.format(rmse)) print('Optimized length scale {}'.format(r.length_scale)) print('Optimized signal std {}'.format(r.signal_std)) print('Optimized noise std {}'.format(r.noise_std)) r.save(args.output)
en
0.106481
# Actual plt.imshow(depth_rgb - depth_ir) plt.plot(xy[:,0][mask], xy[:,1][mask], 'k+') plt.colorbar() plt.show() # Load depth data # Load intrinsics # Create train and test data
2.349477
2
TM1py/Services/DimensionService.py
lotsaram/TM1py
0
6621529
# -*- coding: utf-8 -*- import json from TM1py.Exceptions import TM1pyException from TM1py.Objects.Dimension import Dimension from TM1py.Services.ObjectService import ObjectService from TM1py.Services.SubsetService import SubsetService from TM1py.Services.HierarchyService import HierarchyService class DimensionService(ObjectService): """ Service to handle Object Updates for TM1 Dimensions """ def __init__(self, rest): super().__init__(rest) self.hierarchies = HierarchyService(rest) self.subsets = SubsetService(rest) def create(self, dimension): """ create a dimension :param dimension: instance of TM1py.Dimension :return: response """ # If Dimension exists. throw Exception if self.exists(dimension.name): raise Exception("Dimension already exists") # If not all subsequent calls successfull -> undo everything that has been done in this function try: # create Dimension, Hierarchies, Elements, Edges etc. request = "/api/v1/Dimensions" response = self._rest.POST(request, dimension.body) except TM1pyException as e: # undo everything if problem in step 1 or 2 if self.exists(dimension.name): self.delete(dimension.name) raise e return response def get(self, dimension_name): """ Get a Dimension :param dimension_name: :return: """ request = "/api/v1/Dimensions('{}')?$expand=Hierarchies($expand=*)".format(dimension_name) dimension_as_json = self._rest.GET(request) return Dimension.from_json(dimension_as_json) def update(self, dimension): """ Update an existing dimension :param dimension: instance of TM1py.Dimension :return: None """ # update Hierarchies for hierarchy in dimension: self.hierarchies.update(hierarchy) def delete(self, dimension_name): """ Delete a dimension :param dimension_name: Name of the dimension :return: """ request = '/api/v1/Dimensions(\'{}\')'.format(dimension_name) return self._rest.DELETE(request) def exists(self, dimension_name): """ Check if dimension exists :return: """ request = "/api/v1/Dimensions('{}')".format(dimension_name) return 
super(DimensionService, self).exists(request) def get_all_names(self): """Ask TM1 Server for list with all dimension names :Returns: List of Strings """ response = self._rest.GET('/api/v1/Dimensions?$select=Name', '') dimensions = json.loads(response)['value'] list_dimensions = list(entry['Name'] for entry in dimensions) return list_dimensions
# -*- coding: utf-8 -*- import json from TM1py.Exceptions import TM1pyException from TM1py.Objects.Dimension import Dimension from TM1py.Services.ObjectService import ObjectService from TM1py.Services.SubsetService import SubsetService from TM1py.Services.HierarchyService import HierarchyService class DimensionService(ObjectService): """ Service to handle Object Updates for TM1 Dimensions """ def __init__(self, rest): super().__init__(rest) self.hierarchies = HierarchyService(rest) self.subsets = SubsetService(rest) def create(self, dimension): """ create a dimension :param dimension: instance of TM1py.Dimension :return: response """ # If Dimension exists. throw Exception if self.exists(dimension.name): raise Exception("Dimension already exists") # If not all subsequent calls successfull -> undo everything that has been done in this function try: # create Dimension, Hierarchies, Elements, Edges etc. request = "/api/v1/Dimensions" response = self._rest.POST(request, dimension.body) except TM1pyException as e: # undo everything if problem in step 1 or 2 if self.exists(dimension.name): self.delete(dimension.name) raise e return response def get(self, dimension_name): """ Get a Dimension :param dimension_name: :return: """ request = "/api/v1/Dimensions('{}')?$expand=Hierarchies($expand=*)".format(dimension_name) dimension_as_json = self._rest.GET(request) return Dimension.from_json(dimension_as_json) def update(self, dimension): """ Update an existing dimension :param dimension: instance of TM1py.Dimension :return: None """ # update Hierarchies for hierarchy in dimension: self.hierarchies.update(hierarchy) def delete(self, dimension_name): """ Delete a dimension :param dimension_name: Name of the dimension :return: """ request = '/api/v1/Dimensions(\'{}\')'.format(dimension_name) return self._rest.DELETE(request) def exists(self, dimension_name): """ Check if dimension exists :return: """ request = "/api/v1/Dimensions('{}')".format(dimension_name) return 
super(DimensionService, self).exists(request) def get_all_names(self): """Ask TM1 Server for list with all dimension names :Returns: List of Strings """ response = self._rest.GET('/api/v1/Dimensions?$select=Name', '') dimensions = json.loads(response)['value'] list_dimensions = list(entry['Name'] for entry in dimensions) return list_dimensions
en
0.721588
# -*- coding: utf-8 -*- Service to handle Object Updates for TM1 Dimensions create a dimension :param dimension: instance of TM1py.Dimension :return: response # If Dimension exists. throw Exception # If not all subsequent calls successfull -> undo everything that has been done in this function # create Dimension, Hierarchies, Elements, Edges etc. # undo everything if problem in step 1 or 2 Get a Dimension :param dimension_name: :return: Update an existing dimension :param dimension: instance of TM1py.Dimension :return: None # update Hierarchies Delete a dimension :param dimension_name: Name of the dimension :return: Check if dimension exists :return: Ask TM1 Server for list with all dimension names :Returns: List of Strings
2.445708
2
epyseg/draw/shapes/ellipse2d.py
jo-mueller/EPySeg
14
6621530
from PyQt5 import QtWidgets from PyQt5.QtCore import QPoint, QPointF, Qt, QRectF from PyQt5.QtGui import QBrush, QPen, QColor, QTransform from epyseg.tools.logger import TA_logger logger = TA_logger() class Ellipse2D(QtWidgets.QGraphicsEllipseItem): isSet = False def __init__(self, *args, color=0xFFFF00, fill_color=None, opacity=1., stroke=0.65, line_style=None,theta=0, **kwargs): super(Ellipse2D, self).__init__(*args) if not args: self.isSet = False else: self.isSet = True self.setRect(self.rect()) self.color = color self.fill_color = fill_color self.stroke = stroke self.opacity = opacity self.scale = 1 self.translation = QPointF() self.line_style = line_style # rotation self.theta = theta def set_rotation(self, theta): self.theta = theta def set_opacity(self, opacity): self.opacity = opacity def set_line_style(self,style): '''allows lines to be dashed or dotted or have custom pattern :param style: a list of numbers or any of the following Qt.SolidLine, Qt.DashLine, Qt.DashDotLine, Qt.DotLine, Qt.DashDotDotLine but not Qt.CustomDashLine, Qt.CustomDashLine is assumed by default if a list is passed in. 
None is also a valid value that resets the line --> assume plain line :return: ''' self.line_style = style # if style is a list then assume custom pattern otherwise apply solidline def draw(self, painter, draw=True): if self.color is None and self.fill_color is None: return if draw: painter.save() painter.setOpacity(self.opacity) if self.color is not None: pen = QPen(QColor(self.color)) if self.stroke is not None: pen.setWidthF(self.stroke) if self.line_style is not None: if self.line_style in [Qt.SolidLine, Qt.DashLine, Qt.DashDotLine, Qt.DotLine, Qt.DashDotDotLine]: pen.setStyle(self.line_style) elif isinstance(self.line_style, list): pen.setStyle(Qt.CustomDashLine) pen.setDashPattern(self.line_style) painter.setPen(pen) else: painter.setPen(Qt.NoPen) if self.fill_color is not None: painter.setBrush(QBrush(QColor(self.fill_color))) if draw: rect_to_plot = self.rect().adjusted(0, 0, 0, 0) if self.scale is not None and self.scale != 1: # TODO KEEP THE ORDER THIS MUST BE DONE THIS WAY OR IT WILL GENERATE PLENTY OF BUGS... new_width = rect_to_plot.width() * self.scale new_height = rect_to_plot.height() * self.scale # TODO BE EXTREMELY CAREFUL AS SETX AND SETY CAN CHANGE WIDTH AND HEIGHT --> ALWAYS TAKE SIZE BEFORE OTHERWISE THERE WILL BE A PB AND ALWAYS RESET THE SIZE WHEN SETX IS CALLED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # Sets the left edge of the rectangle to the given x coordinate. May change the width, but will never change the right edge of the rectangle. --> NO CLUE WHY SHOULD CHANGE WIDTH THOUGH BUT BE CAREFUL!!! 
rect_to_plot.setX(rect_to_plot.x() * self.scale) rect_to_plot.setY(rect_to_plot.y() * self.scale) rect_to_plot.setWidth(new_width) rect_to_plot.setHeight(new_height) if self.translation is not None: rect_to_plot.translate(self.translation) # if self.color is not None: # painter.drawRect(rect_to_plot) # else: # painter.fillRect(rect_to_plot, QColor(self.fill_color)) if self.theta is not None and self.theta != 0: painter.translate(rect_to_plot.center()) painter.rotate(self.theta) painter.translate(-rect_to_plot.center()) painter.drawEllipse(rect_to_plot) painter.restore() # def fill(self, painter, draw=True): # if self.fill_color is None: # return # if draw: # painter.save() # painter.setBrush(QBrush(QColor(self.fill_color))) # painter.setOpacity(self.opacity) # if draw: # painter.drawEllipse(self.rect()) # painter.restore() # # # TODO pb will draw the shape twice.... ---> because painter drawpolygon is called twice # # def drawAndFill(self, painter): # painter.save() # self.draw(painter, draw=False) # self.fill(painter, draw=False) # painter.drawEllipse(self.rect()) # painter.restore() def translate(self, translation): self.moveBy(translation.x(), translation.y()) rect = self.rect() rect.translate(translation.x(), translation.y()) self.setRect(rect) def add(self, *args): p1 = args[0] p2 = args[1] rect = self.rect() rect.setWidth(abs(p1.x()-p2.x())) rect.setHeight(abs(p1.y()-p2.y())) x = p2.x() y = p2.y() if p1.x() < p2.x(): x = p1.x() if p1.y() < p2.y(): y = p1.y() rect.setX(x) rect.setY(y) self.setRect(rect) self.isSet = True def boundingRect(self): # should I return the scaled version or the orig --> think about it... 
rect_to_plot = self.rect().adjusted(0, 0, 0, 0) try: # print('tada') if self.theta is not None and self.theta!=0: # print('entering') center = rect_to_plot.center() # print('entering2') t = QTransform().translate(center.x(), center.y()).rotate(self.theta).translate(-center.x(), -center.y()) # print('entering3') # self.setTransform(t) # self.transform() # transformed = QRectF(self.rect()) # print('entering5', transformed) # self.resetTransform() # print('entering5', rect_to_plot) # print('entering4') transformed = t.mapRect(rect_to_plot) # self.setTransform(t) # self.transform() # # print(self.shape().boundingRect()) # # # print(self.rect(), transformed) # # transformed = QRectF(self.shape().boundingRect()) # # self.resetTransform() # not perfect but ok for now though --> bounds are not sharp at the edges upon rotation # print('entering45', transformed) return transformed except: pass return rect_to_plot def get_P1(self): return self.boundingRect().topLeft() def set_P1(self, point): rect = self.rect() width = rect.width() height = rect.height() rect.setX(point.x()) rect.setY(point.y()) # required due to setX changing width and sety changing height rect.setWidth(width) rect.setHeight(height) self.setRect(rect) def set_to_scale(self, factor): self.scale = factor def set_to_translation(self, translation): self.translation = translation if __name__ == '__main__': # ça marche --> voici deux examples de shapes test = Ellipse2D(0, 0, 100, 100) # print(test.x(), test.y(), test.width(), test.height()) print(test.contains(QPointF(50, 50))) print(test.contains(QPointF(15, 15))) print(test.contains(QPointF(-1, -1))) print(test.contains(QPointF(0, 0))) print(test.contains(QPointF(100, 100))) print(test.contains(QPointF(100, 100.1))) print(test.x()) print(test.y()) print(test.translate(QPoint(10, 10))) print(test.x()) print(test.y()) # p1 = test.p1() # print(p1.x(), p1.y()) # p2 = test.p2() # print(p2.x(), p2.y()) # print(test.arrow) # print(test.length()) # sqrt 2 --> 141 # # if 
it's an arrow I can add easily all the stuff I need # # test = Rect2D(0, 0, 1, 1) # p1 = test.p1() # print(p1.x(), p1.y()) # p2 = test.p2() # print(p2.x(), p2.y()) # print(test.arrow) # import math # print(test.length() == math.sqrt(2)) # sqrt 2 # # test2 = Rect2D() # p1 = test2.p1() # print(p1.x(), p1.y()) # p2 = test2.p2() # print(p2.x(), p2.y()) # print(test2.arrow)
from PyQt5 import QtWidgets from PyQt5.QtCore import QPoint, QPointF, Qt, QRectF from PyQt5.QtGui import QBrush, QPen, QColor, QTransform from epyseg.tools.logger import TA_logger logger = TA_logger() class Ellipse2D(QtWidgets.QGraphicsEllipseItem): isSet = False def __init__(self, *args, color=0xFFFF00, fill_color=None, opacity=1., stroke=0.65, line_style=None,theta=0, **kwargs): super(Ellipse2D, self).__init__(*args) if not args: self.isSet = False else: self.isSet = True self.setRect(self.rect()) self.color = color self.fill_color = fill_color self.stroke = stroke self.opacity = opacity self.scale = 1 self.translation = QPointF() self.line_style = line_style # rotation self.theta = theta def set_rotation(self, theta): self.theta = theta def set_opacity(self, opacity): self.opacity = opacity def set_line_style(self,style): '''allows lines to be dashed or dotted or have custom pattern :param style: a list of numbers or any of the following Qt.SolidLine, Qt.DashLine, Qt.DashDotLine, Qt.DotLine, Qt.DashDotDotLine but not Qt.CustomDashLine, Qt.CustomDashLine is assumed by default if a list is passed in. 
None is also a valid value that resets the line --> assume plain line :return: ''' self.line_style = style # if style is a list then assume custom pattern otherwise apply solidline def draw(self, painter, draw=True): if self.color is None and self.fill_color is None: return if draw: painter.save() painter.setOpacity(self.opacity) if self.color is not None: pen = QPen(QColor(self.color)) if self.stroke is not None: pen.setWidthF(self.stroke) if self.line_style is not None: if self.line_style in [Qt.SolidLine, Qt.DashLine, Qt.DashDotLine, Qt.DotLine, Qt.DashDotDotLine]: pen.setStyle(self.line_style) elif isinstance(self.line_style, list): pen.setStyle(Qt.CustomDashLine) pen.setDashPattern(self.line_style) painter.setPen(pen) else: painter.setPen(Qt.NoPen) if self.fill_color is not None: painter.setBrush(QBrush(QColor(self.fill_color))) if draw: rect_to_plot = self.rect().adjusted(0, 0, 0, 0) if self.scale is not None and self.scale != 1: # TODO KEEP THE ORDER THIS MUST BE DONE THIS WAY OR IT WILL GENERATE PLENTY OF BUGS... new_width = rect_to_plot.width() * self.scale new_height = rect_to_plot.height() * self.scale # TODO BE EXTREMELY CAREFUL AS SETX AND SETY CAN CHANGE WIDTH AND HEIGHT --> ALWAYS TAKE SIZE BEFORE OTHERWISE THERE WILL BE A PB AND ALWAYS RESET THE SIZE WHEN SETX IS CALLED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # Sets the left edge of the rectangle to the given x coordinate. May change the width, but will never change the right edge of the rectangle. --> NO CLUE WHY SHOULD CHANGE WIDTH THOUGH BUT BE CAREFUL!!! 
rect_to_plot.setX(rect_to_plot.x() * self.scale) rect_to_plot.setY(rect_to_plot.y() * self.scale) rect_to_plot.setWidth(new_width) rect_to_plot.setHeight(new_height) if self.translation is not None: rect_to_plot.translate(self.translation) # if self.color is not None: # painter.drawRect(rect_to_plot) # else: # painter.fillRect(rect_to_plot, QColor(self.fill_color)) if self.theta is not None and self.theta != 0: painter.translate(rect_to_plot.center()) painter.rotate(self.theta) painter.translate(-rect_to_plot.center()) painter.drawEllipse(rect_to_plot) painter.restore() # def fill(self, painter, draw=True): # if self.fill_color is None: # return # if draw: # painter.save() # painter.setBrush(QBrush(QColor(self.fill_color))) # painter.setOpacity(self.opacity) # if draw: # painter.drawEllipse(self.rect()) # painter.restore() # # # TODO pb will draw the shape twice.... ---> because painter drawpolygon is called twice # # def drawAndFill(self, painter): # painter.save() # self.draw(painter, draw=False) # self.fill(painter, draw=False) # painter.drawEllipse(self.rect()) # painter.restore() def translate(self, translation): self.moveBy(translation.x(), translation.y()) rect = self.rect() rect.translate(translation.x(), translation.y()) self.setRect(rect) def add(self, *args): p1 = args[0] p2 = args[1] rect = self.rect() rect.setWidth(abs(p1.x()-p2.x())) rect.setHeight(abs(p1.y()-p2.y())) x = p2.x() y = p2.y() if p1.x() < p2.x(): x = p1.x() if p1.y() < p2.y(): y = p1.y() rect.setX(x) rect.setY(y) self.setRect(rect) self.isSet = True def boundingRect(self): # should I return the scaled version or the orig --> think about it... 
rect_to_plot = self.rect().adjusted(0, 0, 0, 0) try: # print('tada') if self.theta is not None and self.theta!=0: # print('entering') center = rect_to_plot.center() # print('entering2') t = QTransform().translate(center.x(), center.y()).rotate(self.theta).translate(-center.x(), -center.y()) # print('entering3') # self.setTransform(t) # self.transform() # transformed = QRectF(self.rect()) # print('entering5', transformed) # self.resetTransform() # print('entering5', rect_to_plot) # print('entering4') transformed = t.mapRect(rect_to_plot) # self.setTransform(t) # self.transform() # # print(self.shape().boundingRect()) # # # print(self.rect(), transformed) # # transformed = QRectF(self.shape().boundingRect()) # # self.resetTransform() # not perfect but ok for now though --> bounds are not sharp at the edges upon rotation # print('entering45', transformed) return transformed except: pass return rect_to_plot def get_P1(self): return self.boundingRect().topLeft() def set_P1(self, point): rect = self.rect() width = rect.width() height = rect.height() rect.setX(point.x()) rect.setY(point.y()) # required due to setX changing width and sety changing height rect.setWidth(width) rect.setHeight(height) self.setRect(rect) def set_to_scale(self, factor): self.scale = factor def set_to_translation(self, translation): self.translation = translation if __name__ == '__main__': # ça marche --> voici deux examples de shapes test = Ellipse2D(0, 0, 100, 100) # print(test.x(), test.y(), test.width(), test.height()) print(test.contains(QPointF(50, 50))) print(test.contains(QPointF(15, 15))) print(test.contains(QPointF(-1, -1))) print(test.contains(QPointF(0, 0))) print(test.contains(QPointF(100, 100))) print(test.contains(QPointF(100, 100.1))) print(test.x()) print(test.y()) print(test.translate(QPoint(10, 10))) print(test.x()) print(test.y()) # p1 = test.p1() # print(p1.x(), p1.y()) # p2 = test.p2() # print(p2.x(), p2.y()) # print(test.arrow) # print(test.length()) # sqrt 2 --> 141 # # if 
it's an arrow I can add easily all the stuff I need # # test = Rect2D(0, 0, 1, 1) # p1 = test.p1() # print(p1.x(), p1.y()) # p2 = test.p2() # print(p2.x(), p2.y()) # print(test.arrow) # import math # print(test.length() == math.sqrt(2)) # sqrt 2 # # test2 = Rect2D() # p1 = test2.p1() # print(p1.x(), p1.y()) # p2 = test2.p2() # print(p2.x(), p2.y()) # print(test2.arrow)
en
0.556848
# rotation allows lines to be dashed or dotted or have custom pattern :param style: a list of numbers or any of the following Qt.SolidLine, Qt.DashLine, Qt.DashDotLine, Qt.DotLine, Qt.DashDotDotLine but not Qt.CustomDashLine, Qt.CustomDashLine is assumed by default if a list is passed in. None is also a valid value that resets the line --> assume plain line :return: # if style is a list then assume custom pattern otherwise apply solidline # TODO KEEP THE ORDER THIS MUST BE DONE THIS WAY OR IT WILL GENERATE PLENTY OF BUGS... # TODO BE EXTREMELY CAREFUL AS SETX AND SETY CAN CHANGE WIDTH AND HEIGHT --> ALWAYS TAKE SIZE BEFORE OTHERWISE THERE WILL BE A PB AND ALWAYS RESET THE SIZE WHEN SETX IS CALLED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # Sets the left edge of the rectangle to the given x coordinate. May change the width, but will never change the right edge of the rectangle. --> NO CLUE WHY SHOULD CHANGE WIDTH THOUGH BUT BE CAREFUL!!! # if self.color is not None: # painter.drawRect(rect_to_plot) # else: # painter.fillRect(rect_to_plot, QColor(self.fill_color)) # def fill(self, painter, draw=True): # if self.fill_color is None: # return # if draw: # painter.save() # painter.setBrush(QBrush(QColor(self.fill_color))) # painter.setOpacity(self.opacity) # if draw: # painter.drawEllipse(self.rect()) # painter.restore() # # # TODO pb will draw the shape twice.... ---> because painter drawpolygon is called twice # # def drawAndFill(self, painter): # painter.save() # self.draw(painter, draw=False) # self.fill(painter, draw=False) # painter.drawEllipse(self.rect()) # painter.restore() # should I return the scaled version or the orig --> think about it... 
# print('tada') # print('entering') # print('entering2') # print('entering3') # self.setTransform(t) # self.transform() # transformed = QRectF(self.rect()) # print('entering5', transformed) # self.resetTransform() # print('entering5', rect_to_plot) # print('entering4') # self.setTransform(t) # self.transform() # # print(self.shape().boundingRect()) # # # print(self.rect(), transformed) # # transformed = QRectF(self.shape().boundingRect()) # # self.resetTransform() # not perfect but ok for now though --> bounds are not sharp at the edges upon rotation # print('entering45', transformed) # required due to setX changing width and sety changing height # ça marche --> voici deux examples de shapes # print(test.x(), test.y(), test.width(), test.height()) # p1 = test.p1() # print(p1.x(), p1.y()) # p2 = test.p2() # print(p2.x(), p2.y()) # print(test.arrow) # print(test.length()) # sqrt 2 --> 141 # # if it's an arrow I can add easily all the stuff I need # # test = Rect2D(0, 0, 1, 1) # p1 = test.p1() # print(p1.x(), p1.y()) # p2 = test.p2() # print(p2.x(), p2.y()) # print(test.arrow) # import math # print(test.length() == math.sqrt(2)) # sqrt 2 # # test2 = Rect2D() # p1 = test2.p1() # print(p1.x(), p1.y()) # p2 = test2.p2() # print(p2.x(), p2.y()) # print(test2.arrow)
2.738286
3
fastreg/testing.py
fmcetin/fastreg
34
6621531
<gh_stars>10-100 import numpy as np import pandas as pd from . import linear from .formula import I, R, C # true parameters params0 = { 'I': 0.1, 'x1': 0.3, 'x2': 0.6, 'id1': 1.0, 'id2': 1.0, 'sigma': 1.0, 'pz': 0.2, 'alpha': 0.3 } # poisson dampening pfact = 100 # default specification default_x = I + R('x1') + R('x2') + C('id1') + C('id2') # good negbin in terms of mean and overdispersion (var = m + alpha*m^2) def rand_negbin(mean, alpha, size=None, state=np.random): return state.negative_binomial(1/alpha, 1/(1+alpha*mean), size=size) def dataset( N=1_000_000, K1=10, K2=100, models=['linear'], letter=True, params=params0, seed=89320432, ): if type(models) is str: models = [models] # init random st = np.random.RandomState(seed) # core regressors df = pd.DataFrame({'x1': st.randn(N), 'x2': st.randn(N)}) # predictors df['yhat0'] = params['I'] + params['x1']*df['x1'] + params['x2']*df['x2'] df['yhat'] = df['yhat0'] if K1 is not None: df['id1'] = st.randint(K1, size=N) df['yhat'] += params['id1']*df['id1']/K1 if K2 is not None: df['id2'] = st.randint(K2, size=N) df['yhat'] += params['id2']*df['id2']/K2 # linear if 'linear' in models: df['y0'] = df['yhat0'] + params['sigma']*st.randn(N) df['y'] = df['yhat'] + params['sigma']*st.randn(N) # logit if 'logit' in models: df['Eb0'] = 1/(1+np.exp(-df['yhat0'])) df['Eb'] = 1/(1+np.exp(-df['yhat'])) df['b0'] = (st.randn(N) < df['Eb0']).astype(np.int) df['b'] = (st.randn(N) < df['Eb']).astype(np.int) # poisson if 'poisson' in models: df['Ep0'] = np.exp(df['yhat0']) df['Ep'] = np.exp(df['yhat']) df['p0'] = st.poisson(df['Ep0']) df['p'] = st.poisson(df['Ep']) # zero-inflated poisson if 'zinf_poisson' in models: df['pz0'] = np.where(st.rand(N) < params['pz'], 0, df['p0']) df['pz'] = np.where(st.rand(N) < params['pz'], 0, df['p']) # negative binomial if 'negbin' in models: df['nb0'] = rand_negbin(df['Ep0'], params['alpha'], state=st) df['nb'] = rand_negbin(df['Ep'], params['alpha'], state=st) # zero-inflated poisson if 'zinf_negbin' 
in models: df['nbz0'] = np.where(st.rand(N) < params['pz'], 0, df['nb0']) df['nbz'] = np.where(st.rand(N) < params['pz'], 0, df['nb']) if letter and K1 is not None: df['id1'] = df['id1'].map(lambda x: chr(65+x)) return df def dataset_compare(N=10_000_000, K=100): K1, K2 = N // K, K id1 = np.random.randint(K1, size=N) id2 = np.random.randint(K2, size=N) x1 = 5*np.cos(id1) + 5*np.sin(id2) + np.random.randn(N) x2 = np.cos(id1) + np.sin(id2) + np.random.randn(N) y= 3*x1 + 5*x2 + np.cos(id1) + np.cos(id2)**2 + np.random.randn(N) return pd.DataFrame({ 'y': y, 'x1': x1, 'x2': x2, 'id1': id1, 'id2': id2 }) def plot_coeff(beta, params=params0): import matplotlib.pyplot as plt coeff = pd.DataFrame({ 'id2': np.arange(len(beta)), 'beta1': beta }) coeff['beta0'] = params['id2']*coeff['id2']/pfact coeff['beta0'] -= coeff['beta0'].mean() coeff['beta1'] -= coeff['beta1'].mean() # inferred ranges bmin = coeff[['beta0', 'beta1']].min().min() bmax = coeff[['beta0', 'beta1']].max().max() bvec = np.linspace(bmin, bmax, 100) # plot estimates fig, ax = plt.subplots(figsize=(6, 5)) coeff.plot.scatter(x='beta0', y='beta1', ax=ax, alpha=0.5) ax.plot(bvec, bvec, c='r', linewidth=1, zorder=1) ax.set_xlabel('$\\beta_0$') ax.set_ylabel('$\\beta_1$') def test_ols(data, y='y', x=default_x, plot=False, **kwargs): table = linear.ols(y=y, x=x, data=data, **kwargs) if plot: plot_coeff(table['coeff'].filter(regex='id2')) return table def test_glm(data, estim='poisson', y='p', x=default_x, plot=False, **kwargs): from . import general if type(estim) is str: estim = getattr(general, estim) table = estim(y=y, x=x, data=data, **kwargs) if plot: plot_coeff(table['coeff'].filter(regex='id2')) return table
import numpy as np import pandas as pd from . import linear from .formula import I, R, C # true parameters params0 = { 'I': 0.1, 'x1': 0.3, 'x2': 0.6, 'id1': 1.0, 'id2': 1.0, 'sigma': 1.0, 'pz': 0.2, 'alpha': 0.3 } # poisson dampening pfact = 100 # default specification default_x = I + R('x1') + R('x2') + C('id1') + C('id2') # good negbin in terms of mean and overdispersion (var = m + alpha*m^2) def rand_negbin(mean, alpha, size=None, state=np.random): return state.negative_binomial(1/alpha, 1/(1+alpha*mean), size=size) def dataset( N=1_000_000, K1=10, K2=100, models=['linear'], letter=True, params=params0, seed=89320432, ): if type(models) is str: models = [models] # init random st = np.random.RandomState(seed) # core regressors df = pd.DataFrame({'x1': st.randn(N), 'x2': st.randn(N)}) # predictors df['yhat0'] = params['I'] + params['x1']*df['x1'] + params['x2']*df['x2'] df['yhat'] = df['yhat0'] if K1 is not None: df['id1'] = st.randint(K1, size=N) df['yhat'] += params['id1']*df['id1']/K1 if K2 is not None: df['id2'] = st.randint(K2, size=N) df['yhat'] += params['id2']*df['id2']/K2 # linear if 'linear' in models: df['y0'] = df['yhat0'] + params['sigma']*st.randn(N) df['y'] = df['yhat'] + params['sigma']*st.randn(N) # logit if 'logit' in models: df['Eb0'] = 1/(1+np.exp(-df['yhat0'])) df['Eb'] = 1/(1+np.exp(-df['yhat'])) df['b0'] = (st.randn(N) < df['Eb0']).astype(np.int) df['b'] = (st.randn(N) < df['Eb']).astype(np.int) # poisson if 'poisson' in models: df['Ep0'] = np.exp(df['yhat0']) df['Ep'] = np.exp(df['yhat']) df['p0'] = st.poisson(df['Ep0']) df['p'] = st.poisson(df['Ep']) # zero-inflated poisson if 'zinf_poisson' in models: df['pz0'] = np.where(st.rand(N) < params['pz'], 0, df['p0']) df['pz'] = np.where(st.rand(N) < params['pz'], 0, df['p']) # negative binomial if 'negbin' in models: df['nb0'] = rand_negbin(df['Ep0'], params['alpha'], state=st) df['nb'] = rand_negbin(df['Ep'], params['alpha'], state=st) # zero-inflated poisson if 'zinf_negbin' in models: 
df['nbz0'] = np.where(st.rand(N) < params['pz'], 0, df['nb0']) df['nbz'] = np.where(st.rand(N) < params['pz'], 0, df['nb']) if letter and K1 is not None: df['id1'] = df['id1'].map(lambda x: chr(65+x)) return df def dataset_compare(N=10_000_000, K=100): K1, K2 = N // K, K id1 = np.random.randint(K1, size=N) id2 = np.random.randint(K2, size=N) x1 = 5*np.cos(id1) + 5*np.sin(id2) + np.random.randn(N) x2 = np.cos(id1) + np.sin(id2) + np.random.randn(N) y= 3*x1 + 5*x2 + np.cos(id1) + np.cos(id2)**2 + np.random.randn(N) return pd.DataFrame({ 'y': y, 'x1': x1, 'x2': x2, 'id1': id1, 'id2': id2 }) def plot_coeff(beta, params=params0): import matplotlib.pyplot as plt coeff = pd.DataFrame({ 'id2': np.arange(len(beta)), 'beta1': beta }) coeff['beta0'] = params['id2']*coeff['id2']/pfact coeff['beta0'] -= coeff['beta0'].mean() coeff['beta1'] -= coeff['beta1'].mean() # inferred ranges bmin = coeff[['beta0', 'beta1']].min().min() bmax = coeff[['beta0', 'beta1']].max().max() bvec = np.linspace(bmin, bmax, 100) # plot estimates fig, ax = plt.subplots(figsize=(6, 5)) coeff.plot.scatter(x='beta0', y='beta1', ax=ax, alpha=0.5) ax.plot(bvec, bvec, c='r', linewidth=1, zorder=1) ax.set_xlabel('$\\beta_0$') ax.set_ylabel('$\\beta_1$') def test_ols(data, y='y', x=default_x, plot=False, **kwargs): table = linear.ols(y=y, x=x, data=data, **kwargs) if plot: plot_coeff(table['coeff'].filter(regex='id2')) return table def test_glm(data, estim='poisson', y='p', x=default_x, plot=False, **kwargs): from . import general if type(estim) is str: estim = getattr(general, estim) table = estim(y=y, x=x, data=data, **kwargs) if plot: plot_coeff(table['coeff'].filter(regex='id2')) return table
en
0.362519
# true parameters # poisson dampening # default specification # good negbin in terms of mean and overdispersion (var = m + alpha*m^2) # init random # core regressors # predictors # linear # logit # poisson # zero-inflated poisson # negative binomial # zero-inflated poisson # inferred ranges # plot estimates
2.427204
2
pyh3lib/tests/test_multipart.py
dimosarvanitakis/H3
10
6621532
<gh_stars>1-10 # Copyright [2019] [FORTH-ICS] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import pyh3lib import random MEGABYTE = 1048576 def test_simple(h3): """Create, delete an object.""" # All empty. assert h3.list_buckets() == [] with pytest.raises(pyh3lib.H3FailureError): h3.create_multipart('b1', 'm1') # Create a bucket. assert h3.create_bucket('b1') == True assert h3.list_multiparts('b1') == [] # Create a multipart object. multipart = h3.create_multipart('b1', 'm1') assert h3.list_multiparts('b1') == ['m1'] with open('/dev/urandom', 'rb') as f: data = f.read(3 * MEGABYTE) h3.create_object('b1', 'o1', data) h3.create_part(multipart, 1, data) h3.create_part(multipart, 0, data[:MEGABYTE]) h3.create_part(multipart, 2, data) h3.create_part_copy('o1', 0, MEGABYTE, multipart, 0) parts = h3.list_parts(multipart) assert len(parts) == 3 for part_number, size in parts: if part_number == 0: assert size == MEGABYTE else: assert size == (3 * MEGABYTE) h3.complete_multipart(multipart) with pytest.raises(pyh3lib.H3NotExistsError): h3.complete_multipart(multipart) with pytest.raises(pyh3lib.H3NotExistsError): h3.abort_multipart(multipart) assert 'm1' in h3.list_objects('b1') object_info = h3.info_object('b1', 'm1') assert not object_info.is_bad assert object_info.size == (7 * MEGABYTE) assert type(object_info.creation) == float assert type(object_info.last_access) == float assert type(object_info.last_modification) == float assert type(object_info.last_change) == float # 
Delete objects. h3.delete_object('b1', 'm1') h3.delete_object('b1', 'o1') assert h3.list_objects('b1') == [] assert h3.delete_bucket('b1') == True assert h3.list_buckets() == []
# Copyright [2019] [FORTH-ICS] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import pyh3lib import random MEGABYTE = 1048576 def test_simple(h3): """Create, delete an object.""" # All empty. assert h3.list_buckets() == [] with pytest.raises(pyh3lib.H3FailureError): h3.create_multipart('b1', 'm1') # Create a bucket. assert h3.create_bucket('b1') == True assert h3.list_multiparts('b1') == [] # Create a multipart object. multipart = h3.create_multipart('b1', 'm1') assert h3.list_multiparts('b1') == ['m1'] with open('/dev/urandom', 'rb') as f: data = f.read(3 * MEGABYTE) h3.create_object('b1', 'o1', data) h3.create_part(multipart, 1, data) h3.create_part(multipart, 0, data[:MEGABYTE]) h3.create_part(multipart, 2, data) h3.create_part_copy('o1', 0, MEGABYTE, multipart, 0) parts = h3.list_parts(multipart) assert len(parts) == 3 for part_number, size in parts: if part_number == 0: assert size == MEGABYTE else: assert size == (3 * MEGABYTE) h3.complete_multipart(multipart) with pytest.raises(pyh3lib.H3NotExistsError): h3.complete_multipart(multipart) with pytest.raises(pyh3lib.H3NotExistsError): h3.abort_multipart(multipart) assert 'm1' in h3.list_objects('b1') object_info = h3.info_object('b1', 'm1') assert not object_info.is_bad assert object_info.size == (7 * MEGABYTE) assert type(object_info.creation) == float assert type(object_info.last_access) == float assert type(object_info.last_modification) == float assert type(object_info.last_change) == float # Delete objects. 
h3.delete_object('b1', 'm1') h3.delete_object('b1', 'o1') assert h3.list_objects('b1') == [] assert h3.delete_bucket('b1') == True assert h3.list_buckets() == []
en
0.861314
# Copyright [2019] [FORTH-ICS] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Create, delete an object. # All empty. # Create a bucket. # Create a multipart object. # Delete objects.
2.157727
2
vk.py
Kwentar/ImageDownloader
2
6621533
import json import random from urllib.error import URLError from urllib.parse import urlencode from urllib.request import urlopen, http, Request import time from datetime import date from Profiler import Profiler import __setup_photo__ as setup class VkError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class VkUser: def __init__(self, uid, name, last_name, day_b, month_b, sex, city_id, age=-1, year_b=-1): self.uid = uid self.name = name self.last_name = last_name self.day_b = day_b self.month_b = month_b if year_b == -1: year_b = date.today().year - age if month_b < date.today().month or month_b == date.today().month and day_b < date.today().day: year_b -= 1 self.year_b = year_b self.sex = sex self.city_id = city_id def __str__(self): return ";".join([self.uid, self.name, self.last_name, self.day_b.__str__(), self.month_b.__str__(), self.year_b.__str__(), self.sex.__str__(), self.city_id.__str__()]) def get_age(self): return date.today().year - self.year_b class Vk: tokens = setup.user_tokens curr_token = '' p = Profiler() @staticmethod def check_time(value=0.5): if Vk.p.get_time() < value: time.sleep(value) Vk.p.start() @staticmethod def set_token(token): Vk.tokens.clear() Vk.tokens.append(token) @staticmethod def get_token(): while True: el = random.choice(Vk.tokens) if el != Vk.curr_token: test_url = 'https://api.vk.com/method/getProfiles?uid=66748&v=5.103&access_token=' + el Vk.check_time(1) try: response = urlopen(test_url).read() result = json.loads(response.decode('utf-8')) if 'response' in result.keys(): print('now I use the ' + el + ' token') Vk.curr_token = el return el except http.client.BadStatusLine as err_: print("".join(['ERROR Vk.get_token', err_.__str__()])) raise VkError('all tokens are invalid: ' + result['error']['error_msg'].__str__()) @staticmethod def call_api(method, params): Vk.check_time() while not Vk.curr_token: Vk.get_token() if isinstance(params, list): params_list = params[:] 
elif isinstance(params, dict): params_list = params.items() else: params_list = [params] params_list += [('access_token', Vk.curr_token), ('v', '5.103')] url = 'https://api.vk.com/method/%s?%s' % (method, urlencode(params_list)) try: req = Request(url=url, headers={'User-agent': random.choice(setup.user_agents)}) response = urlopen(req).read() result = json.loads(response.decode('utf-8')) try: if 'response' in result.keys(): return result['response'] else: raise VkError('no response on answer: ' + result['error']['error_msg'].__str__()) except VkError as err_: print(err_.value) Vk.curr_token = Vk.get_token() # Vk.call_api(method, params) except URLError as err_: print('URLError: ' + err_.errno.__str__() + ", " + err_.reason.__str__()) except http.client.BadStatusLine as err_: print("".join(['ERROR Vk.call_api', err_.__str__()])) except ConnectionResetError as err_: print("".join(['ERROR ConnectionResetError', err_.__str__()])) except ConnectionAbortedError as err_: print("".join(['ERROR ConnectionAbortedError', err_.__str__()])) return list() @staticmethod def get_uids(age, month, day, city_id, fields='sex'): search_q = list() search_q.append(('offset', '0')) search_q.append(('count', '300')) search_q.append(('city', city_id)) search_q.append(('fields', fields)) search_q.append(('age_from', age)) search_q.append(('age_to', age)) search_q.append(('has_photo', '1')) search_q.append(('birth_day', day)) search_q.append(('birth_month', month)) r = Vk.call_api('users.search', search_q) count = r['count'] users = list() for el in r['items']: if 'id' in el.keys() and not el['is_closed']: user = VkUser(uid=el['id'].__str__(), name=el['first_name'], last_name=el['last_name'], sex=el['sex'], day_b=day, month_b=month, age=age, city_id=city_id) users.append(user) if count > 1000: Vk.warning('''Count more than 1000, count = {}, age = {}, month = {}, day = {}'''.format(count, age, month, day)) return users @staticmethod def create_user_from_response(response): if 'user_id' in 
response.keys(): uid = response['user_id'].__str__() elif 'uid' in response.keys(): uid = response['uid'].__str__() else: return None if 'deactivated' in response.keys(): return None last_name = 'None' sex = 'None' name = 'None' city_id = 'None' day, month, age = [0, 0, 0] if 'last_name' in response.keys(): last_name = response['last_name'].__str__() if 'first_name' in response.keys(): name = response['first_name'].__str__() if 'sex' in response.keys(): sex = response['sex'].__str__() if 'city' in response.keys(): city_id = response['city'].__str__() if 'bdate' in response.keys(): bdate = response['bdate'].__str__().split('.') if len(bdate) > 2: day, month, age = map(int, bdate) age = date.today().year - age else: day, month = map(int, bdate) user = VkUser(uid=uid, name=name, last_name=last_name, sex=sex, day_b=day, month_b=month, age=age, city_id=city_id) return user @staticmethod def get_user_info(uid, fields='city,bdate,sex'): search_q = list() search_q.append(('user_id', uid)) search_q.append(('fields', fields)) r = Vk.call_api('users.get', search_q) for el in r: user = Vk.create_user_from_response(el) if user is not None: return user @staticmethod def get_friends(uid, fields='city,bdate,sex'): search_q = list() search_q.append(('user_id', uid)) search_q.append(('offset', '0')) search_q.append(('count', '1000')) search_q.append(('fields', fields)) r = Vk.call_api('friends.get', search_q) count = len(r) users = list() for el in r: user = Vk.create_user_from_response(el) if user is not None: users.append(user) if count > 1000: Vk.warning('Count more than 1000') return users @staticmethod def get_profile_photos(id_): q = list() q.append(('owner_id', id_)) q.append(('count', '10')) q.append(('rev', '1')) q.append(('extended', '1')) q.append(('photos_size', '0')) r = Vk.call_api('photos.getAll', q) images = [] for photo in r['items']: max_photo = max(photo['sizes'], key=lambda x: x['width']*x['height']) images.append(max_photo['url']) return images @staticmethod def 
warning(msg): print(msg)
import json import random from urllib.error import URLError from urllib.parse import urlencode from urllib.request import urlopen, http, Request import time from datetime import date from Profiler import Profiler import __setup_photo__ as setup class VkError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class VkUser: def __init__(self, uid, name, last_name, day_b, month_b, sex, city_id, age=-1, year_b=-1): self.uid = uid self.name = name self.last_name = last_name self.day_b = day_b self.month_b = month_b if year_b == -1: year_b = date.today().year - age if month_b < date.today().month or month_b == date.today().month and day_b < date.today().day: year_b -= 1 self.year_b = year_b self.sex = sex self.city_id = city_id def __str__(self): return ";".join([self.uid, self.name, self.last_name, self.day_b.__str__(), self.month_b.__str__(), self.year_b.__str__(), self.sex.__str__(), self.city_id.__str__()]) def get_age(self): return date.today().year - self.year_b class Vk: tokens = setup.user_tokens curr_token = '' p = Profiler() @staticmethod def check_time(value=0.5): if Vk.p.get_time() < value: time.sleep(value) Vk.p.start() @staticmethod def set_token(token): Vk.tokens.clear() Vk.tokens.append(token) @staticmethod def get_token(): while True: el = random.choice(Vk.tokens) if el != Vk.curr_token: test_url = 'https://api.vk.com/method/getProfiles?uid=66748&v=5.103&access_token=' + el Vk.check_time(1) try: response = urlopen(test_url).read() result = json.loads(response.decode('utf-8')) if 'response' in result.keys(): print('now I use the ' + el + ' token') Vk.curr_token = el return el except http.client.BadStatusLine as err_: print("".join(['ERROR Vk.get_token', err_.__str__()])) raise VkError('all tokens are invalid: ' + result['error']['error_msg'].__str__()) @staticmethod def call_api(method, params): Vk.check_time() while not Vk.curr_token: Vk.get_token() if isinstance(params, list): params_list = params[:] 
elif isinstance(params, dict): params_list = params.items() else: params_list = [params] params_list += [('access_token', Vk.curr_token), ('v', '5.103')] url = 'https://api.vk.com/method/%s?%s' % (method, urlencode(params_list)) try: req = Request(url=url, headers={'User-agent': random.choice(setup.user_agents)}) response = urlopen(req).read() result = json.loads(response.decode('utf-8')) try: if 'response' in result.keys(): return result['response'] else: raise VkError('no response on answer: ' + result['error']['error_msg'].__str__()) except VkError as err_: print(err_.value) Vk.curr_token = Vk.get_token() # Vk.call_api(method, params) except URLError as err_: print('URLError: ' + err_.errno.__str__() + ", " + err_.reason.__str__()) except http.client.BadStatusLine as err_: print("".join(['ERROR Vk.call_api', err_.__str__()])) except ConnectionResetError as err_: print("".join(['ERROR ConnectionResetError', err_.__str__()])) except ConnectionAbortedError as err_: print("".join(['ERROR ConnectionAbortedError', err_.__str__()])) return list() @staticmethod def get_uids(age, month, day, city_id, fields='sex'): search_q = list() search_q.append(('offset', '0')) search_q.append(('count', '300')) search_q.append(('city', city_id)) search_q.append(('fields', fields)) search_q.append(('age_from', age)) search_q.append(('age_to', age)) search_q.append(('has_photo', '1')) search_q.append(('birth_day', day)) search_q.append(('birth_month', month)) r = Vk.call_api('users.search', search_q) count = r['count'] users = list() for el in r['items']: if 'id' in el.keys() and not el['is_closed']: user = VkUser(uid=el['id'].__str__(), name=el['first_name'], last_name=el['last_name'], sex=el['sex'], day_b=day, month_b=month, age=age, city_id=city_id) users.append(user) if count > 1000: Vk.warning('''Count more than 1000, count = {}, age = {}, month = {}, day = {}'''.format(count, age, month, day)) return users @staticmethod def create_user_from_response(response): if 'user_id' in 
response.keys(): uid = response['user_id'].__str__() elif 'uid' in response.keys(): uid = response['uid'].__str__() else: return None if 'deactivated' in response.keys(): return None last_name = 'None' sex = 'None' name = 'None' city_id = 'None' day, month, age = [0, 0, 0] if 'last_name' in response.keys(): last_name = response['last_name'].__str__() if 'first_name' in response.keys(): name = response['first_name'].__str__() if 'sex' in response.keys(): sex = response['sex'].__str__() if 'city' in response.keys(): city_id = response['city'].__str__() if 'bdate' in response.keys(): bdate = response['bdate'].__str__().split('.') if len(bdate) > 2: day, month, age = map(int, bdate) age = date.today().year - age else: day, month = map(int, bdate) user = VkUser(uid=uid, name=name, last_name=last_name, sex=sex, day_b=day, month_b=month, age=age, city_id=city_id) return user @staticmethod def get_user_info(uid, fields='city,bdate,sex'): search_q = list() search_q.append(('user_id', uid)) search_q.append(('fields', fields)) r = Vk.call_api('users.get', search_q) for el in r: user = Vk.create_user_from_response(el) if user is not None: return user @staticmethod def get_friends(uid, fields='city,bdate,sex'): search_q = list() search_q.append(('user_id', uid)) search_q.append(('offset', '0')) search_q.append(('count', '1000')) search_q.append(('fields', fields)) r = Vk.call_api('friends.get', search_q) count = len(r) users = list() for el in r: user = Vk.create_user_from_response(el) if user is not None: users.append(user) if count > 1000: Vk.warning('Count more than 1000') return users @staticmethod def get_profile_photos(id_): q = list() q.append(('owner_id', id_)) q.append(('count', '10')) q.append(('rev', '1')) q.append(('extended', '1')) q.append(('photos_size', '0')) r = Vk.call_api('photos.getAll', q) images = [] for photo in r['items']: max_photo = max(photo['sizes'], key=lambda x: x['width']*x['height']) images.append(max_photo['url']) return images @staticmethod def 
warning(msg): print(msg)
en
0.651787
# Vk.call_api(method, params) Count more than 1000, count = {}, age = {}, month = {}, day = {}
2.679518
3
twitter/credentials.py
eferrares/codeinquero
1
6621534
CONSUMER_KEY = 'q8hG77NeTRXu4ldDyZb8kjw4S' CONSUMER_SECRET = '<KEY>' ACCESS_TOKEN = '<KEY>' ACCESS_TOKEN_SECRET = '<KEY>'
CONSUMER_KEY = 'q8hG77NeTRXu4ldDyZb8kjw4S' CONSUMER_SECRET = '<KEY>' ACCESS_TOKEN = '<KEY>' ACCESS_TOKEN_SECRET = '<KEY>'
none
1
1.002039
1
python/learning/sample_03.py
erisky/my_practices
0
6621535
<reponame>erisky/my_practices #!/usr/bin/env python import sys #tmp = sys.stdout #sys.stdout = open('output.txt', 'a') # variable number of arguments def min1(*args): res = args[0] for val in args[1:]: if (res > val): res = val return res print min1(1,2,3,4,5) print min1('a', 'b') print min1('12321', '1efwef') #lambda -- easier format of function #ex f = (lambda x=1,y=2,z=3: (x+y) *z) print f(3,4) print f(1,3,3) mydict = {'a':1, 'b':2} print mydict['a'] mydict2 = {'a':(lambda x,y: x+y), 'b':(lambda x,y: x *y)} print mydict['a'] print mydict2['a'](12,23) print mydict2['b'](12,23) # apply function, something useful when make use of function pointer of C def funct1(x,y,z): return x+y+z f = funct1 print "apply:%d" % apply(f, (3,5,6)) # map function mylist1 = [1,2,3,4] def sqrx(x): return x*x print "list before map", print mylist1 mylist1 = map(sqrx, mylist1) print " after map:", print mylist1 print " !!!!!!!!!!!!!!!!! " print " MUST re-read bottom half of chapter 17" print "Done"
#!/usr/bin/env python import sys #tmp = sys.stdout #sys.stdout = open('output.txt', 'a') # variable number of arguments def min1(*args): res = args[0] for val in args[1:]: if (res > val): res = val return res print min1(1,2,3,4,5) print min1('a', 'b') print min1('12321', '1efwef') #lambda -- easier format of function #ex f = (lambda x=1,y=2,z=3: (x+y) *z) print f(3,4) print f(1,3,3) mydict = {'a':1, 'b':2} print mydict['a'] mydict2 = {'a':(lambda x,y: x+y), 'b':(lambda x,y: x *y)} print mydict['a'] print mydict2['a'](12,23) print mydict2['b'](12,23) # apply function, something useful when make use of function pointer of C def funct1(x,y,z): return x+y+z f = funct1 print "apply:%d" % apply(f, (3,5,6)) # map function mylist1 = [1,2,3,4] def sqrx(x): return x*x print "list before map", print mylist1 mylist1 = map(sqrx, mylist1) print " after map:", print mylist1 print " !!!!!!!!!!!!!!!!! " print " MUST re-read bottom half of chapter 17" print "Done"
en
0.270844
#!/usr/bin/env python #tmp = sys.stdout #sys.stdout = open('output.txt', 'a') # variable number of arguments #lambda -- easier format of function #ex # apply function, something useful when make use of function pointer of C # map function
3.952479
4
gluetool_modules_framework/tests/test_coldstore.py
testing-farm/gluetool-modules
0
6621536
# Copyright Contributors to the Testing Farm project. # SPDX-License-Identifier: Apache-2.0 import logging import pytest from mock import MagicMock import gluetool_modules_framework.helpers.coldstore from . import create_module, patch_shared, check_loadable @pytest.fixture(name='module') def fixture_module(): return create_module(gluetool_modules_framework.helpers.coldstore.ColdStore)[1] def test_loadable(module): check_loadable(module.glue, 'gluetool_modules_framework/helpers/coldstore.py', 'ColdStore') def test_coldstore_url(module, monkeypatch): module._config['coldstore-url-template'] = '{{ URL }}' patch_shared(monkeypatch, module, { 'eval_context': { 'URL': 'some-url' } }) assert module.coldstore_url == 'some-url' def test_execute_no_coldstore_url(module, monkeypatch, log): monkeypatch.setattr(gluetool_modules_framework.helpers.coldstore.ColdStore, 'coldstore_url', None) module.execute() assert log.match(message='Cold store URL seems to be empty', levelno=logging.WARN) assert not log.match(message='For the pipeline artifacts, see None') def test_execute_with_coldstore_url(module, monkeypatch, log): monkeypatch.setattr(gluetool_modules_framework.helpers.coldstore.ColdStore, 'coldstore_url', 'some-url') module.execute() assert not log.match(message='Cold store URL seems to be empty', levelno=logging.WARN) assert log.match(message='For the pipeline artifacts, see some-url') def test_eval_context(module, monkeypatch): monkeypatch.setattr(gluetool_modules_framework.helpers.coldstore.ColdStore, 'coldstore_url', 'some-url') assert module.eval_context == { 'COLDSTORE_URL': 'some-url' } def test_eval_context_recursion(module, monkeypatch): monkeypatch.setattr(gluetool_modules_framework.libs, 'is_recursion', MagicMock(return_value=True)) assert module.eval_context == {}
# Copyright Contributors to the Testing Farm project. # SPDX-License-Identifier: Apache-2.0 import logging import pytest from mock import MagicMock import gluetool_modules_framework.helpers.coldstore from . import create_module, patch_shared, check_loadable @pytest.fixture(name='module') def fixture_module(): return create_module(gluetool_modules_framework.helpers.coldstore.ColdStore)[1] def test_loadable(module): check_loadable(module.glue, 'gluetool_modules_framework/helpers/coldstore.py', 'ColdStore') def test_coldstore_url(module, monkeypatch): module._config['coldstore-url-template'] = '{{ URL }}' patch_shared(monkeypatch, module, { 'eval_context': { 'URL': 'some-url' } }) assert module.coldstore_url == 'some-url' def test_execute_no_coldstore_url(module, monkeypatch, log): monkeypatch.setattr(gluetool_modules_framework.helpers.coldstore.ColdStore, 'coldstore_url', None) module.execute() assert log.match(message='Cold store URL seems to be empty', levelno=logging.WARN) assert not log.match(message='For the pipeline artifacts, see None') def test_execute_with_coldstore_url(module, monkeypatch, log): monkeypatch.setattr(gluetool_modules_framework.helpers.coldstore.ColdStore, 'coldstore_url', 'some-url') module.execute() assert not log.match(message='Cold store URL seems to be empty', levelno=logging.WARN) assert log.match(message='For the pipeline artifacts, see some-url') def test_eval_context(module, monkeypatch): monkeypatch.setattr(gluetool_modules_framework.helpers.coldstore.ColdStore, 'coldstore_url', 'some-url') assert module.eval_context == { 'COLDSTORE_URL': 'some-url' } def test_eval_context_recursion(module, monkeypatch): monkeypatch.setattr(gluetool_modules_framework.libs, 'is_recursion', MagicMock(return_value=True)) assert module.eval_context == {}
en
0.55216
# Copyright Contributors to the Testing Farm project. # SPDX-License-Identifier: Apache-2.0
1.84635
2
pobalog/text_area_detection.py
select766/PoBaLog
0
6621537
<reponame>select766/PoBaLog """ 文字の面積を検出する """ import numpy as np import cv2 class TextAreaDetection: def __init__(self, rectangle, threshold): """ :param rectangle: 対象領域の上、下、左、右の座標 :param threshold: 明るさ閾値(0~255,これ以下が文字とみなされる) """ self.rectangle = rectangle self.threshold = threshold def evaluate(self, img): img_crop = img[self.rectangle[0]:self.rectangle[1], self.rectangle[2]:self.rectangle[3]] gray = cv2.cvtColor(img_crop, cv2.COLOR_BGR2GRAY) text_area = np.count_nonzero(gray <= self.threshold) return {'text_area': text_area}
""" 文字の面積を検出する """ import numpy as np import cv2 class TextAreaDetection: def __init__(self, rectangle, threshold): """ :param rectangle: 対象領域の上、下、左、右の座標 :param threshold: 明るさ閾値(0~255,これ以下が文字とみなされる) """ self.rectangle = rectangle self.threshold = threshold def evaluate(self, img): img_crop = img[self.rectangle[0]:self.rectangle[1], self.rectangle[2]:self.rectangle[3]] gray = cv2.cvtColor(img_crop, cv2.COLOR_BGR2GRAY) text_area = np.count_nonzero(gray <= self.threshold) return {'text_area': text_area}
ja
0.986953
文字の面積を検出する :param rectangle: 対象領域の上、下、左、右の座標 :param threshold: 明るさ閾値(0~255,これ以下が文字とみなされる)
3.258983
3
migrations/versions/fe743605e1a_remove_repositoryoption.py
vault-the/changes
443
6621538
"""Remove RepositoryOption Revision ID: fe743605e1a Revises: <PASSWORD> Create Date: 2014-09-17 15:17:09.925681 """ # revision identifiers, used by Alembic. revision = 'fe743605e1a' down_revision = '<PASSWORD>' from alembic import op def upgrade(): op.drop_table('repositoryoption') def downgrade(): raise NotImplementedError
"""Remove RepositoryOption Revision ID: fe743605e1a Revises: <PASSWORD> Create Date: 2014-09-17 15:17:09.925681 """ # revision identifiers, used by Alembic. revision = 'fe743605e1a' down_revision = '<PASSWORD>' from alembic import op def upgrade(): op.drop_table('repositoryoption') def downgrade(): raise NotImplementedError
en
0.428106
Remove RepositoryOption Revision ID: fe743605e1a Revises: <PASSWORD> Create Date: 2014-09-17 15:17:09.925681 # revision identifiers, used by Alembic.
0.925398
1
my_test/Net.py
RuoyuX-2018/6998DL
0
6621539
import cv2 import os import torch import torchvision from torchvision import transforms, utils import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = torch.nn.Sequential( torch.nn.Conv2d(3, 32, 3, 1, 1), torch.nn.ReLU(), torch.nn.MaxPool2d(2)) self.conv2 = torch.nn.Sequential( torch.nn.Conv2d(32, 64, 3, 1, 1), torch.nn.ReLU(), torch.nn.MaxPool2d(2) ) self.conv3 = torch.nn.Sequential( torch.nn.Conv2d(64, 64, 3, 1, 1), torch.nn.ReLU(), torch.nn.MaxPool2d(2) ) self.dense = torch.nn.Sequential( #torch.nn.Linear(4096, 1024), #torch.nn.ReLU(), torch.nn.Linear(1024, 128), torch.nn.ReLU(), torch.nn.Linear(128, 2) ) def forward(self, x): conv1_out = self.conv1(x) conv2_out = self.conv2(conv1_out) conv3_out = self.conv3(conv2_out) res = conv3_out.view(conv3_out.size(0), -1) out = self.dense(res) return out if __name__ == "__main__": train_data_path = "data/nn_data/trainset/" test_data_path = "data/nn_data/testset/" train_data = torchvision.datasets.ImageFolder(train_data_path, transform=transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor()])) test_data = torchvision.datasets.ImageFolder(test_data_path, transform=transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor()])) print(len(train_data)) print(len(test_data)) train_loader = torch.utils.data.DataLoader(train_data, batch_size=4,shuffle=True) test_loader = torch.utils.data.DataLoader(test_data, batch_size=4,shuffle=True) model = Net() print(model) optimizer = torch.optim.Adam(model.parameters()) loss_func = torch.nn.CrossEntropyLoss() for epoch in range(50): print('epoch {}'.format(epoch + 1)) # training----------------------------- train_loss = 0. train_acc = 0. 
for batch_x, batch_y in train_loader: batch_x, batch_y = Variable(batch_x), Variable(batch_y) out = model(batch_x) loss = loss_func(out, batch_y) train_loss += loss.item() pred = torch.max(out, 1)[1] train_correct = (pred == batch_y).sum() train_acc += train_correct.item() optimizer.zero_grad() loss.backward() optimizer.step() print('Train Loss: {:.6f}, Acc: {:.6f}'.format(train_loss / (len( train_data)), train_acc / (len(train_data)))) # evaluation-------------------------------- model.eval() eval_loss = 0. eval_acc = 0. num_false = 0 for batch_x, batch_y in test_loader: print(batch_x.shape) batch_x, batch_y = Variable(batch_x, volatile=True), Variable(batch_y, volatile=True) out = model(batch_x) loss = loss_func(out, batch_y) eval_loss += loss.item() pred = torch.max(out, 1)[1] if epoch == 49: for k in range(len(pred)): if pred[k] != batch_y[k]: torchvision.utils.save_image( batch_x[k], os.path.join("data/k/"+str(num_false) +"-" + str(batch_y[k])+".png")) num_false += 1 num_correct = (pred == batch_y).sum() eval_acc += num_correct.item() print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len( test_data)), eval_acc / (len(test_data)))) #torch.save(model, "CNN_model")
import cv2 import os import torch import torchvision from torchvision import transforms, utils import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = torch.nn.Sequential( torch.nn.Conv2d(3, 32, 3, 1, 1), torch.nn.ReLU(), torch.nn.MaxPool2d(2)) self.conv2 = torch.nn.Sequential( torch.nn.Conv2d(32, 64, 3, 1, 1), torch.nn.ReLU(), torch.nn.MaxPool2d(2) ) self.conv3 = torch.nn.Sequential( torch.nn.Conv2d(64, 64, 3, 1, 1), torch.nn.ReLU(), torch.nn.MaxPool2d(2) ) self.dense = torch.nn.Sequential( #torch.nn.Linear(4096, 1024), #torch.nn.ReLU(), torch.nn.Linear(1024, 128), torch.nn.ReLU(), torch.nn.Linear(128, 2) ) def forward(self, x): conv1_out = self.conv1(x) conv2_out = self.conv2(conv1_out) conv3_out = self.conv3(conv2_out) res = conv3_out.view(conv3_out.size(0), -1) out = self.dense(res) return out if __name__ == "__main__": train_data_path = "data/nn_data/trainset/" test_data_path = "data/nn_data/testset/" train_data = torchvision.datasets.ImageFolder(train_data_path, transform=transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor()])) test_data = torchvision.datasets.ImageFolder(test_data_path, transform=transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor()])) print(len(train_data)) print(len(test_data)) train_loader = torch.utils.data.DataLoader(train_data, batch_size=4,shuffle=True) test_loader = torch.utils.data.DataLoader(test_data, batch_size=4,shuffle=True) model = Net() print(model) optimizer = torch.optim.Adam(model.parameters()) loss_func = torch.nn.CrossEntropyLoss() for epoch in range(50): print('epoch {}'.format(epoch + 1)) # training----------------------------- train_loss = 0. train_acc = 0. 
for batch_x, batch_y in train_loader: batch_x, batch_y = Variable(batch_x), Variable(batch_y) out = model(batch_x) loss = loss_func(out, batch_y) train_loss += loss.item() pred = torch.max(out, 1)[1] train_correct = (pred == batch_y).sum() train_acc += train_correct.item() optimizer.zero_grad() loss.backward() optimizer.step() print('Train Loss: {:.6f}, Acc: {:.6f}'.format(train_loss / (len( train_data)), train_acc / (len(train_data)))) # evaluation-------------------------------- model.eval() eval_loss = 0. eval_acc = 0. num_false = 0 for batch_x, batch_y in test_loader: print(batch_x.shape) batch_x, batch_y = Variable(batch_x, volatile=True), Variable(batch_y, volatile=True) out = model(batch_x) loss = loss_func(out, batch_y) eval_loss += loss.item() pred = torch.max(out, 1)[1] if epoch == 49: for k in range(len(pred)): if pred[k] != batch_y[k]: torchvision.utils.save_image( batch_x[k], os.path.join("data/k/"+str(num_false) +"-" + str(batch_y[k])+".png")) num_false += 1 num_correct = (pred == batch_y).sum() eval_acc += num_correct.item() print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len( test_data)), eval_acc / (len(test_data)))) #torch.save(model, "CNN_model")
en
0.298467
#torch.nn.Linear(4096, 1024), #torch.nn.ReLU(), # training----------------------------- # evaluation-------------------------------- #torch.save(model, "CNN_model")
2.915636
3
jurisdictions/canada/can_financial_scrape.py
DiarmuidM/charity-dissolution
0
6621540
<filename>jurisdictions/canada/can_financial_scrape.py ## Python script to download financial data ## of Canadian charities from the CRA website # <NAME>, <NAME> # Created: 17 May 2018 # Last edited: captured in Github file history import csv import requests import os import os.path import errno import zipfile import io from time import sleep import random from bs4 import BeautifulSoup from downloaddate_function import downloaddate #prob dont need basic info or B #in A need question 2 as it's the dependant - make a note of this in the markdown # grab the links to the sceduels before sraping into them where neeeded - also have the links in the CSV # ========================================================== # ========================================================== def buildwebadd(orgid, name): webadd = 'http://www.cra-arc.gc.ca/ebci/haip/srch/charity-eng.action?bn=' + orgid + '&m=1' return webadd # This function looks at the charity page and finds the link to their T3010 record if it exists def getT3010(webadd): trys = 0 while trys<=3: try: rorg = requests.get(webadd) if rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) print(webadd, ' | ', rorg.status_code) if rorg.status_code == 200: html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') orgdetails = soup_org.find(text="T3010 Return") # If there is a T3010 link, then parse it if orgdetails != None: # Navigate up the tree to get at the <a> tage search = orgdetails.parent.parent.parent # Find the <a> tag, and look for the hyperlink link = search.find('a') if link.has_attr('href'): return 'http://www.cra-arc.gc.ca/' + link['href'] else: print('No link') return False else: print('No link') return False else: print('| ************ Address did not resolve') return False # Given a T3010 link, this function loops through the available years def scrapeorg(webadd, orgid): print('--------------------------------------') trys = 0 while trys<=3: try: rorg = requests.get(webadd) if 
rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) #print(webadd, ' | ', rorg.status_code) if rorg.status_code == 200: html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') # Build a table of all the available years yeartable = soup_org.find_all('div', class_=' ') # Loop through each year for year in yeartable: # Extract the financial year date findate = year.text[0:10] # Extract the financial year finyear = int(findate[0:4]) # Extract the link to the return for that year finlink = 'http://www.cra-arc.gc.ca/' + year.find('a')['href'] print(findate, end='') if finyear>=2009: # Pass the return for scraping scrape_finance(finlink, finyear, orgid, webadd) else: print(' | --') # This function takes a given report, and finds the link to the Schedule 6 # detailed financial data. It DOES NOT collect data for organisations without # a completed Schedule 6. # This function also handles the writing to file of the scraped finances. def scrape_finance(webadd, finyear, orgid, orglink): trys = 0 while trys<=3: try: rorg = requests.get(webadd) if rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) #print(webadd, ' | ', rorg.status_code) # This dict will hold the scraped financial information sched6record = {} if rorg.status_code == 200: html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') # Find the link to the Schedule 6 return if it exists schedule6 = soup_org.find(text="Schedule 6 - Detailed Financial Information") # Check if a valid link was found if schedule6 != None: if schedule6.parent.has_attr('href'): print(' | ** SCHEDULE 6') sched6_add = 'http://www.cra-arc.gc.ca' + schedule6.parent['href'].strip() # Use the Schedule 6 link to go and get the financial information, and return it into sched6record dict sched6record = scrape_sched6(sched6_add, finyear, orgid) # Add the relevant links to the dictionary for auditing sched6record['s6link']=sched6_add sched6record['yearlink']=webadd 
sched6record['orglink'] = orglink # If any of this fails, make an appropriate blank record to go into the output file else: print(' | -- NO s6 link found') sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0, 'orglink': orglink, 'yearlink': webadd, } else: print(' | -- No schedule 6 returned') sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0, 'orglink': orglink, 'yearlink': webadd} else: sched6record = {'orgid': orgid, 'year': finyear, 'sched6': -1, 'orglink': orglink, 'yearlink': webadd} print(' | -- Link failed') # This is where we write the dict to the output file on each pass # The file has one row per financial year writer.writerow(sched6record) # This is the function doimg the dirty work of collecting the financial data from the # Schedule 6 page. It takes advantage of the unique line numbers to code the data items. # It just grabs any data item with a valid line number. def scrape_sched6(webadd, finyear, orgid): trys = 0 while trys<=3: try: rorg = requests.get(webadd) if rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) #print(webadd, ' | ', rorg.status_code) if rorg.status_code == 200: sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0} html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') # Find all the rows of the table, denoted by <tr> tags revenue_row = soup_org.find_all('tr') # If a row with data is found, then separate it into columns if revenue_row != []: # Record for the output file that we got this far - if this flag is set then we expect to see financial data sched6record['sched6'] = 1 # Go through all the rows of the table in turn for row in revenue_row: # Columns are denoted by either <td> or <th> (The latter denotes totals in heading rows) col = row.find_all(['td', 'th']) if col != []: try: # The financial figures are in the third column. We strip out the dollar sign. 
figure = col[2].text.strip() # Rows with no financial data are recorded as n/a in the table, so ignore those if figure != 'n/a': figure = figure[2:] # Get the linenumber from the second column. This tells us what the number means, and is the key for the output dict linenumber = col[1].text.strip() # Spacer rows in the table don't have valid 4-digit line numbers, so we can ignore them if len(linenumber) == 4: sched6record[linenumber] = figure except: # This picks up the parse above failing - at the moment we don't record that # It would appear in the output file as a row with the sched6 flag set to '1', but no data # so we can pick it up in the quality checks later. We could always record something here if it's # a big problem. pass else: # If we don't manage to get a valid page at this stage just record in the output that no sched6 was found. # This could be extended to give other values of sched6 to help with debugging. sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0} # Return the dict containing the info that was scraped (or minimal info if scrape not successful) return sched6record # ========================================================== # ========================================================== # MAIN PROGRAM # ========================================================== # Run the downloaddate function to get the date 'benefacts_master.py' was executed. ddate = downloaddate() # Set up the file paths projectpath = './' outputfilepath = projectpath + 'data_raw/' + ddate + '/canada_register_finance_sample_' + ddate + '.csv' inputfilepath = projectpath + 'data_raw/' + ddate + '/canada_register_sample_' + ddate + '.csv' # Set the file row tracking startrow = 1 rowcounter=0 # Open the input file list of charities ... with open(inputfilepath, 'r', newline='') as inCSVfile: # ... as a CSV file. reader = csv.reader(inCSVfile) # Open the output file for financial information ... with open(outputfilepath, 'w', newline='') as outCSVfile: # ... 
as a CSV dict. Using dict here really simplifies storing the scrape data where many fields are missing outputfieldnames = ('orgid', 'year', 'sched6', 'orglink', 'yearlink', 's6link', '5040', '4310', '4540', '4575', '4160', '4250', '4166', '4900', '4110', '4810', '4300', '4860', '5000', '4580', '4891', '4920', '4330', '4600', '4100', '4150', '4950', '4500', '4140', '4571', '4610', '4700', '4830', '4890', '4130', '4640', '4850', '5030', '4155', '5050', '4200', '4165', '4870', '5010', '4840', '4590', '4120', '4880', '4800', '4320', '4510', '5610', '4350', '4630', '4505', '4650', '4560', '5020', '4910', '5100', '4620', '4180', '4530', '4170', '4820', '4550', '4520', '5070', '5060', '5640', '4525') writer = csv.DictWriter(outCSVfile, fieldnames = outputfieldnames) writer.writeheader() # Ignore the first row of fieldnames in the input file while rowcounter<startrow: next(reader) rowcounter+=1 # Iterate through the input file taking each charity in turn for row in reader: # We only really need the ID and NAME for each charity matchid = row[0] matchname = row[1] print('==============================') print('Organisation number:', rowcounter) print(matchid, matchname) # Build the organisation's web address orglink = buildwebadd(matchid, matchname) # Get the link to the t3010 t3010link = getT3010(orglink) # If it exists, then scrape the financial data if t3010link != False: scrapeorg(t3010link, matchid) sleep(random.randint(2,4)) else: # Otherwise write a record with missing data to show that we couldnt get any financial data for this organisation sched6record = {'orgid': matchid, 'year': -9, 'sched6': -9} writer.writerow(sched6record) # Keep count of the rows to help the user track progress rowcounter +=1
<filename>jurisdictions/canada/can_financial_scrape.py ## Python script to download financial data ## of Canadian charities from the CRA website # <NAME>, <NAME> # Created: 17 May 2018 # Last edited: captured in Github file history import csv import requests import os import os.path import errno import zipfile import io from time import sleep import random from bs4 import BeautifulSoup from downloaddate_function import downloaddate #prob dont need basic info or B #in A need question 2 as it's the dependant - make a note of this in the markdown # grab the links to the sceduels before sraping into them where neeeded - also have the links in the CSV # ========================================================== # ========================================================== def buildwebadd(orgid, name): webadd = 'http://www.cra-arc.gc.ca/ebci/haip/srch/charity-eng.action?bn=' + orgid + '&m=1' return webadd # This function looks at the charity page and finds the link to their T3010 record if it exists def getT3010(webadd): trys = 0 while trys<=3: try: rorg = requests.get(webadd) if rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) print(webadd, ' | ', rorg.status_code) if rorg.status_code == 200: html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') orgdetails = soup_org.find(text="T3010 Return") # If there is a T3010 link, then parse it if orgdetails != None: # Navigate up the tree to get at the <a> tage search = orgdetails.parent.parent.parent # Find the <a> tag, and look for the hyperlink link = search.find('a') if link.has_attr('href'): return 'http://www.cra-arc.gc.ca/' + link['href'] else: print('No link') return False else: print('No link') return False else: print('| ************ Address did not resolve') return False # Given a T3010 link, this function loops through the available years def scrapeorg(webadd, orgid): print('--------------------------------------') trys = 0 while trys<=3: try: rorg = requests.get(webadd) if 
rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) #print(webadd, ' | ', rorg.status_code) if rorg.status_code == 200: html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') # Build a table of all the available years yeartable = soup_org.find_all('div', class_=' ') # Loop through each year for year in yeartable: # Extract the financial year date findate = year.text[0:10] # Extract the financial year finyear = int(findate[0:4]) # Extract the link to the return for that year finlink = 'http://www.cra-arc.gc.ca/' + year.find('a')['href'] print(findate, end='') if finyear>=2009: # Pass the return for scraping scrape_finance(finlink, finyear, orgid, webadd) else: print(' | --') # This function takes a given report, and finds the link to the Schedule 6 # detailed financial data. It DOES NOT collect data for organisations without # a completed Schedule 6. # This function also handles the writing to file of the scraped finances. def scrape_finance(webadd, finyear, orgid, orglink): trys = 0 while trys<=3: try: rorg = requests.get(webadd) if rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) #print(webadd, ' | ', rorg.status_code) # This dict will hold the scraped financial information sched6record = {} if rorg.status_code == 200: html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') # Find the link to the Schedule 6 return if it exists schedule6 = soup_org.find(text="Schedule 6 - Detailed Financial Information") # Check if a valid link was found if schedule6 != None: if schedule6.parent.has_attr('href'): print(' | ** SCHEDULE 6') sched6_add = 'http://www.cra-arc.gc.ca' + schedule6.parent['href'].strip() # Use the Schedule 6 link to go and get the financial information, and return it into sched6record dict sched6record = scrape_sched6(sched6_add, finyear, orgid) # Add the relevant links to the dictionary for auditing sched6record['s6link']=sched6_add sched6record['yearlink']=webadd 
sched6record['orglink'] = orglink # If any of this fails, make an appropriate blank record to go into the output file else: print(' | -- NO s6 link found') sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0, 'orglink': orglink, 'yearlink': webadd, } else: print(' | -- No schedule 6 returned') sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0, 'orglink': orglink, 'yearlink': webadd} else: sched6record = {'orgid': orgid, 'year': finyear, 'sched6': -1, 'orglink': orglink, 'yearlink': webadd} print(' | -- Link failed') # This is where we write the dict to the output file on each pass # The file has one row per financial year writer.writerow(sched6record) # This is the function doimg the dirty work of collecting the financial data from the # Schedule 6 page. It takes advantage of the unique line numbers to code the data items. # It just grabs any data item with a valid line number. def scrape_sched6(webadd, finyear, orgid): trys = 0 while trys<=3: try: rorg = requests.get(webadd) if rorg.status_code == 200: trys=5 else: sleep(5) except: trys +=1 sleep(5) #print(webadd, ' | ', rorg.status_code) if rorg.status_code == 200: sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0} html_org = rorg.text soup_org = BeautifulSoup(html_org, 'html.parser') # Find all the rows of the table, denoted by <tr> tags revenue_row = soup_org.find_all('tr') # If a row with data is found, then separate it into columns if revenue_row != []: # Record for the output file that we got this far - if this flag is set then we expect to see financial data sched6record['sched6'] = 1 # Go through all the rows of the table in turn for row in revenue_row: # Columns are denoted by either <td> or <th> (The latter denotes totals in heading rows) col = row.find_all(['td', 'th']) if col != []: try: # The financial figures are in the third column. We strip out the dollar sign. 
figure = col[2].text.strip() # Rows with no financial data are recorded as n/a in the table, so ignore those if figure != 'n/a': figure = figure[2:] # Get the linenumber from the second column. This tells us what the number means, and is the key for the output dict linenumber = col[1].text.strip() # Spacer rows in the table don't have valid 4-digit line numbers, so we can ignore them if len(linenumber) == 4: sched6record[linenumber] = figure except: # This picks up the parse above failing - at the moment we don't record that # It would appear in the output file as a row with the sched6 flag set to '1', but no data # so we can pick it up in the quality checks later. We could always record something here if it's # a big problem. pass else: # If we don't manage to get a valid page at this stage just record in the output that no sched6 was found. # This could be extended to give other values of sched6 to help with debugging. sched6record = {'orgid': orgid, 'year': finyear, 'sched6': 0} # Return the dict containing the info that was scraped (or minimal info if scrape not successful) return sched6record # ========================================================== # ========================================================== # MAIN PROGRAM # ========================================================== # Run the downloaddate function to get the date 'benefacts_master.py' was executed. ddate = downloaddate() # Set up the file paths projectpath = './' outputfilepath = projectpath + 'data_raw/' + ddate + '/canada_register_finance_sample_' + ddate + '.csv' inputfilepath = projectpath + 'data_raw/' + ddate + '/canada_register_sample_' + ddate + '.csv' # Set the file row tracking startrow = 1 rowcounter=0 # Open the input file list of charities ... with open(inputfilepath, 'r', newline='') as inCSVfile: # ... as a CSV file. reader = csv.reader(inCSVfile) # Open the output file for financial information ... with open(outputfilepath, 'w', newline='') as outCSVfile: # ... 
as a CSV dict. Using dict here really simplifies storing the scrape data where many fields are missing outputfieldnames = ('orgid', 'year', 'sched6', 'orglink', 'yearlink', 's6link', '5040', '4310', '4540', '4575', '4160', '4250', '4166', '4900', '4110', '4810', '4300', '4860', '5000', '4580', '4891', '4920', '4330', '4600', '4100', '4150', '4950', '4500', '4140', '4571', '4610', '4700', '4830', '4890', '4130', '4640', '4850', '5030', '4155', '5050', '4200', '4165', '4870', '5010', '4840', '4590', '4120', '4880', '4800', '4320', '4510', '5610', '4350', '4630', '4505', '4650', '4560', '5020', '4910', '5100', '4620', '4180', '4530', '4170', '4820', '4550', '4520', '5070', '5060', '5640', '4525') writer = csv.DictWriter(outCSVfile, fieldnames = outputfieldnames) writer.writeheader() # Ignore the first row of fieldnames in the input file while rowcounter<startrow: next(reader) rowcounter+=1 # Iterate through the input file taking each charity in turn for row in reader: # We only really need the ID and NAME for each charity matchid = row[0] matchname = row[1] print('==============================') print('Organisation number:', rowcounter) print(matchid, matchname) # Build the organisation's web address orglink = buildwebadd(matchid, matchname) # Get the link to the t3010 t3010link = getT3010(orglink) # If it exists, then scrape the financial data if t3010link != False: scrapeorg(t3010link, matchid) sleep(random.randint(2,4)) else: # Otherwise write a record with missing data to show that we couldnt get any financial data for this organisation sched6record = {'orgid': matchid, 'year': -9, 'sched6': -9} writer.writerow(sched6record) # Keep count of the rows to help the user track progress rowcounter +=1
en
0.850296
## Python script to download financial data ## of Canadian charities from the CRA website # <NAME>, <NAME> # Created: 17 May 2018 # Last edited: captured in Github file history #prob dont need basic info or B #in A need question 2 as it's the dependant - make a note of this in the markdown # grab the links to the sceduels before sraping into them where neeeded - also have the links in the CSV # ========================================================== # ========================================================== # This function looks at the charity page and finds the link to their T3010 record if it exists # If there is a T3010 link, then parse it # Navigate up the tree to get at the <a> tage # Find the <a> tag, and look for the hyperlink # Given a T3010 link, this function loops through the available years #print(webadd, ' | ', rorg.status_code) # Build a table of all the available years # Loop through each year # Extract the financial year date # Extract the financial year # Extract the link to the return for that year # Pass the return for scraping # This function takes a given report, and finds the link to the Schedule 6 # detailed financial data. It DOES NOT collect data for organisations without # a completed Schedule 6. # This function also handles the writing to file of the scraped finances. #print(webadd, ' | ', rorg.status_code) # This dict will hold the scraped financial information # Find the link to the Schedule 6 return if it exists # Check if a valid link was found # Use the Schedule 6 link to go and get the financial information, and return it into sched6record dict # Add the relevant links to the dictionary for auditing # If any of this fails, make an appropriate blank record to go into the output file # This is where we write the dict to the output file on each pass # The file has one row per financial year # This is the function doimg the dirty work of collecting the financial data from the # Schedule 6 page. 
It takes advantage of the unique line numbers to code the data items. # It just grabs any data item with a valid line number. #print(webadd, ' | ', rorg.status_code) # Find all the rows of the table, denoted by <tr> tags # If a row with data is found, then separate it into columns # Record for the output file that we got this far - if this flag is set then we expect to see financial data # Go through all the rows of the table in turn # Columns are denoted by either <td> or <th> (The latter denotes totals in heading rows) # The financial figures are in the third column. We strip out the dollar sign. # Rows with no financial data are recorded as n/a in the table, so ignore those # Get the linenumber from the second column. This tells us what the number means, and is the key for the output dict # Spacer rows in the table don't have valid 4-digit line numbers, so we can ignore them # This picks up the parse above failing - at the moment we don't record that # It would appear in the output file as a row with the sched6 flag set to '1', but no data # so we can pick it up in the quality checks later. We could always record something here if it's # a big problem. # If we don't manage to get a valid page at this stage just record in the output that no sched6 was found. # This could be extended to give other values of sched6 to help with debugging. # Return the dict containing the info that was scraped (or minimal info if scrape not successful) # ========================================================== # ========================================================== # MAIN PROGRAM # ========================================================== # Run the downloaddate function to get the date 'benefacts_master.py' was executed. # Set up the file paths # Set the file row tracking # Open the input file list of charities ... # ... as a CSV file. # Open the output file for financial information ... # ... as a CSV dict. 
Using dict here really simplifies storing the scrape data where many fields are missing # Ignore the first row of fieldnames in the input file # Iterate through the input file taking each charity in turn # We only really need the ID and NAME for each charity # Build the organisation's web address # Get the link to the t3010 # If it exists, then scrape the financial data # Otherwise write a record with missing data to show that we couldnt get any financial data for this organisation # Keep count of the rows to help the user track progress
2.964407
3
covid/dashboard.py
jgdelrio/covid-data
0
6621541
import dash import dash_core_components as dcc import dash_html_components as html from covid.data_manager import global_data external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) colors = { 'background': '#111111', 'text': '#7FDBFF' } def generate_table(df, max_rows: int=20): """Generates an HTML table from a pandas dataframe with the number of rows specified""" return html.Table([ html.Thead( html.Tr([html.Th(col) for col in df.columns]) ), html.Tbody([ html.Tr([ html.Td(df.iloc[i][col]) for col in df.columns ]) for i in range(min(len(df), max_rows)) ]) ]) app.layout = html.Div( style={'backgroundColor': colors['background']}, children=[ html.H1(children='Graphical View', style={'textAlign': 'center', 'color': colors['text']} ), html.Div(children='Visualization examples', style={'textAlign': 'center', 'color': colors['text']}), dcc.Graph( id='example-graph', figure={ 'data': [ {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'}, {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'}, ], 'layout': { 'plot_bgcolor': colors['background'], 'paper_bgcolor': colors['background'], 'font': { 'color': colors['text'] } } } ), generate_table(global_data['Deaths']), ]) if __name__ == '__main__': app.run_server(debug=True)
import dash import dash_core_components as dcc import dash_html_components as html from covid.data_manager import global_data external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) colors = { 'background': '#111111', 'text': '#7FDBFF' } def generate_table(df, max_rows: int=20): """Generates an HTML table from a pandas dataframe with the number of rows specified""" return html.Table([ html.Thead( html.Tr([html.Th(col) for col in df.columns]) ), html.Tbody([ html.Tr([ html.Td(df.iloc[i][col]) for col in df.columns ]) for i in range(min(len(df), max_rows)) ]) ]) app.layout = html.Div( style={'backgroundColor': colors['background']}, children=[ html.H1(children='Graphical View', style={'textAlign': 'center', 'color': colors['text']} ), html.Div(children='Visualization examples', style={'textAlign': 'center', 'color': colors['text']}), dcc.Graph( id='example-graph', figure={ 'data': [ {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'}, {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'}, ], 'layout': { 'plot_bgcolor': colors['background'], 'paper_bgcolor': colors['background'], 'font': { 'color': colors['text'] } } } ), generate_table(global_data['Deaths']), ]) if __name__ == '__main__': app.run_server(debug=True)
en
0.303264
Generates an HTML table from a pandas dataframe with the number of rows specified
2.734759
3
Action_CNN.py
adewopova/Demonstration-Repo-GAGS
0
6621542
<reponame>adewopova/Demonstration-Repo-GAGS<filename>Action_CNN.py #!/usr/bin/env python # coding: utf-8 # # Training a ConvNet PyTorch # # In this notebook, you'll learn how to use the powerful PyTorch framework to specify a conv net architecture and train it on the human action recognition dataset. # # In[30]: #ip install -r requirements.txt # In[1]: import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader,sampler,Dataset import torchvision.datasets as dset import torchvision.transforms as T import timeit from PIL import Image import os import numpy as np import scipy.io import torchvision.models.inception as inception import csv import pandas as pd # ## What's this PyTorch business? # # * When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly. # * this notebook will walk you through much of what you need to do to train models using pytorch. if you want to learn more or need further clarification on topics that aren't fully explained here, here are 2 good Pytorch tutorials. 1): http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html 2)http://pytorch.org/tutorials/beginner/pytorch_with_examples.html # * It's not necessary to have a GPU for this homework, using a GPU can make your code run faster. # # ## Load Datasets # # In this part, we will load the action recognition dataset for the neural network. In order to load data from our custom dataset, we need to write a custom Dataloader. If you put q3_2_data.mat, /valClips,/trainClips,/testClips under the folder of ./data/ , you do not need to change anything in this part. # First, load the labels of the dataset, you should write your path of the q3_2_data.mat file. 
# In[18]: label_mat=scipy.io.loadmat('./data/q3_2_data.mat') label_train=label_mat['trLb'] print(len(label_train)) label_val=label_mat['valLb'] print(len(label_val)) # In[2]: df = pd.read_csv('/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/train.csv') vlabel_train=df['Label'] vlabel_train # In[37]: ASLabel=vlabel_train[] ASLabel # In[48]: vtrainclips=df.FileName vtrainclips # In[24]: label_train # In[41]: label # ### Dataset class # # torch.utils.data.Dataset is an abstract class representing a dataset. The custom dataset should inherit Dataset and override the following methods: # # __len__ so that len(dataset) returns the size of the dataset. # __getitem__ to support the indexing such that dataset[i] can be used to get ith sample # # Let’s create a dataset class for our action recognition dataset. We will read images in __getitem__. This is memory efficient because all the images are not stored in the memory at once but read as required. # # Sample of our dataset will be a dict {'image':image,'img_path':img_path,'Label':Label}. Our datset will take an optional argument transform so that any required processing can be applied on the sample. # In[3]: class ActionDataset(Dataset): """Action dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length*3 def __getitem__(self, idx): folder=idx/3+1 imidx=idx%3+1 folder=str(folder) imgname=str(imidx)+'.jpg' img_path = os.path.join(self.root_dir, folder,imgname) image = Image.open(img_path) if len(self.labels)!=0: Label=self.labels[idx/3][0]-1 if self.transform: image = self.transform(image) if len(self.labels)!=0: sample={'image':image,'img_path':img_path,'Label':Label} else: sample={'image':image,'img_path':img_path} return sample # In[5]: # image_dataset=ActionDataset(root_dir='/home/adewopva/Downloads/CNN_AR/CNN-Action-Recognition-master/data/trainClips/',\ # labels=label_train,transform=T.ToTensor()) # #iterating though the dataset # for i in range(10): # sample=image_dataset[i] # print(sample['image'].shape) # print(sample['Label']) # print(sample['img_path']) # In[1]: ''' For the given path, get the List of all files in the directory tree ''' def getListOfFiles(dirName): # create a list of file and sub directories # names in the given directory listOfFile = os.listdir(dirName) allFiles = [] # Iterate over all the entries for entry in listOfFile: # Create full path fullPath = os.path.join(dirName, entry) # If entry is a directory then get the list of files in this directory if os.path.isdir(fullPath): allFiles = allFiles + getListOfFiles(fullPath) else: allFiles.append(fullPath) return allFiles # In[7]: data_dir_list # In[77]: import os def listdirs(rootdir): d=[] for file in os.listdir(rootdir): d = os.path.join(rootdir, file) if os.path.isdir(d): print(d) listdirs(d) rootdir ='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/' listdirs(rootdir) # In[14]: # In[16]: #V Current as of 2am 1/26 class ActionDataset(Dataset): """Action dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: root_dir (string): Directory with all the images. 
labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. """ self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length*5 def __getitem__(self, idx): root=self.root_dir #we shall store all the file names in this list img_path1=[] for path, subdirs, files in os.walk(root): for name in files: img_path1.append(os.path.join(path, name)) #print all the file names for var in img_path1: if var.endswith(".jpg"): img_path=var image = Image.open(img_path) if len(self.labels)!=0: #your_path = img/path1 label1 = img_path.split(os.sep) labels_name={'on_feet':0, 'active':1, 'rest':2, 'escape':3, 'crawling':4} label2=label1[10] Label=labels_name[label2] if self.transform: image = self.transform(image) if len(self.labels)!=0: sample={'image':image,'img_path':img_path,'Label':Label} else: sample={'image':image,'img_path':img_path} #print(sample) return sample image_dataset=ActionDataset(root_dir=r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor()) #iterating though the dataset for i in range(4): sample1=image_dataset[i] print(sample1['image'].shape) print(sample1['Label']) print(sample1['img_path']) # In[17]: #V Current as of 2am 1/26 class ActionDataset(Dataset): """Action dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length*5 def __getitem__(self, root): root=self.root_dir #we shall store all the file names in this list #img_path1=[] for path, subdirs, files in os.walk(root): for name in files: img_path1=(os.path.join(path, name)) if img_path1.endswith(".jpg"): #img_path=var image = Image.open(img_path1) #your_path = img/path1 label1 = img_path1.split(os.sep) labels_name={'on_feet':0, 'active':1, 'rest':2, 'escape':3, 'crawling':4} label2=label1[10] Label=labels_name[label2] if self.transform: image = self.transform(image) if len(self.labels)!=0: sample={'image':image,'img_path':img_path1,'Label':Label} else: sample={'image':image,'img_path':img_path1} #print(sample) return sample #break image_dataset=ActionDataset(root_dir=r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor()) #iterating though the dataset for i in range (5): sample1=image_dataset[i] print(sample1['image'].shape) print(sample1['Label']) print(sample1['img_path']) # In[ ]: HERE IS THE PROBLEM. The output is just a single image not different image. 
# In[22]: # Working Full Code for video with single action and Multiple actions import os, sys import pandas as pd directory = r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/' input_base = [] for filename in os.listdir(directory): if filename.endswith(".csv"): os.path.splitext(filename) filename = os.path.splitext(filename)[0] # In[5]: import os #img_path1=[] for path, subdirs, files in os.walk(r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'): for i in files: print(os.path.join(path, i)) # In[11]: #file=img_path.splitext(filename) filename = os.path.splitext(img_path)[] filename # In[9]: print(len(img_path1)) # In[5]: print(len(img_path1)) # In[6]: # #!/usr/bin/python # # -*- coding: utf-8 -*- # class ActionDataset(Dataset): # """Action dataset.""" # def __init__( # self, # root_dir, # labels=[], # transform=None, # ): # """ # Args: # root_dir (string): Directory with all the images. # labels(list): labels if images. # transform (callable, optional): Optional transform to be applied on a sample. 
# """ # self.root_dir = root_dir # self.transform = transform # self.length = len(os.listdir(self.root_dir)) # self.labels = labels # def __len__(self): # return self.length * 3 # def __getitem__(self, idx): # root = self.root_dir # # we shall store all the file names in this list # img_path1 = [] # for (root, dirs, files) in os.walk(root): # for file in files: # # append the file name to the list # img_path1.append(os.path.join(root, file)) # return img_path1 # # print all the file names # for name in img_path1: # img_path = name # image = Image.open(img_path) # # your_path = imgpath1 # label1 = img_path.split(os.sep) # labels_name = { # 'on_feet': 0, # 'active': 1, # 'rest': 2, # 'escape': 3, # 'crawling': 4, # } # label2 = label1[10] # Label = labels_name[label2] # if self.transform: # image = self.transform(image) # if len(self.labels) != 0: # sample = {'image': image, 'img_path': img_path, # 'Label': Label} # else: # sample = {'image': image, 'img_path': img_path} # return sample # image_dataset = ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/' # , labels=vlabel_train, transform=T.ToTensor()) # # iterating though the dataset # for i in range(10): # sample1 = image_dataset[i] # print (sample1['image'].shape) # print (sample1['Label']) # print (sample1['img_path']) # In[19]: image_dataset=ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor()) #iterating though the dataset for i in range(10): sample=image_dataset[i] print(sample['image'].shape) print(sample['Label']) print(sample['img_path']) # We can iterate over the created dataset with a 'for' loop as before. However, we are losing a lot of features by using a simple for loop to iterate over the data. 
In particular, we are missing out on: # # * Batching the data # * Shuffling the data # * Load the data in parallel using multiprocessing workers. # # torch.utils.data.DataLoader is an iterator which provides all these features. # Dataloaders for the training, validationg and testing set. # In[38]: image_dataset_train=ActionDataset(root_dir='./data/trainClips/',labels=label_train,transform=T.ToTensor()) image_dataloader_train = DataLoader(image_dataset_train, batch_size=32, shuffle=True, num_workers=4) image_dataset_val=ActionDataset(root_dir='./data/valClips/',labels=label_val,transform=T.ToTensor()) image_dataloader_val = DataLoader(image_dataset_val, batch_size=32, shuffle=False, num_workers=4) image_dataset_test=ActionDataset(root_dir='./data/testClips/',labels=[],transform=T.ToTensor()) image_dataloader_test = DataLoader(image_dataset_test, batch_size=32, shuffle=False, num_workers=4) # In[39]: dtype = torch.FloatTensor # the CPU datatype # Constant to control how frequently we print train loss print_every = 100 # This is a little utility that we'll use to reset the model # if we want to re-initialize all our parameters def reset(m): if hasattr(m, 'reset_parameters'): m.reset_parameters() # ## Example Model # # ### Some assorted tidbits # # Let's start by looking at a simple model. First, note that PyTorch operates on Tensors, which are n-dimensional arrays functionally analogous to numpy's ndarrays, with the additional feature that they can be used for computations on GPUs. # # We'll provide you with a Flatten function, which we explain here. Remember that our image data (and more relevantly, our intermediate feature maps) are initially N x C x H x W, where: # * N is the number of datapoints # * C is the number of image channels. 
# * H is the height of the intermediate feature map in pixels # * W is the height of the intermediate feature map in pixels # # This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we input data into fully connected affine layers, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "Flatten" operation to collapse the C x H x W values per representation into a single long vector. The Flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly). # In[40]: class Flatten(nn.Module): def forward(self, x): N, C, H, W = x.size() # read in N, C, H, W return x.view(N, -1) # "flatten" the C * H * W values into a single vector per image # ### The example model itself # # The first step to training your own model is defining its architecture. # # Here's an example of a convolutional neural network defined in PyTorch -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. nn.Sequential is a container which applies each layer # one after the other. # # In this example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Cross-Entropy loss function, and the Adam optimizer being used. # # Make sure you understand why the parameters of the Linear layer are 10092 and 10. 
# # In[9]: # Here's where we define the architecture of the model... simple_model = nn.Sequential( nn.Conv2d(3, 32, kernel_size=7, stride=2), nn.ReLU(inplace=True), Flatten(), # see above for explanation nn.Linear(10092, 10), # affine layer ) # Set the type of all data in this model to be FloatTensor simple_model.type(dtype) loss_fn = nn.CrossEntropyLoss().type(dtype) optimizer = optim.Adam(simple_model.parameters(), lr=1e-2) # lr sets the learning rate of the optimizer # PyTorch supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). # # * Layers: http://pytorch.org/docs/nn.html # * Activations: http://pytorch.org/docs/nn.html#non-linear-activations # * Loss functions: http://pytorch.org/docs/nn.html#loss-functions # * Optimizers: http://pytorch.org/docs/optim.html#algorithms # ## Training a specific model # # In this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the PyTorch documentation and configuring your own model. # # Using the code provided above as guidance, and using the following PyTorch documentation, specify a model with the following architecture: # # * 7x7 Convolutional Layer with 8 filters and stride of 1 # * ReLU Activation Layer # * 2x2 Max Pooling layer with a stride of 2 # * 7x7 Convolutional Layer with 16 filters and stride of 1 # * ReLU Activation Layer # * 2x2 Max Pooling layer with a stride of 2 # * Flatten the feature map # * ReLU Activation Layer # * Affine layer to map input units to 10 outputs, you need to figure out the input size here. 
# # In[10]: fixed_model_base = nn.Sequential( #########1st To Do (10 points)################### nn.Conv2d(3, 8, kernel_size=7, stride=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride = 2), nn.Conv2d(8, 16, kernel_size=7, stride=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride = 2), Flatten(), nn.ReLU(inplace=True), nn.Linear(1936, 10) #################################### ) fixed_model = fixed_model_base.type(dtype) # To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 32 x 10, since our batches have size 32 and the output of the final affine layer should be 10, corresponding to our 10 classes): # In[11]: ## Now we're going to feed a random batch into the model you defined and make sure the output is the right size x = torch.randn(32, 3, 64, 64).type(dtype) x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data ans = fixed_model(x_var) # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly print(np.array(ans.size())) np.array_equal(np.array(ans.size()), np.array([32, 10])) # ### Train the model. # # Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the fixed_model_base we provided above). # # Make sure you understand how each PyTorch function used below corresponds to what you implemented in your custom neural network implementation. # # Note that because we are not resetting the weights anywhere below, if you run the cell multiple times, you are effectively training multiple epochs (so your performance should improve). 
# # First, set up an RMSprop optimizer (using a 1e-4 learning rate) and a cross-entropy loss function: # In[31]: ################ 2nd To Do (5 points)################## optimizer = torch.optim.RMSprop(fixed_model_base.parameters(), lr = 0.0001) #optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001) loss_fn = nn.CrossEntropyLoss() #loss_fn = nn.MultiMarginLoss() # In[37]: # This sets the model in "training" mode. # This is relevant for some layers that may have different behavior # in training mode vs testing mode, such as Dropout and BatchNorm. fixed_model.train() # Load one batch at a time. for t, sample in enumerate(image_dataloader_train): x_var = Variable(sample['image']) #print(type(x_var.data)) #print(x_var.shape) y_var = Variable(sample['Label']).long() # This is the forward pass: predict the scores for each class, for each x in the batch. scores = fixed_model(x_var) # Use the correct y values and the predicted y values to compute the loss. loss = loss_fn(scores, y_var) if (t + 1) % print_every == 0: print('t = %d, loss = %.4f' % (t + 1, loss.data[0])) # Zero out all of the gradients for the variables which the optimizer will update. optimizer.zero_grad() # This is the backwards pass: compute the gradient of the loss with respect to each # parameter of the model. loss.backward() # Actually update the parameters of the model using the gradients computed by the backwards pass. optimizer.step() # Now you've seen how the training process works in PyTorch. 
# To save you writing boilerplate code, we're providing the following helper functions to help you train for multiple epochs and check the accuracy of your model:

# In[41]:


def train(model, loss_fn, optimizer, dataloader, num_epochs=1):
    """Train `model` for `num_epochs` passes over `dataloader`.

    Args:
        model: the network to optimize (updated in place).
        loss_fn: criterion mapping (scores, labels) -> scalar loss.
        optimizer: a torch.optim optimizer bound to model.parameters().
        dataloader: yields dicts with 'image' (N x C x H x W float tensor)
            and 'Label' (N,) class-index tensor.
        num_epochs: number of full passes over the data.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        # Report validation accuracy before each epoch.
        # FIX: evaluate the `model` argument -- the original evaluated the
        # global `fixed_model`, which silently broke this helper for any
        # other model; its comment also wrongly said "training set".
        check_accuracy(model, image_dataloader_val)
        model.train()  # training-mode behavior (Dropout active, BatchNorm updates)
        for t, sample in enumerate(dataloader):
            x_var = Variable(sample['image'])
            y_var = Variable(sample['Label'].long())
            # Forward pass: predict the scores for each class, for each x in the batch.
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            if (t + 1) % print_every == 0:
                # NOTE(review): loss.data[0] works on PyTorch <= 0.3 (the API
                # used throughout this notebook); on >= 0.4 use loss.item().
                print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
            optimizer.zero_grad()  # clear gradients accumulated by the previous step
            loss.backward()        # backprop: d(loss)/d(param) for every parameter
            optimizer.step()       # apply the parameter update


def check_accuracy(model, loader):
    """Compute and print `model`'s classification accuracy over `loader`.

    `loader` yields dicts with 'image' and 'Label' as in train().
    Prints "Got X / Y correct (P)"; returns None.
    """
    num_correct = 0
    num_samples = 0
    model.eval()  # Put the model in test mode (the opposite of model.train(), essentially)
    for t, sample in enumerate(loader):
        x_var = Variable(sample['image'])
        y_var = sample['Label']
        scores = model(x_var)
        _, preds = scores.data.max(1)  # predicted class = argmax over class scores
        num_correct += (preds.numpy() == y_var.numpy()).sum()
        num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))


# ### Check the accuracy of the model.
#
# Let's see the train and check_accuracy code in action -- feel free to use these methods when evaluating the models you develop below.
#
# You should get a training loss of around 1.0-1.2, and a validation accuracy of around 50-60%. As mentioned above, if you re-run the cells, you'll be training more epochs, so your performance will improve past these numbers.
#
# But don't worry about getting these numbers better -- this was just practice before you tackle designing your own model.
# In[39]: torch.random.manual_seed(54321) fixed_model.cpu() fixed_model.apply(reset) fixed_model.train() train(fixed_model, loss_fn, optimizer,image_dataloader_train, num_epochs=4) check_accuracy(fixed_model, image_dataloader_train)# check accuracy on the training set # ### Don't forget the validation set! # # And note that you can use the check_accuracy function to evaluate on the validation set, by passing **image_dataloader_val** as the second argument to check_accuracy. The accuracy on validation set is arround 40-50%. # In[40]: check_accuracy(fixed_model, image_dataloader_val)#check accuracy on the validation set # ##### Train a better model for action recognition! # # Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves better accuracy on the action recognition **validation** set. You can use the check_accuracy and train functions from above. # In[42]: ###########3rd To Do (16 points, must submit the results to Kaggle) ############## # Train your model here, and make sure the output of this cell is the accuracy of your best model on the # train, val, and test sets. Here's some code to get you started. The output of this cell should be the training # and validation accuracy on your best model (measured by validation accuracy). 
fixed_model_base = nn.Sequential( nn.Conv2d(3, 200, kernel_size=10, stride=3), nn.ReLU(inplace=True), nn.MaxPool2d(3, stride = 1), nn.BatchNorm2d(200), nn.Dropout2d(0.1), nn.Conv2d(200, 100, kernel_size=5, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(3, stride = 1), nn.BatchNorm2d(100), nn.Dropout2d(0.2), nn.Conv2d(100, 50, kernel_size=3, stride=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=1), nn.BatchNorm2d(50), Flatten(), nn.Linear(200, 100), nn.Linear(100, 50), nn.Linear(50, 10), nn.LogSoftmax() #################################### ) fixed_model = fixed_model_base.type(dtype) optimizer = torch.optim.RMSprop(fixed_model_base.parameters(), lr = 0.0001) #optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001) loss_fn = nn.CrossEntropyLoss() # ### Describe what you did # # In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network. # ### Tell us here! # ########### 4th To Do (4 points) ############## # * 10X10 Convolution layer with 200 filters with stride 3 # * ReLU layer # * Max Pool layer with window size 3X3 with stride 1 # * Batch Norm layer with input size 200 # * Dropout layer with penalty 0.1 # * 5X5 Convolution layer with 100 filters with stride 2 # * ReLU layer # * Max Pool layer with window size 3X3 with stride 1 # * Batch Norm layer with input size 100 # * Dropout layer with penalty 0.2 # * 3X3 Convolution layer with 50 filters and stride 1 # * ReLU layer # * Max Pool layer with window size 2 and stride 1 # * Batch Norm layer with input size 50 # * Flatten # * affine layer to reduce inputs from 200 to 100 # * affine layer to reduce inputs from 100 to 50 # * affine layer to reduce inputs from 50 to 10 # * logsoftmaxing layer # ### Testing the model and submit on Kaggle # Testing the model on the testing set and save the results as a .csv file. 
# Please submitted the results.csv file generated by predict_on_test() to Kaggle(https://www.kaggle.com/c/cse512springhw3) to see how well your network performs on the test set. # #######5th To Do (submit the result to Kaggle,the highest 3 entries get extra 10 points )############### # # * Rank: 10 # * Score: 70.34658 # In[ ]: # In[43]: ## Now we're going to feed a random batch into the model you defined and make sure the output is the right size x = torch.randn(32, 3, 64, 64).type(dtype) x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data ans = fixed_model(x_var) # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly print(np.array(ans.size())) np.array_equal(np.array(ans.size()), np.array([32, 10])) # In[78]: torch.random.manual_seed(54321) fixed_model.cpu() fixed_model.apply(reset) fixed_model.train() train(fixed_model, loss_fn, optimizer,image_dataloader_train, num_epochs=12) check_accuracy(fixed_model, image_dataloader_train)# check accuracy on the training set # In[79]: check_accuracy(fixed_model, image_dataloader_val)# check accuracy on the training set # ### Things you should try: # - **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient # - **Number of filters**: Do more or fewer do better? # - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions? # - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster? # - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? 
Good architectures to try include: # - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM] # - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture). # - **Regularization**: Add l2 weight regularization, or perhaps use Dropout. # # ### Tips for training # For each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind: # # - If the parameters are working well, you should see improvement within a few hundred iterations # - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all. # - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs. # - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set. # # ### Going above and beyond # If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try. # # - Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta. 
# - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut. # - Model ensembles # - Data augmentation # - New Architectures # - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output. # - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together. # - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32) # # If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below. # # ### What we expect # At the very least, you should be able to train a ConvNet that gets at least 55% accuracy on the validation set. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches. # # You should use the space below to experiment and train your network. # # # In[ ]: train(fixed_model_base, loss_fn, optimizer,image_dataloader_train, num_epochs=4) check_accuracy(fixed_model, image_dataloader_val) # ### GPU! (This part is optional, 0 points) # # If you have access to GPU, you can make the code run on GPU, it would be much faster. # # Now, we're going to switch the dtype of the model and our data to the GPU-friendly tensors, and see what happens... everything is the same, except we are casting our model and input tensors as this new dtype instead of the old one. # # If this returns false, or otherwise fails in a not-graceful way (i.e., with some error message), you may not have an NVIDIA GPU available on your machine. 
# In[75]: # Verify that CUDA is properly configured and you have a GPU available torch.cuda.is_available() # In[76]: import copy gpu_dtype = torch.cuda.FloatTensor fixed_model_gpu = copy.deepcopy(fixed_model_base)#.type(gpu_dtype) fixed_model_gpu.cuda() x_gpu = torch.randn(4, 3, 64, 64).cuda()#.type(gpu_dtype) x_var_gpu = Variable(x_gpu)#type(gpu_dtype)) # Construct a PyTorch Variable out of your input data ans = fixed_model_gpu(x_var_gpu) # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly np.array_equal(np.array(ans.size()), np.array([4, 10])) # Run the following cell to evaluate the performance of the forward pass running on the CPU: # In[77]: get_ipython().run_cell_magic('timeit', '', 'ans = fixed_model(x_var)') # ... and now the GPU: # In[78]: get_ipython().run_cell_magic('timeit', '', 'torch.cuda.synchronize() # Make sure there are no pending GPU computations\nans = fixed_model_gpu(x_var_gpu) # Feed it through the model! \ntorch.cuda.synchronize() # Make sure there are no pending GPU computations') # You should observe that even a simple forward pass like this is significantly faster on the GPU. 
So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use the GPU datatype for your model and your tensors: as a reminder that is *torch.cuda.FloatTensor* (in our notebook here as *gpu_dtype*) # Let's make the loss function and training variables to GPU friendly format by '.cuda()' # In[79]: loss_fn = nn.CrossEntropyLoss().cuda() optimizer = optim.RMSprop(fixed_model_gpu.parameters(), lr=1e-4) # In[80]: def train(model, loss_fn, optimizer, dataloader, num_epochs = 1): for epoch in range(num_epochs): print('Starting epoch %d / %d' % (epoch + 1, num_epochs)) model.train() check_accuracy(fixed_model_gpu, image_dataloader_val)# check accuracy on the training set for t, sample in enumerate(dataloader): x_var = Variable(sample['image'].cuda()) y_var = Variable(sample['Label'].cuda().long()) scores = model(x_var) loss = loss_fn(scores, y_var) if (t + 1) % print_every == 0: print('t = %d, loss = %.4f' % (t + 1, loss.data[0])) optimizer.zero_grad() loss.backward() optimizer.step() def check_accuracy(model, loader): ''' if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') ''' num_correct = 0 num_samples = 0 model.eval() # Put the model in test mode (the opposite of model.train(), essentially) for t, sample in enumerate(loader): x_var = Variable(sample['image'].cuda()) y_var = sample['Label'].cuda() y_var=y_var.cpu() scores = model(x_var) _, preds = scores.data.cpu().max(1) #print(preds) #print(y_var) num_correct += (preds.numpy() == y_var.numpy()).sum() num_samples += preds.size(0) acc = float(num_correct) / num_samples print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc)) # Run on GPU! 
# In[47]: torch.cuda.random.manual_seed(873271) fixed_model_gpu.apply(reset) fixed_model_gpu.train() train(fixed_model_gpu, loss_fn, optimizer,image_dataloader_train, num_epochs=4) check_accuracy(fixed_model_gpu, image_dataloader_train)# check accuracy on the training set # In[48]: check_accuracy(fixed_model_gpu, image_dataloader_val)# check accuracy on the training set # In[46]: def predict_on_test(model, loader): ''' if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') ''' num_correct = 0 num_samples = 0 model.eval() # Put the model in test mode (the opposite of model.train(), essentially) results=open('results.csv','w') count=0 results.write('Id'+','+'Class'+'\n') for t, sample in enumerate(loader): x_var = Variable(sample['image']) scores = model(x_var) _, preds = scores.data.max(1) for i in range(len(preds)): results.write(str(count)+','+str(preds[i])+'\n') count+=1 results.close() return count count=predict_on_test(fixed_model, image_dataloader_test) print(count) # ### 3D Convolution on video clips (25 points+10 extra points) # 3D convolution is for videos, it has one more dimension than 2d convolution. You can find the document for 3D convolution here http://pytorch.org/docs/master/nn.html#torch.nn.Conv3dIn. In our dataset, each clip is a video of 3 frames. Lets classify the each clip rather than each image using 3D convolution. # We offer the data loader, the train_3d and check_accuracy # In[49]: class ActionClipDataset(Dataset): """Action Landmarks dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length def __getitem__(self, idx): folder=idx+1 folder=format(folder,'05d') clip=[] if len(self.labels)!=0: Label=self.labels[idx][0]-1 for i in range(3): imidx=i+1 imgname=str(imidx)+'.jpg' img_path = os.path.join(self.root_dir, folder,imgname) image = Image.open(img_path) image=np.array(image) clip.append(image) if self.transform: clip=np.asarray(clip) clip=np.transpose(clip, (0,3,1,2)) clip = torch.from_numpy(np.asarray(clip)) if len(self.labels)!=0: sample={'clip':clip,'Label':Label,'folder':folder} else: sample={'clip':clip,'folder':folder} return sample clip_dataset=ActionClipDataset(root_dir='./data/trainClips/', labels=label_train,transform=T.ToTensor())#/home/tqvinh/Study/CSE512/cse512-s18/hw2data/trainClips/ for i in range(10): sample=clip_dataset[i] print(sample['clip'].shape) print(sample['Label']) print(sample['folder']) # In[50]: clip_dataloader = DataLoader(clip_dataset, batch_size=4, shuffle=True, num_workers=4) for i,sample in enumerate(clip_dataloader): print(i,sample['clip'].shape,sample['folder'],sample['Label']) if i>20: break # In[51]: clip_dataset_train=ActionClipDataset(root_dir='./data/trainClips/',labels=label_train,transform=T.ToTensor()) clip_dataloader_train = DataLoader(clip_dataset_train, batch_size=16, shuffle=True, num_workers=4) clip_dataset_val=ActionClipDataset(root_dir='./data/valClips/',labels=label_val,transform=T.ToTensor()) clip_dataloader_val = DataLoader(clip_dataset_val, batch_size=16, shuffle=True, num_workers=4) clip_dataset_test=ActionClipDataset(root_dir='./data/testClips/',labels=[],transform=T.ToTensor()) clip_dataloader_test = DataLoader(clip_dataset_test, batch_size=16, shuffle=False, num_workers=4) # Write the Flatten for 3d covolution feature maps. 
# In[52]:


class Flatten3d(nn.Module):
    """Flatten a 5-D conv feature map (N, C, D, H, W) into (N, C*D*H*W)."""

    def forward(self, x):
        ############### 6th To Do (5 points) ###################
        N, C, D, H, W = x.size()  # read in N, C, D, H, W
        return x.view(N, -1)  # keep the batch dim, flatten everything else


# Design a network using 3D convolution on videos for video classification.

# In[58]:

fixed_model_3d = nn.Sequential(
    ############### 7th To Do (16 points) #########################
    nn.Conv3d(in_channels=3, out_channels=50, kernel_size=2, stride=1),
    nn.ReLU(inplace=True),
    nn.MaxPool3d((1, 2, 2), stride=2),
    nn.Conv3d(in_channels=50, out_channels=100, kernel_size=(1, 3, 3), stride=1),
    nn.ReLU(inplace=True),
    nn.MaxPool3d((1, 3, 3), stride=2),
    nn.Dropout3d(0.1),
    Flatten3d(),
    nn.ReLU(inplace=True),
    nn.Linear(19600, 10),
    # FIX: removed the trailing nn.LogSoftmax() layer. This model is trained
    # with nn.CrossEntropyLoss, which applies log-softmax internally, and
    # log_softmax is shift-invariant, so applying it twice is a mathematical
    # no-op -- the layer only added compute plus an implicit-`dim`
    # deprecation warning. Predictions are unaffected: argmax over logits
    # equals argmax over log-probabilities.
    ###############################
)
fixed_model_3d = fixed_model_3d.type(dtype)

# Sanity-check the output shape with a random batch of shape
# (32, 3, 3, 64, 64): it should map to 32 x 10 class scores.
x = torch.randn(32, 3, 3, 64, 64).type(dtype)
x_var = Variable(x).type(dtype)  # Construct a PyTorch Variable out of your input data
ans = fixed_model_3d(x_var)
np.array_equal(np.array(ans.size()), np.array([32, 10]))
# Accuracy 62, iterations 6

# ### Describe what you did (4 points)
#
# In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network.
# 8th To Do # Tell us here: # * 2X2X2 Convolution layer with 50 filters # * ReLU layer inplace True # * Max Pooling layer with window size (1, 2, 2) stride = 2 # * 1X3X3 Convolution layer with 100 filters # * ReLU layer with inplace True # * Max Pooling layer with window size (1, 3, 3) stride = 2 # * dropout layer with penalty 0.1 # * flattening # * ReLU layer with inplace True # * Affine layer # * LogSoftmax Layer # In[59]: loss_fn = nn.CrossEntropyLoss().type(dtype) optimizer = optim.RMSprop(fixed_model_3d.parameters(), lr=1e-4) # In[60]: def train_3d(model, loss_fn, optimizer,dataloader,num_epochs = 1): for epoch in range(num_epochs): print('Starting epoch %d / %d' % (epoch + 1, num_epochs)) check_accuracy_3d(fixed_model_3d, clip_dataloader_val) model.train() for t, sample in enumerate(dataloader): x_var = Variable(sample['clip'].type(dtype)) y_var = Variable(sample['Label'].type(dtype).long()) scores = model(x_var) loss = loss_fn(scores, y_var) if (t + 1) % print_every == 0: print('t = %d, loss = %.4f' % (t + 1, loss.data[0])) optimizer.zero_grad() loss.backward() optimizer.step() def check_accuracy_3d(model, loader): ''' if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') ''' num_correct = 0 num_samples = 0 model.eval() # Put the model in test mode (the opposite of model.train(), essentially) for t, sample in enumerate(loader): x_var = Variable(sample['clip'].type(dtype)) y_var = sample['Label'].type(dtype) y_var=y_var.cpu() scores = model(x_var) _, preds = scores.data.cpu().max(1) #print(preds) #print(y_var) num_correct += (preds.numpy() == y_var.numpy()).sum() num_samples += preds.size(0) acc = float(num_correct) / num_samples print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc)) # In[61]: torch.cuda.random.manual_seed(782374) fixed_model_3d.apply(reset) fixed_model_3d.train() train_3d(fixed_model_3d, loss_fn, optimizer,clip_dataloader_train, num_epochs=5) 
fixed_model_3d.eval()
check_accuracy_3d(fixed_model_3d, clip_dataloader_train)  # accuracy on the training set
check_accuracy_3d(fixed_model_3d, clip_dataloader_val)    # accuracy on the validation set

# Test your 3d convolution model on the validation set. You don't need to submit the result of this part to kaggle.
# Test your model on the test set, predict_on_test_3d() will generate a file named 'results_3d.csv'. Please submit the csv file to kaggle https://www.kaggle.com/c/cse512springhw3video
# The highest 3 entries get extra 10 points.
#

# In[62]:


def predict_on_test_3d(model, loader):
    """Run `model` over every clip in `loader` and write 'results_3d.csv'.

    The CSV has an 'Id,Class' header followed by one row per clip, where
    Id is a running counter and Class is the predicted class index.

    Returns:
        The number of prediction rows written.
    """
    model.eval()  # test-mode behavior: disable Dropout3d
    count = 0
    # FIX: 'with' guarantees the file is closed even if inference raises
    # (the original leaked the handle on any exception).
    with open('results_3d.csv', 'w') as results:
        results.write('Id' + ',' + 'Class' + '\n')
        for t, sample in enumerate(loader):
            x_var = Variable(sample['clip'].type(dtype))
            scores = model(x_var)
            _, preds = scores.data.max(1)  # predicted class = argmax over scores
            for i in range(len(preds)):
                # FIX: int() unwraps preds[i]; on PyTorch >= 0.4 indexing
                # yields a 0-dim tensor, so str() alone would write
                # "tensor(3)" instead of "3". On older PyTorch this is a
                # no-op (indexing already returned a Python int).
                results.write(str(count) + ',' + str(int(preds[i])) + '\n')
                count += 1
    return count


count = predict_on_test_3d(fixed_model_3d, clip_dataloader_test)
print(count)

# * Rank on kaggle: 27
# * Score: 61.80428

# In[ ]:
#!/usr/bin/env python # coding: utf-8 # # Training a ConvNet PyTorch # # In this notebook, you'll learn how to use the powerful PyTorch framework to specify a conv net architecture and train it on the human action recognition dataset. # # In[30]: #ip install -r requirements.txt # In[1]: import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader,sampler,Dataset import torchvision.datasets as dset import torchvision.transforms as T import timeit from PIL import Image import os import numpy as np import scipy.io import torchvision.models.inception as inception import csv import pandas as pd # ## What's this PyTorch business? # # * When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly. # * this notebook will walk you through much of what you need to do to train models using pytorch. if you want to learn more or need further clarification on topics that aren't fully explained here, here are 2 good Pytorch tutorials. 1): http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html 2)http://pytorch.org/tutorials/beginner/pytorch_with_examples.html # * It's not necessary to have a GPU for this homework, using a GPU can make your code run faster. # # ## Load Datasets # # In this part, we will load the action recognition dataset for the neural network. In order to load data from our custom dataset, we need to write a custom Dataloader. If you put q3_2_data.mat, /valClips,/trainClips,/testClips under the folder of ./data/ , you do not need to change anything in this part. # First, load the labels of the dataset, you should write your path of the q3_2_data.mat file. 
# In[18]: label_mat=scipy.io.loadmat('./data/q3_2_data.mat') label_train=label_mat['trLb'] print(len(label_train)) label_val=label_mat['valLb'] print(len(label_val)) # In[2]: df = pd.read_csv('/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/train.csv') vlabel_train=df['Label'] vlabel_train # In[37]: ASLabel=vlabel_train[] ASLabel # In[48]: vtrainclips=df.FileName vtrainclips # In[24]: label_train # In[41]: label # ### Dataset class # # torch.utils.data.Dataset is an abstract class representing a dataset. The custom dataset should inherit Dataset and override the following methods: # # __len__ so that len(dataset) returns the size of the dataset. # __getitem__ to support the indexing such that dataset[i] can be used to get ith sample # # Let’s create a dataset class for our action recognition dataset. We will read images in __getitem__. This is memory efficient because all the images are not stored in the memory at once but read as required. # # Sample of our dataset will be a dict {'image':image,'img_path':img_path,'Label':Label}. Our datset will take an optional argument transform so that any required processing can be applied on the sample. # In[3]: class ActionDataset(Dataset): """Action dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length*3 def __getitem__(self, idx): folder=idx/3+1 imidx=idx%3+1 folder=str(folder) imgname=str(imidx)+'.jpg' img_path = os.path.join(self.root_dir, folder,imgname) image = Image.open(img_path) if len(self.labels)!=0: Label=self.labels[idx/3][0]-1 if self.transform: image = self.transform(image) if len(self.labels)!=0: sample={'image':image,'img_path':img_path,'Label':Label} else: sample={'image':image,'img_path':img_path} return sample # In[5]: # image_dataset=ActionDataset(root_dir='/home/adewopva/Downloads/CNN_AR/CNN-Action-Recognition-master/data/trainClips/',\ # labels=label_train,transform=T.ToTensor()) # #iterating though the dataset # for i in range(10): # sample=image_dataset[i] # print(sample['image'].shape) # print(sample['Label']) # print(sample['img_path']) # In[1]: ''' For the given path, get the List of all files in the directory tree ''' def getListOfFiles(dirName): # create a list of file and sub directories # names in the given directory listOfFile = os.listdir(dirName) allFiles = [] # Iterate over all the entries for entry in listOfFile: # Create full path fullPath = os.path.join(dirName, entry) # If entry is a directory then get the list of files in this directory if os.path.isdir(fullPath): allFiles = allFiles + getListOfFiles(fullPath) else: allFiles.append(fullPath) return allFiles # In[7]: data_dir_list # In[77]: import os def listdirs(rootdir): d=[] for file in os.listdir(rootdir): d = os.path.join(rootdir, file) if os.path.isdir(d): print(d) listdirs(d) rootdir ='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/' listdirs(rootdir) # In[14]: # In[16]: #V Current as of 2am 1/26 class ActionDataset(Dataset): """Action dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: root_dir (string): Directory with all the images. 
labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. """ self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length*5 def __getitem__(self, idx): root=self.root_dir #we shall store all the file names in this list img_path1=[] for path, subdirs, files in os.walk(root): for name in files: img_path1.append(os.path.join(path, name)) #print all the file names for var in img_path1: if var.endswith(".jpg"): img_path=var image = Image.open(img_path) if len(self.labels)!=0: #your_path = img/path1 label1 = img_path.split(os.sep) labels_name={'on_feet':0, 'active':1, 'rest':2, 'escape':3, 'crawling':4} label2=label1[10] Label=labels_name[label2] if self.transform: image = self.transform(image) if len(self.labels)!=0: sample={'image':image,'img_path':img_path,'Label':Label} else: sample={'image':image,'img_path':img_path} #print(sample) return sample image_dataset=ActionDataset(root_dir=r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor()) #iterating though the dataset for i in range(4): sample1=image_dataset[i] print(sample1['image'].shape) print(sample1['Label']) print(sample1['img_path']) # In[17]: #V Current as of 2am 1/26 class ActionDataset(Dataset): """Action dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length*5 def __getitem__(self, root): root=self.root_dir #we shall store all the file names in this list #img_path1=[] for path, subdirs, files in os.walk(root): for name in files: img_path1=(os.path.join(path, name)) if img_path1.endswith(".jpg"): #img_path=var image = Image.open(img_path1) #your_path = img/path1 label1 = img_path1.split(os.sep) labels_name={'on_feet':0, 'active':1, 'rest':2, 'escape':3, 'crawling':4} label2=label1[10] Label=labels_name[label2] if self.transform: image = self.transform(image) if len(self.labels)!=0: sample={'image':image,'img_path':img_path1,'Label':Label} else: sample={'image':image,'img_path':img_path1} #print(sample) return sample #break image_dataset=ActionDataset(root_dir=r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor()) #iterating though the dataset for i in range (5): sample1=image_dataset[i] print(sample1['image'].shape) print(sample1['Label']) print(sample1['img_path']) # In[ ]: HERE IS THE PROBLEM. The output is just a single image not different image. 
# In[22]:

# Working Full Code for video with single action and Multiple actions

import os
import sys

import pandas as pd

# Collect .csv annotation files in the training directory and strip their
# extensions.
# NOTE(review): input_base is never appended to -- only `filename` is
# rebound each iteration; confirm whether the base names were meant to be
# accumulated.
directory = r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'
input_base = []
for filename in os.listdir(directory):
    if filename.endswith(".csv"):
        filename = os.path.splitext(filename)[0]

# In[5]:

import os

# Recursively print every file path under the training directory.
for path, subdirs, files in os.walk(r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'):
    for i in files:
        print(os.path.join(path, i))

# In[11]:

# file=img_path.splitext(filename)
# FIX: `os.path.splitext(img_path)[]` was a SyntaxError (empty subscript)
# that made this file unimportable as a .py module. splitext returns a
# (root, ext) tuple, so [0] selects the path without its extension.
# NOTE(review): `img_path` must be defined by an earlier cell -- confirm.
filename = os.path.splitext(img_path)[0]
filename

# In[9]:

print(len(img_path1))

# In[5]:

print(len(img_path1))

# In[6]:

# #!/usr/bin/python
# # -*- coding: utf-8 -*-
# class ActionDataset(Dataset):
#     """Action dataset."""
#     def __init__(
#         self,
#         root_dir,
#         labels=[],
#         transform=None,
#     ):
#         """
#         Args:
#         root_dir (string): Directory with all the images.
#         labels(list): labels if images.
#         transform (callable, optional): Optional transform to be applied on a sample.
# """ # self.root_dir = root_dir # self.transform = transform # self.length = len(os.listdir(self.root_dir)) # self.labels = labels # def __len__(self): # return self.length * 3 # def __getitem__(self, idx): # root = self.root_dir # # we shall store all the file names in this list # img_path1 = [] # for (root, dirs, files) in os.walk(root): # for file in files: # # append the file name to the list # img_path1.append(os.path.join(root, file)) # return img_path1 # # print all the file names # for name in img_path1: # img_path = name # image = Image.open(img_path) # # your_path = imgpath1 # label1 = img_path.split(os.sep) # labels_name = { # 'on_feet': 0, # 'active': 1, # 'rest': 2, # 'escape': 3, # 'crawling': 4, # } # label2 = label1[10] # Label = labels_name[label2] # if self.transform: # image = self.transform(image) # if len(self.labels) != 0: # sample = {'image': image, 'img_path': img_path, # 'Label': Label} # else: # sample = {'image': image, 'img_path': img_path} # return sample # image_dataset = ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/' # , labels=vlabel_train, transform=T.ToTensor()) # # iterating though the dataset # for i in range(10): # sample1 = image_dataset[i] # print (sample1['image'].shape) # print (sample1['Label']) # print (sample1['img_path']) # In[19]: image_dataset=ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor()) #iterating though the dataset for i in range(10): sample=image_dataset[i] print(sample['image'].shape) print(sample['Label']) print(sample['img_path']) # We can iterate over the created dataset with a 'for' loop as before. However, we are losing a lot of features by using a simple for loop to iterate over the data. 
In particular, we are missing out on: # # * Batching the data # * Shuffling the data # * Load the data in parallel using multiprocessing workers. # # torch.utils.data.DataLoader is an iterator which provides all these features. # Dataloaders for the training, validationg and testing set. # In[38]: image_dataset_train=ActionDataset(root_dir='./data/trainClips/',labels=label_train,transform=T.ToTensor()) image_dataloader_train = DataLoader(image_dataset_train, batch_size=32, shuffle=True, num_workers=4) image_dataset_val=ActionDataset(root_dir='./data/valClips/',labels=label_val,transform=T.ToTensor()) image_dataloader_val = DataLoader(image_dataset_val, batch_size=32, shuffle=False, num_workers=4) image_dataset_test=ActionDataset(root_dir='./data/testClips/',labels=[],transform=T.ToTensor()) image_dataloader_test = DataLoader(image_dataset_test, batch_size=32, shuffle=False, num_workers=4) # In[39]: dtype = torch.FloatTensor # the CPU datatype # Constant to control how frequently we print train loss print_every = 100 # This is a little utility that we'll use to reset the model # if we want to re-initialize all our parameters def reset(m): if hasattr(m, 'reset_parameters'): m.reset_parameters() # ## Example Model # # ### Some assorted tidbits # # Let's start by looking at a simple model. First, note that PyTorch operates on Tensors, which are n-dimensional arrays functionally analogous to numpy's ndarrays, with the additional feature that they can be used for computations on GPUs. # # We'll provide you with a Flatten function, which we explain here. Remember that our image data (and more relevantly, our intermediate feature maps) are initially N x C x H x W, where: # * N is the number of datapoints # * C is the number of image channels. 
# * H is the height of the intermediate feature map in pixels
# * W is the width of the intermediate feature map in pixels
#
# This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we input data into fully connected affine layers, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "Flatten" operation to collapse the C x H x W values per representation into a single long vector. The Flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly).

# In[40]:


class Flatten(nn.Module):
    """Collapse each sample's C x H x W feature map into a single flat vector.

    An input of shape (N, C, H, W) becomes an output of shape (N, C*H*W);
    the batch dimension N is preserved.
    """

    def forward(self, x):
        # Unpack the batch size and the per-sample feature-map dimensions.
        n, c, h, w = x.size()
        # view() returns a reshaped view (no copy) with C, H, W merged
        # into one axis per image.
        return x.view(n, c * h * w)


# ### The example model itself
#
# The first step to training your own model is defining its architecture.
#
# Here's an example of a convolutional neural network defined in PyTorch -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. nn.Sequential is a container which applies each layer
# one after the other.
#
# In this example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Cross-Entropy loss function, and the Adam optimizer being used.
#
# Make sure you understand why the parameters of the Linear layer are 10092 and 10.
# # In[9]: # Here's where we define the architecture of the model... simple_model = nn.Sequential( nn.Conv2d(3, 32, kernel_size=7, stride=2), nn.ReLU(inplace=True), Flatten(), # see above for explanation nn.Linear(10092, 10), # affine layer ) # Set the type of all data in this model to be FloatTensor simple_model.type(dtype) loss_fn = nn.CrossEntropyLoss().type(dtype) optimizer = optim.Adam(simple_model.parameters(), lr=1e-2) # lr sets the learning rate of the optimizer # PyTorch supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). # # * Layers: http://pytorch.org/docs/nn.html # * Activations: http://pytorch.org/docs/nn.html#non-linear-activations # * Loss functions: http://pytorch.org/docs/nn.html#loss-functions # * Optimizers: http://pytorch.org/docs/optim.html#algorithms # ## Training a specific model # # In this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the PyTorch documentation and configuring your own model. # # Using the code provided above as guidance, and using the following PyTorch documentation, specify a model with the following architecture: # # * 7x7 Convolutional Layer with 8 filters and stride of 1 # * ReLU Activation Layer # * 2x2 Max Pooling layer with a stride of 2 # * 7x7 Convolutional Layer with 16 filters and stride of 1 # * ReLU Activation Layer # * 2x2 Max Pooling layer with a stride of 2 # * Flatten the feature map # * ReLU Activation Layer # * Affine layer to map input units to 10 outputs, you need to figure out the input size here. 
# # In[10]: fixed_model_base = nn.Sequential( #########1st To Do (10 points)################### nn.Conv2d(3, 8, kernel_size=7, stride=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride = 2), nn.Conv2d(8, 16, kernel_size=7, stride=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride = 2), Flatten(), nn.ReLU(inplace=True), nn.Linear(1936, 10) #################################### ) fixed_model = fixed_model_base.type(dtype) # To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 32 x 10, since our batches have size 32 and the output of the final affine layer should be 10, corresponding to our 10 classes): # In[11]: ## Now we're going to feed a random batch into the model you defined and make sure the output is the right size x = torch.randn(32, 3, 64, 64).type(dtype) x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data ans = fixed_model(x_var) # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly print(np.array(ans.size())) np.array_equal(np.array(ans.size()), np.array([32, 10])) # ### Train the model. # # Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the fixed_model_base we provided above). # # Make sure you understand how each PyTorch function used below corresponds to what you implemented in your custom neural network implementation. # # Note that because we are not resetting the weights anywhere below, if you run the cell multiple times, you are effectively training multiple epochs (so your performance should improve). 
# # First, set up an RMSprop optimizer (using a 1e-4 learning rate) and a cross-entropy loss function: # In[31]: ################ 2nd To Do (5 points)################## optimizer = torch.optim.RMSprop(fixed_model_base.parameters(), lr = 0.0001) #optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001) loss_fn = nn.CrossEntropyLoss() #loss_fn = nn.MultiMarginLoss() # In[37]: # This sets the model in "training" mode. # This is relevant for some layers that may have different behavior # in training mode vs testing mode, such as Dropout and BatchNorm. fixed_model.train() # Load one batch at a time. for t, sample in enumerate(image_dataloader_train): x_var = Variable(sample['image']) #print(type(x_var.data)) #print(x_var.shape) y_var = Variable(sample['Label']).long() # This is the forward pass: predict the scores for each class, for each x in the batch. scores = fixed_model(x_var) # Use the correct y values and the predicted y values to compute the loss. loss = loss_fn(scores, y_var) if (t + 1) % print_every == 0: print('t = %d, loss = %.4f' % (t + 1, loss.data[0])) # Zero out all of the gradients for the variables which the optimizer will update. optimizer.zero_grad() # This is the backwards pass: compute the gradient of the loss with respect to each # parameter of the model. loss.backward() # Actually update the parameters of the model using the gradients computed by the backwards pass. optimizer.step() # Now you've seen how the training process works in PyTorch. 
# To save you writing boilerplate code, we're providing the following helper functions to help you train for multiple epochs and check the accuracy of your model:

# In[41]:


def train(model, loss_fn, optimizer, dataloader, num_epochs=1):
    """Train `model` for `num_epochs` passes over `dataloader`.

    Each sample dict from the loader provides an 'image' tensor and an
    integer 'Label'.  Validation accuracy is printed at the start of each
    epoch.  `print_every` and `image_dataloader_val` are module-level
    globals defined earlier in the notebook.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        # Report validation accuracy of the model actually being trained.
        # The original hard-coded the global `fixed_model` here, which
        # silently evaluated the wrong network whenever train() was called
        # with any other model.
        check_accuracy(model, image_dataloader_val)
        model.train()
        for t, sample in enumerate(dataloader):
            x_var = Variable(sample['image'])
            y_var = Variable(sample['Label'].long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            if (t + 1) % print_every == 0:
                print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


def check_accuracy(model, loader):
    """Compute, print and return classification accuracy of `model` over `loader`.

    Returns the accuracy as a float in [0, 1] so callers can use it
    programmatically; existing callers that ignore the return value are
    unaffected.
    """
    num_correct = 0
    num_samples = 0
    model.eval()  # evaluation mode (affects Dropout / BatchNorm behavior)
    for t, sample in enumerate(loader):
        x_var = Variable(sample['image'])
        y_var = sample['Label']
        scores = model(x_var)
        # Predicted class per sample = argmax over class scores.
        _, preds = scores.data.max(1)
        num_correct += (preds.numpy() == y_var.numpy()).sum()
        num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
    return acc


# ### Check the accuracy of the model.
#
# Let's see the train and check_accuracy code in action -- feel free to use these methods when evaluating the models you develop below.
#
# You should get a training loss of around 1.0-1.2, and a validation accuracy of around 50-60%. As mentioned above, if you re-run the cells, you'll be training more epochs, so your performance will improve past these numbers.
#
# But don't worry about getting these numbers better -- this was just practice before you tackle designing your own model.
# In[39]: torch.random.manual_seed(54321) fixed_model.cpu() fixed_model.apply(reset) fixed_model.train() train(fixed_model, loss_fn, optimizer,image_dataloader_train, num_epochs=4) check_accuracy(fixed_model, image_dataloader_train)# check accuracy on the training set # ### Don't forget the validation set! # # And note that you can use the check_accuracy function to evaluate on the validation set, by passing **image_dataloader_val** as the second argument to check_accuracy. The accuracy on validation set is arround 40-50%. # In[40]: check_accuracy(fixed_model, image_dataloader_val)#check accuracy on the validation set # ##### Train a better model for action recognition! # # Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves better accuracy on the action recognition **validation** set. You can use the check_accuracy and train functions from above. # In[42]: ###########3rd To Do (16 points, must submit the results to Kaggle) ############## # Train your model here, and make sure the output of this cell is the accuracy of your best model on the # train, val, and test sets. Here's some code to get you started. The output of this cell should be the training # and validation accuracy on your best model (measured by validation accuracy). 
fixed_model_base = nn.Sequential( nn.Conv2d(3, 200, kernel_size=10, stride=3), nn.ReLU(inplace=True), nn.MaxPool2d(3, stride = 1), nn.BatchNorm2d(200), nn.Dropout2d(0.1), nn.Conv2d(200, 100, kernel_size=5, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(3, stride = 1), nn.BatchNorm2d(100), nn.Dropout2d(0.2), nn.Conv2d(100, 50, kernel_size=3, stride=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=1), nn.BatchNorm2d(50), Flatten(), nn.Linear(200, 100), nn.Linear(100, 50), nn.Linear(50, 10), nn.LogSoftmax() #################################### ) fixed_model = fixed_model_base.type(dtype) optimizer = torch.optim.RMSprop(fixed_model_base.parameters(), lr = 0.0001) #optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001) loss_fn = nn.CrossEntropyLoss() # ### Describe what you did # # In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network. # ### Tell us here! # ########### 4th To Do (4 points) ############## # * 10X10 Convolution layer with 200 filters with stride 3 # * ReLU layer # * Max Pool layer with window size 3X3 with stride 1 # * Batch Norm layer with input size 200 # * Dropout layer with penalty 0.1 # * 5X5 Convolution layer with 100 filters with stride 2 # * ReLU layer # * Max Pool layer with window size 3X3 with stride 1 # * Batch Norm layer with input size 100 # * Dropout layer with penalty 0.2 # * 3X3 Convolution layer with 50 filters and stride 1 # * ReLU layer # * Max Pool layer with window size 2 and stride 1 # * Batch Norm layer with input size 50 # * Flatten # * affine layer to reduce inputs from 200 to 100 # * affine layer to reduce inputs from 100 to 50 # * affine layer to reduce inputs from 50 to 10 # * logsoftmaxing layer # ### Testing the model and submit on Kaggle # Testing the model on the testing set and save the results as a .csv file. 
# Please submitted the results.csv file generated by predict_on_test() to Kaggle(https://www.kaggle.com/c/cse512springhw3) to see how well your network performs on the test set. # #######5th To Do (submit the result to Kaggle,the highest 3 entries get extra 10 points )############### # # * Rank: 10 # * Score: 70.34658 # In[ ]: # In[43]: ## Now we're going to feed a random batch into the model you defined and make sure the output is the right size x = torch.randn(32, 3, 64, 64).type(dtype) x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data ans = fixed_model(x_var) # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly print(np.array(ans.size())) np.array_equal(np.array(ans.size()), np.array([32, 10])) # In[78]: torch.random.manual_seed(54321) fixed_model.cpu() fixed_model.apply(reset) fixed_model.train() train(fixed_model, loss_fn, optimizer,image_dataloader_train, num_epochs=12) check_accuracy(fixed_model, image_dataloader_train)# check accuracy on the training set # In[79]: check_accuracy(fixed_model, image_dataloader_val)# check accuracy on the training set # ### Things you should try: # - **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient # - **Number of filters**: Do more or fewer do better? # - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions? # - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster? # - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? 
Good architectures to try include: # - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM] # - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture). # - **Regularization**: Add l2 weight regularization, or perhaps use Dropout. # # ### Tips for training # For each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind: # # - If the parameters are working well, you should see improvement within a few hundred iterations # - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all. # - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs. # - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set. # # ### Going above and beyond # If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try. # # - Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta. 
# - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut. # - Model ensembles # - Data augmentation # - New Architectures # - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output. # - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together. # - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32) # # If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below. # # ### What we expect # At the very least, you should be able to train a ConvNet that gets at least 55% accuracy on the validation set. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches. # # You should use the space below to experiment and train your network. # # # In[ ]: train(fixed_model_base, loss_fn, optimizer,image_dataloader_train, num_epochs=4) check_accuracy(fixed_model, image_dataloader_val) # ### GPU! (This part is optional, 0 points) # # If you have access to GPU, you can make the code run on GPU, it would be much faster. # # Now, we're going to switch the dtype of the model and our data to the GPU-friendly tensors, and see what happens... everything is the same, except we are casting our model and input tensors as this new dtype instead of the old one. # # If this returns false, or otherwise fails in a not-graceful way (i.e., with some error message), you may not have an NVIDIA GPU available on your machine. 
# In[75]: # Verify that CUDA is properly configured and you have a GPU available torch.cuda.is_available() # In[76]: import copy gpu_dtype = torch.cuda.FloatTensor fixed_model_gpu = copy.deepcopy(fixed_model_base)#.type(gpu_dtype) fixed_model_gpu.cuda() x_gpu = torch.randn(4, 3, 64, 64).cuda()#.type(gpu_dtype) x_var_gpu = Variable(x_gpu)#type(gpu_dtype)) # Construct a PyTorch Variable out of your input data ans = fixed_model_gpu(x_var_gpu) # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly np.array_equal(np.array(ans.size()), np.array([4, 10])) # Run the following cell to evaluate the performance of the forward pass running on the CPU: # In[77]: get_ipython().run_cell_magic('timeit', '', 'ans = fixed_model(x_var)') # ... and now the GPU: # In[78]: get_ipython().run_cell_magic('timeit', '', 'torch.cuda.synchronize() # Make sure there are no pending GPU computations\nans = fixed_model_gpu(x_var_gpu) # Feed it through the model! \ntorch.cuda.synchronize() # Make sure there are no pending GPU computations') # You should observe that even a simple forward pass like this is significantly faster on the GPU. 
# So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use the GPU datatype for your model and your tensors: as a reminder that is *torch.cuda.FloatTensor* (in our notebook here as *gpu_dtype*)

# Let's make the loss function and training variables to GPU friendly format by '.cuda()'

# In[79]:


loss_fn = nn.CrossEntropyLoss().cuda()
optimizer = optim.RMSprop(fixed_model_gpu.parameters(), lr=1e-4)


# In[80]:


def train(model, loss_fn, optimizer, dataloader, num_epochs=1):
    """GPU variant of train(): moves each batch to CUDA before the forward pass.

    Same contract as the CPU version; `print_every` and
    `image_dataloader_val` are module-level globals.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        model.train()
        # Validate the model actually being trained.  The original passed
        # the global `fixed_model_gpu` here, which broke train() for any
        # other model handed in.
        check_accuracy(model, image_dataloader_val)
        for t, sample in enumerate(dataloader):
            x_var = Variable(sample['image'].cuda())
            y_var = Variable(sample['Label'].cuda().long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            if (t + 1) % print_every == 0:
                print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


def check_accuracy(model, loader):
    """Print and return the accuracy of `model` over `loader` (inputs moved to GPU)."""
    num_correct = 0
    num_samples = 0
    model.eval()  # Put the model in test mode (the opposite of model.train(), essentially)
    for t, sample in enumerate(loader):
        x_var = Variable(sample['image'].cuda())
        # Labels are compared on the CPU.  The original copied them to the
        # GPU with .cuda() and immediately back with .cpu() -- a pointless
        # round trip that is dropped here.
        y_var = sample['Label']
        scores = model(x_var)
        # Predictions come back to the CPU once, for the numpy comparison.
        _, preds = scores.data.cpu().max(1)
        num_correct += (preds.numpy() == y_var.numpy()).sum()
        num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
    return acc


# Run on GPU!
# In[47]: torch.cuda.random.manual_seed(873271) fixed_model_gpu.apply(reset) fixed_model_gpu.train() train(fixed_model_gpu, loss_fn, optimizer,image_dataloader_train, num_epochs=4) check_accuracy(fixed_model_gpu, image_dataloader_train)# check accuracy on the training set # In[48]: check_accuracy(fixed_model_gpu, image_dataloader_val)# check accuracy on the training set # In[46]: def predict_on_test(model, loader): ''' if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') ''' num_correct = 0 num_samples = 0 model.eval() # Put the model in test mode (the opposite of model.train(), essentially) results=open('results.csv','w') count=0 results.write('Id'+','+'Class'+'\n') for t, sample in enumerate(loader): x_var = Variable(sample['image']) scores = model(x_var) _, preds = scores.data.max(1) for i in range(len(preds)): results.write(str(count)+','+str(preds[i])+'\n') count+=1 results.close() return count count=predict_on_test(fixed_model, image_dataloader_test) print(count) # ### 3D Convolution on video clips (25 points+10 extra points) # 3D convolution is for videos, it has one more dimension than 2d convolution. You can find the document for 3D convolution here http://pytorch.org/docs/master/nn.html#torch.nn.Conv3dIn. In our dataset, each clip is a video of 3 frames. Lets classify the each clip rather than each image using 3D convolution. # We offer the data loader, the train_3d and check_accuracy # In[49]: class ActionClipDataset(Dataset): """Action Landmarks dataset.""" def __init__(self, root_dir,labels=[], transform=None): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.root_dir = root_dir self.transform = transform self.length=len(os.listdir(self.root_dir)) self.labels=labels def __len__(self): return self.length def __getitem__(self, idx): folder=idx+1 folder=format(folder,'05d') clip=[] if len(self.labels)!=0: Label=self.labels[idx][0]-1 for i in range(3): imidx=i+1 imgname=str(imidx)+'.jpg' img_path = os.path.join(self.root_dir, folder,imgname) image = Image.open(img_path) image=np.array(image) clip.append(image) if self.transform: clip=np.asarray(clip) clip=np.transpose(clip, (0,3,1,2)) clip = torch.from_numpy(np.asarray(clip)) if len(self.labels)!=0: sample={'clip':clip,'Label':Label,'folder':folder} else: sample={'clip':clip,'folder':folder} return sample clip_dataset=ActionClipDataset(root_dir='./data/trainClips/', labels=label_train,transform=T.ToTensor())#/home/tqvinh/Study/CSE512/cse512-s18/hw2data/trainClips/ for i in range(10): sample=clip_dataset[i] print(sample['clip'].shape) print(sample['Label']) print(sample['folder']) # In[50]: clip_dataloader = DataLoader(clip_dataset, batch_size=4, shuffle=True, num_workers=4) for i,sample in enumerate(clip_dataloader): print(i,sample['clip'].shape,sample['folder'],sample['Label']) if i>20: break # In[51]: clip_dataset_train=ActionClipDataset(root_dir='./data/trainClips/',labels=label_train,transform=T.ToTensor()) clip_dataloader_train = DataLoader(clip_dataset_train, batch_size=16, shuffle=True, num_workers=4) clip_dataset_val=ActionClipDataset(root_dir='./data/valClips/',labels=label_val,transform=T.ToTensor()) clip_dataloader_val = DataLoader(clip_dataset_val, batch_size=16, shuffle=True, num_workers=4) clip_dataset_test=ActionClipDataset(root_dir='./data/testClips/',labels=[],transform=T.ToTensor()) clip_dataloader_test = DataLoader(clip_dataset_test, batch_size=16, shuffle=False, num_workers=4) # Write the Flatten for 3d covolution feature maps. 
# In[52]: class Flatten3d(nn.Module): def forward(self, x): ###############6th To Do (5 points)################### N, C, D, H, W = x.size() # read in N, C, D, H, W return x.view(N, -1) # "flatten" the C * D * H * W values into a single vector per image # Design a network using 3D convolution on videos for video classification. # In[58]: fixed_model_3d = nn.Sequential( # You fill this in! ###############7th To Do (16 points)######################### nn.Conv3d(in_channels = 3, out_channels = 50, kernel_size = 2, stride = 1), nn.ReLU(inplace=True), nn.MaxPool3d((1, 2, 2), stride = 2), nn.Conv3d(in_channels = 50, out_channels = 100, kernel_size = (1, 3, 3), stride = 1), nn.ReLU(inplace = True), nn.MaxPool3d((1, 3, 3), stride = 2), nn.Dropout3d(0.1), Flatten3d(), nn.ReLU(inplace=True), nn.Linear(19600, 10), nn.LogSoftmax() ############################### ) fixed_model_3d = fixed_model_3d.type(dtype) x = torch.randn(32,3, 3, 64, 64).type(dtype) x_var = Variable(x).type(dtype) # Construct a PyTorch Variable out of your input data ans = fixed_model_3d(x_var) np.array_equal(np.array(ans.size()), np.array([32, 10])) #Accuracy 62 iterations 6 # ### Describe what you did (4 points) # # In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network. 
# 8th To Do
# Tell us here:
# * 2X2X2 Convolution layer with 50 filters
# * ReLU layer inplace True
# * Max Pooling layer with window size (1, 2, 2) stride = 2
# * 1X3X3 Convolution layer with 100 filters
# * ReLU layer with inplace True
# * Max Pooling layer with window size (1, 3, 3) stride = 2
# * dropout layer with penalty 0.1
# * flattening
# * ReLU layer with inplace True
# * Affine layer
# * LogSoftmax Layer

# In[59]:


loss_fn = nn.CrossEntropyLoss().type(dtype)
optimizer = optim.RMSprop(fixed_model_3d.parameters(), lr=1e-4)


# In[60]:


def train_3d(model, loss_fn, optimizer, dataloader, num_epochs=1):
    """Train the 3D-convolution `model` for `num_epochs` epochs over `dataloader`.

    Each sample dict provides a 'clip' tensor (presumably N x C x D x H x W
    -- shaped by ActionClipDataset above) and an integer 'Label'.
    Validation accuracy is printed at the start of every epoch;
    `print_every`, `dtype` and `clip_dataloader_val` are module-level
    globals.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        # Evaluate the model being trained.  The original passed the global
        # `fixed_model_3d` here, which reported accuracy for the wrong
        # network whenever train_3d() was called with a different model.
        check_accuracy_3d(model, clip_dataloader_val)
        model.train()
        for t, sample in enumerate(dataloader):
            x_var = Variable(sample['clip'].type(dtype))
            y_var = Variable(sample['Label'].type(dtype).long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            if (t + 1) % print_every == 0:
                print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


def check_accuracy_3d(model, loader):
    """Print and return the fraction of clips in `loader` that `model` classifies correctly."""
    num_correct = 0
    num_samples = 0
    model.eval()  # Put the model in test mode (the opposite of model.train(), essentially)
    for t, sample in enumerate(loader):
        x_var = Variable(sample['clip'].type(dtype))
        # `dtype` is the CPU FloatTensor, so the original's extra
        # `y_var = y_var.cpu()` was a no-op and is dropped.
        y_var = sample['Label'].type(dtype)
        scores = model(x_var)
        _, preds = scores.data.cpu().max(1)
        num_correct += (preds.numpy() == y_var.numpy()).sum()
        num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
    return acc


# In[61]:


torch.cuda.random.manual_seed(782374)
fixed_model_3d.apply(reset)
fixed_model_3d.train()
train_3d(fixed_model_3d, loss_fn, optimizer, clip_dataloader_train, num_epochs=5)
fixed_model_3d.eval()
check_accuracy_3d(fixed_model_3d, clip_dataloader_train)
check_accuracy_3d(fixed_model_3d, clip_dataloader_val)


# Test your 3d convolution model on the validation set. You don't need to submit the result of this part to kaggle.

# Test your model on the test set, predict_on_test_3d() will generate a file named 'results_3d.csv'. Please submit the csv file to kaggle https://www.kaggle.com/c/cse512springhw3video
# The highest 3 entries get extra 10 points.
#

# In[62]:


def predict_on_test_3d(model, loader):
    """Run `model` over the test `loader` and write predictions to results_3d.csv.

    The CSV has a header row 'Id,Class' followed by one 'id,predicted-class'
    row per clip.  Returns the number of predictions written.  `dtype` is a
    module-level global.
    """
    model.eval()  # Put the model in test mode (the opposite of model.train(), essentially)
    count = 0
    # `with` guarantees the file is closed (and its buffer flushed) even if
    # the model raises mid-way; the original leaked the handle on error.
    with open('results_3d.csv', 'w') as results:
        results.write('Id' + ',' + 'Class' + '\n')
        for t, sample in enumerate(loader):
            x_var = Variable(sample['clip'].type(dtype))
            scores = model(x_var)
            # Predicted class per clip = argmax over class scores.
            _, preds = scores.data.max(1)
            for i in range(len(preds)):
                results.write(str(count) + ',' + str(preds[i]) + '\n')
                count += 1
    return count


count = predict_on_test_3d(fixed_model_3d, clip_dataloader_test)
print(count)

# * Rank on kaggle: 27
# * Score: 61.80428

# In[ ]:
en
0.796817
#!/usr/bin/env python # coding: utf-8 # # Training a ConvNet PyTorch # # In this notebook, you'll learn how to use the powerful PyTorch framework to specify a conv net architecture and train it on the human action recognition dataset. # # In[30]: #ip install -r requirements.txt # In[1]: # ## What's this PyTorch business? # # * When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly. # * this notebook will walk you through much of what you need to do to train models using pytorch. if you want to learn more or need further clarification on topics that aren't fully explained here, here are 2 good Pytorch tutorials. 1): http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html 2)http://pytorch.org/tutorials/beginner/pytorch_with_examples.html # * It's not necessary to have a GPU for this homework, using a GPU can make your code run faster. # # ## Load Datasets # # In this part, we will load the action recognition dataset for the neural network. In order to load data from our custom dataset, we need to write a custom Dataloader. If you put q3_2_data.mat, /valClips,/trainClips,/testClips under the folder of ./data/ , you do not need to change anything in this part. # First, load the labels of the dataset, you should write your path of the q3_2_data.mat file. # In[18]: # In[2]: # In[37]: # In[48]: # In[24]: # In[41]: # ### Dataset class # # torch.utils.data.Dataset is an abstract class representing a dataset. The custom dataset should inherit Dataset and override the following methods: # # __len__ so that len(dataset) returns the size of the dataset. # __getitem__ to support the indexing such that dataset[i] can be used to get ith sample # # Let’s create a dataset class for our action recognition dataset. We will read images in __getitem__. 
This is memory efficient because all the images are not stored in the memory at once but read as required. # # Sample of our dataset will be a dict {'image':image,'img_path':img_path,'Label':Label}. Our datset will take an optional argument transform so that any required processing can be applied on the sample. # In[3]: Action dataset. Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. # In[5]: # image_dataset=ActionDataset(root_dir='/home/adewopva/Downloads/CNN_AR/CNN-Action-Recognition-master/data/trainClips/',\ # labels=label_train,transform=T.ToTensor()) # #iterating though the dataset # for i in range(10): # sample=image_dataset[i] # print(sample['image'].shape) # print(sample['Label']) # print(sample['img_path']) # In[1]: For the given path, get the List of all files in the directory tree # create a list of file and sub directories # names in the given directory # Iterate over all the entries # Create full path # If entry is a directory then get the list of files in this directory # In[7]: # In[77]: # In[14]: # In[16]: #V Current as of 2am 1/26 Action dataset. Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. #we shall store all the file names in this list #print all the file names #your_path = img/path1 #print(sample) #iterating though the dataset # In[17]: #V Current as of 2am 1/26 Action dataset. Args: root_dir (string): Directory with all the images. labels(list): labels if images. transform (callable, optional): Optional transform to be applied on a sample. 
#we shall store all the file names in this list #img_path1=[] #img_path=var #your_path = img/path1 #print(sample) #break #iterating though the dataset # In[ ]: # In[22]: # Working Full Code for video with single action and Multiple actions # In[5]: #img_path1=[] # In[11]: #file=img_path.splitext(filename) # In[9]: # In[5]: # In[6]: # #!/usr/bin/python # # -*- coding: utf-8 -*- # class ActionDataset(Dataset): # """Action dataset.""" # def __init__( # self, # root_dir, # labels=[], # transform=None, # ): # """ # Args: # root_dir (string): Directory with all the images. # labels(list): labels if images. # transform (callable, optional): Optional transform to be applied on a sample. # """ # self.root_dir = root_dir # self.transform = transform # self.length = len(os.listdir(self.root_dir)) # self.labels = labels # def __len__(self): # return self.length * 3 # def __getitem__(self, idx): # root = self.root_dir # # we shall store all the file names in this list # img_path1 = [] # for (root, dirs, files) in os.walk(root): # for file in files: # # append the file name to the list # img_path1.append(os.path.join(root, file)) # return img_path1 # # print all the file names # for name in img_path1: # img_path = name # image = Image.open(img_path) # # your_path = imgpath1 # label1 = img_path.split(os.sep) # labels_name = { # 'on_feet': 0, # 'active': 1, # 'rest': 2, # 'escape': 3, # 'crawling': 4, # } # label2 = label1[10] # Label = labels_name[label2] # if self.transform: # image = self.transform(image) # if len(self.labels) != 0: # sample = {'image': image, 'img_path': img_path, # 'Label': Label} # else: # sample = {'image': image, 'img_path': img_path} # return sample # image_dataset = ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/' # , labels=vlabel_train, transform=T.ToTensor()) # # iterating though the dataset # for i in range(10): # sample1 = image_dataset[i] # print (sample1['image'].shape) # print 
(sample1['Label']) # print (sample1['img_path']) # In[19]: #iterating though the dataset # We can iterate over the created dataset with a 'for' loop as before. However, we are losing a lot of features by using a simple for loop to iterate over the data. In particular, we are missing out on: # # * Batching the data # * Shuffling the data # * Load the data in parallel using multiprocessing workers. # # torch.utils.data.DataLoader is an iterator which provides all these features. # Dataloaders for the training, validationg and testing set. # In[38]: # In[39]: # the CPU datatype # Constant to control how frequently we print train loss # This is a little utility that we'll use to reset the model # if we want to re-initialize all our parameters # ## Example Model # # ### Some assorted tidbits # # Let's start by looking at a simple model. First, note that PyTorch operates on Tensors, which are n-dimensional arrays functionally analogous to numpy's ndarrays, with the additional feature that they can be used for computations on GPUs. # # We'll provide you with a Flatten function, which we explain here. Remember that our image data (and more relevantly, our intermediate feature maps) are initially N x C x H x W, where: # * N is the number of datapoints # * C is the number of image channels. # * H is the height of the intermediate feature map in pixels # * W is the height of the intermediate feature map in pixels # # This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we input data into fully connected affine layers, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "Flatten" operation to collapse the C x H x W values per representation into a single long vector. 
The Flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly). # In[40]: # read in N, C, H, W # "flatten" the C * H * W values into a single vector per image # ### The example model itself # # The first step to training your own model is defining its architecture. # # Here's an example of a convolutional neural network defined in PyTorch -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. nn.Sequential is a container which applies each layer # one after the other. # # In this example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Cross-Entropy loss function, and the Adam optimizer being used. # # Make sure you understand why the parameters of the Linear layer are 10092 and 10. # # In[9]: # Here's where we define the architecture of the model... # see above for explanation # affine layer # Set the type of all data in this model to be FloatTensor # lr sets the learning rate of the optimizer # PyTorch supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). 
# # * Layers: http://pytorch.org/docs/nn.html # * Activations: http://pytorch.org/docs/nn.html#non-linear-activations # * Loss functions: http://pytorch.org/docs/nn.html#loss-functions # * Optimizers: http://pytorch.org/docs/optim.html#algorithms # ## Training a specific model # # In this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the PyTorch documentation and configuring your own model. # # Using the code provided above as guidance, and using the following PyTorch documentation, specify a model with the following architecture: # # * 7x7 Convolutional Layer with 8 filters and stride of 1 # * ReLU Activation Layer # * 2x2 Max Pooling layer with a stride of 2 # * 7x7 Convolutional Layer with 16 filters and stride of 1 # * ReLU Activation Layer # * 2x2 Max Pooling layer with a stride of 2 # * Flatten the feature map # * ReLU Activation Layer # * Affine layer to map input units to 10 outputs, you need to figure out the input size here. # # In[10]: #########1st To Do (10 points)################### #################################### # To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 32 x 10, since our batches have size 32 and the output of the final affine layer should be 10, corresponding to our 10 classes): # In[11]: ## Now we're going to feed a random batch into the model you defined and make sure the output is the right size # Construct a PyTorch Variable out of your input data # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly # ### Train the model. 
# # Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the fixed_model_base we provided above). # # Make sure you understand how each PyTorch function used below corresponds to what you implemented in your custom neural network implementation. # # Note that because we are not resetting the weights anywhere below, if you run the cell multiple times, you are effectively training multiple epochs (so your performance should improve). # # First, set up an RMSprop optimizer (using a 1e-4 learning rate) and a cross-entropy loss function: # In[31]: ################ 2nd To Do (5 points)################## #optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001) #loss_fn = nn.MultiMarginLoss() # In[37]: # This sets the model in "training" mode. # This is relevant for some layers that may have different behavior # in training mode vs testing mode, such as Dropout and BatchNorm. # Load one batch at a time. #print(type(x_var.data)) #print(x_var.shape) # This is the forward pass: predict the scores for each class, for each x in the batch. # Use the correct y values and the predicted y values to compute the loss. # Zero out all of the gradients for the variables which the optimizer will update. # This is the backwards pass: compute the gradient of the loss with respect to each # parameter of the model. # Actually update the parameters of the model using the gradients computed by the backwards pass. # Now you've seen how the training process works in PyTorch. 
To save you writing boilerplate code, we're providing the following helper functions to help you train for multiple epochs and check the accuracy of your model: # In[41]: # check accuracy on the training set if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') # Put the model in test mode (the opposite of model.train(), essentially) #y_var=y_var.cpu() #scores.data.cpu().max(1) #print(preds) #print(y_var) # ### Check the accuracy of the model. # # Let's see the train and check_accuracy code in action -- feel free to use these methods when evaluating the models you develop below. # # You should get a training loss of around 1.0-1.2, and a validation accuracy of around 50-60%. As mentioned above, if you re-run the cells, you'll be training more epochs, so your performance will improve past these numbers. # # But don't worry about getting these numbers better -- this was just practice before you tackle designing your own model. # In[39]: # check accuracy on the training set # ### Don't forget the validation set! # # And note that you can use the check_accuracy function to evaluate on the validation set, by passing **image_dataloader_val** as the second argument to check_accuracy. The accuracy on validation set is arround 40-50%. # In[40]: #check accuracy on the validation set # ##### Train a better model for action recognition! # # Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves better accuracy on the action recognition **validation** set. You can use the check_accuracy and train functions from above. # In[42]: ###########3rd To Do (16 points, must submit the results to Kaggle) ############## # Train your model here, and make sure the output of this cell is the accuracy of your best model on the # train, val, and test sets. Here's some code to get you started. 
The output of this cell should be the training # and validation accuracy on your best model (measured by validation accuracy). #################################### #optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001) # ### Describe what you did # # In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network. # ### Tell us here! # ########### 4th To Do (4 points) ############## # * 10X10 Convolution layer with 200 filters with stride 3 # * ReLU layer # * Max Pool layer with window size 3X3 with stride 1 # * Batch Norm layer with input size 200 # * Dropout layer with penalty 0.1 # * 5X5 Convolution layer with 100 filters with stride 2 # * ReLU layer # * Max Pool layer with window size 3X3 with stride 1 # * Batch Norm layer with input size 100 # * Dropout layer with penalty 0.2 # * 3X3 Convolution layer with 50 filters and stride 1 # * ReLU layer # * Max Pool layer with window size 2 and stride 1 # * Batch Norm layer with input size 50 # * Flatten # * affine layer to reduce inputs from 200 to 100 # * affine layer to reduce inputs from 100 to 50 # * affine layer to reduce inputs from 50 to 10 # * logsoftmaxing layer # ### Testing the model and submit on Kaggle # Testing the model on the testing set and save the results as a .csv file. # Please submitted the results.csv file generated by predict_on_test() to Kaggle(https://www.kaggle.com/c/cse512springhw3) to see how well your network performs on the test set. # #######5th To Do (submit the result to Kaggle,the highest 3 entries get extra 10 points )############### # # * Rank: 10 # * Score: 70.34658 # In[ ]: # In[43]: ## Now we're going to feed a random batch into the model you defined and make sure the output is the right size # Construct a PyTorch Variable out of your input data # Feed it through the model! 
# Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly # In[78]: # check accuracy on the training set # In[79]: # check accuracy on the training set # ### Things you should try: # - **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient # - **Number of filters**: Do more or fewer do better? # - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions? # - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster? # - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include: # - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM] # - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture). # - **Regularization**: Add l2 weight regularization, or perhaps use Dropout. # # ### Tips for training # For each network architecture that you try, you should tune the learning rate and regularization strength. 
When doing this there are a couple important things to keep in mind: # # - If the parameters are working well, you should see improvement within a few hundred iterations # - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all. # - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs. # - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set. # # ### Going above and beyond # If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try. # # - Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta. # - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut. # - Model ensembles # - Data augmentation # - New Architectures # - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output. # - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together. # - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32) # # If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below. # # ### What we expect # At the very least, you should be able to train a ConvNet that gets at least 55% accuracy on the validation set. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! 
Extra credit points will be awarded for particularly high-scoring models or unique approaches. # # You should use the space below to experiment and train your network. # # # In[ ]: # ### GPU! (This part is optional, 0 points) # # If you have access to GPU, you can make the code run on GPU, it would be much faster. # # Now, we're going to switch the dtype of the model and our data to the GPU-friendly tensors, and see what happens... everything is the same, except we are casting our model and input tensors as this new dtype instead of the old one. # # If this returns false, or otherwise fails in a not-graceful way (i.e., with some error message), you may not have an NVIDIA GPU available on your machine. # In[75]: # Verify that CUDA is properly configured and you have a GPU available # In[76]: #.type(gpu_dtype) #.type(gpu_dtype) #type(gpu_dtype)) # Construct a PyTorch Variable out of your input data # Feed it through the model! # Check to make sure what comes out of your model # is the right dimensionality... this should be True # if you've done everything correctly # Run the following cell to evaluate the performance of the forward pass running on the CPU: # In[77]: # ... and now the GPU: # In[78]: # Make sure there are no pending GPU computations\nans = fixed_model_gpu(x_var_gpu) # Feed it through the model! \ntorch.cuda.synchronize() # Make sure there are no pending GPU computations') # You should observe that even a simple forward pass like this is significantly faster on the GPU. 
So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use the GPU datatype for your model and your tensors: as a reminder that is *torch.cuda.FloatTensor* (in our notebook here as *gpu_dtype*) # Let's make the loss function and training variables to GPU friendly format by '.cuda()' # In[79]: # In[80]: # check accuracy on the training set if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') # Put the model in test mode (the opposite of model.train(), essentially) #print(preds) #print(y_var) # Run on GPU! # In[47]: # check accuracy on the training set # In[48]: # check accuracy on the training set # In[46]: if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') # Put the model in test mode (the opposite of model.train(), essentially) # ### 3D Convolution on video clips (25 points+10 extra points) # 3D convolution is for videos, it has one more dimension than 2d convolution. You can find the document for 3D convolution here http://pytorch.org/docs/master/nn.html#torch.nn.Conv3dIn. In our dataset, each clip is a video of 3 frames. Lets classify the each clip rather than each image using 3D convolution. # We offer the data loader, the train_3d and check_accuracy # In[49]: Action Landmarks dataset. Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. #/home/tqvinh/Study/CSE512/cse512-s18/hw2data/trainClips/ # In[50]: # In[51]: # Write the Flatten for 3d covolution feature maps. # In[52]: ###############6th To Do (5 points)################### # read in N, C, D, H, W # "flatten" the C * D * H * W values into a single vector per image # Design a network using 3D convolution on videos for video classification. # In[58]: # You fill this in! 
###############7th To Do (16 points)######################### ############################### # Construct a PyTorch Variable out of your input data #Accuracy 62 iterations 6 # ### Describe what you did (4 points) # # In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network. # 8th To Do # Tell us here: # * 2X2X2 Convolution layer with 50 filters # * ReLU layer inplace True # * Max Pooling layer with window size (1, 2, 2) stride = 2 # * 1X3X3 Convolution layer with 100 filters # * ReLU layer with inplace True # * Max Pooling layer with window size (1, 3, 3) stride = 2 # * dropout layer with penalty 0.1 # * flattening # * ReLU layer with inplace True # * Affine layer # * LogSoftmax Layer # In[59]: # In[60]: if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') # Put the model in test mode (the opposite of model.train(), essentially) #print(preds) #print(y_var) # In[61]: # Test your 3d convolution model on the validation set. You don't need to submit the result of this part to kaggle. # Test your model on the test set, predict_on_test_3d() will generate a file named 'results_3d.csv'. Please submit the csv file to kaggle https://www.kaggle.com/c/cse512springhw3video # The highest 3 entries get extra 10 points. # # In[62]: if loader.dataset.train: print('Checking accuracy on validation set') else: print('Checking accuracy on test set') # Put the model in test mode (the opposite of model.train(), essentially) # * Rank on kaggle: 27 # * Score: 61.80428 # In[ ]:
4.100268
4
djangophysics/units/migrations/0008_auto_20210603_2248.py
fmeurou/django-physics
1
6621543
# Generated by Django 3.2.3 on 2021-06-03 22:48

from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    """Tighten the CustomDimension model.

    Adds a default ordering, clarifies the verbose name of the technical
    ``code`` field, and enforces per-user/key/code uniqueness.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('units', '0007_customdimension'),
    ]

    operations = [
        # Order dimensions by human-readable name first, then by code.
        migrations.AlterModelOptions(
            name='customdimension',
            options={'ordering': ['name', 'code']},
        ),
        migrations.AlterField(
            model_name='customdimension',
            name='code',
            field=models.CharField(max_length=255, verbose_name='technical name of the dimension (e.g.: [myDimension])'),
        ),
        # A given user may not reuse the same code under the same key.
        migrations.AlterUniqueTogether(
            name='customdimension',
            unique_together={('user', 'key', 'code')},
        ),
    ]
# Generated by Django 3.2.3 on 2021-06-03 22:48 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('units', '0007_customdimension'), ] operations = [ migrations.AlterModelOptions( name='customdimension', options={'ordering': ['name', 'code']}, ), migrations.AlterField( model_name='customdimension', name='code', field=models.CharField(max_length=255, verbose_name='technical name of the dimension (e.g.: [myDimension])'), ), migrations.AlterUniqueTogether( name='customdimension', unique_together={('user', 'key', 'code')}, ), ]
en
0.876073
# Generated by Django 3.2.3 on 2021-06-03 22:48
1.696179
2
Lab10/checkpoint5.py
demsks/CSCI2961
0
6621544
from pymongo import MongoClient
from random import randint
import datetime

client = MongoClient()
db = client.csci2963
definitions = db.definitions


def random_word_requester():
    """Return a random word from the definitions collection.

    Also records the access time by prepending the current timestamp to
    the document's ``date`` list and saving it back to MongoDB.

    Returns:
        The ``word`` field of the randomly selected document, or None
        when the collection is empty.
    """
    total = definitions.count()
    # Robustness: randint(0, -1) raises ValueError on an empty collection.
    if total == 0:
        return None
    # Bug fix: random.randint is inclusive on BOTH ends, so the original
    # randint(0, total) could yield index == total and raise IndexError.
    index = randint(0, total - 1)
    target = definitions.find()[index]
    word = target.get("word")
    # Bug fix: dict.has_key() was removed in Python 3; use `in` instead.
    if "date" not in target:
        target["date"] = []
    target["date"].insert(0, str(datetime.datetime.now()))
    definitions.save(target)
    return word


if __name__ == '__main__':
    # Bug fix: `print x` is Python-2-only syntax; use the print function.
    print(random_word_requester())
<filename>Lab10/checkpoint5.py from pymongo import MongoClient from random import randint import datetime client = MongoClient() db = client.csci2963 definitions = db.definitions def random_word_requester(): total = definitions.count() index = randint(0, total) target = definitions.find()[index] word = target.get("word") if not target.has_key("date"): target["date"] = [] target.get("date").insert(0, str(datetime.datetime.now())) definitions.save(target) return word if __name__ == '__main__': print random_word_requester()
none
1
2.563196
3
postpy/ddl.py
portfoliome/pgdabble
24
6621545
""" ddl.py contains the Data Definition Language for Postgresql Server. """ from psycopg2.extensions import AsIs def compile_qualified_name(table: str, schema='public') -> str: """Format table's fully qualified name string.""" return '{}.{}'.format(schema, table) def compile_create_table(qualified_name: str, column_statement: str, primary_key_statement: str) -> str: """Postgresql Create Table statement formatter.""" statement = """ CREATE TABLE {table} ({columns} {primary_keys}); """.format(table=qualified_name, columns=column_statement, primary_keys=primary_key_statement) return statement def compile_create_temporary_table(table_name: str, column_statement: str, primary_key_statement: str) -> str: """Postgresql Create Temporary Table statement formatter.""" statement = """ CREATE TEMPORARY TABLE {table} ({columns} {primary_keys}); """.format(table=table_name, columns=column_statement, primary_keys=primary_key_statement) return statement def compile_column(name: str, data_type: str, nullable: bool) -> str: """Create column definition statement.""" null_str = 'NULL' if nullable else 'NOT NULL' return '{name} {data_type} {null},'.format(name=name, data_type=data_type, null=null_str) def compile_primary_key(column_names): return 'PRIMARY KEY ({})'.format(', '.join(column_names)) class CreateTableAs: def __init__(self, table, parent_table, columns=('*',), *, clause): self.table = table self.parent_table = parent_table self.columns = columns self.column_str = ', '.join(columns) self.clause = clause def compile(self): statement = '{create} ({select} {clause})'.format( create=self._create_statement(), select=self._select_statement(), clause=self._clause_statement()) return statement def compile_with_cte(self, common_table_expression): statement = '{create} (WITH {cte} {select} {clause})'.format( create=self._create_statement(), cte=common_table_expression, select=self._select_statement(), clause=self._clause_statement()) return statement def _create_statement(self): 
return 'CREATE TABLE {} AS'.format(self.table) def _select_statement(self): return '\n SELECT {column_str} \n FROM {parent_table}'.format( column_str=self.column_str, parent_table=self.parent_table) def _clause_statement(self): return '\n WHERE %s' % self.clause class MaterializedView: """Postgres materialized view declaration formatter.""" def __init__(self, name, query='', query_values=None): self.name = name self.query = query self.query_values = query_values def create(self, no_data=False): """Declare materalized view.""" if self.query: ddl_statement = self.compile_create_as() else: ddl_statement = self.compile_create() if no_data: ddl_statement += '\nWITH NO DATA' return ddl_statement, self.query_values def compile_create(self): """Materalized view.""" return 'CREATE MATERIALIZED VIEW {}'.format(AsIs(self.name)) def compile_create_as(self): """Build from a select statement.""" return '{} AS \n {}'.format(self.compile_create(), self.query) def refresh(self): """Refresh a materialized view.""" return 'REFRESH MATERIALIZED VIEW {}'.format(AsIs(self.name)) def drop(self): return 'DROP MATERIALIZED VIEW {}'.format(AsIs(self.name))
""" ddl.py contains the Data Definition Language for Postgresql Server. """ from psycopg2.extensions import AsIs def compile_qualified_name(table: str, schema='public') -> str: """Format table's fully qualified name string.""" return '{}.{}'.format(schema, table) def compile_create_table(qualified_name: str, column_statement: str, primary_key_statement: str) -> str: """Postgresql Create Table statement formatter.""" statement = """ CREATE TABLE {table} ({columns} {primary_keys}); """.format(table=qualified_name, columns=column_statement, primary_keys=primary_key_statement) return statement def compile_create_temporary_table(table_name: str, column_statement: str, primary_key_statement: str) -> str: """Postgresql Create Temporary Table statement formatter.""" statement = """ CREATE TEMPORARY TABLE {table} ({columns} {primary_keys}); """.format(table=table_name, columns=column_statement, primary_keys=primary_key_statement) return statement def compile_column(name: str, data_type: str, nullable: bool) -> str: """Create column definition statement.""" null_str = 'NULL' if nullable else 'NOT NULL' return '{name} {data_type} {null},'.format(name=name, data_type=data_type, null=null_str) def compile_primary_key(column_names): return 'PRIMARY KEY ({})'.format(', '.join(column_names)) class CreateTableAs: def __init__(self, table, parent_table, columns=('*',), *, clause): self.table = table self.parent_table = parent_table self.columns = columns self.column_str = ', '.join(columns) self.clause = clause def compile(self): statement = '{create} ({select} {clause})'.format( create=self._create_statement(), select=self._select_statement(), clause=self._clause_statement()) return statement def compile_with_cte(self, common_table_expression): statement = '{create} (WITH {cte} {select} {clause})'.format( create=self._create_statement(), cte=common_table_expression, select=self._select_statement(), clause=self._clause_statement()) return statement def _create_statement(self): 
return 'CREATE TABLE {} AS'.format(self.table) def _select_statement(self): return '\n SELECT {column_str} \n FROM {parent_table}'.format( column_str=self.column_str, parent_table=self.parent_table) def _clause_statement(self): return '\n WHERE %s' % self.clause class MaterializedView: """Postgres materialized view declaration formatter.""" def __init__(self, name, query='', query_values=None): self.name = name self.query = query self.query_values = query_values def create(self, no_data=False): """Declare materalized view.""" if self.query: ddl_statement = self.compile_create_as() else: ddl_statement = self.compile_create() if no_data: ddl_statement += '\nWITH NO DATA' return ddl_statement, self.query_values def compile_create(self): """Materalized view.""" return 'CREATE MATERIALIZED VIEW {}'.format(AsIs(self.name)) def compile_create_as(self): """Build from a select statement.""" return '{} AS \n {}'.format(self.compile_create(), self.query) def refresh(self): """Refresh a materialized view.""" return 'REFRESH MATERIALIZED VIEW {}'.format(AsIs(self.name)) def drop(self): return 'DROP MATERIALIZED VIEW {}'.format(AsIs(self.name))
en
0.526439
ddl.py contains the Data Definition Language for Postgresql Server. Format table's fully qualified name string. Postgresql Create Table statement formatter. CREATE TABLE {table} ({columns} {primary_keys}); Postgresql Create Temporary Table statement formatter. CREATE TEMPORARY TABLE {table} ({columns} {primary_keys}); Create column definition statement. Postgres materialized view declaration formatter. Declare materalized view. Materalized view. Build from a select statement. Refresh a materialized view.
3.050603
3
use_cases/BBerliner_tests.py
czack425/openc2_schema_tests
0
6621546
<filename>use_cases/BBerliner_tests.py<gh_stars>0 """ OpenC2 Profile Use Case Command/Response Pairs from <NAME> """ import unittest from .test_setup import SetupTests, ValidationError from .utils import check_profiles_skip profile = "BBerliner" @unittest.skipIf(check_profiles_skip(profile), f"{profile} Profile tests not specified") class PROFILE_UseCases(SetupTests): # Dynamic Validation Variables profile = profile # Static Validation Functions
<filename>use_cases/BBerliner_tests.py<gh_stars>0 """ OpenC2 Profile Use Case Command/Response Pairs from <NAME> """ import unittest from .test_setup import SetupTests, ValidationError from .utils import check_profiles_skip profile = "BBerliner" @unittest.skipIf(check_profiles_skip(profile), f"{profile} Profile tests not specified") class PROFILE_UseCases(SetupTests): # Dynamic Validation Variables profile = profile # Static Validation Functions
en
0.628155
OpenC2 Profile Use Case Command/Response Pairs from <NAME> # Dynamic Validation Variables # Static Validation Functions
1.814135
2
RL/DQN/settings.py
ColinFred/Reinforce_Learning_Pytorch
0
6621547
<filename>RL/DQN/settings.py # hyper-parameters MEMORY_CAPACITY = 1000 C = 50 BATCH_SIZE = 32 LR = 0.01 GAMMA = 0.90 EPISILO = 0.9 TEST_EPISODE = 30
<filename>RL/DQN/settings.py # hyper-parameters MEMORY_CAPACITY = 1000 C = 50 BATCH_SIZE = 32 LR = 0.01 GAMMA = 0.90 EPISILO = 0.9 TEST_EPISODE = 30
fi
0.260391
# hyper-parameters
1.123773
1
app/migrations/0012_auto_20160315_1419.py
westos007/OMS-1
32
6621548
<reponame>westos007/OMS-1 # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0011_network'), ] operations = [ migrations.AlterField( model_name='network', name='info', field=models.CharField(max_length=100, verbose_name='\u8bf4\u660e', blank=True), ), migrations.AlterField( model_name='network', name='url', field=models.URLField(max_length=100, verbose_name='\u8bbf\u95ee\u5730\u5740', blank=True), ), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0011_network'), ] operations = [ migrations.AlterField( model_name='network', name='info', field=models.CharField(max_length=100, verbose_name='\u8bf4\u660e', blank=True), ), migrations.AlterField( model_name='network', name='url', field=models.URLField(max_length=100, verbose_name='\u8bbf\u95ee\u5730\u5740', blank=True), ), ]
en
0.769321
# -*- coding: utf-8 -*-
1.516918
2
evaluate.py
shgoren/ShapeWorld
1
6621549
<reponame>shgoren/ShapeWorld import argparse from datetime import datetime from importlib import import_module import json import os import sys from shapeworld import dataset, util from models.TFMacros.tf_macros import Model if __name__ == '__main__': parser = argparse.ArgumentParser(description='Evaluate a model') parser.add_argument('-t', '--type', help='Dataset type') parser.add_argument('-n', '--name', help='Dataset name') parser.add_argument('-l', '--language', default=None, help='Dataset language') parser.add_argument('-c', '--config', type=util.parse_config, default=None, help='Dataset configuration file') parser.add_argument('-p', '--pixel-noise', type=float, default=0.1, help='Pixel noise range') parser.add_argument('-m', '--model', help='Model') parser.add_argument('-y', '--hyperparams-file', default=None, help='Model hyperparameters file (default: hyperparams directory)') parser.add_argument('-i', '--iterations', type=util.parse_int_with_factor, default=1, help='Iterations') parser.add_argument('-b', '--batch-size', type=util.parse_int_with_factor, default=1000, help='Batch size') parser.add_argument('--model-dir', help='TensorFlow model directory, storing the model computation graph and parameters') parser.add_argument('--report-file', default=None, help='CSV file reporting the evaluation results') parser.add_argument('-v', '--verbosity', type=int, choices=(0, 1, 2), default=1, help='Verbosity (0: nothing, 1: default, 2: TensorFlow)') parser.add_argument('--query', default=None, help='Experimental: Values to query (separated by commas)') parser.add_argument('--serialize', default=None, help='Experimental: Values to serialize (requires --evaluate) (separated by commas)') args = parser.parse_args() # import tensorflow if args.verbosity >= 2: os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' else: os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf assert args.model_dir is not None # dataset dataset = dataset(dtype=args.type, name=args.name, 
language=args.language, config=args.config) # information about dataset and model if args.verbosity >= 1: sys.stdout.write('{time} {dataset}\n'.format(time=datetime.now().strftime('%H:%M:%S'), dataset=dataset)) sys.stdout.write(' config: {}\n'.format(args.config)) sys.stdout.write(' {} model: {}\n'.format(args.type, args.model)) sys.stdout.write(' hyperparameters: {}\n'.format(args.hyperparameters)) sys.stdout.flush() if args.type == 'agreement': parameters = dict( world_shape=dataset.world_shape, vocabulary_size=dataset.vocabulary_size(value_type='language'), caption_shape=dataset.vector_shape(value_name='caption') ) query = ('agreement_accuracy',) serialize = () elif args.type == 'classification': parameters = dict( world_shape=dataset.world_shape, num_classes=dataset.num_classes, multi_class=dataset.multi_class, class_count=dataset.class_count ) query = ('classification_fscore', 'classification_precision', 'classification_recall') serialize = () elif args.type == 'clevr_classification': parameters = dict( world_shape=dataset.world_shape, vocabulary_size=dataset.vocabulary_size, question_shape=dataset.vector_shape('question'), num_answers=len(dataset.answers) ) query = ('answer_fscore', 'answer_precision', 'answer_recall') serialize = () else: assert False if args.query: query = tuple(args.query.split(',')) if args.serialize: serialize = tuple(args.serialize.split(',')) query += serialize if args.hyperparams_file is None: with open(os.path.join('models', dataset.type, 'hyperparams', args.model + '.params.json'), 'r') as filehandle: parameters.update(json.load(fp=filehandle)) else: with open(args.hyperparams_file, 'r') as filehandle: parameters.update(json.load(fp=filehandle)) # restore iteration_start = 1 if args.report_file: with open(args.report_file, 'r') as filehandle: for line in filehandle: value = line.split(',')[0] if value != 'iteration': iteration_start = int(value) + 1 with Model(name=args.model, learning_rate=parameters.pop('learning_rate'), 
weight_decay=parameters.pop('weight_decay', 0.0), model_directory=args.model_dir) as model: parameters.pop('dropout_rate', 0.0) module = import_module('models.{}.{}'.format(args.type, args.model)) module.model(model=model, inputs=dict(), **parameters) # no input tensors, hence None for placeholder creation model.finalize(restore=True) if args.verbosity >= 1: sys.stdout.write(' parameters: {:,}\n'.format(model.num_parameters)) sys.stdout.write(' bytes: {:,}\n'.format(model.num_bytes)) sys.stdout.flush() if args.verbosity >= 1: sys.stdout.write('{} evaluate model...\n'.format(datetime.now().strftime('%H:%M:%S'))) sys.stdout.flush() train = {name: 0.0 for name in query} for _ in range(args.iterations): generated = dataset.generate(n=args.batch_size, mode='train', noise_range=args.pixel_noise) queried = model(query=query, data=generated) train = {name: value + queried[name] for name, value in train.items()} train = {name: value / args.iterations for name, value in train.items()} sys.stdout.write(' train: ') for name in query: sys.stdout.write('{}={:.3f} '.format(name, train[name])) sys.stdout.write('\n') sys.stdout.flush() if serialize: dataset.serialize(path=None, generated=generated, additional={name: (train[name], serialize[name]) for name in serialize}) validation = {name: 0.0 for name in query} for _ in range(args.iterations): generated = dataset.generate(n=args.batch_size, mode='validation', noise_range=args.pixel_noise) queried = model(query=query, data=generated) validation = {name: value + queried[name] for name, value in validation.items()} validation = {name: value / args.iterations for name, value in validation.items()} sys.stdout.write(' validation: ') for name in query: sys.stdout.write('{}={:.3f} '.format(name, validation[name])) sys.stdout.write('\n') sys.stdout.flush() if serialize: dataset.serialize(path=None, generated=generated, additional={name: (validation[name], serialize[name]) for name in serialize}) test = {name: 0.0 for name in query} for _ 
in range(args.iterations): generated = dataset.generate(n=args.batch_size, mode='test', noise_range=args.pixel_noise) queried = model(query=query, data=generated) test = {name: value + queried[name] for name, value in test.items()} test = {name: value / args.iterations for name, value in test.items()} sys.stdout.write(' test: ') for name in query: sys.stdout.write('{}={:.3f} '.format(name, test[name])) sys.stdout.write('\n') sys.stdout.flush() if serialize: dataset.serialize(path=None, generated=generated, additional={name: (test[name], serialize[name]) for name in serialize}) if args.verbosity >= 1: sys.stdout.write('\n{} model evaluation finished\n'.format(datetime.now().strftime('%H:%M:%S'))) sys.stdout.flush() # else: # training # if args.verbosity >= 1: # sys.stdout.write('{} train model...\n'.format(datetime.now().strftime('%H:%M:%S'))) # sys.stdout.flush() # before = datetime.now() # if args.tf_records: # mean = {name: 0.0 for name in query} # n = 0 # for iteration in range(iteration_start, iteration_end + 1): # train = model(query=query, optimize=True, dropout=args.dropout_rate) # loss !!!??? 
# mean = {name: value + train[name] for name, value in mean.items()} # n += 1 # if iteration % args.evaluation_frequency == 0 or iteration == 1 or iteration == args.evaluation_frequency // 2 or iteration == iteration_end: # mean = {name: value / n for name, value in mean.items()} # after = datetime.now() # if args.verbosity >= 1: # sys.stdout.write('\r {:.0f}% {}/{} '.format(iteration * 100 / iteration_end, iteration, iteration_end)) # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, train[name])) # sys.stdout.write('(time per evaluation iteration: {})'.format(str(after - before).split('.')[0])) # sys.stdout.flush() # before = datetime.now() # if args.report_file: # with open(args.report_file, 'a') as filehandle: # filehandle.write(str(iteration)) # for name in query: # filehandle.write(',' + str(train[name])) # filehandle.write('\n') # mean = {name: 0.0 for name in mean} # n = 0 # else: # for iteration in range(iteration_start, iteration_end + 1): # generated = dataset.generate(n=args.batch_size, mode='train', noise_range=args.pixel_noise) # model(data=generated, optimize=True, dropout=args.dropout_rate) # if iteration % args.evaluation_frequency == 0 or iteration == 1 or iteration == args.evaluation_frequency // 2 or iteration == iteration_end: # generated = dataset.generate(n=args.evaluation_size, mode='train', noise_range=args.pixel_noise) # train = model(query=query, data=generated) # generated = dataset.generate(n=args.evaluation_size, mode='validation', noise_range=args.pixel_noise) # validation = model(query=query, data=generated) # after = datetime.now() # if args.verbosity >= 1: # sys.stdout.write('\r {:.0f}% {}/{} '.format(iteration * 100 / iteration_end, iteration, iteration_end)) # sys.stdout.write('train: ') # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, train[name])) # sys.stdout.write(' validation: ') # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, validation[name])) # sys.stdout.write(' (time per 
evaluation iteration: {})'.format(str(after - before).split('.')[0])) # sys.stdout.flush() # before = datetime.now() # if args.report_file: # with open(args.report_file, 'a') as filehandle: # filehandle.write(str(iteration)) # for name in query: # filehandle.write(',' + str(train[name])) # for name in query: # filehandle.write(',' + str(validation[name])) # filehandle.write('\n')
import argparse from datetime import datetime from importlib import import_module import json import os import sys from shapeworld import dataset, util from models.TFMacros.tf_macros import Model if __name__ == '__main__': parser = argparse.ArgumentParser(description='Evaluate a model') parser.add_argument('-t', '--type', help='Dataset type') parser.add_argument('-n', '--name', help='Dataset name') parser.add_argument('-l', '--language', default=None, help='Dataset language') parser.add_argument('-c', '--config', type=util.parse_config, default=None, help='Dataset configuration file') parser.add_argument('-p', '--pixel-noise', type=float, default=0.1, help='Pixel noise range') parser.add_argument('-m', '--model', help='Model') parser.add_argument('-y', '--hyperparams-file', default=None, help='Model hyperparameters file (default: hyperparams directory)') parser.add_argument('-i', '--iterations', type=util.parse_int_with_factor, default=1, help='Iterations') parser.add_argument('-b', '--batch-size', type=util.parse_int_with_factor, default=1000, help='Batch size') parser.add_argument('--model-dir', help='TensorFlow model directory, storing the model computation graph and parameters') parser.add_argument('--report-file', default=None, help='CSV file reporting the evaluation results') parser.add_argument('-v', '--verbosity', type=int, choices=(0, 1, 2), default=1, help='Verbosity (0: nothing, 1: default, 2: TensorFlow)') parser.add_argument('--query', default=None, help='Experimental: Values to query (separated by commas)') parser.add_argument('--serialize', default=None, help='Experimental: Values to serialize (requires --evaluate) (separated by commas)') args = parser.parse_args() # import tensorflow if args.verbosity >= 2: os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' else: os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf assert args.model_dir is not None # dataset dataset = dataset(dtype=args.type, name=args.name, language=args.language, 
config=args.config) # information about dataset and model if args.verbosity >= 1: sys.stdout.write('{time} {dataset}\n'.format(time=datetime.now().strftime('%H:%M:%S'), dataset=dataset)) sys.stdout.write(' config: {}\n'.format(args.config)) sys.stdout.write(' {} model: {}\n'.format(args.type, args.model)) sys.stdout.write(' hyperparameters: {}\n'.format(args.hyperparameters)) sys.stdout.flush() if args.type == 'agreement': parameters = dict( world_shape=dataset.world_shape, vocabulary_size=dataset.vocabulary_size(value_type='language'), caption_shape=dataset.vector_shape(value_name='caption') ) query = ('agreement_accuracy',) serialize = () elif args.type == 'classification': parameters = dict( world_shape=dataset.world_shape, num_classes=dataset.num_classes, multi_class=dataset.multi_class, class_count=dataset.class_count ) query = ('classification_fscore', 'classification_precision', 'classification_recall') serialize = () elif args.type == 'clevr_classification': parameters = dict( world_shape=dataset.world_shape, vocabulary_size=dataset.vocabulary_size, question_shape=dataset.vector_shape('question'), num_answers=len(dataset.answers) ) query = ('answer_fscore', 'answer_precision', 'answer_recall') serialize = () else: assert False if args.query: query = tuple(args.query.split(',')) if args.serialize: serialize = tuple(args.serialize.split(',')) query += serialize if args.hyperparams_file is None: with open(os.path.join('models', dataset.type, 'hyperparams', args.model + '.params.json'), 'r') as filehandle: parameters.update(json.load(fp=filehandle)) else: with open(args.hyperparams_file, 'r') as filehandle: parameters.update(json.load(fp=filehandle)) # restore iteration_start = 1 if args.report_file: with open(args.report_file, 'r') as filehandle: for line in filehandle: value = line.split(',')[0] if value != 'iteration': iteration_start = int(value) + 1 with Model(name=args.model, learning_rate=parameters.pop('learning_rate'), 
weight_decay=parameters.pop('weight_decay', 0.0), model_directory=args.model_dir) as model: parameters.pop('dropout_rate', 0.0) module = import_module('models.{}.{}'.format(args.type, args.model)) module.model(model=model, inputs=dict(), **parameters) # no input tensors, hence None for placeholder creation model.finalize(restore=True) if args.verbosity >= 1: sys.stdout.write(' parameters: {:,}\n'.format(model.num_parameters)) sys.stdout.write(' bytes: {:,}\n'.format(model.num_bytes)) sys.stdout.flush() if args.verbosity >= 1: sys.stdout.write('{} evaluate model...\n'.format(datetime.now().strftime('%H:%M:%S'))) sys.stdout.flush() train = {name: 0.0 for name in query} for _ in range(args.iterations): generated = dataset.generate(n=args.batch_size, mode='train', noise_range=args.pixel_noise) queried = model(query=query, data=generated) train = {name: value + queried[name] for name, value in train.items()} train = {name: value / args.iterations for name, value in train.items()} sys.stdout.write(' train: ') for name in query: sys.stdout.write('{}={:.3f} '.format(name, train[name])) sys.stdout.write('\n') sys.stdout.flush() if serialize: dataset.serialize(path=None, generated=generated, additional={name: (train[name], serialize[name]) for name in serialize}) validation = {name: 0.0 for name in query} for _ in range(args.iterations): generated = dataset.generate(n=args.batch_size, mode='validation', noise_range=args.pixel_noise) queried = model(query=query, data=generated) validation = {name: value + queried[name] for name, value in validation.items()} validation = {name: value / args.iterations for name, value in validation.items()} sys.stdout.write(' validation: ') for name in query: sys.stdout.write('{}={:.3f} '.format(name, validation[name])) sys.stdout.write('\n') sys.stdout.flush() if serialize: dataset.serialize(path=None, generated=generated, additional={name: (validation[name], serialize[name]) for name in serialize}) test = {name: 0.0 for name in query} for _ 
in range(args.iterations): generated = dataset.generate(n=args.batch_size, mode='test', noise_range=args.pixel_noise) queried = model(query=query, data=generated) test = {name: value + queried[name] for name, value in test.items()} test = {name: value / args.iterations for name, value in test.items()} sys.stdout.write(' test: ') for name in query: sys.stdout.write('{}={:.3f} '.format(name, test[name])) sys.stdout.write('\n') sys.stdout.flush() if serialize: dataset.serialize(path=None, generated=generated, additional={name: (test[name], serialize[name]) for name in serialize}) if args.verbosity >= 1: sys.stdout.write('\n{} model evaluation finished\n'.format(datetime.now().strftime('%H:%M:%S'))) sys.stdout.flush() # else: # training # if args.verbosity >= 1: # sys.stdout.write('{} train model...\n'.format(datetime.now().strftime('%H:%M:%S'))) # sys.stdout.flush() # before = datetime.now() # if args.tf_records: # mean = {name: 0.0 for name in query} # n = 0 # for iteration in range(iteration_start, iteration_end + 1): # train = model(query=query, optimize=True, dropout=args.dropout_rate) # loss !!!??? 
# mean = {name: value + train[name] for name, value in mean.items()} # n += 1 # if iteration % args.evaluation_frequency == 0 or iteration == 1 or iteration == args.evaluation_frequency // 2 or iteration == iteration_end: # mean = {name: value / n for name, value in mean.items()} # after = datetime.now() # if args.verbosity >= 1: # sys.stdout.write('\r {:.0f}% {}/{} '.format(iteration * 100 / iteration_end, iteration, iteration_end)) # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, train[name])) # sys.stdout.write('(time per evaluation iteration: {})'.format(str(after - before).split('.')[0])) # sys.stdout.flush() # before = datetime.now() # if args.report_file: # with open(args.report_file, 'a') as filehandle: # filehandle.write(str(iteration)) # for name in query: # filehandle.write(',' + str(train[name])) # filehandle.write('\n') # mean = {name: 0.0 for name in mean} # n = 0 # else: # for iteration in range(iteration_start, iteration_end + 1): # generated = dataset.generate(n=args.batch_size, mode='train', noise_range=args.pixel_noise) # model(data=generated, optimize=True, dropout=args.dropout_rate) # if iteration % args.evaluation_frequency == 0 or iteration == 1 or iteration == args.evaluation_frequency // 2 or iteration == iteration_end: # generated = dataset.generate(n=args.evaluation_size, mode='train', noise_range=args.pixel_noise) # train = model(query=query, data=generated) # generated = dataset.generate(n=args.evaluation_size, mode='validation', noise_range=args.pixel_noise) # validation = model(query=query, data=generated) # after = datetime.now() # if args.verbosity >= 1: # sys.stdout.write('\r {:.0f}% {}/{} '.format(iteration * 100 / iteration_end, iteration, iteration_end)) # sys.stdout.write('train: ') # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, train[name])) # sys.stdout.write(' validation: ') # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, validation[name])) # sys.stdout.write(' (time per 
evaluation iteration: {})'.format(str(after - before).split('.')[0])) # sys.stdout.flush() # before = datetime.now() # if args.report_file: # with open(args.report_file, 'a') as filehandle: # filehandle.write(str(iteration)) # for name in query: # filehandle.write(',' + str(train[name])) # for name in query: # filehandle.write(',' + str(validation[name])) # filehandle.write('\n')
en
0.419457
# import tensorflow # dataset # information about dataset and model # restore # no input tensors, hence None for placeholder creation # else: # training # if args.verbosity >= 1: # sys.stdout.write('{} train model...\n'.format(datetime.now().strftime('%H:%M:%S'))) # sys.stdout.flush() # before = datetime.now() # if args.tf_records: # mean = {name: 0.0 for name in query} # n = 0 # for iteration in range(iteration_start, iteration_end + 1): # train = model(query=query, optimize=True, dropout=args.dropout_rate) # loss !!!??? # mean = {name: value + train[name] for name, value in mean.items()} # n += 1 # if iteration % args.evaluation_frequency == 0 or iteration == 1 or iteration == args.evaluation_frequency // 2 or iteration == iteration_end: # mean = {name: value / n for name, value in mean.items()} # after = datetime.now() # if args.verbosity >= 1: # sys.stdout.write('\r {:.0f}% {}/{} '.format(iteration * 100 / iteration_end, iteration, iteration_end)) # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, train[name])) # sys.stdout.write('(time per evaluation iteration: {})'.format(str(after - before).split('.')[0])) # sys.stdout.flush() # before = datetime.now() # if args.report_file: # with open(args.report_file, 'a') as filehandle: # filehandle.write(str(iteration)) # for name in query: # filehandle.write(',' + str(train[name])) # filehandle.write('\n') # mean = {name: 0.0 for name in mean} # n = 0 # else: # for iteration in range(iteration_start, iteration_end + 1): # generated = dataset.generate(n=args.batch_size, mode='train', noise_range=args.pixel_noise) # model(data=generated, optimize=True, dropout=args.dropout_rate) # if iteration % args.evaluation_frequency == 0 or iteration == 1 or iteration == args.evaluation_frequency // 2 or iteration == iteration_end: # generated = dataset.generate(n=args.evaluation_size, mode='train', noise_range=args.pixel_noise) # train = model(query=query, data=generated) # generated = 
dataset.generate(n=args.evaluation_size, mode='validation', noise_range=args.pixel_noise) # validation = model(query=query, data=generated) # after = datetime.now() # if args.verbosity >= 1: # sys.stdout.write('\r {:.0f}% {}/{} '.format(iteration * 100 / iteration_end, iteration, iteration_end)) # sys.stdout.write('train: ') # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, train[name])) # sys.stdout.write(' validation: ') # for name in query: # sys.stdout.write('{}={:.3f} '.format(name, validation[name])) # sys.stdout.write(' (time per evaluation iteration: {})'.format(str(after - before).split('.')[0])) # sys.stdout.flush() # before = datetime.now() # if args.report_file: # with open(args.report_file, 'a') as filehandle: # filehandle.write(str(iteration)) # for name in query: # filehandle.write(',' + str(train[name])) # for name in query: # filehandle.write(',' + str(validation[name])) # filehandle.write('\n')
2.164194
2
autoflow/feature_engineer/text/topic/lda.py
auto-flow/autoflow
49
6621550
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author : <NAME> # @Contact : <EMAIL> from autoflow.feature_engineer.text.topic.base import BaseGensim import numpy as np class LdaTransformer(BaseGensim): def __init__(self, num_topics=100, chunksize=2000, passes=1, update_every=1, alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50, gamma_threshold=0.001, minimum_probability=0.01, random_state=42, scorer='perplexity', dtype=np.float32): self.dtype = dtype self.scorer = scorer self.random_state = random_state self.minimum_probability = minimum_probability self.gamma_threshold = gamma_threshold self.iterations = iterations self.eval_every = eval_every self.offset = offset self.decay = decay self.eta = eta self.alpha = alpha self.update_every = update_every self.passes = passes self.chunksize = chunksize self.num_topics = num_topics self.transformer_package="gensim.sklearn_api.LdaTransformer"
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author : <NAME> # @Contact : <EMAIL> from autoflow.feature_engineer.text.topic.base import BaseGensim import numpy as np class LdaTransformer(BaseGensim): def __init__(self, num_topics=100, chunksize=2000, passes=1, update_every=1, alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50, gamma_threshold=0.001, minimum_probability=0.01, random_state=42, scorer='perplexity', dtype=np.float32): self.dtype = dtype self.scorer = scorer self.random_state = random_state self.minimum_probability = minimum_probability self.gamma_threshold = gamma_threshold self.iterations = iterations self.eval_every = eval_every self.offset = offset self.decay = decay self.eta = eta self.alpha = alpha self.update_every = update_every self.passes = passes self.chunksize = chunksize self.num_topics = num_topics self.transformer_package="gensim.sklearn_api.LdaTransformer"
fr
0.289745
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author : <NAME> # @Contact : <EMAIL>
2.332128
2