code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
import AgentControl
import Config
import Memory
import numpy as np
import itertools


class Agent:
    """PPO agent coordinator.

    Role of the Agent class is to coordinate between AgentControl, where all
    calculations are done, and Memory, where all of the rollout data is stored.
    """

    def __init__(self, state_size, action_size, batch_size):
        self.agent_control = AgentControl.AgentControl(state_size=state_size, action_size=action_size)
        self.memory = Memory.Memory(state_size, action_size, batch_size)
        # Losses collected since the last record_results() call.
        self.policy_loss_m = []
        self.critic_loss_m = []
        # Rolling 100-entry windows of per-step mean losses, indexed by n_step % 100.
        self.policy_loss_mm = [0] * 100
        self.critic_loss_mm = [0] * 100
        self.max_reward = -300
        self.ep_count = 0

    def set_optimizer_lr_eps(self, n_step):
        # Delegate learning-rate / epsilon scheduling to AgentControl.
        self.agent_control.set_optimizer_lr_eps(n_step)

    def get_action(self, state):
        # Sample actions and their log-probabilities from the policy network.
        # Actions are moved to CPU numpy for the environment step.
        actions, actions_logprob = self.agent_control.get_action(state)
        return actions.cpu().detach().numpy(), actions_logprob

    def add_to_memory(self, state, action, actions_logprob, new_state, reward, done, n_batch_step):
        # Store one transition in the rollout buffer.
        self.memory.add(state, action, actions_logprob, new_state, reward, done, n_batch_step)

    def calculate_old_value_state(self):
        # Get NN output from collected states and pass it to the memory
        self.memory.set_old_value_state(self.agent_control.get_critic_value(self.memory.states).squeeze(-1).detach())

    def calculate_advantage(self):
        # For basic advantage function we have to calculate future rewards we got from each state, where reward from
        # last state is estimation (since we only know rewards in steps we took, not after), discount them and
        # subtract from baseline which in this case will be estimated value of each state.
        # GAE advantage lets us decide how we want each state advantage to be calculated:
        # reward + estimate(next state) - estimate(state) which has low variance but high bias, or
        # reward + gamma*next_reward + ... + gamma^n * estimate(last next state) - estimate(state) which has high
        # variance but low bias. We can decide to calculate advantage with something between those two and
        # Config.LAMBDA will be the hyperparameter for that.
        values = self.agent_control.get_critic_value(self.memory.states).squeeze(-1).detach()
        if Config.GAE:
            next_values = self.agent_control.get_critic_value(self.memory.new_states).squeeze(-1).detach()
            self.memory.calculate_gae_advantage(values, next_values)
        else:
            next_value = self.agent_control.get_critic_value(self.memory.new_states[-1]).squeeze(-1).detach()
            self.memory.calculate_advantage(next_value, values)

    def update(self, indices):
        # Main PPO point is updating policy NN. This is done by calculating derivative of loss function and doing
        # gradient descent. First we have to calculate ratio. Second to find minimum between ratio*advantage and
        # clipped_ratio*advantage. Third to find mean of Config.MINIBATCH_SIZE losses.
        # To calculate ratio we need new and old action probability. We already have old when we fed states to
        # policy NN when we wanted to get action from it. We can get new action probabilities if we give same states
        # but also actions we got. With states NN can create Normal distribution and with action he will sample the same
        # part of distribution, but now with different probability because Normal distribution is different.
        new_action_logprob, entropy = self.agent_control.calculate_logprob(self.memory.states[indices], self.memory.actions[indices])
        ratios = self.agent_control.calculate_ratio(new_action_logprob, self.memory.action_logprobs[indices])
        policy_loss = self.agent_control.update_policy(self.memory.advantages[indices], ratios, entropy)
        # Similar to ratio in policy loss, we also clipped values from critic. For that we need old_value_state which
        # represent old estimate of states before updates.
        critic_loss = self.agent_control.update_critic(self.memory.gt[indices], self.memory.states[indices], self.memory.old_value_state[indices])
        # Calculating mean losses for statistics
        self.policy_loss_m.append(policy_loss.detach().item())
        self.critic_loss_m.append(critic_loss.detach().item())

    def record_results(self, n_step, writer, env):
        # Print and (optionally) log training statistics, then reset the
        # per-update loss accumulators.
        self.max_reward = np.maximum(self.max_reward, np.max(env.return_queue))
        self.policy_loss_mm[n_step % 100] = np.mean(self.policy_loss_m)
        self.critic_loss_mm[n_step % 100] = np.mean(self.critic_loss_m)
        print("Step " + str(n_step) + "/" + str(Config.NUMBER_OF_STEPS) + " Mean 100 policy loss: " + str(
            np.round(np.mean(self.policy_loss_mm[:min(n_step + 1, 100)]), 4)) + " Mean 100 critic loss: " + str(
            np.round(np.mean(self.critic_loss_mm[:min(n_step + 1, 100)]), 4)) + " Max reward: " + str(
            np.round(self.max_reward, 2)) + " Mean 100 reward: " + str(
            np.round(np.mean(env.return_queue), 2)) + " Last rewards: " + str(
            np.round(list(itertools.islice(env.return_queue, min(env.episode_count, 100)-(env.episode_count-self.ep_count), min(env.episode_count, 100))), 2)) + " Ep" + str(env.episode_count))
        if Config.WRITER_FLAG:
            writer.add_scalar('pg_loss', np.mean(self.policy_loss_m), n_step)
            writer.add_scalar('vl_loss', np.mean(self.critic_loss_m), n_step)
            writer.add_scalar('rew', env.return_queue[-1], n_step)
            writer.add_scalar('100rew', np.mean(env.return_queue), n_step)
        self.critic_loss_m = []
        self.policy_loss_m = []
        self.ep_count = env.episode_count
[ "numpy.mean", "AgentControl.AgentControl", "Memory.Memory", "numpy.max", "numpy.round" ]
[((340, 413), 'AgentControl.AgentControl', 'AgentControl.AgentControl', ([], {'state_size': 'state_size', 'action_size': 'action_size'}), '(state_size=state_size, action_size=action_size)\n', (365, 413), False, 'import AgentControl\n'), ((437, 487), 'Memory.Memory', 'Memory.Memory', (['state_size', 'action_size', 'batch_size'], {}), '(state_size, action_size, batch_size)\n', (450, 487), False, 'import Memory\n'), ((4587, 4614), 'numpy.mean', 'np.mean', (['self.policy_loss_m'], {}), '(self.policy_loss_m)\n', (4594, 4614), True, 'import numpy as np\n'), ((4660, 4687), 'numpy.mean', 'np.mean', (['self.critic_loss_m'], {}), '(self.critic_loss_m)\n', (4667, 4687), True, 'import numpy as np\n'), ((4516, 4540), 'numpy.max', 'np.max', (['env.return_queue'], {}), '(env.return_queue)\n', (4522, 4540), True, 'import numpy as np\n'), ((5437, 5464), 'numpy.mean', 'np.mean', (['self.policy_loss_m'], {}), '(self.policy_loss_m)\n', (5444, 5464), True, 'import numpy as np\n'), ((5516, 5543), 'numpy.mean', 'np.mean', (['self.critic_loss_m'], {}), '(self.critic_loss_m)\n', (5523, 5543), True, 'import numpy as np\n'), ((5662, 5687), 'numpy.mean', 'np.mean', (['env.return_queue'], {}), '(env.return_queue)\n', (5669, 5687), True, 'import numpy as np\n'), ((5109, 5134), 'numpy.mean', 'np.mean', (['env.return_queue'], {}), '(env.return_queue)\n', (5116, 5134), True, 'import numpy as np\n'), ((5027, 5055), 'numpy.round', 'np.round', (['self.max_reward', '(2)'], {}), '(self.max_reward, 2)\n', (5035, 5055), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- import json import re import time from ..base.account import BaseAccount class EuroshareEu(BaseAccount): __name__ = "EuroshareEu" __type__ = "account" __version__ = "0.12" __status__ = "testing" __pyload_version__ = "0.5" __description__ = """Euroshare.eu account plugin""" __license__ = "GPLv3" __authors__ = [ ("zoidberg", "<EMAIL>"), ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"), ] def grab_info(self, user, password, data): html = self.load("http://euroshare.eu/", get={"lang": "en"}) m = re.search( r'<span class="btn btn--nav green darken-3">Premium account until: (\d+/\d+/\d+ \d+:\d+:\d+)<', html, ) if m is None: premium = False validuntil = -1 else: premium = True validuntil = time.mktime(time.strptime(m.group(1), "%d/%m/%Y %H:%M:%S")) return {"validuntil": validuntil, "trafficleft": -1, "premium": premium} def signin(self, user, password, data): html = self.load("http://euroshare.eu/login.html") if r'href="http://euroshare.eu/logout.html"' in html: self.skip_login() json_data = json.loads( self.load( "http://euroshare.eu/ajax/_account_login.ajax.php", post={ "username": user, "password": password, "remember": "false", "backlink": "", }, ) ) if json_data.get("login_status") != "success": self.fail_login()
[ "re.search" ]
[((596, 722), 're.search', 're.search', (['"""<span class="btn btn--nav green darken-3">Premium account until: (\\\\d+/\\\\d+/\\\\d+ \\\\d+:\\\\d+:\\\\d+)<"""', 'html'], {}), '(\n \'<span class="btn btn--nav green darken-3">Premium account until: (\\\\d+/\\\\d+/\\\\d+ \\\\d+:\\\\d+:\\\\d+)<\'\n , html)\n', (605, 722), False, 'import re\n')]
# -*- coding: utf-8 -*- import factory from TWLight.resources.factories import PartnerFactory from TWLight.users.factories import EditorFactory from TWLight.applications.models import Application class ApplicationFactory(factory.django.DjangoModelFactory): class Meta: model = Application strategy = factory.CREATE_STRATEGY editor = factory.SubFactory(EditorFactory) partner = factory.SubFactory(PartnerFactory)
[ "factory.SubFactory" ]
[((364, 397), 'factory.SubFactory', 'factory.SubFactory', (['EditorFactory'], {}), '(EditorFactory)\n', (382, 397), False, 'import factory\n'), ((412, 446), 'factory.SubFactory', 'factory.SubFactory', (['PartnerFactory'], {}), '(PartnerFactory)\n', (430, 446), False, 'import factory\n')]
""" <NAME> Pygame Menu System Last Edit: 1 January 2021 """ # Imports import pygame import string # Initialize pygame pygame.init() # Settings menu_manager_settings = { "element_colorkey" : [0, 0, 0], "menu_background_color" : [0, 0, 0], "menu_fps" : 60 } class Action: """ Holds function and argument data for buttons. Attributes: function: The function to execute. arguments: Arguments for the function. keyword_arguments: Keyword arguments for the function. """ def __init__ (self, function, args, kwargs): """ Instantiate an Action object. Arguments: function: The function to execute. *args: Arguments for the function. **kwargs: Keyword arguments for the function. """ self.function = function self.arguments = args self.keyword_arguments = kwargs def execute (self): """ Calls the function, passing it the args and kwargs. """ self.function(*self.arguments, **self.keyword_arguments) class ButtonPicture(pygame.sprite.Sprite): """ Button object for menu manager. Attributes: image (pygame.image): Image for button. action (function): Function to execute when button is pressed. rect (pygame.image.rect): Position, height, width values for image. action_args (*args): Any arguments required by the action. """ def __init__ (self, image, pos = [0,0]): """ Instantiate a button object. Arguments: image (string): Path of image file to be used for button. action (function): Function to execute when button is pressed. action_args (*args): Any arguments required by the action. pos (tuple): XY position for the button. """ super(ButtonPicture, self).__init__() self.image = pygame.image.load(image) self.image.set_colorkey(menu_manager_settings["element_colorkey"]) self.rect = self.image.get_rect() self.rect.x = pos[0] self.rect.y = pos[1] self.actions = [] def get_dimensions (self): """ Get the width and height of the ButtonPicture. Returns: list: Width and Height of the ButtonPicture. Uses width and height from self.rect. 
Format: [width, height] """ dimensions = [self.rect.width, self.rect.height] return dimensions def get_pos (self): """ Get the position of this picture button element. Returns: list: XY position of the picture button. """ position = [self.rect.x, self.rect.y] return position def set_pos (self, pos): """ Set position of the button. Arguments: list: XY position to set the button to. Format: [x, y] """ self.rect.x = pos[0] self.rect.y = pos[1] def add_action (self, function, *args, **kwargs): """ Adds an action to the list of actions for this button. Arguments: function: The function to execute. *args: Arguments for the function. **kwargs: Keyword arguments for the function. """ new_action = Action(function, args, kwargs) self.actions.append(new_action) def execute_actions (self): """ Execute function linked to this button. """ for action in self.actions: action.execute() def is_clicked (self, mouse_pos): """ Returns true if the mouse cursor position is on this sprite. Arguments: mouse_pos (tuple): XY position of the cursor. """ # Check x axis within_x = mouse_pos[0] >= self.rect.x and mouse_pos[0] <= self.rect.x + self.rect.width # Check y axis within_y = mouse_pos[1] >= self.rect.y and mouse_pos[1] <= self.rect.y + self.rect.height # True if within x and y area return within_x and within_y class ButtonText (pygame.sprite.Sprite): """ Text Button object for menu manager. Attributes: image (pygame.image): Pygame image for the text button. action (function): Function to execute when button is pressed. rect (pygame.image.rect): Position, height, width values for image. action_args (*args): Any arguments required by the action. """ def __init__ (self, text, font, pos = [0,0], color = [255, 255, 255], \ antialias = True, background_color = None): """ Instantiate a button object. Arguments: text (string): Text to make the button from. font (pygame.font.SysFont): Font to render the text in. action (function): Function to execute when button is pressed. 
action_args (*args): Any arguments required by the action. pos (tuple): XY position for the button. """ super(ButtonText, self).__init__() self.text = text self.font = font self.antialias = antialias self.color = color self.background_color = background_color self.image = font.render(str(text), antialias, color, background_color) self.image.set_colorkey(menu_manager_settings["element_colorkey"]) self.rect = self.image.get_rect() self.rect.x = pos[0] self.rect.y = pos[1] self.actions = [] def get_pos (self): """ Get the position of this text button element. Returns: pos (list): XY position of the text button. """ position = [self.rect.x, self.rect.y] return position def get_text(self): """ Get the text of the button. Returns: self.text (String): Text of the button. """ return self.text def set_pos (self, pos): """ Set position of the text. Arguments: pos (list): XY position to set the text button to. """ self.rect.x = pos[0] self.rect.y = pos[1] def set_text (self, new_text): """ Changes the text of the button. Arguments: new_text (String): New text of the button. """ self.text = new_text self.image = self.font.render(str(new_text), self.antialias, self.color, self.background_color) def add_action (self, function, *args, **kwargs): """ Adds an action to the list of actions for this button. Arguments: function: The function to execute. *args: Arguments for the function. **kwargs: Keyword arguments for the function. """ new_action = Action(function, args, kwargs) self.actions.append(new_action) def execute_actions (self): """ Execute function linked to this button. """ for action in self.actions: action.execute() def is_clicked (self, mouse_pos): """ Returns true if the mouse cursor position is on this sprite. Arguments: mouse_pos (tuple): XY position of the cursor. 
""" # Check x area within_x = mouse_pos[0] >= self.rect.x and mouse_pos[0] <= self.rect.x + self.rect.width # Check y area within_y = mouse_pos[1] >= self.rect.y and mouse_pos[1] <= self.rect.y + self.rect.height # True if within x and y area return within_x and within_y class Picture (pygame.sprite.Sprite): """ Picture object for menu manager. Attributes: image (pygame.image): Image for picture. rect (pygame.image.rect): Position, height, width values for picture. """ def __init__ (self, image, pos = [0,0]): """ Instantiate a picture object. Arguments: image (string): Path of image file to be used for picture. pos (tuple): XY position for the picture. """ super(Picture, self).__init__() self.image = pygame.image.load(image) self.image.set_colorkey(menu_manager_settings["element_colorkey"]) self.rect = self.image.get_rect() self.rect.x = pos[0] self.rect.y = pos[1] def get_pos (self): """ Get the position of this picture element. Returns: pos (list): XY position of the picture. """ position = [self.rect.x, self.rect.y] return position def set_pos (self, pos): """ Set position of the picture. Arguments: pos (tuple): XY position to set the picture to. """ self.rect.x = pos[0] self.rect.y = pos[1] def set_image (self, new_image): """ Set a new picture for this instance of Picture. Preserves x and y position of the old picture. Arguments: new_image (String): File name of the new picture. """ # Store the current position temp_old_pos = self.get_pos() # Load the new image self.image = pygame.image.load(new_image) self.image.set_colorkey(menu_manager_settings["element_colorkey"]) self.rect = self.image.get_rect() # Set the x and y position using the old position self.rect.x = temp_old_pos[0] self.rect.y = temp_old_pos[1] class Text (pygame.sprite.Sprite): """ Text object for MenuManager. Attributes: text (String): Text to be rendered. font (pygame.font): Font used to render the text. pos (tuple): Position of the text. color (List): Color of the text. 
antialias (Boolean): Adds antialias to text. background_color (List): Background color of the text. image (pygame.image): Rendered text. rect (pygame.image.rect): Position, height, width values for Text. """ def __init__ (self, text, font, pos = [0,0], color = [255, 255, 255], \ antialias = True, background_color = None): """ Instantiates a new Text object. Arguments: text (String): Text to be rendered. font (pygame.font): Font used to render the text. pos (tuple): Position of the text. color (List): Color of the text. antialias (Boolean): Adds antialias to text. background_color (List): Background color of the text. """ super(Text, self).__init__() self.text = text self.font = font self.pos = pos self.color = color self.antialias = antialias self.background_color = background_color self.image = font.render(str(text), antialias, color, background_color) self.image.set_colorkey(menu_manager_settings["element_colorkey"]) self.rect = self.image.get_rect() self.rect.x = pos[0] self.rect.y = pos[1] def get_dimensions (self): """ Get the width and height of the Text. Returns: list: Width and Height of the Text. Uses width and height from self.rect. Format: [width, height] """ dimensions = [self.rect.width, self.rect.height] return dimensions def get_pos (self): """ Get the position of this text element. Returns: pos (list): XY position of the text. """ position = [self.rect.x, self.rect.y] return position def set_pos (self, pos): """ Set position of the text. Arguments: pos (list): XY position to set the text to. """ self.rect.x = pos[0] self.rect.y = pos[1] class MenuManager: """ Menu manager for pygame. Attributes: pages (List): List of pages in the menu manager. current_page (Page): Page currently being displayed. screen (pygame.display): Surface to blit the pages and game to. clock (pygame.time.Clock): Used to set/cap game FPS. start_page_set (Boolean): Switch that checks if start page has been set. 
""" def __init__ (self, screen, clock): """ Instantiate a MenuManager object. Arguments: screen (pygame.Surface): Surface we are blitting objects to. clock (pygame.time.Clock): Pygame clock. NOTE: For the menu manager system to work as intended, you will want to use the same screen and clock objects for the menu manager and your game. """ self.pages = list() self.current_page = None self.screen = screen self.clock = clock self.start_page = None self.exiting = False def run (self): """ Puts the menu loop into a function for ease of use. """ while self.__update(): self.__display() def add_page (self, new_page): """ Adds a page to the menu manager. Arguments: new_page (Page): Page to be added to the menu manager. """ self.pages.append(new_page) def set_start_page (self, page_id): """ Set a start page for the menu manager. This function must be called before calling ManuManager.run() or the program will be terminated. Arguments: page_id (String/Int): ID of the desired page destination. NOTE: See Page class for more info on page id's. """ for page in self.pages: if (page_id == page.id): self.current_page = page self.start_page = page return print("Invalid start page id!") exit() def navigate (self, page_id): """ Sets the currently showing page using the id attribute of Page class. Arguments: page_id (String/Int): ID of the desired page destination. NOTE: See Page class for more info on page id's. """ for page in self.pages: if page.id == page_id: self.current_page = page return print("Invalid page id, " + str(page_id) + "! Exiting.") exit(-1) def exit_menu (self): """ For exiting the menu manager. Flips the exiting flag. """ self.exiting = True def kill_program (self): """ Terminates the entire program. """ exit() def __display (self): """ Blit everything from backend to the screen. 
""" # Fill background self.screen.fill(menu_manager_settings["menu_background_color"]) # Display current screen self.current_page.display(self.screen) pygame.display.flip() self.clock.tick(menu_manager_settings["menu_fps"]) def __update (self): """ Handles user events. Also checks if a start page has been set. This function will prevent the program from running if a sart page has not beem set. Returns: Boolean: True if program execution should continue, False otherwise. """ if self.start_page == None: print("Start page not set!") self.kill_program() if self.exiting: self.exiting = False self.current_page = self.start_page return False for event in pygame.event.get(): if event.type == pygame.QUIT: self.kill_program() if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: mouse_pos = pygame.mouse.get_pos() for element in self.current_page.elements: if isinstance(element, ButtonPicture) or \ isinstance(element, ButtonText): if element.is_clicked(mouse_pos): element.execute_actions() return True class Page: """ Page object for menu manager. Attributes: id (string/int): ID for this page. elements (List): List of elements on the page. NOTE: The ID doesn't have to be a string/int, it just has to be some distinct identifier for this page. I just recommend using a string or an integer for simplicity and readability. """ def __init__ (self, id): """ Instantiate a page object. Arguments: id (string/int): ID for this page. """ self.id = id self.elements = list() def add_element (self, new_element): """ Adds an element to the page. Arguments: new_element (Button): Element to add to the page. """ self.elements.append(new_element) def display (self, screen): """ Show this screen in the window. Arguments: screen (pygame.display): Surface to blit the elements to. """ for element in self.elements: screen.blit(element.image, [element.rect.x, element.rect.y])
[ "pygame.init", "pygame.event.get", "pygame.display.flip", "pygame.mouse.get_pos", "pygame.image.load" ]
[((120, 133), 'pygame.init', 'pygame.init', ([], {}), '()\n', (131, 133), False, 'import pygame\n'), ((1927, 1951), 'pygame.image.load', 'pygame.image.load', (['image'], {}), '(image)\n', (1944, 1951), False, 'import pygame\n'), ((8186, 8210), 'pygame.image.load', 'pygame.image.load', (['image'], {}), '(image)\n', (8203, 8210), False, 'import pygame\n'), ((9238, 9266), 'pygame.image.load', 'pygame.image.load', (['new_image'], {}), '(new_image)\n', (9255, 9266), False, 'import pygame\n'), ((14941, 14962), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (14960, 14962), False, 'import pygame\n'), ((15610, 15628), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (15626, 15628), False, 'import pygame\n'), ((15815, 15837), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (15835, 15837), False, 'import pygame\n')]
# -*- coding: utf-8 -*-

import numpy as np
import speechpy
from scipy.io import wavfile
from python_speech_features import mfcc


class PythonMFCCFeatureExtraction():
    """Kaldi-compatible MFCC feature extractor built on python_speech_features."""

    def __init__(self):
        pass

    def audio2features(self, input_path):
        """Read a wav file and return CMVN-normalized MFCC features as float32.

        Arguments:
            input_path: Path to the wav file to featurize.

        Returns:
            numpy.ndarray of float32 MFCC features, sliding-window CMVN applied.
        """
        sample_rate, samples = wavfile.read(input_path)
        features = mfcc(samples, dither=0, highfreq=7700, useEnergy=True, wintype='povey', numcep=23)
        # TODO temporarily duplicate the last frame twice to meet the
        # Kaldi_mfcc_features_extraction API's expected frame count.
        for _ in range(2):
            features = np.append(features, [features[-1]], axis=0)
        normalized = speechpy.processing.cmvnw(features, win_size=301, variance_normalization=False)
        return normalized.astype(np.float32)
[ "python_speech_features.mfcc", "speechpy.processing.cmvnw", "scipy.io.wavfile.read", "numpy.append" ]
[((270, 294), 'scipy.io.wavfile.read', 'wavfile.read', (['input_path'], {}), '(input_path)\n', (282, 294), False, 'from scipy.io import wavfile\n'), ((315, 393), 'python_speech_features.mfcc', 'mfcc', (['sig'], {'dither': '(0)', 'highfreq': '(7700)', 'useEnergy': '(True)', 'wintype': '"""povey"""', 'numcep': '(23)'}), "(sig, dither=0, highfreq=7700, useEnergy=True, wintype='povey', numcep=23)\n", (319, 393), False, 'from python_speech_features import mfcc\n'), ((496, 541), 'numpy.append', 'np.append', (['mfcc_feat', '[mfcc_feat[-1]]'], {'axis': '(0)'}), '(mfcc_feat, [mfcc_feat[-1]], axis=0)\n', (505, 541), True, 'import numpy as np\n'), ((562, 607), 'numpy.append', 'np.append', (['mfcc_feat', '[mfcc_feat[-1]]'], {'axis': '(0)'}), '(mfcc_feat, [mfcc_feat[-1]], axis=0)\n', (571, 607), True, 'import numpy as np\n'), ((628, 713), 'speechpy.processing.cmvnw', 'speechpy.processing.cmvnw', (['mfcc_feat'], {'win_size': '(301)', 'variance_normalization': '(False)'}), '(mfcc_feat, win_size=301, variance_normalization=False\n )\n', (653, 713), False, 'import speechpy\n')]
from enum import Enum
import logging

from blatann.nrf.nrf_dll_load import driver
import blatann.nrf.nrf_driver_types as util
from blatann.nrf.nrf_types.enums import *

logger = logging.getLogger(__name__)


class BLEGapSecMode(object):
    """A GAP connection security mode: a (security mode, level) pair."""

    def __init__(self, sec_mode, level):
        self.sm = sec_mode
        self.level = level

    def to_c(self):
        """Convert to the driver's ble_gap_conn_sec_mode_t struct."""
        params = driver.ble_gap_conn_sec_mode_t()
        params.sm = self.sm
        params.lv = self.level
        return params

    @classmethod
    def from_c(cls, params):
        """Build from a driver ble_gap_conn_sec_mode_t struct."""
        return cls(params.sm, params.lv)


class BLEGapSecModeType(object):
    """Named constants for the standard BLE GAP security modes."""
    NO_ACCESS = BLEGapSecMode(0, 0)
    OPEN = BLEGapSecMode(1, 1)
    ENCRYPTION = BLEGapSecMode(1, 2)
    MITM = BLEGapSecMode(1, 3)
    LESC_MITM = BLEGapSecMode(1, 4)
    SIGN_OR_ENCRYPT = BLEGapSecMode(2, 1)
    SIGN_OR_ENCRYPT_MITM = BLEGapSecMode(2, 2)


class BLEGapSecLevels(object):
    """Flags for which of GAP security levels 1-4 are supported/required."""

    def __init__(self, lv1, lv2, lv3, lv4):
        self.lv1 = lv1
        self.lv2 = lv2
        self.lv3 = lv3
        self.lv4 = lv4

    @classmethod
    def from_c(cls, sec_level):
        """Build from a driver ble_gap_sec_levels_t struct."""
        return cls(lv1=sec_level.lv1,
                   lv2=sec_level.lv2,
                   lv3=sec_level.lv3,
                   lv4=sec_level.lv4)

    def to_c(self):
        """Convert to the driver's ble_gap_sec_levels_t struct."""
        sec_level = driver.ble_gap_sec_levels_t()
        sec_level.lv1 = self.lv1
        sec_level.lv2 = self.lv2
        sec_level.lv3 = self.lv3
        sec_level.lv4 = self.lv4
        return sec_level

    def __repr__(self):
        return "{}(lv1={!r}, lv2={!r}, lv3={!r}, lv4={!r})".format(self.__class__.__name__, self.lv1, self.lv2,
                                                                   self.lv3, self.lv4)


class BLEGapSecKeyDist(object):
    """Flags for which keys (encryption, identity, signing, link) to distribute."""

    def __init__(self, enc_key=False, id_key=False, sign_key=False, link_key=False):
        self.enc_key = enc_key
        self.id_key = id_key
        self.sign_key = sign_key
        self.link_key = link_key

    @classmethod
    def from_c(cls, kdist):
        """Build from a driver ble_gap_sec_kdist_t struct."""
        return cls(enc_key=kdist.enc,
                   id_key=kdist.id,
                   sign_key=kdist.sign,
                   link_key=kdist.link)

    def to_c(self):
        """Convert to the driver's ble_gap_sec_kdist_t struct."""
        kdist = driver.ble_gap_sec_kdist_t()
        kdist.enc = self.enc_key
        kdist.id = self.id_key
        kdist.sign = self.sign_key
        kdist.link = self.link_key
        return kdist

    def __repr__(self):
        return "{}(enc_key={!r}, id_key={!r}, sign_key={!r}, link_key={!r})".format(self.__class__.__name__,
                                                                                    self.enc_key, self.id_key,
                                                                                    self.sign_key, self.link_key)


class BLEGapSecParams(object):
    """GAP pairing/security parameters mirroring the driver's ble_gap_sec_params_t."""

    def __init__(self, bond, mitm, le_sec_pairing, keypress_noti, io_caps, oob,
                 min_key_size, max_key_size, kdist_own, kdist_peer):
        self.bond = bond
        self.mitm = mitm
        self.le_sec_pairing = le_sec_pairing
        self.keypress_noti = keypress_noti
        self.io_caps = io_caps
        self.oob = oob
        self.min_key_size = min_key_size
        self.max_key_size = max_key_size
        self.kdist_own = kdist_own
        self.kdist_peer = kdist_peer

    @classmethod
    def from_c(cls, sec_params):
        """Build from a driver ble_gap_sec_params_t struct."""
        return cls(bond=sec_params.bond,
                   mitm=sec_params.mitm,
                   le_sec_pairing=sec_params.lesc,
                   keypress_noti=sec_params.keypress,
                   io_caps=sec_params.io_caps,
                   oob=sec_params.oob,
                   min_key_size=sec_params.min_key_size,
                   max_key_size=sec_params.max_key_size,
                   kdist_own=BLEGapSecKeyDist.from_c(sec_params.kdist_own),
                   kdist_peer=BLEGapSecKeyDist.from_c(sec_params.kdist_peer))

    def to_c(self):
        """Convert to the driver's ble_gap_sec_params_t struct."""
        sec_params = driver.ble_gap_sec_params_t()
        sec_params.bond = self.bond
        sec_params.mitm = self.mitm
        sec_params.lesc = self.le_sec_pairing
        sec_params.keypress = self.keypress_noti
        sec_params.io_caps = self.io_caps
        sec_params.oob = self.oob
        sec_params.min_key_size = self.min_key_size
        sec_params.max_key_size = self.max_key_size
        sec_params.kdist_own = self.kdist_own.to_c()
        sec_params.kdist_peer = self.kdist_peer.to_c()
        return sec_params

    def __repr__(self):
        return "{}(bond={!r}, mitm={!r}, le_sec_pairing={!r}, keypress_noti={!r}, io_caps={!r}, oob={!r}, " \
               "min_key_size={!r}, max_key_size={!r}, kdist_own={!r}, kdist_peer={!r})".format(self.__class__.__name__,
                                                                                              self.bond, self.mitm,
                                                                                              self.le_sec_pairing,
                                                                                              self.keypress_noti,
                                                                                              self.io_caps, self.oob,
                                                                                              self.min_key_size,
                                                                                              self.max_key_size,
                                                                                              self.kdist_own,
                                                                                              self.kdist_peer)


class BLEGapSecKeyset(object):
    """Owns the driver keyset structs (own + peer) used during bonding.

    Allocates the nested peer key structs up front so the driver can write
    into them during the security procedure.
    """

    def __init__(self):
        self.sec_keyset = driver.ble_gap_sec_keyset_t()
        keys_own = driver.ble_gap_sec_keys_t()
        self.sec_keyset.keys_own = keys_own

        keys_peer = driver.ble_gap_sec_keys_t()
        keys_peer.p_enc_key = driver.ble_gap_enc_key_t()
        keys_peer.p_enc_key.enc_info = driver.ble_gap_enc_info_t()
        keys_peer.p_enc_key.master_id = driver.ble_gap_master_id_t()
        keys_peer.p_id_key = driver.ble_gap_id_key_t()
        keys_peer.p_id_key.id_info = driver.ble_gap_irk_t()
        keys_peer.p_id_key.id_addr_info = driver.ble_gap_addr_t()
        # keys_peer.p_sign_key = driver.ble_gap_sign_info_t()
        # keys_peer.p_pk = driver.ble_gap_lesc_p256_pk_t()
        self.sec_keyset.keys_peer = keys_peer

    @classmethod
    def from_c(cls, sec_params):
        # Fixed: previously `raise NotImplemented()` — NotImplemented is a
        # sentinel value, not an exception class, so calling it raised a
        # confusing TypeError instead of signaling "not implemented".
        raise NotImplementedError("BLEGapSecKeyset.from_c is not implemented")

    def to_c(self):
        """Return the underlying driver ble_gap_sec_keyset_t struct."""
        return self.sec_keyset
[ "logging.getLogger", "blatann.nrf.nrf_dll_load.driver.ble_gap_conn_sec_mode_t", "blatann.nrf.nrf_dll_load.driver.ble_gap_sec_keys_t", "blatann.nrf.nrf_dll_load.driver.ble_gap_master_id_t", "blatann.nrf.nrf_dll_load.driver.ble_gap_addr_t", "blatann.nrf.nrf_dll_load.driver.ble_gap_sec_keyset_t", "blatann....
[((185, 212), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'import logging\n'), ((386, 418), 'blatann.nrf.nrf_dll_load.driver.ble_gap_conn_sec_mode_t', 'driver.ble_gap_conn_sec_mode_t', ([], {}), '()\n', (416, 418), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((1330, 1359), 'blatann.nrf.nrf_dll_load.driver.ble_gap_sec_levels_t', 'driver.ble_gap_sec_levels_t', ([], {}), '()\n', (1357, 1359), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((2250, 2278), 'blatann.nrf.nrf_dll_load.driver.ble_gap_sec_kdist_t', 'driver.ble_gap_sec_kdist_t', ([], {}), '()\n', (2276, 2278), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((4081, 4110), 'blatann.nrf.nrf_dll_load.driver.ble_gap_sec_params_t', 'driver.ble_gap_sec_params_t', ([], {}), '()\n', (4108, 4110), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((5972, 6001), 'blatann.nrf.nrf_dll_load.driver.ble_gap_sec_keyset_t', 'driver.ble_gap_sec_keyset_t', ([], {}), '()\n', (5999, 6001), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6022, 6049), 'blatann.nrf.nrf_dll_load.driver.ble_gap_sec_keys_t', 'driver.ble_gap_sec_keys_t', ([], {}), '()\n', (6047, 6049), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6118, 6145), 'blatann.nrf.nrf_dll_load.driver.ble_gap_sec_keys_t', 'driver.ble_gap_sec_keys_t', ([], {}), '()\n', (6143, 6145), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6177, 6203), 'blatann.nrf.nrf_dll_load.driver.ble_gap_enc_key_t', 'driver.ble_gap_enc_key_t', ([], {}), '()\n', (6201, 6203), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6244, 6271), 'blatann.nrf.nrf_dll_load.driver.ble_gap_enc_info_t', 'driver.ble_gap_enc_info_t', ([], {}), '()\n', (6269, 6271), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6313, 6341), 'blatann.nrf.nrf_dll_load.driver.ble_gap_master_id_t', 'driver.ble_gap_master_id_t', ([], {}), '()\n', (6339, 6341), False, 'from 
blatann.nrf.nrf_dll_load import driver\n'), ((6372, 6397), 'blatann.nrf.nrf_dll_load.driver.ble_gap_id_key_t', 'driver.ble_gap_id_key_t', ([], {}), '()\n', (6395, 6397), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6436, 6458), 'blatann.nrf.nrf_dll_load.driver.ble_gap_irk_t', 'driver.ble_gap_irk_t', ([], {}), '()\n', (6456, 6458), False, 'from blatann.nrf.nrf_dll_load import driver\n'), ((6502, 6525), 'blatann.nrf.nrf_dll_load.driver.ble_gap_addr_t', 'driver.ble_gap_addr_t', ([], {}), '()\n', (6523, 6525), False, 'from blatann.nrf.nrf_dll_load import driver\n')]
from direct.showbase.ShowBase import ShowBase from panda3d.core import CardMaker, NodePath, loadPrcFileData from panda3d.core import * import sys configVars = """ win-size 1920 1080 show-frame-rate-meter 0 """ loadPrcFileData("", configVars) class mymenu(ShowBase): def __init__(self): ShowBase.__init__(self) self.accept("escape", sys.exit) #Text, Gui, buttons go here #if button click, run class Platformer etc. menu = mymenu() menu.run()
[ "panda3d.core.loadPrcFileData", "direct.showbase.ShowBase.ShowBase.__init__" ]
[((224, 255), 'panda3d.core.loadPrcFileData', 'loadPrcFileData', (['""""""', 'configVars'], {}), "('', configVars)\n", (239, 255), False, 'from panda3d.core import CardMaker, NodePath, loadPrcFileData\n'), ((317, 340), 'direct.showbase.ShowBase.ShowBase.__init__', 'ShowBase.__init__', (['self'], {}), '(self)\n', (334, 340), False, 'from direct.showbase.ShowBase import ShowBase\n')]
import django import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings") django.setup() from django.contrib.auth import get_user_model from importer.utils import import_ratings_from_csv from titles.models import Title from tmdb.api import TmdbWrapper, TitleDetailsGetter User = get_user_model() def test_csv(): user = User.objects.all().first() import_ratings_from_csv(user, 'G:/code/PycharmProjects/movie website/media/test.csv') # update_user_ratings_csv(user, 'G:/code/PycharmProjects/movie website/media/accounts/imdb.csv') # test_csv() # check popular # check import/eexport # for t in Title.objects.filter(tmdb_id='1414'): # for t in Title.objects.filter(imdb_id='tt0454848'): # # # print(t.similar.clear()) # # # print(t, t.imdb_id) # tmdb_instance = t.get_tmdb_instance() # tmdb_instance(title=t).update() # Title.objects.filter(tmdb_id='1414').delete() # for imdb_id in ['tt0286486', 'tt0133363']: # for imdb_id in ['tt0454848']: # TmdbWrapper().get(imdb_id=imdb_id) imdb_id_movie = 'tt0120889' # tmdb_id_movie = '12159' # imdb_id_series = 'tt4574334' tmdb_id_series = '66732' t = Title.objects.get(imdb_id='tt1037705') TitleDetailsGetter(t).run() # deleted = Title.objects.filter(imdb_id=collection_id).delete() # Collection.objects.all().delete() # deleted = Title.objects.filter(imdb_id=imdb_id_movie).delete() # deleted = Title.objects.filter(tmdb_id=tmdb_id_series).delete() # print(deleted) # title = TmdbWrapper().get(imdb_id_movie) # title = client.get_title_or_create() # print(title.collection.all()) # t = Title.objects.get(tmdb_id=tmdb_id_series) # print(t.cast.all()) # print(t.crew.all()) # for x in t.casttitle_set.all(): # print(x) # for x in t.castcrew_set.all(): # print(x) # popular_movies = PopularMovies().get() # print(popular_movies) # for title in Title.objects.all(): # print(title.name) # imdb_id = title.imdb_id # print(title.delete()) # TmdbWrapper().get(imdb_id)
[ "os.environ.setdefault", "django.contrib.auth.get_user_model", "django.setup", "titles.models.Title.objects.get", "importer.utils.import_ratings_from_csv", "tmdb.api.TitleDetailsGetter" ]
[((26, 92), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""mysite.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'mysite.settings')\n", (47, 92), False, 'import os\n'), ((93, 107), 'django.setup', 'django.setup', ([], {}), '()\n', (105, 107), False, 'import django\n'), ((300, 316), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (314, 316), False, 'from django.contrib.auth import get_user_model\n'), ((1155, 1193), 'titles.models.Title.objects.get', 'Title.objects.get', ([], {'imdb_id': '"""tt1037705"""'}), "(imdb_id='tt1037705')\n", (1172, 1193), False, 'from titles.models import Title\n'), ((377, 466), 'importer.utils.import_ratings_from_csv', 'import_ratings_from_csv', (['user', '"""G:/code/PycharmProjects/movie website/media/test.csv"""'], {}), "(user,\n 'G:/code/PycharmProjects/movie website/media/test.csv')\n", (400, 466), False, 'from importer.utils import import_ratings_from_csv\n'), ((1194, 1215), 'tmdb.api.TitleDetailsGetter', 'TitleDetailsGetter', (['t'], {}), '(t)\n', (1212, 1215), False, 'from tmdb.api import TmdbWrapper, TitleDetailsGetter\n')]
# -*- coding: utf-8 -*- import pygame import random import tinytag import groups from constants import * """ Pitää sisällään seuraavaa: class MusicPlayer(pygame.sprite.Sprite): taustamusiikin soittajaclass, osaa näyttää infoblurbin biisistä class MusicFile(object): lukee tiedoston tagit ja tallettaa tiedon siitä, missä ruuduissa biisiä saa soittaa MUSIC_FINISHED -pygame-eventti. Tämä nousee kun biisi on soitettu loppuun. Silloin pitää kutsua MusicPlayer.next() Katso luokkien ja metodien docstringeistä lisää. """ # TODO: eriytä kaikki biisit ja biiseistä tehdyt soittolistat toisistaan # Kustomieventti kappaleiden soiton händläämiseen MUSIC_FINISHED = pygame.USEREVENT + 1 class MusicPlayer(pygame.sprite.Sprite): """ class MusicPlayer(pygame.sprite.Sprite): Musiikin soittajaclassi. Osaa seuraavaa: -shuffle/ei shufflea -näyttää hienon infoblurbin kun biisi vaihtuu -osaa soittaa vain niitä biisejä, jotka on määritelty valideiksi käytössä olevaan ruutuun (game/menu/etc.) __init__:issä ottaa vastaan seuraavia argumentteja: -pos: 'topleft', 'topright', 'bottomleft' tai 'bottomright' - infoblurbin positio -shuffle: 1/0 -screen: (string) - tämä määrittää missä ruudussa ollaan ja soittaa sen perusteella oikeaa musaa -group: pygame.sprite.Group -window_size: ikkunan koko että osaa laskea infoblurbin position oikein HUOM HUOM! Toistaiseksi biisit pitää lisätä käsin __init__-osioon! 
""" def __init__(self, pos='topleft', shuffle=1, screen='menu', group=None, window_size=(800, 600)): pygame.sprite.Sprite.__init__(self, group) self.group = group self._shuffle = shuffle self._screen = screen # Infopläjäyksen graffa-init self.image = pygame.Surface((0, 0)) self.rect = self.image.get_rect() self._window_size = window_size # pos = 'topleft', 'topright', 'bottomleft' tai 'bottomright' self.pos = pos # Värit self.text_color = (255, 255, 255) self.bg_color = (127, 51, 0) self.border_color = (173, 69, 0) # Asemointi self.info_padding = 10 self.border_width = 5 self.screen_border_margin = 10 # Fadeout - counter alkaa counter_startista ja vähentää siitä # Muuttaa alfa-arvoa jos counter on välillä 0...255 # Tämä siis infoblurbin fadeouttia, ei musiikin self._fadeout_counter = 0 self._fadeout_counter_start = 1000 self._fadeout_decrement = 5 self.playlist = [] ############################################################ # HUOM HUOM! Toistaiseksi biisit pitää lisätä käsin tähän! # ############################################################ self.playlist.append(MusicFile(filename='sfx/mouse_meets_robot.ogg', allowed_screens='game')) self.playlist.append(MusicFile(filename='sfx/cavern_rain.ogg', allowed_screens='menu')) if shuffle: self.shuffle_playlist() self.playlist_pointer = 0 pygame.mixer.music.set_endevent(MUSIC_FINISHED) # Musiikin voimakkuus 0.0-1.0 self._volume = pygame.mixer.music.get_volume() def _get_volume(self): return self._volume def _set_volume(self, volume): self._volume = volume pygame.mixer.music.set_volume(self._volume) def _set_screen(self, screen): self._screen = screen self.next() volume = property(_get_volume, _set_volume) screen = property(None, _set_screen) def play(self): """ Soittaa playlist[]:issä olevan playlist_pointer:in määrittämän tähän ruutuun validin biisin. Jos soittolista on käyty loppuun niin aloitetaan alusta (shufflettaen jos niin määritetty). 
""" if Settings.data['music_on']: try: current_song = self.playlist[self.playlist_pointer] except IndexError: # Jos playlist on loppu niin aletaan alusta if self._shuffle: self.shuffle_playlist() self.playlist_pointer = 0 current_song = self.playlist[0] # Tarkastetaan onko biisi validi tähän ruutuun if self._screen in current_song.allowed_screens: pygame.mixer.music.load(current_song.filename) pygame.mixer.music.play() # Näytetään infoblurb self.now_playing(current_song) # print("Now playing:", current_song.filename, current_song.title, current_song.artist) else: # Song not allowed in this screen. Next! self.next() def update(self): """ Tämä laskee infoblurbin fadeoutin """ if self in self.group: if self._fadeout_counter > 0: self._fadeout_counter -= self._fadeout_decrement # Jos _fadeout_counter on välillä 0..255 niin asetetaan alpha siitä if 255 >= self._fadeout_counter >= 0: self.image.set_alpha(self._fadeout_counter) # Kuva tyhjäksi kun ollaan päästy nollaan if self._fadeout_counter <= 0: self.image = pygame.Surface((0, 0)) self.rect = self.image.get_rect() self.kill() def stop(self): pygame.mixer.music.stop() self._fadeout_counter = 0 def now_playing(self, current_song): """ Näyttää soivan biisin tiedot ruudulla (infoblurb)""" # Lisätään takaisin ryhmään että grafiikat päivittyy self.add(self.group) # Tekstit line1 = "Now playing:" line2 = current_song.title line3 = "by " + current_song.artist # Teksteistä kuvat font1 = pygame.font.Font(None, 24) font2 = pygame.font.Font(None, 48) font3 = pygame.font.Font(None, 24) textimg1 = font1.render(line1, 1, self.text_color, self.bg_color) textimg2 = font2.render(line2, 1, self.text_color, self.bg_color) textimg3 = font3.render(line3, 1, self.text_color, self.bg_color) # Lasketaan pläjäyksen koko x_size = max(textimg1.get_width(), textimg2.get_width(), textimg3.get_width()) + self.info_padding * 2 y_size = textimg1.get_height() + textimg2.get_height() + textimg3.get_height() + self.info_padding * 2 # Piirretään pläjäys 
self.imageen self.image = pygame.Surface((x_size, y_size)) self.image.fill(self.bg_color) pygame.draw.rect(self.image, self.border_color, (0, 0, x_size, y_size), self.border_width) self.image.blit(textimg1, (self.info_padding, self.info_padding)) self.image.blit(textimg2, (self.info_padding, self.info_padding + textimg1.get_height())) self.image.blit(textimg3, (self.info_padding, self.info_padding + textimg1.get_height() + textimg2.get_height())) # Määritetään alpha ettei myöhemmin herjaa kun sitä muutetaan self.image.set_alpha(255) self.rect = self.image.get_rect() self._calculate_rect_position() self._fadeout_counter = self._fadeout_counter_start def _calculate_rect_position(self): if self.pos == 'topleft': self.rect.topleft = (self.screen_border_margin, self.screen_border_margin) elif self.pos == 'topright': self.rect.topright = (self._window_size[0] - self.screen_border_margin, self.screen_border_margin) elif self.pos == 'bottomleft': self.rect.bottomleft = (self.screen_border_margin, self._window_size[1] - self.screen_border_margin) else: self.rect.bottomright = (self._window_size[0] - self.screen_border_margin, self._window_size[1] - self.screen_border_margin) def shuffle_playlist(self): random.shuffle(self.playlist) def next(self): self.playlist_pointer += 1 self.play() class MusicFile(object): """ Kertoo filen tiedot - filenamen, artistin, titlen ja sallitut ruudut """ def __init__(self, filename=None, allowed_screens=('game',)): self.filename = filename # Luetaan tagit tag = tinytag.TinyTag.get(filename) self.artist = tag.artist self.title = tag.title # Kertoo missä ruuduissa tätä saa soittaa - tuple/lista self.allowed_screens = allowed_screens def debug_run(): window = pygame.display.set_mode((800, 600)) pygame.display.set_caption("Music test") clock = pygame.time.Clock() window.fill((0, 0, 0)) # Music pygame.init() pygame.mixer.init() music_player = MusicPlayer(screen='game', window_size=(800, 600), pos='bottomleft', group=groups.TextGroup) 
music_player.play() running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == MUSIC_FINISHED: music_player.next() window.fill(0) groups.TextGroup.update() groups.TextGroup.draw(window) pygame.display.flip() clock.tick(30) pygame.quit() if __name__ == '__main__': debug_run()
[ "pygame.init", "pygame.quit", "pygame.mixer.music.set_volume", "pygame.font.Font", "pygame.display.set_mode", "pygame.display.flip", "pygame.draw.rect", "pygame.mixer.music.load", "random.shuffle", "pygame.Surface", "pygame.time.Clock", "pygame.mixer.music.play", "groups.TextGroup.update", ...
[((8446, 8481), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 600)'], {}), '((800, 600))\n', (8469, 8481), False, 'import pygame\n'), ((8486, 8526), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Music test"""'], {}), "('Music test')\n", (8512, 8526), False, 'import pygame\n'), ((8539, 8558), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (8556, 8558), False, 'import pygame\n'), ((8604, 8617), 'pygame.init', 'pygame.init', ([], {}), '()\n', (8615, 8617), False, 'import pygame\n'), ((8622, 8641), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (8639, 8641), False, 'import pygame\n'), ((9168, 9181), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (9179, 9181), False, 'import pygame\n'), ((1611, 1653), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'group'], {}), '(self, group)\n', (1640, 1653), False, 'import pygame\n'), ((1802, 1824), 'pygame.Surface', 'pygame.Surface', (['(0, 0)'], {}), '((0, 0))\n', (1816, 1824), False, 'import pygame\n'), ((3090, 3137), 'pygame.mixer.music.set_endevent', 'pygame.mixer.music.set_endevent', (['MUSIC_FINISHED'], {}), '(MUSIC_FINISHED)\n', (3121, 3137), False, 'import pygame\n'), ((3200, 3231), 'pygame.mixer.music.get_volume', 'pygame.mixer.music.get_volume', ([], {}), '()\n', (3229, 3231), False, 'import pygame\n'), ((3362, 3405), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['self._volume'], {}), '(self._volume)\n', (3391, 3405), False, 'import pygame\n'), ((5399, 5424), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (5422, 5424), False, 'import pygame\n'), ((5829, 5855), 'pygame.font.Font', 'pygame.font.Font', (['None', '(24)'], {}), '(None, 24)\n', (5845, 5855), False, 'import pygame\n'), ((5872, 5898), 'pygame.font.Font', 'pygame.font.Font', (['None', '(48)'], {}), '(None, 48)\n', (5888, 5898), False, 'import pygame\n'), ((5915, 5941), 'pygame.font.Font', 'pygame.font.Font', (['None', '(24)'], 
{}), '(None, 24)\n', (5931, 5941), False, 'import pygame\n'), ((6487, 6519), 'pygame.Surface', 'pygame.Surface', (['(x_size, y_size)'], {}), '((x_size, y_size))\n', (6501, 6519), False, 'import pygame\n'), ((6567, 6661), 'pygame.draw.rect', 'pygame.draw.rect', (['self.image', 'self.border_color', '(0, 0, x_size, y_size)', 'self.border_width'], {}), '(self.image, self.border_color, (0, 0, x_size, y_size),\n self.border_width)\n', (6583, 6661), False, 'import pygame\n'), ((7856, 7885), 'random.shuffle', 'random.shuffle', (['self.playlist'], {}), '(self.playlist)\n', (7870, 7885), False, 'import random\n'), ((8208, 8237), 'tinytag.TinyTag.get', 'tinytag.TinyTag.get', (['filename'], {}), '(filename)\n', (8227, 8237), False, 'import tinytag\n'), ((8839, 8857), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (8855, 8857), False, 'import pygame\n'), ((9046, 9071), 'groups.TextGroup.update', 'groups.TextGroup.update', ([], {}), '()\n', (9069, 9071), False, 'import groups\n'), ((9080, 9109), 'groups.TextGroup.draw', 'groups.TextGroup.draw', (['window'], {}), '(window)\n', (9101, 9109), False, 'import groups\n'), ((9118, 9139), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9137, 9139), False, 'import pygame\n'), ((4349, 4395), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['current_song.filename'], {}), '(current_song.filename)\n', (4372, 4395), False, 'import pygame\n'), ((4412, 4437), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (4435, 4437), False, 'import pygame\n'), ((5269, 5291), 'pygame.Surface', 'pygame.Surface', (['(0, 0)'], {}), '((0, 0))\n', (5283, 5291), False, 'import pygame\n')]
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """take-while dataset transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @deprecation.deprecated(None, "Use `tf.data.Dataset.take_while(...)") @tf_export("data.experimental.take_while") def take_while(predicate): """A transformation that stops dataset iteration based on a `predicate`. Args: predicate: A function that maps a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a scalar `tf.bool` tensor. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): return dataset.take_while(predicate=predicate) return _apply_fn
[ "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.util.tf_export.tf_export" ]
[((945, 1013), 'tensorflow.python.util.deprecation.deprecated', 'deprecation.deprecated', (['None', '"""Use `tf.data.Dataset.take_while(...)"""'], {}), "(None, 'Use `tf.data.Dataset.take_while(...)')\n", (967, 1013), False, 'from tensorflow.python.util import deprecation\n'), ((1015, 1056), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""data.experimental.take_while"""'], {}), "('data.experimental.take_while')\n", (1024, 1056), False, 'from tensorflow.python.util.tf_export import tf_export\n')]
from datetime import timezone from airflow.models import DagRun, TaskInstance from airflow.utils.types import DagRunType NORMALISE_NOTICE_METADATA_TASK_ID = "normalise_notice_metadata" CHECK_ELIGIBILITY_FOR_TRANSFORMATION_TASK_ID = "check_eligibility_for_transformation" PREPROCESS_XML_MANIFESTATION_TASK_ID = "preprocess_xml_manifestation" TRANSFORM_NOTICE_TASK_ID = "transform_notice" RESOLVE_ENTITIES_IN_THE_RDF_MANIFESTATION_TASK_ID = "resolve_entities_in_the_rdf_manifestation" VALIDATE_TRANSFORMED_RDF_MANIFESTATION_TASK_ID = "validate_transformed_rdf_manifestation" CHECK_ELIGIBILITY_FOR_PACKING_BY_VALIDATION_REPORT_TASK_ID = "check_eligibility_for_packing_by_validation_report" GENERATE_METS_PACKAGE_TASK_ID = "generate_mets_package" CHECK_PACKAGE_INTEGRITY_BY_PACKAGE_STRUCTURE_TASK_ID = "check_package_integrity_by_package_structure" PUBLISH_NOTICE_IN_CELLAR_TASK_ID = "publish_notice_in_cellar" CHECK_NOTICE_PUBLIC_AVAILABILITY_IN_CELLAR_TASK_ID = "check_notice_public_availability_in_cellar" NOTICE_SUCCESSFULLY_PROCESSED_TASK_ID = "notice_successfully_processed" FAIL_ON_STATE_TASK_ID = "fail_on_state" CHECK_NOTICE_STATE_BEFORE_TRANSFORM_TASK_ID = "check_notice_state_before_transform" CHECK_NOTICE_STATE_BEFORE_GENERATE_METS_PACKAGE_TASK_ID = "check_notice_state_before_generate_mets_package" CHECK_NOTICE_STATE_BEFORE_PUBLISH_NOTICE_IN_CELLAR_TASK_ID = "check_notice_state_before_publish_notice_in_cellar" CHECK_NOTICE_STATE_BEFORE_NOTICE_SUCCESSFULLY_PROCESSED_TASK_ID = "check_notice_state_before_notice_successfully_processed" START_PROCESSING_NOTICE_TASK_ID = "start_processing_notice" TRANSFORM_BRANCH_TASK_IDS = [ CHECK_ELIGIBILITY_FOR_TRANSFORMATION_TASK_ID, CHECK_NOTICE_STATE_BEFORE_TRANSFORM_TASK_ID, PREPROCESS_XML_MANIFESTATION_TASK_ID, TRANSFORM_NOTICE_TASK_ID, RESOLVE_ENTITIES_IN_THE_RDF_MANIFESTATION_TASK_ID, VALIDATE_TRANSFORMED_RDF_MANIFESTATION_TASK_ID, CHECK_ELIGIBILITY_FOR_PACKING_BY_VALIDATION_REPORT_TASK_ID, 
CHECK_NOTICE_STATE_BEFORE_GENERATE_METS_PACKAGE_TASK_ID ] PACKAGE_BRANCH_TASK_IDS = [ GENERATE_METS_PACKAGE_TASK_ID, CHECK_PACKAGE_INTEGRITY_BY_PACKAGE_STRUCTURE_TASK_ID, CHECK_NOTICE_STATE_BEFORE_PUBLISH_NOTICE_IN_CELLAR_TASK_ID ] PUBLISH_BRANCH_TASK_IDS = [ PUBLISH_NOTICE_IN_CELLAR_TASK_ID, CHECK_NOTICE_PUBLIC_AVAILABILITY_IN_CELLAR_TASK_ID, CHECK_NOTICE_STATE_BEFORE_NOTICE_SUCCESSFULLY_PROCESSED_TASK_ID, NOTICE_SUCCESSFULLY_PROCESSED_TASK_ID ] FULL_BRANCH_TASK_IDS = [START_PROCESSING_NOTICE_TASK_ID, NORMALISE_NOTICE_METADATA_TASK_ID] + TRANSFORM_BRANCH_TASK_IDS + PACKAGE_BRANCH_TASK_IDS + PUBLISH_BRANCH_TASK_IDS def run_task(dag, task, conf: dict, xcom_push_data: dict = None, ignore_first_depends_on_past=True) -> TaskInstance: start_date = dag.default_args["start_date"] end_date = dag.default_args["start_date"] start_date = start_date or task.start_date end_date = end_date or task.end_date or timezone.utcnow() info = list(task.dag.iter_dagrun_infos_between(start_date, end_date, align=False))[0] ignore_depends_on_past = info.logical_date == start_date and ignore_first_depends_on_past dr = DagRun( dag_id=task.dag_id, run_id=DagRun.generate_run_id(DagRunType.MANUAL, info.logical_date), run_type=DagRunType.MANUAL, execution_date=info.logical_date, data_interval=info.data_interval, conf=conf ) ti = TaskInstance(task, run_id=None) ti.dag_run = dr if xcom_push_data is not None: for key, value in xcom_push_data.items(): ti.xcom_push(key=str(key), value=value) ti.run( mark_success=False, ignore_task_deps=True, ignore_depends_on_past=ignore_depends_on_past, ignore_ti_state=False, test_mode=True, ) return ti
[ "airflow.models.DagRun.generate_run_id", "airflow.models.TaskInstance", "datetime.timezone.utcnow" ]
[((3430, 3461), 'airflow.models.TaskInstance', 'TaskInstance', (['task'], {'run_id': 'None'}), '(task, run_id=None)\n', (3442, 3461), False, 'from airflow.models import DagRun, TaskInstance\n'), ((2952, 2969), 'datetime.timezone.utcnow', 'timezone.utcnow', ([], {}), '()\n', (2967, 2969), False, 'from datetime import timezone\n'), ((3215, 3275), 'airflow.models.DagRun.generate_run_id', 'DagRun.generate_run_id', (['DagRunType.MANUAL', 'info.logical_date'], {}), '(DagRunType.MANUAL, info.logical_date)\n', (3237, 3275), False, 'from airflow.models import DagRun, TaskInstance\n')]
import os import re import math import subprocess import argparse from wand.image import Image class Tiler: '''Scale a tile map up and down in powers of 2.''' def __init__(self, filename='{x},{z}.png', path='.', zoomOut=4, zoomIn=1, verbose=False): self.path = path if not os.path.isdir(self.path + '/z0'): raise ValueError("The path {}/z0 does not exist.".format(self.path)) self.filename = filename self.pattern = re.compile('^' + self.filename .replace('{x}', '(-?[0-9]+)') .replace('{z}', '(-?[0-9]+)') + '$') self.zoomOut = zoomOut self.zoomIn = zoomIn self.verbose = verbose def execute(self): for i in range(self.zoomOut): self.scaleDown(self.path + '/z{}/'.format(i), self.path + '/z{}/'.format(i+1)) for i in range(0,-self.zoomIn,-1): self.scaleUp(self.path + '/z{}/'.format(i), self.path + '/z{}/'.format(i-1)) def findTiles(self, path): coords = set() for f in os.listdir(path): match = self.pattern.match(f) if match: x, z = int(match.group(1)), int(match.group(2)) coords.add((x,z)) if not coords: raise ValueError("No files matching \"{}\" were found in {}/z0; check the filename pattern.".format(self.filename, self.path)) return coords def scaleDown(self, src, target): ensurePath(target) coords = self.findTiles(src) tcoords = set() for i,j in coords: tcoords.add((math.floor(i/2), math.floor(j/2))) for k,l in tcoords: i,j = 2*k,2*l pics = [src + self.filename.format(x=x,z=z) for x,z in ((i,j),(i+1,j),(i,j+1),(i+1,j+1))] t = target + self.filename.format(x=k, z=l) if os.path.isfile(t): newest_tile = max([os.path.getmtime(pic) if os.path.isfile(pic) else 0 for pic in pics]) if newest_tile <= os.path.getmtime(t): self.log('Tile {i},{j}: -'.format(i=i,j=j)) continue pics = [pic if os.path.isfile(pic) else 'null:' for pic in pics] self.log('Tile {i},{j}: Update'.format(i=i,j=j)) subprocess.call(['montage', '-mode', 'concatenate', '-background', 'None', '-geometry', '256x256'] + pics + [t]) def scaleUp(self, src, target): ensurePath(target) coords = self.findTiles(src) for 
x,z in coords: source = src + self.filename.format(x=x,z=z) with Image(filename=source) as img: for i,j in ((0,0),(1,0),(0,1),(1,1)): dest = target + self.filename.format(x=2*x+i,z=2*z+j) if os.path.isfile(dest): if os.path.getmtime(source) <= os.path.getmtime(dest): self.log("Tile {},{}: Update".format(2*x+i,2*z+j)) continue with img[i*256:i*256+256, j*256:j*256+256] as tile: tile.resize(512,512) self.log("Tile {},{}: Update".format(2*x+i,2*z+j)) tile.save(filename=dest) def log(self, text): if self.verbose: print(text) def ensurePath(target): if not os.path.isdir(target): try: os.mkdir(target) except OSError: raise IOError("The path {} does not exist and could not be created.".format(target)) def parser(): parser = argparse.ArgumentParser( prog='tiler', description='''Scale the image tiles up and down. ''' ) parser.add_argument( '-v', '--verbose', action='store_const', help='Print verbose messages', dest='verbose', const=True, default=False ) parser.add_argument( '--path', type=str, help='Location of the z0 folder [=.]', dest='path', metavar='PATH', default='.' ) parser.add_argument( '--filename', type=str, help='Pattern of the image filenames [={x},{z}.png]', metavar='FILENAME', dest='filename', default='{x},{z}.png' ) parser.add_argument( '--up', type=int, help='How many zoom levels to scale up (each doubles the scale). [=1]', metavar='UP', dest='zoomIn', default='1' ) parser.add_argument( '--down', type=int, help='How many zoom levels to scale down (each halves the scale). [=4]', metavar='DOWN', dest='zoomOut', default='4' ) return parser def main(): args = parser().parse_args() try: Tiler(**vars(args)).execute() except Exception as e: print("Error:",e) if args.path == '.' and not os.path.isdir(args.path + '/z0'): parser().print_help() main()
[ "os.listdir", "argparse.ArgumentParser", "math.floor", "wand.image.Image", "os.path.isfile", "os.path.isdir", "subprocess.call", "os.mkdir", "os.path.getmtime" ]
[((3545, 3643), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""tiler"""', 'description': '"""Scale the image tiles up and down.\n """'}), "(prog='tiler', description=\n 'Scale the image tiles up and down.\\n ')\n", (3568, 3643), False, 'import argparse\n'), ((1037, 1053), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1047, 1053), False, 'import os\n'), ((3331, 3352), 'os.path.isdir', 'os.path.isdir', (['target'], {}), '(target)\n', (3344, 3352), False, 'import os\n'), ((298, 330), 'os.path.isdir', 'os.path.isdir', (["(self.path + '/z0')"], {}), "(self.path + '/z0')\n", (311, 330), False, 'import os\n'), ((1843, 1860), 'os.path.isfile', 'os.path.isfile', (['t'], {}), '(t)\n', (1857, 1860), False, 'import os\n'), ((2265, 2381), 'subprocess.call', 'subprocess.call', (["(['montage', '-mode', 'concatenate', '-background', 'None', '-geometry',\n '256x256'] + pics + [t])"], {}), "(['montage', '-mode', 'concatenate', '-background', 'None',\n '-geometry', '256x256'] + pics + [t])\n", (2280, 2381), False, 'import subprocess\n'), ((3379, 3395), 'os.mkdir', 'os.mkdir', (['target'], {}), '(target)\n', (3387, 3395), False, 'import os\n'), ((2580, 2602), 'wand.image.Image', 'Image', ([], {'filename': 'source'}), '(filename=source)\n', (2585, 2602), False, 'from wand.image import Image\n'), ((1580, 1597), 'math.floor', 'math.floor', (['(i / 2)'], {}), '(i / 2)\n', (1590, 1597), False, 'import math\n'), ((1597, 1614), 'math.floor', 'math.floor', (['(j / 2)'], {}), '(j / 2)\n', (1607, 1614), False, 'import math\n'), ((2001, 2020), 'os.path.getmtime', 'os.path.getmtime', (['t'], {}), '(t)\n', (2017, 2020), False, 'import os\n'), ((2142, 2161), 'os.path.isfile', 'os.path.isfile', (['pic'], {}), '(pic)\n', (2156, 2161), False, 'import os\n'), ((2762, 2782), 'os.path.isfile', 'os.path.isfile', (['dest'], {}), '(dest)\n', (2776, 2782), False, 'import os\n'), ((4718, 4750), 'os.path.isdir', 'os.path.isdir', (["(args.path + '/z0')"], {}), "(args.path + 
'/z0')\n", (4731, 4750), False, 'import os\n'), ((1922, 1941), 'os.path.isfile', 'os.path.isfile', (['pic'], {}), '(pic)\n', (1936, 1941), False, 'import os\n'), ((1897, 1918), 'os.path.getmtime', 'os.path.getmtime', (['pic'], {}), '(pic)\n', (1913, 1918), False, 'import os\n'), ((2811, 2835), 'os.path.getmtime', 'os.path.getmtime', (['source'], {}), '(source)\n', (2827, 2835), False, 'import os\n'), ((2839, 2861), 'os.path.getmtime', 'os.path.getmtime', (['dest'], {}), '(dest)\n', (2855, 2861), False, 'import os\n')]
from collections import namedtuple from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition # pylint: disable=no-name-in-module from pudzu.utils import weighted_choice class LSystem: Rule = namedtuple("Rule", "predecessor successor weight", defaults=(1.0,)) def __init__(self, axiom, rules, angle=4): self.axiom = axiom self.angle = 360 / angle self.rules = {} self.weights = {} for rule in rules: pr = self.Rule(*rule) self.rules.setdefault(pr.predecessor, []).append(pr.successor) self.weights.setdefault(pr.predecessor, []).append(pr.weight) def expand(self, iterations): state = self.axiom for _ in range(iterations): state = "".join([weighted_choice(self.rules.get(c, [c]), self.weights.get(c, [1])) for c in state]) return state def plot(self, screen, iterations, size, reset=True, tracer=(0, 0)): if reset: screen.clearscreen() screen.tracer(*tracer) stack = [] for c in self.expand(iterations): if c == "F": fd(size) elif c == "G": pu() fd(size) pd() elif c == "+": rt(self.angle) elif c == "-": lt(self.angle) elif c == "[": stack.append((position(), heading())) elif c == "]": p, h = stack.pop() pu() setposition(p) setheading(h) pd() screen.update() Koch = LSystem("F--F--F", [("F", "F+F--F+F")], 6) Dragon = LSystem("FX", [("F", ""), ("Y", "+FX--FY+"), ("X", "-FX++FY-")], 8) Plant07 = LSystem("Z", [("Z", "ZFX[+Z][-Z]"), ("X", "X[-FFF][+FFF]FX")], 14) Plant08 = LSystem("SLFFF", [("S", "[+++Z][---Z]TS"), ("Z", "+H[-Z]L"), ("H", "-Z[+H]L"), ("T", "TL"), ("L", "[-FFF][+FFF]F")], 20) Sierpinski = LSystem("AF", [("A", "BF+AF+BF"), ("B", "AF-BF-AF"), ("F", "")], 6) Barnsley = LSystem("X", [("X", "F+[[X]-X]-F[-FX]+X"), ("F", "FF")], 14.4) RandomWalk = LSystem("F", [("F", "FF"), ("F", "F+F"), ("F", "F++F"), ("F", "F-F")], 4)
[ "turtle.position", "turtle.setheading", "collections.namedtuple", "turtle.heading", "turtle.lt", "turtle.pu", "turtle.rt", "turtle.fd", "turtle.setposition", "turtle.pd" ]
[((224, 291), 'collections.namedtuple', 'namedtuple', (['"""Rule"""', '"""predecessor successor weight"""'], {'defaults': '(1.0,)'}), "('Rule', 'predecessor successor weight', defaults=(1.0,))\n", (234, 291), False, 'from collections import namedtuple\n'), ((1149, 1157), 'turtle.fd', 'fd', (['size'], {}), '(size)\n', (1151, 1157), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1201, 1205), 'turtle.pu', 'pu', ([], {}), '()\n', (1203, 1205), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1222, 1230), 'turtle.fd', 'fd', (['size'], {}), '(size)\n', (1224, 1230), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1247, 1251), 'turtle.pd', 'pd', ([], {}), '()\n', (1249, 1251), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1295, 1309), 'turtle.rt', 'rt', (['self.angle'], {}), '(self.angle)\n', (1297, 1309), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1353, 1367), 'turtle.lt', 'lt', (['self.angle'], {}), '(self.angle)\n', (1355, 1367), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1527, 1531), 'turtle.pu', 'pu', ([], {}), '()\n', (1529, 1531), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1548, 1562), 'turtle.setposition', 'setposition', (['p'], {}), '(p)\n', (1559, 1562), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1579, 1592), 'turtle.setheading', 'setheading', (['h'], {}), '(h)\n', (1589, 1592), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1609, 1613), 'turtle.pd', 'pd', ([], {}), '()\n', (1611, 1613), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1425, 1435), 
'turtle.position', 'position', ([], {}), '()\n', (1433, 1435), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n'), ((1437, 1446), 'turtle.heading', 'heading', ([], {}), '()\n', (1444, 1446), False, 'from turtle import fd, heading, lt, pd, position, pu, rt, setheading, setposition\n')]
# Copyright (c) 2014, <NAME> # All rights reserved. # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT # SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import roslib;roslib.load_manifest('nao_dcm_rqt_dashboard') import rospy from nao_dcm_msgs.srv import BoolService, BoolServiceRequest from std_msgs.msg import Float32 from rqt_robot_dashboard.widgets import IconToolButton class NaoDCMStiffnessWidget(IconToolButton): def __init__(self, name='stiffness'): self.name = name icons = [] icons.append(['bg-red.svg','ic-motors.svg']) icons.append(['bg-green.svg','ic-motors.svg']) super(NaoDCMStiffnessWidget,self).__init__(name=name,icons=icons) self.update_state(0) self.stiffness = 1.0 self.clicked.connect(self.changeStiffness) self._sub = rospy.Subscriber('/nao_dcm/stiffnesses',Float32,self.callback) def changeStiffness(self): stiff = rospy.ServiceProxy('/nao_dcm/Stiffnesses/Enable',BoolService) req = BoolServiceRequest() if(self.stiffness==1.0): req.enable = False else: req.enable = True self.stiffness = 1.0-self.stiffness stiff(req) def callback(self, msg): self.stiffness = msg.data if(msg.data == 1.0): self.update_state(0) else: self.update_state(1)
[ "rospy.ServiceProxy", "nao_dcm_msgs.srv.BoolServiceRequest", "rospy.Subscriber", "roslib.load_manifest" ]
[((1535, 1580), 'roslib.load_manifest', 'roslib.load_manifest', (['"""nao_dcm_rqt_dashboard"""'], {}), "('nao_dcm_rqt_dashboard')\n", (1555, 1580), False, 'import roslib\n'), ((2196, 2260), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/nao_dcm/stiffnesses"""', 'Float32', 'self.callback'], {}), "('/nao_dcm/stiffnesses', Float32, self.callback)\n", (2212, 2260), False, 'import rospy\n'), ((2315, 2377), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/nao_dcm/Stiffnesses/Enable"""', 'BoolService'], {}), "('/nao_dcm/Stiffnesses/Enable', BoolService)\n", (2333, 2377), False, 'import rospy\n'), ((2391, 2411), 'nao_dcm_msgs.srv.BoolServiceRequest', 'BoolServiceRequest', ([], {}), '()\n', (2409, 2411), False, 'from nao_dcm_msgs.srv import BoolService, BoolServiceRequest\n')]
import torch from torch import nn import torch.nn.functional as nf from torch.nn import init from torch.autograd import Variable import numpy as np from chainer.links.loss.hierarchical_softmax import TreeParser #class HSM(nn.Module): # def __init__(self, input_size, vocab_size): class HSBad(nn.Module): def __init__(self, input_size, n_vocab): super(HSM, self).__init__() self.input_size = input_size self.n_vocab = n_vocab + 3 self.l2v = nn.Linear( in_features = self.input_size, out_features = self.n_vocab ) def init_params(self): pass def __call__(self, x, t): v = self.l2v(x) loss = nf.cross_entropy(v, t) return loss class HSFil(nn.Module): def __init__(self, input_size, huff_tree): super(HSM, self).__init__() self.input_size = input_size self.tp = TreeParser() self.tp.parse(huff_tree) self.n_decisions = self.tp.size() # self.decs = nn.Embedding( # num_embeddings = self.n_decisions, # embedding_dim = self.input_size # ) self.decs = nn.Parameter( torch.Tensor(self.n_decisions, self.input_size) ) paths_d = self.tp.get_paths() codes_d = self.tp.get_codes() self.n_vocab = max(paths_d.keys()) + 1 self.paths = [paths_d[i] for i in range(self.n_vocab)] self.codes = [torch.from_numpy(codes_d[i]) for i in range(self.n_vocab)] self.lens = [len(path) for path in self.paths] self.begins = np.cumsum([0.0] + self.lens[:-1]) def init_params(self): g_paths = [torch.from_numpy(path).cuda().long() for path in self.paths] # g_paths = torch.from_numpy(np.concatenate(self.paths)).cuda().long() self.path_mats = [self.decs[g_path].cpu() for g_path in g_paths] def __call__(self, x, t): # import pdb; pdb.set_trace() curr_path_mats = [self.path_mats[i] for i in t] vv = [pm.cuda().mv(x_i) for pm, x_i in zip(curr_path_mats, x)] loss = -nf.logsigmoid(torch.cat(vv)).sum()/x.size()[0] # r_paths = [] # r_codes = [] # r_xs = [] # paths = torch.cat([self.paths[t_i] for t_i in t]) # codes = torch.cat([self.codes[t_i] for t_i in t]) # import pdb; pdb.set_trace() # for x_i, t_i in zip(x, t): # path, 
code = self.paths[t_i], self.codes[t_i] # r_paths.append(self.paths[t_i]) # r_codes.append(self.codes[t_i]) # r_xs.append(x_i.repeat(len(path), 1)) ## r_xs.append(x_i.expand(len(path), self.input_size)) # #g_paths = Variable(torch.from_numpy(np.concatenate(r_paths)).long().cuda(), requires_grad=False) ## g_codes = Variable(torch.from_numpy(np.concatenate(r_codes)).cuda(), requires_grad=False) # g_xs = torch.cat(r_xs) ## loss = nf.binary_cross_entropy(self.decs(g_paths) * g_xs, g_codes, size_average=False) # loss = nf.logsigmoid((self.decs(g_paths) * g_xs).sum(1) * g_codes).sum()/x.size()[0] return loss class HSM(nn.Module): def __init__(self, input_size, huff_tree): super(HSM, self).__init__() self.input_size = input_size self.tp = TreeParser() self.tp.parse(huff_tree) self.n_decisions = self.tp.size() # self.decs = nn.Linear( # in_features = self.input_size, # out_features = self.n_decisions # ) # self.decs = nn.Parameter( # torch.Tensor(self.n_decisions, self.input_size) # ) self.decs = nn.Embedding( num_embeddings = self.n_decisions, embedding_dim = self.input_size ) self.paths_d = self.tp.get_paths() self.codes_d = self.tp.get_codes() self.max_path = max([len(v) for v in self.paths_d.values()]) self.max_code = max([len(v) for v in self.codes_d.values()]) self.n_vocab = max(self.paths_d.keys()) + 1 def init_params(self): # init.kaiming_normal(self.decs) self.paths = [self.paths_d[i] for i in range(self.n_vocab)] self.paths = [np.pad(path, (0, max(0, self.max_path - len(path))), mode='constant') for path in self.paths] self.paths = torch.stack([torch.from_numpy(path) for path in self.paths], 0).long().cuda() self.codes = [self.codes_d[i] for i in range(self.n_vocab)] self.codes = [np.pad(code, (0, max(0, self.max_code - len(code))), mode='constant') for code in self.codes] self.codes = torch.stack([torch.from_numpy(code) for code in self.codes], 0).cuda() def __call__(self, x, t): # import pdb; pdb.set_trace() g_t = torch.from_numpy(t).cuda().long() ws = 
self.decs(Variable(self.paths[g_t])) # ws = self.decs(self.paths[t].view(-1)).view(x.size()[0], self.max_path, self.input_size) scores = ws.bmm(x.unsqueeze(2)).squeeze() * Variable(self.codes[g_t]) nz_mask = scores.ne(0).detach() loss = -nf.logsigmoid(scores.masked_select(nz_mask)).sum()/x.size()[0] # ws = [self.decs[self.paths[t_i]] * self.codes[t_i] for t_i in t] # ws = torch.cat([self.decs[self.paths[t_i]].mv(x_i) for t_i, x_i in zip(t, x)]) # cs = Variable(torch.cat([self.codes[t_i] for t_i in t])) # loss = -nf.logsigmoid(ws * cs).sum()/x.size()[0] return loss class HSFail(nn.Module): def __init__(self, input_size, vocab_size, branch_factor): super(HSM, self).__init__() self.input_size = input_size self.vocab_size = vocab_size self.branch_factor = branch_factor self.level1 = nn.Linear( in_features = self.input_size, out_features = self.branch_factor ) self.level2_w = nn.Parameter( torch.Tensor(self.branch_factor, self.branch_factor, self.input_size) ) # self.level2_b = nn.Parameter( # torch.Tensor(self.branch_factor, self.branch_factor) # ) def init_params(self): init.kaiming_normal(self.level2_w) # init.kaiming_normal(self.level2_b) pass def forward(self, x, t): # import pdb; pdb.set_trace() t1 = (t / self.branch_factor).long().cuda() t2 = (t % self.branch_factor) l1 = self.level1(x) l1_ce = nf.cross_entropy(l1, Variable(t1)) # l1_log_softmax = sum([res[idx] for res, idx in zip(nf.log_softmax(l1), t1)]) # l1_log_softmax = nf.log_softmax(l1).t()[t1].diag() ### l2_w = [self.level2_w[idx] for idx in t2] # l2_w = self.level2_w[t1] ## l2_b = self.level2_b[t1] ### l2_aff = torch.stack([mat.mv(vec) for mat, vec in zip(l2_w, x)], 0)# + l2_b l2_aff = torch.stack([self.level2_w[idx].mv(vec) for idx, vec in zip(t2, x)], 0) l2_ce = nf.cross_entropy(l2_aff, Variable(t2.cuda()).long()) # l2_aff = [mat.addmv(l2_b, vec) for mat, vec in zip(l2_w, x)] # l2_aff = l2_w.bmm(x.unsqueeze(2)).squeeze() + l2_b ## l2_log_softmax = torch.stack([res[idx] for res, idx in 
zip(nf.log_softmax(l2_aff), t2)], 0) # l2_log_softmax = nf.log_softmax(l2_aff).t()[t2].diag() ## ce = - (l1_log_softmax + l2_log_softmax).sum()/x.size()[0] # ce = -l1_log_softmax/x.size()[0] return l1_ce + l2_ce
[ "chainer.links.loss.hierarchical_softmax.TreeParser", "torch.Tensor", "torch.from_numpy", "torch.cat", "torch.nn.functional.cross_entropy", "torch.nn.Linear", "numpy.cumsum", "torch.autograd.Variable", "torch.nn.init.kaiming_normal", "torch.nn.Embedding" ]
[((463, 528), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.input_size', 'out_features': 'self.n_vocab'}), '(in_features=self.input_size, out_features=self.n_vocab)\n', (472, 528), False, 'from torch import nn\n'), ((648, 670), 'torch.nn.functional.cross_entropy', 'nf.cross_entropy', (['v', 't'], {}), '(v, t)\n', (664, 670), True, 'import torch.nn.functional as nf\n'), ((837, 849), 'chainer.links.loss.hierarchical_softmax.TreeParser', 'TreeParser', ([], {}), '()\n', (847, 849), False, 'from chainer.links.loss.hierarchical_softmax import TreeParser\n'), ((1442, 1475), 'numpy.cumsum', 'np.cumsum', (['([0.0] + self.lens[:-1])'], {}), '([0.0] + self.lens[:-1])\n', (1451, 1475), True, 'import numpy as np\n'), ((2997, 3009), 'chainer.links.loss.hierarchical_softmax.TreeParser', 'TreeParser', ([], {}), '()\n', (3007, 3009), False, 'from chainer.links.loss.hierarchical_softmax import TreeParser\n'), ((3298, 3374), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'self.n_decisions', 'embedding_dim': 'self.input_size'}), '(num_embeddings=self.n_decisions, embedding_dim=self.input_size)\n', (3310, 3374), False, 'from torch import nn\n'), ((5213, 5284), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.input_size', 'out_features': 'self.branch_factor'}), '(in_features=self.input_size, out_features=self.branch_factor)\n', (5222, 5284), False, 'from torch import nn\n'), ((5555, 5589), 'torch.nn.init.kaiming_normal', 'init.kaiming_normal', (['self.level2_w'], {}), '(self.level2_w)\n', (5574, 5589), False, 'from torch.nn import init\n'), ((1072, 1119), 'torch.Tensor', 'torch.Tensor', (['self.n_decisions', 'self.input_size'], {}), '(self.n_decisions, self.input_size)\n', (1084, 1119), False, 'import torch\n'), ((1314, 1342), 'torch.from_numpy', 'torch.from_numpy', (['codes_d[i]'], {}), '(codes_d[i])\n', (1330, 1342), False, 'import torch\n'), ((4376, 4401), 'torch.autograd.Variable', 'Variable', (['self.paths[g_t]'], {}), '(self.paths[g_t])\n', 
(4384, 4401), False, 'from torch.autograd import Variable\n'), ((4545, 4570), 'torch.autograd.Variable', 'Variable', (['self.codes[g_t]'], {}), '(self.codes[g_t])\n', (4553, 4570), False, 'from torch.autograd import Variable\n'), ((5347, 5416), 'torch.Tensor', 'torch.Tensor', (['self.branch_factor', 'self.branch_factor', 'self.input_size'], {}), '(self.branch_factor, self.branch_factor, self.input_size)\n', (5359, 5416), False, 'import torch\n'), ((5844, 5856), 'torch.autograd.Variable', 'Variable', (['t1'], {}), '(t1)\n', (5852, 5856), False, 'from torch.autograd import Variable\n'), ((4193, 4215), 'torch.from_numpy', 'torch.from_numpy', (['code'], {}), '(code)\n', (4209, 4215), False, 'import torch\n'), ((4323, 4342), 'torch.from_numpy', 'torch.from_numpy', (['t'], {}), '(t)\n', (4339, 4342), False, 'import torch\n'), ((1517, 1539), 'torch.from_numpy', 'torch.from_numpy', (['path'], {}), '(path)\n', (1533, 1539), False, 'import torch\n'), ((1927, 1940), 'torch.cat', 'torch.cat', (['vv'], {}), '(vv)\n', (1936, 1940), False, 'import torch\n'), ((3921, 3943), 'torch.from_numpy', 'torch.from_numpy', (['path'], {}), '(path)\n', (3937, 3943), False, 'import torch\n')]
# coding: utf-8 """yieldについてのサンプルです。 元ネタは、stackoverflowの以下のページ。 https://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python/231855#231855 回答がとてもわかり易いので、自分用のメモとしても残しておく。 """ import time from datetime import datetime from trypython.common.commoncls import SampleBase from trypython.common.commonfunc import pr class Sample(SampleBase): def exec(self): # ---------------------------------------------------------- # [元ネタ] # https://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python/231855#231855 # # 以下は、元ネタのページ内の記載を自分なりに理解した内容メモです。 # ---------------------------------------------------------- # ---------------------------------------------------------- # 前書き # -------------- # yield が何を行っているのかを理解するためには # まず generator が何なのかを理解する必要があります。 # generator を理解するには、iterable の理解が必要です。 # てことで、generator の前に iterable から理解していきます。 # ---------------------------------------------------------- # ---------------------------------------------------------- # Iterable # -------------- # list を生成すると、1つずつ要素を読み出すことが出来ます。 # 一つずつ、要素を読み出すことを「イテレーション」といいます。 # ---------------------------------------------------------- list01 = list(range(5)) for i in list01: pr('list-item', i) # ---------------------------------------------------------- # list01 は、iterable です。 # リスト内包表記(list comprehension)を使ったとき # リストが生成され、これも iterable なものとなります。 # ---------------------------------------------------------- list02 = [x * 2 for x in range(5)] for i in list02: pr('list-item(list comprehension)', i) # ---------------------------------------------------------- # for ... in ... 
は、iterableなもの全てに使えます。 # (リストや文字列や辞書や集合やファイルなど・・・) # # このように iterable なオブジェクトはとても使いやすく便利 # なのですが、全ての値をメモリに保持してしまうという面も持っています。 # # 例えば、大量なデータを持つリストなどです。このような時 # 実際に全てのデータが必要では無い場合もあります。 # 利用しないデータの割合が多い場合、多くは不必要にメモリにロードされ # 空きメモリを圧迫します。このような場合に generator が登場します。 # ---------------------------------------------------------- # ---------------------------------------------------------- # Generators # -------------- # Generators は イテレータです。(iterator => iterate するもの) # でも、一回しかイテレートすることが出来ません。 # (つまり、一回全要素をループさせると終わり) # # これは、generatorが一度に全ての要素をメモリに持つのではなく # 必要な要素をその都度 (on the fly) 生成するからです。 # # なので、generator (generate するもの) となります。 # # generator を生成する場合、() で生成部分を囲みます。 # 先ほどのリスト内包表記を generator に変更する場合 # [] を () に変えるだけで generator になります。 # ---------------------------------------------------------- gen01 = (x * 2 for x in range(5)) pr('generator-object', gen01) for i in gen01: pr('generator-item(first-loop)', i) else: pr('first-loop', 'done') for i in gen01: pr('generator-item(second-loop)', i) else: pr('second-loop', 'done') # ---------------------------------------------------------- # Yield # -------------- # ここでやっと、yield の登場です。 # yield は return のような感じで使います。 # return は、値を返しますが、yield は generator を返します。 # # とりあえず、以下に yield のサンプルを記載します。 # 下記を実行すると、5回値を生成します。 # ---------------------------------------------------------- def create_generator(): yield 1 yield 2 yield 3 yield 4 yield 5 gen02 = create_generator() pr('generator-object', gen02) for x in gen02: pr('generator-item', x) # ----------------------------------------------------------------------- # 上記の create_generator 関数では yield が5回登場しています。 # なので、取得した generator をループすると 5回値が出力されます。 # # このように書くと、なんとなく分かるけど、なんとなく分からないって # なってしまうのが、yield のややこしいところです。 # # 大事なのが、以下の点です。 # 「関数内に yield が存在すると、python は 関数本体を実行せずに # generator を生成して、呼び元に返す。」 # # 内部に yield が利用されている関数は、呼ぶとすぐには実行されません。 # generator オブジェクトが生成されて、それが返ってきます。 # # なので、 # gen02 = create_generator() # と書くと、create_generator関数の中では yield があるので # python が内部で generator を生成して返してくれます。 # 
それが 変数 gen02 にセットされます。 # # generator は イテレートさせないと処理が進みません。 # (厳密にいうと、next() が呼ばれない限り。コルーチンの場合は send() されない限り) # 呼び出される度(つまり一回分イテレートされるたび)に、yield一つ分進み、次の # yield が見つかったタイミングで一時停止します。 # # なので、上のサンプルでは ループが一回進む度に yield も一つずつ進んでいって # 最終的に yield が5回分呼ばれたらループ終了となります。(StopIterationが発生します。) # # 一度使い切った generator は利用できないので、上記サンプルの gen02 を # 再度 for 文で利用しても、今度は一回もループされません。 # (yield 5 まで進んでしまっているので、次の yield がもうないため) # # yield の動き自体はシンプルなので、以下のように無限ループの中で yield すると # 止めない限り、永遠に値を生成するようにも出来ます。 # ------------------------------------------------------------------------ def gen_forever(): while True: yield datetime.now() gen03 = gen_forever() for i, d in enumerate(gen03): if i > 5: break pr('gen03', d.isoformat()) time.sleep(1) def go(): obj = Sample() obj.exec()
[ "datetime.datetime.now", "time.sleep", "trypython.common.commonfunc.pr" ]
[((2931, 2960), 'trypython.common.commonfunc.pr', 'pr', (['"""generator-object"""', 'gen01'], {}), "('generator-object', gen01)\n", (2933, 2960), False, 'from trypython.common.commonfunc import pr\n'), ((3766, 3795), 'trypython.common.commonfunc.pr', 'pr', (['"""generator-object"""', 'gen02'], {}), "('generator-object', gen02)\n", (3768, 3795), False, 'from trypython.common.commonfunc import pr\n'), ((1386, 1404), 'trypython.common.commonfunc.pr', 'pr', (['"""list-item"""', 'i'], {}), "('list-item', i)\n", (1388, 1404), False, 'from trypython.common.commonfunc import pr\n'), ((1742, 1780), 'trypython.common.commonfunc.pr', 'pr', (['"""list-item(list comprehension)"""', 'i'], {}), "('list-item(list comprehension)', i)\n", (1744, 1780), False, 'from trypython.common.commonfunc import pr\n'), ((2998, 3033), 'trypython.common.commonfunc.pr', 'pr', (['"""generator-item(first-loop)"""', 'i'], {}), "('generator-item(first-loop)', i)\n", (3000, 3033), False, 'from trypython.common.commonfunc import pr\n'), ((3060, 3084), 'trypython.common.commonfunc.pr', 'pr', (['"""first-loop"""', '"""done"""'], {}), "('first-loop', 'done')\n", (3062, 3084), False, 'from trypython.common.commonfunc import pr\n'), ((3122, 3158), 'trypython.common.commonfunc.pr', 'pr', (['"""generator-item(second-loop)"""', 'i'], {}), "('generator-item(second-loop)', i)\n", (3124, 3158), False, 'from trypython.common.commonfunc import pr\n'), ((3185, 3210), 'trypython.common.commonfunc.pr', 'pr', (['"""second-loop"""', '"""done"""'], {}), "('second-loop', 'done')\n", (3187, 3210), False, 'from trypython.common.commonfunc import pr\n'), ((3833, 3856), 'trypython.common.commonfunc.pr', 'pr', (['"""generator-item"""', 'x'], {}), "('generator-item', x)\n", (3835, 3856), False, 'from trypython.common.commonfunc import pr\n'), ((5464, 5477), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5474, 5477), False, 'import time\n'), ((5285, 5299), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5297, 
5299), False, 'from datetime import datetime\n')]
# -*- coding: utf-8 -*- # 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”) # # 除非遵守当前许可,否则不得使用本软件。 # # * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件): # 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。 # 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。 # # * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件): # 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。 # 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。 # 详细的授权流程,请联系 <EMAIL> 获取。 from rqalpha.environment import Environment from rqalpha.const import DAYS_CNT from rqalpha.utils.repr import property_repr class BenchmarkPortfolio(object): __repr__ = property_repr def __init__(self, benchmark_provider, units): self._provider = benchmark_provider self._units = units @property def units(self): return self._units @property def daily_returns(self): return self._provider.daily_returns @property def total_returns(self): return self._provider.total_returns @property def annualized_returns(self): # fixme: do not rely on env if self.unit_net_value <= 0: return -1 env = Environment.get_instance() date_count = float(env.data_proxy.count_trading_dates(env.config.base.start_date, env.trading_dt.date())) return self.unit_net_value ** (DAYS_CNT.TRADING_DAYS_A_YEAR / date_count) - 1 @property def unit_net_value(self): return 1 + self.total_returns @property def static_unit_net_value(self): return self.unit_net_value / (1 + self.daily_returns) @property def total_value(self): return self.units * self.unit_net_value # Only for compatible @property def cash(self): return 0 market_value = total_value portfolio_value = total_value starting_cash = units
[ "rqalpha.environment.Environment.get_instance" ]
[((1358, 1384), 'rqalpha.environment.Environment.get_instance', 'Environment.get_instance', ([], {}), '()\n', (1382, 1384), False, 'from rqalpha.environment import Environment\n')]
# plot daily box-whisker graphs of GHI data # Use standard config_handler with .yaml config to parse arguments from config_handler import handle_config from datetime import datetime,timezone,date import pandas as pd import matplotlib.pyplot as plt import pysolar.solar as ps import sys import time # note: pathlibs Path object have their own open method, use that for python <3.6 from pathlib import Path if __name__ == "__main__": cp = handle_config( metadata={"invoking_script":"GHIplot"}, header="diagnostics" ) site = cp["site_id"] config = cp['downloader'] paths = cp['paths'] logpath = Path(paths['logging_path']) logpath.mkdir(exist_ok=True) GHIpath = Path(paths['raw_GHI_path']) start_date=int(cp['diagnostics']['start_date']) end_date=int(cp['diagnostics']['end_date']) GHIsensors = {} for GHIsensor in sorted(cp['GHI_sensors'].keys()): GHIsensor = GHIsensor.upper() GHIsensors[GHIsensor]=cp['GHI_sensors'][GHIsensor] source = Path(GHIpath,GHIsensor) if not source.is_dir(): continue print(source.name) datefiles=source.glob("20[0-9][0-9][0-1][0-9][0-3][0-9].csv") print("Available dates:") for datefile in sorted(datefiles): if ( not datefile.is_file() ): continue datestr=datefile.stem if ( int(datestr) < start_date ) or ( int(datestr) > end_date ): continue outfile=Path(source,datestr+".png") df=pd.read_csv(datefile,index_col="Record",usecols=["Record","TimeStamp","SlrW"],parse_dates=["TimeStamp"]) isodate=date(int(datestr[0:4]),int(datestr[4:6]),int(datestr[6:8])).isoformat() ax=df.plot(x='TimeStamp',y='SlrW',title="GHI sensor: %s, %sZ" % (GHIsensor,isodate)) # ,ylabel="GHI [W.m^-2]" ax.figure.show()
[ "config_handler.handle_config", "pandas.read_csv", "pathlib.Path" ]
[((442, 518), 'config_handler.handle_config', 'handle_config', ([], {'metadata': "{'invoking_script': 'GHIplot'}", 'header': '"""diagnostics"""'}), "(metadata={'invoking_script': 'GHIplot'}, header='diagnostics')\n", (455, 518), False, 'from config_handler import handle_config\n'), ((625, 652), 'pathlib.Path', 'Path', (["paths['logging_path']"], {}), "(paths['logging_path'])\n", (629, 652), False, 'from pathlib import Path\n'), ((700, 727), 'pathlib.Path', 'Path', (["paths['raw_GHI_path']"], {}), "(paths['raw_GHI_path'])\n", (704, 727), False, 'from pathlib import Path\n'), ((1019, 1043), 'pathlib.Path', 'Path', (['GHIpath', 'GHIsensor'], {}), '(GHIpath, GHIsensor)\n', (1023, 1043), False, 'from pathlib import Path\n'), ((1494, 1524), 'pathlib.Path', 'Path', (['source', "(datestr + '.png')"], {}), "(source, datestr + '.png')\n", (1498, 1524), False, 'from pathlib import Path\n'), ((1537, 1650), 'pandas.read_csv', 'pd.read_csv', (['datefile'], {'index_col': '"""Record"""', 'usecols': "['Record', 'TimeStamp', 'SlrW']", 'parse_dates': "['TimeStamp']"}), "(datefile, index_col='Record', usecols=['Record', 'TimeStamp',\n 'SlrW'], parse_dates=['TimeStamp'])\n", (1548, 1650), True, 'import pandas as pd\n')]
#Desafio 015: escreva um programa que pergunte a quantidade de km percorridos por um carro alugado e a quantidade de dias pelos quais ele foi alugado. Calcule o preço a pagar, sabendo que o carro custa R$60 por dia e R$0.15 por km rodado. from math import ceil n1 = float(input('Quantos dias você ficou com o carro? ')) n2 = float(input(f'Ok. já que você ficou {ceil(n1)} dias com ele, andou quantos km? ')) v = n1*60 + n2*0.15 print(f'Legal. Então você precisará pagar R${v:.2f} pelo aluguel. ')
[ "math.ceil" ]
[((365, 373), 'math.ceil', 'ceil', (['n1'], {}), '(n1)\n', (369, 373), False, 'from math import ceil\n')]
from starling_sim.basemodel.output.kpis import KPI import logging import pandas as pd class KpiOutput: def __init__(self, population_names, kpi_list, kpi_name=None): # name of the kpi, will compose the kpi filename : <kpi_name>.csv if kpi_name is None: if isinstance(population_names, list): self.name = "_&_".join(population_names) + "_kpi" else: self.name = population_names + "_kpi" else: self.name = kpi_name # population of agent to evaluate self.population_names = population_names self.populations = None # list of kpi to evaluate the given agents self.kpi_list = kpi_list # output file self.filename = None self.folder = None def setup(self, filename, folder, simulation_model): """ Setup method called during simulation setup. Sets the values of out file and folder, and call setup for KPIs. :param filename: .csv file :param folder: :param simulation_model: :return: """ self.filename = filename self.folder = folder for kpi in self.kpi_list: kpi.setup(simulation_model) if isinstance(self.population_names, list): self.populations = [simulation_model.agentPopulation[population_name] for population_name in self.population_names] else: self.populations = [simulation_model.agentPopulation[self.population_names]] def agent_kpi_dict(self, agent): """ Computes the KPIs for the given agent by calling their update method for all its trace :param agent: :return: """ indicators_dict = dict() # get agent trace events = agent.trace.eventList # evaluate all indicators in a single pass for event in events: for kpi in self.kpi_list: kpi.update(event, agent) # merge all completed indicators for kpi in self.kpi_list: indicators_dict.update(kpi.indicator_dict) # raising a warning with sphinx # indicators_dict = {**indicators_dict, **kpi.indicator_dict} # reset kpi values kpi.new_indicator_dict() # return complete indicator dict return indicators_dict def write_kpi_table(self): """ Write the KPI of the population in the csv file obtained from out file attributes The KPIs evaluated are 
defined by the kpi_list attribute """ # first row is always agent's id, then we add the kpi_list keys header_list = [KPI.KEY_ID] for kpi in self.kpi_list: header_list += kpi.keys path = self.folder + self.filename kpi_table = pd.DataFrame() # compute the kpi table for each population dict for population in self.populations: kpi_table = pd.concat([kpi_table, self.compute_population_kpi_table(population)]) # do not generate a kpi output if the kpi table is empty if kpi_table.empty: return # generate kpi output logging.info("Generating KPI output in file " + path) try: # write the dataframe into a csv file kpi_table.to_csv(path, sep=";", index=False, columns=header_list) except KeyError as e: logging.warning("Could not generate kpi output {}, " "error occurred : {}".format(path, e)) def compute_population_kpi_table(self, population): """ Compute a kpi table for the given population dict. :param population: population dict {id: agent} :return: DataFrame containing the KPI values """ df_output = pd.DataFrame() for agent in population.values(): # create kpi dict for the agent agent_indicators = self.agent_kpi_dict(agent) # build a dataframe from the dict if isinstance(agent_indicators[KPI.KEY_ID], list): df = pd.DataFrame(agent_indicators) else: df = pd.DataFrame(agent_indicators, index=[0]) # append the dataframe to the total output df_output = pd.concat([df_output, df]) return df_output
[ "pandas.DataFrame", "logging.info", "pandas.concat" ]
[((2893, 2907), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2905, 2907), True, 'import pandas as pd\n'), ((3257, 3310), 'logging.info', 'logging.info', (["('Generating KPI output in file ' + path)"], {}), "('Generating KPI output in file ' + path)\n", (3269, 3310), False, 'import logging\n'), ((3885, 3899), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3897, 3899), True, 'import pandas as pd\n'), ((4369, 4395), 'pandas.concat', 'pd.concat', (['[df_output, df]'], {}), '([df_output, df])\n', (4378, 4395), True, 'import pandas as pd\n'), ((4177, 4207), 'pandas.DataFrame', 'pd.DataFrame', (['agent_indicators'], {}), '(agent_indicators)\n', (4189, 4207), True, 'import pandas as pd\n'), ((4247, 4288), 'pandas.DataFrame', 'pd.DataFrame', (['agent_indicators'], {'index': '[0]'}), '(agent_indicators, index=[0])\n', (4259, 4288), True, 'import pandas as pd\n')]
import torch from torch import nn as nn from torch.nn import functional as F from torch.autograd import Variable from model.tensorized_layers.graphsage import BatchedGraphSAGE class DiffPoolAssignment(nn.Module): def __init__(self, nfeat, nnext): super().__init__() self.assign_mat = BatchedGraphSAGE(nfeat, nnext, use_bn=True) def forward(self, x, adj, log=False): s_l_init = self.assign_mat(x, adj) s_l = F.softmax(s_l_init, dim=-1) return s_l
[ "model.tensorized_layers.graphsage.BatchedGraphSAGE", "torch.nn.functional.softmax" ]
[((308, 351), 'model.tensorized_layers.graphsage.BatchedGraphSAGE', 'BatchedGraphSAGE', (['nfeat', 'nnext'], {'use_bn': '(True)'}), '(nfeat, nnext, use_bn=True)\n', (324, 351), False, 'from model.tensorized_layers.graphsage import BatchedGraphSAGE\n'), ((452, 479), 'torch.nn.functional.softmax', 'F.softmax', (['s_l_init'], {'dim': '(-1)'}), '(s_l_init, dim=-1)\n', (461, 479), True, 'from torch.nn import functional as F\n')]
import os import pyblish.api from openpype.lib import OpenPypeMongoConnection class IntegrateContextToLog(pyblish.api.ContextPlugin): """ Adds context information to log document for displaying in front end""" label = "Integrate Context to Log" order = pyblish.api.IntegratorOrder - 0.1 hosts = ["webpublisher"] def process(self, context): self.log.info("Integrate Context to Log") mongo_client = OpenPypeMongoConnection.get_mongo_client() database_name = os.environ["OPENPYPE_DATABASE_NAME"] dbcon = mongo_client[database_name]["webpublishes"] for instance in context: self.log.info("ctx_path: {}".format(instance.data.get("ctx_path"))) self.log.info("batch_id: {}".format(instance.data.get("batch_id"))) if instance.data.get("ctx_path") and instance.data.get("batch_id"): self.log.info("Updating log record") dbcon.update_one( { "batch_id": instance.data.get("batch_id"), "status": "in_progress" }, {"$set": { "path": instance.data.get("ctx_path") }} ) return
[ "openpype.lib.OpenPypeMongoConnection.get_mongo_client" ]
[((439, 481), 'openpype.lib.OpenPypeMongoConnection.get_mongo_client', 'OpenPypeMongoConnection.get_mongo_client', ([], {}), '()\n', (479, 481), False, 'from openpype.lib import OpenPypeMongoConnection\n')]
from heapq import heappush, heappop class PriorityQueue: def __init__(self): self.item_heap = [] # [ [priority, item] ] self.dummy = '<dummy entry>' self.item_dict = {} # { item: [priority, item] } def __bool__(self): return bool(self.item_dict) def add(self, item, priority=0): try: self.remove(item) except KeyError: pass wrapper = [priority, item] heappush(self.item_heap, wrapper) self.item_dict[item] = wrapper def remove(self, item): wrapper = self.item_dict[item] wrapper[1] = self.dummy def pop(self): while True: if not self.item_heap: raise KeyError('pop from an empty priority queue') wrapper = heappop(self.item_heap) item = wrapper[1] if item is not self.dummy: break del self.item_dict[item] return wrapper[1]
[ "heapq.heappush", "heapq.heappop" ]
[((457, 490), 'heapq.heappush', 'heappush', (['self.item_heap', 'wrapper'], {}), '(self.item_heap, wrapper)\n', (465, 490), False, 'from heapq import heappush, heappop\n'), ((794, 817), 'heapq.heappop', 'heappop', (['self.item_heap'], {}), '(self.item_heap)\n', (801, 817), False, 'from heapq import heappush, heappop\n')]
from django.db import models from django.utils import timezone # Create your models here. class Creator(models.Model): fullname = models.CharField(max_length=30) profile_picture_url = models.URLField() def __str__(self): return self.fullname class Customer(models.Model): pass class File(models.Model): file_url = models.URLField() file_mime_type = models.CharField(max_length=20) class Comment(models.Model): creator = models.ForeignKey(Creator, models.SET_NULL, null=True) created = models.DateTimeField(blank=True, null=True) modified = models.DateTimeField(blank=True, null=True) content = models.TextField() parent = models.ForeignKey('self', models.CASCADE, null=True, blank=True) upvote_count = models.IntegerField(default=0) def __str__(self): return '{creator}: {content}...'.format(creator=self.creator, content=self.content[:40]) class Upvote(models.Model): creator = models.ForeignKey(Creator, models.CASCADE, null=True) comment = models.ForeignKey(Comment, models.CASCADE, null=True) class Meta: unique_together = (("creator", "comment"),) def __str__(self): return '{creator} upvote {comment}'.format(creator=creator, comment=comment)
[ "django.db.models.TextField", "django.db.models.ForeignKey", "django.db.models.IntegerField", "django.db.models.DateTimeField", "django.db.models.URLField", "django.db.models.CharField" ]
[((136, 167), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (152, 167), False, 'from django.db import models\n'), ((194, 211), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (209, 211), False, 'from django.db import models\n'), ((349, 366), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (364, 366), False, 'from django.db import models\n'), ((388, 419), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (404, 419), False, 'from django.db import models\n'), ((465, 519), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Creator', 'models.SET_NULL'], {'null': '(True)'}), '(Creator, models.SET_NULL, null=True)\n', (482, 519), False, 'from django.db import models\n'), ((534, 577), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (554, 577), False, 'from django.db import models\n'), ((593, 636), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (613, 636), False, 'from django.db import models\n'), ((651, 669), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (667, 669), False, 'from django.db import models\n'), ((684, 748), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""', 'models.CASCADE'], {'null': '(True)', 'blank': '(True)'}), "('self', models.CASCADE, null=True, blank=True)\n", (701, 748), False, 'from django.db import models\n'), ((769, 799), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (788, 799), False, 'from django.db import models\n'), ((964, 1017), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Creator', 'models.CASCADE'], {'null': '(True)'}), '(Creator, models.CASCADE, null=True)\n', (981, 1017), False, 'from django.db import models\n'), ((1032, 1085), 
'django.db.models.ForeignKey', 'models.ForeignKey', (['Comment', 'models.CASCADE'], {'null': '(True)'}), '(Comment, models.CASCADE, null=True)\n', (1049, 1085), False, 'from django.db import models\n')]
# -*- coding: utf-8 -*- """Classifying Images with Pre-trained ImageNet CNNs. Let’s learn how to classify images with pre-trained Convolutional Neural Networks using the Keras library. Example: $ python imagenet_pretrained.py --image example_images/example_01.jpg --model vgg16 $ python imagenet_pretrained.py --image example_images/example_02.jpg --model vgg19 $ python imagenet_pretrained.py --image example_images/example_03.jpg --model inception $ python imagenet_pretrained.py --image example_images/example_04.jpg --model xception $ python imagenet_pretrained.py --image example_images/example_05.jpg --model resnet Attributes: image (str): The path to our input image. model (str, optional): The name of the pre-trained network to use. """ import argparse import cv2 import numpy as np from keras.applications import ResNet50 from keras.applications import InceptionV3 from keras.applications import Xception # TensorFlow ONLY from keras.applications import VGG16 from keras.applications import VGG19 from keras.applications import imagenet_utils from keras.applications.inception_v3 import preprocess_input from keras.preprocessing.image import img_to_array from keras.preprocessing.image import load_img # define a dictionary that maps model names to their classes inside Keras MODELS = { "vgg16": VGG16, "vgg19": VGG19, "inception": InceptionV3, "xception": Xception, # TensorFlow ONLY "resnet": ResNet50, } def main(): """Classify images with a pre-trained neural network. 
Raises: AssertionError: The --model command line argument should be a key in the `MODELS` dictionary """ # construct the argument parse and parse the arguments args = argparse.ArgumentParser() args.add_argument("-i", "--image", required=True, help="path to the input image") args.add_argument("-model", "--model", type=str, default="vgg16", help="name of pre-trained network to use") args = vars(args.parse_args()) # ensure a valid model name was supplied via command line argument if args["model"] not in MODELS.keys(): raise AssertionError("The --model command line argument should " "be a key in the `MODELS` dictionary") # initialize the input image shape (224x224 pixels) along with the pre-processing function # (this might need to be changed based on which model we use to classify our image) input_shape = (224, 224) preprocess = imagenet_utils.preprocess_input # if we are using the InceptionV3 or Xception networks, then we need to set the input shape # to (299x299) [rather than (224x224)] and use a different image processing function if args["model"] in ("inception", "xception"): input_shape = (299, 299) preprocess = preprocess_input # load the network weights from disk (NOTE: if this is the first time you are running this # script for a given network, the weights will need to be downloaded first -- depending on # which network you are using, the weights can be 90-575MB, so be patient; the weights # will be cached and subsequent runs of this script will be *much* faster) print("[INFO] loading {}...".format(args["model"])) network = MODELS[args["model"]] model = network(weights="imagenet") # load the input image using the Keras helper utility while ensuring the image is resized # to `input_shape`, the required input dimensions for the ImageNet pre-trained network print("[INFO] loading and pre-processing image...") image = load_img(args["image"], target_size=input_shape) image = img_to_array(image) # our input image is now represented as a NumPy array of shape # 
(input_shape[0], input_shape[1], 3) however we need to expand the # dimension by making the shape (1, input_shape[0], input_shape[1], 3) # so we can pass it through the network. The 1 has to be specified # so we can train in batches. image = np.expand_dims(image, axis=0) # pre-process the image using the appropriate function based on the # model that has been loaded (i.e., mean subtraction, scaling, etc.) image = preprocess(image) # classify the image print("[INFO] classifying image with '{}'...".format(args["model"])) predictions = model.predict(image) prediction = imagenet_utils.decode_predictions(predictions) # loop over the predictions and display the rank-5 predictions + probabilities to our terminal for (i, (_, label, prob)) in enumerate(prediction[0]): print("{}. {}: {:.2f}%".format(i + 1, label, prob * 100)) # load the image via OpenCV, draw the top prediction on the image, # and display the image to our screen orig = cv2.imread(args["image"]) (_, label, prob) = prediction[0][0] cv2.putText(orig, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2) cv2.imshow("Classification", orig) cv2.waitKey(0) if __name__ == "__main__": main()
[ "keras.preprocessing.image.img_to_array", "cv2.imread", "argparse.ArgumentParser", "cv2.imshow", "cv2.waitKey", "numpy.expand_dims", "keras.applications.imagenet_utils.decode_predictions", "keras.preprocessing.image.load_img" ]
[((1752, 1777), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1775, 1777), False, 'import argparse\n'), ((3554, 3602), 'keras.preprocessing.image.load_img', 'load_img', (["args['image']"], {'target_size': 'input_shape'}), "(args['image'], target_size=input_shape)\n", (3562, 3602), False, 'from keras.preprocessing.image import load_img\n'), ((3615, 3634), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (3627, 3634), False, 'from keras.preprocessing.image import img_to_array\n'), ((3966, 3995), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3980, 3995), True, 'import numpy as np\n'), ((4325, 4371), 'keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['predictions'], {}), '(predictions)\n', (4358, 4371), False, 'from keras.applications import imagenet_utils\n'), ((4732, 4757), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (4742, 4757), False, 'import cv2\n'), ((4920, 4954), 'cv2.imshow', 'cv2.imshow', (['"""Classification"""', 'orig'], {}), "('Classification', orig)\n", (4930, 4954), False, 'import cv2\n'), ((4963, 4977), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4974, 4977), False, 'import cv2\n')]
import numpy as np from matplotlib import pyplot as plt N = 30 y = np.random.rand(N) plt.plot(y,'bo')
[ "matplotlib.pyplot.plot", "numpy.random.rand" ]
[((68, 85), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (82, 85), True, 'import numpy as np\n'), ((87, 104), 'matplotlib.pyplot.plot', 'plt.plot', (['y', '"""bo"""'], {}), "(y, 'bo')\n", (95, 104), True, 'from matplotlib import pyplot as plt\n')]
import functools import types def define_module_exporter(): all_list = [] def export(obj): all_list.append(obj.__name__) return obj return export, all_list def copy_func(fn): if type(fn) is not types.FunctionType: return functools.wraps(fn)(lambda *args, **kwargs: fn(*args, **kwargs)) copy = type(lambda: None)( fn.__code__, fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__ ) copy = functools.update_wrapper(copy, fn) copy.__kwdefaults__ = fn.__kwdefaults__ return copy def alias_for(orig_fn): def alias_for_orig_func(alias_fn): new_fn = functools.wraps(alias_fn)(copy_func(orig_fn)) docstring_header = alias_fn.__doc__ if docstring_header is None: docstring_header = 'Alias for {orig_qual_name} ({orig_module_name} :: {orig_name}).'.format( orig_qual_name=orig_fn.__qualname__, orig_module_name=orig_fn.__module__, orig_name=orig_fn.__name__ ) docstring_footer = orig_fn.__doc__ if docstring_footer is None: new_fn.__doc__ = docstring_header else: new_fn.__doc__ = '\n'.join(( docstring_header, '', '[Original documentation]', docstring_footer )) return new_fn return alias_for_orig_func class Flag(object): __slots__ = ('_val',) def __init__(self, val=False): self._val = val def __bool__(self): return bool(self._val) def set(self): self._val = True return self def clear(self): self._val = False return self
[ "functools.wraps", "functools.update_wrapper" ]
[((513, 547), 'functools.update_wrapper', 'functools.update_wrapper', (['copy', 'fn'], {}), '(copy, fn)\n', (537, 547), False, 'import functools\n'), ((265, 284), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (280, 284), False, 'import functools\n'), ((690, 715), 'functools.wraps', 'functools.wraps', (['alias_fn'], {}), '(alias_fn)\n', (705, 715), False, 'import functools\n')]
""":mod:`autotweet.twitter` --- Twitter utilities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains Twitter API key and some useful methods. """ from __future__ import unicode_literals import cgi import re import tweepy import webbrowser try: from HTMLParser import HTMLParser except ImportError: from html.parser import HTMLParser try: from urllib import urlencode from urllib2 import urlopen except ImportError: from urllib.parse import urlencode from urllib.request import urlopen from .compat import input __all__ = ('CONSUMER_KEY', 'CONSUMER_SECRET', 'authorize', 'strip_tweet') #: Consumer key for autoweet. CONSUMER_KEY = '62yWrV2RhpGgWOKlqvJPNQ' #: Consumer secret key for autotweet. CONSUMER_SECRET = '<KEY>' url_pattern = re.compile(r'https?://[^\s]+') mention_pattern = re.compile(r'@\w+') html_parser = HTMLParser() class OAuthToken(object): key = None secret = None def __init__(self, key, secret): self.key = key self.secret = secret def to_string(self): return urlencode({ 'oauth_token': self.key, 'oauth_token_secret': self.secret, }) @staticmethod def from_string(string): params = cgi.parse_qs(string, keep_blank_values=False) key = params['oauth_token'][0] secret = params['oauth_token_secret'][0] return OAuthToken(key, secret) def authorize(): """Authorize to twitter. Use PIN authentification. :returns: Token for authentificate with Twitter. 
:rtype: :class:`autotweet.twitter.OAuthToken` """ auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) url = auth.get_authorization_url() print('Open this url on your webbrowser: {0}'.format(url)) webbrowser.open(url) pin = input('Input verification number here: ').strip() token_key, token_secret = auth.get_access_token(verifier=pin) return OAuthToken(token_key, token_secret) def get_api(token): if not isinstance(token, OAuthToken): token = OAuthToken.from_string(token) auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(token.key, token.secret) api = tweepy.API(auth) return api def expand_url(status): """Expand url on statuses. :param status: A tweepy status to expand urls. :type status: :class:`tweepy.models.Status` :returns: A string with expanded urls. :rtype: :class:`str` """ try: txt = get_full_text(status) for url in status.entities['urls']: txt = txt.replace(url['url'], url['expanded_url']) except: # Manually replace txt = status tco_pattern = re.compile(r'https://t.co/\S+') urls = tco_pattern.findall(txt) for url in urls: with urlopen(url) as resp: expanded_url = resp.url txt = txt.replace(url, expanded_url) return txt def get_full_text(status): if hasattr(status, 'extended_tweet'): return status.extended_tweet['full_text'] elif hasattr(status, 'full_text'): return status.full_text return status.text def strip_tweet(text, remove_url=True): """Strip tweet message. This method removes mentions strings and urls(optional). :param text: tweet message :type text: :class:`str` :param remove_url: Remove urls. default :const:`True`. :type remove_url: :class:`boolean` :returns: Striped tweet message :rtype: :class:`str` """ if remove_url: text = url_pattern.sub('', text) else: text = expand_url(text) text = mention_pattern.sub('', text) text = html_parser.unescape(text) text = text.strip() return text
[ "re.compile", "cgi.parse_qs", "webbrowser.open", "tweepy.API", "html.parser.HTMLParser", "urllib.parse.urlencode", "urllib.request.urlopen", "tweepy.OAuthHandler" ]
[((785, 815), 're.compile', 're.compile', (['"""https?://[^\\\\s]+"""'], {}), "('https?://[^\\\\s]+')\n", (795, 815), False, 'import re\n'), ((834, 853), 're.compile', 're.compile', (['"""@\\\\w+"""'], {}), "('@\\\\w+')\n", (844, 853), False, 'import re\n'), ((868, 880), 'html.parser.HTMLParser', 'HTMLParser', ([], {}), '()\n', (878, 880), False, 'from html.parser import HTMLParser\n'), ((1621, 1671), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['CONSUMER_KEY', 'CONSUMER_SECRET'], {}), '(CONSUMER_KEY, CONSUMER_SECRET)\n', (1640, 1671), False, 'import tweepy\n'), ((1778, 1798), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (1793, 1798), False, 'import webbrowser\n'), ((2096, 2146), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['CONSUMER_KEY', 'CONSUMER_SECRET'], {}), '(CONSUMER_KEY, CONSUMER_SECRET)\n', (2115, 2146), False, 'import tweepy\n'), ((2208, 2224), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (2218, 2224), False, 'import tweepy\n'), ((1073, 1144), 'urllib.parse.urlencode', 'urlencode', (["{'oauth_token': self.key, 'oauth_token_secret': self.secret}"], {}), "({'oauth_token': self.key, 'oauth_token_secret': self.secret})\n", (1082, 1144), False, 'from urllib.parse import urlencode\n'), ((1245, 1290), 'cgi.parse_qs', 'cgi.parse_qs', (['string'], {'keep_blank_values': '(False)'}), '(string, keep_blank_values=False)\n', (1257, 1290), False, 'import cgi\n'), ((2709, 2740), 're.compile', 're.compile', (['"""https://t.co/\\\\S+"""'], {}), "('https://t.co/\\\\S+')\n", (2719, 2740), False, 'import re\n'), ((2823, 2835), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (2830, 2835), False, 'from urllib.request import urlopen\n')]
import json import discord import os from discord.ext import commands import discord_components import ErrorHandling import Reactions import Help with open('Bot_Secrets.json', 'r') as botSecrets: settings = json.load(botSecrets) TOKEN = settings['BotToken'] intents = discord.Intents.default() intents.members = True command_prefix = '%' client = commands.Bot(intents=intents, command_prefix=command_prefix) def addCog(client): ErrorHandling.setup(client) Reactions.setup(client) Help.setup(client) # LiveStreams.setup(client) @client.event async def on_ready(): # bot initialization discord_components.DiscordComponents(client) await client.change_presence(activity=discord.Game(name="Go Tigers! | %help")) print(f'{client.user} has connected to Discord!') client.remove_command('help') # gets rid of default help command on bot addCog(client) # adding all related cogs to the bot client.run(TOKEN)
[ "discord.Game", "discord.ext.commands.Bot", "Help.setup", "ErrorHandling.setup", "discord_components.DiscordComponents", "json.load", "Reactions.setup", "discord.Intents.default" ]
[((278, 303), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (301, 303), False, 'import discord\n'), ((358, 418), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'intents': 'intents', 'command_prefix': 'command_prefix'}), '(intents=intents, command_prefix=command_prefix)\n', (370, 418), False, 'from discord.ext import commands\n'), ((212, 233), 'json.load', 'json.load', (['botSecrets'], {}), '(botSecrets)\n', (221, 233), False, 'import json\n'), ((444, 471), 'ErrorHandling.setup', 'ErrorHandling.setup', (['client'], {}), '(client)\n', (463, 471), False, 'import ErrorHandling\n'), ((476, 499), 'Reactions.setup', 'Reactions.setup', (['client'], {}), '(client)\n', (491, 499), False, 'import Reactions\n'), ((504, 522), 'Help.setup', 'Help.setup', (['client'], {}), '(client)\n', (514, 522), False, 'import Help\n'), ((617, 661), 'discord_components.DiscordComponents', 'discord_components.DiscordComponents', (['client'], {}), '(client)\n', (653, 661), False, 'import discord_components\n'), ((704, 743), 'discord.Game', 'discord.Game', ([], {'name': '"""Go Tigers! | %help"""'}), "(name='Go Tigers! | %help')\n", (716, 743), False, 'import discord\n')]
''' Etapas do logger ''' import logging # Instancia do objeto getLogger() logger = logging.getLogger() # Definindo o level do logger logger.setLevel(logging.DEBUG) # formatador do log formatter = logging.Formatter( 'Data/Hora: %(asctime)s | level: %(levelname)s | file: %(filename)s | mensagem: %(message)s', # Padrão de data: dia/mes/ano # Padrão de hora: hora/minuto/segundos # Sistema (am/pm) datefmt='%d/%m/%Y %H:%M:%S %p' ) # definido handler ''' logging.FileHandler() -> Salva em arquivo logging.StreamHandler() -> Mostra no console logging.NullHandler -> Manipulador nulo ''' fh = logging.StreamHandler() fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) # Definindo handler logger.addHandler(fh) logger.debug('Olá.')
[ "logging.getLogger", "logging.Formatter", "logging.StreamHandler" ]
[((90, 109), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (107, 109), False, 'import logging\n'), ((203, 356), 'logging.Formatter', 'logging.Formatter', (['"""Data/Hora: %(asctime)s | level: %(levelname)s | file: %(filename)s | mensagem: %(message)s"""'], {'datefmt': '"""%d/%m/%Y %H:%M:%S %p"""'}), "(\n 'Data/Hora: %(asctime)s | level: %(levelname)s | file: %(filename)s | mensagem: %(message)s'\n , datefmt='%d/%m/%Y %H:%M:%S %p')\n", (220, 356), False, 'import logging\n'), ((633, 656), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (654, 656), False, 'import logging\n')]
import sys read = sys.stdin.buffer.read readline = sys.stdin.buffer.readline readlines = sys.stdin.buffer.readlines sys.setrecursionlimit(10 ** 7) m = int(readline()) n = int(readline()) a = [list(map(int, readline().split())) for _ in range(n)] ans = 0 def dfs(y, x, cnt): a[y][x] = 0 v = cnt + 1 for dy, dx in [(0, 1), (1, 0), (-1, 0), (0, -1)]: xx = x + dx yy = y + dy if 0 <= xx < m and 0 <= yy < n and a[yy][xx] == 1: cnt = max(dfs(yy, xx, v), cnt) a[y][x] = 1 return cnt for i, aa in enumerate(a): for j, check in enumerate(aa): if check == 1: ans = max(dfs(i, j, 1), ans) print(ans)
[ "sys.setrecursionlimit" ]
[((116, 146), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (137, 146), False, 'import sys\n')]
# Copyright 2017 SrMouraSilva # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from pluginsmanager.observer.autosaver.index_file import IndexFile from pluginsmanager.model.bank import Bank class IndexFileTest(unittest.TestCase): def test_load_error(self): index = IndexFile(path='/dev/null') self.assertListEqual([], index.load([])) def test_load_data(self): bank1 = Bank(name='Bank 1') bank2 = Bank(name='Bank 2') bank3 = Bank(name='Bank 3') bank4 = Bank(name='Bank 4') not_ordered = (bank3, bank2, bank1, bank4) # bank1, bank3 Já indexados # bank2, bank4 Serão indexados por ordem alfabética expected = [bank1, bank3, bank2, bank4] index = IndexFile(path='any') data = [ index.generate_index_data(0, bank1), index.generate_index_data(1, bank3) ] result = index.load_data(data, not_ordered) self.assertEqual(expected, result) def test_generate_data(self): bank1 = Bank(name='Bank 1') bank2 = Bank(name='Bank 2') bank3 = Bank(name='Bank 3') bank4 = Bank(name='Bank 4') # bank1, bank3 Já indexados # bank2, bank4 Serão indexados por ordem alfabética banks = [bank1, bank2, bank3, bank4] index = IndexFile(path='any') result = index.generate_data(banks) expected = [ index.generate_index_data(0, bank1), index.generate_index_data(1, bank2), index.generate_index_data(2, bank3), index.generate_index_data(3, bank4), ] self.assertEqual(expected, result)
[ "pluginsmanager.model.bank.Bank", "pluginsmanager.observer.autosaver.index_file.IndexFile" ]
[((794, 821), 'pluginsmanager.observer.autosaver.index_file.IndexFile', 'IndexFile', ([], {'path': '"""/dev/null"""'}), "(path='/dev/null')\n", (803, 821), False, 'from pluginsmanager.observer.autosaver.index_file import IndexFile\n'), ((918, 937), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 1"""'}), "(name='Bank 1')\n", (922, 937), False, 'from pluginsmanager.model.bank import Bank\n'), ((954, 973), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 2"""'}), "(name='Bank 2')\n", (958, 973), False, 'from pluginsmanager.model.bank import Bank\n'), ((990, 1009), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 3"""'}), "(name='Bank 3')\n", (994, 1009), False, 'from pluginsmanager.model.bank import Bank\n'), ((1026, 1045), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 4"""'}), "(name='Bank 4')\n", (1030, 1045), False, 'from pluginsmanager.model.bank import Bank\n'), ((1260, 1281), 'pluginsmanager.observer.autosaver.index_file.IndexFile', 'IndexFile', ([], {'path': '"""any"""'}), "(path='any')\n", (1269, 1281), False, 'from pluginsmanager.observer.autosaver.index_file import IndexFile\n'), ((1554, 1573), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 1"""'}), "(name='Bank 1')\n", (1558, 1573), False, 'from pluginsmanager.model.bank import Bank\n'), ((1590, 1609), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 2"""'}), "(name='Bank 2')\n", (1594, 1609), False, 'from pluginsmanager.model.bank import Bank\n'), ((1626, 1645), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 3"""'}), "(name='Bank 3')\n", (1630, 1645), False, 'from pluginsmanager.model.bank import Bank\n'), ((1662, 1681), 'pluginsmanager.model.bank.Bank', 'Bank', ([], {'name': '"""Bank 4"""'}), "(name='Bank 4')\n", (1666, 1681), False, 'from pluginsmanager.model.bank import Bank\n'), ((1841, 1862), 'pluginsmanager.observer.autosaver.index_file.IndexFile', 'IndexFile', ([], {'path': 
'"""any"""'}), "(path='any')\n", (1850, 1862), False, 'from pluginsmanager.observer.autosaver.index_file import IndexFile\n')]
from pprint import pprint import httplib2 from file_path_collect import output_cache_path_dir as cache httplib2.debuglevel = 1 h = httplib2.Http(cache) # 网址换成简书 jianshu = 'https://www.jianshu.com/' # 首先 cache 一遍 response0, content0 = h.request(jianshu) print() response, content = h.request(jianshu) print() print(len(content)) print() print(response.status) print() print(response.fromcache) print() response2, content2 = h.request(jianshu, headers={'cache-control': 'no-cache'}) print() print(response2.status) print() print(response2.fromcache) print() pprint(dict(response2.items())) """ connect: (www.jianshu.com, 443) send: b'GET / HTTP/1.1\r\nHost: www.jianshu.com\r\nuser-agent: Python-httplib2/0.17.3 (gzip)\r\naccept-encoding: gzip, deflate\r\nif-none-match: W/"bbd77e231f5e58fa82c8623683fdc1a1"\r\n\r\n' reply: 'HTTP/1.1 304 Not Modified\r\n' header: Server: Tengine header: Date: Mon, 27 Apr 2020 01:42:48 GMT header: Connection: keep-alive header: X-Frame-Options: SAMEORIGIN header: X-XSS-Protection: 1; mode=block header: X-Content-Type-Options: nosniff header: Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval' *.jianshu.com *.jianshu.io *.nkscdn.com *.huanqiu.com post.star-media.cn api.geetest.com static.geetest.com dn-staticdown.qbox.me zz.bdstatic.com *.google-analytics.com hm.baidu.com nkscdn.com push.zhanzhang.baidu.com res.wx.qq.com qzonestyle.gtimg.cn as.alipayobjects.com nbrecsys.4paradigm.com shared.ydstatic.com gorgon.youdao.com *.googlesyndication.com adservice.google.com www.googletagservices.com ;style-src 'self' 'unsafe-inline' *.jianshu.com *.jianshu.io api.geetest.com static.geetest.com shared.ydstatic.com ; header: ETag: W/"bbd77e231f5e58fa82c8623683fdc1a1" header: Cache-Control: max-age=0, private, must-revalidate header: Set-Cookie: signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/ header: Set-Cookie: read_mode=day; path=/ header: Set-Cookie: default_font=font2; path=/ header: Set-Cookie: locale=zh-CN; path=/ header: 
X-Request-Id: 7d5de11a-339b-4ef1-a3e6-ecdb96bf70a8 header: X-Runtime: 0.015455 header: Strict-Transport-Security: max-age=31536000; includeSubDomains; preload send: b'GET / HTTP/1.1\r\nHost: www.jianshu.com\r\nuser-agent: Python-httplib2/0.17.3 (gzip)\r\naccept-encoding: gzip, deflate\r\nif-none-match: W/"bbd77e231f5e58fa82c8623683fdc1a1"\r\n\r\n' reply: 'HTTP/1.1 304 Not Modified\r\n' header: Server: Tengine header: Date: Mon, 27 Apr 2020 01:42:48 GMT header: Connection: keep-alive header: X-Frame-Options: SAMEORIGIN header: X-XSS-Protection: 1; mode=block header: X-Content-Type-Options: nosniff header: Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval' *.jianshu.com *.jianshu.io *.nkscdn.com *.huanqiu.com post.star-media.cn api.geetest.com static.geetest.com dn-staticdown.qbox.me zz.bdstatic.com *.google-analytics.com hm.baidu.com nkscdn.com push.zhanzhang.baidu.com res.wx.qq.com qzonestyle.gtimg.cn as.alipayobjects.com nbrecsys.4paradigm.com shared.ydstatic.com gorgon.youdao.com *.googlesyndication.com adservice.google.com www.googletagservices.com ;style-src 'self' 'unsafe-inline' *.jianshu.com *.jianshu.io api.geetest.com static.geetest.com shared.ydstatic.com ; header: ETag: W/"bbd77e231f5e58fa82c8623683fdc1a1" header: Cache-Control: max-age=0, private, must-revalidate header: Set-Cookie: signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/ header: Set-Cookie: read_mode=day; path=/ header: Set-Cookie: default_font=font2; path=/ header: Set-Cookie: locale=zh-CN; path=/ header: X-Request-Id: f5fbdb6f-da87-43e3-a5f4-6724b66d3b5f header: X-Runtime: 0.006987 header: Strict-Transport-Security: max-age=31536000; includeSubDomains; preload 23483 200 True send: b'GET / HTTP/1.1\r\nHost: www.jianshu.com\r\ncache-control: no-cache\r\nuser-agent: Python-httplib2/0.17.3 (gzip)\r\naccept-encoding: gzip, deflate\r\n\r\n' reply: 'HTTP/1.1 200 OK\r\n' header: Server: Tengine header: Date: Mon, 27 Apr 2020 01:42:48 GMT header: Content-Type: text/html; 
charset=utf-8 header: Transfer-Encoding: chunked header: Connection: keep-alive header: Vary: Accept-Encoding header: X-Frame-Options: SAMEORIGIN header: X-XSS-Protection: 1; mode=block header: X-Content-Type-Options: nosniff header: Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval' *.jianshu.com *.jianshu.io *.nkscdn.com *.huanqiu.com post.star-media.cn api.geetest.com static.geetest.com dn-staticdown.qbox.me zz.bdstatic.com *.google-analytics.com hm.baidu.com nkscdn.com push.zhanzhang.baidu.com res.wx.qq.com qzonestyle.gtimg.cn as.alipayobjects.com nbrecsys.4paradigm.com shared.ydstatic.com gorgon.youdao.com *.googlesyndication.com adservice.google.com www.googletagservices.com ;style-src 'self' 'unsafe-inline' *.jianshu.com *.jianshu.io api.geetest.com static.geetest.com shared.ydstatic.com ; header: ETag: W/"bbd77e231f5e58fa82c8623683fdc1a1" header: Cache-Control: max-age=0, private, must-revalidate header: Set-Cookie: signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/ header: Set-Cookie: read_mode=day; path=/ header: Set-Cookie: default_font=font2; path=/ header: Set-Cookie: locale=zh-CN; path=/ header: X-Request-Id: efd5e128-4c05-4da6-87de-3a832d6375eb header: X-Runtime: 0.004749 header: Strict-Transport-Security: max-age=31536000; includeSubDomains; preload header: Content-Encoding: gzip 200 False {'-content-encoding': 'gzip', 'cache-control': 'max-age=0, private, must-revalidate', 'connection': 'keep-alive', 'content-length': '23483', 'content-location': 'https://www.jianshu.com/', 'content-security-policy': "script-src 'self' 'unsafe-inline' 'unsafe-eval' " '*.jianshu.com *.jianshu.io *.nkscdn.com ' '*.huanqiu.com post.star-media.cn api.geetest.com ' 'static.geetest.com dn-staticdown.qbox.me ' 'zz.bdstatic.com *.google-analytics.com ' 'hm.baidu.com nkscdn.com push.zhanzhang.baidu.com ' 'res.wx.qq.com qzonestyle.gtimg.cn ' 'as.alipayobjects.com nbrecsys.4paradigm.com ' 'shared.ydstatic.com gorgon.youdao.com ' 
'*.googlesyndication.com adservice.google.com ' "www.googletagservices.com ;style-src 'self' " "'unsafe-inline' *.jianshu.com *.jianshu.io " 'api.geetest.com static.geetest.com ' 'shared.ydstatic.com ;', 'content-type': 'text/html; charset=utf-8', 'date': 'Mon, 27 Apr 2020 01:42:48 GMT', 'etag': 'W/"bbd77e231f5e58fa82c8623683fdc1a1"', 'server': 'Tengine', 'set-cookie': 'signin_redirect=https%3A%2F%2Fwww.jianshu.com%2F; path=/, ' 'read_mode=day; path=/, default_font=font2; path=/, ' 'locale=zh-CN; path=/', 'status': '200', 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload', 'transfer-encoding': 'chunked', 'vary': 'Accept-Encoding', 'x-content-type-options': 'nosniff', 'x-frame-options': 'SAMEORIGIN', 'x-request-id': 'efd5e128-4c05-4da6-87de-3a832d6375eb', 'x-runtime': '0.004749', 'x-xss-protection': '1; mode=block'} """
[ "httplib2.Http" ]
[((134, 154), 'httplib2.Http', 'httplib2.Http', (['cache'], {}), '(cache)\n', (147, 154), False, 'import httplib2\n')]
import tensorflow as tf from robust_offline_contextual_bandits.named_results import NamedResults import numpy as np class NamedResultsTest(tf.test.TestCase): def setUp(self): np.random.seed(42) def test_creation(self): patient = NamedResults(np.random.normal(size=[10, 3])) assert patient.num_evs() == 10 assert patient.num_reps() == 3 self.assertAllClose(patient.avg_evs(), [ 0.335379, 0.35158, 0.625724, -0.128862, -1.132079, -0.42029, -0.284893, -0.527665, -0.528151, -0.172211 ]) self.assertAlmostEqual(patient.min_ev(), -1.132078602) if __name__ == '__main__': tf.test.main()
[ "numpy.random.normal", "numpy.random.seed", "tensorflow.test.main" ]
[((663, 677), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (675, 677), True, 'import tensorflow as tf\n'), ((189, 207), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (203, 207), True, 'import numpy as np\n'), ((269, 299), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[10, 3]'}), '(size=[10, 3])\n', (285, 299), True, 'import numpy as np\n')]
""" Multiple Servers linked via broadcaster example. To run this example. - 0. Setup a broadcast medium and pass its configuration to the endpoint (e.g. postgres on 'postgres://localhost:5432/' ) - 1. run this script for the servers (as many instances as you'd like) - use the PORT env-variable to run them on different ports - 2. once the servers are up, run notifier_client_test.py and connect to one of them - 3. send get request to one server on: '/trigger' - 4. See that the client recives the event -no matter which server you connected it to, or which server got the initial trigger to publish """ import sys import os sys.path.append(os.path.abspath(os.path.join(os.path.basename(__file__), ".."))) from fastapi_websocket_pubsub import PubSubEndpoint import asyncio import os from starlette.websockets import WebSocket import uvicorn from fastapi import FastAPI from fastapi.routing import APIRouter PORT = int(os.environ.get("PORT") or "8000") app = FastAPI() router = APIRouter() endpoint = PubSubEndpoint(broadcaster="postgres://localhost:5432/") @router.websocket("/pubsub") async def websocket_rpc_endpoint(websocket: WebSocket): async with endpoint.broadcaster: await endpoint.main_loop(websocket) app.include_router(router) async def events(): await asyncio.sleep(1) await endpoint.publish(["guns", "germs"]) await asyncio.sleep(1) await endpoint.publish(["germs"]) await asyncio.sleep(1) await endpoint.publish(["steel"]) @app.get("/trigger") async def trigger_events(): asyncio.create_task(events()) uvicorn.run(app, host="0.0.0.0", port=PORT)
[ "fastapi.routing.APIRouter", "fastapi.FastAPI", "uvicorn.run", "os.environ.get", "fastapi_websocket_pubsub.PubSubEndpoint", "os.path.basename", "asyncio.sleep" ]
[((972, 981), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (979, 981), False, 'from fastapi import FastAPI\n'), ((991, 1002), 'fastapi.routing.APIRouter', 'APIRouter', ([], {}), '()\n', (1000, 1002), False, 'from fastapi.routing import APIRouter\n'), ((1014, 1070), 'fastapi_websocket_pubsub.PubSubEndpoint', 'PubSubEndpoint', ([], {'broadcaster': '"""postgres://localhost:5432/"""'}), "(broadcaster='postgres://localhost:5432/')\n", (1028, 1070), False, 'from fastapi_websocket_pubsub import PubSubEndpoint\n'), ((1578, 1621), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': 'PORT'}), "(app, host='0.0.0.0', port=PORT)\n", (1589, 1621), False, 'import uvicorn\n'), ((930, 952), 'os.environ.get', 'os.environ.get', (['"""PORT"""'], {}), "('PORT')\n", (944, 952), False, 'import os\n'), ((1298, 1314), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1311, 1314), False, 'import asyncio\n'), ((1371, 1387), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1384, 1387), False, 'import asyncio\n'), ((1436, 1452), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1449, 1452), False, 'import asyncio\n'), ((679, 705), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (695, 705), False, 'import os\n')]
""" Testing all methods from oap.utils.sizing """ import unittest import oap from tests.data import array01_original as array class TestUtilsSizing(unittest.TestCase): def test_xy_diameter(self): x, y = oap.xy_diameter(array) self.assertEqual(x, 43) self.assertEqual(y, 45) def test_x_diameter(self): self.assertEqual(oap.x_diameter(array), 43) def test_y_diameter(self): self.assertEqual(oap.y_diameter(array), 45) def test_min_diameter(self): self.assertEqual(round(oap.min_diameter(array), 10), 49.6358592054) def test_max_diameter(self): self.assertEqual(round(oap.max_diameter(array), 10), 62.2414652784) def test_area_ratio(self): pass def test_sphere_volume(self): self.assertEqual(round(oap.sphere_volume(diameter=9.2), 6), 407.720083) self.assertEqual(round(oap.sphere_volume(diameter=15.3), 5), 1875.30933) self.assertEqual(round(oap.sphere_volume(diameter=42.0), 4), 38792.3861) def test_sphere_surface(self): self.assertEqual(round(oap.sphere_surface(diameter=9.2), 6), 265.904402) self.assertEqual(round(oap.sphere_surface(diameter=15.3), 6), 735.415424) self.assertEqual(round(oap.sphere_surface(diameter=42.0), 5), 5541.76944) def test_hexprism_volume(self): self.assertEqual(round(oap.hexprism_volume(height=3.5, diameter=2.4), 12), 13.094304105221) self.assertEqual(round(oap.hexprism_volume(height=9.8, diameter=8.2), 11), 428.00187890592) self.assertEqual(round(oap.hexprism_volume(height=16.7, diameter=14.6), 10), 2312.1397377604) def test_hexprism_surface(self): self.assertEqual(round(oap.hexprism_surface(height=3.5, diameter=2.4), 12), 32.682459488698) self.assertEqual(round(oap.hexprism_surface(height=9.8, diameter=8.2), 11), 328.4273222257) self.assertEqual(round(oap.hexprism_surface(height=16.7, diameter=14.6), 9), 1008.362962606)
[ "oap.xy_diameter", "oap.sphere_surface", "oap.sphere_volume", "oap.x_diameter", "oap.y_diameter", "oap.hexprism_volume", "oap.max_diameter", "oap.hexprism_surface", "oap.min_diameter" ]
[((220, 242), 'oap.xy_diameter', 'oap.xy_diameter', (['array'], {}), '(array)\n', (235, 242), False, 'import oap\n'), ((364, 385), 'oap.x_diameter', 'oap.x_diameter', (['array'], {}), '(array)\n', (378, 385), False, 'import oap\n'), ((448, 469), 'oap.y_diameter', 'oap.y_diameter', (['array'], {}), '(array)\n', (462, 469), False, 'import oap\n'), ((540, 563), 'oap.min_diameter', 'oap.min_diameter', (['array'], {}), '(array)\n', (556, 563), False, 'import oap\n'), ((650, 673), 'oap.max_diameter', 'oap.max_diameter', (['array'], {}), '(array)\n', (666, 673), False, 'import oap\n'), ((806, 837), 'oap.sphere_volume', 'oap.sphere_volume', ([], {'diameter': '(9.2)'}), '(diameter=9.2)\n', (823, 837), False, 'import oap\n'), ((886, 918), 'oap.sphere_volume', 'oap.sphere_volume', ([], {'diameter': '(15.3)'}), '(diameter=15.3)\n', (903, 918), False, 'import oap\n'), ((967, 999), 'oap.sphere_volume', 'oap.sphere_volume', ([], {'diameter': '(42.0)'}), '(diameter=42.0)\n', (984, 999), False, 'import oap\n'), ((1084, 1116), 'oap.sphere_surface', 'oap.sphere_surface', ([], {'diameter': '(9.2)'}), '(diameter=9.2)\n', (1102, 1116), False, 'import oap\n'), ((1165, 1198), 'oap.sphere_surface', 'oap.sphere_surface', ([], {'diameter': '(15.3)'}), '(diameter=15.3)\n', (1183, 1198), False, 'import oap\n'), ((1247, 1280), 'oap.sphere_surface', 'oap.sphere_surface', ([], {'diameter': '(42.0)'}), '(diameter=42.0)\n', (1265, 1280), False, 'import oap\n'), ((1366, 1411), 'oap.hexprism_volume', 'oap.hexprism_volume', ([], {'height': '(3.5)', 'diameter': '(2.4)'}), '(height=3.5, diameter=2.4)\n', (1385, 1411), False, 'import oap\n'), ((1466, 1511), 'oap.hexprism_volume', 'oap.hexprism_volume', ([], {'height': '(9.8)', 'diameter': '(8.2)'}), '(height=9.8, diameter=8.2)\n', (1485, 1511), False, 'import oap\n'), ((1566, 1613), 'oap.hexprism_volume', 'oap.hexprism_volume', ([], {'height': '(16.7)', 'diameter': '(14.6)'}), '(height=16.7, diameter=14.6)\n', (1585, 1613), False, 'import oap\n'), 
((1706, 1752), 'oap.hexprism_surface', 'oap.hexprism_surface', ([], {'height': '(3.5)', 'diameter': '(2.4)'}), '(height=3.5, diameter=2.4)\n', (1726, 1752), False, 'import oap\n'), ((1807, 1853), 'oap.hexprism_surface', 'oap.hexprism_surface', ([], {'height': '(9.8)', 'diameter': '(8.2)'}), '(height=9.8, diameter=8.2)\n', (1827, 1853), False, 'import oap\n'), ((1907, 1955), 'oap.hexprism_surface', 'oap.hexprism_surface', ([], {'height': '(16.7)', 'diameter': '(14.6)'}), '(height=16.7, diameter=14.6)\n', (1927, 1955), False, 'import oap\n')]
from typing import Iterable from copy import deepcopy from advent_of_code.runner import PuzzleTemplate from dataclasses import dataclass, field @dataclass class Board: # board juts for visualization purpose board: list[list[int]] = field(default_factory=lambda: [[None] * 5 for _ in range(5)]) # number can appear on a board at most once # maps number to indices on the board index: dict[int, tuple[int, int]] = field(default_factory=dict) # set of all number that wasn't marked unmarked_numbers: set[int] = field(default_factory=set) # number of remaining unmarked number in each row / col remaining_cols: dict[int, int] = field(default_factory=dict) remaining_rows: dict[int, int] = field(default_factory=dict) won: bool = False @classmethod def from_lines(cls, lines: Iterable[str]) -> "Board": """ Note: this functions get's an iterator, so make sure to take only what is required! """ board = cls() next(lines) # skip the first empty line for row, cols in enumerate(lines): for col, number_raw in enumerate(cols.split()): number = int(number_raw) board.board[row][col] = number board.index[number] = (row, col) board.unmarked_numbers.add(number) if row == 4: break for i in range(5): board.remaining_cols[i] = 5 board.remaining_rows[i] = 5 return board def call_number(self, number: int) -> bool: if not self.won and number in self.index: row, col = self.index[number] self.board[row][col] = "x" self.unmarked_numbers.remove(number) self.remaining_cols[col] -= 1 self.remaining_rows[row] -= 1 won = self.remaining_cols[col] == 0 or self.remaining_rows[row] == 0 if won: # bingo! self.won = True return won return False def score(self, called_number: int) -> int: """ The score of the winning board can now be calculated. Start by finding the sum of all unmarked numbers on that board. Then, multiply that sum by the number that was just called when the board won. 
""" return sum(self.unmarked_numbers) * called_number def __str__(self) -> str: return "\n".join(" ".join(str(char).rjust(2) for char in row) for row in self.board) class Puzzle(PuzzleTemplate): _input: list[int] _boards: list[Board] @classmethod def from_lines(cls, lines: Iterable[str]) -> "Puzzle": puzzle = cls() it = iter(lines) puzzle._input = [int(number) for number in next(it).split(",")] puzzle._boards = [] while it: try: puzzle._boards.append(Board.from_lines(it)) except StopIteration: break return puzzle def task_one(self) -> int: """ Return score of the board that has the first Bingo! """ boards = deepcopy(self._boards) for number in self._input: for board_num, board in enumerate(boards): if board.call_number(number): print(f"Bingo! on board {board_num} \n") print(f"{board}\n") return board.score(number) def task_two(self) -> int: """ Return score of the board that has the last Bingo! """ boards = deepcopy(self._boards) boards_remaining = len(boards) for number in self._input: for board_num, board in enumerate(boards): if board.call_number(number): boards_remaining -= 1 if boards_remaining == 0: print(f"Bingo! on last board {board_num} \n") print(f"{board}\n") return board.score(number)
[ "dataclasses.field", "copy.deepcopy" ]
[((434, 461), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (439, 461), False, 'from dataclasses import dataclass, field\n'), ((539, 565), 'dataclasses.field', 'field', ([], {'default_factory': 'set'}), '(default_factory=set)\n', (544, 565), False, 'from dataclasses import dataclass, field\n'), ((663, 690), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (668, 690), False, 'from dataclasses import dataclass, field\n'), ((728, 755), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (733, 755), False, 'from dataclasses import dataclass, field\n'), ((3117, 3139), 'copy.deepcopy', 'deepcopy', (['self._boards'], {}), '(self._boards)\n', (3125, 3139), False, 'from copy import deepcopy\n'), ((3556, 3578), 'copy.deepcopy', 'deepcopy', (['self._boards'], {}), '(self._boards)\n', (3564, 3578), False, 'from copy import deepcopy\n')]
from __future__ import annotations from typing import TYPE_CHECKING, Any from clovars.scientific import Gaussian, get_curve if TYPE_CHECKING: from clovars.scientific import Curve class Treatment: """Class representing a Treatment that influences Cells.""" def __init__( self, name: str | None = None, division_curve: Curve | None = None, death_curve: Curve | None = None, signal_disturbance: dict | None = None, fitness_memory_disturbance: int | None = None, ) -> None: """Initializes a Treatment instance.""" if name is None: name = "Treatment" if division_curve is None: division_curve = Gaussian() self.division_curve = division_curve if death_curve is None: death_curve = Gaussian() self.name = name self.death_curve = death_curve self.signal_disturbance = signal_disturbance self.fitness_memory_disturbance = fitness_memory_disturbance def division_chance( self, x: float, ) -> float: """Returns the division curve PDF evaluated at x.""" return self.division_curve(x=x) def death_chance( self, x: float, ) -> float: """Returns the death curve PDF evaluated at x.""" return self.death_curve(x=x) def plot( self, plot_division: bool = True, plot_death: bool = True, *args, **kwargs, ) -> None: """Plots the Treatment's curves.""" if plot_division is True: self.division_curve.plot_pdf(label='Division', *args, **kwargs) if plot_death is True: self.death_curve.plot_pdf(label='Death', *args, **kwargs) def get_treatment( name: str = '', division_curve: dict[str, Any] | None = None, death_curve: dict[str, Any] | None = None, signal_disturbance: dict[str, Any] | None = None, fitness_memory_disturbance: int | None = None, ) -> Treatment: """Returns a Treatment instance based on the input parameters.""" division_curve = division_curve if division_curve is not None else {} death_curve = death_curve if death_curve is not None else {} return Treatment( name=name, division_curve=get_curve(**division_curve), death_curve=get_curve(**death_curve), signal_disturbance=signal_disturbance, 
fitness_memory_disturbance=fitness_memory_disturbance, )
[ "clovars.scientific.get_curve", "clovars.scientific.Gaussian" ]
[((732, 742), 'clovars.scientific.Gaussian', 'Gaussian', ([], {}), '()\n', (740, 742), False, 'from clovars.scientific import Gaussian, get_curve\n'), ((846, 856), 'clovars.scientific.Gaussian', 'Gaussian', ([], {}), '()\n', (854, 856), False, 'from clovars.scientific import Gaussian, get_curve\n'), ((2373, 2400), 'clovars.scientific.get_curve', 'get_curve', ([], {}), '(**division_curve)\n', (2382, 2400), False, 'from clovars.scientific import Gaussian, get_curve\n'), ((2422, 2446), 'clovars.scientific.get_curve', 'get_curve', ([], {}), '(**death_curve)\n', (2431, 2446), False, 'from clovars.scientific import Gaussian, get_curve\n')]
import unittest import requests import json class TestServer(unittest.TestCase): @classmethod def setUpClass(self): self.port = 80 self.URL_BASE = 'http://localhost:{}'.format( self.port ) self.URL_USERS = self.URL_BASE + '/rest/users' self.URL_USERS_ALL = self.URL_USERS + '/all' requests.delete( self.URL_USERS_ALL ) @classmethod def tearDownClass(self): requests.delete( self.URL_USERS_ALL ) def test_index_is_up(self): response = requests.get( self.URL_BASE ) self.assertEqual(response.status_code, 200) def test_index_message(self): response = requests.get( self.URL_BASE ) response_json = response.json() self.assertEqual(response_json['status'], 'OK') def test_insert_is_up(self): user={'email':'jh<PASSWORD>', 'instrument': 'guitar'} response = requests.put( self.URL_USERS, data=user ) self.assertEqual( response.status_code, 200 ) requests.delete( self.URL_USERS_ALL ) def test_insert(self): user={'email':'jhon<PASSWORD>', 'instrument': 'guitar'} response = requests.put( self.URL_USERS, data=user) response_json = response.json() self.assertEqual(response_json['status'], 'SUCCESS') requests.delete( self.URL_USERS_ALL ) def test_find_is_up(self): user={'email':'jh<PASSWORD>', 'instrument': 'guitar'} response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) ) self.assertEqual( response.status_code, 200 ) requests.delete( self.URL_USERS_ALL ) def test_find_user(self): user={'email':'jh<PASSWORD>', 'instrument': 'guitar'} requests.put( self.URL_USERS, data=user ) response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) ) response_json = response.json() response_json['message'] self.assertEqual( [user], response_json['message'] ) requests.delete( self.URL_USERS_ALL ) def test_update_is_up(self): user={'email':'jhon@doe', 'instrument': 'guitar'} response = requests.post( self.URL_USERS, data=user ) self.assertEqual(response.status_code, 200) requests.delete( self.URL_USERS_ALL ) def test_update_user(self): user={'email':'jhon@doe', 
'instrument': 'guitar'} requests.put(self.URL_USERS, data=user) user['instrument'] = 'bass' requests.post(self.URL_USERS, data=user) response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) ) response_json = response.json() self.assertEqual( [user], response_json['message'] ) requests.delete( self.URL_USERS_ALL ) def test_delete_is_up(self): user={'email':'jhon@doe', 'instrument': 'guitar'} response = requests.delete( '{}/{}'.format( self.URL_USERS, user['email'] ) ) self.assertEqual( response.status_code, 200 ) def test_delete_user(self): user={'email':'jhon@doe', 'instrument': 'guitar'} requests.put( self.URL_USERS, data=user ) response = requests.delete( '{}/{}'.format( self.URL_USERS, user['email'] ) ) response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) ) response_json = response.json() self.assertEqual( [], response_json['message'] ) requests.delete( self.URL_USERS_ALL ) def test_readall_is_up(self): response = requests.get( self.URL_USERS_ALL ) self.assertEqual( response.status_code, 200 ) if __name__ == '__main__': unittest.main()
[ "requests.post", "requests.get", "requests.delete", "requests.put", "unittest.main" ]
[((3614, 3629), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3627, 3629), False, 'import unittest\n'), ((341, 376), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (356, 376), False, 'import requests\n'), ((435, 470), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (450, 470), False, 'import requests\n'), ((526, 553), 'requests.get', 'requests.get', (['self.URL_BASE'], {}), '(self.URL_BASE)\n', (538, 553), False, 'import requests\n'), ((663, 690), 'requests.get', 'requests.get', (['self.URL_BASE'], {}), '(self.URL_BASE)\n', (675, 690), False, 'import requests\n'), ((905, 944), 'requests.put', 'requests.put', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (917, 944), False, 'import requests\n'), ((1010, 1045), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (1025, 1045), False, 'import requests\n'), ((1160, 1199), 'requests.put', 'requests.put', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (1172, 1199), False, 'import requests\n'), ((1311, 1346), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (1326, 1346), False, 'import requests\n'), ((1590, 1625), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (1605, 1625), False, 'import requests\n'), ((1730, 1769), 'requests.put', 'requests.put', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (1742, 1769), False, 'import requests\n'), ((1998, 2033), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (2013, 2033), False, 'import requests\n'), ((2148, 2188), 'requests.post', 'requests.post', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (2161, 2188), False, 'import requests\n'), ((2252, 2287), 'requests.delete', 'requests.delete', 
(['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (2267, 2287), False, 'import requests\n'), ((2390, 2429), 'requests.put', 'requests.put', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (2402, 2429), False, 'import requests\n'), ((2474, 2514), 'requests.post', 'requests.post', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (2487, 2514), False, 'import requests\n'), ((2708, 2743), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (2723, 2743), False, 'import requests\n'), ((3079, 3118), 'requests.put', 'requests.put', (['self.URL_USERS'], {'data': 'user'}), '(self.URL_USERS, data=user)\n', (3091, 3118), False, 'import requests\n'), ((3399, 3434), 'requests.delete', 'requests.delete', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (3414, 3434), False, 'import requests\n'), ((3492, 3524), 'requests.get', 'requests.get', (['self.URL_USERS_ALL'], {}), '(self.URL_USERS_ALL)\n', (3504, 3524), False, 'import requests\n')]
__author__ = "<NAME>" __version__ = "1.0.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" """ Description: This is batch job for uploading result file to S3. """ import os import argparse from libraries.botoClass import botoHandler ## argparse setting parser = argparse.ArgumentParser(prog='step3_upload_to_s3.py') parser.add_argument('-c','--ctype', type=str, dest='celltype', required=True,\ choices=['CD4','CD8','CD14'],help='Cell type for extraction, default = CD8') args = parser.parse_args() if __name__ == "__main__": uploadDataBucket = os.environ['uploadbucket'] # openkbc-ms-casting-bucket outputPath = os.environ['efspoint'] # /output/ ### Data prepration s3 = botoHandler(uploadDataBucket) # Call boto3 outputFile = outputPath+'DEG_'+args.celltype+'.result' f = open(outputFile, 'r').read() s3.uploadFile(outputFile, f, datatype='txt')
[ "libraries.botoClass.botoHandler", "argparse.ArgumentParser" ]
[((265, 318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""step3_upload_to_s3.py"""'}), "(prog='step3_upload_to_s3.py')\n", (288, 318), False, 'import argparse\n'), ((700, 729), 'libraries.botoClass.botoHandler', 'botoHandler', (['uploadDataBucket'], {}), '(uploadDataBucket)\n', (711, 729), False, 'from libraries.botoClass import botoHandler\n')]
#------------------------------------------------------------------------------- # Add Two Numbers #------------------------------------------------------------------------------- # By <NAME> # https://leetcode.com/problems/add-two-numbers/ # Completed 12/3/20 #------------------------------------------------------------------------------- # Approach #------------------------------------------------------------------------------- """ 1. Start from head 2. While l1 or l2 is not null, keep advancing 3. Add l1 and l2 as value of l3, keep carry in mind Time: O(n) Space: O(n) """ #------------------------------------------------------------------------------- # Soluton #------------------------------------------------------------------------------- class ListNode: def __init__(self, val=0, next=None): self.val = val self.next = next class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: l3 = ListNode() l3_head = l3 carry = 0 while l1 is not None or l2 is not None: l1_val = 0 if l1 is None else l1.val l2_val = 0 if l2 is None else l2.val _sum = l1_val + l2_val + carry carry = 0 if _sum >= 10: carry = _sum // 10 _sum = _sum % 10 l3.val = _sum if l1: l1 = l1.next if l2: l2 = l2.next l3.next = ListNode() prev = l3 l3 = l3.next if carry > 0: l3.val = carry else: del l3 prev.next = None return l3_head #------------------------------------------------------------------------------- # Unit Test #------------------------------------------------------------------------------- import unittest if __name__ == '__main__': unittest.main()
[ "unittest.main" ]
[((1908, 1923), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1921, 1923), False, 'import unittest\n')]
import matplotlib.pyplot as plt import numpy as np def plot_convolution(f, g): fig, (ax1, ax2, ax3) = plt.subplots(3, 1) ax1.set_yticklabels([]) ax1.set_xticklabels([]) ax1.plot(f, color='blue', label='f') ax1.legend() ax2.set_yticklabels([]) ax2.set_xticklabels([]) ax2.plot(g, color='red', label='g') ax2.legend() filtered = np.convolve(f, g, "same") / sum(g) ax3.set_yticklabels([]) ax3.set_xticklabels([]) ax3.plot(filtered, color='green', label='f * g') ax3.legend() plt.show() def plot_convolution_step_by_step(f, g): fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1) ax1.set_yticklabels([]) ax1.set_xticklabels([]) ax1.plot(f, color='blue', label='f') ax1.plot(np.roll(g, -10000), color='red', label='g') ax2.set_yticklabels([]) ax2.set_xticklabels([]) ax2.plot(f, color='blue', label='f') ax2.plot(np.roll(g, -5000), color='red', label='g') ax3.set_yticklabels([]) ax3.set_xticklabels([]) ax3.plot(f, color='blue', label='f') ax3.plot(g, color='red', label='g') ax4.set_yticklabels([]) ax4.set_xticklabels([]) ax4.plot(f, color='blue', label='f') ax4.plot(np.roll(g, 5000), color='red', label='g') ax5.set_yticklabels([]) ax5.set_xticklabels([]) ax5.plot(f, color='blue', label='f') ax5.plot(np.roll(g, 10000), color='red', label='g') plt.show() signal = np.zeros(30000) signal[10000:20000] = 1 kernel = np.zeros(30000) kernel[10000:20000] = np.linspace(1, 0, 10000) plot_convolution(signal, kernel) plot_convolution_step_by_step(signal, kernel)
[ "numpy.convolve", "numpy.roll", "numpy.zeros", "numpy.linspace", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ]
[((1427, 1442), 'numpy.zeros', 'np.zeros', (['(30000)'], {}), '(30000)\n', (1435, 1442), True, 'import numpy as np\n'), ((1477, 1492), 'numpy.zeros', 'np.zeros', (['(30000)'], {}), '(30000)\n', (1485, 1492), True, 'import numpy as np\n'), ((1515, 1539), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(10000)'], {}), '(1, 0, 10000)\n', (1526, 1539), True, 'import numpy as np\n'), ((108, 126), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (120, 126), True, 'import matplotlib.pyplot as plt\n'), ((537, 547), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (545, 547), True, 'import matplotlib.pyplot as plt\n'), ((628, 646), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {}), '(5, 1)\n', (640, 646), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1413, 1415), True, 'import matplotlib.pyplot as plt\n'), ((371, 396), 'numpy.convolve', 'np.convolve', (['f', 'g', '"""same"""'], {}), "(f, g, 'same')\n", (382, 396), True, 'import numpy as np\n'), ((757, 775), 'numpy.roll', 'np.roll', (['g', '(-10000)'], {}), '(g, -10000)\n', (764, 775), True, 'import numpy as np\n'), ((912, 929), 'numpy.roll', 'np.roll', (['g', '(-5000)'], {}), '(g, -5000)\n', (919, 929), True, 'import numpy as np\n'), ((1204, 1220), 'numpy.roll', 'np.roll', (['g', '(5000)'], {}), '(g, 5000)\n', (1211, 1220), True, 'import numpy as np\n'), ((1357, 1374), 'numpy.roll', 'np.roll', (['g', '(10000)'], {}), '(g, 10000)\n', (1364, 1374), True, 'import numpy as np\n')]
import threading
import copy
import time

from VisionROS.ROS_CONSTANTS import *
from VisionUtils.TableDimensions import TableDimensions

try:
    import rospy
    from sensor_msgs.msg import Image
    from cv_bridge import CvBridge, CvBridgeError
    from geometry_msgs.msg import Point
    from std_msgs.msg import Int32, Float32MultiArray
    from std_msgs.msg import Bool
except ImportError:
    pass


class RadiusConfigSubscriber:
    """Bridges the web UI's radius slider to a PuckDetectorConfiguration."""

    def __init__(self, i_config):
        """RadiusConfigSubscriber's constructor.

        Initializes the ROS subscribers used to get the value of the radius
        of the puck (in pixels) and the "apply" button state.

        Args:
            i_config: A pointer to a PuckDetectorConfiguration object
        """
        self.m_config = i_config
        self.m_radiusSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_RADIUS_TOPIC_NAME, Int32, self.update_Radius)
        self.m_applySubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_APPLY_TOPIC_NAME, Bool, self.okPressed)
        self.m_config.DisplayRadius()

    def okPressed(self, i_apply):
        """Calls userWantsToQuit() so the DisplayRadius() thread dies,
        then unregisters this object's subscribers."""
        if i_apply.data:
            self.m_config.userWantsToQuit()
            self.m_radiusSubscriber.unregister()
            self.m_applySubscriber.unregister()

    def update_Radius(self, i_radius):
        """Called when the user moves the radius slider; updates the
        internal radius value of the PuckDetectorConfiguration object."""
        self.m_config.SetRadiusValue(i_radius.data)


class HSVConfigSubscriber:
    """Bridges the web UI's H/S/V sliders to a PuckDetectorConfiguration."""

    # Class-level publishers so the web app can be told the default HSV
    # values computed by autoConfiguration() (see publishCurrentValues()).
    HPublisher = rospy.Publisher(ROS_SUBSCRIBER_CONFIG_H_TOPIC_NAME, Int32, queue_size=10)
    SPublisher = rospy.Publisher(ROS_SUBSCRIBER_CONFIG_S_TOPIC_NAME, Int32, queue_size=10)
    VPublisher = rospy.Publisher(ROS_SUBSCRIBER_CONFIG_V_TOPIC_NAME, Int32, queue_size=10)

    def __init__(self, i_config):
        """HSVConfigSubscriber's constructor.

        Initializes the ROS subscribers that are used to get the HSV values
        of the puck and snapshots the auto-configured defaults so they can
        be restored by resetValues().

        Args:
            i_config: A pointer to a PuckDetectorConfiguration object
        """
        self.m_config = i_config
        self.defaultLowerValues = copy.deepcopy(self.m_config.m_lowerColor)
        self.defaultUpperValues = copy.deepcopy(self.m_config.m_upperColor)
        time.sleep(0.5)
        # So that the webApp knows the default values that were calculated
        # by the autoConfiguration()
        self.publishCurrentValues()
        self.m_applySubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_APPLY_TOPIC_NAME, Bool, self.okPressed)
        self.m_resetSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_HSV_RESET_TOPIC_NAME, Bool, self.resetValues)
        self.m_HSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_H_TOPIC_NAME, Int32, self.update_H)
        self.m_SSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_S_TOPIC_NAME, Int32, self.update_S)
        self.m_VSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_V_TOPIC_NAME, Int32, self.update_V)
        self.m_config.SetConfiguration()

    def publishCurrentValues(self):
        """Publishes the configuration's current H, S and V values so the
        web app's sliders reflect them."""
        h = self.m_config.GetHValue()
        HSVConfigSubscriber.HPublisher.publish(h)
        s = self.m_config.GetSValue()
        HSVConfigSubscriber.SPublisher.publish(s)
        v = self.m_config.GetVValue()
        HSVConfigSubscriber.VPublisher.publish(v)

    def resetValues(self, i_reset):
        """Called when the user clicks "reset": restores the H, S and V
        values generated by autoConfiguration() and republishes them."""
        if i_reset.data:
            self.m_config.m_lowerColor = copy.deepcopy(self.defaultLowerValues)
            self.m_config.m_upperColor = copy.deepcopy(self.defaultUpperValues)
            self.publishCurrentValues()
            # BUG FIX: previously passed the bound method object
            # (SetHValue(self.m_config.GetHValue)) instead of its value;
            # call GetHValue() so the feed actually refreshes.
            self.m_config.SetHValue(self.m_config.GetHValue())

    def okPressed(self, i_apply):
        """Calls userWantsToQuit() so the SetConfiguration() thread dies,
        then unregisters this object's subscribers."""
        if i_apply.data:
            self.m_config.userWantsToQuit()
            self.m_applySubscriber.unregister()
            self.m_resetSubscriber.unregister()
            self.m_HSubscriber.unregister()
            self.m_SSubscriber.unregister()
            self.m_VSubscriber.unregister()

    def update_H(self, i_H):
        """Called when the user moves the H slider; updates the internal H
        value of the PuckDetectorConfiguration object."""
        self.m_config.SetHValue(i_H.data)

    def update_S(self, i_S):
        """Called when the user moves the S slider; updates the internal S
        value of the PuckDetectorConfiguration object."""
        # BUG FIX: was SetHValue (copy-paste). NOTE(review): assumes
        # PuckDetectorConfiguration provides SetSValue analogous to
        # GetSValue — confirm against that class.
        self.m_config.SetSValue(i_S.data)

    def update_V(self, i_V):
        """Called when the user moves the V slider; updates the internal V
        value of the PuckDetectorConfiguration object."""
        # BUG FIX: was SetHValue (copy-paste). NOTE(review): assumes
        # PuckDetectorConfiguration provides SetVValue analogous to
        # GetVValue — confirm against that class.
        self.m_config.SetVValue(i_V.data)


class DimensionsConverterConfigSubscriber:
    """Bridges the table-dimension configuration UI to a
    DimensionsConverterConfiguration."""

    def __init__(self, i_DimensionsConverterConfiguration):
        """DimensionsConverterConfigSubscriber's constructor.

        Initializes the ROS subscribers used to get the edges' values
        (in pixels and in meters).

        Args:
            i_DimensionsConverterConfiguration: A pointer to a
                DimensionsConverterConfiguration object
        """
        self.m_DimensionsConverterConfiguration = i_DimensionsConverterConfiguration
        self.m_resetSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_TABLE_RESET_TOPIC_NAME, Bool, self.retryPressed)
        self.m_tableDimensionsSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_TABLE_CHANGED_TOPIC_NAME, Bool, self.onTableDimensionsChanges)
        self.m_DimensionsConverterConfiguration.DisplayEdges()

    def onTableDimensionsChanges(self, i_msg):
        """Called when the table dimensions change: reads the new
        height/width from the parameter server, pushes them into the
        configuration, recomputes the pixel-to-meters ratio and
        unregisters this object's subscribers."""
        self.m_DimensionsConverterConfiguration.userWantsToQuit()
        tableDimensions = TableDimensions()
        tableDimensions.setHeight(rospy.get_param(ROS_TABLE_DIMENSIONS_HEIGHT_TOPIC_NAME))
        tableDimensions.setWidth(rospy.get_param(ROS_TABLE_DIMENSIONS_WIDTH_TOPIC_NAME))
        self.m_DimensionsConverterConfiguration.setSidesDimensions(tableDimensions)
        self.m_DimensionsConverterConfiguration.computePixelToMetersRatio()
        self.m_resetSubscriber.unregister()
        self.m_tableDimensionsSubscriber.unregister()

    def retryPressed(self, i_msg=None):
        """Called when the GUI's "Retry" button is pressed: removes all
        edges that were selected.

        BUG FIX: this is registered as a rospy subscriber callback, which
        is invoked with the Bool message as an argument; the original
        zero-argument signature would raise TypeError when the topic
        fired. The default value keeps any direct callers working.
        """
        self.m_DimensionsConverterConfiguration.resetEdges()
[ "rospy.Subscriber", "rospy.get_param", "VisionUtils.TableDimensions.TableDimensions", "time.sleep", "copy.deepcopy", "rospy.Publisher" ]
[((1725, 1798), 'rospy.Publisher', 'rospy.Publisher', (['ROS_SUBSCRIBER_CONFIG_H_TOPIC_NAME', 'Int32'], {'queue_size': '(10)'}), '(ROS_SUBSCRIBER_CONFIG_H_TOPIC_NAME, Int32, queue_size=10)\n', (1740, 1798), False, 'import rospy\n'), ((1816, 1889), 'rospy.Publisher', 'rospy.Publisher', (['ROS_SUBSCRIBER_CONFIG_S_TOPIC_NAME', 'Int32'], {'queue_size': '(10)'}), '(ROS_SUBSCRIBER_CONFIG_S_TOPIC_NAME, Int32, queue_size=10)\n', (1831, 1889), False, 'import rospy\n'), ((1907, 1980), 'rospy.Publisher', 'rospy.Publisher', (['ROS_SUBSCRIBER_CONFIG_V_TOPIC_NAME', 'Int32'], {'queue_size': '(10)'}), '(ROS_SUBSCRIBER_CONFIG_V_TOPIC_NAME, Int32, queue_size=10)\n', (1922, 1980), False, 'import rospy\n'), ((814, 903), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_RADIUS_TOPIC_NAME', 'Int32', 'self.update_Radius'], {}), '(ROS_SUBSCRIBER_CONFIG_RADIUS_TOPIC_NAME, Int32, self.\n update_Radius)\n', (830, 903), False, 'import rospy\n'), ((932, 1010), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_APPLY_TOPIC_NAME', 'Bool', 'self.okPressed'], {}), '(ROS_SUBSCRIBER_CONFIG_APPLY_TOPIC_NAME, Bool, self.okPressed)\n', (948, 1010), False, 'import rospy\n'), ((2338, 2379), 'copy.deepcopy', 'copy.deepcopy', (['self.m_config.m_lowerColor'], {}), '(self.m_config.m_lowerColor)\n', (2351, 2379), False, 'import copy\n'), ((2414, 2455), 'copy.deepcopy', 'copy.deepcopy', (['self.m_config.m_upperColor'], {}), '(self.m_config.m_upperColor)\n', (2427, 2455), False, 'import copy\n'), ((2465, 2480), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2475, 2480), False, 'import time\n'), ((2646, 2724), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_APPLY_TOPIC_NAME', 'Bool', 'self.okPressed'], {}), '(ROS_SUBSCRIBER_CONFIG_APPLY_TOPIC_NAME, Bool, self.okPressed)\n', (2662, 2724), False, 'import rospy\n'), ((2758, 2847), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_HSV_RESET_TOPIC_NAME', 'Bool', 'self.resetValues'], {}), 
'(ROS_SUBSCRIBER_CONFIG_HSV_RESET_TOPIC_NAME, Bool, self.\n resetValues)\n', (2774, 2847), False, 'import rospy\n'), ((2873, 2947), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_H_TOPIC_NAME', 'Int32', 'self.update_H'], {}), '(ROS_SUBSCRIBER_CONFIG_H_TOPIC_NAME, Int32, self.update_H)\n', (2889, 2947), False, 'import rospy\n'), ((2977, 3051), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_S_TOPIC_NAME', 'Int32', 'self.update_S'], {}), '(ROS_SUBSCRIBER_CONFIG_S_TOPIC_NAME, Int32, self.update_S)\n', (2993, 3051), False, 'import rospy\n'), ((3081, 3155), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_V_TOPIC_NAME', 'Int32', 'self.update_V'], {}), '(ROS_SUBSCRIBER_CONFIG_V_TOPIC_NAME, Int32, self.update_V)\n', (3097, 3155), False, 'import rospy\n'), ((5853, 5945), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_TABLE_RESET_TOPIC_NAME', 'Bool', 'self.retryPressed'], {}), '(ROS_SUBSCRIBER_CONFIG_TABLE_RESET_TOPIC_NAME, Bool, self.\n retryPressed)\n', (5869, 5945), False, 'import rospy\n'), ((5984, 6090), 'rospy.Subscriber', 'rospy.Subscriber', (['ROS_SUBSCRIBER_CONFIG_TABLE_CHANGED_TOPIC_NAME', 'Bool', 'self.onTableDimensionsChanges'], {}), '(ROS_SUBSCRIBER_CONFIG_TABLE_CHANGED_TOPIC_NAME, Bool, self\n .onTableDimensionsChanges)\n', (6000, 6090), False, 'import rospy\n'), ((6490, 6507), 'VisionUtils.TableDimensions.TableDimensions', 'TableDimensions', ([], {}), '()\n', (6505, 6507), False, 'from VisionUtils.TableDimensions import TableDimensions\n'), ((3829, 3867), 'copy.deepcopy', 'copy.deepcopy', (['self.defaultLowerValues'], {}), '(self.defaultLowerValues)\n', (3842, 3867), False, 'import copy\n'), ((3909, 3947), 'copy.deepcopy', 'copy.deepcopy', (['self.defaultUpperValues'], {}), '(self.defaultUpperValues)\n', (3922, 3947), False, 'import copy\n'), ((6543, 6598), 'rospy.get_param', 'rospy.get_param', (['ROS_TABLE_DIMENSIONS_HEIGHT_TOPIC_NAME'], {}), '(ROS_TABLE_DIMENSIONS_HEIGHT_TOPIC_NAME)\n', 
(6558, 6598), False, 'import rospy\n'), ((6633, 6687), 'rospy.get_param', 'rospy.get_param', (['ROS_TABLE_DIMENSIONS_WIDTH_TOPIC_NAME'], {}), '(ROS_TABLE_DIMENSIONS_WIDTH_TOPIC_NAME)\n', (6648, 6687), False, 'import rospy\n')]
from __future__ import print_function from amd.rali.plugin.tf import RALIIterator from amd.rali.pipeline import Pipeline import amd.rali.ops as ops import amd.rali.types as types import sys import tensorflow.compat.v1 as tf tf.disable_v2_behavior() import numpy as np ############################### HYPER PARAMETERS FOR TRAINING ############################### learning_rate = 0.001 image_size = 28 # Network Parameters n_hidden_1 = 256 # 1st layer number of neurons n_hidden_2 = 256 # 2nd layer number of neurons num_input = 784 # MNIST data input (img shape: 28*28) num_classes = 10 # MNIST total classes (0-9 digits) ############################### HYPER PARAMETERS FOR TRAINING ############################### def get_label_one_hot(label_ndArray): one_hot_vector_list = [] for label in label_ndArray: one_hot_vector = np.zeros(num_classes) np.put(one_hot_vector, label - 1, 1) one_hot_vector_list.append(one_hot_vector) return one_hot_vector_list # Create model weights = { 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])), 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes])) } biases = { 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 'out': tf.Variable(tf.random_normal([num_classes])) } def neural_net(x): # Hidden fully connected layer with 256 neurons layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) # Hidden fully connected layer with 256 neurons layer_1 = tf.nn.relu(layer_1) layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) layer_2 = tf.nn.relu(layer_2) # Output fully connected layer with a neuron for each class out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] return out_layer #helper function not used in training def decode(tfrecord_serialized): tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={ 'image/height': tf.FixedLenFeature([], tf.int64), 'image/width': 
tf.FixedLenFeature([], tf.int64), 'image/class/label': tf.FixedLenFeature([], tf.int64), 'image/raw': tf.FixedLenFeature([], tf.string), }, name='features') image = tf.decode_raw(tfrecord_features['image/raw'], tf.float32) image.set_shape([784]) label = tf.cast(tfrecord_features['image/class/label'], tf.int32) # image_batch, label_batch = tf.train.batch([image, label], batch_size=bs) return image, label #RALI pipeline class HybridPipe(Pipeline): def __init__(self, feature_key_map, tfrecordreader_type, batch_size, num_threads, device_id, data_dir, crop, rali_cpu = True): super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id,rali_cpu=rali_cpu) self.input = ops.TFRecordReader(path=data_dir, index_path = "", reader_type=tfrecordreader_type, user_feature_key_map=feature_key_map, features={ 'image/encoded':tf.FixedLenFeature((), tf.string, ""), 'image/class/label':tf.FixedLenFeature([1], tf.int64, -1), 'image/filename':tf.FixedLenFeature((), tf.string, "") }, ) rali_device = 'cpu' if rali_cpu else 'gpu' decoder_device = 'cpu' if rali_cpu else 'mixed' self.decode = ops.ImageDecoderRaw(user_feature_key_map=feature_key_map, device=decoder_device, output_type=types.RGB) #self.res = ops.Resize(device=rali_device, resize_x=crop[0], resize_y=crop[1]) self.cmnp = ops.CropMirrorNormalize(device="cpu", output_dtype=types.FLOAT, output_layout=types.NCHW, crop=(crop,crop), image_type=types.GRAY, mean=[0 ,0,0], std=[255,255,255], mirror=0) self.coin = ops.CoinFlip(probability=0.5) print('rali "{0}" variant'.format(rali_device)) def define_graph(self): inputs = self.input(name ="Reader") images = inputs["image/encoded"] labels = inputs["image/class/label"] images = self.decode(images) #rng = self.coin() output = self.cmnp(images) return [output, labels] # compute accuracy def compute_accuracy(predictions, labels): correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_predictions, 
tf.float32)) return accuracy def train_mnist_rali(data_path, _rali_cpu, batch_size): # setup keep_prob input_X = tf.placeholder('float32',shape = (batch_size,784)) labels = tf.placeholder('float32',shape = (batch_size,10)) logits = neural_net(input_X) cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits), name="loss" ) optimizer = tf.train.AdamOptimizer().minimize(cost) train_prediction = tf.nn.softmax(logits) accuracy = compute_accuracy(train_prediction, labels) #correct_label = tf.argmax(labels, 1) num_epochs = 10 crop_size = 28 TFRecordReaderType = 0 featureKeyMap = { 'image/encoded':'image_raw', 'image/class/label':'label', 'image/filename':'' } trainPipe = HybridPipe(feature_key_map=featureKeyMap, tfrecordreader_type=TFRecordReaderType, batch_size=batch_size, num_threads=1, device_id=0, data_dir=data_path+"/train", crop=crop_size, rali_cpu=_rali_cpu) valPipe = HybridPipe(feature_key_map=featureKeyMap, tfrecordreader_type=TFRecordReaderType, batch_size=batch_size, num_threads=1, device_id=0, data_dir=data_path+"/val", crop=crop_size, rali_cpu=_rali_cpu) trainPipe.build() valPipe.build() trainIterator = RALIIterator(trainPipe) valIterator = RALIIterator(valPipe) with tf.Session() as sess: sess.run(tf.initialize_all_variables()) for epoch in range(num_epochs): print('\n\n----------------------------Training Model for Epoch: ', epoch, "-----------------------------------------------") epoch_loss = 0 train_accuracy = 0 for i, (image_train, label_train) in enumerate(trainIterator, 0): image_train_res = image_train.reshape(batch_size, 784) train_label_one_hot_list = get_label_one_hot(label_train) _, c, tacc = sess.run([optimizer, cost, accuracy], feed_dict={input_X:image_train_res, labels: train_label_one_hot_list}) epoch_loss += c train_accuracy += tacc print('Epoch', epoch, 'completed out of',num_epochs,'loss:',epoch_loss, 'accuracy:',(train_accuracy*100)/i, 'count :', i) #run evaluation for every epoch mean_acc = 0 
print("\n\n----------------------------Evaluating Model ---------------------------------------------------------------") for j, (val_image_ndArray, val_label_ndArray) in enumerate(valIterator, 0): #val_image_ndArray_transposed = np.transpose(val_image_ndArray, [0, 2, 3, 1]) val_image_ndArray_res = val_image_ndArray.reshape(batch_size, 784) val_label_one_hot_list = get_label_one_hot(val_label_ndArray) val_accuracy = sess.run(accuracy, #[optimizer, accuracy, prediction, correct_label, correct_pred], feed_dict={input_X: val_image_ndArray_res, labels: val_label_one_hot_list}) mean_acc += val_accuracy #mean_loss = mean_loss + val_loss #num_correct_predicate = 0 #for predicate in correct_predicate: # if predicate == True: # num_correct_predicate += 1 #print ("Step :: %s\tTarget :: %s\tPrediction :: %s\tCorrect Predictions :: %s/%s\tValidation Loss :: %.2f\tValidation Accuracy :: %.2f%%\t" % (j, val_target, val_prediction, num_correct_predicate, len(correct_predicate), val_loss, (val_accuracy * 100))) mean_acc = (mean_acc * 100) / j #mean_loss = (mean_loss * 100)/ j print("\nSUMMARY:\nMean Accuracy :: %.2f%% count: %d" % (mean_acc, j)) def main(): if len(sys.argv) < 4: print ('Please pass mnistTFRecord_dir cpu/gpu batch_size') exit(0) image_path = sys.argv[1] if(sys.argv[2] == "cpu"): _rali_cpu = True else: _rali_cpu = False bs = int(sys.argv[3]) train_mnist_rali(image_path, _rali_cpu, bs) if __name__ == '__main__': main()
[ "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.FixedLenFeature", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.nn.relu", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.nn.softmax", "tensorflow.compat.v1.argmax", "tenso...
[((225, 249), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (247, 249), True, 'import tensorflow.compat.v1 as tf\n'), ((1547, 1566), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['layer_1'], {}), '(layer_1)\n', (1557, 1566), True, 'import tensorflow.compat.v1 as tf\n'), ((1645, 1664), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['layer_2'], {}), '(layer_2)\n', (1655, 1664), True, 'import tensorflow.compat.v1 as tf\n'), ((2197, 2254), 'tensorflow.compat.v1.decode_raw', 'tf.decode_raw', (["tfrecord_features['image/raw']", 'tf.float32'], {}), "(tfrecord_features['image/raw'], tf.float32)\n", (2210, 2254), True, 'import tensorflow.compat.v1 as tf\n'), ((2288, 2345), 'tensorflow.compat.v1.cast', 'tf.cast', (["tfrecord_features['image/class/label']", 'tf.int32'], {}), "(tfrecord_features['image/class/label'], tf.int32)\n", (2295, 2345), True, 'import tensorflow.compat.v1 as tf\n'), ((4318, 4368), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['"""float32"""'], {'shape': '(batch_size, 784)'}), "('float32', shape=(batch_size, 784))\n", (4332, 4368), True, 'import tensorflow.compat.v1 as tf\n'), ((4379, 4428), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['"""float32"""'], {'shape': '(batch_size, 10)'}), "('float32', shape=(batch_size, 10))\n", (4393, 4428), True, 'import tensorflow.compat.v1 as tf\n'), ((4641, 4662), 'tensorflow.compat.v1.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (4654, 4662), True, 'import tensorflow.compat.v1 as tf\n'), ((5392, 5415), 'amd.rali.plugin.tf.RALIIterator', 'RALIIterator', (['trainPipe'], {}), '(trainPipe)\n', (5404, 5415), False, 'from amd.rali.plugin.tf import RALIIterator\n'), ((5431, 5452), 'amd.rali.plugin.tf.RALIIterator', 'RALIIterator', (['valPipe'], {}), '(valPipe)\n', (5443, 5452), False, 'from amd.rali.plugin.tf import RALIIterator\n'), ((831, 852), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (839, 852), True, 'import 
numpy as np\n'), ((855, 891), 'numpy.put', 'np.put', (['one_hot_vector', '(label - 1)', '(1)'], {}), '(one_hot_vector, label - 1, 1)\n', (861, 891), True, 'import numpy as np\n'), ((1012, 1053), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[num_input, n_hidden_1]'], {}), '([num_input, n_hidden_1])\n', (1028, 1053), True, 'import tensorflow.compat.v1 as tf\n'), ((1075, 1117), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[n_hidden_1, n_hidden_2]'], {}), '([n_hidden_1, n_hidden_2])\n', (1091, 1117), True, 'import tensorflow.compat.v1 as tf\n'), ((1140, 1183), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[n_hidden_2, num_classes]'], {}), '([n_hidden_2, num_classes])\n', (1156, 1183), True, 'import tensorflow.compat.v1 as tf\n'), ((1217, 1247), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[n_hidden_1]'], {}), '([n_hidden_1])\n', (1233, 1247), True, 'import tensorflow.compat.v1 as tf\n'), ((1269, 1299), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[n_hidden_2]'], {}), '([n_hidden_2])\n', (1285, 1299), True, 'import tensorflow.compat.v1 as tf\n'), ((1322, 1353), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['[num_classes]'], {}), '([num_classes])\n', (1338, 1353), True, 'import tensorflow.compat.v1 as tf\n'), ((1444, 1471), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['x', "weights['h1']"], {}), "(x, weights['h1'])\n", (1453, 1471), True, 'import tensorflow.compat.v1 as tf\n'), ((1585, 1618), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['layer_1', "weights['h2']"], {}), "(layer_1, weights['h2'])\n", (1594, 1618), True, 'import tensorflow.compat.v1 as tf\n'), ((1739, 1773), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['layer_2', "weights['out']"], {}), "(layer_2, weights['out'])\n", (1748, 1773), True, 'import tensorflow.compat.v1 as tf\n'), ((3221, 3329), 'amd.rali.ops.ImageDecoderRaw', 'ops.ImageDecoderRaw', ([], {'user_feature_key_map': 'feature_key_map', 'device': 
'decoder_device', 'output_type': 'types.RGB'}), '(user_feature_key_map=feature_key_map, device=\n decoder_device, output_type=types.RGB)\n', (3240, 3329), True, 'import amd.rali.ops as ops\n'), ((3420, 3606), 'amd.rali.ops.CropMirrorNormalize', 'ops.CropMirrorNormalize', ([], {'device': '"""cpu"""', 'output_dtype': 'types.FLOAT', 'output_layout': 'types.NCHW', 'crop': '(crop, crop)', 'image_type': 'types.GRAY', 'mean': '[0, 0, 0]', 'std': '[255, 255, 255]', 'mirror': '(0)'}), "(device='cpu', output_dtype=types.FLOAT,\n output_layout=types.NCHW, crop=(crop, crop), image_type=types.GRAY,\n mean=[0, 0, 0], std=[255, 255, 255], mirror=0)\n", (3443, 3606), True, 'import amd.rali.ops as ops\n'), ((3675, 3704), 'amd.rali.ops.CoinFlip', 'ops.CoinFlip', ([], {'probability': '(0.5)'}), '(probability=0.5)\n', (3687, 3704), True, 'import amd.rali.ops as ops\n'), ((4095, 4120), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['predictions', '(1)'], {}), '(predictions, 1)\n', (4104, 4120), True, 'import tensorflow.compat.v1 as tf\n'), ((4122, 4142), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (4131, 4142), True, 'import tensorflow.compat.v1 as tf\n'), ((4171, 4211), 'tensorflow.compat.v1.cast', 'tf.cast', (['correct_predictions', 'tf.float32'], {}), '(correct_predictions, tf.float32)\n', (4178, 4211), True, 'import tensorflow.compat.v1 as tf\n'), ((4483, 4552), 'tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (4522, 4552), True, 'import tensorflow.compat.v1 as tf\n'), ((5460, 5472), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (5470, 5472), True, 'import tensorflow.compat.v1 as tf\n'), ((4581, 4605), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (4603, 4605), True, 'import tensorflow.compat.v1 as tf\n'), ((5493, 5522), 
'tensorflow.compat.v1.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5520, 5522), True, 'import tensorflow.compat.v1 as tf\n'), ((1975, 2007), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (1993, 2007), True, 'import tensorflow.compat.v1 as tf\n'), ((2026, 2058), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2044, 2058), True, 'import tensorflow.compat.v1 as tf\n'), ((2083, 2115), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2101, 2115), True, 'import tensorflow.compat.v1 as tf\n'), ((2132, 2165), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (2150, 2165), True, 'import tensorflow.compat.v1 as tf\n'), ((2909, 2946), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string', '""""""'], {}), "((), tf.string, '')\n", (2927, 2946), True, 'import tensorflow.compat.v1 as tf\n'), ((2979, 3016), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64', '(-1)'], {}), '([1], tf.int64, -1)\n', (2997, 3016), True, 'import tensorflow.compat.v1 as tf\n'), ((3047, 3084), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string', '""""""'], {}), "((), tf.string, '')\n", (3065, 3084), True, 'import tensorflow.compat.v1 as tf\n')]
# coding=utf8
# Entry point: download a single YouTube URL as MP3 and print the result.
import sys, traceback
import pprint

from youtube_mp3 import YouTubeMP3

if __name__ == '__main__':
    # Exactly one positional argument (the video URL) is required.
    if len(sys.argv) != 2:
        raise ValueError("Missing URL argument")
    url = sys.argv[1]
    downloader = YouTubeMP3()
    print(downloader.download(url))
[ "youtube_mp3.YouTubeMP3" ]
[((231, 243), 'youtube_mp3.YouTubeMP3', 'YouTubeMP3', ([], {}), '()\n', (241, 243), False, 'from youtube_mp3 import YouTubeMP3\n')]
import random

import pytest

from rolling_backup import backup

CONTENT = "Hello"


def create_backups(image_file, num: int):
    """Write *num* successive versions of *image_file*, backing up after
    each write, then verify the rolling set: slot ``.00`` holds the
    newest content and slot ``.{num-1:02d}`` the oldest."""
    parent = image_file.dirpath()
    for version in range(num):
        image_file.write(f"{CONTENT} - {version}")
        assert backup(str(image_file), num_to_keep=num)
        assert (parent / f"{image_file.basename}.{version:02d}").exists()
    # Newest-first ordering: slot i holds the (num - i - 1)-th write.
    for slot in range(num):
        expected = f"{CONTENT} - {num - slot - 1}"
        assert (parent / f"{image_file.basename}.{slot:02d}").read() == expected


@pytest.fixture(scope="function")
def image_file(tmpdir_factory):
    """A fresh file containing CONTENT inside a temporary directory."""
    path = tmpdir_factory.mktemp("data").join("img.png")
    path.write(CONTENT)
    return path


def test_dummy(image_file):
    create_backups(image_file, 12)


def test_rollover(image_file):
    NUM = 12
    create_backups(image_file, NUM)
    # One extra backup must recycle a slot rather than grow the directory:
    # the directory keeps the image itself plus NUM backup files.
    expected_entries = NUM + 1
    assert len(image_file.dirpath().listdir()) == expected_entries
    assert backup(str(image_file), num_to_keep=NUM)
    assert len(image_file.dirpath().listdir()) == expected_entries


def test_missing(image_file):
    NUM = 12
    create_backups(image_file, NUM)
    parent = image_file.dirpath()
    # Punch a hole at a random slot, then back up once more.
    gap = random.choice(range(NUM))
    missing = parent / f"{image_file.basename}.{gap:02d}"
    missing.remove()
    assert not missing.exists()
    image_file.write("xxx")
    assert backup(str(image_file), NUM)
    # The hole is refilled and every slot's content shifts by one.
    assert missing.exists()
    assert (parent / f"{image_file.basename}.00").read() == "xxx"
    assert (parent / f"{image_file.basename}.{gap:02d}").read() == f"{CONTENT} - {NUM - gap}"
    assert (parent / f"{image_file.basename}.{(gap + 1):02d}").read() == f"{CONTENT} - {NUM - gap - 2}"


def test_non_existing_dir():
    assert not backup("xxxyyyzzz")
[ "pytest.fixture", "rolling_backup.backup" ]
[((569, 601), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (583, 601), False, 'import pytest\n'), ((1707, 1726), 'rolling_backup.backup', 'backup', (['"""xxxyyyzzz"""'], {}), "('xxxyyyzzz')\n", (1713, 1726), False, 'from rolling_backup import backup\n')]
from django.shortcuts import render, redirect, get_object_or_404 from django.utils import timezone from .models import Post, Category from taggit.models import Tag from .forms import AddPostForm #from .validator import group_required # complex lookups (for searching) from django.db.models import Q from django.urls import reverse_lazy # class based views from django.views.generic.edit import CreateView, DeleteView, UpdateView, FormView from django.views import View from django.utils.decorators import method_decorator from django.views.generic.detail import DetailView from django.views.generic.list import ListView from django.views.generic.base import TemplateView from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView from django.contrib.auth.mixins import ( LoginRequiredMixin, UserPassesTestMixin, PermissionRequiredMixin, ) from django.db import transaction class CategoryDatesMixin: def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["categories"] = Category.objects.all() # get queryset of datetime objects for all published posts #context["dates"] = Post.objects.published().filter(tags__slug=self.kwargs['slug']) context["dates"] = Post.objects.published().datetimes( field_name="published", kind="month", order="DESC" ) #context["dates"] = Post.objects.filter(status=Post.STATUS_PUBLISHED).datetimes( # field_name="published", kind="month", order="DESC" #) context["recent_posts"] = Post.objects.published().order_by( "-published" )[:3] #context["recent_posts"] = Post.objects.filter(status=Post.STATUS_PUBLISHED).order_by( # "-published" #)[:3] return context class ListPosts(CategoryDatesMixin, ListView): model = Post template_name = "posts/index.html" context_object_name = "posts" ordering = ("-published",) paginate_by = 5 def get_queryset(self): results = Post.objects.published() return results class ListByAuthor(CategoryDatesMixin, ListView): model = Post context_object_name = "posts" template_name 
= "posts/post_by_author.html" paginate_by = 5 ordering = ("-published",) def get_queryset(self): author = self.kwargs.get("author", None) results = [] if author: results = Post.objects.published().filter(author__username=author) return results def get_context_data(self, **kwargs): """ Pass author's name to the context """ context = super().get_context_data(**kwargs) context["author"] = self.kwargs.get("author", None) return context class ListByTag(CategoryDatesMixin, ListView): model = Post context_object_name = "posts" template_name = "posts/post_by_tag.html" paginate_by = 5 ordering = ("-published",) def get_queryset(self): tag = self.kwargs.get("tag", None) results = [] if tag: results = Post.objects.published().filter(tags__name=tag) return results def get_context_data(self, **kwargs): """ Pass tag name to the context """ context = super().get_context_data(**kwargs) context["tag"] = self.kwargs.get("tag", None) return context class ListByCategory(CategoryDatesMixin, ListView): model = Post context_object_name = "posts" template_name = "posts/post_by_category.html" paginate_by = 5 ordering = ("-published",) def get_queryset(self): category = self.kwargs.get("name", None) results = [] if category: results = Post.objects.published().filter(category__name=category) return results def get_context_data(self, **kwargs): """ Pass category's name to the context """ context = super().get_context_data(**kwargs) context["category"] = self.kwargs.get("name", None) return context class DetailPost(CategoryDatesMixin, DetailView): model = Post template_name = "posts/post_detail.html" #def get_queryset(self, queryset=None): #item = super().get_object(self) #item.viewed() #return item def get(self, request, *args, **kwargs): res = super().get(request, *args, **kwargs) self.object.viewed() return res # Post archive views class ArchiveMixin: model = Post date_field = "published" allow_future = False context_object_name = "posts" class PostYearArchive(CategoryDatesMixin, 
ArchiveMixin, YearArchiveView): make_object_list = True class PostYearMonthArchive(CategoryDatesMixin, ArchiveMixin, MonthArchiveView): pass # Create, delete and update post views # @group_required('Editors') class AddPost( CategoryDatesMixin, PermissionRequiredMixin, LoginRequiredMixin, CreateView ): form_class = AddPostForm permission_required = "posts.add_post" template_name = "posts/post_form.html" # to process request.user in the form def form_valid(self, form): form.save(commit=False) form.instance.author = self.request.user if form.instance.status in [Post.STATUS_PUBLISHED]: form.instance.published = timezone.now() else: form.instance.updated = timezone.now() return super().form_valid(form) def get_context_data(self, **kwargs): """ To use AddPostForm with 'Update' instead of 'Add' text in update view """ context = super().get_context_data(**kwargs) context["update"] = False return context class PostDraftsList( CategoryDatesMixin, PermissionRequiredMixin, LoginRequiredMixin, ListView ): template_name = "posts/list_drafts.html" permission_required = "posts.add_post" context_object_name = "posts" def get_queryset(self): return Post.objects.draft().filter( author__username=self.request.user.username ) class DeletePost( CategoryDatesMixin, LoginRequiredMixin, UserPassesTestMixin, DeleteView ): model = Post success_url = reverse_lazy("posts:index") def test_func(self): """ Only let the user delete object if they own the object being deleted """ return self.get_object().author.username == self.request.user.username class UpdatePost( CategoryDatesMixin, LoginRequiredMixin, UserPassesTestMixin, UpdateView ): model = Post form_class = AddPostForm def test_func(self): """ Only let the user update object if they own the object being updated """ return self.get_object().author.username == self.request.user.username def get_context_data(self, **kwargs): """ To use AddPostForm with 'Update' instead of 'Add' text in update view """ context = 
super().get_context_data(**kwargs) context["update"] = True return context class SearchPosts(CategoryDatesMixin, ListView): context_object_name = "posts" template_name = "posts/post_search.html" paginate_by = 5 ordering = ("-published",) def get_queryset(self): search_query = self.request.GET.get("q", None) results = [] if search_query: results = Post.objects.filter( Q(category__name__icontains=search_query) | Q(author__username__icontains=search_query) | Q(title__icontains=search_query) | Q(content__icontains=search_query) ).distinct() return results
[ "django.utils.timezone.now", "django.db.models.Q", "django.urls.reverse_lazy" ]
[((6239, 6266), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""posts:index"""'], {}), "('posts:index')\n", (6251, 6266), False, 'from django.urls import reverse_lazy\n'), ((5363, 5377), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5375, 5377), False, 'from django.utils import timezone\n'), ((5428, 5442), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5440, 5442), False, 'from django.utils import timezone\n'), ((7622, 7656), 'django.db.models.Q', 'Q', ([], {'content__icontains': 'search_query'}), '(content__icontains=search_query)\n', (7623, 7656), False, 'from django.db.models import Q\n'), ((7571, 7603), 'django.db.models.Q', 'Q', ([], {'title__icontains': 'search_query'}), '(title__icontains=search_query)\n', (7572, 7603), False, 'from django.db.models import Q\n'), ((7449, 7490), 'django.db.models.Q', 'Q', ([], {'category__name__icontains': 'search_query'}), '(category__name__icontains=search_query)\n', (7450, 7490), False, 'from django.db.models import Q\n'), ((7509, 7552), 'django.db.models.Q', 'Q', ([], {'author__username__icontains': 'search_query'}), '(author__username__icontains=search_query)\n', (7510, 7552), False, 'from django.db.models import Q\n')]
import logging import math import torch import torch.nn as nn from vedastr.models.bodies import build_sequence_decoder from vedastr.models.utils import build_torch_nn from vedastr.models.weight_init import init_weights from .registry import HEADS logger = logging.getLogger() @HEADS.register_module class TransformerHead(nn.Module): def __init__( self, decoder, generator, embedding, num_steps, pad_id, src_from, src_mask_from=None, ): super(TransformerHead, self).__init__() self.decoder = build_sequence_decoder(decoder) self.generator = build_torch_nn(generator) self.embedding = build_torch_nn(embedding) self.num_steps = num_steps self.pad_id = pad_id self.src_from = src_from self.src_mask_from = src_mask_from logger.info('TransformerHead init weights') init_weights(self.modules()) def pad_mask(self, text): pad_mask = (text == self.pad_id) pad_mask[:, 0] = False pad_mask = pad_mask.unsqueeze(1) return pad_mask def order_mask(self, text): t = text.size(1) order_mask = torch.triu(torch.ones(t, t), diagonal=1).bool() order_mask = order_mask.unsqueeze(0).to(text.device) return order_mask def text_embedding(self, texts): tgt = self.embedding(texts) tgt *= math.sqrt(tgt.size(2)) return tgt def forward(self, feats, texts): src = feats[self.src_from] if self.src_mask_from: src_mask = feats[self.src_mask_from] else: src_mask = None if self.training: tgt = self.text_embedding(texts) tgt_mask = (self.pad_mask(texts) | self.order_mask(texts)) out = self.decoder(tgt, src, tgt_mask, src_mask) out = self.generator(out) else: out = None for _ in range(self.num_steps): tgt = self.text_embedding(texts) tgt_mask = self.order_mask(texts) out = self.decoder(tgt, src, tgt_mask, src_mask) out = self.generator(out) next_text = torch.argmax(out[:, -1:, :], dim=-1) texts = torch.cat([texts, next_text], dim=-1) return out
[ "logging.getLogger", "torch.ones", "vedastr.models.utils.build_torch_nn", "vedastr.models.bodies.build_sequence_decoder", "torch.cat", "torch.argmax" ]
[((259, 278), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (276, 278), False, 'import logging\n'), ((586, 617), 'vedastr.models.bodies.build_sequence_decoder', 'build_sequence_decoder', (['decoder'], {}), '(decoder)\n', (608, 617), False, 'from vedastr.models.bodies import build_sequence_decoder\n'), ((643, 668), 'vedastr.models.utils.build_torch_nn', 'build_torch_nn', (['generator'], {}), '(generator)\n', (657, 668), False, 'from vedastr.models.utils import build_torch_nn\n'), ((694, 719), 'vedastr.models.utils.build_torch_nn', 'build_torch_nn', (['embedding'], {}), '(embedding)\n', (708, 719), False, 'from vedastr.models.utils import build_torch_nn\n'), ((2219, 2255), 'torch.argmax', 'torch.argmax', (['out[:, -1:, :]'], {'dim': '(-1)'}), '(out[:, -1:, :], dim=-1)\n', (2231, 2255), False, 'import torch\n'), ((2281, 2318), 'torch.cat', 'torch.cat', (['[texts, next_text]'], {'dim': '(-1)'}), '([texts, next_text], dim=-1)\n', (2290, 2318), False, 'import torch\n'), ((1209, 1225), 'torch.ones', 'torch.ones', (['t', 't'], {}), '(t, t)\n', (1219, 1225), False, 'import torch\n')]
from constants import Constants import numpy as np # TODO finish implementing all regions of the atmosphere class ISA(Constants): def __init__(self, altitude=0): """ Calculates International Standard Atmosphere properties for the specified geo-potential altitude :param float altitude: Geo-potential Altitude in SI meter [m] """ if 0. <= altitude <= 84852. and isinstance(altitude, float): self.altitude = altitude else: raise ValueError('Invalid altitude specified') @property def calculator(self): h, R, T0, P0, rho0 = self.altitude, self.gas_constant, self.temperature_sl, self.pressure_sl, self.rho_sl if h == 0: Talt, Palt, rhoalt = T0, P0, rho0 elif 0 < h < 11000.: a = -6.5e-3 Talt = T0 + (a * h) Palt = P0 * (Talt / T0) ^ (-(self.g / (a * R))) rhoalt = rho0 * ((Talt / T0) ^ (-((self.g / (a * R)) + 1))) elif 11000 <= h < 25000: a = -6.5e-3 Talt = 216.66 Palt = P0*(Talt/T0)**(-(self.g/(a*R))) rhoalt = 0.36480*(np.exp(-1 * ((self.g*(h-11000.))/(R * T0)))) else: Talt = None Palt = None rhoalt = None return Talt, Palt, rhoalt # function [T,Palt,rhoalt,a]=ISA(h) # global Econst # %Calculates the Temperature [K] using International Standard Atmosphere # if(h>=0)&&(h<=11000); # T=Econst.Temp0+(Econst.lambda*h); # Palt=Econst.P0*(T/Econst.Temp0)^(-(Econst.g/(Econst.lambda*Econst.R))); # rhoalt=Econst.rho0*((T/Econst.Temp0)^(-((Econst.g/(Econst.lambda*Econst.R))+1))); # elseif(h>11000)&&(h<=25000); # T=216.66; # Palt=22700*((exp(1))^(-((Econst.g*(h-11000))/(Econst.R*T)))); # rhoalt=0.36480*((exp(1))^(-((Econst.g*(h-11000))/(Econst.R*T)))); # elseif(h>25000)&&(h<=47000); # T=216.66+(1*((h-20000)/1000)); # Palt=5474.9*((216.65+(.001*(h-20000)))/216.65)^(-(Econst.g/(.001*Econst.R))); # rhoalt=0.088035*((216.65+(.001*(h-20000)))/216.65)^(-((Econst.g/(.001*Econst.R))-1)); # elseif(h>32000)&&(h<=47000); # T=228.65+(2.8*((h-32000)/1000)); # Palt=868.02*((228.65+(0.0028*(h-32000)))/228.65)^(-(Econst.g/(0.0028*Econst.R))); # 
rhoalt=0.013225*((228.65+(0.0028*(h-32000)))/228.65)^(-((Econst.g/(0.0028*Econst.R))-1)); # elseif(h>47000)&&(h<=53000); # T=270.65; # Palt=110.91*((exp(1))^(-((Econst.g*(h-47000))/(Econst.R*270.65)))); # rhoalt=0.001428*((exp(1))^(-((Econst.g*(h-47000))/(Econst.R*270.65)))); # elseif(h>53000)&&(h<=79000); # T=270.65+((-2.8)*((h-51000)/1000)); # Palt=66.939*((270.65+(-0.0028*(h-51000)))/270.65)^(-(Econst.g/(-0.0028*Econst.R))); # rhoalt=0.000862*((270.65+(-0.0028*(h-51000)))/270.65)^(-((Econst.g/(-0.0028*Econst.R))-1)); # elseif(h>79000)&&(h<=90000); # T=214.65+((-2.0)*((h-71000)/1000)); # Palt=3.9564*((214.65+(-0.002*(h-71000)))/214.65)^(-(Econst.g/(-0.002*Econst.R))); # rhoalt=0.000064*((214.65+(-0.002*(h-71000)))/214.65)^(-((Econst.g/(-0.002*Econst.R))-1)); # end # if(h<0)||(h>84852); # disp('International Standard Atmosphere Calculations cannot be used for values above 84,852m') # end # if(h>=0)&&(h<=84852); # a=sqrt(1.4*Econst.R*T); # %FL=ceil(((h*1250)/381)/100); # %disp(['Temperature at Flight Level ' num2str(FL) ' = ' num2str(T) 'K' ' = ' num2str(T-273.15) 'C']) # %disp(['Pressure at Flight Level ' num2str(FL) ' = ' num2str(Palt/1000) 'kPa']) # %disp(['Density at Flight Level ' num2str(FL) ' = ' num2str(rhoalt) ' [kg/m3]']) # %disp(['Speed of Sound at Flight Level ' num2str(FL) ' = ' num2str(a) ' [m/s]']) # end # end if __name__ == '__main__': obj = ISA(11000.) print(obj.altitude) print(obj.temperature)
[ "numpy.exp" ]
[((1141, 1189), 'numpy.exp', 'np.exp', (['(-1 * (self.g * (h - 11000.0) / (R * T0)))'], {}), '(-1 * (self.g * (h - 11000.0) / (R * T0)))\n', (1147, 1189), True, 'import numpy as np\n')]
from django.db import models __all__ = ('PostSaveImageField',) class PostSaveImageField(models.ImageField): def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True super(PostSaveImageField, self).__init__(*args, **kwargs) def contribute_to_class(self, cls, name): super(PostSaveImageField, self).contribute_to_class(cls, name) models.signals.post_save.connect(self.save_file, sender=cls) def save_file(self, sender, instance, created, **kwargs): file = super(PostSaveImageField, self).pre_save(instance, created) if file: instance.__class__.objects \ .filter(pk=instance.pk).update(**{self.attname: file.name}) def pre_save(self, model_instance, add): pass
[ "django.db.models.signals.post_save.connect" ]
[((407, 467), 'django.db.models.signals.post_save.connect', 'models.signals.post_save.connect', (['self.save_file'], {'sender': 'cls'}), '(self.save_file, sender=cls)\n', (439, 467), False, 'from django.db import models\n')]
import time import pyautogui def typer(command): pyautogui.typewrite(command) pyautogui.typewrite('\n') def open_valve(axis, step): typer("G91G0" + axis + "-" + str(step)) def close_valve(axis, step): typer("G91G0" + axis + str(step)) def give_me_some_white_bottle(duration): if duration == 0: return open_valve("X", 3) time.sleep(duration) close_valve("X", 3) def give_me_some_green_bottle(duration): if duration == 0: return close_valve("Y", 3) time.sleep(duration) open_valve("Y", 3) def give_me_some_rear_bottle(duration): if duration == 0: return close_valve("Z", 3) time.sleep(duration) open_valve("Z", 3) time.sleep(5)
[ "time.sleep", "pyautogui.typewrite" ]
[((712, 725), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (722, 725), False, 'import time\n'), ((54, 82), 'pyautogui.typewrite', 'pyautogui.typewrite', (['command'], {}), '(command)\n', (73, 82), False, 'import pyautogui\n'), ((87, 112), 'pyautogui.typewrite', 'pyautogui.typewrite', (['"""\n"""'], {}), "('\\n')\n", (106, 112), False, 'import pyautogui\n'), ((363, 383), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (373, 383), False, 'import time\n'), ((516, 536), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (526, 536), False, 'import time\n'), ((666, 686), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (676, 686), False, 'import time\n')]
import sys import soundcard import numpy import pytest skip_if_not_linux = pytest.mark.skipif(sys.platform != 'linux', reason='Only implemented for PulseAudio so far') ones = numpy.ones(1024) signal = numpy.concatenate([[ones], [-ones]]).T def test_speakers(): for speaker in soundcard.all_speakers(): assert isinstance(speaker.name, str) assert hasattr(speaker, 'id') assert isinstance(speaker.channels, int) assert speaker.channels > 0 def test_microphones(): for microphone in soundcard.all_microphones(): assert isinstance(microphone.name, str) assert hasattr(microphone, 'id') assert isinstance(microphone.channels, int) assert microphone.channels > 0 def test_default_playback(): soundcard.default_speaker().play(signal, 44100, channels=2) def test_default_record(): recording = soundcard.default_microphone().record(1024, 44100) assert len(recording == 1024) def test_default_blockless_record(): recording = soundcard.default_microphone().record(None, 44100) @skip_if_not_linux def test_name(): # The default is the application name, so when run from pytest, # it’s “pytest” or “_jb_pytest_runner.py” or so. 
assert 'pytest' in soundcard.get_name() soundcard.set_name('testapp') assert soundcard.get_name() == 'testapp' @skip_if_not_linux @pytest.mark.parametrize("argv,progname", [ (["./script.py"], "script.py"), # chmod +x script.py; ./script.py (["path/to/script.py"], "script.py"), # python path/to/script.py or # python -m path.to.script (["module/__main__.py"], "module"), # python -m module (["-m", "module.submodule"], "module.submodule"), # rare unresolved case (["-c", "import soundcard; soundcard.foo()"], "import soundcard; soundcard.fo..."), ]) def test_infer_name(monkeypatch, argv, progname): infer = soundcard.pulseaudio._PulseAudio._infer_program_name monkeypatch.setattr(sys, "argv", argv) assert infer() == progname @pytest.fixture def loopback_speaker(): import sys if sys.platform == 'win32': # must install https://www.vb-audio.com/Cable/index.htm return soundcard.get_speaker('Cable') elif sys.platform == 'darwin': # must install soundflower return soundcard.get_speaker('Soundflower64') elif sys.platform == 'linux': # pacmd load-module module-null-sink channels=6 rate=48000 return soundcard.get_speaker('Null') else: raise RuntimeError('Unknown platform {}'.format(sys.platform)) @pytest.fixture def loopback_player(loopback_speaker): with loopback_speaker.player(48000, channels=2, blocksize=512) as player: yield player @pytest.fixture def loopback_microphone(): if sys.platform == 'win32': # must install https://www.vb-audio.com/Cable/index.htm return soundcard.get_microphone('Cable') elif sys.platform == 'darwin': # must install soundflower return soundcard.get_microphone('Soundflower64') elif sys.platform == 'linux': return soundcard.get_microphone('Null', include_loopback=True) else: raise RuntimeError('Unknown platform {}'.format(sys.platform)) @pytest.fixture def loopback_recorder(loopback_microphone): with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder: yield recorder def test_loopback_playback(loopback_player, loopback_recorder): 
loopback_player.play(signal) recording = loopback_recorder.record(1024*10) assert recording.shape[1] == 2 left, right = recording.T assert left.mean() > 0 assert right.mean() < 0 assert (left > 0.5).sum() == len(signal) assert (right < -0.5).sum() == len(signal) def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone): with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert recording.shape[1] == 2 left, right = recording.T assert right.mean() > 0 assert left.mean() < 0 assert (right > 0.5).sum() == len(signal) assert (left < -0.5).sum() == len(signal) def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder): with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert recording.shape[1] == 2 left, right = recording.T assert right.mean() > 0 assert left.mean() < 0 assert (right > 0.5).sum() == len(signal) assert (left < -0.5).sum() == len(signal) def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder): with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player: loopback_player.play(signal[:,0]) recording = loopback_recorder.record(1024*12) assert recording.shape[1] == 2 left, right = recording.T assert left.mean() > 0 if sys.platform == 'linux': # unmapped channels on linux are filled with the mean of other channels assert right.mean() < left.mean() else: assert abs(right.mean()) < 0.01 # something like zero assert (left > 0.5).sum() == len(signal) def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone): with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert len(recording.shape) == 1 or 
recording.shape[1] == 1 assert recording.mean() > 0 assert (recording > 0.5).sum() == len(signal) def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone): with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player: with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert len(recording.shape) == 2 left, right = recording.T assert left.mean() > 0 assert right.mean() < 0 assert (left > 0.5).sum() == len(signal) assert (right < -0.5).sum() == len(signal)
[ "soundcard.get_microphone", "soundcard.all_speakers", "soundcard.get_name", "soundcard.all_microphones", "numpy.ones", "soundcard.default_microphone", "soundcard.default_speaker", "soundcard.set_name", "pytest.mark.parametrize", "soundcard.get_speaker", "numpy.concatenate", "pytest.mark.skipif...
[((76, 173), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform != 'linux')"], {'reason': '"""Only implemented for PulseAudio so far"""'}), "(sys.platform != 'linux', reason=\n 'Only implemented for PulseAudio so far')\n", (94, 173), False, 'import pytest\n'), ((177, 193), 'numpy.ones', 'numpy.ones', (['(1024)'], {}), '(1024)\n', (187, 193), False, 'import numpy\n'), ((1363, 1663), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""argv,progname"""', "[(['./script.py'], 'script.py'), (['path/to/script.py'], 'script.py'), ([\n 'module/__main__.py'], 'module'), (['-m', 'module.submodule'],\n 'module.submodule'), (['-c', 'import soundcard; soundcard.foo()'],\n 'import soundcard; soundcard.fo...')]"], {}), "('argv,progname', [(['./script.py'], 'script.py'), (\n ['path/to/script.py'], 'script.py'), (['module/__main__.py'], 'module'),\n (['-m', 'module.submodule'], 'module.submodule'), (['-c',\n 'import soundcard; soundcard.foo()'], 'import soundcard; soundcard.fo...')]\n )\n", (1386, 1663), False, 'import pytest\n'), ((203, 239), 'numpy.concatenate', 'numpy.concatenate', (['[[ones], [-ones]]'], {}), '([[ones], [-ones]])\n', (220, 239), False, 'import numpy\n'), ((283, 307), 'soundcard.all_speakers', 'soundcard.all_speakers', ([], {}), '()\n', (305, 307), False, 'import soundcard\n'), ((524, 551), 'soundcard.all_microphones', 'soundcard.all_microphones', ([], {}), '()\n', (549, 551), False, 'import soundcard\n'), ((1267, 1296), 'soundcard.set_name', 'soundcard.set_name', (['"""testapp"""'], {}), "('testapp')\n", (1285, 1296), False, 'import soundcard\n'), ((1242, 1262), 'soundcard.get_name', 'soundcard.get_name', ([], {}), '()\n', (1260, 1262), False, 'import soundcard\n'), ((1308, 1328), 'soundcard.get_name', 'soundcard.get_name', ([], {}), '()\n', (1326, 1328), False, 'import soundcard\n'), ((2205, 2235), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Cable"""'], {}), "('Cable')\n", (2226, 2235), False, 'import soundcard\n'), ((2897, 2930), 
'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Cable"""'], {}), "('Cable')\n", (2921, 2930), False, 'import soundcard\n'), ((767, 794), 'soundcard.default_speaker', 'soundcard.default_speaker', ([], {}), '()\n', (792, 794), False, 'import soundcard\n'), ((871, 901), 'soundcard.default_microphone', 'soundcard.default_microphone', ([], {}), '()\n', (899, 901), False, 'import soundcard\n'), ((1010, 1040), 'soundcard.default_microphone', 'soundcard.default_microphone', ([], {}), '()\n', (1038, 1040), False, 'import soundcard\n'), ((2321, 2359), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Soundflower64"""'], {}), "('Soundflower64')\n", (2342, 2359), False, 'import soundcard\n'), ((3016, 3057), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Soundflower64"""'], {}), "('Soundflower64')\n", (3040, 3057), False, 'import soundcard\n'), ((2476, 2505), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Null"""'], {}), "('Null')\n", (2497, 2505), False, 'import soundcard\n'), ((3107, 3162), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Null"""'], {'include_loopback': '(True)'}), "('Null', include_loopback=True)\n", (3131, 3162), False, 'import soundcard\n')]
""" @copyright: 2012-2016 <NAME> (as file __init__.py) @copyright: 2016-2018 <NAME> @license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY """ import os, sys sys.path.insert(1, os.path.dirname(sys.path[0])) import errno, fnmatch, glob, shutil, re import unittest, difflib, logging, imp import gettext t = gettext.translation(domain="wxglade", localedir="locale", fallback=True) t.install("wxglade") import common common.init_paths(None) import config config.testing = True import wxglade, codegen, compat, log if compat.PYTHON2: from StringIO import StringIO else: from io import StringIO class WXGladeBaseTest(unittest.TestCase): "Provide basic functions for all tests" longMessage = True caseDirectory = 'casefiles' # Directory with input files and (expected) result files outDirectory = 'generated' # Directory with generated result files # Language specific constants for file names: language, file prefix, file extensions language_constants = [("python","Py", ".py", ".py"),("perl","Pl", ".pl", ".pm"), ("C++","CPP", ".cpp", ".cpp"),("lisp","Lisp", ".lisp",".lisp"), ("XRC","xrc", ".xrc",".xrc")] @classmethod def setUpClass(cls): "Initialise parts of wxGlade before individual tests starts" # set icon path back to the default default #config.icons_path = 'icons' # initialise wxGlade preferences and set some useful values common.init_preferences() config.preferences.autosave = False config.preferences.write_timestamp = False config.preferences.show_progress = False config.version = '"faked test version"' # make e.g. 
the preview raise Exceptions config.testing = True # Determinate case and output directory cls.caseDirectory = os.path.join( os.path.dirname(__file__), cls.caseDirectory ) cls.outDirectory = os.path.join( os.path.dirname(__file__), cls.outDirectory ) if not os.path.exists(cls.outDirectory): os.mkdir(cls.outDirectory) # disable bug dialogs sys._called_from_test = True @classmethod def tearDownClass(cls): "Cleanup after all individual tests are done" # de-register own logging log.deinit() def setUp(self): "Initialise" codegen.BaseLangCodeWriter._show_warnings = False def tearDown(self): "Cleanup" pass def _read_file_lines(self, filename): "read a file, split into lines and drop 'generated by ...'" with open(filename, "rb") as f: ret = f.read() ret = ret.split( b"\r\n" if b"\r\n" in ret else b"\n" ) # drop empty lines and 'generated by...' while ret and not ret[-1]: del ret[-1] for i, line in enumerate(ret[:10]): if b'generated by wxGlade' in line: del ret[i] break return ret def _compare_files(self, expected_filename, generated_filename, check_mtime=False): self.assertTrue( os.path.isfile(generated_filename), "File %s was not generated"%generated_filename ) if check_mtime: self.assertGreater( os.stat(generated_filename).st_mtime, os.stat(expected_filename).st_mtime, "File was not overwritten" ) # open files, split into lines and convert to str/unicode expected = self._read_file_lines(expected_filename) generated = self._read_file_lines(generated_filename) if expected == generated: return False expected = [s.decode('ascii', 'replace') for s in expected] generated = [s.decode('ascii', 'replace') for s in generated] if wx.Platform == '__WXGTK__': # on gtk, the frames get resized after creation if len(expected)==len(generated): expected_ = [l for l in expected if not l.strip().startswith("<size>") and not "SetSize" in l] generated_ = [l for l in generated if not l.strip().startswith("<size>") and not "SetSize" in l] if expected_ == generated_: return False diff = 
difflib.unified_diff(expected, generated, fromfile=expected_filename, tofile=generated_filename, lineterm='') diff = list(diff) print( '\n'.join(diff[:40]) ) if len(diff)>40: print("...") #if compat.PYTHON3: self.assertFalse( diff, "Generated file and expected result differs:\n%s" % "\n".join(diff) ) return True def _get_inputfile_path(self, filename): "return the absolute path of a .wxg input file" basename, extension = os.path.splitext(filename) fn = os.path.join(self.caseDirectory, filename) if os.path.exists(fn): return fn return None def _get_casefile_path(self, filename): "return the absolute path of an input file or directory; for .py files, this might include _Phoenix/_Classic" basename, extension = os.path.splitext(filename) if extension.lower() == ".py": # search for version specific files if compat.IS_CLASSIC: fn = "%s_Classic%s"%(basename, extension) elif compat.IS_PHOENIX: fn = "%s_Phoenix%s"%(basename, extension) fn = os.path.join(self.caseDirectory, fn) if os.path.exists(fn): return fn # this could be a directory as well if extension.lower() == ".wxg": # search for "_Saved" version fn = "%s_Saved%s"%(basename, extension) fn = os.path.join(self.caseDirectory, fn) if os.path.exists(fn): return fn # this could be a directory as well fn = os.path.join(self.caseDirectory, filename) if os.path.exists(fn): return fn return None def _get_outputfile_path(self, filename): """return the name for an output file filename can be - a full path, where only the part relative to caseDirectory will be used - an absolute path, where only the leafname will be used - a relative path""" commonpath = os.path.commonprefix( (self.caseDirectory, filename) ) if commonpath==self.caseDirectory: leaf = filename[len(commonpath)+1:] elif os.path.isabs(filename): leaf = os.path.basename(filename) else: leaf = filename return os.path.join(self.outDirectory, leaf) class WXGladeCLITest(WXGladeBaseTest): @classmethod def setUpClass(cls): logging.disable(logging.WARNING) 
wxglade.init_stage1() wxglade.init_localization() wxglade.init_stage2(False) import xrc2wxg import wx import config, common, compat, main class WXGladeGUITest(WXGladeBaseTest): # as Python created an own instance for each test, we use class variables for persistent storing 'app' and 'frame': app = None # Reference to a wx.App object. The object is persistent after the creation in setUp(). frame = None # Reference to main.wxGladeFrame. The object is persistent after the creation in setUp(). nolog = None # nolog: wxWidgets Null logger to suppress error messages orig_stdout = None # original fd for stdout def mockMessageBox(self, message, caption, *args, **kwargs): "Mock object for wx.MessageBox" self._messageBox = [message, caption] def _assert_message(self, substring, caption ): self.assertTrue( self._messageBox, "No %s message generated"%caption ) generated_message, generated_caption = self._messageBox self.assertTrue( generated_caption==caption, "Expected %s message, got %s"%(caption, generated_caption) ) fmt='%s message does not match: Expected message containing "%s" \ngot wxMessageBox(message="%s", caption="%s")' msg = fmt%(caption, substring, self._messageBox[0], self._messageBox[1] ) self.assertTrue( substring in generated_message, msg ) self._messageBox = None def _assert_error_message(self, substring ): self._assert_message(substring, u"Error") def _assert_warning_message(self, substring ): self._assert_message(substring, u"Warning") def _assert_info_message(self, substring ): self._assert_message(substring, u"Information") @classmethod def setUpClass(cls): WXGladeBaseTest.setUpClass() xrc2wxg._write_timestamp = False # create an simply application cls.app = wx.App() cls.locale = wx.Locale(wx.LANGUAGE_DEFAULT) compat.wx_ArtProviderPush(main.wxGladeArtProvider()) cls.frame = main.wxGladeFrame() # suppress wx error messages cls.nolog = wx.LogNull() #cls.app.SetAssertMode(0) # avoid triggering of wx assertions; sometimes needed for debugging # hide all windows 
#cls.frame.Hide() #cls.frame.hide_all() @classmethod def tearDownClass(cls): cls.nolog = None def setUp(self): # redirect stdout self.orig_stdout = sys.stdout sys.stdout = StringIO() # initialise base class WXGladeBaseTest.setUp(self) # inject mock object for wxMessageBox wx.MessageBox = self.mockMessageBox self._messageBox = [] # show dialog "Code generation completed successfully" config.preferences.show_completion = True def tearDown(self): # restore original stdout if self.orig_stdout: sys.stdout = self.orig_stdout # initialise base class WXGladeBaseTest.tearDown(self) def _process_wx_events(self): "Process wx events, because we don't start the main loop" for i in range(3): wx.SafeYield() self.app.ProcessPendingEvents() def load_and_generate(self, basename, excluded=None, included=None, test_GUI=True, preview=True): "Load a wxGlade document 'basename' and generate code for all languages except the ones in list 'excluded'" if included is None: languages = set( [l[0] for l in self.language_constants] + ["wxg"] ) else: languages = set(included) if excluded is not None: languages -= set(excluded) # open file infilename = self._get_inputfile_path('%s.wxg' % basename) common.main._open_app(infilename, use_progress_dialog=False, add_to_history=False) # some shortcuts tree = common.app_tree app = tree.app if test_GUI or preview: # expand tree and show edit window first_window_node = app.node.children[0] first_window = first_window_node.widget first_window_item = first_window_node.item if test_GUI: if first_window_item.IsOk(): tree.expand() self._process_wx_events() tree.SelectItem(first_window_item) self._process_wx_events() tree.show_toplevel(first_window_node) self._process_wx_events() if preview: first_window.properties["preview"]() self._process_wx_events() if compat.PYTHON2: # the languages that failed due to differences to expected files diff_fails = [] else: # with Python 3, we use subTests subtest = 0 if "wxg" in languages: # save file again and check 
generated_filename = self._get_outputfile_path(infilename) compare_filename = self._get_casefile_path(infilename) # some properties may have changed on loading common.main._save_app(generated_filename) if compat.PYTHON2: if self._compare_files(compare_filename, generated_filename): diff_fails.append("wxg") else: with self.subTest(subtest): self._compare_files(compare_filename, generated_filename) subtest += 1 # try to generate code with empty output path -> will fail app.properties["output_path"].set("") app.generate_code() # first test should fail because no output file is given self._assert_error_message( u'You must specify an output file' ) # now test full code generation for language, dummy, ext, dummy in self.language_constants: if not language in languages: continue if language=="C++" and app.multiple_files: app_basename = os.path.splitext(config.default_cpp_app_name)[0] app_basename = "%s_%s"%(first_window.klass.split("_")[0], app_basename) app.app_filename = app_basename expected_filename = self._get_casefile_path( "%s.%s"%(app_basename, app.source_extension) ) # first_window.klass # 'Bug179_Frame' else: expected_filename = self._get_casefile_path( '%s%s' % (basename, ext) ) if not expected_filename: continue generated_filename = self._get_outputfile_path(expected_filename) # check for language first self.assertTrue( language in common.code_writers, "No codewriter loaded for %s" % language ) # set absolute "Output path", language and generate code if language=="C++" and app.multiple_files: app.properties["output_path"].set( os.path.dirname(generated_filename) ) else: app.properties["output_path"].set(generated_filename) app.properties["language"].set(language) self._process_wx_events() app.generate_code() self._assert_info_message(u'Code generation completed successfully') compare_files = [(expected_filename, generated_filename)] if language == 'C++': if not app.multiple_files: # compare header file as well expected_filename_h = '%s.%s' % ( 
os.path.splitext(expected_filename )[0], app.header_extension ) generated_filename_h = '%s.%s' % ( os.path.splitext(generated_filename)[0], app.header_extension ) compare_files.append( (expected_filename, generated_filename) ) compare_files.append( (expected_filename_h, generated_filename_h) ) else: for toplevel in app.node.children: classname = toplevel.widget.klass # class C++ file expected_filename = self._get_casefile_path( "%s.%s"%(classname, app.source_extension) ) if expected_filename: compare_files.append( (expected_filename, self._get_outputfile_path(expected_filename) ) ) # class header file expected_filename = self._get_casefile_path( "%s.%s"%(classname, app.header_extension) ) if expected_filename: compare_files.append( (expected_filename, self._get_outputfile_path(expected_filename) ) ) for expected_filename, generated_filename in compare_files: if compat.PYTHON2: # no subtests if self._compare_files(expected_filename, generated_filename): diff_fails.append(language) else: with self.subTest(subtest): self._compare_files(expected_filename, generated_filename) subtest += 1 if compat.PYTHON2: self.assertFalse(diff_fails, "Expected and generated files do not match for %s"%",".join(diff_fails)) def _copy_and_modify(self, source, target, original=None, replacement=None): if original is None: shutil.copy2( source, target ) return with open(source,"rb") as infile: content = infile.read().replace(original, replacement) with open(target, "wb") as outfile: outfile.write(content) shutil.copystat( source, target )
[ "difflib.unified_diff", "main.wxGladeArtProvider", "wx.App", "gettext.translation", "os.path.exists", "common.init_paths", "wxglade.init_localization", "shutil.copy2", "main.wxGladeFrame", "log.deinit", "wxglade.init_stage2", "os.mkdir", "common.init_preferences", "wx.SafeYield", "common...
[((325, 397), 'gettext.translation', 'gettext.translation', ([], {'domain': '"""wxglade"""', 'localedir': '"""locale"""', 'fallback': '(True)'}), "(domain='wxglade', localedir='locale', fallback=True)\n", (344, 397), False, 'import gettext\n'), ((434, 457), 'common.init_paths', 'common.init_paths', (['None'], {}), '(None)\n', (451, 457), False, 'import config, common, compat, main\n'), ((195, 223), 'os.path.dirname', 'os.path.dirname', (['sys.path[0]'], {}), '(sys.path[0])\n', (210, 223), False, 'import os, sys\n'), ((1493, 1518), 'common.init_preferences', 'common.init_preferences', ([], {}), '()\n', (1516, 1518), False, 'import config, common, compat, main\n'), ((2303, 2315), 'log.deinit', 'log.deinit', ([], {}), '()\n', (2313, 2315), False, 'import wxglade, codegen, compat, log\n'), ((4217, 4330), 'difflib.unified_diff', 'difflib.unified_diff', (['expected', 'generated'], {'fromfile': 'expected_filename', 'tofile': 'generated_filename', 'lineterm': '""""""'}), "(expected, generated, fromfile=expected_filename,\n tofile=generated_filename, lineterm='')\n", (4237, 4330), False, 'import unittest, difflib, logging, imp\n'), ((4711, 4737), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4727, 4737), False, 'import os, sys\n'), ((4751, 4793), 'os.path.join', 'os.path.join', (['self.caseDirectory', 'filename'], {}), '(self.caseDirectory, filename)\n', (4763, 4793), False, 'import os, sys\n'), ((4805, 4823), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (4819, 4823), False, 'import os, sys\n'), ((5060, 5086), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5076, 5086), False, 'import os, sys\n'), ((5749, 5791), 'os.path.join', 'os.path.join', (['self.caseDirectory', 'filename'], {}), '(self.caseDirectory, filename)\n', (5761, 5791), False, 'import os, sys\n'), ((5803, 5821), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (5817, 5821), False, 'import os, sys\n'), ((6186, 6238), 
'os.path.commonprefix', 'os.path.commonprefix', (['(self.caseDirectory, filename)'], {}), '((self.caseDirectory, filename))\n', (6206, 6238), False, 'import os, sys\n'), ((6473, 6510), 'os.path.join', 'os.path.join', (['self.outDirectory', 'leaf'], {}), '(self.outDirectory, leaf)\n', (6485, 6510), False, 'import os, sys\n'), ((6602, 6634), 'logging.disable', 'logging.disable', (['logging.WARNING'], {}), '(logging.WARNING)\n', (6617, 6634), False, 'import unittest, difflib, logging, imp\n'), ((6643, 6664), 'wxglade.init_stage1', 'wxglade.init_stage1', ([], {}), '()\n', (6662, 6664), False, 'import wxglade, codegen, compat, log\n'), ((6673, 6700), 'wxglade.init_localization', 'wxglade.init_localization', ([], {}), '()\n', (6698, 6700), False, 'import wxglade, codegen, compat, log\n'), ((6709, 6735), 'wxglade.init_stage2', 'wxglade.init_stage2', (['(False)'], {}), '(False)\n', (6728, 6735), False, 'import wxglade, codegen, compat, log\n'), ((8562, 8570), 'wx.App', 'wx.App', ([], {}), '()\n', (8568, 8570), False, 'import wx\n'), ((8592, 8622), 'wx.Locale', 'wx.Locale', (['wx.LANGUAGE_DEFAULT'], {}), '(wx.LANGUAGE_DEFAULT)\n', (8601, 8622), False, 'import wx\n'), ((8704, 8723), 'main.wxGladeFrame', 'main.wxGladeFrame', ([], {}), '()\n', (8721, 8723), False, 'import config, common, compat, main\n'), ((8782, 8794), 'wx.LogNull', 'wx.LogNull', ([], {}), '()\n', (8792, 8794), False, 'import wx\n'), ((9160, 9170), 'io.StringIO', 'StringIO', ([], {}), '()\n', (9168, 9170), False, 'from io import StringIO\n'), ((10425, 10512), 'common.main._open_app', 'common.main._open_app', (['infilename'], {'use_progress_dialog': '(False)', 'add_to_history': '(False)'}), '(infilename, use_progress_dialog=False, add_to_history\n =False)\n', (10446, 10512), False, 'import config, common, compat, main\n'), ((16193, 16224), 'shutil.copystat', 'shutil.copystat', (['source', 'target'], {}), '(source, target)\n', (16208, 16224), False, 'import errno, fnmatch, glob, shutil, re\n'), ((1882, 1907), 
'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1897, 1907), False, 'import os, sys\n'), ((1971, 1996), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1986, 1996), False, 'import os, sys\n'), ((2032, 2064), 'os.path.exists', 'os.path.exists', (['cls.outDirectory'], {}), '(cls.outDirectory)\n', (2046, 2064), False, 'import os, sys\n'), ((2066, 2092), 'os.mkdir', 'os.mkdir', (['cls.outDirectory'], {}), '(cls.outDirectory)\n', (2074, 2092), False, 'import os, sys\n'), ((3104, 3138), 'os.path.isfile', 'os.path.isfile', (['generated_filename'], {}), '(generated_filename)\n', (3118, 3138), False, 'import os, sys\n'), ((5347, 5383), 'os.path.join', 'os.path.join', (['self.caseDirectory', 'fn'], {}), '(self.caseDirectory, fn)\n', (5359, 5383), False, 'import os, sys\n'), ((5399, 5417), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (5413, 5417), False, 'import os, sys\n'), ((5617, 5653), 'os.path.join', 'os.path.join', (['self.caseDirectory', 'fn'], {}), '(self.caseDirectory, fn)\n', (5629, 5653), False, 'import os, sys\n'), ((5669, 5687), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (5683, 5687), False, 'import os, sys\n'), ((6345, 6368), 'os.path.isabs', 'os.path.isabs', (['filename'], {}), '(filename)\n', (6358, 6368), False, 'import os, sys\n'), ((8657, 8682), 'main.wxGladeArtProvider', 'main.wxGladeArtProvider', ([], {}), '()\n', (8680, 8682), False, 'import config, common, compat, main\n'), ((9817, 9831), 'wx.SafeYield', 'wx.SafeYield', ([], {}), '()\n', (9829, 9831), False, 'import wx\n'), ((11739, 11780), 'common.main._save_app', 'common.main._save_app', (['generated_filename'], {}), '(generated_filename)\n', (11760, 11780), False, 'import config, common, compat, main\n'), ((15947, 15975), 'shutil.copy2', 'shutil.copy2', (['source', 'target'], {}), '(source, target)\n', (15959, 15975), False, 'import errno, fnmatch, glob, shutil, re\n'), ((6389, 6415), 'os.path.basename', 
'os.path.basename', (['filename'], {}), '(filename)\n', (6405, 6415), False, 'import os, sys\n'), ((3245, 3272), 'os.stat', 'os.stat', (['generated_filename'], {}), '(generated_filename)\n', (3252, 3272), False, 'import os, sys\n'), ((3283, 3309), 'os.stat', 'os.stat', (['expected_filename'], {}), '(expected_filename)\n', (3290, 3309), False, 'import os, sys\n'), ((12648, 12693), 'os.path.splitext', 'os.path.splitext', (['config.default_cpp_app_name'], {}), '(config.default_cpp_app_name)\n', (12664, 12693), False, 'import os, sys\n'), ((13548, 13583), 'os.path.dirname', 'os.path.dirname', (['generated_filename'], {}), '(generated_filename)\n', (13563, 13583), False, 'import os, sys\n'), ((14132, 14167), 'os.path.splitext', 'os.path.splitext', (['expected_filename'], {}), '(expected_filename)\n', (14148, 14167), False, 'import os, sys\n'), ((14251, 14287), 'os.path.splitext', 'os.path.splitext', (['generated_filename'], {}), '(generated_filename)\n', (14267, 14287), False, 'import os, sys\n')]
#! /usr/bin/env python3 import sh import click import re def real_git(*args, **kwargs): mock_git(*args, **kwargs) return sh.git(*args, **kwargs) def mock_git(*args, **kwargs): click.echo(sh.git.bake(*args, **kwargs), err=True) return "" def branch_exists(name): try: get_commit_hash(name) return True except: return False def get_current_branch(): return sh.git("rev-parse", "--abbrev-ref", "HEAD").strip() def get_commit_hash(commit_spec): return sh.git("rev-parse", commit_spec).strip() @click.command() @click.argument("commit", type=str) @click.argument("branch", type=str) @click.option("--base", type=str, default="origin/master", help="Base branch to branch from") @click.option("--push/--no-push", default=True, help="Push the feature branch") @click.option("--new/--not-new", default=False, help="Make a new branch") @click.option("--switch/--no-switch", default=False, help="Switch to the other branch") @click.option("--mock/--real", default=False, help="Just print git commands") def main(commit, branch, base, push, new, switch, mock): """ COMMIT: a commit range to be cherry-picked into BRANCH, e.g. HEAD^1 or HEAD..HEAD~1, or a hash range BRANCH: this branch will be rebased off of the base branch, e.g. myname/my-great-feature """ # Mock git just prints the command which would be run if mock: git = mock_git else: git = real_git current_branch = get_current_branch() exists = branch_exists(branch) if exists and new: raise click.UsageError(f"Branch {branch} already exists. remove --new") if not exists and not new: raise click.UsageError(f"Branch {branch} must be created. 
use --new") try: click.echo(f"Currently on {current_branch}", err=True) # Resolve the commit name unless a hash was specified if not re.match(r"[0-9a-f]{40}", commit): commit = get_commit_hash(commit) if "\n^" in commit: commit = commit.replace("\n^", "^..") # Describe the actions to be performed push_msg = "" if push and not new: push_msg = " and force push" if push and new: push_msg = " and push upstream" branch_action = "create" if new else "hard reset" click.echo(f"Going to {branch_action} branch {branch} on {base} then cherry pick {commit}{push_msg}", err=True) click.echo(err=True) # Checkout or create the branch and reset to the the base branch if not exists: git("checkout", "-b", branch) else: git("checkout", branch) git("reset", "--hard", base) # Cherry pick the commit(s) into the branch git("cherry-pick", commit) # Push to origin if push: # Set upstream if necessary, otherwise force push if not exists: git("push", "--set-upstream", "origin", branch) else: git("push", "--force") finally: if not switch: git("checkout", current_branch)
[ "click.UsageError", "click.argument", "click.option", "sh.git.bake", "re.match", "sh.git", "click.echo", "click.command" ]
[((559, 574), 'click.command', 'click.command', ([], {}), '()\n', (572, 574), False, 'import click\n'), ((576, 610), 'click.argument', 'click.argument', (['"""commit"""'], {'type': 'str'}), "('commit', type=str)\n", (590, 610), False, 'import click\n'), ((612, 646), 'click.argument', 'click.argument', (['"""branch"""'], {'type': 'str'}), "('branch', type=str)\n", (626, 646), False, 'import click\n'), ((648, 745), 'click.option', 'click.option', (['"""--base"""'], {'type': 'str', 'default': '"""origin/master"""', 'help': '"""Base branch to branch from"""'}), "('--base', type=str, default='origin/master', help=\n 'Base branch to branch from')\n", (660, 745), False, 'import click\n'), ((742, 820), 'click.option', 'click.option', (['"""--push/--no-push"""'], {'default': '(True)', 'help': '"""Push the feature branch"""'}), "('--push/--no-push', default=True, help='Push the feature branch')\n", (754, 820), False, 'import click\n'), ((822, 894), 'click.option', 'click.option', (['"""--new/--not-new"""'], {'default': '(False)', 'help': '"""Make a new branch"""'}), "('--new/--not-new', default=False, help='Make a new branch')\n", (834, 894), False, 'import click\n'), ((896, 987), 'click.option', 'click.option', (['"""--switch/--no-switch"""'], {'default': '(False)', 'help': '"""Switch to the other branch"""'}), "('--switch/--no-switch', default=False, help=\n 'Switch to the other branch')\n", (908, 987), False, 'import click\n'), ((984, 1060), 'click.option', 'click.option', (['"""--mock/--real"""'], {'default': '(False)', 'help': '"""Just print git commands"""'}), "('--mock/--real', default=False, help='Just print git commands')\n", (996, 1060), False, 'import click\n'), ((132, 155), 'sh.git', 'sh.git', (['*args'], {}), '(*args, **kwargs)\n', (138, 155), False, 'import sh\n'), ((204, 232), 'sh.git.bake', 'sh.git.bake', (['*args'], {}), '(*args, **kwargs)\n', (215, 232), False, 'import sh\n'), ((1577, 1642), 'click.UsageError', 'click.UsageError', (['f"""Branch {branch} 
already exists. remove --new"""'], {}), "(f'Branch {branch} already exists. remove --new')\n", (1593, 1642), False, 'import click\n'), ((1689, 1752), 'click.UsageError', 'click.UsageError', (['f"""Branch {branch} must be created. use --new"""'], {}), "(f'Branch {branch} must be created. use --new')\n", (1705, 1752), False, 'import click\n'), ((1771, 1825), 'click.echo', 'click.echo', (['f"""Currently on {current_branch}"""'], {'err': '(True)'}), "(f'Currently on {current_branch}', err=True)\n", (1781, 1825), False, 'import click\n'), ((2339, 2460), 'click.echo', 'click.echo', (['f"""Going to {branch_action} branch {branch} on {base} then cherry pick {commit}{push_msg}"""'], {'err': '(True)'}), "(\n f'Going to {branch_action} branch {branch} on {base} then cherry pick {commit}{push_msg}'\n , err=True)\n", (2349, 2460), False, 'import click\n'), ((2459, 2479), 'click.echo', 'click.echo', ([], {'err': '(True)'}), '(err=True)\n', (2469, 2479), False, 'import click\n'), ((416, 459), 'sh.git', 'sh.git', (['"""rev-parse"""', '"""--abbrev-ref"""', '"""HEAD"""'], {}), "('rev-parse', '--abbrev-ref', 'HEAD')\n", (422, 459), False, 'import sh\n'), ((515, 547), 'sh.git', 'sh.git', (['"""rev-parse"""', 'commit_spec'], {}), "('rev-parse', commit_spec)\n", (521, 547), False, 'import sh\n'), ((1904, 1936), 're.match', 're.match', (['"""[0-9a-f]{40}"""', 'commit'], {}), "('[0-9a-f]{40}', commit)\n", (1912, 1936), False, 'import re\n')]
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-05-03 14:34 from __future__ import unicode_literals from django.db import migrations, models import space_manager.branches.models class Migration(migrations.Migration): dependencies = [ ('branches', '0010_branch_lounge_img_cabinet'), ] operations = [ migrations.AddField( model_name='branch', name='minimap_img', field=models.ImageField(null=True, upload_to='', validators=[space_manager.branches.models.Branch.validate_image]), ), ]
[ "django.db.models.ImageField" ]
[((449, 562), 'django.db.models.ImageField', 'models.ImageField', ([], {'null': '(True)', 'upload_to': '""""""', 'validators': '[space_manager.branches.models.Branch.validate_image]'}), "(null=True, upload_to='', validators=[space_manager.\n branches.models.Branch.validate_image])\n", (466, 562), False, 'from django.db import migrations, models\n')]
# Generated by Django 2.2.15 on 2020-08-26 03:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cv', '0002_experience_experience_name'), ] operations = [ migrations.AlterField( model_name='experience', name='experience_name', field=models.TextField(default='Intern'), ), ]
[ "django.db.models.TextField" ]
[((353, 387), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Intern"""'}), "(default='Intern')\n", (369, 387), False, 'from django.db import migrations, models\n')]
"""Wikidump reader and processor module. """ import os with open(os.path.join( os.path.dirname(__file__), 'scripts', 'DUMP_VERSION')) as f: DUMP_VERSION = f.readline().strip() with open(os.path.join( os.path.dirname(__file__), 'scripts', 'TORRENT_HASH')) as f: HASH = f.readline().strip() BZ_FILE = 'enwiki-%s-pages-articles-multistream.xml.bz2' % DUMP_VERSION BZ_PATH = os.path.join('data', BZ_FILE) DEFAULT_NAMESPACE = 'http://www.mediawiki.org/xml/export-0.10/' # Known namespaces used by Database Exporter NSMAP = { None: DEFAULT_NAMESPACE, 'xsi': 'http://www.w3.org/2001/XMLSchema-instance' } __all__ = ['DUMP_VERSION', 'HASH', 'BZ_FILE', 'BZ_PATH', 'DEFAULT_NAMESPACE', 'NSMAP']
[ "os.path.dirname", "os.path.join" ]
[((432, 461), 'os.path.join', 'os.path.join', (['"""data"""', 'BZ_FILE'], {}), "('data', BZ_FILE)\n", (444, 461), False, 'import os\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n'), ((240, 265), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (255, 265), False, 'import os\n')]
# Generated by Django 3.1 on 2020-08-12 18:03 from django.conf import settings from django.db import migrations class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('userclass', '0002_auto_20200812_1731'), ] operations = [ migrations.RenameModel( old_name='UserClass', new_name='UserDetails', ), ]
[ "django.db.migrations.RenameModel", "django.db.migrations.swappable_dependency" ]
[((184, 241), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (215, 241), False, 'from django.db import migrations\n'), ((327, 395), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""UserClass"""', 'new_name': '"""UserDetails"""'}), "(old_name='UserClass', new_name='UserDetails')\n", (349, 395), False, 'from django.db import migrations\n')]
from .base_trainer import BaseTrainer import torch import os class TPUTrainer(BaseTrainer): r"""TPUTrainer: Trains the vathos model on TPU """ def __init__(self, *args, **kwargs): super(TPUTrainer, self).__init__(*args, **kwargs) import torch_xla import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl import torch_xla.distributed.xla_multiprocessing as xmp os.environ['XLA_USE_BF16'] = 1 @staticmethod def _xla_train(index): device = xm.xla_device() para_loader = pl.ParallelLoader(train_loader, [device]) model = MNIST().train().to(device) loss_fn = nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum) for data, target in para_loader.per_device_loader(device): optimizer.zero_grad() output = model(data) loss = loss_fn(output, target) loss.backward() xm.optimizer_step(optimizer) def train_epoch(self, epoch): return 0 def test_epoch(self, epoch): device = xm.xla_device() xm.mark_step() return 0
[ "torch_xla.core.xla_model.xla_device", "torch_xla.distributed.parallel_loader.ParallelLoader", "torch_xla.core.xla_model.mark_step", "torch_xla.core.xla_model.optimizer_step" ]
[((552, 567), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (565, 567), True, 'import torch_xla.core.xla_model as xm\n'), ((590, 631), 'torch_xla.distributed.parallel_loader.ParallelLoader', 'pl.ParallelLoader', (['train_loader', '[device]'], {}), '(train_loader, [device])\n', (607, 631), True, 'import torch_xla.distributed.parallel_loader as pl\n'), ((1135, 1150), 'torch_xla.core.xla_model.xla_device', 'xm.xla_device', ([], {}), '()\n', (1148, 1150), True, 'import torch_xla.core.xla_model as xm\n'), ((1160, 1174), 'torch_xla.core.xla_model.mark_step', 'xm.mark_step', ([], {}), '()\n', (1172, 1174), True, 'import torch_xla.core.xla_model as xm\n'), ((1001, 1029), 'torch_xla.core.xla_model.optimizer_step', 'xm.optimizer_step', (['optimizer'], {}), '(optimizer)\n', (1018, 1029), True, 'import torch_xla.core.xla_model as xm\n')]
#!/usr/bin/env python # # Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Packages test dependencies as tar.gz file.""" import argparse import json import logging import os import sys import tarfile parser = argparse.ArgumentParser( description='Package test dependencies as tar.gz files.') parser.add_argument('--output', required=True, help='Full path to the output file.') parser.add_argument('--deps_list_path', required=True, help='Full path to the json dependencies file.') parser.add_argument('--exclude_deps', required=False, default='', help=('Comma separated list of dependencies to exclude' ' from tar.gz file.')) parser.add_argument('--additional_deps', required=False, default='', help=('Comma separated list of additional deps' ' to include in tar.gz.')) def read_dependencies(file_path): """Reads a json file and creates an iterable of unique dependencies. Args: file_path: The path to the runtime dependencies file. Returns: An iterable with unique dependencies. """ deps = None with open(file_path) as deps_file: deps = json.load(deps_file) deps_set = set() for _, dep_list in deps.items(): deps_set.update(dep_list) return deps_set def filter_dependencies(dependencies, filters): """Filters out dependencies from a dependencies iterable. Args: dependencies: An iterable with the full list of dependencies. filters: A list of dependencies to remove. Returns: An iterable with the filtered dependencies. """ filters_list = filters.strip(',').split(',') logging.info('Filtering: %s', filters_list) filtered_deps = set() for dep in dependencies: norm_dep = os.path.normpath(dep) if not any(norm_dep.startswith(f) for f in filters_list): filtered_deps.add(norm_dep) return filtered_deps def create_tarfile(output_path, dependencies): """Creates a tar.gz file and saves it to output_path. Args: output_path: A string with the path to where tar.gz file will be saved to. 
dependencies: An iterable with file/folders test dependencies. """ total_deps = len(dependencies) if total_deps < 1: logging.error('There are no dependencies to archive') sys.exit(1) step = (total_deps / 10) or 1 logging.info('Adding %s files', total_deps) with tarfile.open(output_path, 'w:gz') as tar_file: for idx, dep in enumerate(dependencies): dep = os.path.normpath(dep) archive_name = os.path.join('fuchsia/release', dep) archive_name = os.path.normpath(archive_name) tar_file.add(dep, arcname=archive_name) if idx % step == 0 or idx == (total_deps - 1): logging.info('Progress: %s percent', int(round(100.0/total_deps * idx))) def main(): logging.basicConfig(level=logging.INFO) args = parser.parse_args() dependencies = read_dependencies(args.deps_list_path) if args.additional_deps: to_include = args.additional_deps.strip(',').split(',') logging.info('Including: %s', to_include) dependencies.update(to_include) if args.exclude_deps: dependencies = filter_dependencies(dependencies, args.exclude_deps) create_tarfile(args.output, dependencies) if __name__ == '__main__': main()
[ "logging.basicConfig", "tarfile.open", "argparse.ArgumentParser", "os.path.join", "os.path.normpath", "sys.exit", "json.load", "logging.info", "logging.error" ]
[((327, 413), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Package test dependencies as tar.gz files."""'}), "(description=\n 'Package test dependencies as tar.gz files.')\n", (350, 413), False, 'import argparse\n'), ((1830, 1873), 'logging.info', 'logging.info', (['"""Filtering: %s"""', 'filters_list'], {}), "('Filtering: %s', filters_list)\n", (1842, 1873), False, 'import logging\n'), ((2509, 2552), 'logging.info', 'logging.info', (['"""Adding %s files"""', 'total_deps'], {}), "('Adding %s files', total_deps)\n", (2521, 2552), False, 'import logging\n'), ((2992, 3031), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3011, 3031), False, 'import logging\n'), ((1360, 1380), 'json.load', 'json.load', (['deps_file'], {}), '(deps_file)\n', (1369, 1380), False, 'import json\n'), ((1940, 1961), 'os.path.normpath', 'os.path.normpath', (['dep'], {}), '(dep)\n', (1956, 1961), False, 'import os\n'), ((2405, 2458), 'logging.error', 'logging.error', (['"""There are no dependencies to archive"""'], {}), "('There are no dependencies to archive')\n", (2418, 2458), False, 'import logging\n'), ((2463, 2474), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2471, 2474), False, 'import sys\n'), ((2560, 2593), 'tarfile.open', 'tarfile.open', (['output_path', '"""w:gz"""'], {}), "(output_path, 'w:gz')\n", (2572, 2593), False, 'import tarfile\n'), ((3208, 3249), 'logging.info', 'logging.info', (['"""Including: %s"""', 'to_include'], {}), "('Including: %s', to_include)\n", (3220, 3249), False, 'import logging\n'), ((2664, 2685), 'os.path.normpath', 'os.path.normpath', (['dep'], {}), '(dep)\n', (2680, 2685), False, 'import os\n'), ((2707, 2743), 'os.path.join', 'os.path.join', (['"""fuchsia/release"""', 'dep'], {}), "('fuchsia/release', dep)\n", (2719, 2743), False, 'import os\n'), ((2765, 2795), 'os.path.normpath', 'os.path.normpath', (['archive_name'], {}), '(archive_name)\n', (2781, 2795), 
False, 'import os\n')]
import copy as cp s = input() t = input() u = list(s) v = list(t) w = cp.deepcopy(u) p = 0 q = 0 for h in range(len(u) - 1, -1, -1): if q == 1: break if v[-1] == u[h]: #w[i] = v[0] for j in range(len(v)): if (v[-1 - j] == u[h - j]) and h - j >= 0: pass if j == len(v) - 1: #for k in range(len(v)): #w[h - k] = v[-1 - k] q = 1 p = 1 else: break if q == 0: for i in range(len(u) - 1, -1, -1): if p == 1: break if v[-1] == u[i] or "?" == u[i]: #w[i] = v[0] for j in range(len(v)): if (v[-1 - j] == u[i - j] or "?" == u[i - j]) and i - j >= 0: pass if j == len(v) - 1: for k in range(len(v)): w[i - k] = v[-1 - k] p = 1 else: break if p == 1: for m in range(len(u)): if w[m] == "?": w[m] = "a" print("".join(w)) if p == 0: print("UNRESTORABLE")
[ "copy.deepcopy" ]
[((70, 84), 'copy.deepcopy', 'cp.deepcopy', (['u'], {}), '(u)\n', (81, 84), True, 'import copy as cp\n')]
# Runs an rt-app workload on a LISA target while collecting ftrace
# 'sched_switch' events, then reports, for every rt-app task, the first
# CPU-to-CPU migration observed in the trace.
import logging
import os
from lisa.trace import FtraceCollector, Trace
from lisa.utils import setup_logging
from lisa.target import Target, TargetConf
from lisa.wlgen.rta import RTA, Periodic
from lisa.datautils import df_filter_task_ids
import pandas as pd  # NOTE(review): imported but never used in this script
setup_logging()
target = Target.from_one_conf('conf/lisa/qemu_target_default.yml')
#target = Target.from_default_conf()
# Build one 50%-duty-cycle, 120 s periodic task per CPU (4 CPUs assumed).
rtapp_profile = {}
tasks = []
for cpu in range(4):
    # rt-app names the spawned thread "<profile-key>-<index>"; record the
    # full runtime name so it can be looked up in the trace later.
    tasks.append("tsk{}-{}".format(cpu,cpu))
    rtapp_profile["tsk{}".format(cpu)] = Periodic(duty_cycle_pct=50, duration_s=120)
wload = RTA.by_profile(target, "experiment_wload", rtapp_profile)
ftrace_coll = FtraceCollector(target, events=["sched_switch"])
trace_path = os.path.join(wload.res_dir, "trace.dat")
# Run the workload with tracing active, then pull the trace file back.
with ftrace_coll:
    wload.run()
ftrace_coll.get_trace(trace_path)
trace = Trace(trace_path, target.plat_info, events=["sched_switch"])
# sched_switch          __comm  __pid  __cpu   __line prev_comm  prev_pid  prev_prio  prev_state next_comm  next_pid  next_prio
df = trace.df_events('sched_switch')[['next_pid', 'next_comm', '__cpu']]
def analize_task_migration(task_id, ddf):
    # Print the sched_switch events that occurred on the task's original CPU
    # between its first scheduling and (just after) its first migration.
    # ddf is expected to hold the first switch-in event per CPU, so rows 0
    # and 1 bracket the first migration.
    start = ddf.index[0]
    stop = min(ddf.index[1] + 1.0, df.index[-1])
    start_cpu = ddf['__cpu'].values[0]
    stop_cpu = ddf['__cpu'].values[1]
    # Restrict the global event frame to the time window and source CPU:
    _df = df[start:stop][ df[start:stop]['__cpu'] == start_cpu ]
    print("Task {} migrated from CPU {} to CPU {}\n".format(task_id, start_cpu, stop_cpu))
    print(_df.to_string(max_cols = 64) + "\n")
for task in tasks:
    task_id = trace.get_task_id(task, update=False)
    # Keep only the switch-in events belonging to this task:
    _df = df_filter_task_ids(df, [task_id], pid_col='next_pid', comm_col='next_comm')
    # First event per distinct CPU: more than one row means the task migrated.
    ddf = _df.drop_duplicates(subset='__cpu', keep='first', inplace=False)
    print("****************** sched_switch {} ********************\n {} \n".format(task , ddf.to_string(max_cols = 64)))
    if len(ddf.index) > 1:
        analize_task_migration(task_id, ddf)
[ "lisa.utils.setup_logging", "os.path.join", "lisa.trace.Trace", "lisa.wlgen.rta.Periodic", "lisa.datautils.df_filter_task_ids", "lisa.target.Target.from_one_conf", "lisa.wlgen.rta.RTA.by_profile", "lisa.trace.FtraceCollector" ]
[((259, 274), 'lisa.utils.setup_logging', 'setup_logging', ([], {}), '()\n', (272, 274), False, 'from lisa.utils import setup_logging\n'), ((284, 341), 'lisa.target.Target.from_one_conf', 'Target.from_one_conf', (['"""conf/lisa/qemu_target_default.yml"""'], {}), "('conf/lisa/qemu_target_default.yml')\n", (304, 341), False, 'from lisa.target import Target, TargetConf\n'), ((571, 628), 'lisa.wlgen.rta.RTA.by_profile', 'RTA.by_profile', (['target', '"""experiment_wload"""', 'rtapp_profile'], {}), "(target, 'experiment_wload', rtapp_profile)\n", (585, 628), False, 'from lisa.wlgen.rta import RTA, Periodic\n'), ((644, 692), 'lisa.trace.FtraceCollector', 'FtraceCollector', (['target'], {'events': "['sched_switch']"}), "(target, events=['sched_switch'])\n", (659, 692), False, 'from lisa.trace import FtraceCollector, Trace\n'), ((706, 746), 'os.path.join', 'os.path.join', (['wload.res_dir', '"""trace.dat"""'], {}), "(wload.res_dir, 'trace.dat')\n", (718, 746), False, 'import os\n'), ((824, 884), 'lisa.trace.Trace', 'Trace', (['trace_path', 'target.plat_info'], {'events': "['sched_switch']"}), "(trace_path, target.plat_info, events=['sched_switch'])\n", (829, 884), False, 'from lisa.trace import FtraceCollector, Trace\n'), ((517, 560), 'lisa.wlgen.rta.Periodic', 'Periodic', ([], {'duty_cycle_pct': '(50)', 'duration_s': '(120)'}), '(duty_cycle_pct=50, duration_s=120)\n', (525, 560), False, 'from lisa.wlgen.rta import RTA, Periodic\n'), ((1556, 1631), 'lisa.datautils.df_filter_task_ids', 'df_filter_task_ids', (['df', '[task_id]'], {'pid_col': '"""next_pid"""', 'comm_col': '"""next_comm"""'}), "(df, [task_id], pid_col='next_pid', comm_col='next_comm')\n", (1574, 1631), False, 'from lisa.datautils import df_filter_task_ids\n')]
# Map numeric scores onto letter grades by locating each score's slot in a
# sorted table of breakpoints (classic bisect table-lookup idiom).
import bisect

breakpoints = [60, 70, 80, 90]          # grade-boundary scores, ascending
grades = 'FDCBA'                        # grades[i] pairs with the i-th slot
scores = [33, 99, 77, 70, 89, 90, 100]

def grade(score, breakpoints=breakpoints, grades=grades):
    """Return the letter grade for *score* using the breakpoint table."""
    slot = bisect.bisect_right(breakpoints, score)
    return grades[slot]

print('breakpoints:', breakpoints)
print('grades:', grades)
print('scores:', scores)
print(list(map(grade, scores)))
[ "bisect.bisect" ]
[((167, 200), 'bisect.bisect', 'bisect.bisect', (['breakpoints', 'score'], {}), '(breakpoints, score)\n', (180, 200), False, 'import bisect\n')]
""" Functions for displaying text to the screen. Text rendering in pygame does not allow for line breaks. This can lead to issues when attempting to render text, particularly if one is unsure of the width and height of a to-be-rendered string in a given font. The functions here handle these difficulties. This module includes the following functions (see the docstrings for more information): tallest_letter: returns the height in pixels of the tallest letter when rendered in a given font. text_to_sentences: convert a string into a list of sentences. screen_dimensions: returns the dimensions of the active display surface. longest_string_to_render: return the string in a list that will take up the most horizontal space in pixels (this will usually, but not necessarily, be the string with the most characters). height_of_strings: return the height of a list of strings when rendered in a given font. wrap_text: break a string into lines on a screen. string_to_screens_and_lines: break a string into screens and lines. render_string: get pygame.Surface and pygame.Rect objects for a string. render_lines: return pygame.Surface and pygame.Rect objects for a list of strings. string_to_surface_and_rect: return pygame.Surface and pygame.Rect objects for a string, given constraints on the pixel dimensions of the screen. display_text_until_keypress: present text to the screen until the user presses a specific key (or any specific key from a list). ask_question: display a question to the screen and return the response. 
""" from __future__ import division import sys import pygame from pygame.locals import * from cogandmem import experiment from cogandmem import generic LETTERS = ( K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p, K_q, K_r, K_s, K_t, K_u, K_v, K_w, K_x, K_y, K_z ) ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" NUMBERS = ( K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9 ) PUNCTUATION = ( K_PERIOD, K_COMMA, K_QUESTION, K_QUOTE, K_EXCLAIM, K_COLON, K_SEMICOLON ) def tallest_letter(font): """ Get the height, in pixels, of the tallest letter in the alphabet when rendered with a given font. """ return font.size(ALPHABET)[1] def text_to_sentences(text, terminators = (".", "?", "!", '."', '?"', '!"'), exclude = ("Mr.", "Ms.", "Mrs.", "Dr.", "e.g.", "i.e.")): """ Break text into a list of sentences. This is a highly imperfect function that takes a passage of text and breaks it into a list of sentences. The main stumbling block for the function is that there are numerous words that could indicate either the end of a sentence or the end of an abbreviation. I do not know of an effective solution to this problem. NB: The assumption is made that line breaks always denote the end of a sentence, regardless of the preceding character. Parameters: text: the passage to break into sentences. Keyword Parameters: terminators: strings that denote the end of a sentence. exclude: exceptions to the terminators. Returns: sentences: a list of sentences in text. """ sentences = [] text_as_paragraphs = text.split("\n") paragraphs_as_words = [] for paragraph in text_as_paragraphs: paragraph = paragraph.strip() if not paragraph: # This is a blank line. paragraphs_as_words.append([]) # Go to the next paragraph: continue words = paragraph.split(" ") paragraphs_as_words.append(words) for paragraph in paragraphs_as_words: if not paragraph: # This is a blank line. 
sentences.append("") continue sentence = "" for word in paragraph: # Add word to sentence, along with a leading space if necessary: if sentence: sentence = sentence+" "+word else: sentence = word # Check whether word ends with a terminator: try: ends_with_terminator = word.endswith(terminators) except TypeError: # terminators is probably a list rather than a tuple. # str.endswith() requires a tuple. terminators = tuple(terminators) ends_with_terminator = word.endswith(terminators) if ends_with_terminator and word not in exclude: # This ends the sentence. sentences.append(sentence) sentence = "" # Check for a dangling sentence: if sentence: sentences.append(sentence) return sentences def screen_dimensions(): """ Get the width and height of the active display surface. If no display surface has been set, get the first element in pygame.display.list_modes(). """ screen_surface = pygame.display.get_surface() try: w, h = screen_surface.get_size() except AttributeError: w, h = pygame.display.list_modes()[0] return w, h def longest_string_to_render(strings, f): """ Get the longest string to render from a list. Parameters: strings: a list or tuple of strings. f: the pygame.font.Font object used for rendering. Returns: s: the longest string to render with f; if there is a tie, s is set to the string occurring earlier in the strings list. n: the number of pixel columns needed to render s. """ s = "" n = 0 for string in strings: # Get the width of string: w = f.size(string)[0] if w > n: n = w s = string return s, n def height_of_strings(strings, f, line_size): """ Compute the height of a list of strings when rendered with a given font, taking into account the line size (i.e., the number of pixel rows interpolated between the bottom of one line of text and the top of the next). Parameters: strings: the list of strings whose height is measured; assumes that each string is a separate line. f: the pygame.font.Font object in which text is to be rendered. 
line_size: the number of pixel rows between lines; must be positive. Returns: h: the height of strings when rendered. """ h = 0 # Ignore line_size gaps to start: for string in strings: # Get current height: line_height = f.size(string)[1] h = h+line_height # Line gaps are added now. h = h+line_size*(len(strings)-1) return h def wrap_text(new_text, width, f, old_text = [], start_new_line = False, return_height = False, line_height = None): """ Break a string into lines on a screen. Parameters: new_text: words to convert to lines; if list or tuple, each element is a list of words from a paragraph, with line breaks automatically following an element; if str, split() is used to make into paragraphs and then individual words; if new_text evaluates to False in a Boolean context, a blank line is returned width: maximum pixels per line (width > 0). f: the font object to use. Keyword Parameters: return_height: whether the height of the rendered lines is returned; defaults to False. old_text: optional list of lines to which new_text is added; each element in old_text is assumed to be a single line; its legality given width is not checked; defaults to an empty list. start_new_line: only applies if old_text evaluates to True in a Boolean context; indicates whether a new line should start (i.e., whether new_text should begin a new line); defaults to False. line_size: only applies if return_height is True; enotes the pixels interpolated between lines of text; if not set but line_size is needed, it is obtained from f. Returns: lines: old_text with new_text added. optionally: height: the height of lines when rendered with f. """ # Branch depending on whether additional text or a blank line is being # added: if new_text: # If new_text is a string, it needs to be converted to a list. 
try: # Get paragraphs: new_text_as_paragraphs = new_text.split("\n") # Overwrite new_text: new_text = [] for paragraph in new_text_as_paragraphs: paragraph_list = paragraph.split(" ") new_text.append(paragraph_list) except AttributeError: # new_text is already a list. pass new_lines = list(old_text) # Check if a last line from old_text is needed: if old_text and not start_new_line: line = old_text[-1] # Delete line from new_lines: del new_lines[-1] else: line = "" # Set line width: line_width = f.size(line)[0] # Fill new_lines paragraph by paragraph: for paragraph in new_text: # Fill each line word by word: for word in paragraph: # Unless line is currently empty, a leading space is needed # when calculating word's width. if line: word_width = f.size(" "+word)[0] else: word_width = f.size(word)[0] line_width = line_width+word_width if line_width < width: # word fits on this line. if line: line = line+" "+word else: line = word elif line_width == width: # word fits, but no more words will. line = line+" "+word new_lines.append(line) line= "" line_width = 0 else: # word doesn't fit. new_lines.append(line) line = word word_width = f.size(word)[0] line_width = word_width # Some part of a line might be left. if line: new_lines.append(line) line = "" line_width = 0 else: # A blank line is being added to old_text. new_lines = list(old_text) new_lines.append("") # Check if height is calculated: if return_height: # Check if line_height needs to be set: if not line_height: line_height = f.get_linesize() height = height_of_strings(new_lines, f, line_height) return new_lines, height return new_lines def string_to_screens_and_lines(source, allowed_width, allowed_height, f, pixels_between_lines = None, end_screens_with = (), do_not_include = ()): """ Convert a string to screens and lines. Pygame does not allow line breaks ("\n") when rendering text. The purpose of this function is to break a string into lines and screens given a font and screen dimensions. 
The following two assumptions are made: 1. Line breaks ("\n") in source denote the start of a new paragraph. Therefore, to have an actual blank line (i.e., an empty string) appear in the returned array, add another "\n" immediately following the first. 2. Spaces denote the end of a word. Parameters: source: the string to divide into screens and lines. allowed_width: the width, in pixels, permitted for lines; can be a number of pixels or a proportion of the active screen's width. allowed_height: same as allowed_width but for the height of a single screen. f: the font with which source is measured. Keyword Parameters: pixels_between_lines: blank pixel rows between lines of text; defaults to None, in which case it is obtained from f. end_screens_with: a restricted set of characters that may end a screen; defaults to an empty tuple, in which case any character ending a word can end a screen. do_not_include: words that are exceptions to the end_screens_with words (e.g., "Mrs." ends in a period but should not end a screen) Returns: screens: a multidimensional list of screens and lines. """ # Check if allowed_height and allowed_width need to be set: if 0 < allowed_width <= 1 and 0 < allowed_height <= 1: allowed_width, allowed_height = screen_dimensions() elif 0 < allowed_width <= 1 or 0 < allowed_height <= 1: raise ValueError("Both or neither of allowed_width and \ allowed_height can be between 0 and 1.") # Check if pixels_between_lines needs to be set: if not pixels_between_lines: pixels_between_lines = f.get_linesize() else: assert pixels_between_lines > 0, "pixels_between_lines must be \ positive." # Make sure that allowed_height can accommodate the tallest word in # source: assert f.size(source)[1] <= allowed_height, "allowed_height cannot \ accommodate source." 
screens = [] # Break source into paragraphs and paragraphs into single words: paragraphs = source.split("\n") single_words = [] for paragraph in paragraphs: individual_words = paragraph.split(" ") # While here, verify that the longest word fits: widest_word, pixels = longest_string_to_render(individual_words, f) assert pixels < allowed_width, "{:s} in source is too long for \ allowed_width.".format(widest_word) single_words.append(individual_words) # The function branches next, depending on whether restrictions have been # placed on where screen breaks can occur. if not end_screens_with: # Screen breaks can occur following any word. # Break single_words into lines without regard to screens: lines_of_text, total_height = wrap_text( single_words, allowed_width, f, return_height = True, line_height = pixels_between_lines ) if total_height <= allowed_height: # Everything fits on one screen. screens.append(lines_of_text) else: # There will be at least two screens. # Initialize the first screen and a height counter: screen = [] screen_height = 0 for line in lines_of_text: line_height = f.size(line)[1] screen_height = screen_height+line_height+pixels_between_lines if screen_height < allowed_height: # line fits on the current screen. screen.append(line) elif screen_height == allowed_height or screen_height-pixels_between_lines < allowed_height: # line fits, but no more will. screen.append(line) screens.append(screen) screen = [] screen_height = 0 else: # line doesn't fit. screens.append(screen) screen = [line] screen_height = line_height+pixels_between_lines # Check for a remaining screen: if screen: screens.append(screen)\ else: # Screens can only end following specific strings. # These strings do not need to be end-of-sentence characters, but it # is difficult to imagine what else they would be. Therefore, I refer # to the resulting strings as sentences, acknowledging that this may # be incorrect terminology. 
# Break paragraphs into sentences: sentences = [] for paragraph in paragraphs: if sentences: # This is not the first line, so start the paragraph on a new # line: sentences.append("") if paragraph: # paragraph is not a blank line. # Break it into sentences: paragraph_as_sentences = text_to_sentences( paragraph, terminators = end_screens_with, exclude = do_not_include ) sentences = sentences+paragraph_as_sentences else: # paragraph is a blank line. sentences.append("") # Initialize the first screen: screen = [] for sentence in sentences: # Determine whether sentence starts on a new line or continues # from the current line: if screen: # If the last line in screen is blank, then sentence starts on # a new line. last_line = screen[-1] if last_line: next_line = False else: next_line = True else: # This screen is blank. # Arbitrarily set next_line to False: next_line = False # Try adding sentence to the current screen: possible_screen, screen_height = wrap_text( sentence, allowed_width, f, old_text = screen, start_new_line = next_line, return_height = True, line_height = pixels_between_lines ) if screen_height <= allowed_height: # Update the current screen: screen = possible_screen else: # This sentence does not fit. # If screen is currently blank, it means that sentence needs # to be broken across screens (i.e., it will not fit on a # single screen). if screen: # This is not an issue. # Save screen: screens.append(screen) # Initialize the next screen with sentence: screen, current_height = wrap_text( sentence, allowed_width, f, return_height = True, line_height = pixels_between_lines ) if current_height > allowed_height: # sentence needs to be broken across screens. # This can be accomplished by calling the present # function without restrictions on screen endings. # However, the text currently on screen is needed too. 
text_to_add = "" for line in screen: text_to_add = text_to_add+line+"" text_to_add = text_to_add+sentence multiple_screens = string_to_screens_and_lines( text_to_add, allowed_width, allowed_height, f, pixels_between_lines = pixels_between_lines ) for s in multiple_screens: screens.append(s) else: # screen is empty, but sentence will not fit. # Call the present function to get this sentence's # screens: multiple_screens = string_to_screens_and_lines( sentence, allowed_width, allowed_height, f, pixels_between_lines = pixels_between_lines ) for s in multiple_screens: screens.append(s) # Check if a final screen needs to be added: if screen: screens.append(screen) return screens def render_string(s, f, colour, background, antialiasing = True): """ Create pygame.Surface and pygame.Rect objects for a string, using a given font (f) and colour. Parameters: s: the string to render. f: the font in which to render s. colour: the colour of text to use, expressed as an RGB list or tuple. background: the background colour. Keyword Parameters: antialiasing: indicates whether text is rendered with antialiasing; defaults to True. Returns: s: the pygame.Surface object. r: the pygame.Rect object. """ s = f.render(s, antialiasing, colour, background) r = s.get_rect() return s, r def render_lines(lines, f, text_colour, background_colour, line_size = None, use_antialiasing = True): """ Create pygame.Surface and pygame.Rect objects for a list of strings. Parameters: lines: the lines to render; "" is treated as a blank line. f: the font in which to render text. text_colour: an RGB list or tuple for the colour of the text. background_colour: RGB for background. Keyword Parameters: line_size: the number of pixel rows between lines; defaults to None, in which case it is set from f. use_antialiasing: indicates whether lines are rendered with antialiasing; defaults to True. Returns: surf: the pygame.Surface object. rect: the pygame.Rect object. 
""" height = 0 surfaces = [] rects = [] for line in lines: s, r = render_string( line, f, text_colour, background_colour, antialiasing = use_antialiasing ) surfaces.append(s) rects.append(r) height = height+r.height try: height = height+line_size*(len(surfaces)-1) except TypeError: line_size = f.get_linesize() height = height+line_size*(len(surfaces)-1) # height will be the height of the returned surface and rect. # The width will be equal to the widest rect in rects. width = rects[0].width for rect in rects[1:]: if rect.width > width: width = rect.width # Initialize the returned surface: surf = pygame.Surface((width, height)) surf.fill(background_colour) # Keep track of the pixel row at which to blit each surface in surfaces: top = 0 for i in range(len(surfaces)): s = surfaces[i] r = rects[i] r.topleft = (0, top) surf.blit(s, r) top = top+r.height+line_size rect = surf.get_rect() return surf, rect def string_to_surface_and_rect(s, f, colour, background, max_width, max_height, max_screens = 1, line_size = None, antialiasing = True): """ Create a surface and rect from a string given a maximum pixel width for each line and a maximum pixel height for each screen. Parameters: s: the string. f: the font to use. colour: RGB for the text colour. background: RGB for the background colour. max_width: the maximum pixel width for each line. max_height: the maximum pixel height for each screen. Keyword Parameters: line_size: pixels between lines of text; defaults to None, in which case it is obtained from f. antialiasing: Boolean indicating whether antialiasing is used. Returns: surfaces: list of the pygame.Surface objects. rects: the list of pygame.Rect objects. """ surfaces = [] rects = [] lines = string_to_screens_and_lines( s, max_width, max_height, f, line_size ) assert len(lines) <= max_screens, "s is too long." 
for screen in lines: surf, rect = render_lines( screen, f, colour, background, line_size = line_size, use_antialiasing = antialiasing ) surfaces.append(surf) rects.append(rect) return surfaces, rects def display_text_until_keypress(main_text, main_font, text_colour, background, antialias = True, proportion_width = 0.95, proportion_height = 0.95, main_line = None, break_sentences = False, sentence_terminators = (".", "!", "?"), terminator_exceptions = (), gap = 1, bottom_message = "Press the space bar to continue.", bottom_font = None, bottom_line = None, advance_keys = (K_SPACE,), reverse_keys = (K_LEFT, K_BACKSPACE,), quit_keys = (K_ESCAPE,), ticker = None, frame_rate = 30, files = ()): """ Display text to the screen and wait for the user to advance. If the text exceeds a single screen, users can move back and forth between the screens. Parameters: main_text: the text to be displayed, excluding the advance message. main_font: the font used for main_text. text_colour: RGB list/tuple for the colour of text. background: RGB list/tuple for main_text's background. Keyword Parameters: antialias: Boolean indicating whether antialiasing is used in text rendering; defaults to True. proportion_width: proportion of the main display surface's width used for text rendering (default = 0.95). proportion_height: proportion of the main display surface's height used for text rendering (default = 0.95). main_line: pixel rows between lines of text; taken from main_font if not set. break_sentences: Boolean indicating whether sentences can be broken across screens; defaults to False. NB: Sentences may be broken across screens even if False if they cannot fit on a single screen. sentence_terminators: strings that end a sentence. terminator_exceptions: exceptions to the strings in sentence_terminators. gap: the number of line breaks between the bottom of the main text and the top of bottom_message (default = 1). 
bottom_message: text at the end of each screen; defaults to "Press the space bar to continue.", but should be changed if advance_keys does not include K_SPACE. bottom_font: font to use for bottom_message; if left as None, main_font is used. bottom_line: same as main_line for bottom_line; taken from bottom_font if not set. advance_keys: keys to move through the screens; defaults to (K_SPACE,). reverse_keys: keys to move backward through the screens; defaults to (K_LEFT, K_BACKSPACE); to disable the user's ability to move backward, pass an empty tuple. quit_keys: keys to exit the program; set as an empty tuple to disable. ticker: a pygame.time.Clock object for controlling frame rate; one is created if one is not passed. frame_rate: the maximum frames per second; defaults to 30. files: an optional list/tuple of open files to close in case the user quits. """ if not main_line: main_line = main_font.get_linesize() if not bottom_font: bottom_font = main_font if not bottom_line: bottom_line = bottom_font.get_linesize() window_surface = pygame.display.get_surface() try: window_rect = window_surface.get_rect() except AttributeError: window_surface = pygame.display.set_mode(pygame.display.list_modes()[0]) window_rect = window_surface.get_rect() window_surface.fill(background) window_width, window_height = window_surface.get_size() pixel_columns = int(proportion_width*window_width) pixel_rows = int(proportion_height*window_height) # Initialize lists to hold the generated surfaces and rects: surfaces = [] rects = [] # Get a surface and rect for bottom_message: bottom_lines = string_to_screens_and_lines( bottom_message, pixel_columns, pixel_rows, f = bottom_font, pixels_between_lines = bottom_line ) assert len(bottom_lines) == 1, "The bottom_message parameter cannot \ exceed a single screen." 
bottom_lines = bottom_lines[0] bottom_surface, bottom_rect = render_lines( bottom_lines, bottom_font, text_colour, background, line_size = bottom_line, use_antialiasing = antialias ) # Get the height for main_text: main_height = pixel_rows-bottom_rect.height-gap*main_line if break_sentences: main_lines = string_to_screens_and_lines( main_text, pixel_columns, main_height, main_font, pixels_between_lines = main_line, end_screens_with = sentence_terminators, do_not_include = terminator_exceptions ) else: main_lines = string_to_screens_and_lines( main_text, pixel_columns, main_height, main_font, pixels_between_lines = main_line ) for screen in main_lines: main_surface, main_rect = render_lines( screen, main_font, text_colour, background, line_size = main_line, use_antialiasing = antialias ) bottom_rect_copy = pygame.Rect(bottom_rect) # Create a surface to hold both main_surface and bottom_surface: surfaces_combined = pygame.Surface((pixel_columns, pixel_rows)) rects_combined = surfaces_combined.get_rect() surfaces_combined.fill(background) # Centre text with reference to the longer of the main_rect and # bottom_rect: if main_rect.width >= bottom_rect.width: left_coordinate = (pixel_columns-main_rect.width)//2 else: left_coordinate = (pixel_columns-bottom_rect.width)//2 main_rect.topleft = (left_coordinate, 0) bottom_rect_copy.topleft = (left_coordinate, main_rect.bottom+gap*main_line) surfaces_combined.blit(main_surface, main_rect) surfaces_combined.blit(bottom_surface, bottom_rect_copy) rects_combined.center = window_rect.center surfaces.append(surfaces_combined) rects.append(rects_combined) i = 0 surface_i = surfaces[i] rect_i = rects[i] window_surface.blit(surface_i, rect_i) keep_looping = True pygame.display.update() while keep_looping: for event in pygame.event.get(): if (event.type == KEYUP and event.key in quit_keys) or event.type == QUIT: generic.terminate(files) elif event.type == KEYUP and event.key in reverse_keys and i > 0: # Move back a screen: i = i-1 
surface_i = surfaces[i] rect_i = rects[i] window_surface.blit(surface_i, rect_i) pygame.display.update(rect_i) elif event.type == KEYUP and event.key in advance_keys and i < len(surfaces)-1: # Moving forward a screen. i = i+1 surface_i = surfaces[i] rect_i = rects[i] window_surface.blit(surface_i, rect_i) pygame.display.update(rect_i) elif event.type == KEYUP and event.key in advance_keys and i == len(surfaces)-1: keep_looping= False else: pass try: ticker.tick(frame_rate) except AttributeError: ticker = pygame.time.Clock() ticker.tick(frame_rate) def ask_question(question, f, text_colour, background, antialiasing = True, w = 0.95, h = 0.95, line_size = None, gap = 1, continue_message = "Press the down arrow key to advance.", continue_font = None, continue_line_size = None, min_response = 1, max_response = None, allowed_keys = LETTERS+NUMBERS+PUNCTUATION+(K_SPACE,), move_ahead = (K_DOWN,), move_back = (K_UP,), finished = (K_RETURN,), quit_keys = (K_ESCAPE,), allow_changes = True, ticker = None, frame_rate = 30, files = ()): """ Display a question and return the user's typed response. Parameters: question: the question to which the user responds. f: the font used. text_colour: RGB list/tuple for text. background: RGB list/tuple for background. Keyword Parameters: antialiasing: Boolean indicating whether antialiasing is used in text rendering; defaults to True. w: the proportion of the active display surface's width allowed for text rendering (default = 0.95). h: the proportion of the active display surface's height allowed for text rendering (default = 0.95). line_size: pixel rows between lines of text; if not set, obtained from f. gap: line breaks between question and continue_message or the user's response (default = 1). 
continue_message: applies only if question exceeds a single screen; the message prompting the user to press a key to advance (default = "Press the down arrow key to advance.") continue_font: font used instead of f for continue_message; f used if not set. continue_line_size: same as line_size but for continue_message. min_response: the minimum length of the user's response (default = 1). max_response: the maximum length of the user's response; (default = None). allowed_keys: keys that can be used for responding. move_ahead: keys that move to the next screen; only applicable if question takes up more than one screen. move_back: keys that move to the previous screen. finished: keys to press when the response is finished; defaults to return, but can be empty if min_response and max_response are equal. quit_keys: keys for closing the program. allow_changes: Boolean guiding whether input can be deleted once typed; defaults to True. ticker: pygame.time.Clock object; one is created if none is passed. frame_rate: the maximum frames per second (default = 30). files: files to close if terminate() is called. Returns: r: the user's response. """ if not line_size: line_size = f.get_linesize() r = "" window_surface = pygame.display.get_surface() try: window_rect = window_surface.get_rect() except AttributeError: window_surface = pygame.display.set_mode(pygame.display.list_modes()[0]) window_surface.fill(background) window_width, window_height = window_rect.size # Set pixel columns and rows allowed: pixel_columns = int(w*window_width) pixel_rows = int(h*window_height) surfaces = [] rects = [] # Get pixel rows available for question after accounting for the user's # response and gap: question_height = pixel_rows-(gap+1)*line_size # Break question into screens and lines: question_screens = string_to_screens_and_lines( question, pixel_columns, question_height, f, pixels_between_lines = line_size ) if len(question_screens) == 1: # question fits on one screen. 
# Extract question_screens from its outter list: question_screens = question_screens[0] question_surface, question_rect = render_lines( question_screens, f, text_colour, background, line_size = line_size, use_antialiasing = antialiasing ) # Initialize the surface for question: main_surface = pygame.Surface((pixel_columns, pixel_rows)) main_rect = main_surface.get_rect() # Centre question_rect within a pixel_columns by question_height # rectangle with a top-left coordinate of (0, 0): question_rect.center = (pixel_columns//2, question_height//2) # Get the response position: response_position = ( question_rect.left+(window_width-pixel_columns)//2, question_rect.bottom+gap*line_size+(window_height-pixel_rows)//2 ) main_surface.fill(background) main_surface.blit(question_surface, question_rect) # Centre main_rect with respect to window_surface: main_rect.center = window_rect.center surfaces.append(main_surface) rects.append(main_rect) else: # question doesn't fit on one screen, so continue_message needs to be # created. if not continue_font: continue_font = f if not continue_line_size: continue_line_size = continue_font.get_linesize() continue_lines = string_to_screens_and_lines( continue_message, pixel_columns, pixel_rows, continue_font, pixels_between_lines = continue_line_size ) assert len(continue_lines) == 1, "continue_message is too long; it \ takes up multiple screens." continue_lines = continue_lines[0] continue_surface, continue_rect = render_lines( continue_lines, continue_font, text_colour, background, line_size = continue_line_size, use_antialiasing = antialiasing ) # Unless continue_message takes up the same number of pixel rows as # given for the user's typed response, question_text will need to be # reconverted to question_screens. if continue_rect.height != line_size: # It is possible that continue_message is so much smaller than # line_size that question_text now fits on a single screen. 
# This causes problems because of the different keypresses # accepted on the last screen. # Prevent it: for i in range(question_height, 0, -1): question_screens = string_to_screens_and_lines( question, pixel_columns, i, f, pixels_between_lines = line_size ) if len(question_screens) > 1: break last_screen = question_screens[-1] last_screen_height = height_of_strings(last_screen, f, line_size) # Because last_screen was just fit with continue_message, ensure # that it fits with line_size. # If it doesn't, it's split into two screens. if last_screen_height+(gap+1)*line_size > pixel_rows: final_line = last_screen[-1] del last_screen[-1] new_last_screen = [final_line] question_screens.append(new_last_screen) for i in range(len(question_screens)): question_screen = question_screens[i] question_surface, question_rect = render_lines( question_screen, f, text_colour, background, line_size = line_size, use_antialiasing = antialiasing ) current_surface = pygame.Surface((pixel_columns, pixel_rows)) current_surface.fill(background) current_rect = current_surface.get_rect() if i == len(question_screens)-1: # continue_message is not on this screen. question_rect.topleft = ((pixel_columns-question_rect.width)//2, 0) response_position = ( question_rect.left+(window_width-pixel_columns), question_rect.bottom+gap*line_size+(window_height-pixel_rows)//2 ) current_surface.blit(question_surface, question_rect) else: # continue_message appears on this screen. # The left coordinate depends on which is wider. 
if question_rect.width >= continue_rect.width: left_coordinate = (pixel_columns-question_rect.width)//2 else: left_coordinate = (pixel_columns-continue_rect.width)//2 question_rect.topleft = (left_coordinate, 0) continue_rect.topleft = ( left_coordinate, question_rect.bottom+gap*line_size ) current_surface.blit(question_surface, question_rect) current_surface.blit(continue_surface, continue_rect) current_rect.center = window_rect.center surfaces.append(current_surface) rects.append(current_rect) # Create a screen tracker and show the first screen: i = 0 surface_i = surfaces[i] rect_i = rects[i] window_surface.blit(surface_i, rect_i) answer_obtained = False pygame.display.update() while not answer_obtained: for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYUP and event.key in quit_keys): generic.terminate(files) elif i < len(surfaces)-1 and event.type == KEYUP and event.key in move_ahead: # The user is moving to the next screen. i = i+1 window_surface.fill(background, rect_i) surface_i = surfaces[i] rect_i = rects[i] window_surface.blit(surface_i, rect_i) pygame.display.update(rect_i) elif i > 0 and event.type == KEYUP and event.key in move_back: # The user is moving back one screen. i = i-1 window_surface.fill(background, rect_i) surface_i = surfaces[i] rect_i = rects[i] window_surface.blit(surface_i, rect_i) pygame.display.update(rect_i) elif event.type == KEYUP and event.key in allowed_keys and (len(r) < max_response or not max_response): # A character has been added to r. 
character = pygame.key.name(event.key) r = r+character if not finished and len(r) == max_response: answer_obtained = True else: surface_r, rect_r = render_string( r, f, text_colour, background, antialiasing ) rect_r.topleft = response_position window_surface.fill(background, rect_r) window_surface.blit(surface_r, rect_r) pygame.display.update(rect_r) elif event.type == KEYUP and event.key == K_BACKSPACE and allow_changes and r and i == len(surfaces)-1: # The last character has been deleted. r = r[:len(r)-1] update_rect = rect_r surface_r, rect_r = render_string( r, f, text_colour, background, antialiasing ) rect_r.topleft = response_position window_surface.fill(background, update_rect) window_surface.blit(surface_r, rect_r) pygame.display.update(update_rect) elif event.type == KEYUP and event.key in finished and i == len(surfaces)-1 and len(r) >= min_response: # The user has finished r. answer_obtained = True else: pass try: ticker.tick(frame_rate) except AttributeError: # No clock was passed to the function. ticker = pygame.time.Clock() ticker.tick(frame_rate) return r
[ "pygame.event.get", "pygame.Surface", "pygame.display.get_surface", "pygame.display.list_modes", "cogandmem.generic.terminate", "pygame.key.name", "pygame.time.Clock", "pygame.display.update", "pygame.Rect" ]
[((4994, 5022), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (5020, 5022), False, 'import pygame\n'), ((22783, 22814), 'pygame.Surface', 'pygame.Surface', (['(width, height)'], {}), '((width, height))\n', (22797, 22814), False, 'import pygame\n'), ((27751, 27779), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (27777, 27779), False, 'import pygame\n'), ((30724, 30747), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (30745, 30747), False, 'import pygame\n'), ((34744, 34772), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (34770, 34772), False, 'import pygame\n'), ((40948, 40971), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (40969, 40971), False, 'import pygame\n'), ((29641, 29665), 'pygame.Rect', 'pygame.Rect', (['bottom_rect'], {}), '(bottom_rect)\n', (29652, 29665), False, 'import pygame\n'), ((29767, 29810), 'pygame.Surface', 'pygame.Surface', (['(pixel_columns, pixel_rows)'], {}), '((pixel_columns, pixel_rows))\n', (29781, 29810), False, 'import pygame\n'), ((30793, 30811), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (30809, 30811), False, 'import pygame\n'), ((35965, 36008), 'pygame.Surface', 'pygame.Surface', (['(pixel_columns, pixel_rows)'], {}), '((pixel_columns, pixel_rows))\n', (35979, 36008), False, 'import pygame\n'), ((41024, 41042), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (41040, 41042), False, 'import pygame\n'), ((39282, 39325), 'pygame.Surface', 'pygame.Surface', (['(pixel_columns, pixel_rows)'], {}), '((pixel_columns, pixel_rows))\n', (39296, 39325), False, 'import pygame\n'), ((5115, 5142), 'pygame.display.list_modes', 'pygame.display.list_modes', ([], {}), '()\n', (5140, 5142), False, 'import pygame\n'), ((30916, 30940), 'cogandmem.generic.terminate', 'generic.terminate', (['files'], {}), '(files)\n', (30933, 30940), False, 'from cogandmem import generic\n'), ((31859, 31878), 
'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (31876, 31878), False, 'import pygame\n'), ((41147, 41171), 'cogandmem.generic.terminate', 'generic.terminate', (['files'], {}), '(files)\n', (41164, 41171), False, 'from cogandmem import generic\n'), ((43687, 43706), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (43704, 43706), False, 'import pygame\n'), ((27913, 27940), 'pygame.display.list_modes', 'pygame.display.list_modes', ([], {}), '()\n', (27938, 27940), False, 'import pygame\n'), ((31226, 31255), 'pygame.display.update', 'pygame.display.update', (['rect_i'], {}), '(rect_i)\n', (31247, 31255), False, 'import pygame\n'), ((34906, 34933), 'pygame.display.list_modes', 'pygame.display.list_modes', ([], {}), '()\n', (34931, 34933), False, 'import pygame\n'), ((41544, 41573), 'pygame.display.update', 'pygame.display.update', (['rect_i'], {}), '(rect_i)\n', (41565, 41573), False, 'import pygame\n'), ((31560, 31589), 'pygame.display.update', 'pygame.display.update', (['rect_i'], {}), '(rect_i)\n', (31581, 31589), False, 'import pygame\n'), ((41928, 41957), 'pygame.display.update', 'pygame.display.update', (['rect_i'], {}), '(rect_i)\n', (41949, 41957), False, 'import pygame\n'), ((42153, 42179), 'pygame.key.name', 'pygame.key.name', (['event.key'], {}), '(event.key)\n', (42168, 42179), False, 'import pygame\n'), ((42676, 42705), 'pygame.display.update', 'pygame.display.update', (['rect_r'], {}), '(rect_r)\n', (42697, 42705), False, 'import pygame\n'), ((43263, 43297), 'pygame.display.update', 'pygame.display.update', (['update_rect'], {}), '(update_rect)\n', (43284, 43297), False, 'import pygame\n')]
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_preprocessor.ipynb (unless otherwise specified). __all__ = ['PreProcessor'] # Cell from .dataframeloader import * from .logger import * # Cell # hide from sklearn.compose import ColumnTransformer, make_column_transformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler, LabelEncoder from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error import numpy as np import pandas as pd # Cell class PreProcessor: """ Represent PreProcessor class Attributes: numerical_transformer: Numerical Columns Tranformer categorical_transformer: Categorical Columns Transformer preprocessor: Preprocessor for Columns Tranformer """ def __init__(self): self.columns_transfomer = None self.target_cols__encoder = None self.target_cols_pl = None self.cat_cols_pl = None self.num_cols_pl = None def __str__(self): """Returns human readable string reprsentation""" attr_str = "numerical_transformer, categorical_transformer,columns_transfomer" return "PreProcessor object with attributes:" + attr_str def __repr__(self): return self.__str__() # PreProcessor Pipeline core methods # Create preprocessing pipeline for numerical data def create_num_cols_pp_pl(self, num_cols__imputer, num_cols__scaler): self.num_cols_pl = Pipeline(steps=[('imputer', num_cols__imputer), ('scaler', num_cols__scaler)], #memory="pipeline_cache_dir" ) # Create Preprocessing pipeline for categorical data def create_cat_cols_pp_pl(self, cat_cols__imputer, cat_cols__encoder): self.cat_cols_pl = Pipeline(steps=[('imputer', cat_cols__imputer), ('encoder', cat_cols__encoder)], #memory="pipeline_cache_dir" ) # # Create Preprocessing pipeline for target cols # def create_target_cols_pp_pl(self, target_cols__encoder): # self.target_cols_pl = Pipeline(steps=[('encoder', target_cols__encoder)], # #memory="pipeline_cache_dir" # ) # 
Bundle preprocessing pipelines based upon types of columns def preprocess_all_cols(self, dataframeloader, problem_type="regression", num_cols__imputer=SimpleImputer(strategy='constant'), num_cols__scaler=StandardScaler(), cat_cols__imputer=SimpleImputer(strategy='constant'), cat_cols__encoder=OneHotEncoder(handle_unknown='ignore'), target_cols__encoder=LabelEncoder()): #cat_cols__encoder=OrdinalEncoder(handle_unknown='use_encoded_value', #unknown_value=np.nan)): #TODO: REALLY NOT HAPPY WITH THIS LENGTH BASED REPEATED FLOW CHECK! tranformer_tuple_list = [] # change preprocessor according to type of column found if len(dataframeloader.categorical_cols) < 1: logger.info("categorical columns are None, Preprocessing will done accordingly!") # create scikit-learn pipelines instance self.create_num_cols_pp_pl(num_cols__imputer, num_cols__scaler) #now setup columns tranformer num_cols_tuple = ("num_cols_pl", self.num_cols_pl, dataframeloader.numerical_cols) tranformer_tuple_list.append(num_cols_tuple) elif len(dataframeloader.numerical_cols) < 1: logger.info("numerical columns are None, Preprocessing will done accordingly!") # create sklearn pipelines instance self.create_cat_cols_pp_pl(cat_cols__imputer, cat_cols__encoder) #now setup columns tranformer cat_cols_tuple = ("cat_cols_pl", self.cat_cols_pl, dataframeloader.categorical_cols) tranformer_tuple_list.append(cat_cols_tuple) else: # create scikit-learn pipelines instance logger.info("Both Numerical & Categorical columns found, Preprocessing will done accordingly!") self.create_num_cols_pp_pl(num_cols__imputer, num_cols__scaler) self.create_cat_cols_pp_pl(cat_cols__imputer, cat_cols__encoder) #now setup columns tranformer num_cols_tuple = ("num_cols_pl", self.num_cols_pl, dataframeloader.numerical_cols) tranformer_tuple_list.append(num_cols_tuple) cat_cols_tuple = ("cat_cols_pl", self.cat_cols_pl, dataframeloader.categorical_cols) tranformer_tuple_list.append(cat_cols_tuple) # encode target based upon problem type if 
"classification" in problem_type: logger.info("PreProcessing will include target(s) encoding!") self.target_cols__encoder = target_cols__encoder #now make final column tranfomer object self.columns_transfomer = ColumnTransformer(tranformer_tuple_list, remainder='passthrough', sparse_threshold=0) #logger.info(f"self.transformer_type: {self.transformer_type}") return self
[ "sklearn.preprocessing.LabelEncoder", "sklearn.preprocessing.OneHotEncoder", "sklearn.preprocessing.StandardScaler", "sklearn.impute.SimpleImputer", "sklearn.compose.ColumnTransformer", "sklearn.pipeline.Pipeline" ]
[((1555, 1633), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('imputer', num_cols__imputer), ('scaler', num_cols__scaler)]"}), "(steps=[('imputer', num_cols__imputer), ('scaler', num_cols__scaler)])\n", (1563, 1633), False, 'from sklearn.pipeline import Pipeline\n'), ((1897, 1982), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('imputer', cat_cols__imputer), ('encoder', cat_cols__encoder)]"}), "(steps=[('imputer', cat_cols__imputer), ('encoder', cat_cols__encoder)]\n )\n", (1905, 1982), False, 'from sklearn.pipeline import Pipeline\n'), ((2583, 2617), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""'}), "(strategy='constant')\n", (2596, 2617), False, 'from sklearn.impute import SimpleImputer\n'), ((2664, 2680), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2678, 2680), False, 'from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler, LabelEncoder\n'), ((2728, 2762), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""'}), "(strategy='constant')\n", (2741, 2762), False, 'from sklearn.impute import SimpleImputer\n'), ((2810, 2848), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (2823, 2848), False, 'from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler, LabelEncoder\n'), ((2899, 2913), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2911, 2913), False, 'from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler, LabelEncoder\n'), ((5206, 5295), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (['tranformer_tuple_list'], {'remainder': '"""passthrough"""', 'sparse_threshold': '(0)'}), "(tranformer_tuple_list, remainder='passthrough',\n sparse_threshold=0)\n", (5223, 5295), False, 'from sklearn.compose import ColumnTransformer, 
make_column_transformer\n')]
import os from sys import argv, stdout os.environ["CUDA_VISIBLE_DEVICES"]="-1" import tensorflow as tf import numpy as np import scipy import scipy.io from itertools import product as prod import time from tensorflow.python.client import timeline import cProfile from sys import argv, stdout from get_data import * import pathlib from noise_models_and_integration import * from architecture import * # from experiments import noise_1_paramas as noise_params def variation_acc2_local_disturb(sess, network, x_, keep_prob, saver, test_input, test_target, params): eps = 10 ** (-params.eps_order) # restoring saved model saver.restore(sess, "weights/dim_{}/{}/gam_{}_alfa_{}.ckpt".format(params.model_dim, params.noise_name, params.gamma, params.alpha)) # initializoing resulting tensor, first two dimensions corresponds to coordinate which will be disturbed, on the last dimension, there will be added variation of outputs results = np.zeros((n_ts, controls_nb, len(np.array(test_input)))) print(len(test_input)) print(np.shape(results)) iter = -1 for sample_nb in range(len(np.array(test_input))): # taking sample NCP origin_NCP = test_input[sample_nb] # taking target superoperator corresponding to the NCP origin_superoperator = test_target[sample_nb] tf_result = False # calculating nnDCP corresponding to input NCP pred_DCP = get_prediction(sess, network, x_, keep_prob, np.reshape(origin_NCP, [1, params.n_ts, params.controls_nb])) # calculating superoperator from nnDCP sup_from_pred_DCP = integrate_lind(pred_DCP[0], tf_result, params) print("sanity check") acceptable_error = fidelity_err([origin_superoperator, sup_from_pred_DCP], params.dim, tf_result) print("predicted DCP", acceptable_error) print("---------------------------------") ############################################################################################################ #if sanity test is above assumed error then the experiment is performed if acceptable_error <= params.accept_err: iter += 1 # iteration over all 
coordinates for (t, c) in prod(range(params.n_ts), range(params.controls_nb)): new_NCP = origin_NCP if new_NCP[t, c] < (1 - eps): new_NCP[t, c] += eps else: new_NCP[t, c] -= eps sup_from_new_NCP = integrate_lind(new_NCP, tf_result, params) new_DCP = get_prediction(sess, network, x_, keep_prob, np.reshape(new_NCP, [1, n_ts, controls_nb])) sup_form_new_DCP = integrate_lind(new_DCP[0], tf_result, params) error = fidelity_err([sup_from_new_NCP, sup_form_new_DCP], params.dim, tf_result) #print(error) # if predicted nnDCP gives wrong superopertaor, then we add not variation of output, but some label if error <= params.accept_err: results[t, c, iter] = np.linalg.norm(pred_DCP - new_DCP) else: results[t, c, iter] = -1 print(iter) print(np.shape(results)) return results def experiment_loc_disturb(params): ########################################### # PLACEHOLDERS ########################################### # input placeholder x_ = tf.placeholder(tf.float32, [None, params.n_ts, params.controls_nb]) # output placeholder y_ = tf.placeholder(tf.complex128, [None, params.supeop_size, params.supeop_size]) # dropout placeholder keep_prob = tf.placeholder(tf.float32) # creating the graph network = my_lstm(x_, keep_prob, params) # instance for saving the model saver = tf.train.Saver() # loading the data (_, _, test_input, test_target) = get_data(params.train_set_size, params.test_set_size, params.model_dim) # maintaining the memory config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: # essential function which executes the experiment result = variation_acc2_local_disturb(sess, network, x_, keep_prob, saver, test_input, test_target, params) sess.close() tf.reset_default_graph() return result def train_and_predict(params, file_name): ########################################### # PLACEHOLDERS ########################################### # input placeholder x_ = tf.placeholder(tf.float32, [None, params.n_ts, params.controls_nb]) # 
output placeholder y_ = tf.placeholder(tf.complex128, [None, params.supeop_size, params.supeop_size]) # dropout placeholder keep_prob = tf.placeholder(tf.float32) # creating the graph network = my_lstm(x_, keep_prob, params) # instance for saving the model saver = tf.train.Saver() # loading the data (train_input, train_target, test_input, test_target) = get_data(params.train_set_size, params.test_set_size, params.model_dim, params.data_type) # maintaining the memory config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: # training the network (acc,train_table,test_table) = fit(sess, network, x_, y_, keep_prob, train_input, train_target, test_input, test_target, params) # making prediction by trained model pred = get_prediction(sess, network, x_, keep_prob, test_input) # saving trained model saver.save(sess, "weights/weights_from_{}.ckpt".format(file_name)) sess.close() tf.reset_default_graph() return (pred,acc,train_table,test_table) # --------------------------------------------------------------------------- def main(testing_effectiveness,argv_number): config_path = "configurations/" file_name = 'config{}.txt'.format(argv_number) file = open(config_path+file_name, "r") parameters = dict_to_ntuple(eval(file.read()), "parameters") print(parameters.activ_fn) pathlib.Path("weights/dim_{}/{}".format(parameters.model_dim, parameters.noise_name)).mkdir(parents=True, exist_ok=True) if testing_effectiveness: pathlib.Path("results/prediction/dim_{}".format(parameters.model_dim)).mkdir(parents=True, exist_ok=True) # main functionality if os.path.isfile("results/eff_fid_lstm/experiment_{}".format(file_name[0:-4])+".npz"): statistic = list(np.load("results/eff_fid_lstm/experiment_{}".format(file_name[0:-4])+".npz")["arr_0"][()]) else: statistic = [] for i in range(5): pred, acc, train_table, test_table = train_and_predict(parameters,file_name) # statistic.append(acc) statistic.append(pred) # save the results print(acc) # 
np.savez("results/eff_fid_lstm/experiment_{}".format(file_name[0:-4]), statistic) np.savez("results/prediction/experiment_{}".format(file_name[0:-4]), statistic) else: # main functionality data = experiment_loc_disturb(n_ts, gamma, alpha, evo_time, supeop_size, controls_nb, train_set_size, test_set_size, size_of_lrs, noise_name, model_dim, eps, accept_err) pathlib.Path("results/NN_as_approx/dim_{}".format(model_dim)).mkdir(parents=True, exist_ok=True) np.savez("results/NN_as_approx/experiment_{}".format(file_name[0:-4]), data) file.close() if __name__ == "__main__": # prepare dirs for the output files # Note: change the below value if you have already trained the network # train_model = True if len(argv) == 2: argv_number = int(argv[1]) else: argv_number = 63 main(True,argv_number )
[ "tensorflow.reset_default_graph", "numpy.reshape", "tensorflow.placeholder", "tensorflow.train.Saver", "tensorflow.Session", "numpy.array", "numpy.linalg.norm", "tensorflow.ConfigProto", "numpy.shape" ]
[((3720, 3787), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, params.n_ts, params.controls_nb]'], {}), '(tf.float32, [None, params.n_ts, params.controls_nb])\n', (3734, 3787), True, 'import tensorflow as tf\n'), ((3822, 3899), 'tensorflow.placeholder', 'tf.placeholder', (['tf.complex128', '[None, params.supeop_size, params.supeop_size]'], {}), '(tf.complex128, [None, params.supeop_size, params.supeop_size])\n', (3836, 3899), True, 'import tensorflow as tf\n'), ((3942, 3968), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (3956, 3968), True, 'import tensorflow as tf\n'), ((4090, 4106), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4104, 4106), True, 'import tensorflow as tf\n'), ((4284, 4300), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4298, 4300), True, 'import tensorflow as tf\n'), ((4902, 4926), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4924, 4926), True, 'import tensorflow as tf\n'), ((5138, 5205), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, params.n_ts, params.controls_nb]'], {}), '(tf.float32, [None, params.n_ts, params.controls_nb])\n', (5152, 5205), True, 'import tensorflow as tf\n'), ((5240, 5317), 'tensorflow.placeholder', 'tf.placeholder', (['tf.complex128', '[None, params.supeop_size, params.supeop_size]'], {}), '(tf.complex128, [None, params.supeop_size, params.supeop_size])\n', (5254, 5317), True, 'import tensorflow as tf\n'), ((5360, 5386), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5374, 5386), True, 'import tensorflow as tf\n'), ((5508, 5524), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5522, 5524), True, 'import tensorflow as tf\n'), ((5944, 5960), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5958, 5960), True, 'import tensorflow as tf\n'), ((6628, 6652), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', 
([], {}), '()\n', (6650, 6652), True, 'import tensorflow as tf\n'), ((1284, 1301), 'numpy.shape', 'np.shape', (['results'], {}), '(results)\n', (1292, 1301), True, 'import numpy as np\n'), ((3496, 3513), 'numpy.shape', 'np.shape', (['results'], {}), '(results)\n', (3504, 3513), True, 'import numpy as np\n'), ((4353, 4378), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4363, 4378), True, 'import tensorflow as tf\n'), ((6013, 6038), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (6023, 6038), True, 'import tensorflow as tf\n'), ((1349, 1369), 'numpy.array', 'np.array', (['test_input'], {}), '(test_input)\n', (1357, 1369), True, 'import numpy as np\n'), ((1709, 1769), 'numpy.reshape', 'np.reshape', (['origin_NCP', '[1, params.n_ts, params.controls_nb]'], {}), '(origin_NCP, [1, params.n_ts, params.controls_nb])\n', (1719, 1769), True, 'import numpy as np\n'), ((1222, 1242), 'numpy.array', 'np.array', (['test_input'], {}), '(test_input)\n', (1230, 1242), True, 'import numpy as np\n'), ((2902, 2945), 'numpy.reshape', 'np.reshape', (['new_NCP', '[1, n_ts, controls_nb]'], {}), '(new_NCP, [1, n_ts, controls_nb])\n', (2912, 2945), True, 'import numpy as np\n'), ((3362, 3396), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred_DCP - new_DCP)'], {}), '(pred_DCP - new_DCP)\n', (3376, 3396), True, 'import numpy as np\n')]
from __future__ import print_function import os from time import strftime, sleep import requests import time def main(BLUE_ENV_NAME, boto_authenticated_client): beanstalkclient = boto_authenticated_client.client('elasticbeanstalk') wait_until_env_be_ready(beanstalkclient, BLUE_ENV_NAME) if os.getenv("RELEASE_HEALTH_CHECKING_PATH"): blue_env_cname = os.getenv("RELEASE_HEALTH_CHECKING_PATH") else: blue_env_info = get_env_info(beanstalkclient, BLUE_ENV_NAME) blue_env_cname = "http://" + blue_env_info["Environments"][0]["CNAME"] print("blue_env_cname: " + blue_env_cname) env_http_response = requests.get(blue_env_cname, verify=False) env_reponse_status = env_http_response.status_code if env_reponse_status == 200 or env_reponse_status == 301: return "Ok" else: raise Exception("The environment isn't health") def get_env_info(beanstalkclient, env_name): response = beanstalkclient.describe_environments( EnvironmentNames=[ env_name ]) return response def wait_until_env_be_ready(beanstalkclient, ENV_NAME): env_info = get_env_info(beanstalkclient, ENV_NAME) while env_info["Environments"][0]["Status"] != "Ready": print("Waiting the blue environment be Ready!") time.sleep(10) env_info = get_env_info(beanstalkclient, ENV_NAME) return "Env is ready"
[ "requests.get", "time.sleep", "os.getenv" ]
[((300, 341), 'os.getenv', 'os.getenv', (['"""RELEASE_HEALTH_CHECKING_PATH"""'], {}), "('RELEASE_HEALTH_CHECKING_PATH')\n", (309, 341), False, 'import os\n'), ((625, 667), 'requests.get', 'requests.get', (['blue_env_cname'], {'verify': '(False)'}), '(blue_env_cname, verify=False)\n', (637, 667), False, 'import requests\n'), ((364, 405), 'os.getenv', 'os.getenv', (['"""RELEASE_HEALTH_CHECKING_PATH"""'], {}), "('RELEASE_HEALTH_CHECKING_PATH')\n", (373, 405), False, 'import os\n'), ((1244, 1258), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1254, 1258), False, 'import time\n')]
from pycoin.networks.bitcoinish import create_bitcoinish_network network = create_bitcoinish_network( network_name="Monacoin", symbol="MONA", subnet_name="mainnet", wif_prefix_hex="b0", sec_prefix="MONASEC:", address_prefix_hex="32", pay_to_script_prefix_hex="37", bip32_prv_prefix_hex="0488ade4", bip32_pub_prefix_hex="0488b21e", bech32_hrp="mona", magic_header_hex="fbc0b6db", default_port=9401, dns_bootstrap=["dnsseed.monacoin.org"])
[ "pycoin.networks.bitcoinish.create_bitcoinish_network" ]
[((77, 458), 'pycoin.networks.bitcoinish.create_bitcoinish_network', 'create_bitcoinish_network', ([], {'network_name': '"""Monacoin"""', 'symbol': '"""MONA"""', 'subnet_name': '"""mainnet"""', 'wif_prefix_hex': '"""b0"""', 'sec_prefix': '"""MONASEC:"""', 'address_prefix_hex': '"""32"""', 'pay_to_script_prefix_hex': '"""37"""', 'bip32_prv_prefix_hex': '"""0488ade4"""', 'bip32_pub_prefix_hex': '"""0488b21e"""', 'bech32_hrp': '"""mona"""', 'magic_header_hex': '"""fbc0b6db"""', 'default_port': '(9401)', 'dns_bootstrap': "['dnsseed.monacoin.org']"}), "(network_name='Monacoin', symbol='MONA',\n subnet_name='mainnet', wif_prefix_hex='b0', sec_prefix='MONASEC:',\n address_prefix_hex='32', pay_to_script_prefix_hex='37',\n bip32_prv_prefix_hex='0488ade4', bip32_pub_prefix_hex='0488b21e',\n bech32_hrp='mona', magic_header_hex='fbc0b6db', default_port=9401,\n dns_bootstrap=['dnsseed.monacoin.org'])\n", (102, 458), False, 'from pycoin.networks.bitcoinish import create_bitcoinish_network\n')]
def generatePostscriptNameMap(glyphList):
    """
    Generate a PostScript Name Map to be stored in the
    "public.postscriptNames" lib. Used to rename glyphs during generation,
    like so: {"indianrupee.tab": "uni20B9.tab"}

    Names that appear in the Adobe Glyph List keep their AGL name; any other
    encoded glyph gets a synthesized "uniXXXX" name.  Compound/suffixed names
    are then renamed part by part using the mapping built so far.

    Args:
        glyphList (list): A list of Glyph objects or a Font object
            (Defcon or FontParts)

    Returns:
        dict: {old_name: new_name} containing only names that change.
    """
    from fontTools.agl import UV2AGL
    import re

    unicodeMap = dict(UV2AGL)  # codepoint -> AGL glyph name

    # 1. Start with an identity map over the current glyph order.
    renameMap = {glyph.name: glyph.name for glyph in glyphList}

    # 2. For every glyph that has a unicode, prefer the AGL name when one
    #    exists; otherwise synthesize a "uniXXXX" name from the codepoint.
    for g in glyphList:
        u = g.unicode
        if u:
            renameMap[g.name] = unicodeMap.get(u, "uni%04X" % u)

    # 3. Glyphs still unmapped (k == v) are split into parts; every part
    #    that already has a mapping is substituted, e.g.
    #    "indianrupee.tab" -> ["indianrupee", ".", "tab"]
    #                      -> ["uni20B9", ".", "tab"] -> "uni20B9.tab"
    # Iterate over a snapshot: we assign into renameMap during the loop.
    for k, v in list(renameMap.items()):
        if k == v:
            splitName = re.split(r"((?<!^)[\W\_\-]+)", k)
            if len(splitName) > 1:
                for i, part in enumerate(splitName):
                    if part in renameMap:
                        splitName[i] = renameMap[part]
                renameMap[k] = "".join(splitName)

    # 4. Return only the items that are different.
    return {k: v for k, v in renameMap.items() if k != v}


if __name__ == '__main__':
    # RoboFont runs scripts as __main__, so behavior there is unchanged;
    # the guard makes the module importable (previously CurrentFont() ran,
    # and crashed, on any plain import).
    f = CurrentFont()
    f.lib["public.postscriptNames"] = generatePostscriptNameMap(f)
    print(f.lib["public.postscriptNames"])
    print("Done!")
[ "re.split" ]
[((1545, 1580), 're.split', 're.split', (['"""((?<!^)[\\\\W\\\\_\\\\-]+)"""', 'k'], {}), "('((?<!^)[\\\\W\\\\_\\\\-]+)', k)\n", (1553, 1580), False, 'import re\n')]
import csv
import argparse


def get_striked_header_pairs(strikethrough):
    """Split 'A--B' style strings into [A, B] pairs; [] when nothing given."""
    if strikethrough is None:
        return []
    return [pair.split('--') for pair in strikethrough]


def build_colored_str(v, cancelled):
    """Wrap *cancelled* in a LaTeX \\textcolor command keyed to strength *v*."""
    bands = (
        (0.20, 'cor-very-weak'),
        (0.40, 'cor-weak'),
        (0.60, 'cor-moderate'),
        (0.80, 'cor-strong'),
    )
    for upper, color in bands:
        if v < upper:
            return '\\textcolor{' + color + '}{' + str(cancelled) + '}'
    if v <= 1.0:
        return '\\textcolor{cor-very-strong}{' + str(cancelled) + '}'
    # v > 1.0 falls through and yields None, exactly like the original chain.


def build_cancel(v, dim1, dim2, striked_header_pairs):
    """Strike the value out when (dim1, dim2) appears, in either order."""
    struck = ([dim1, dim2] in striked_header_pairs
              or [dim2, dim1] in striked_header_pairs)
    return "\\hcancel{" + str(v) + "}" if struck else v


def build_table_text(csv_header, csv_data, striked_header_pairs,
                     hide_top_header=False):
    """Render the upper triangle of a correlation matrix as LaTeX rows.

    Rows/columns whose header is "SS" are skipped entirely; cells below the
    diagonal are emitted empty.
    """
    top_header = ' ' * len(csv_header) if hide_top_header else csv_header
    table = '& ' + ' & '.join(top_header) + '\\\\ \\hline \n'
    for i, data_row in enumerate(csv_data):
        dim1 = csv_header[i]
        if dim1 == "SS":
            continue
        cells = [dim1]
        for j in range(len(data_row)):
            dim2 = csv_header[j]
            if dim2 == "SS":
                continue
            if i > j:
                # lower triangle: empty cell
                cells.append("")
            else:
                v = float("%.2f" % round(float(data_row[j]), 2))
                cancelled = build_cancel(v, dim1, dim2, striked_header_pairs)
                cells.append(build_colored_str(v, cancelled))
        table += ' & '.join(cells) + '\\\\ \\hline \n'
    return table


def generate_latex_corr_table(csv_file, delimiter=",", strikethrough=None):
    """Load a CSV correlation matrix and return it as one LaTeX tabular string."""
    with open(csv_file, 'rt') as csvfile:
        all_rows = list(csv.reader(csvfile, delimiter=delimiter))
    header = [name.replace('_', ' ') for name in all_rows[0]]
    data = all_rows[1:]

    striked_header_pairs = get_striked_header_pairs(strikethrough)
    table = build_table_text(header, data, striked_header_pairs,
                             hide_top_header=True)
    begin_tag = "\\begin{{tabular}}{{{}}}\n".format('c' * (len(header) + 1))
    end_tag = "\\end{tabular}\n"
    return begin_tag + table + end_tag


if __name__ == '__main__':
    # ---------------
    # ARGUMENTS
    # ---------------
    parser = argparse.ArgumentParser(
        description="This program pretty-prints a CSV correlations matrix for usage in latex",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('csv_file', type=str, help='CSV file to load')
    parser.add_argument('-d', '--delimiter', type=str, default=",",
                        help='Column delimiter')
    parser.add_argument(
        '-s', '--strikethrough', type=str, nargs='+',
        help='Column pair to strike through (e.g. A--B, B--F for headers named A to F)')

    # ---------------
    # MAIN
    # ---------------
    args = parser.parse_args()
    print(generate_latex_corr_table(args.csv_file,
                                    delimiter=args.delimiter,
                                    strikethrough=args.strikethrough))
[ "csv.reader", "argparse.ArgumentParser" ]
[((2814, 2989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This program pretty-prints a CSV correlations matrix for usage in latex"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'This program pretty-prints a CSV correlations matrix for usage in latex',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (2837, 2989), False, 'import argparse\n'), ((2212, 2252), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': 'delimiter'}), '(csvfile, delimiter=delimiter)\n', (2222, 2252), False, 'import csv\n')]
from ..utils.GPIO_utils import setup_output, output, GPIO_Base
from time import sleep
import random


def keep_decorate(func):
    """Wrap a motion method so the action is held for a duration, then stopped.

    ``keep`` semantics: ``None`` -> fall back to the instance's default
    ``self.keep``; ``<= 0`` -> leave the motor running (no auto-stop);
    ``> 0`` -> hold for that many seconds and then stop.
    """
    def wrapped(self, keep=None):
        func(self, keep)
        duration = self.keep if keep is None else keep
        if duration > 0:
            sleep(duration)
            self._stop()
    return wrapped


class L289N(GPIO_Base):
    """Driver for an L289N dual H-bridge DC motor controller."""

    def __init__(self, pins=(23, 22, 19, 21), keep=1.0, **kwargs):
        """
        mode: the pin mode, 'BOARD' or 'BCM' (handled by GPIO_Base).
        pins: pins for left forward, left backward, right forward,
              right backward.
        keep: default duration an action is held; keep <= 0 means the
              motor is never stopped automatically.
        """
        super(L289N, self).__init__(**kwargs)
        self.pins = pins
        for pin in self.pins:
            setup_output(pin)
        self.keep = keep

    # ============== actions ================
    def _stop(self, keep=None):
        # Immediate stop; undecorated so the decorator can call it safely.
        output(self.pins, [0, 0, 0, 0])

    @keep_decorate
    def stop(self, keep=None):
        output(self.pins, [0, 0, 0, 0])

    @keep_decorate
    def left_backward(self, keep=None):
        output(self.pins[:2], [0, 1])

    @keep_decorate
    def left_forward(self, keep=None):
        output(self.pins[:2], [1, 0])

    @keep_decorate
    def right_backward(self, keep=None):
        output(self.pins[-2:], [0, 1])

    @keep_decorate
    def right_forward(self, keep=None):
        output(self.pins[-2:], [1, 0])

    # Compound moves delegate with keep=-1 so the per-side calls neither
    # sleep nor stop; the outer decorator owns the timing.
    @keep_decorate
    def forward(self, keep=None):
        self.right_forward(keep=-1)
        self.left_forward(keep=-1)

    @keep_decorate
    def backward(self, keep=None):
        self.right_backward(keep=-1)
        self.left_backward(keep=-1)

    @keep_decorate
    def spin_right(self, keep=None):
        self.right_backward(keep=-1)
        self.left_forward(keep=-1)

    @keep_decorate
    def spin_left(self, keep=None):
        self.right_forward(keep=-1)
        self.left_backward(keep=-1)
[ "time.sleep" ]
[((245, 256), 'time.sleep', 'sleep', (['keep'], {}), '(keep)\n', (250, 256), False, 'from time import sleep\n')]
from matplotlib import pyplot as plt


def leibniz(n):
    """Leibniz series for pi, using terms 0..n (inclusive).

    Returns (final_sum, partial_sums) where partial_sums[i] is the sum
    after i+1 terms; the series converges (slowly) to pi.
    """
    partial_sums = []
    total = 0
    for k in range(n + 1):
        total += (-1) ** k * (4 / (2 * k + 1))
        partial_sums.append(total)
    return total, partial_sums


lz, ret = leibniz(1000)
plt.plot(ret)
plt.show()
[ "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ]
[((198, 211), 'matplotlib.pyplot.plot', 'plt.plot', (['ret'], {}), '(ret)\n', (206, 211), True, 'from matplotlib import pyplot as plt\n'), ((212, 222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (220, 222), True, 'from matplotlib import pyplot as plt\n')]
from typing import Dict, List, Any

import numpy
from overrides import overrides

from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer


class TaggingInstance(TextInstance):
    """
    A ``TaggingInstance`` represents a passage of text and a tag sequence over that text.

    There are some sticky issues with tokenization and how exactly the label is specified.
    For example, if your label is a sequence of tags, that assumes a particular tokenization,
    which interacts in a funny way with our tokenization code.

    This is a general superclass containing common functionality for most simple sequence
    tagging tasks.  The specifics of reading in data from a file and converting that data into
    properly-indexed tag sequences is left to subclasses.
    """
    def __init__(self, text: str, label: Any, index: int=None):
        super(TaggingInstance, self).__init__(label, index)
        self.text = text  # the raw passage the tag sequence applies to

    def __str__(self):
        return "TaggedSequenceInstance(" + self.text + ", " + str(self.label) + ")"

    @overrides
    def words(self) -> Dict[str, List[str]]:
        """Return the instance's words, plus its tags under the 'tags' namespace."""
        words = self._words_from_text(self.text)
        words['tags'] = self.tags_in_label()
        return words

    def tags_in_label(self):
        """
        Returns all of the tag words in this instance, so that we can convert them into
        indices.  This is called in ``self.words()``.  Not necessary if you have some
        pre-indexed labeling scheme.
        """
        raise NotImplementedError

    def _index_label(self, label: Any, data_indexer: DataIndexer) -> List[int]:
        """
        Index the labels.  Since we don't know what form the label takes, we leave it to
        subclasses to implement this method.  If you need to convert tag names into
        indices, use the namespace 'tags' in the ``DataIndexer``.
        """
        raise NotImplementedError

    def to_indexed_instance(self, data_indexer: DataIndexer):
        """Index both text and label; they must end up the same length."""
        text_indices = self._index_text(self.text, data_indexer)
        label_indices = self._index_label(self.label, data_indexer)
        assert len(text_indices) == len(label_indices), "Tokenization is off somehow"
        return IndexedTaggingInstance(text_indices, label_indices, self.index)


class IndexedTaggingInstance(IndexedInstance):
    """Indexed counterpart of ``TaggingInstance``: token indices plus a tag-index sequence."""

    def __init__(self, text_indices: List[int], label: List[int], index: int=None):
        super(IndexedTaggingInstance, self).__init__(label, index)
        self.text_indices = text_indices

    @classmethod
    @overrides
    def empty_instance(cls):
        # Bug fix: this previously returned a (non-indexed) TaggingInstance,
        # which has no pad() / as_training_data() and would break padding
        # code that expects an indexed instance here.
        return IndexedTaggingInstance([], label=None, index=None)

    @overrides
    def get_lengths(self) -> Dict[str, int]:
        return self._get_word_sequence_lengths(self.text_indices)

    @overrides
    def pad(self, max_lengths: Dict[str, int]):
        """Pad (or left-truncate) text and label to the requested lengths."""
        self.text_indices = self.pad_word_sequence(self.text_indices, max_lengths,
                                                   truncate_from_right=False)
        # NOTE(review): default_value repeats label[0], so padding an empty
        # label list would raise IndexError — assumes labels are non-empty.
        self.label = self.pad_sequence_to_length(self.label,
                                                 desired_length=max_lengths['num_sentence_words'],
                                                 default_value=lambda: self.label[0],
                                                 truncate_from_right=False)

    @overrides
    def as_training_data(self):
        """Return (text_array, label_array) as int32 numpy arrays."""
        text_array = numpy.asarray(self.text_indices, dtype='int32')
        label_array = numpy.asarray(self.label, dtype='int32')
        return text_array, label_array
[ "numpy.asarray" ]
[((3384, 3431), 'numpy.asarray', 'numpy.asarray', (['self.text_indices'], {'dtype': '"""int32"""'}), "(self.text_indices, dtype='int32')\n", (3397, 3431), False, 'import numpy\n'), ((3454, 3494), 'numpy.asarray', 'numpy.asarray', (['self.label'], {'dtype': '"""int32"""'}), "(self.label, dtype='int32')\n", (3467, 3494), False, 'import numpy\n')]
#<NAME>
# todo mov not working
import os

import nuke
from PySide import QtGui


def run(node):
    """Copy *node*'s file path, rewritten as a printf-style sequence plus its
    frame range, to the clipboard and paste the clipboard back into Nuke.

    Args:
        node: a Nuke node with a 'file' knob (e.g. a Read or Write node).
    """
    clipboard = QtGui.QApplication.clipboard()
    filename = node['file'].evaluate()
    # Replace the frame-number field with a zero-padded %0Nd pattern.
    # NOTE(review): assumes a "name.frame.ext" filename — confirm for movs.
    filesplit = filename.rsplit('.', -2)
    filesplit[1] = '%0' + str(len(filesplit[1])) + 'd'
    filep = '.'.join(filesplit)
    # Ask Nuke for the sequence listing of the directory; the second token
    # of the first entry is the frame range.  (Bug fix: `os` was used here
    # without being imported, so this raised NameError.)
    filenameFrame = nuke.getFileNameList(os.path.dirname(filep))[0].rsplit(' ', -1)[1]
    clipboard.setText((filep + " " + filenameFrame))
    nuke.nodePaste("%clipboard%")

#run(nuke.selectedNode())
[ "PySide.QtGui.QApplication.clipboard", "nuke.nodePaste" ]
[((98, 128), 'PySide.QtGui.QApplication.clipboard', 'QtGui.QApplication.clipboard', ([], {}), '()\n', (126, 128), False, 'from PySide import QtGui\n'), ((424, 453), 'nuke.nodePaste', 'nuke.nodePaste', (['"""%clipboard%"""'], {}), "('%clipboard%')\n", (438, 453), False, 'import nuke\n')]
import sublime


def pkg_settings():
    """Return this package's Sublime settings object.

    The ``sublime.load_settings(...)`` call is made lazily inside this
    function rather than at module import time (no module-level settings
    variable), because of the plugin lifecycle:
    https://www.sublimetext.com/docs/3/api_reference.html#plugin_lifecycle
    """
    return sublime.load_settings("Git blame.sublime-settings")


# Keys used to look up individual options inside the settings file.
PKG_SETTINGS_KEY_CUSTOMBLAMEFLAGS = "custom_blame_flags"
PKG_SETTINGS_KEY_INLINE_BLAME_ENABLED = "inline_blame_enabled"
PKG_SETTINGS_KEY_INLINE_BLAME_DELAY = "inline_blame_delay"
[ "sublime.load_settings" ]
[((309, 360), 'sublime.load_settings', 'sublime.load_settings', (['"""Git blame.sublime-settings"""'], {}), "('Git blame.sublime-settings')\n", (330, 360), False, 'import sublime\n')]
import json


def parse(f):
    """Write nested subsets of a COCO-style annotation dict to disk.

    For each size in [4, 16, 32, 64, 128, <all images>], writes
    ``train_<size>.json`` containing up to that many images — only images
    that have at least one annotation — plus their annotations and the
    full category list.

    Args:
        f (dict): COCO-style dict with 'images', 'annotations', 'categories'.
    """
    imgid_2_anno = {}
    imgid_2_img = {}
    annotations = f['annotations']
    images = f['images']
    print(len(images))
    categories = f['categories']
    for img in images:
        imgid_2_img[img['id']] = img
    # Group annotations by their image id.
    for anno in annotations:
        imgid_2_anno.setdefault(anno['image_id'], []).append(anno)
    ret_categories = categories
    split = [4, 16, 32, 64, 128, len(images)]
    for num in split:
        ret_imgs = []
        ret_annos = []
        # Dicts preserve insertion order, so this takes the first `num`
        # annotated images in the order their annotations first appeared.
        for img_id in list(imgid_2_anno.keys())[:num]:
            ret_imgs.append(imgid_2_img[img_id])
            ret_annos.extend(imgid_2_anno[img_id])
        ret_obj = {'images': ret_imgs,
                   'annotations': ret_annos,
                   'categories': ret_categories}
        with open('train_{}.json'.format(num), 'w') as out:
            json.dump(ret_obj, out)


if __name__ == '__main__':
    # Guarded so importing this module no longer requires train.json to
    # exist (previously this I/O ran unconditionally at import time).
    with open('train.json', 'r') as f:
        gt = json.load(f)
    parse(gt)
[ "json.load", "json.dump" ]
[((56, 68), 'json.load', 'json.load', (['f'], {}), '(f)\n', (65, 68), False, 'import json\n'), ((1125, 1146), 'json.dump', 'json.dump', (['ret_obj', 'f'], {}), '(ret_obj, f)\n', (1134, 1146), False, 'import json\n')]
import time

from sanic.response import HTTPResponse
from sanic.server import HttpProtocol

from insanic.log import access_logger


class InsanicHttpProtocol(HttpProtocol):
    def log_response(self, response: HTTPResponse) -> None:
        """
        Log the completed response through the structured access logger.

        More expressive than Sanic's default implementation: emits status,
        body size, client host, duration and error-code fields as structured
        ``extra`` data rather than one formatted line.

        :param response: the outgoing HTTP response being logged.
        :return: None
        """
        if self.access_log:
            # Health-check probes would flood the access log; skip them.
            if self.request.url.endswith("/health/"):
                return
            extra = {
                "status": response.status,
                "byte": len(response.body),
                "host": f"{self.request.socket[0]}:{self.request.socket[1]}",
                "request": f"{self.request.method} {self.request.url}",
                # NOTE(review): appears to be the elapsed time in
                # microseconds — assumes _request_time is also in
                # microseconds since the epoch; confirm upstream.
                "request_duration": int(time.time() * 1000000)
                - (self.request._request_time),
                "method": self.request.method,
                "path": self.request.path,
                # Filled in below when the response carries an error code.
                "error_code_name": None,
                "error_code_value": None,
                "uri_template": self.request.uri_template,
            }
            # Responses from insanic's exception machinery carry a
            # structured error code; surface it in the log record.
            if (
                hasattr(response, "error_code")
                and response.error_code is not None
            ):
                extra.update({"error_code_name": response.error_code["name"]})
                extra.update({"error_code_value": response.error_code["value"]})
            # Inter-service requests record which service made the call.
            if hasattr(self.request, "_service"):
                extra.update(
                    {
                        "request_service": str(
                            self.request._service.request_service
                        )
                    }
                )
            # 5xx responses are logged with a traceback; everything else
            # is a plain info line.
            if str(response.status)[0] == "5":
                access_logger.exception(
                    "", extra=extra, exc_info=response.exception
                )
            else:
                access_logger.info("", extra=extra)
[ "insanic.log.access_logger.exception", "time.time", "insanic.log.access_logger.info" ]
[((1775, 1844), 'insanic.log.access_logger.exception', 'access_logger.exception', (['""""""'], {'extra': 'extra', 'exc_info': 'response.exception'}), "('', extra=extra, exc_info=response.exception)\n", (1798, 1844), False, 'from insanic.log import access_logger\n'), ((1917, 1952), 'insanic.log.access_logger.info', 'access_logger.info', (['""""""'], {'extra': 'extra'}), "('', extra=extra)\n", (1935, 1952), False, 'from insanic.log import access_logger\n'), ((819, 830), 'time.time', 'time.time', ([], {}), '()\n', (828, 830), False, 'import time\n')]
# %%
import pandas as pd
# %%
# path to data, see
# res stock meta: https://data.openei.org/s3_viewer?bucket=oedi-data-lake&prefix=nrel-pds-building-stock%2Fend-use-load-profiles-for-us-building-stock%2F2021%2Fresstock_amy2018_release_1%2Ftimeseries_aggregates_metadata%2F
# com stock meta: https://data.openei.org/s3_viewer?bucket=oedi-data-lake&prefix=nrel-pds-building-stock%2Fend-use-load-profiles-for-us-building-stock%2F2021%2Fcomstock_tmy3_release_1%2Ftimeseries_aggregates_metadata%2F
comStockPath = "F:/CHI-HACK/decarb/commercial_nerl.tsv"
resStockPath = "F:/CHI-HACK/decarb/residential_nerl.tsv"

# %%
com_data = pd.read_csv(comStockPath, sep="\t")
res_data = pd.read_csv(resStockPath, sep="\t")

geog_col = 'in.state_name'
heating_fuel = 'in.heating_fuel'

# %%
def cleanGroup(df, colName):
    """Count buildings per state; return a two-column frame: geoid, colName."""
    temp = df.groupby(geog_col).count().reset_index()[[geog_col, 'bldg_id']]
    temp.columns = ['geoid', colName]
    return temp

# %%
res_count = cleanGroup(res_data, 'res-count')
com_count = cleanGroup(com_data, 'com-count')
# %%
# pull relevant columns and group by state
res_non_el_heating = cleanGroup(res_data[res_data['in.heating_fuel'] != "Electricity"], 'res-non-ele-heating')
com_non_el_heating = cleanGroup(com_data[com_data['in.heating_fuel'] != "Electricity"], 'com-non-ele-heating')
res_non_el_water_heating = cleanGroup(res_data[res_data['in.water_heater_fuel'] != "Electricity"], 'res-non-ele-water-heating')
com_non_el_water_heating = cleanGroup(com_data[com_data['in.service_water_heating_fuel'] != "Electricity"], 'com-non-ele-water-heating')
res_non_el_range = cleanGroup(res_data[~res_data['in.cooking_range'].str.contains("Electric")], 'res-non-ele-range')

# %%
# Merge em up!
merged = res_count.merge(com_count, on="geoid", how="outer") \
    .merge(res_non_el_heating, on="geoid",) \
    .merge(com_non_el_heating, on="geoid",) \
    .merge(res_non_el_water_heating, on="geoid",) \
    .merge(com_non_el_water_heating, on="geoid",) \
    .merge(res_non_el_range, on="geoid",)

# %%
# calculate percentages
merged['pct-res-non-ele-heating'] = merged['res-non-ele-heating'] / merged['res-count'] * 100
merged['pct-com-non-ele-heating'] = merged['com-non-ele-heating'] / merged['com-count'] * 100
merged['pct-res-non-ele-water-heating'] = merged['res-non-ele-water-heating'] / merged['res-count'] * 100
merged['pct-com-non-ele-water-heating'] = merged['com-non-ele-water-heating'] / merged['com-count'] * 100
merged['pct-res-non-ele-range'] = merged['res-non-ele-range'] / merged['res-count'] * 100

# %%
# export!
merged.to_csv("../raw/nrel_summary.csv", index=False)
# %%
## combine with MS footprint data
ms_buildings = pd.read_csv('../raw/microsoft_footprints.csv')
ms_buildings['Microsoft Footprint Count'] = ms_buildings['Microsoft Footprint Count'].str.replace(',', '').astype('int64')
# %%
output = ms_buildings[['State', 'Microsoft Footprint Count']].merge(merged, left_on="State", right_on="geoid")
output = output.drop(columns={"geoid"})
# %%
# cleanup
output['res-count'] = output['res-count'].astype('int64')
output['com-count'] = output['com-count'].astype('int64')
output = output.rename(columns={
    'State': 'state',
    'res-count': 'nrelRes',
    'com-count': 'nrelCom',
    "Microsoft Footprint Count": "buildings",
    "pct-res-non-ele-heating": "pctResNonElHeating",
    "pct-com-non-ele-heating": "pctComNonElHeating",
    "pct-res-non-ele-water-heating": "pctResNonElWaterHeating",
    "pct-com-non-ele-water-heating": "pctComNonElWaterHeating",
    "pct-res-non-ele-range": "pctResNonElRange",
    'res-non-ele-heating': 'resNonElHeating',
    'com-non-ele-heating': 'comNonElHeating',
    'res-non-ele-water-heating': 'resNonElWaterHeating',
    # Bug fix: this previously mapped to 'resNonElWaterHeating', colliding
    # with the residential column above and duplicating the column name.
    'com-non-ele-water-heating': 'comNonElWaterHeating',
    'res-non-ele-range': 'resNonElRange'
})
# %%
output['state'] = output['state'].str.replace(' ', '_').str.lower()
# %%
output.round(2).to_csv('../raw/buildings_data.csv', index=False)
# %%
[ "pandas.read_csv" ]
[((622, 657), 'pandas.read_csv', 'pd.read_csv', (['comStockPath'], {'sep': '"""\t"""'}), "(comStockPath, sep='\\t')\n", (633, 657), True, 'import pandas as pd\n'), ((669, 704), 'pandas.read_csv', 'pd.read_csv', (['resStockPath'], {'sep': '"""\t"""'}), "(resStockPath, sep='\\t')\n", (680, 704), True, 'import pandas as pd\n'), ((2644, 2690), 'pandas.read_csv', 'pd.read_csv', (['"""../raw/microsoft_footprints.csv"""'], {}), "('../raw/microsoft_footprints.csv')\n", (2655, 2690), True, 'import pandas as pd\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun May 23 19:53:22 2021 @author: <NAME> (<EMAIL>) at USTC This script refers to some theories and codes of Obspy/MoPaD/Introduction to Seismology (Yongge Wan): 1) The MoPaD program (https://github.com/geophysics/MoPaD); 2) str_dip_rake to mt; 3) The conversion between str_dip_rake and A/N vector; 4) The conversion between A/N vector and P/T/N vector; 5) mt to P/T/N vector; 6) P/T/N vector to P/T/N vector's stirke and dip; 7) project station to beachball; 8) Hudson plot 9) Decompose of mt: isotropic + deviatoric = isotropic + DC + CLVD; 10) Describe_fault_plane with two str_dip_rake. Modify history: 1) May 23 19:53:22 2021 || Fu Yin at USTC || The initial release. 2) ... """ import numpy as np import math from math import pi from math import sin,cos,tan,atan2,atan,sqrt,acos #%%###################################################### # (2) str_dip_rake to mt ######################################################### def str_dip_rake2MT(strike,dip,rake): """ Input: fault plane' strike dip and rake in degrees. strike : [0, 360) dip : [0, 90] rake : [-180, 180) Output: a moment tensor object in NED system. 
""" strike = strike/180*pi dip = dip/180*pi rake = rake/180*pi M0 = 1 Mxx = -M0*( sin(dip) * cos(rake) * sin(2*strike) + sin(2*dip) * sin(rake) * sin(strike)**2 ) Myy = M0*( sin(dip) * cos(rake) * sin(2*strike) - sin(2*dip) * sin(rake) * cos(strike)**2 ) Mzz = M0*( sin(2*dip) * sin(rake) ) Mxy = M0*( sin(dip) * cos(rake) * cos(2*strike) + 1/2* sin(2*dip) * sin(rake) * sin(2*strike) ) Mxz = -M0*( cos(dip) * cos(rake) * cos(strike) + cos(2*dip) * sin(rake) * sin(strike) ) Myz = -M0*( cos(dip) * cos(rake) * sin(strike) - cos(2*dip) * sin(rake) * cos(strike) ) A = MTensor([Mxx, Myy, Mzz, Mxy, Mxz, Myz]) return A #%%################################################################### # (3) The conversion between str_dip_rake and A/N vector ###################################################################### # str_dip_rake to A/N vector def str_dip_rake2AN(strike,dip,rake): """ Input: fault plane' strike dip and rake in degrees. strike : [0, 360) dip : [0, 90] rake : [-180, 180) Output: slip vector(A) and fault plane's normal vector(N) in NED system. """ strike = strike/180*pi dip = dip/180*pi rake = rake/180*pi A=np.array([ cos(rake)*cos(strike) + sin(rake)*cos(dip)*sin(strike), cos(rake)*sin(strike) - sin(rake)*cos(dip)*cos(strike), -sin(rake)*sin(dip)] ) N=np.array([ -sin(strike)*sin(dip), cos(strike)*sin(dip), -cos(dip)] ) return A,N # A/N vector to str_dip_rake def AN2str_dip_rake(A,N): """ Input: slip vector(A) and fault plane's normal vector(N) in NED system. Output: fault plane' strike dip and rake. 
strike : [0, 360) dip : [0, 90] rake : [-180, 180) """ if abs(N[2]+1) < 0.00001: # nz=-1: the fault plane is horizontal strike = atan2(A[1],A[0]) # The direction of slip is also the strike, because the fault plane is horizontal dip = 0.0 else: strike = atan2(-N[0],N[1]) if abs(N[2]-0) < 0.00001: # nz=-1: the fault plane is vertical dip = pi/2 elif abs(sin(strike)) > abs(cos(strike)): dip = atan( (N[0]/sin(strike)) / N[2] ) else: dip = atan( (-N[1]/cos(strike)) / N[2] ) cos_rake = A[0]*cos(strike) + A[1]*sin(strike) if abs(A[2]-0) > 0.0000001: # az!=0: consider the effect of dip if abs(dip-0) > 0.000001: rake = atan2(-A[2]/sin(dip),cos_rake) else: rake = atan2(-100000000.0*A[2],cos_rake) else: # az=0: don't consider the effect of dip if cos_rake > 1: cos_rake = 1 if cos_rake < -1: cos_rake = -1 rake = acos(cos_rake) if dip < 0: dip = -dip strike = strike+pi # strike need to be in the opposite direction if strike >= 2*pi: strike = strike-2*pi if strike < 0: strike = strike+2*pi strike = strike*180/pi dip = dip*180/pi rake = rake*180/pi A = str_dip_rake(strike,dip,rake) return A #%%################################################################### # (4) The conversion between A/N vector and P/T/N vector ###################################################################### # Calculate the T-axis, P-axis and N-axis according to the slip vector (A) and fault plane direction vector (N) def AN2TPN(A,N): """ Input: slip vector(A) and fault plane's normal vector(N) in NED system. Output: Tension-axis vector(T), Pressure-axis vector(P) and Null-axis vector(Null) in NED system. """ T=sqrt(2)/2*(A+N) P=sqrt(2)/2*(A-N) Null=np.cross(P,T) return T,P,Null # Calculate the slip vector (A) and fault plane direction vector (N) according to the T-axis and P-axis def TP2AN(T,P): """ Input: Tension-axis vector(T) and Pressure-axis vector(P) in NED system. Output: slip vector(A) and fault plane's normal vector(N) in NED system. 
""" A=sqrt(2)/2*(T+P) N=sqrt(2)/2*(T-P) return A,N #%%####################################################### # (5) mt(in NED system) to P/T/N vector ########################################################## def MT2TPN(MT_raw): """ Input: moment tensor in NED system. Output: Tension-axis vector(T), Pressure-axis vector(P) and Null-axis vector(Null) in NED system. """ M = MT_raw.mt eigen_val, eigen_vec = np.linalg.eig(M) # The TNP axis should be arranged in order of eigenvalues from largest to smallest eigen_vec_ord_axis = np.real( np.take(eigen_vec, np.argsort(-eigen_val), 1) ) T_axis = eigen_vec_ord_axis[:, 0] N_axis = eigen_vec_ord_axis[:, 1] P_axis = eigen_vec_ord_axis[:, 2] return T_axis, P_axis, N_axis #%%############################################################## # (6) P/T/N vector to P/T/N vector's stirke and dip ################################################################# def vector2str_dip(vector): """ Input: a principal axis vector, such as eigenvectors P/T/N of the moment tensor object in NED system. Output: a principal axis' strike and dip. strike : [0, 360) dip : [0, 90] """ x=vector[0] y=vector[1] z=vector[2] strike = atan2(y,x)*180/pi r = sqrt(x**2+y**2) dip = atan2(z,r)*180/pi if dip < 0.0: dip = -dip strike = strike-180 if strike < 0: strike = strike+360 if strike > 360: strike = strike-360 A = Axis_str_dip(strike,dip) return A #%%############################################################## # (7) project station to beachball ################################################################# # According to the strike(azimuth) and takeoff Angle (or dip, for the PTN # axis, you need to make your own Angle transformation pi/2-TKO=dip) # projected onto the beachball def project_beachball(AZM, TKO, R=1, menthod='schmidt'): """ Input in NED system: AZM means azimuth (equal to strike) in degrees. TKO means takeoff angle ( pi/2-TKO=dip equal to dip) in degrees. R means beachball radius that you want to plot. 
note: Takeoff Angle is the angle with the vertical direction, DIP is the angle with the horizontal plane. Output: X and Y coordinates in E and N direction respectively, and the lower left corner of the circle is the origin. """ AZM = AZM/180*pi TKO = TKO/180*pi # Schmidt (Lambert, equal-area) default if menthod=='schmidt': r = math.sqrt(2)*sin(TKO/2) # Wulff projection (Stereographic, equal-angle) not recommmended elif menthod=='wulff': r = tan(TKO/2) else: raise ValueError('projection error!') X = R*r*sin(AZM)+R Y = R*r*cos(AZM)+R return X,Y #%%######################################### # (9) Hudson plot ############################################ # Hudson, J.A., <NAME>, and R.M.Rogers (1989), "Source type plot for inversion of the moment tensor",\ # J. Geophys. Res., 94, 765?74 def M2kT_space(MT): # 1. full-moment M = MT # M = np.array([Mxx, Mxy, Mxz, Mxy, Myy, Myz, Mxz, Myz, Mzz]).reshape(3, 3) # 2.isotropic part m_iso = 1./3 * np.trace(M) M_iso = np.diag(np.array( [m_iso,m_iso,m_iso] )) # 3.deviatoric part M_devi = M - M_iso # 4.eigenvalues and -vectors of M devi_eigen_val, devi_eigen_vec = np.linalg.eig(M_devi) # 5.eigenvalues in ascending order: devi_eigen_val_ord = np.real( np.take(devi_eigen_val, np.argsort(-devi_eigen_val)) ) # descend order if ( abs(m_iso) + max( abs(devi_eigen_val_ord[0]),abs(devi_eigen_val_ord[2]) ) ) == 0 : raise TypeError("MomentTensor cannot be translated into [k,T] space.") else: k = m_iso / ( abs(m_iso) + max(abs(devi_eigen_val_ord[0]), abs(devi_eigen_val_ord[2])) ) if max(abs(devi_eigen_val_ord[0]), abs(devi_eigen_val_ord[2])) == 0: T = 0 else: T = 2*devi_eigen_val_ord[1] / max(abs(devi_eigen_val_ord[0]), abs(devi_eigen_val_ord[2])) return k,T def kT2UV_space(k,T): tau = T*(1-abs(k)) if ( (tau>0) & (k<0) ) | ( (tau<0) & (k>0) ): # 2nd and 4th quadrants U = tau V = k elif ( tau < (4*k) ) & ( (tau>=0) & (k>=0) ): # First quadrant, Region A U = tau/(1-tau/2) V = k/(1-tau/2) elif ( tau >= (4*k) ) & ( (tau>=0) & (k>=0) ): # First 
quadrant, Region B
        U = tau/(1-2*k)
        V = k/(1-2*k)
    elif ( tau >= (4*k) ) & ( (tau<=0) & (k<=0) ): # Third quadrant, Region A
        U = tau/(1+tau/2)
        V = k/(1+tau/2)
    elif ( tau < (4*k) ) & ( (tau<=0) & (k<=0) ): # Third quadrant, Region B
        U = tau/(1+2*k)
        V = k/(1+2*k)
    else:
        # No quadrant/region matched: (k, tau) is outside the valid domain.
        raise TypeError("def: kT2UV_space(k,T)")
    return U,V

def Hudson_plot(ax, ms=2, marker_ms='o', color_ms='k', alpha_ms=0.5, alpha_text=0.7, fontsize=6):
    """Draw the Hudson source-type diagram (skewed U-V diamond) on *ax*.

    Fills the second and fourth quadrants, draws the diamond border and the
    U/V axes, adds dashed iso-lines for k = +/-0.5 and T = +/-0.5, and labels
    the reference source types (ISO, CLVD, DC, LVD, tensile crack, anticrack).

    NOTE(review): `marker_ms` is accepted but the marker style is hard-coded
    to 'o' in every ax.plot call below — confirm whether it should be used.
    """
    ######################
    ## 1. Fill and draw the border
    ax.fill_between(x=[-1,0],y1=[0,0],y2=[0,1],color='k', alpha=0.05) # fill the second quadrant
    ax.fill_between(x=[0,1],y1=[0,0],y2=[-1,0],color='k', alpha=0.05) # fill the fourth quadrant
    # Diamond border through (0,1), (4/3,1/3), (0,-1), (-4/3,-1/3), then axes.
    ax.plot([0, 4/3, 0, -4/3, 0], [1, 1/3, -1, -1/3, 1], linestyle='-', color='k', lw=1, alpha=0.6)
    ax.plot([-1, 1], [0, 0], linestyle='-', color='k', lw=1, alpha=0.6)
    ax.plot([0, 0], [-1, 1], linestyle='-', color='k', lw=1, alpha=0.6)
    ######################
    ## 2. Draw the inner dotted line
    # Four dashed iso-lines, each sampled at 100 points via kT2UV_space:
    # T = +0.5, T = -0.5 (k swept), then k = +0.5, k = -0.5 (T swept).
    U_vector = [];V_vector = []
    for i in np.linspace(-1, 1, num=100):
        k = i
        T = 0.5
        U,V = kT2UV_space(k=k, T=T)
        U_vector.append(U)
        V_vector.append(V)
    ax.plot(U_vector, V_vector, linestyle='--', color='k', lw=1, alpha=0.6)
    U_vector = [];V_vector = []
    for i in np.linspace(-1, 1, num=100):
        k = i
        T = -0.5
        U,V = kT2UV_space(k=k, T=T)
        U_vector.append(U)
        V_vector.append(V)
    ax.plot(U_vector, V_vector, linestyle='--', color='k', lw=1, alpha=0.6)
    U_vector = [];V_vector = []
    for i in np.linspace(-1, 1, num=100):
        k = 0.5
        T = i
        U,V = kT2UV_space(k=k, T=T)
        U_vector.append(U)
        V_vector.append(V)
    ax.plot(U_vector, V_vector, linestyle='--', color='k', lw=1, alpha=0.6)
    U_vector = [];V_vector = []
    for i in np.linspace(-1, 1, num=100):
        k = -0.5
        T = i
        U,V = kT2UV_space(k=k, T=T)
        U_vector.append(U)
        V_vector.append(V)
    ax.plot(U_vector, V_vector, linestyle='--', color='k', lw=1, alpha=0.6)
    ######################
    ## 3. Draw marker points
    # ms=2
    # marker_ms = 'o'
    # color_ms = 'k'
    # alpha_ms = 0.5
    # alpha_text = 0.7
    # fontsize = 7
    # Reference source types at their (k, T) coordinates.
    U,V = kT2UV_space(k=1, T=1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'ISO+ (Explosion)',horizontalalignment='center', verticalalignment='bottom', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=-1, T=1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'ISO- (Implosion)',horizontalalignment='center', verticalalignment='top', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=0, T=1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'CLVD (-)',horizontalalignment='left', verticalalignment='top', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=-5/9, T=1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'Anticrack',horizontalalignment='left', verticalalignment='top', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=0, T=-1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'CLVD (+)',horizontalalignment='right', verticalalignment='bottom', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=5/9, T=-1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'Tensile Crack',horizontalalignment='right', verticalalignment='bottom', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=0, T=0)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'DC',horizontalalignment='center', verticalalignment='bottom', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=1/3, T=-1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'LVD (+)',horizontalalignment='right', verticalalignment='bottom', fontsize=fontsize, color='k',alpha=alpha_text)
    U,V = kT2UV_space(k=-1/3, T=1)
    ax.plot(U,V, marker='o', color=color_ms, ms=ms, alpha=alpha_ms)
    ax.text(U,V,'LVD (-)',horizontalalignment='left', verticalalignment='top', fontsize=fontsize, color='k',alpha=alpha_text)
    ######################
    ## 4. Set the axes
    # Pad the diamond by 0.1 on each side and hide the matplotlib axes.
    ax.set_xlim(-4/3-0.1, 4/3+0.1)
    ax.set_ylim(-1-0.1, 1+0.1)
    ax.set_aspect("equal")
    ax.set_axis_off()

#%%###########################################################
# (10) Describe_fault_plane with two str_dip_rake.
##############################################################
def describe_fault_plane(fm):
    """
    Input: moment tensor object in NED system.
    Output: [strike_1, dip_1, rake_1] and [strike_2, dip_2, rake_2]
    (the two conjugate fault planes, as a 2x3 numpy array).
    """
    MT = MTensor(fm)
    # T/P/N principal axes -> slip (A) and normal (N) vectors.
    T_axis, P_axis, N_axis = MT2TPN(MT)
    A,N = TP2AN(T_axis,P_axis)
    # Swapping A and N yields the conjugate (auxiliary) plane.
    a = AN2str_dip_rake(A,N)
    strike_1 = a.strike
    dip_1 = a.dip
    rake_1 = a.rake
    b = AN2str_dip_rake(N,A)
    strike_2 = b.strike
    dip_2 = b.dip
    rake_2 = b.rake
    return np.array([[strike_1,dip_1,rake_1],[strike_2, dip_2, rake_2]])

#%%################################
# (11) object.
###################################
class MTensor(object):
    """
    Adapted from obspy. A moment tensor in NED system.
    >>> a = MTensor([1, 1, 0, 0, 0, -1]) # MTensor(Mxx, Myy, Mzz, Mxy, Mxz, Myz)
    >>> b = MTensor(np.array([[1, 0, 0], [0, 1, -1], [0, -1, 0]]))
    >>> c = MTensor([100,50,30])
    >>> a.mt
    array([[ 1, 0, 0],
           [ 0, 1, -1],
           [ 0, -1, 0]])
    >>> b.yz
    -1
    """
    def __init__(self, a):
        """Build a 3x3 tensor from [strike, dip, rake] (list of 3), the six
        independent components [Mxx, Myy, Mzz, Mxy, Mxz, Myz], or a full
        3x3 numpy array."""
        if len(a) == 3 and isinstance(a, list): # strike dip rake
            MT = str_dip_rake2MT(a[0],a[1],a[2])
            self.mt = MT.mt
        elif len(a) == 6: # six independent components
            self.mt = np.array([[a[0], a[3], a[4]], [a[3], a[1], a[5]], [a[4], a[5], a[2]]])
        elif isinstance(a, np.ndarray) and a.shape == (3, 3): # full matrix
            self.mt = a
        else:
            raise TypeError("Wrong size of input parameter.")
    @property
    def mt_normalized(self):
        # Tensor scaled to unit Frobenius norm.
        return self.mt / np.linalg.norm(self.mt)
    @property
    def xx(self):
        return self.mt[0][0]
    @property
    def xy(self):
        return self.mt[0][1]
    @property
    def xz(self):
        return self.mt[0][2]
    @property
    def yz(self):
        return self.mt[1][2]
    @property
    def yy(self):
        return self.mt[1][1]
    @property
    def zz(self):
        return self.mt[2][2]

class str_dip_rake(object):
    """
    Describing the faultplanes of the Double Couple
    Strike dip and rake values are in degrees.
    strike : [0, 360)
    dip : [0, 90]
    rake : [-180, 180)

    >>> a = str_dip_rake(20, 50, 10)
    >>> a.strike
    20
    >>> a.dip
    50
    >>> a.rake
    10
    """
    def __init__(self, strike=0, dip=0,rake=0):
        self.strike = strike
        self.dip = dip
        self.rake = rake

class Axis_str_dip(object):
    """
    A principal axis' strike and dip. Used in P/T/N's axis
    Strike and dip values are in degrees.
    strike : [0, 360)
    dip : [0, 90]

    >>> a = Axis_str_dip(20, 50)
    >>> a.strike
    20
    >>> a.dip
    50
    """
    def __init__(self, strike=0, dip=0):
        self.strike = strike
        self.dip = dip

#%%###########################################################################
# (9) Decompose of mt: isotropic + deviatoric = isotropic + DC + CLVD
##############################################################################
class Decompose(object):
    """
    Creates a Decompose object on the basis of a provided MomentTensor object.

    For example:
        m = str_dip_rake2MT(120,50,70)
        AA = Decompose(m)
        AA.decomposition_iso_DC_CLVD()
        AA.M_DC_percentage ->
    """
    def __init__(self, MT_raw):
        # Full moment tensor (3x3); all derived quantities start unset and
        # are populated by decomposition_iso_DC_CLVD().
        self.M = MT_raw.mt
        self.M_iso = None
        self.M_devi = None
        self.M_DC = None
        self.M_CLVD = None
        self.M0 = None
        self.Mw = None
        self.M_iso_percentage = None
        self.M_DC_percentage = None
        self.M_CLVD_percentage = None
        self.eigen_val = None
        self.eigen_vec = None
        self.F = None

    def decomposition_iso_DC_CLVD(self):
        """
        Decompose self.M (NED system) according to Aki & Richards and
        Jost & Herrmann into isotropic + deviatoric = isotropic + DC + CLVD.

        Results are stored as attributes on self: M_iso, M_devi, M_DC,
        M_CLVD, M0 (seismic moment), Mw (moment magnitude), the three
        percentage attributes, eigen_val/eigen_vec of M, and F.
        """
        # 1. full-moment
        M = self.M
        # 2.isotropic part
        m_iso = 1./3 * np.trace(M)
        M_iso = np.diag(np.array( [m_iso,m_iso,m_iso] ))
        m0_iso = abs(m_iso)
        # 3.deviatoric part
        M_devi = M - M_iso
        # 4.eigenvalues and -vectors of M
        eigen_val, eigen_vec = np.linalg.eig(M)
        # 5.eigenvalues in ascending order (sorted by absolute value):
        eigen_val_ord = np.real( np.take(eigen_val, np.argsort(abs(eigen_val))) )
        eigen_vec_ord = np.real( np.take(eigen_vec, np.argsort(abs(eigen_val)), 1) )
        # 6.named according to Jost & Herrmann:
        # a1 = eigen_vec_ord[:, 0]
        a2 = eigen_vec_ord[:, 1]
        a3 = eigen_vec_ord[:, 2]
        # F measures the departure from a pure double couple.
        F = -(eigen_val_ord[0]-m_iso) / (eigen_val_ord[2]-m_iso)
        # 7.decompose
        M_DC = (eigen_val_ord[2]-m_iso) * (1 - 2 * F) * (np.outer(a3, a3) - np.outer(a2, a2))
        M_CLVD = M_devi - M_DC
        # 8.according to Bowers & Hudson:
        M0 = max(abs(eigen_val_ord)) # Seismic moment (in Nm)
        Mw = np.log10(M0 * 1.0e7) / 1.5 - 16.1/1.5 # moment_magnitude unit is Mw
        M_iso_percentage = int(round(m0_iso / M0 * 100, 6))
        M_DC_percentage = int(round((1 - 2 * abs(F)) * (1 - M_iso_percentage / 100.) * 100, 6))
        # CLVD takes whatever percentage is left.
        M_CLVD_percentage = 100-M_iso_percentage-M_DC_percentage
        self.M_iso = M_iso
        self.M_devi = M_devi
        self.M_DC = M_DC
        self.M_CLVD = M_CLVD
        self.M0 = M0
        self.Mw = Mw
        self.M_iso_percentage = M_iso_percentage
        self.M_DC_percentage = M_DC_percentage
        self.M_CLVD_percentage = M_CLVD_percentage
        self.eigen_val = eigen_val
        self.eigen_vec = eigen_vec
        self.F = F

    def help(self):
        # List the attributes populated by decomposition_iso_DC_CLVD().
        print("Incluing function:\n\
            self.M\n\
            self.M_iso\n\
            self.M_devi\n\
            self.M_DC\n\
            self.M_CLVD\n\
            self.M0\n\
            self.Mw\n\
            self.M_iso_percentage\n\
            self.M_DC_percentage\n\
            self.M_CLVD_percentage\n\
            self.eigen_val\n\
            self.eigen_vec\n\
            self.F")

    def print_self(self):
        # Dump every stored attribute for debugging/inspection.
        print("self.M:",self.M,"\n")
        print("self.M_iso:",self.M_iso,"\n")
        print("self.M_devi:",self.M_devi,"\n")
        print("self.M_DC:",self.M_DC,"\n")
        print("self.M_CLVD:",self.M_CLVD,"\n")
        print("self.M0:",self.M0,"\n")
        print("self.Mw:",self.Mw,"\n")
        print("self.M_iso_percentage:",self.M_iso_percentage,"\n")
        print("self.M_DC_percentage:",self.M_DC_percentage,"\n")
        print("self.M_CLVD_percentage:",self.M_CLVD_percentage,"\n")
        print("self.eigen_val:",self.eigen_val,"\n")
        print("self.eigen_vec:",self.eigen_vec,"\n")
        print("self.F:",self.F,"\n")
[ "numpy.trace", "numpy.log10", "numpy.linalg.eig", "numpy.cross", "math.acos", "math.tan", "math.sqrt", "math.cos", "numpy.array", "numpy.linspace", "numpy.argsort", "numpy.outer", "math.atan2", "numpy.linalg.norm", "math.sin" ]
[((5395, 5409), 'numpy.cross', 'np.cross', (['P', 'T'], {}), '(P, T)\n', (5403, 5409), True, 'import numpy as np\n'), ((6191, 6207), 'numpy.linalg.eig', 'np.linalg.eig', (['M'], {}), '(M)\n', (6204, 6207), True, 'import numpy as np\n'), ((7085, 7106), 'math.sqrt', 'sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (7089, 7106), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((9273, 9294), 'numpy.linalg.eig', 'np.linalg.eig', (['M_devi'], {}), '(M_devi)\n', (9286, 9294), True, 'import numpy as np\n'), ((11444, 11471), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {'num': '(100)'}), '(-1, 1, num=100)\n', (11455, 11471), True, 'import numpy as np\n'), ((11715, 11742), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {'num': '(100)'}), '(-1, 1, num=100)\n', (11726, 11742), True, 'import numpy as np\n'), ((11987, 12014), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {'num': '(100)'}), '(-1, 1, num=100)\n', (11998, 12014), True, 'import numpy as np\n'), ((12258, 12285), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {'num': '(100)'}), '(-1, 1, num=100)\n', (12269, 12285), True, 'import numpy as np\n'), ((15703, 15767), 'numpy.array', 'np.array', (['[[strike_1, dip_1, rake_1], [strike_2, dip_2, rake_2]]'], {}), '([[strike_1, dip_1, rake_1], [strike_2, dip_2, rake_2]])\n', (15711, 15767), True, 'import numpy as np\n'), ((3321, 3338), 'math.atan2', 'atan2', (['A[1]', 'A[0]'], {}), '(A[1], A[0])\n', (3326, 3338), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3511, 3529), 'math.atan2', 'atan2', (['(-N[0])', 'N[1]'], {}), '(-N[0], N[1])\n', (3516, 3529), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((4396, 4410), 'math.acos', 'acos', (['cos_rake'], {}), '(cos_rake)\n', (4400, 4410), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((9084, 9095), 'numpy.trace', 'np.trace', (['M'], {}), '(M)\n', (9092, 9095), True, 'import numpy as np\n'), ((9116, 9147), 
'numpy.array', 'np.array', (['[m_iso, m_iso, m_iso]'], {}), '([m_iso, m_iso, m_iso])\n', (9124, 9147), True, 'import numpy as np\n'), ((19984, 20000), 'numpy.linalg.eig', 'np.linalg.eig', (['M'], {}), '(M)\n', (19997, 20000), True, 'import numpy as np\n'), ((1620, 1632), 'math.sin', 'sin', (['(2 * dip)'], {}), '(2 * dip)\n', (1623, 1632), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1633, 1642), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (1636, 1642), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3859, 3870), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (3862, 3870), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3878, 3889), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (3881, 3889), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((4124, 4160), 'math.atan2', 'atan2', (['(-100000000.0 * A[2])', 'cos_rake'], {}), '(-100000000.0 * A[2], cos_rake)\n', (4129, 4160), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((5348, 5355), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5352, 5355), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((5370, 5377), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5374, 5377), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((5728, 5735), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5732, 5735), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((5750, 5757), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5754, 5757), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((6348, 6370), 'numpy.argsort', 'np.argsort', (['(-eigen_val)'], {}), '(-eigen_val)\n', (6358, 6370), True, 'import numpy as np\n'), ((7058, 7069), 'math.atan2', 'atan2', (['y', 'x'], {}), '(y, x)\n', (7063, 7069), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((7111, 7122), 'math.atan2', 'atan2', (['z', 'r'], {}), '(z, r)\n', (7116, 7122), 
False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((8378, 8390), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (8387, 8390), False, 'import math\n'), ((8391, 8403), 'math.sin', 'sin', (['(TKO / 2)'], {}), '(TKO / 2)\n', (8394, 8403), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((8510, 8522), 'math.tan', 'tan', (['(TKO / 2)'], {}), '(TKO / 2)\n', (8513, 8522), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((8590, 8598), 'math.sin', 'sin', (['AZM'], {}), '(AZM)\n', (8593, 8598), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((8613, 8621), 'math.cos', 'cos', (['AZM'], {}), '(AZM)\n', (8616, 8621), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((9394, 9421), 'numpy.argsort', 'np.argsort', (['(-devi_eigen_val)'], {}), '(-devi_eigen_val)\n', (9404, 9421), True, 'import numpy as np\n'), ((16964, 16987), 'numpy.linalg.norm', 'np.linalg.norm', (['self.mt'], {}), '(self.mt)\n', (16978, 16987), True, 'import numpy as np\n'), ((19757, 19768), 'numpy.trace', 'np.trace', (['M'], {}), '(M)\n', (19765, 19768), True, 'import numpy as np\n'), ((19793, 19824), 'numpy.array', 'np.array', (['[m_iso, m_iso, m_iso]'], {}), '([m_iso, m_iso, m_iso])\n', (19801, 19824), True, 'import numpy as np\n'), ((1449, 1464), 'math.sin', 'sin', (['(2 * strike)'], {}), '(2 * strike)\n', (1452, 1464), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1546, 1561), 'math.sin', 'sin', (['(2 * strike)'], {}), '(2 * strike)\n', (1549, 1561), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1684, 1699), 'math.cos', 'cos', (['(2 * strike)'], {}), '(2 * strike)\n', (1687, 1699), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1730, 1745), 'math.sin', 'sin', (['(2 * strike)'], {}), '(2 * strike)\n', (1733, 1745), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1785, 1796), 'math.cos', 'cos', (['strike'], {}), 
'(strike)\n', (1788, 1796), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1824, 1835), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (1827, 1835), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1877, 1888), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (1880, 1888), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1916, 1927), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (1919, 1927), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2758, 2766), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (2761, 2766), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2801, 2809), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (2804, 2809), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2829, 2840), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (2832, 2840), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2841, 2849), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (2844, 2849), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2869, 2877), 'math.cos', 'cos', (['dip'], {}), '(dip)\n', (2872, 2877), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((16564, 16634), 'numpy.array', 'np.array', (['[[a[0], a[3], a[4]], [a[3], a[1], a[5]], [a[4], a[5], a[2]]]'], {}), '([[a[0], a[3], a[4]], [a[3], a[1], a[5]], [a[4], a[5], a[2]]])\n', (16572, 16634), True, 'import numpy as np\n'), ((20508, 20524), 'numpy.outer', 'np.outer', (['a3', 'a3'], {}), '(a3, a3)\n', (20516, 20524), True, 'import numpy as np\n'), ((20527, 20543), 'numpy.outer', 'np.outer', (['a2', 'a2'], {}), '(a2, a2)\n', (20535, 20543), True, 'import numpy as np\n'), ((20737, 20762), 'numpy.log10', 'np.log10', (['(M0 * 10000000.0)'], {}), '(M0 * 10000000.0)\n', (20745, 20762), True, 'import numpy as np\n'), ((1426, 1434), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (1429, 1434), False, 'from math import sin, cos, 
tan, atan2, atan, sqrt, acos\n'), ((1437, 1446), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', (1440, 1446), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1465, 1477), 'math.sin', 'sin', (['(2 * dip)'], {}), '(2 * dip)\n', (1468, 1477), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1478, 1487), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (1481, 1487), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1490, 1501), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (1493, 1501), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1523, 1531), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (1526, 1531), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1534, 1543), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', (1537, 1543), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1562, 1574), 'math.sin', 'sin', (['(2 * dip)'], {}), '(2 * dip)\n', (1565, 1574), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1575, 1584), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (1578, 1584), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1587, 1598), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (1590, 1598), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1661, 1669), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (1664, 1669), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1672, 1681), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', (1675, 1681), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1718, 1727), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (1721, 1727), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1762, 1770), 'math.cos', 'cos', (['dip'], {}), '(dip)\n', (1765, 1770), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1773, 1782), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', 
(1776, 1782), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1799, 1811), 'math.cos', 'cos', (['(2 * dip)'], {}), '(2 * dip)\n', (1802, 1811), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1812, 1821), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (1815, 1821), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1854, 1862), 'math.cos', 'cos', (['dip'], {}), '(dip)\n', (1857, 1862), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1865, 1874), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', (1868, 1874), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1891, 1903), 'math.cos', 'cos', (['(2 * dip)'], {}), '(2 * dip)\n', (1894, 1903), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1904, 1913), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (1907, 1913), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2599, 2608), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', (2602, 2608), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2609, 2620), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (2612, 2620), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2642, 2653), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (2645, 2653), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2673, 2682), 'math.cos', 'cos', (['rake'], {}), '(rake)\n', (2676, 2682), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2683, 2694), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (2686, 2694), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2716, 2727), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (2719, 2727), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2748, 2757), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (2751, 2757), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), 
((2789, 2800), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (2792, 2800), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3686, 3697), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (3689, 3697), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3705, 3716), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (3708, 3716), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((4072, 4080), 'math.sin', 'sin', (['dip'], {}), '(dip)\n', (4075, 4080), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((1705, 1717), 'math.sin', 'sin', (['(2 * dip)'], {}), '(2 * dip)\n', (1708, 1717), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2623, 2632), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (2626, 2632), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2633, 2641), 'math.cos', 'cos', (['dip'], {}), '(dip)\n', (2636, 2641), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2697, 2706), 'math.sin', 'sin', (['rake'], {}), '(rake)\n', (2700, 2706), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((2707, 2715), 'math.cos', 'cos', (['dip'], {}), '(dip)\n', (2710, 2715), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3749, 3760), 'math.sin', 'sin', (['strike'], {}), '(strike)\n', (3752, 3760), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n'), ((3816, 3827), 'math.cos', 'cos', (['strike'], {}), '(strike)\n', (3819, 3827), False, 'from math import sin, cos, tan, atan2, atan, sqrt, acos\n')]
""" djlime.forms.widgets ~~~~~~~~~~~~~~ Extended django form widgets. :copyright: (c) 2012 by <NAME>. :license: BSD, see LICENSE for more details. """ from itertools import chain from django.forms import widgets from django.forms.util import flatatt from django.utils.encoding import force_unicode from django.utils.html import conditional_escape from django.utils.safestring import mark_safe class RadioInput(widgets.RadioInput): def __init__(self, name, value, attrs, label_attrs, choice, index): self.label_attrs = label_attrs super(RadioInput, self).__init__(name, value, attrs, choice, index) def __unicode__(self): if 'id' in self.attrs: label_for = ' for="%s_%s"' % (self.attrs['id'], self.index) else: label_for = '' choice_label = conditional_escape(force_unicode(self.choice_label)) return mark_safe(u'<label %s %s>%s %s</label>' % (label_for, flatatt(self.label_attrs), self.tag(), choice_label)) class HorizontalRadioRenderer(widgets.RadioSelect.renderer): def __init__(self, name, value, attrs, label_attrs, choices): self.label_attrs = label_attrs super(HorizontalRadioRenderer, self).__init__(name, value, attrs, choices) def __iter__(self): for i, choice in enumerate(self.choices): yield RadioInput(self.name, self.value, self.attrs.copy(), self.label_attrs.copy(), choice, i) def __getitem__(self, idx): choice = self.choices[idx] # Let the IndexError propogate return RadioInput(self.name, self.value, self.attrs.copy(), self.label_attrs.copy(), choice, idx) def render(self): return mark_safe(u'\n'.join([u'%s\n' % w for w in self])) class RadioFieldRenderer(widgets.RadioFieldRenderer): def __init__(self, name, value, attrs, label_attrs, choices): self.label_attrs = label_attrs super(RadioFieldRenderer, self).__init__(name, value, attrs, choices) def __iter__(self): for i, choice in enumerate(self.choices): yield RadioInput(self.name, self.value, self.attrs.copy(), self.label_attrs.copy(), choice, i) def __getitem__(self, idx): choice = self.choices[idx] # Let the 
IndexError propogate return RadioInput(self.name, self.value, self.attrs.copy(), self.label_attrs.copy(), choice, idx) class RadioSelect(widgets.RadioSelect): """ RadioSelect widget with support for label attributes such is class, id """ renderer = RadioFieldRenderer def __init__(self, *args, **kwargs): label_attrs = kwargs.pop('label_attrs', {}) self.label_attrs = label_attrs super(RadioSelect, self).__init__(*args, **kwargs) def get_renderer(self, name, value, attrs=None, label_attrs=None, choices=()): """Returns an instance of the renderer.""" if value is None: value = '' str_value = force_unicode(value) # Normalize to string. final_attrs = self.build_attrs(attrs) choices = list(chain(self.choices, choices)) return self.renderer(name, str_value, final_attrs, label_attrs, choices) def render(self, name, value, attrs=None, label_attrs=None, choices=()): return self.get_renderer(name, value, attrs, self.label_attrs, choices).render()
[ "itertools.chain", "django.utils.encoding.force_unicode", "django.forms.util.flatatt" ]
[((2921, 2941), 'django.utils.encoding.force_unicode', 'force_unicode', (['value'], {}), '(value)\n', (2934, 2941), False, 'from django.utils.encoding import force_unicode\n'), ((858, 890), 'django.utils.encoding.force_unicode', 'force_unicode', (['self.choice_label'], {}), '(self.choice_label)\n', (871, 890), False, 'from django.utils.encoding import force_unicode\n'), ((3034, 3062), 'itertools.chain', 'chain', (['self.choices', 'choices'], {}), '(self.choices, choices)\n', (3039, 3062), False, 'from itertools import chain\n'), ((961, 986), 'django.forms.util.flatatt', 'flatatt', (['self.label_attrs'], {}), '(self.label_attrs)\n', (968, 986), False, 'from django.forms.util import flatatt\n')]
import torch
from torch import nn, Tensor

from typing import Tuple

from ..components import ResidualRNN

# Fix: LSTMEncoder and ResidualRNNEncoder are public classes defined in this
# module but were missing from __all__, so `from ... import *` skipped them.
__all__ = ['Encoder', 'RNNEncoder', 'GRUEncoder', 'LSTMEncoder', 'ResidualRNNEncoder']


class Encoder(nn.Module):
    """Abstract base class for embedding-plus-RNN sequence encoders.

    Embeds integer token ids (with a zeroed padding embedding) and applies
    dropout; subclasses supply the recurrent network via :meth:`forward`
    and its initial states via :meth:`init_hidden`.
    """

    def __init__(self, input_size, hidden_size, embedding_dim, num_layers, bidirectional,
                 device, pad_token=0, drop_rate=0.1):
        super(Encoder, self).__init__()
        self._hidden_size = hidden_size
        self._input_size = input_size
        self._embedding_dim = embedding_dim
        self._num_layers = num_layers
        self._bidirectional = bidirectional
        self._device = device
        # padding_idx keeps the pad token's embedding fixed at zero.
        self._embedding = nn.Embedding(input_size, self._embedding_dim, padding_idx=pad_token)
        self._dropout = nn.Dropout(drop_rate)

    def forward(self, input: Tensor, states: Tuple[Tensor, ...]) -> Tuple[Tensor, Tuple[Tensor, ...]]:
        """
        :param input: (seq_len, batch_size, input_dim)
        :param states: internal states of the RNN, each having dimension
            (num_layers * num_directions, batch_size, hidden_size)
        :return:
            output: (seq_len, batch, num_directions * hidden_size)
            states: states at final time step, each having dimension
                (num_layers * num_directions, batch_size, hidden_size)
        """
        raise NotImplementedError

    def init_hidden(self, batch_size: int) -> Tuple[Tensor, ...]:
        """
        Initialize the first zero hidden state
        :param batch_size:
        :return: Initial internal states, each of dim
            (num_layers * num_directions, batch_size, hidden_size)
        """
        raise NotImplementedError


class RNNEncoder(Encoder):
    """Encoder wrapping an arbitrary pre-built recurrent module ``rnn``."""

    def __init__(
        self, rnn, input_size, hidden_size, embedding_dim, num_layers, bidirectional,
        device, pad_token=0, drop_rate=0.1
    ):
        super(RNNEncoder, self).__init__(
            input_size, hidden_size, embedding_dim,
            num_layers, bidirectional, device, pad_token, drop_rate
        )
        self.rnn = rnn

    def forward(self, input: Tensor, states: Tuple[Tensor, ...]) -> Tuple[Tensor, Tuple[Tensor, ...]]:
        # Embed token ids, apply dropout, then run the wrapped RNN.
        embedded = self._dropout(self._embedding(input))
        output, hidden = self.rnn(embedded, states)
        return output, hidden


class GRUEncoder(RNNEncoder):
    """RNNEncoder specialised to a :class:`torch.nn.GRU` backbone."""

    def __init__(
        self, input_size, hidden_size, embedding_dim, device, bias=False,
        num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
    ):
        super(GRUEncoder, self).__init__(
            nn.GRU(
                embedding_dim, hidden_size,
                bias=bias, num_layers=num_layers,
                dropout=dropout,
                bidirectional=bidirectional
            ),
            input_size, hidden_size, embedding_dim,
            num_layers, bidirectional, device, pad_token, drop_rate
        )

    def init_hidden(self, batch_size: int) -> Tuple[Tensor, ...]:
        """
        Initialize the first zero hidden state
        :param batch_size:
        :return: Initial hidden state, of dimension
            (num_layers * num_directions, batch_size, hidden_size)
        """
        first_dim = self._num_layers
        if self._bidirectional:
            first_dim *= 2
        # GRU keeps a single hidden state; return a 1-tuple for a uniform API.
        return (torch.zeros(first_dim, batch_size, self._hidden_size, device=self._device),)


class LSTMEncoder(RNNEncoder):
    """RNNEncoder specialised to a :class:`torch.nn.LSTM` backbone."""

    def __init__(
        self, input_size, hidden_size, embedding_dim, device, bias=False,
        num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
    ):
        super(LSTMEncoder, self).__init__(
            nn.LSTM(
                embedding_dim, hidden_size,
                bias=bias, num_layers=num_layers,
                dropout=dropout,
                bidirectional=bidirectional
            ),
            input_size, hidden_size, embedding_dim,
            num_layers, bidirectional, device, pad_token, drop_rate
        )

    def init_hidden(self, batch_size: int) -> Tuple[Tensor, ...]:
        """
        Initialize the first zero hidden state
        :param batch_size:
        :return: Initial hidden state and cell state, each of dim
            (num_layers * num_directions, batch_size, hidden_size)
        """
        first_dim = self._num_layers
        if self._bidirectional:
            first_dim *= 2
        # LSTM needs both a hidden state and a cell state.
        return (
            torch.zeros(first_dim, batch_size, self._hidden_size, device=self._device),
            torch.zeros(first_dim, batch_size, self._hidden_size, device=self._device)
        )


class ResidualRNNEncoder(RNNEncoder):
    """RNNEncoder whose backbone is a :class:`ResidualRNN` around ``base_rnn``."""

    def __init__(
        self, base_rnn, input_size, hidden_size, embedding_dim, device, bias=False,
        num_layers=1, dropout=0, bidirectional=False, pad_token=0, drop_rate=0.1
    ):
        super(ResidualRNNEncoder, self).__init__(
            ResidualRNN(
                base_rnn=base_rnn, input_size=embedding_dim,
                bias=bias, num_layers=num_layers,
                dropout=dropout,
                bidirectional=bidirectional
            ),
            input_size, hidden_size, embedding_dim,
            num_layers, bidirectional, device, pad_token, drop_rate
        )
[ "torch.nn.Dropout", "torch.nn.LSTM", "torch.zeros", "torch.nn.Embedding", "torch.nn.GRU" ]
[((612, 680), 'torch.nn.Embedding', 'nn.Embedding', (['input_size', 'self._embedding_dim'], {'padding_idx': 'pad_token'}), '(input_size, self._embedding_dim, padding_idx=pad_token)\n', (624, 680), False, 'from torch import nn, Tensor\n'), ((705, 726), 'torch.nn.Dropout', 'nn.Dropout', (['drop_rate'], {}), '(drop_rate)\n', (715, 726), False, 'from torch import nn, Tensor\n'), ((2506, 2624), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_size'], {'bias': 'bias', 'num_layers': 'num_layers', 'dropout': 'dropout', 'bidirectional': 'bidirectional'}), '(embedding_dim, hidden_size, bias=bias, num_layers=num_layers,\n dropout=dropout, bidirectional=bidirectional)\n', (2512, 2624), False, 'from torch import nn, Tensor\n'), ((3205, 3279), 'torch.zeros', 'torch.zeros', (['first_dim', 'batch_size', 'self._hidden_size'], {'device': 'self._device'}), '(first_dim, batch_size, self._hidden_size, device=self._device)\n', (3216, 3279), False, 'import torch\n'), ((3558, 3677), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_size'], {'bias': 'bias', 'num_layers': 'num_layers', 'dropout': 'dropout', 'bidirectional': 'bidirectional'}), '(embedding_dim, hidden_size, bias=bias, num_layers=num_layers,\n dropout=dropout, bidirectional=bidirectional)\n', (3565, 3677), False, 'from torch import nn, Tensor\n'), ((4283, 4357), 'torch.zeros', 'torch.zeros', (['first_dim', 'batch_size', 'self._hidden_size'], {'device': 'self._device'}), '(first_dim, batch_size, self._hidden_size, device=self._device)\n', (4294, 4357), False, 'import torch\n'), ((4371, 4445), 'torch.zeros', 'torch.zeros', (['first_dim', 'batch_size', 'self._hidden_size'], {'device': 'self._device'}), '(first_dim, batch_size, self._hidden_size, device=self._device)\n', (4382, 4445), False, 'import torch\n')]
import torch import torch.nn.functional as F from torch.autograd import Variable import numpy as np class LabelSmoothingCrossEntropy(torch.nn.Module): def __init__(self): super(LabelSmoothingCrossEntropy, self).__init__() def forward(self, x, target, smoothing=0.1): confidence = 1. - smoothing logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = confidence * nll_loss + smoothing * smooth_loss return loss.mean() class ConfidenceLabelSmoothingCrossEntropy(torch.nn.Module): def __init__(self): super(ConfidenceLabelSmoothingCrossEntropy, self).__init__() # self.confidence = [0.7425, 0.9325, 0.965, 0.5395, 0.86025, 0.754, 0.66475, 0.618, 0.7925, 0.6525, 0.5415, # 0.5705, 0.6525, 0.59625, 0.6145, 0.62125, 0.7755, 0.866, 0.83425, 0.64125, 0.986, 0.82225, # 0.70525, 0.5625, 0.5145, 0.5275, 0.57775, 0.918, 0.9175, 0.69575, 0.6555, 0.867, 0.945, # 0.5155, 0.593, 0.976, 0.963, 0.591, 0.749, 0.5575, 0.52625, 0.6125, 0.83725, 0.97225, # 0.93725, 0.6415, 0.61225, 0.584, 0.69175, 0.60825, 0.63575, 0.756, 0.61375, 0.53575] self.confidence = [0.713, 0.953, 0.947, 0.514, 0.933, 0.725, 0.6025, 0.5855, 0.821, 0.6175, 0.547, 0.5605, 0.7, 0.609, 0.5785, 0.638, 0.8005, 0.824, 0.834, 0.5155, 0.9775, 0.8615, 0.6305, 0.549, 0.517, 0.5915, 0.5285, 0.923, 0.855, 0.751, 0.675, 0.773, 0.9805, 0.53, 0.5255, 0.9685, 0.9535, 0.5515, 0.8795, 0.497, 0.529, 0.5335, 0.8645, 0.9595, 0.9245, 0.5265, 0.452, 0.6415, 0.696, 0.617, 0.683, 0.7255, 0.5995, 0.5815, 0.772, 0.912, 0.983, 0.565, 0.7875, 0.783, 0.727, 0.6505, 0.764, 0.6875, 0.536, 0.5805, 0.605, 0.5835, 0.6505, 0.6045, 0.7505, 0.908, 0.8345, 0.767, 0.9945, 0.783, 0.78, 0.576, 0.512, 0.4635, 0.627, 0.913, 0.98, 0.6405, 0.636, 0.961, 0.9095, 0.501, 0.6605, 0.9835, 0.9725, 0.6305, 0.6185, 0.618, 0.5235, 0.6915, 0.81, 0.985, 0.95, 0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49, 0.985, 0.95, 0.7565, 0.7725, 
0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49 ] def forward(self, x, target, sid): confidencemat = torch.zeros_like(target,dtype=torch.float32) for i in range(len(target)): confidencemat[i] = self.confidence[sid[i]] smoothing = 1 - confidencemat logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = torch.mul(confidencemat,nll_loss) + torch.mul(smoothing,smooth_loss) return loss.mean() class CroppedLoss: def __init__(self, loss_function): self.loss_function = loss_function def __call__(self, preds, targets): avg_preds = torch.mean(preds, dim=2) avg_preds = avg_preds.squeeze(dim=1) return self.loss_function(avg_preds, targets) def train_crop(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1): criterion = torch.nn.NLLLoss() lossfn = CroppedLoss(criterion) model.train() for batch_idx, datas in enumerate(train_loader): data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64) optimizer.zero_grad() output = model(data) output = model.embedding_net(data) loss = lossfn(output, target) loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss.item())) scheduler.step() def eval_crop(model, device, test_loader): model.eval() test_loss = [] correct = [] with torch.no_grad(): for datas in test_loader: data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64) outputs = [] for i in range(2): outputs.append(model(data[:, :, :, i * 125:i * 125 + 1000])) result = torch.cat([outputs[0], outputs[1][:, :, model.out_size - 125:model.out_size]], dim=2) y_preds_per_trial = result.mean(dim=2) test_loss.append(F.nll_loss(y_preds_per_trial, target, reduction='sum').item()) # sum up batch loss pred = y_preds_per_trial.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = sum(test_loss) / len(test_loader.dataset) # print('{:.0f}'.format(100. * correct / len(test_loader.dataset))) print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format( loss, sum(correct), len(test_loader.dataset), 100. * sum(correct) / len(test_loader.dataset))) return loss, 100. 
* sum(correct) / len(test_loader.dataset) class MAE_loss(torch.nn.Module): def __init__(self, device): super(MAE_loss, self).__init__() self.device = device self.loss_function = torch.nn.L1Loss() def __call__(self, preds, targets): y_onehot = torch.FloatTensor(targets.size(0), 2).to(self.device) y_onehot.zero_() y_onehot.scatter_(1, targets.unsqueeze(1), 1) return self.loss_function(preds, y_onehot) class MAE_loss(torch.nn.Module): def __init__(self, device): super(MAE_loss, self).__init__() self.device = device self.loss_function = torch.nn.L1Loss() def __call__(self, preds, targets): y_onehot = torch.FloatTensor(targets.size(0), 2).to(self.device) y_onehot.zero_() y_onehot.scatter_(1, targets.unsqueeze(1), 1) return self.loss_function(preds, y_onehot) import utils import time def train(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch): losses = utils.AverageMeter('Loss', ':.4e') if isinstance(model, torch.nn.DataParallel): lossfn = model.module.criterion else: lossfn = model.criterion # lossfn = LabelSmoothingCrossEntropy() # lossfn = ConfidenceLabelSmoothingCrossEntropy() correct = [] start = time.time() model.train() t_data = [] t_model = [] t3 = time.time() for batch_idx, datas in enumerate(train_loader): data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64) t2 = time.time() t_data.append(t2 - t3) # print(t2) optimizer.zero_grad() output = model(data.unsqueeze(dim=1)) pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = lossfn(output, target) loss.backward() optimizer.step() losses.update(loss.item(), data.size(0)) if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss.item())) t3 = time.time() t_model.append(t3 - t2) print("time :", time.time() - start) print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}") print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)') def train_mtl(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch): losses = utils.AverageMeter('Loss', ':.4e') if isinstance(model, torch.nn.DataParallel): lossfn = model.module.criterion else: lossfn = model.criterion # lossfn = LabelSmoothingCrossEntropy() # lossfn = ConfidenceLabelSmoothingCrossEntropy() correct = [] start = time.time() model.train() t_data = [] t_model = [] t3 = time.time() for batch_idx, datas in enumerate(train_loader): data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device, dtype=torch.int64) t2 = time.time() t_data.append(t2 - t3) # print(t2) optimizer.zero_grad() output = model(data.unsqueeze(dim=1)) pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = lossfn(output, 2*subjid+target) loss.backward() optimizer.step() losses.update(loss.item(), data.size(0)) if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) t3 = time.time() t_model.append(t3 - t2) print("time :", time.time() - start) print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}") print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. 
* sum(correct) / len(train_loader.dataset):.4f}%)') def train_gpu(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1): losses = utils.AverageMeter('Loss', ':.4e') if isinstance(model, torch.nn.DataParallel): lossfn = model.module.criterion else: lossfn = model.criterion correct = [] import time start = time.time() model.train() t_data = [] t_model = [] t3 = time.time() for batch_idx, datas in enumerate(train_loader): data, target = datas[0], datas[1] t2 = time.time() t_data.append(t2 - t3) optimizer.zero_grad() output = model(data.unsqueeze(dim=1)) pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = lossfn(output, target) loss.backward() optimizer.step() losses.update(loss.item(), data.size(0)) if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) t3 = time.time() t_model.append(t3 - t2) print("time :", time.time() - start) print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}") scheduler.step(losses.avg) print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)') def eval(model, device, test_loader): model.eval() test_loss = [] correct = [] with torch.no_grad(): for datas in test_loader: data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64) output = model(data.unsqueeze(dim=1)) test_loss.append(F.cross_entropy(output, target, reduction='sum').item()) # sum up batch loss pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = sum(test_loss) / len(test_loader.dataset) # print('{:.0f}'.format(100. 
* correct / len(test_loader.dataset))) print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format( loss, sum(correct), len(test_loader.dataset), 100. * sum(correct) / len(test_loader.dataset))) return loss, 100. * sum(correct) / len(test_loader.dataset) from sklearn.metrics import roc_curve from sklearn.metrics import auc def eval_cali(model, device, test_loader): model.eval() test_loss = [] correct = [] with torch.no_grad(): for datas in test_loader: data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64) output = model(data.unsqueeze(dim=1)) test_loss.append(F.cross_entropy(output, target, reduction='sum').item()) # sum up batch loss pred = F.softmax(output, dim=1) fpr, tpr, thresholds = roc_curve(target.cpu(), pred.cpu()[:,0]) AUC = auc(fpr, tpr) correct.append(AUC) loss = sum(test_loss) / len(test_loader.dataset) # print('{:.0f}'.format(100. * correct / len(test_loader.dataset))) print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format( loss, sum(correct), len(test_loader.dataset), 100. * sum(correct) / len(test_loader.dataset))) return loss, 100. 
* sum(correct) / len(test_loader.dataset) def vote(output, target, topk=(1,)): """ Computes the precision@k for the specified values of k """ maxk = max(topk) batch_size = target.size(0) output = F.log_softmax(output, dim=1) _, pred = output.topk(maxk, 1, True, True) # pred = pred.t() # one-hot case if target.ndimension() > 1: target = target.max(1)[1] modevalue = torch.mode(pred%2)[0] return modevalue def eval_mtl(model, device, test_loader): model.eval() test_loss = [] correct = [] with torch.no_grad(): for datas in test_loader: data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device, dtype=torch.int64) output = model(data.unsqueeze(dim=1)) pred = vote(output, subjid*2+target, (1,5)) test_loss.append(F.cross_entropy(output, subjid*2+target, reduction='sum').item()) # sum up batch loss # pred_0 = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability # pred = pred_0%2 correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = sum(test_loss) / len(test_loader.dataset) # print('{:.0f}'.format(100. * correct / len(test_loader.dataset))) print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format( loss, sum(correct), len(test_loader.dataset), 100. * sum(correct) / len(test_loader.dataset))) return loss, 100. 
* sum(correct) / len(test_loader.dataset) def eval_ensemble(models, device, test_loader): for model in models: model.eval() test_loss = [] correct = [] with torch.no_grad(): for datas in test_loader: data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64) output = [] for model in models: output.append(model(data.unsqueeze(dim=1)).unsqueeze(dim=2)) temp = torch.cat(output, dim=2) temp2 = temp.mean(dim=2) test_loss.append(F.cross_entropy(temp2, target, reduction='sum').item()) # sum up batch loss pred = F.log_softmax(temp2, dim=1).argmax(dim=1, keepdim=True) # get the index of the max log-probability correct.append(pred.eq(target.view_as(pred)).sum().item()) loss = sum(test_loss) / len(test_loader.dataset) # print('{:.0f}'.format(100. * correct / len(test_loader.dataset))) print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format( loss, sum(correct), len(test_loader.dataset), 100. * sum(correct) / len(test_loader.dataset))) return loss, 100. * sum(correct) / len(test_loader.dataset)
[ "torch.mul", "torch.mode", "torch.nn.functional.nll_loss", "torch.mean", "sklearn.metrics.auc", "torch.nn.L1Loss", "torch.nn.NLLLoss", "torch.nn.functional.log_softmax", "torch.nn.functional.cross_entropy", "utils.AverageMeter", "torch.no_grad", "torch.zeros_like", "time.time", "torch.nn.f...
[((3492, 3510), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (3508, 3510), False, 'import torch\n'), ((6393, 6427), 'utils.AverageMeter', 'utils.AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (6411, 6427), False, 'import utils\n'), ((6696, 6707), 'time.time', 'time.time', ([], {}), '()\n', (6705, 6707), False, 'import time\n'), ((6770, 6781), 'time.time', 'time.time', ([], {}), '()\n', (6779, 6781), False, 'import time\n'), ((8074, 8108), 'utils.AverageMeter', 'utils.AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (8092, 8108), False, 'import utils\n'), ((8377, 8388), 'time.time', 'time.time', ([], {}), '()\n', (8386, 8388), False, 'import time\n'), ((8451, 8462), 'time.time', 'time.time', ([], {}), '()\n', (8460, 8462), False, 'import time\n'), ((9816, 9850), 'utils.AverageMeter', 'utils.AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (9834, 9850), False, 'import utils\n'), ((10028, 10039), 'time.time', 'time.time', ([], {}), '()\n', (10037, 10039), False, 'import time\n'), ((10102, 10113), 'time.time', 'time.time', ([], {}), '()\n', (10111, 10113), False, 'import time\n'), ((13449, 13477), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (13462, 13477), True, 'import torch.nn.functional as F\n'), ((340, 364), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (353, 364), True, 'import torch.nn.functional as F\n'), ((2594, 2639), 'torch.zeros_like', 'torch.zeros_like', (['target'], {'dtype': 'torch.float32'}), '(target, dtype=torch.float32)\n', (2610, 2639), False, 'import torch\n'), ((2789, 2813), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (2802, 2813), True, 'import torch.nn.functional as F\n'), ((3246, 3270), 'torch.mean', 'torch.mean', (['preds'], {'dim': '(2)'}), '(preds, dim=2)\n', (3256, 3270), False, 'import torch\n'), 
((4290, 4305), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4303, 4305), False, 'import torch\n'), ((5562, 5579), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (5577, 5579), False, 'import torch\n'), ((5991, 6008), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (6006, 6008), False, 'import torch\n'), ((6932, 6943), 'time.time', 'time.time', ([], {}), '()\n', (6941, 6943), False, 'import time\n'), ((7675, 7686), 'time.time', 'time.time', ([], {}), '()\n', (7684, 7686), False, 'import time\n'), ((8661, 8672), 'time.time', 'time.time', ([], {}), '()\n', (8670, 8672), False, 'import time\n'), ((9413, 9424), 'time.time', 'time.time', ([], {}), '()\n', (9422, 9424), False, 'import time\n'), ((10223, 10234), 'time.time', 'time.time', ([], {}), '()\n', (10232, 10234), False, 'import time\n'), ((10946, 10957), 'time.time', 'time.time', ([], {}), '()\n', (10955, 10957), False, 'import time\n'), ((11365, 11380), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11378, 11380), False, 'import torch\n'), ((12413, 12428), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12426, 12428), False, 'import torch\n'), ((13649, 13669), 'torch.mode', 'torch.mode', (['(pred % 2)'], {}), '(pred % 2)\n', (13659, 13669), False, 'import torch\n'), ((13799, 13814), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13812, 13814), False, 'import torch\n'), ((15073, 15088), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15086, 15088), False, 'import torch\n'), ((2986, 3020), 'torch.mul', 'torch.mul', (['confidencemat', 'nll_loss'], {}), '(confidencemat, nll_loss)\n', (2995, 3020), False, 'import torch\n'), ((3022, 3055), 'torch.mul', 'torch.mul', (['smoothing', 'smooth_loss'], {}), '(smoothing, smooth_loss)\n', (3031, 3055), False, 'import torch\n'), ((4584, 4674), 'torch.cat', 'torch.cat', (['[outputs[0], outputs[1][:, :, model.out_size - 125:model.out_size]]'], {'dim': '(2)'}), '([outputs[0], outputs[1][:, :, model.out_size - 125:model.out_size\n ]], 
dim=2)\n', (4593, 4674), False, 'import torch\n'), ((7740, 7751), 'time.time', 'time.time', ([], {}), '()\n', (7749, 7751), False, 'import time\n'), ((9478, 9489), 'time.time', 'time.time', ([], {}), '()\n', (9487, 9489), False, 'import time\n'), ((11011, 11022), 'time.time', 'time.time', ([], {}), '()\n', (11020, 11022), False, 'import time\n'), ((12729, 12753), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (12738, 12753), True, 'import torch.nn.functional as F\n'), ((12849, 12862), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (12852, 12862), False, 'from sklearn.metrics import auc\n'), ((15366, 15390), 'torch.cat', 'torch.cat', (['output'], {'dim': '(2)'}), '(output, dim=2)\n', (15375, 15390), False, 'import torch\n'), ((7088, 7116), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (7101, 7116), True, 'import torch.nn.functional as F\n'), ((8817, 8845), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (8830, 8845), True, 'import torch.nn.functional as F\n'), ((10359, 10387), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (10372, 10387), True, 'import torch.nn.functional as F\n'), ((11682, 11710), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (11695, 11710), True, 'import torch.nn.functional as F\n'), ((15554, 15581), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['temp2'], {'dim': '(1)'}), '(temp2, dim=1)\n', (15567, 15581), True, 'import torch.nn.functional as F\n'), ((4751, 4805), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['y_preds_per_trial', 'target'], {'reduction': '"""sum"""'}), "(y_preds_per_trial, target, reduction='sum')\n", (4761, 4805), True, 'import torch.nn.functional as F\n'), ((11584, 11632), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', 
(['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (11599, 11632), True, 'import torch.nn.functional as F\n'), ((12632, 12680), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (12647, 12680), True, 'import torch.nn.functional as F\n'), ((14231, 14292), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', '(subjid * 2 + target)'], {'reduction': '"""sum"""'}), "(output, subjid * 2 + target, reduction='sum')\n", (14246, 14292), True, 'import torch.nn.functional as F\n'), ((15457, 15504), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['temp2', 'target'], {'reduction': '"""sum"""'}), "(temp2, target, reduction='sum')\n", (15472, 15504), True, 'import torch.nn.functional as F\n')]
import matplotlib import numpy as np import time # matplotlib.use('Agg') import matplotlib.pyplot as plt VOC_BBOX_LABEL_NAMES = ( 'fly', 'bike', 'bird', 'boat', 'pin', 'bus', 'c', 'cat', 'chair', 'cow', 'table', 'dog', 'horse', 'moto', 'p', 'plant', 'shep', 'sofa', 'train', 'tv', ) def vis_img(img, ax=None): """Visualize a color image. Args: img (~numpy.ndarray): An array of shape :math:`(3, height, width)`. This is in RGB format and the range of its value is :math:`[0, 255]`. ax (matplotlib.axes.Axis): The visualization is displayed on this axis. If this is :obj:`None` (default), a new axis is created. Returns: ~matploblib.axes.Axes: Returns the Axes object with the plot for further tweaking. """ if ax is None: fig = plt.figure() ax = fig.add_subplot(1, 1, 1) # CHW ==> HWC img = img.transpose((1, 2, 0)) ax.imshow(img.astype(np.uint8)) return ax def vis_bbox(img, bbox, label=None, score=None, ax=None): """ Visualize bounding boxes inside image. :param img: :param bbox: :param label: :param score: :param ax: :return: """ label_names = list(VOC_BBOX_LABEL_NAMES) + ['bg'] if label is not None and not len(bbox) == len(label): raise ValueError('The length of label must be same as that of bbox') if score is not None and not len(bbox) == len(score): raise ValueError('The length of score must be same as that of bbox') # Returns newly instantiated matplotlib.axes.Axes object if ax is None ax = vis_img(img, ax=ax) # If there is no bounding box to display, visualize the image and exit. 
if len(bbox) == 0: return ax for i, bb in enumerate(bbox): xy = (bb[1], bb[0]) height = bb[2] - bb[0] width = bb[3] - bb[1] ax.add_patch(plt.Rectangle( xy, width, height, fill=False, edgecolor='red', linewidth=2)) caption = list() if label is not None and label_names is not None: lb = label[i] if not (-1 <= lb < len(label_names)): raise ValueError('No corresponding name is given') caption.append(label_names[lb]) if score is not None: sc = score[i] caption.append('{:.2f}'.format(sc)) if len(caption) > 0: ax.text(bb[1], bb[0], ':'.join(caption), style='italic', color='white', bbox={'facecolor': (0.8, 0.2, 0.2), 'alpha': 0.9, 'pad': 1.5}) return ax def fig2data(fig): """ brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it @param fig: a matplotlib figure @return a numpy 3D array of RGBA values """ # draw the renderer fig.canvas.draw() # Get the RGBA buffer from the figure w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode buf = np.roll(buf, 3, axis=2) return buf.reshape((h, w, 4)) def fig4vis(fig): """ convert figure to ndarray """ ax = fig.get_figure() img_data = fig2data(ax).astype(np.int32) plt.close() # HWC ==> CHW return img_data[:, :, :3].transpose((2, 0, 1)) / 255. def visdom_bbox(*args, **kwargs): fig = vis_bbox(*args, **kwargs) data = fig4vis(fig) return data
[ "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.roll", "matplotlib.pyplot.Rectangle" ]
[((3262, 3285), 'numpy.roll', 'np.roll', (['buf', '(3)'], {'axis': '(2)'}), '(buf, 3, axis=2)\n', (3269, 3285), True, 'import numpy as np\n'), ((3462, 3473), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3471, 3473), True, 'import matplotlib.pyplot as plt\n'), ((915, 927), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (925, 927), True, 'import matplotlib.pyplot as plt\n'), ((1977, 2051), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['xy', 'width', 'height'], {'fill': '(False)', 'edgecolor': '"""red"""', 'linewidth': '(2)'}), "(xy, width, height, fill=False, edgecolor='red', linewidth=2)\n", (1990, 2051), True, 'import matplotlib.pyplot as plt\n')]
from lib.util.processing import processing from lib.model.encoder import SimpleEncoder from lib.model.decoder import AttentionDecoder import torch import random if __name__ == "__main__": # 1. Declare the hyperparameter device, configure, word_index, index_word, train_loader, test_loader = processing("./configure") print(len(word_index)) # Declare the encoder model model_encoder = SimpleEncoder(configure).to(device) model_decoder = AttentionDecoder(configure, device).to(device) # Define the optimizer and loss criterion = torch.nn.CrossEntropyLoss() # encoder optimizer optimizer_encoder = torch.optim.Adam(model_encoder.parameters(), lr=configure["lr"]) optimizer_decoder = torch.optim.Adam(model_decoder.parameters(), lr=configure["lr"]) # Training for epoch in range(configure["epochs"]): for idx, item in enumerate(train_loader): # transfer to long tensor input, target = [i.type(torch.LongTensor).to(device) for i in item] if input.size(0) != configure["batch_size"]: continue # Encoder encoder_out, encoder_hidden = model_encoder(input) # Decoder # declare the first input <go> decoder_input = torch.tensor([word_index["<go>"]]*configure["batch_size"], dtype=torch.long, device=device).view(configure["batch_size"], -1) decoder_hidden = encoder_hidden z = torch.ones([configure["batch_size"],1,configure["hidden_size"]]).to(device) coverage = torch.zeros([configure["batch_size"],configure["max_content"]]).to(device) seq_loss = 0 for i in range(configure["max_output"]): decoder_output, decoder_hidden, z, attn, coverage = model_decoder(decoder_input, decoder_hidden, encoder_out, z, input, coverage) coverage = coverage if random.randint(1, 10) > 5: _, decoder_input = torch.max(decoder_output, 1) decoder_input = decoder_input.view(configure["batch_size"], -1) else: decoder_input = target[:,i].view(configure["batch_size"], -1) decoder_hidden = decoder_hidden step_coverage_loss = torch.sum(torch.min(attn.reshape(-1,1), coverage.reshape(-1,1)), 1) step_coverage_loss = 
torch.sum(step_coverage_loss) # print(coverage) # print("---") # decoder_output = decoder_output.reshape(configure["batch_size"], -1, 1) # print(step_coverage_loss) # print((criterion(decoder_output, target[:,i].reshape(configure["batch_size"],-1)))) # print(-torch.log(decoder_output+target[:,i])) seq_loss += (criterion(decoder_output, target[:,i])) # print(seq_loss) seq_loss += step_coverage_loss # print(decoder_input) optimizer_encoder.zero_grad() optimizer_decoder.zero_grad() seq_loss.backward() optimizer_encoder.step() optimizer_decoder.step() if (idx) % 10 == 0: print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Coverage Loss: {:4f}' .format(epoch+1, configure["epochs"], idx, len(train_loader), seq_loss.item(),step_coverage_loss.item())) # Test the model with torch.no_grad(): correct = 0 total = 0 for idx, item in enumerate(test_loader): # transfer to long tensor input, target = [i.type(torch.LongTensor).to(device) for i in item] if input.size(0) != configure["batch_size"]: continue # Encoder encoder_out, encoder_hidden = model_encoder(input) # Decoder # declare the first input <go> decoder_input = torch.tensor([word_index["<go>"]]*configure["batch_size"], dtype=torch.long, device=device).view(configure["batch_size"], -1) decoder_hidden = encoder_hidden seq_loss = 0 result = [] z = torch.ones([configure["batch_size"],1,configure["hidden_size"]]).to(device) coverage = torch.zeros([configure["batch_size"],configure["max_content"]]).to(device) for i in range(configure["max_output"]): decoder_output, decoder_hidden, z, attn, coverage = model_decoder(decoder_input, decoder_hidden, encoder_out, z, input, coverage) _, decoder_input = torch.max(decoder_output, 1) decoder_input = decoder_input.view(configure["batch_size"], -1) decoder_hidden = decoder_hidden total += configure["batch_size"] correct += (torch.max(decoder_output, 1)[1] == target[:,i]).sum().item() # print(torch.max(decoder_output, 1)[1],target[:,i]) result.append(index_word[torch.max(decoder_output, 
1)[1][1].item()]) with open("test.txt", "a+", encoding="utf-8") as a: a.write("".join(result)+"\n") print('Test Accuracy of the model on the test: {} %'.format(100 * correct / total)) # Save the model checkpoint # torch.save(model.state_dict(), 'model.ckpt')`
[ "random.randint", "lib.model.encoder.SimpleEncoder", "torch.nn.CrossEntropyLoss", "lib.model.decoder.AttentionDecoder", "torch.max", "torch.tensor", "torch.sum", "torch.no_grad", "lib.util.processing.processing", "torch.zeros", "torch.ones" ]
[((310, 335), 'lib.util.processing.processing', 'processing', (['"""./configure"""'], {}), "('./configure')\n", (320, 335), False, 'from lib.util.processing import processing\n'), ((573, 600), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (598, 600), False, 'import torch\n'), ((416, 440), 'lib.model.encoder.SimpleEncoder', 'SimpleEncoder', (['configure'], {}), '(configure)\n', (429, 440), False, 'from lib.model.encoder import SimpleEncoder\n'), ((472, 507), 'lib.model.decoder.AttentionDecoder', 'AttentionDecoder', (['configure', 'device'], {}), '(configure, device)\n', (488, 507), False, 'from lib.model.decoder import AttentionDecoder\n'), ((3536, 3551), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3549, 3551), False, 'import torch\n'), ((2456, 2485), 'torch.sum', 'torch.sum', (['step_coverage_loss'], {}), '(step_coverage_loss)\n', (2465, 2485), False, 'import torch\n'), ((1296, 1394), 'torch.tensor', 'torch.tensor', (["([word_index['<go>']] * configure['batch_size'])"], {'dtype': 'torch.long', 'device': 'device'}), "([word_index['<go>']] * configure['batch_size'], dtype=torch.\n long, device=device)\n", (1308, 1394), False, 'import torch\n'), ((1524, 1590), 'torch.ones', 'torch.ones', (["[configure['batch_size'], 1, configure['hidden_size']]"], {}), "([configure['batch_size'], 1, configure['hidden_size']])\n", (1534, 1590), False, 'import torch\n'), ((1623, 1687), 'torch.zeros', 'torch.zeros', (["[configure['batch_size'], configure['max_content']]"], {}), "([configure['batch_size'], configure['max_content']])\n", (1634, 1687), False, 'import torch\n'), ((1980, 2001), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1994, 2001), False, 'import random\n'), ((2046, 2074), 'torch.max', 'torch.max', (['decoder_output', '(1)'], {}), '(decoder_output, 1)\n', (2055, 2074), False, 'import torch\n'), ((4827, 4855), 'torch.max', 'torch.max', (['decoder_output', '(1)'], {}), '(decoder_output, 1)\n', (4836, 4855), 
False, 'import torch\n'), ((4101, 4199), 'torch.tensor', 'torch.tensor', (["([word_index['<go>']] * configure['batch_size'])"], {'dtype': 'torch.long', 'device': 'device'}), "([word_index['<go>']] * configure['batch_size'], dtype=torch.\n long, device=device)\n", (4113, 4199), False, 'import torch\n'), ((4397, 4463), 'torch.ones', 'torch.ones', (["[configure['batch_size'], 1, configure['hidden_size']]"], {}), "([configure['batch_size'], 1, configure['hidden_size']])\n", (4407, 4463), False, 'import torch\n'), ((4500, 4564), 'torch.zeros', 'torch.zeros', (["[configure['batch_size'], configure['max_content']]"], {}), "([configure['batch_size'], configure['max_content']])\n", (4511, 4564), False, 'import torch\n'), ((5080, 5108), 'torch.max', 'torch.max', (['decoder_output', '(1)'], {}), '(decoder_output, 1)\n', (5089, 5108), False, 'import torch\n'), ((5259, 5287), 'torch.max', 'torch.max', (['decoder_output', '(1)'], {}), '(decoder_output, 1)\n', (5268, 5287), False, 'import torch\n')]
import julia import numpy as np import os.path as osp import gym from brl_gym.envs.mujoco import box_pusher env = box_pusher.BoxPusher() rlopt = "/home/gilwoo/School_Workspace/rlopt" j = julia.Julia() j.include(osp.join(rlopt, "_init.jl")) j.include(osp.join(rlopt, "src/pg/Baseline.jl")) j.include(osp.join(rlopt, "src/ExpSim.jl")) polo = "/tmp/pusher_polo_opt_1" baseline = j.Baseline.loadbaseline(osp.join(polo, "baseline.jld2")) datafile = j.ExpSim.load(osp.join(polo, "data.jld2")) # Replay the datafile state = np.squeeze(datafile["state"]).transpose() ctrl = np.squeeze(datafile["ctrl"]).transpose() obs = np.squeeze(datafile["obs"]).transpose() # a = [x for x in a.split(";")] # data = [] # for x in a: # data += [[float(y) for y in x.split(" ") if y != " " and y != ""]] # data = np.array(data) # value = j.Baseline.predict(baseline, data.tolist()) # print("Value", value) o = env.reset() # env.set_state_ctrl(state[1,:35], state[1,35:], ctrl[1]) #o, r, d, _ = env.step(ctrl[0]) # new_state = env.sim.get_state() # import IPython; IPython.embed(); import sys; sys.exit(0) #copy_env = humanoid_pushing.HumanoidPushingEnv() #copy_env.reset() print(state.shape) states = [] observations = [] rewards = [] values = [] for i in range(state.shape[0]): env.set_state(state[i,:5], state[i,5:]) # states += [(env.sim.get_state().qpos, env.sim.get_state().qvel)] o, r, d, _ = env.step(ctrl[i]) # observations += [o] # rewards += [r] values += [j.Baseline.predict(baseline, o.reshape(-1,1).tolist())] env.render() import IPython; IPython.embed()
[ "brl_gym.envs.mujoco.box_pusher.BoxPusher", "os.path.join", "julia.Julia", "IPython.embed", "numpy.squeeze" ]
[((114, 136), 'brl_gym.envs.mujoco.box_pusher.BoxPusher', 'box_pusher.BoxPusher', ([], {}), '()\n', (134, 136), False, 'from brl_gym.envs.mujoco import box_pusher\n'), ((188, 201), 'julia.Julia', 'julia.Julia', ([], {}), '()\n', (199, 201), False, 'import julia\n'), ((1570, 1585), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (1583, 1585), False, 'import IPython\n'), ((213, 240), 'os.path.join', 'osp.join', (['rlopt', '"""_init.jl"""'], {}), "(rlopt, '_init.jl')\n", (221, 240), True, 'import os.path as osp\n'), ((252, 289), 'os.path.join', 'osp.join', (['rlopt', '"""src/pg/Baseline.jl"""'], {}), "(rlopt, 'src/pg/Baseline.jl')\n", (260, 289), True, 'import os.path as osp\n'), ((301, 333), 'os.path.join', 'osp.join', (['rlopt', '"""src/ExpSim.jl"""'], {}), "(rlopt, 'src/ExpSim.jl')\n", (309, 333), True, 'import os.path as osp\n'), ((402, 433), 'os.path.join', 'osp.join', (['polo', '"""baseline.jld2"""'], {}), "(polo, 'baseline.jld2')\n", (410, 433), True, 'import os.path as osp\n'), ((460, 487), 'os.path.join', 'osp.join', (['polo', '"""data.jld2"""'], {}), "(polo, 'data.jld2')\n", (468, 487), True, 'import os.path as osp\n'), ((520, 549), 'numpy.squeeze', 'np.squeeze', (["datafile['state']"], {}), "(datafile['state'])\n", (530, 549), True, 'import numpy as np\n'), ((569, 597), 'numpy.squeeze', 'np.squeeze', (["datafile['ctrl']"], {}), "(datafile['ctrl'])\n", (579, 597), True, 'import numpy as np\n'), ((616, 643), 'numpy.squeeze', 'np.squeeze', (["datafile['obs']"], {}), "(datafile['obs'])\n", (626, 643), True, 'import numpy as np\n')]
import numpy as np from scipy.optimize import fmin from scipy.stats import kurtosis from scipy.special import lambertw """ The algorithm is based on [1]. The implementation is based on [2]. [1]: <NAME>, 2013 (https://arxiv.org/pdf/1010.2265.pdf) [2]: <NAME>, 2015 (https://github.com/gregversteeg/gaussianize) """ def estimate_parameters(x): return np.array([igmm(x_i) for x_i in x.T]) def transform(x, parameters): return np.array([ lambertw_tau(x_i, tau_i) for x_i, tau_i in zip(x.T, parameters) ]).T def inverse_transform(y, parameters): return np.array([ inverse(y_i, tau_i) for y_i, tau_i in zip(y.T, parameters) ]).T def lambertw_delta(z, delta): """Lambertw delta function as defined in (9).""" if delta < 1e-6: return z return np.sign(z) * np.sqrt(np.real(lambertw(delta * z ** 2)) / delta) def lambertw_tau(y, tau): """Lambertw tau function as defined in (8).""" return tau[0] + tau[1] * lambertw_delta((y - tau[0]) / tau[1], tau[2]) def inverse(x, tau): """Inverse distribution transform as defined in (6).""" u = (x - tau[0]) / tau[1] return tau[0] + tau[1] * (u * np.exp(u * u * (tau[2] * 0.5))) def igmm(y, tol=1.22e-4, max_iter=100): if np.std(y) < 1e-4: return np.mean(y), np.std(y).clip(1e-4), 0 delta0 = delta_init(y) tau1 = (np.median(y), np.std(y) * (1. - 2. * delta0) ** 0.75, delta0) for k in range(max_iter): tau0 = tau1 z = (y - tau1[0]) / tau1[1] delta1 = delta_gmm(z) x = tau0[0] + tau1[1] * lambertw_delta(z, delta1) mu1, sigma1 = np.mean(x), np.std(x) tau1 = (mu1, sigma1, delta1) if np.linalg.norm(np.array(tau1) - np.array(tau0)) < tol: break else: if k == max_iter - 1: print(f'Warning: No convergence after {max_iter} iterations.') return tau1 def delta_gmm(z): delta0 = delta_init(z) def func(q): u = lambertw_delta(z, np.exp(q)) if not np.all(np.isfinite(u)): return 0. 
else: k = kurtosis(u, fisher=True, bias=False)**2 if not np.isfinite(k) or k > 1e10: return 1e10 else: return k res = fmin(func, np.log(delta0), disp=0) return np.around(np.exp(res[-1]), 6) def delta_init(z): gamma = kurtosis(z, fisher=False, bias=False) with np.errstate(all='ignore'): delta0 = np.clip(1. / 66 * (np.sqrt(66 * gamma - 162.) - 6.), 0.01, 0.48) if not np.isfinite(delta0): delta0 = 0.01 return delta0
[ "numpy.mean", "numpy.median", "numpy.sqrt", "scipy.special.lambertw", "scipy.stats.kurtosis", "numpy.log", "numpy.exp", "numpy.errstate", "numpy.array", "numpy.isfinite", "numpy.sign", "numpy.std" ]
[((2216, 2253), 'scipy.stats.kurtosis', 'kurtosis', (['z'], {'fisher': '(False)', 'bias': '(False)'}), '(z, fisher=False, bias=False)\n', (2224, 2253), False, 'from scipy.stats import kurtosis\n'), ((779, 789), 'numpy.sign', 'np.sign', (['z'], {}), '(z)\n', (786, 789), True, 'import numpy as np\n'), ((1214, 1223), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (1220, 1223), True, 'import numpy as np\n'), ((1314, 1326), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (1323, 1326), True, 'import numpy as np\n'), ((2122, 2136), 'numpy.log', 'np.log', (['delta0'], {}), '(delta0)\n', (2128, 2136), True, 'import numpy as np\n'), ((2165, 2180), 'numpy.exp', 'np.exp', (['res[-1]'], {}), '(res[-1])\n', (2171, 2180), True, 'import numpy as np\n'), ((2261, 2286), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (2272, 2286), True, 'import numpy as np\n'), ((2375, 2394), 'numpy.isfinite', 'np.isfinite', (['delta0'], {}), '(delta0)\n', (2386, 2394), True, 'import numpy as np\n'), ((1243, 1253), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1250, 1253), True, 'import numpy as np\n'), ((1328, 1337), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (1334, 1337), True, 'import numpy as np\n'), ((1550, 1560), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1557, 1560), True, 'import numpy as np\n'), ((1562, 1571), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1568, 1571), True, 'import numpy as np\n'), ((1890, 1899), 'numpy.exp', 'np.exp', (['q'], {}), '(q)\n', (1896, 1899), True, 'import numpy as np\n'), ((1135, 1165), 'numpy.exp', 'np.exp', (['(u * u * (tau[2] * 0.5))'], {}), '(u * u * (tau[2] * 0.5))\n', (1141, 1165), True, 'import numpy as np\n'), ((1919, 1933), 'numpy.isfinite', 'np.isfinite', (['u'], {}), '(u)\n', (1930, 1933), True, 'import numpy as np\n'), ((1972, 2008), 'scipy.stats.kurtosis', 'kurtosis', (['u'], {'fisher': '(True)', 'bias': '(False)'}), '(u, fisher=True, bias=False)\n', (1980, 2008), False, 'from scipy.stats import 
kurtosis\n'), ((808, 832), 'scipy.special.lambertw', 'lambertw', (['(delta * z ** 2)'], {}), '(delta * z ** 2)\n', (816, 832), False, 'from scipy.special import lambertw\n'), ((1255, 1264), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (1261, 1264), True, 'import numpy as np\n'), ((1628, 1642), 'numpy.array', 'np.array', (['tau1'], {}), '(tau1)\n', (1636, 1642), True, 'import numpy as np\n'), ((1645, 1659), 'numpy.array', 'np.array', (['tau0'], {}), '(tau0)\n', (1653, 1659), True, 'import numpy as np\n'), ((2025, 2039), 'numpy.isfinite', 'np.isfinite', (['k'], {}), '(k)\n', (2036, 2039), True, 'import numpy as np\n'), ((2320, 2347), 'numpy.sqrt', 'np.sqrt', (['(66 * gamma - 162.0)'], {}), '(66 * gamma - 162.0)\n', (2327, 2347), True, 'import numpy as np\n')]
import factory from portfolios.models import Certification from users.factories.user_factory import UserFactory from factory.django import DjangoModelFactory from utils.helpers import create_factory_data class CertificationFactory(DjangoModelFactory): class Meta: model = Certification user = factory.SubFactory(UserFactory) name = factory.Faker('word') organization = factory.Faker('word') address = factory.Faker('word') issue_date = factory.Faker('date_time_between', issue_date='-1y', expiration_date='now') expiration_date = factory.Faker('date_time_between', issue_date='-1y', expiration_date='now') does_not_expire = True if expiration_date is None else False credential_id = factory.Faker('number') credential_url = factory.Faker('url') description = factory.Faker('text') def create_certifications_with_factory( num_of_data=7, display_name="certification", display_name_plural="certifications", delete_old_data=False ): return create_factory_data( factory=CertificationFactory, num_of_data=num_of_data, display_name=display_name, display_name_plural=display_name_plural, delete_old_data=delete_old_data, model=Certification )
[ "factory.SubFactory", "factory.Faker", "utils.helpers.create_factory_data" ]
[((312, 343), 'factory.SubFactory', 'factory.SubFactory', (['UserFactory'], {}), '(UserFactory)\n', (330, 343), False, 'import factory\n'), ((355, 376), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (368, 376), False, 'import factory\n'), ((396, 417), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (409, 417), False, 'import factory\n'), ((432, 453), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (445, 453), False, 'import factory\n'), ((471, 546), 'factory.Faker', 'factory.Faker', (['"""date_time_between"""'], {'issue_date': '"""-1y"""', 'expiration_date': '"""now"""'}), "('date_time_between', issue_date='-1y', expiration_date='now')\n", (484, 546), False, 'import factory\n'), ((569, 644), 'factory.Faker', 'factory.Faker', (['"""date_time_between"""'], {'issue_date': '"""-1y"""', 'expiration_date': '"""now"""'}), "('date_time_between', issue_date='-1y', expiration_date='now')\n", (582, 644), False, 'import factory\n'), ((730, 753), 'factory.Faker', 'factory.Faker', (['"""number"""'], {}), "('number')\n", (743, 753), False, 'import factory\n'), ((775, 795), 'factory.Faker', 'factory.Faker', (['"""url"""'], {}), "('url')\n", (788, 795), False, 'import factory\n'), ((814, 835), 'factory.Faker', 'factory.Faker', (['"""text"""'], {}), "('text')\n", (827, 835), False, 'import factory\n'), ((1006, 1210), 'utils.helpers.create_factory_data', 'create_factory_data', ([], {'factory': 'CertificationFactory', 'num_of_data': 'num_of_data', 'display_name': 'display_name', 'display_name_plural': 'display_name_plural', 'delete_old_data': 'delete_old_data', 'model': 'Certification'}), '(factory=CertificationFactory, num_of_data=num_of_data,\n display_name=display_name, display_name_plural=display_name_plural,\n delete_old_data=delete_old_data, model=Certification)\n', (1025, 1210), False, 'from utils.helpers import create_factory_data\n')]
from flask import Blueprint from api.controllers import imagescontroller imagesprint = Blueprint("images", __name__) imagesprint.add_url_rule( "/image/create/<int:project_id>", view_func=imagescontroller.imageController["save_image"], methods=["POST"] ) imagesprint.add_url_rule( "/image/get/<int:project_id>", view_func=imagescontroller.imageController["get_all_images"], methods=["GET"] ) imagesprint.add_url_rule( "/image/get_image/<int:project_id>/<int:image_id>", view_func=imagescontroller.imageController["get_image"], methods=["GET"] ) imagesprint.add_url_rule( "/image/delete/<int:project_id>", view_func=imagescontroller.imageController["delete_images"], methods=["POST"] ) imagesprint.add_url_rule( "/image/update/<int:image_id>", view_func=imagescontroller.imageController["update_labels"], methods=["PUT"] )
[ "flask.Blueprint" ]
[((89, 118), 'flask.Blueprint', 'Blueprint', (['"""images"""', '__name__'], {}), "('images', __name__)\n", (98, 118), False, 'from flask import Blueprint\n')]
from distutils.core import setup import os setup(name='pylagrit', version='1.0.0', description='Python interface for LaGriT', author='<NAME>', author_email='<EMAIL>', url='lagrit.lanl.gov', license='LGPL', packages=[ 'pylagrit', 'pylagrit.pexpect'] )
[ "distutils.core.setup" ]
[((44, 262), 'distutils.core.setup', 'setup', ([], {'name': '"""pylagrit"""', 'version': '"""1.0.0"""', 'description': '"""Python interface for LaGriT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""lagrit.lanl.gov"""', 'license': '"""LGPL"""', 'packages': "['pylagrit', 'pylagrit.pexpect']"}), "(name='pylagrit', version='1.0.0', description=\n 'Python interface for LaGriT', author='<NAME>', author_email='<EMAIL>',\n url='lagrit.lanl.gov', license='LGPL', packages=['pylagrit',\n 'pylagrit.pexpect'])\n", (49, 262), False, 'from distutils.core import setup\n')]
""" Compute intersection of two sampled datasets sentences. """ import sys import glob import json import gzip def get_uids(input_file_path): """ Convert datasets to set of uids. """ use_gzip = True if input_file_path[-3:] == '.gz' else False if use_gzip: file_i = gzip.GzipFile(input_file_path, 'r') else: file_i = open(input_file_path, encoding='utf-8') sentences = set() for row in file_i: if use_gzip: json_data = json.loads(row.decode('utf-8')) else: json_data = json.loads(row) uid = '_'.join([ str(json_data['did']), str(json_data['pid']), str(json_data['sid'])]) sentences.add(uid) return sentences if __name__ == '__main__': sentences_1 = get_uids(sys.argv[1]) sentences_2 = get_uids(sys.argv[2]) print(len(sentences_1)) print(len(sentences_2)) print(len(sentences_1.intersection(sentences_2)))
[ "gzip.GzipFile", "json.loads" ]
[((296, 331), 'gzip.GzipFile', 'gzip.GzipFile', (['input_file_path', '"""r"""'], {}), "(input_file_path, 'r')\n", (309, 331), False, 'import gzip\n'), ((560, 575), 'json.loads', 'json.loads', (['row'], {}), '(row)\n', (570, 575), False, 'import json\n')]
from flask import jsonify from discord_interactions import verify_key, InteractionType, InteractionResponseType import color PUBLIC_KEY = "" def handler(request): # Verify request signature = request.headers.get("X-Signature-Ed25519") timestamp = request.headers.get("X-Signature-Timestamp") if ( signature is None or timestamp is None or not verify_key(request.data, signature, timestamp, PUBLIC_KEY) ): return "Bad request signature", 401 # Automatically respond to pings if request.json and request.json.get("type") == InteractionType.PING: return jsonify({"type": InteractionResponseType.PONG}) # Pass through if request.json["type"] == InteractionType.APPLICATION_COMMAND: if request.json["data"]["name"] == "color": return jsonify(color.main(request.json))
[ "discord_interactions.verify_key", "color.main", "flask.jsonify" ]
[((625, 672), 'flask.jsonify', 'jsonify', (["{'type': InteractionResponseType.PONG}"], {}), "({'type': InteractionResponseType.PONG})\n", (632, 672), False, 'from flask import jsonify\n'), ((388, 446), 'discord_interactions.verify_key', 'verify_key', (['request.data', 'signature', 'timestamp', 'PUBLIC_KEY'], {}), '(request.data, signature, timestamp, PUBLIC_KEY)\n', (398, 446), False, 'from discord_interactions import verify_key, InteractionType, InteractionResponseType\n'), ((840, 864), 'color.main', 'color.main', (['request.json'], {}), '(request.json)\n', (850, 864), False, 'import color\n')]
import unittest import numpy as np import pandas as pd from sklearn import datasets from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from punk.feature_selection import PCAFeatures from punk.feature_selection import RFFeatures class TestPCA(unittest.TestCase): def setUp(self): iris = datasets.load_iris() sc = StandardScaler() self.X = sc.fit_transform(iris.data) def test_pca(self): rankings = PCAFeatures() importances = rankings.produce(self.X) self.assertTrue( np.array_equal( importances, np.array([2, 3, 0, 1])) ) class TestRFC(unittest.TestCase): def setUp(self): df_wine = pd.read_csv('https://raw.githubusercontent.com/rasbt/' 'python-machine-learning-book/master/code/datasets/wine/wine.data', header=None) X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values self.X, _, self.y, _ = train_test_split(X, y, test_size=0.3, random_state=0) def test_rfc(self): rf = RFFeatures(problem_type="classification", cv=3, scoring="accuracy", verbose=0, n_jobs=1) indices = rf.produce((self.X, self.y)) self.assertTrue( np.all(np.isfinite( rf.feature_importances )) ) importances = np.array([9, 12, 6, 11, 0, 10, 5, 3, 1, 8, 4, 7, 2]) self.assertTrue( np.array_equal(indices, importances) ) class TestRFR(unittest.TestCase): def setUp(self): boston = datasets.load_boston() self.X, self.y = boston.data, boston.target def test_rfr(self): rf = RFFeatures(problem_type="regression", cv=3, scoring="r2", verbose=0, n_jobs=1) indices = rf.produce((self.X, self.y)) self.assertTrue( np.all(np.isfinite( rf.feature_importances )) ) importances = np.array([5, 12, 7, 0, 4, 10, 9, 6, 11, 2, 8, 1, 3]) self.assertTrue( np.array_equal(indices, importances) ) if __name__ == '__main__': unittest.main()
[ "sklearn.datasets.load_iris", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_boston", "punk.feature_selection.PCAFeatures", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.array_equal", "numpy.isfinite", "unittest.main", "punk.feature_selectio...
[((2232, 2247), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2245, 2247), False, 'import unittest\n'), ((409, 429), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (427, 429), False, 'from sklearn import datasets\n'), ((444, 460), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (458, 460), False, 'from sklearn.preprocessing import StandardScaler\n'), ((630, 643), 'punk.feature_selection.PCAFeatures', 'PCAFeatures', ([], {}), '()\n', (641, 643), False, 'from punk.feature_selection import PCAFeatures\n'), ((875, 1017), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/rasbt/python-machine-learning-book/master/code/datasets/wine/wine.data"""'], {'header': 'None'}), "(\n 'https://raw.githubusercontent.com/rasbt/python-machine-learning-book/master/code/datasets/wine/wine.data'\n , header=None)\n", (886, 1017), True, 'import pandas as pd\n'), ((1176, 1229), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (1192, 1229), False, 'from sklearn.model_selection import train_test_split\n'), ((1269, 1362), 'punk.feature_selection.RFFeatures', 'RFFeatures', ([], {'problem_type': '"""classification"""', 'cv': '(3)', 'scoring': '"""accuracy"""', 'verbose': '(0)', 'n_jobs': '(1)'}), "(problem_type='classification', cv=3, scoring='accuracy', verbose\n =0, n_jobs=1)\n", (1279, 1362), False, 'from punk.feature_selection import RFFeatures\n'), ((1528, 1580), 'numpy.array', 'np.array', (['[9, 12, 6, 11, 0, 10, 5, 3, 1, 8, 4, 7, 2]'], {}), '([9, 12, 6, 11, 0, 10, 5, 3, 1, 8, 4, 7, 2])\n', (1536, 1580), True, 'import numpy as np\n'), ((1719, 1741), 'sklearn.datasets.load_boston', 'datasets.load_boston', ([], {}), '()\n', (1739, 1741), False, 'from sklearn import datasets\n'), ((1832, 1910), 'punk.feature_selection.RFFeatures', 'RFFeatures', ([], {'problem_type': '"""regression"""', 'cv': 
'(3)', 'scoring': '"""r2"""', 'verbose': '(0)', 'n_jobs': '(1)'}), "(problem_type='regression', cv=3, scoring='r2', verbose=0, n_jobs=1)\n", (1842, 1910), False, 'from punk.feature_selection import RFFeatures\n'), ((2083, 2135), 'numpy.array', 'np.array', (['[5, 12, 7, 0, 4, 10, 9, 6, 11, 2, 8, 1, 3]'], {}), '([5, 12, 7, 0, 4, 10, 9, 6, 11, 2, 8, 1, 3])\n', (2091, 2135), True, 'import numpy as np\n'), ((1606, 1642), 'numpy.array_equal', 'np.array_equal', (['indices', 'importances'], {}), '(indices, importances)\n', (1620, 1642), True, 'import numpy as np\n'), ((2161, 2197), 'numpy.array_equal', 'np.array_equal', (['indices', 'importances'], {}), '(indices, importances)\n', (2175, 2197), True, 'import numpy as np\n'), ((775, 797), 'numpy.array', 'np.array', (['[2, 3, 0, 1]'], {}), '([2, 3, 0, 1])\n', (783, 797), True, 'import numpy as np\n'), ((1465, 1500), 'numpy.isfinite', 'np.isfinite', (['rf.feature_importances'], {}), '(rf.feature_importances)\n', (1476, 1500), True, 'import numpy as np\n'), ((2020, 2055), 'numpy.isfinite', 'np.isfinite', (['rf.feature_importances'], {}), '(rf.feature_importances)\n', (2031, 2055), True, 'import numpy as np\n')]
from bs4 import BeautifulSoup from urllib.request import urlopen, Request import math from atpparser.constants import HEADERS from atpparser.util import format_player_name, get_archive_url, get_archive_filename, \ get_draw_url, get_draw_filename # downloads archive to "archive_{year}.html" def downloadArchive(year): archive_url = get_archive_url(year) archive_filename = get_archive_filename(year) req = Request(url=archive_url, headers=HEADERS) html = urlopen(req).read() # write html content to file html_file = open(archive_filename, "w") html_file.write(html.decode("utf-8")) html_file.close() return archive_filename # return list of draws: # [{"title": "Brisbane", "link": "/en/scores/archive/brisbane"}] def parseArchive(html_file): def strip(title_tag): title = str(title_tag.contents) for substr in [" ", '\\n', '\\', '[', ']', '\'', '\"']: title = title.replace(substr, '') return title data = [] soup = BeautifulSoup(open(html_file), "html.parser") for tourney in soup.find_all('tr', {'class': 'tourney-result'}): try: # this parsing method works for 2019, 2018, ... 
title_tag = tourney.find_all('span', {'class': 'tourney-title'}) title = strip(title_tag[0]) links = tourney.find_all('a') for link in links: # only some links are relevant href = None if 'href' not in link.attrs else link['href'] if href is not None and "singles" in href: data.append({"title": title, "link": href}) except: # this parsing method works for 2020 title_tag = tourney.find_next('a') title = strip(title_tag) links = tourney.find_all('a') for link in links: # only some links are relevant href = None if 'href' not in link.attrs else link['href'] if href is not None and "singles" in href: data.append({"title": title, "link": href}) return data # downloads draw to "{draw_title}_{draw_year}.html" def downloadDraw(draw_title, draw_link, draw_year): draw_url = get_draw_url(draw_link) draw_filename = get_draw_filename(draw_title, draw_year) req = Request(url=draw_url, headers=HEADERS) html = urlopen(req).read() # write html content to file html_file = open(draw_filename, "w") html_file.write(html.decode("utf-8")) html_file.close() return draw_filename def parseDraw(draw_file): def get_winner(pair, roundNum, numWinsDict): player1 = pair[0] player2 = pair[1] num_wins1 = 0 if player1 not in numWinsDict else numWinsDict[player1] num_wins2 = 0 if player2 not in numWinsDict else numWinsDict[player2] return player1 if num_wins1 >= roundNum else player2 if num_wins2 >= roundNum else "unknown" player_finalRound = {} player_result = {} seedDict = {} # key: player, value: seed countryDict = {} # key: player, value: country_image numWinsDict = {} # key: player, value: numWins round1_players = [] win_players = [] soup = BeautifulSoup(open(draw_file), "html.parser") # dates date_tag = soup.find_all('span', {'class': 'tourney-dates'})[0] dates = date_tag.text.strip().replace('\n', '').replace(' - ', ' ').split(' ') # players, matchups for box in soup.find_all('div', {'class': 'scores-draw-entry-box'}): table_tags = box.find_all('table') if len(table_tags) > 0: # round 1 entry 
tr_tags = box.find_all('tr') for tr_tag in tr_tags: span_tags = tr_tag.find_all('span') a_tags = tr_tag.find_all('a') if len(a_tags) > 0: # player info exists playerName = a_tags[0]['data-ga-label'] img_tags = tr_tag.find_all('img') if len(img_tags) > 0: playerCountry = img_tags[0]['src'] countryDict[playerName] = playerCountry round1_players.append(playerName) else: playerName = "bye" playerCountry = "" countryDict[playerName] = playerCountry round1_players.append(playerName) if len(span_tags) > 0: seed = span_tags[0] if seed: seed_str = str(seed).strip() for substr in ['\n', '\t', '<', '>', 'span', '\\', '/', '(', ')']: seed_str = seed_str.replace(substr, '') seedDict[playerName] = seed_str else: # round 2, 3, ..., entry a_tags = box.find_all('a') if len(a_tags) > 0: # only true if match has happened playerName = a_tags[0]['data-ga-label'] win_players.append(playerName) else: playerName = "unknown" win_players.append(playerName) drawSize = len(round1_players) if not (drawSize == 8 or drawSize == 16 or drawSize == 32 or \ drawSize == 64 or drawSize == 128): print("cannot convert HTML to db (drawSize = ", drawSize, ")") return # parser was not programmed to handle this case numRounds = int(math.log(drawSize)/math.log(2)) + 1 rounds = [[] for i in range(numRounds)] for i in range(0, drawSize, 2): # round1 rounds[0].append((round1_players[i], round1_players[i+1])) for player in round1_players: # numWinsDict numWinsDict[player] = win_players.count(player) num_players_this_round = drawSize for roundNum in range(1, numRounds, 1): # round2, ... 
num_players_this_round = int(num_players_this_round / 2) if num_players_this_round > 1: for i in range(0, num_players_this_round, 2): w1 = get_winner(rounds[roundNum-1][i], roundNum, numWinsDict) w2 = get_winner(rounds[roundNum-1][i+1], roundNum, numWinsDict) rounds[roundNum].append((w1, w2)) else: # last round w = get_winner(rounds[roundNum-1][0], roundNum, numWinsDict) rounds[roundNum].append((w)) def find_winner(player1, player2, roundNum): if roundNum == numRounds - 1: return None # last round, don't look for winner if roundNum == numRounds - 2: return rounds[roundNum+1][0] for matchup in rounds[roundNum+1]: if player1 in matchup[0] or player1 in matchup[1]: return player1 elif player2 in matchup[0] or player2 in matchup[1]: return player2 return None matchupList = [] for roundNum in range(0, numRounds, 1): # fill matchupList if roundNum != (numRounds - 1): for i in range(0, len(rounds[roundNum]), 1): player1 = rounds[roundNum][i][0] player2 = rounds[roundNum][i][1] winner = find_winner(player1, player2, roundNum) matchup = {"round": roundNum+1, "player1": format_player_name(player1), "player2": format_player_name(player2), "winner": format_player_name(winner)} matchupList.append(matchup) player_finalRound[player1] = roundNum + 1 player_finalRound[player2] = roundNum + 1 else: # final round player1 = rounds[roundNum][0] player2 = "" winner = None matchup = {"round": roundNum+1, "player1": format_player_name(player1), "player2": format_player_name(player2), "winner": format_player_name(winner)} matchupList.append(matchup) player_finalRound[player1] = roundNum + 1 # determine player result # let X = final round # let N = num rounds # let x = N - X + 1 # then result = 2 ^ (x-1) + 1 for player_name, final_round in player_finalRound.items(): x = numRounds - final_round result = 1 if x == 0 else 2 ** (x - 1) + 1 player_result[player_name] = result playerList = [] for player in round1_players: seed = "0" if player not in seedDict else seedDict[player] seed = 
seed.strip() countryIcon = "" if player not in countryDict else countryDict[player] countryCode = countryIcon[-7:].replace(".svg", "") playerList.append({"name": format_player_name(player), "seed": seed, "countryCode": countryCode, "result": player_result[player]}) return {"dates": {"start": dates[0], "end": dates[1]}, "matchups": matchupList, "players": playerList}
[ "atpparser.util.get_archive_url", "atpparser.util.get_draw_url", "urllib.request.Request", "atpparser.util.get_draw_filename", "atpparser.util.get_archive_filename", "math.log", "atpparser.util.format_player_name", "urllib.request.urlopen" ]
[((341, 362), 'atpparser.util.get_archive_url', 'get_archive_url', (['year'], {}), '(year)\n', (356, 362), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((386, 412), 'atpparser.util.get_archive_filename', 'get_archive_filename', (['year'], {}), '(year)\n', (406, 412), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((423, 464), 'urllib.request.Request', 'Request', ([], {'url': 'archive_url', 'headers': 'HEADERS'}), '(url=archive_url, headers=HEADERS)\n', (430, 464), False, 'from urllib.request import urlopen, Request\n'), ((2183, 2206), 'atpparser.util.get_draw_url', 'get_draw_url', (['draw_link'], {}), '(draw_link)\n', (2195, 2206), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((2227, 2267), 'atpparser.util.get_draw_filename', 'get_draw_filename', (['draw_title', 'draw_year'], {}), '(draw_title, draw_year)\n', (2244, 2267), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((2278, 2316), 'urllib.request.Request', 'Request', ([], {'url': 'draw_url', 'headers': 'HEADERS'}), '(url=draw_url, headers=HEADERS)\n', (2285, 2316), False, 'from urllib.request import urlopen, Request\n'), ((476, 488), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (483, 488), False, 'from urllib.request import urlopen, Request\n'), ((2328, 2340), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (2335, 2340), False, 'from urllib.request import urlopen, Request\n'), ((5378, 5396), 'math.log', 'math.log', (['drawSize'], {}), '(drawSize)\n', (5386, 5396), False, 'import math\n'), ((5397, 5408), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (5405, 5408), False, 'import math\n'), ((7663, 7690), 'atpparser.util.format_player_name', 
'format_player_name', (['player1'], {}), '(player1)\n', (7681, 7690), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((7719, 7746), 'atpparser.util.format_player_name', 'format_player_name', (['player2'], {}), '(player2)\n', (7737, 7746), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((7758, 7784), 'atpparser.util.format_player_name', 'format_player_name', (['winner'], {}), '(winner)\n', (7776, 7784), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((8537, 8563), 'atpparser.util.format_player_name', 'format_player_name', (['player'], {}), '(player)\n', (8555, 8563), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((7199, 7226), 'atpparser.util.format_player_name', 'format_player_name', (['player1'], {}), '(player1)\n', (7217, 7226), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((7259, 7286), 'atpparser.util.format_player_name', 'format_player_name', (['player2'], {}), '(player2)\n', (7277, 7286), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n'), ((7298, 7324), 'atpparser.util.format_player_name', 'format_player_name', (['winner'], {}), '(winner)\n', (7316, 7324), False, 'from atpparser.util import format_player_name, get_archive_url, get_archive_filename, get_draw_url, get_draw_filename\n')]
""" Project: SSITH CyberPhysical Demonstrator Name: simulator.py Author: <NAME>, <NAME> <<EMAIL>> Date: 10/01/2020 Python 3.8.3 O/S: Windows 10 This routine creates a BeamNG simulator thread and makes vehicle speed, throttle, brakes, and position available. """ import logging.config import logging import enum import functools import psutil import time import os from cyberphyslib.demonstrator import config, component, message, logger import cyberphyslib.canlib.canspecs as canspecs from beamngpy import BeamNGpy, Scenario, Vehicle from beamngpy.sensors import Electrics, GForces class BeamNgStatus(enum.Enum): """BeamNgComponent Statuses""" EXIT_FINISHED = enum.auto() LAUNCH_FINISHED = enum.auto() READY = enum.auto() SENSOR_START = enum.auto() SENSOR_POLL = enum.auto() SENSOR_END = enum.auto() ERROR = enum.auto() OS_ERROR = enum.auto() RESTART_FINISHED = enum.auto() RESTART_FAILED = enum.auto() RESTART_INVALID = enum.auto() IS_AUTOPILOT = enum.auto() IS_MANUAL = enum.auto() class BeamNgCommand(enum.Enum): """BeamNGComponent Commands""" RESTART = enum.auto() START = enum.auto() STOP = enum.auto() TOGGLE_PAUSE = enum.auto() REQUEST_STATE = enum.auto() WAIT_READY = enum.auto() ENABLE_AUTOPILOT = enum.auto() DISABLE_AUTOPILOT = enum.auto() UI_BUTTON_PRESSED = enum.auto() AUTOPILOT_STATUS = enum.auto() def requires_running_scenario(func): """guard Sim methods that requires a running scenario (e.g. 
poll sensors)""" @functools.wraps(func) def inner(self, *args, **kwargs): if self._beamng_context.skt is not None: return func(self, *args, **kwargs) return inner class Sim(component.ComponentPoller): beamng_process_name = "BeamNG.tech.x64.exe" @staticmethod def is_running_beamng(): """returns whether BeamNG process is running""" try: return Sim.beamng_process_name in (p.name() for p in psutil.process_iter()) except Exception: return False @staticmethod def kill_beamng(attempts=2): """kill BeamNG process if it's running""" if Sim.is_running_beamng(): os.system(f"taskkill /im {Sim.beamng_process_name}") time.sleep(1) if Sim.is_running_beamng(): if attempts > 0: logging.warning(f"kill_beamng failed. Trying again...") Sim.kill_beamng(attempts=attempts - 1) else: # TODO: error handling system? raise RuntimeError(f"task_kill_beamng failed") in_descr = config.BEAMNG_COMPONENT_INPUT out_descr = config.BEAMNG_COMPONENT_OUTPUT def __init__(self): self.port = config.BEAMNG_PORT self.path = config.BEAMNG_PATH # component network setup super().__init__("beamng", self.in_descr, self.out_descr, sample_frequency=120.0) self._beamng_context = None self._scenario = None self._vehicle = None self._location = None self._sensors = None self._scenario = None self.sensor_output = None self.color = [0.,]*3 self.control = {} self.control_evt = True self._start_finished = False self._enable_autopilot = False self._disable_autopilot = False self._in_autopilot = False self._restart_scenario = False self.beamng_start_finished = False # record whether sim is paused or not self._is_paused = False def on_start(self) -> None: if not self.stopped: self.start_poller() def on_poll_start(self) -> None: """beamNG session and scenario load NOTE: mainloop is included here due to thread access conflicts in on_poll """ bng_args = { "home": config.BEAMNG_PATH, "user": config.BEAMNG_USER_PATH} self._beamng_context = BeamNGpy('localhost', config.BEAMNG_PORT, **bng_args) self._beamng_context.open() 
self._scenario = Scenario('italy', 'SSITH', description='Drive protected.') self._vehicle = Vehicle('ego_vehicle', licence='SSITH', **config.BEAMNG_VEHICLE_CONFIG, color='Red') gforces = GForces() electrics = Electrics() # Attach them self._vehicle.attach_sensor('gforces', gforces) self._vehicle.attach_sensor('electrics', electrics) self._scenario.add_vehicle(self._vehicle, **config.BEAMNG_ITALY_SPAWNPOINTS[config.BEAMNG_SCENARIO_SPAWNPOINT]) # Compile the scenario and place it in BeamNG's map folder self._scenario.make(self._beamng_context) try: # Start BeamNG and enter the main loop assert not self.polling_thread.stopped self._beamng_context.hide_hud() # Load and start the scenario assert not self.polling_thread.stopped self._beamng_context.load_scenario(self._scenario) self._beamng_context.set_relative_camera((-0.3, -.5, 0.95)) self._beamng_context.start_scenario() assert not self.polling_thread.stopped self._beamng_context.resume() assert self._vehicle.skt self._vehicle.connect(self._beamng_context) self.beamng_start_finished = True self.send_message(message.Message(BeamNgStatus.READY), 'beamng-events') except Exception as exc: logger.sim_logger.error(f"Failed to create BeamNG session and load scenario <{exc}>") self.send_message(message.Message(BeamNgStatus.OS_ERROR), 'beamng-events') self.kill_beamng() def on_poll_poll(self, t): """simulator mainloop""" while not self.polling_thread.stopped: try: # handle autopilot request # NOTE: restarts the scenario if self._enable_autopilot: self._vehicle.ai_set_mode('span') self._enable_autopilot = False self._disable_autopilot = False self._in_autopilot = True elif self._disable_autopilot: self._vehicle.ai_set_mode('disabled') self._disable_autopilot = False self._enable_autopilot = False self._in_autopilot = False if self._restart_scenario: self._restart_scenario = False self._beamng_context.restart_scenario() # handle vehicle control event if (self._vehicle is not None) and (self._vehicle.skt): # do not control 
vehicle if in autopilot if self.control_evt and self.control != {} and not self._in_autopilot: self._vehicle.control(**self.control) self.control_evt = False self.control = {} self.sensor_output = self._beamng_context.poll_sensors(self._vehicle) self.send_message(message.Message(self.sensor_output["electrics"]), "beamng-sensors") self._vehicle.update_vehicle() self._location = (tuple(self._vehicle.state["pos"]), tuple(self._vehicle.state["dir"])) self.send_message(message.Message(self._location), "beamng-vehicle") except ConnectionAbortedError: self.exit() except Exception as exc: pass def on_poll_exit(self) -> None: """beamNG exit method""" logger.sim_logger.info(f"{self.__class__.__name__} Exit Signal Received. This might take a while...") self.kill_beamng() if self._beamng_context is not None: self._beamng_context.close() def exit(self): super(Sim, self).exit() ########## can receive ########### def control_process(self, name, data, bounds=(0.0, 1.0)): data = min(max(data[0], bounds[0]), bounds[1]) self.control[name] = data self.control_evt = True @recv_can(canspecs.CAN_ID_STEERING_INPUT, canspecs.CAN_FORMAT_STEERING_INPUT) def _(self, data): """steering -1.0, 1.0""" data = (float(data[0])/100.0,) return self.control_process("steering", data, bounds=(-1.0, 1.0)) @recv_can(canspecs.CAN_ID_THROTTLE_INPUT, canspecs.CAN_FORMAT_THROTTLE_INPUT) def _(self, data): """throttle [0..100] -> 0.0, 1.0""" data = (float(data[0])/100.0,) return self.control_process("throttle", data) @recv_can(canspecs.CAN_ID_BRAKE_INPUT, canspecs.CAN_FORMAT_BRAKE_INPUT) def _(self, data): """brake [0..100] -> 0.0, 1.0""" data = (float(data[0])/100.0,) return self.control_process("brake", data) @recv_can(canspecs.CAN_ID_GEAR, canspecs.CAN_FORMAT_GEAR) def _(self, data): """gear [P, R, N, D] -> -1, 5""" val, = data gear_map = {b'P': 1, b'R': -1, b'N': 0, b'D': 2} gear = gear_map.get(val, 0) return self.control_process("gear", (gear,), bounds=(-1, 5)) ########## register topic receive methods 
########## def wait_ready_command(self): """wait until the service has finished booting""" import time while not self.beamng_start_finished: time.sleep(0.2) return component.ComponentStatus.READY def restart_command(self): try: if self._start_finished: self._restart_scenario = True else: return BeamNgStatus.RESTART_INVALID except Exception as exc: return BeamNgStatus.RESTART_FAILED return BeamNgStatus.RESTART_FINISHED def toggle_pause_command(self): """pause the simulator""" if self._start_finished: if self._is_paused: self._beamng_context.resume() else: self._beamng_context.pause() self._is_paused = not self._is_paused return BeamNgStatus.READY else: return BeamNgStatus.ERROR def enable_autopilot_command(self): if self._start_finished: self._enable_autopilot = True return BeamNgStatus.READY def disable_autopilot_command(self): """disable the autopilot""" if self._start_finished: self._disable_autopilot = True return BeamNgStatus.READY def autopilot_status_command(self): """disable the autopilot""" if self._in_autopilot: return message.Message(BeamNgStatus.IS_AUTOPILOT) return BeamNgStatus.IS_MANUAL def ui_button_pressed_command(self): if self._start_finished: if self._in_autopilot: self._disable_autopilot = True self._in_autopilot = None @recv_topic("beamng-commands", BeamNgCommand.RESTART) def _(self, t): """restart the scenario""" return message.Message(self.restart_command()) @recv_topic("beamng-commands", BeamNgCommand.WAIT_READY) def _(self, t): """wait until the service has finished booting""" return message.Message(self.wait_ready_command()) @recv_topic("beamng-commands", BeamNgCommand.TOGGLE_PAUSE) def _(self, t): """pause the simulator""" return message.Message(self.toggle_pause_command()) @recv_topic("beamng-commands", BeamNgCommand.ENABLE_AUTOPILOT) def _(self, t): return message.Message(self.enable_autopilot_command()) @recv_topic("beamng-commands", BeamNgCommand.DISABLE_AUTOPILOT) def _(self, t): """disable the autopilot""" return 
message.Message(self.disable_autopilot_command()) @recv_topic("beamng-commands", BeamNgCommand.AUTOPILOT_STATUS) def _(self, t): """disable the autopilot""" return message.Message(self.autopilot_status_command()) @recv_topic("infoui-beamng", BeamNgCommand.UI_BUTTON_PRESSED) def _(self, t): if self._start_finished: if self._in_autopilot: self._disable_autopilot = True self._in_autopilot = None self.restart_command()
[ "beamngpy.sensors.Electrics", "beamngpy.BeamNGpy", "cyberphyslib.demonstrator.logger.sim_logger.error", "enum.auto", "beamngpy.sensors.GForces", "logging.warning", "psutil.process_iter", "functools.wraps", "cyberphyslib.demonstrator.logger.sim_logger.info", "time.sleep", "cyberphyslib.demonstrat...
[((674, 685), 'enum.auto', 'enum.auto', ([], {}), '()\n', (683, 685), False, 'import enum\n'), ((708, 719), 'enum.auto', 'enum.auto', ([], {}), '()\n', (717, 719), False, 'import enum\n'), ((732, 743), 'enum.auto', 'enum.auto', ([], {}), '()\n', (741, 743), False, 'import enum\n'), ((763, 774), 'enum.auto', 'enum.auto', ([], {}), '()\n', (772, 774), False, 'import enum\n'), ((793, 804), 'enum.auto', 'enum.auto', ([], {}), '()\n', (802, 804), False, 'import enum\n'), ((822, 833), 'enum.auto', 'enum.auto', ([], {}), '()\n', (831, 833), False, 'import enum\n'), ((846, 857), 'enum.auto', 'enum.auto', ([], {}), '()\n', (855, 857), False, 'import enum\n'), ((873, 884), 'enum.auto', 'enum.auto', ([], {}), '()\n', (882, 884), False, 'import enum\n'), ((908, 919), 'enum.auto', 'enum.auto', ([], {}), '()\n', (917, 919), False, 'import enum\n'), ((941, 952), 'enum.auto', 'enum.auto', ([], {}), '()\n', (950, 952), False, 'import enum\n'), ((975, 986), 'enum.auto', 'enum.auto', ([], {}), '()\n', (984, 986), False, 'import enum\n'), ((1006, 1017), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1015, 1017), False, 'import enum\n'), ((1034, 1045), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1043, 1045), False, 'import enum\n'), ((1129, 1140), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1138, 1140), False, 'import enum\n'), ((1153, 1164), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1162, 1164), False, 'import enum\n'), ((1176, 1187), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1185, 1187), False, 'import enum\n'), ((1207, 1218), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1216, 1218), False, 'import enum\n'), ((1239, 1250), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1248, 1250), False, 'import enum\n'), ((1268, 1279), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1277, 1279), False, 'import enum\n'), ((1303, 1314), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1312, 1314), False, 'import enum\n'), ((1339, 1350), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1348, 1350), False, 
'import enum\n'), ((1375, 1386), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1384, 1386), False, 'import enum\n'), ((1410, 1421), 'enum.auto', 'enum.auto', ([], {}), '()\n', (1419, 1421), False, 'import enum\n'), ((1547, 1568), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1562, 1568), False, 'import functools\n'), ((3986, 4039), 'beamngpy.BeamNGpy', 'BeamNGpy', (['"""localhost"""', 'config.BEAMNG_PORT'], {}), "('localhost', config.BEAMNG_PORT, **bng_args)\n", (3994, 4039), False, 'from beamngpy import BeamNGpy, Scenario, Vehicle\n'), ((4102, 4160), 'beamngpy.Scenario', 'Scenario', (['"""italy"""', '"""SSITH"""'], {'description': '"""Drive protected."""'}), "('italy', 'SSITH', description='Drive protected.')\n", (4110, 4160), False, 'from beamngpy import BeamNGpy, Scenario, Vehicle\n'), ((4219, 4307), 'beamngpy.Vehicle', 'Vehicle', (['"""ego_vehicle"""'], {'licence': '"""SSITH"""', 'color': '"""Red"""'}), "('ego_vehicle', licence='SSITH', **config.BEAMNG_VEHICLE_CONFIG,\n color='Red')\n", (4226, 4307), False, 'from beamngpy import BeamNGpy, Scenario, Vehicle\n'), ((4355, 4364), 'beamngpy.sensors.GForces', 'GForces', ([], {}), '()\n', (4362, 4364), False, 'from beamngpy.sensors import Electrics, GForces\n'), ((4385, 4396), 'beamngpy.sensors.Electrics', 'Electrics', ([], {}), '()\n', (4394, 4396), False, 'from beamngpy.sensors import Electrics, GForces\n'), ((7782, 7893), 'cyberphyslib.demonstrator.logger.sim_logger.info', 'logger.sim_logger.info', (['f"""{self.__class__.__name__} Exit Signal Received. This might take a while..."""'], {}), "(\n f'{self.__class__.__name__} Exit Signal Received. 
This might take a while...'\n )\n", (7804, 7893), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((2213, 2265), 'os.system', 'os.system', (['f"""taskkill /im {Sim.beamng_process_name}"""'], {}), "(f'taskkill /im {Sim.beamng_process_name}')\n", (2222, 2265), False, 'import os\n'), ((2278, 2291), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2288, 2291), False, 'import time\n'), ((9536, 9551), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9546, 9551), False, 'import time\n'), ((10772, 10814), 'cyberphyslib.demonstrator.message.Message', 'message.Message', (['BeamNgStatus.IS_AUTOPILOT'], {}), '(BeamNgStatus.IS_AUTOPILOT)\n', (10787, 10814), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((5515, 5550), 'cyberphyslib.demonstrator.message.Message', 'message.Message', (['BeamNgStatus.READY'], {}), '(BeamNgStatus.READY)\n', (5530, 5550), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((5614, 5704), 'cyberphyslib.demonstrator.logger.sim_logger.error', 'logger.sim_logger.error', (['f"""Failed to create BeamNG session and load scenario <{exc}>"""'], {}), "(\n f'Failed to create BeamNG session and load scenario <{exc}>')\n", (5637, 5704), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((2385, 2440), 'logging.warning', 'logging.warning', (['f"""kill_beamng failed. Trying again..."""'], {}), "(f'kill_beamng failed. 
Trying again...')\n", (2400, 2440), False, 'import logging\n'), ((5730, 5768), 'cyberphyslib.demonstrator.message.Message', 'message.Message', (['BeamNgStatus.OS_ERROR'], {}), '(BeamNgStatus.OS_ERROR)\n', (5745, 5768), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((7238, 7286), 'cyberphyslib.demonstrator.message.Message', 'message.Message', (["self.sensor_output['electrics']"], {}), "(self.sensor_output['electrics'])\n", (7253, 7286), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((7524, 7555), 'cyberphyslib.demonstrator.message.Message', 'message.Message', (['self._location'], {}), '(self._location)\n', (7539, 7555), False, 'from cyberphyslib.demonstrator import config, component, message, logger\n'), ((1989, 2010), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (2008, 2010), False, 'import psutil\n')]
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'GitHubRepositoryCodeArgs', 'GitHubRepositoryS3Args', ] @pulumi.input_type class GitHubRepositoryCodeArgs: def __init__(__self__, *, s3: pulumi.Input['GitHubRepositoryS3Args']): pulumi.set(__self__, "s3", s3) @property @pulumi.getter def s3(self) -> pulumi.Input['GitHubRepositoryS3Args']: return pulumi.get(self, "s3") @s3.setter def s3(self, value: pulumi.Input['GitHubRepositoryS3Args']): pulumi.set(self, "s3", value) @pulumi.input_type class GitHubRepositoryS3Args: def __init__(__self__, *, bucket: pulumi.Input[str], key: pulumi.Input[str], object_version: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "bucket", bucket) pulumi.set(__self__, "key", key) if object_version is not None: pulumi.set(__self__, "object_version", object_version) @property @pulumi.getter def bucket(self) -> pulumi.Input[str]: return pulumi.get(self, "bucket") @bucket.setter def bucket(self, value: pulumi.Input[str]): pulumi.set(self, "bucket", value) @property @pulumi.getter def key(self) -> pulumi.Input[str]: return pulumi.get(self, "key") @key.setter def key(self, value: pulumi.Input[str]): pulumi.set(self, "key", value) @property @pulumi.getter(name="objectVersion") def object_version(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "object_version") @object_version.setter def object_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "object_version", value)
[ "pulumi.getter", "pulumi.set", "pulumi.get" ]
[((1720, 1755), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""objectVersion"""'}), "(name='objectVersion')\n", (1733, 1755), False, 'import pulumi\n'), ((545, 575), 'pulumi.set', 'pulumi.set', (['__self__', '"""s3"""', 's3'], {}), "(__self__, 's3', s3)\n", (555, 575), False, 'import pulumi\n'), ((685, 707), 'pulumi.get', 'pulumi.get', (['self', '"""s3"""'], {}), "(self, 's3')\n", (695, 707), False, 'import pulumi\n'), ((797, 826), 'pulumi.set', 'pulumi.set', (['self', '"""s3"""', 'value'], {}), "(self, 's3', value)\n", (807, 826), False, 'import pulumi\n'), ((1071, 1109), 'pulumi.set', 'pulumi.set', (['__self__', '"""bucket"""', 'bucket'], {}), "(__self__, 'bucket', bucket)\n", (1081, 1109), False, 'import pulumi\n'), ((1118, 1150), 'pulumi.set', 'pulumi.set', (['__self__', '"""key"""', 'key'], {}), "(__self__, 'key', key)\n", (1128, 1150), False, 'import pulumi\n'), ((1349, 1375), 'pulumi.get', 'pulumi.get', (['self', '"""bucket"""'], {}), "(self, 'bucket')\n", (1359, 1375), False, 'import pulumi\n'), ((1452, 1485), 'pulumi.set', 'pulumi.set', (['self', '"""bucket"""', 'value'], {}), "(self, 'bucket', value)\n", (1462, 1485), False, 'import pulumi\n'), ((1575, 1598), 'pulumi.get', 'pulumi.get', (['self', '"""key"""'], {}), "(self, 'key')\n", (1585, 1598), False, 'import pulumi\n'), ((1669, 1699), 'pulumi.set', 'pulumi.set', (['self', '"""key"""', 'value'], {}), "(self, 'key', value)\n", (1679, 1699), False, 'import pulumi\n'), ((1832, 1866), 'pulumi.get', 'pulumi.get', (['self', '"""object_version"""'], {}), "(self, 'object_version')\n", (1842, 1866), False, 'import pulumi\n'), ((1969, 2010), 'pulumi.set', 'pulumi.set', (['self', '"""object_version"""', 'value'], {}), "(self, 'object_version', value)\n", (1979, 2010), False, 'import pulumi\n'), ((1202, 1256), 'pulumi.set', 'pulumi.set', (['__self__', '"""object_version"""', 'object_version'], {}), "(__self__, 'object_version', object_version)\n", (1212, 1256), False, 'import pulumi\n')]
"""Classes and functions for configuring BIG-IP""" # Copyright 2014 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging from f5.bigip.cm import Cm from f5.bigip.ltm import Ltm from f5.bigip.net import Net from f5.bigip.resource import OrganizingCollection from f5.bigip.sys import Sys from icontrol.session import iControlRESTSession LOG = logging.getLogger(__name__) allowed_lazy_attributes = [Cm, Ltm, Net, Sys] class BigIP(OrganizingCollection): """An interface to a single BIG-IP""" def __init__(self, hostname, username, password, **kwargs): timeout = kwargs.pop('timeout', 30) loglevel = kwargs.pop('loglevel', logging.WARNING) allowed_lazy_attrs = kwargs.pop('allowed_lazy_attributes', allowed_lazy_attributes) if kwargs: raise TypeError('Unexpected **kwargs: %r' % kwargs) # _meta_data variable values iCRS = iControlRESTSession(username, password, timeout=timeout, loglevel=loglevel) # define _meta_data self._meta_data = {'allowed_lazy_attributes': allowed_lazy_attrs, 'hostname': hostname, 'uri': 'https://%s/mgmt/tm/' % hostname, 'icr_session': iCRS, 'device_name': None, 'local_ip': None, 'bigip': self}
[ "logging.getLogger", "icontrol.session.iControlRESTSession" ]
[((870, 897), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (887, 897), False, 'import logging\n'), ((1457, 1532), 'icontrol.session.iControlRESTSession', 'iControlRESTSession', (['username', 'password'], {'timeout': 'timeout', 'loglevel': 'loglevel'}), '(username, password, timeout=timeout, loglevel=loglevel)\n', (1476, 1532), False, 'from icontrol.session import iControlRESTSession\n')]
from unittest import TestCase from tests.assertions import CustomAssertions import scipy.sparse import numpy as np import tests.rabi as rabi import floq class TestSetBlock(TestCase): def setUp(self): self.dim_block = 5 self.n_block = 3 self.a, self.b, self.c, self.d, self.e, self.f, self.g, self.h, self.i \ = [j*np.ones([self.dim_block, self.dim_block]) for j in range(9)] matrix = np.bmat([[self.a, self.b, self.c], [self.d, self.e, self.f], [self.g, self.h, self.i]]) self.original = np.array(matrix) total_size = self.dim_block*self.n_block self.copy = np.zeros([total_size,total_size]) def test_set(self): # Try to recreate self.original with the new function floq.evolution._add_block(self.a, self.copy, self.dim_block, self.n_block, 0, 0) floq.evolution._add_block(self.b, self.copy, self.dim_block, self.n_block, 0, 1) floq.evolution._add_block(self.c, self.copy, self.dim_block, self.n_block, 0, 2) floq.evolution._add_block(self.d, self.copy, self.dim_block, self.n_block, 1, 0) floq.evolution._add_block(self.e, self.copy, self.dim_block, self.n_block, 1, 1) floq.evolution._add_block(self.f, self.copy, self.dim_block, self.n_block, 1, 2) floq.evolution._add_block(self.g, self.copy, self.dim_block, self.n_block, 2, 0) floq.evolution._add_block(self.h, self.copy, self.dim_block, self.n_block, 2, 1) floq.evolution._add_block(self.i, self.copy, self.dim_block, self.n_block, 2, 2) self.assertTrue(np.array_equal(self.copy,self.original)) class TestAssembleK(CustomAssertions): def setUp(self): self.n_zones = 5 self.frequency = 1 dim=2 a = -1.*np.ones([dim, dim]) b = np.zeros([dim, dim]) c = np.ones([dim, dim]) z = np.zeros([dim, dim]) i = np.identity(dim) self.goalk = np.array( np.bmat( [[b-2*i, a, z, z, z], [c, b-i, a, z, z], [z, c, b, a, z], [z, z, c, b+i, a], [z, z, z, c, b+2*i]])) self.hf = floq.system._canonicalise_operator(np.array([a, b, c])) def test_dense(self): builtk = floq.evolution.assemble_k(self.hf, self.n_zones, self.frequency) self.assertArrayEqual(builtk, 
self.goalk) def test_sparse(self): builtk = floq.evolution.assemble_k_sparse(self.hf, self.n_zones, self.frequency) self.assertTrue(scipy.sparse.issparse(builtk)) self.assertArrayEqual(builtk.toarray(), self.goalk) class TestDenseToSparse(CustomAssertions): def test_conversion(self): goal = floq.types.ColumnSparseMatrix(np.array([1, 2]), np.array([1, 0, 1]), np.array([2, 1, 3])) built = floq.evolution._dense_to_sparse(np.arange(4).reshape(2, 2)) self.assertColumnSparseMatrixEqual(built, goal) class TestAssembledK(CustomAssertions): def setUp(self): self.n_zones = 5 dim=2 a = -1.*np.ones([dim, dim]) b = np.zeros([dim, dim]) c = np.ones([dim, dim]) z = np.zeros([dim, dim]) i = np.identity(dim) dk1 = np.array( np.bmat( [[b, a, z, z, z], [c, b, a, z, z], [z, c, b, a, z], [z, z, c, b, a], [z, z, z, c, b]])) dk2 = np.array( np.bmat( [[b, b, z, z, z], [a, b, b, z, z], [z, a, b, b, z], [z, z, a, b, b], [z, z, z, a, b]])) self.goaldk = [floq.evolution._dense_to_sparse(x) for x in [dk1, dk2]] self.dhf = [floq.system._canonicalise_operator(np.array([a, b, c])), floq.system._canonicalise_operator(np.array([b, b, a]))] def test_build(self): builtdk = floq.evolution.assemble_dk(self.dhf, self.n_zones) for i, bdk in enumerate(builtdk): self.assertColumnSparseMatrixEqual(bdk, self.goaldk[i]) class TestFindEigensystem(CustomAssertions): def setUp(self): self.target_vals = np.array([-0.235, 0.753]) # random matrix with known eigenvalues: # {-1.735, -0.747, -0.235, 0.753, 1.265, 2.253} k = np.array([[-0.0846814, -0.0015136 - 0.33735j, -0.210771 + 0.372223j, 0.488512 - 0.769537j, -0.406266 + 0.315634j, -0.334452 + 0.251584j], [-0.0015136 + 0.33735j, 0.809781, -0.416533 - 0.432041j, -0.571074 - 0.669052j, -0.665971 + 0.387569j, -0.297409 - 0.0028969j], [-0.210771 - 0.372223j, -0.416533 + 0.432041j, -0.0085791, 0.110085 + 0.255156j, 0.958938 - 0.17233j, -0.91924 + 0.126004j], [0.488512 + 0.769537j, -0.571074 + 0.669052j, 0.110085 - 0.255156j, -0.371663, 0.279778 + 0.477653j, -0.496302 + 
1.04898j], [-0.406266 - 0.315634j, -0.665971 - 0.387569j, 0.958938 + 0.17233j, 0.279778 - 0.477653j, -0.731623, 0.525248 + 0.0443422j], [-0.334452 - 0.251584j, -0.297409 + 0.0028969j, -0.91924 - 0.126004j, -0.496302 - 1.04898j, 0.525248 - 0.0443422j, 1.94077]], dtype='complex128') e1 = np.array([[0.0321771 - 0.52299j, 0.336377 + 0.258732j], [0.371002 + 0.0071587j, 0.237385 + 0.205185j], [0.525321 + 0.j, 0.0964822 + 0.154715j]]) e2 = np.array([[0.593829 + 0.j, -0.105998 - 0.394563j], [-0.0737891 - 0.419478j, 0.323414 + 0.350387j], [-0.05506 - 0.169033j, -0.0165495 + 0.199498j]]) self.target_vecs = np.array([e1, e2]) omega = 2.1 dim = 2 self.vals, self.vecs = floq.evolution.diagonalise(k, dim, omega, 3) def test_finds_vals(self): self.assertArrayEqual(self.vals, self.target_vals) def test_finds_vecs(self): self.assertArrayEqual(self.vecs, self.target_vecs, decimals=3) def test_casts_as_complex128(self): self.assertEqual(self.vecs.dtype, 'complex128') class TestFindDuplicates(CustomAssertions): def test_duplicates(self): a = np.round(np.array([1, 2.001, 2.003, 1.999, 3]), decimals=2) res = tuple(floq.evolution._find_duplicates(a)) self.assertEqual(len(res), 1) self.assertArrayEqual([1, 2, 3], res[0]) def test_empty_if_no_dup(self): a = np.round(np.array([1, 2.001, 4.003, 8.999, 10]), decimals=2) res = tuple(floq.evolution._find_duplicates(a)) self.assertEqual(res, ()) def test_multiple_duplicates(self): a = np.array([1., 1., 2., 2., 3., 4., 4.]) res = tuple(floq.evolution._find_duplicates(a)) self.assertEqual(len(res), 3) self.assertArrayEqual([[0, 1], [2, 3], [5, 6]], res)
[ "numpy.identity", "numpy.ones", "numpy.arange", "floq.evolution._dense_to_sparse", "floq.evolution._add_block", "floq.evolution.assemble_k", "floq.evolution.assemble_dk", "numpy.array", "numpy.zeros", "floq.evolution._find_duplicates", "numpy.array_equal", "floq.evolution.assemble_k_sparse", ...
[((435, 527), 'numpy.bmat', 'np.bmat', (['[[self.a, self.b, self.c], [self.d, self.e, self.f], [self.g, self.h, self.i]]'], {}), '([[self.a, self.b, self.c], [self.d, self.e, self.f], [self.g, self.\n h, self.i]])\n', (442, 527), True, 'import numpy as np\n'), ((597, 613), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (605, 613), True, 'import numpy as np\n'), ((684, 718), 'numpy.zeros', 'np.zeros', (['[total_size, total_size]'], {}), '([total_size, total_size])\n', (692, 718), True, 'import numpy as np\n'), ((813, 898), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.a', 'self.copy', 'self.dim_block', 'self.n_block', '(0)', '(0)'], {}), '(self.a, self.copy, self.dim_block, self.n_block, 0, 0\n )\n', (838, 898), False, 'import floq\n'), ((902, 987), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.b', 'self.copy', 'self.dim_block', 'self.n_block', '(0)', '(1)'], {}), '(self.b, self.copy, self.dim_block, self.n_block, 0, 1\n )\n', (927, 987), False, 'import floq\n'), ((991, 1076), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.c', 'self.copy', 'self.dim_block', 'self.n_block', '(0)', '(2)'], {}), '(self.c, self.copy, self.dim_block, self.n_block, 0, 2\n )\n', (1016, 1076), False, 'import floq\n'), ((1080, 1165), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.d', 'self.copy', 'self.dim_block', 'self.n_block', '(1)', '(0)'], {}), '(self.d, self.copy, self.dim_block, self.n_block, 1, 0\n )\n', (1105, 1165), False, 'import floq\n'), ((1169, 1254), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.e', 'self.copy', 'self.dim_block', 'self.n_block', '(1)', '(1)'], {}), '(self.e, self.copy, self.dim_block, self.n_block, 1, 1\n )\n', (1194, 1254), False, 'import floq\n'), ((1258, 1343), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.f', 'self.copy', 'self.dim_block', 'self.n_block', '(1)', '(2)'], {}), '(self.f, self.copy, self.dim_block, self.n_block, 1, 
2\n )\n', (1283, 1343), False, 'import floq\n'), ((1347, 1432), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.g', 'self.copy', 'self.dim_block', 'self.n_block', '(2)', '(0)'], {}), '(self.g, self.copy, self.dim_block, self.n_block, 2, 0\n )\n', (1372, 1432), False, 'import floq\n'), ((1436, 1521), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.h', 'self.copy', 'self.dim_block', 'self.n_block', '(2)', '(1)'], {}), '(self.h, self.copy, self.dim_block, self.n_block, 2, 1\n )\n', (1461, 1521), False, 'import floq\n'), ((1525, 1610), 'floq.evolution._add_block', 'floq.evolution._add_block', (['self.i', 'self.copy', 'self.dim_block', 'self.n_block', '(2)', '(2)'], {}), '(self.i, self.copy, self.dim_block, self.n_block, 2, 2\n )\n', (1550, 1610), False, 'import floq\n'), ((1847, 1867), 'numpy.zeros', 'np.zeros', (['[dim, dim]'], {}), '([dim, dim])\n', (1855, 1867), True, 'import numpy as np\n'), ((1880, 1899), 'numpy.ones', 'np.ones', (['[dim, dim]'], {}), '([dim, dim])\n', (1887, 1899), True, 'import numpy as np\n'), ((1912, 1932), 'numpy.zeros', 'np.zeros', (['[dim, dim]'], {}), '([dim, dim])\n', (1920, 1932), True, 'import numpy as np\n'), ((1945, 1961), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (1956, 1961), True, 'import numpy as np\n'), ((2317, 2381), 'floq.evolution.assemble_k', 'floq.evolution.assemble_k', (['self.hf', 'self.n_zones', 'self.frequency'], {}), '(self.hf, self.n_zones, self.frequency)\n', (2342, 2381), False, 'import floq\n'), ((2477, 2548), 'floq.evolution.assemble_k_sparse', 'floq.evolution.assemble_k_sparse', (['self.hf', 'self.n_zones', 'self.frequency'], {}), '(self.hf, self.n_zones, self.frequency)\n', (2509, 2548), False, 'import floq\n'), ((3266, 3286), 'numpy.zeros', 'np.zeros', (['[dim, dim]'], {}), '([dim, dim])\n', (3274, 3286), True, 'import numpy as np\n'), ((3299, 3318), 'numpy.ones', 'np.ones', (['[dim, dim]'], {}), '([dim, dim])\n', (3306, 3318), True, 'import numpy as np\n'), 
((3331, 3351), 'numpy.zeros', 'np.zeros', (['[dim, dim]'], {}), '([dim, dim])\n', (3339, 3351), True, 'import numpy as np\n'), ((3364, 3380), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (3375, 3380), True, 'import numpy as np\n'), ((4095, 4145), 'floq.evolution.assemble_dk', 'floq.evolution.assemble_dk', (['self.dhf', 'self.n_zones'], {}), '(self.dhf, self.n_zones)\n', (4121, 4145), False, 'import floq\n'), ((4350, 4375), 'numpy.array', 'np.array', (['[-0.235, 0.753]'], {}), '([-0.235, 0.753])\n', (4358, 4375), True, 'import numpy as np\n'), ((4492, 5330), 'numpy.array', 'np.array', (['[[-0.0846814, -0.0015136 - 0.33735j, -0.210771 + 0.372223j, 0.488512 - \n 0.769537j, -0.406266 + 0.315634j, -0.334452 + 0.251584j], [-0.0015136 +\n 0.33735j, 0.809781, -0.416533 - 0.432041j, -0.571074 - 0.669052j, -\n 0.665971 + 0.387569j, -0.297409 - 0.0028969j], [-0.210771 - 0.372223j, \n -0.416533 + 0.432041j, -0.0085791, 0.110085 + 0.255156j, 0.958938 - \n 0.17233j, -0.91924 + 0.126004j], [0.488512 + 0.769537j, -0.571074 + \n 0.669052j, 0.110085 - 0.255156j, -0.371663, 0.279778 + 0.477653j, -\n 0.496302 + 1.04898j], [-0.406266 - 0.315634j, -0.665971 - 0.387569j, \n 0.958938 + 0.17233j, 0.279778 - 0.477653j, -0.731623, 0.525248 + \n 0.0443422j], [-0.334452 - 0.251584j, -0.297409 + 0.0028969j, -0.91924 -\n 0.126004j, -0.496302 - 1.04898j, 0.525248 - 0.0443422j, 1.94077]]'], {'dtype': '"""complex128"""'}), "([[-0.0846814, -0.0015136 - 0.33735j, -0.210771 + 0.372223j, \n 0.488512 - 0.769537j, -0.406266 + 0.315634j, -0.334452 + 0.251584j], [-\n 0.0015136 + 0.33735j, 0.809781, -0.416533 - 0.432041j, -0.571074 - \n 0.669052j, -0.665971 + 0.387569j, -0.297409 - 0.0028969j], [-0.210771 -\n 0.372223j, -0.416533 + 0.432041j, -0.0085791, 0.110085 + 0.255156j, \n 0.958938 - 0.17233j, -0.91924 + 0.126004j], [0.488512 + 0.769537j, -\n 0.571074 + 0.669052j, 0.110085 - 0.255156j, -0.371663, 0.279778 + \n 0.477653j, -0.496302 + 1.04898j], [-0.406266 - 0.315634j, -0.665971 - \n 
0.387569j, 0.958938 + 0.17233j, 0.279778 - 0.477653j, -0.731623, \n 0.525248 + 0.0443422j], [-0.334452 - 0.251584j, -0.297409 + 0.0028969j,\n -0.91924 - 0.126004j, -0.496302 - 1.04898j, 0.525248 - 0.0443422j, \n 1.94077]], dtype='complex128')\n", (4500, 5330), True, 'import numpy as np\n'), ((5644, 5799), 'numpy.array', 'np.array', (['[[0.0321771 - 0.52299j, 0.336377 + 0.258732j], [0.371002 + 0.0071587j, \n 0.237385 + 0.205185j], [0.525321 + 0.0j, 0.0964822 + 0.154715j]]'], {}), '([[0.0321771 - 0.52299j, 0.336377 + 0.258732j], [0.371002 + \n 0.0071587j, 0.237385 + 0.205185j], [0.525321 + 0.0j, 0.0964822 + \n 0.154715j]])\n', (5652, 5799), True, 'import numpy as np\n'), ((5848, 6000), 'numpy.array', 'np.array', (['[[0.593829 + 0.0j, -0.105998 - 0.394563j], [-0.0737891 - 0.419478j, \n 0.323414 + 0.350387j], [-0.05506 - 0.169033j, -0.0165495 + 0.199498j]]'], {}), '([[0.593829 + 0.0j, -0.105998 - 0.394563j], [-0.0737891 - 0.419478j,\n 0.323414 + 0.350387j], [-0.05506 - 0.169033j, -0.0165495 + 0.199498j]])\n', (5856, 6000), True, 'import numpy as np\n'), ((6069, 6087), 'numpy.array', 'np.array', (['[e1, e2]'], {}), '([e1, e2])\n', (6077, 6087), True, 'import numpy as np\n'), ((6156, 6200), 'floq.evolution.diagonalise', 'floq.evolution.diagonalise', (['k', 'dim', 'omega', '(3)'], {}), '(k, dim, omega, 3)\n', (6182, 6200), False, 'import floq\n'), ((7036, 7081), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0]'], {}), '([1.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0])\n', (7044, 7081), True, 'import numpy as np\n'), ((1630, 1670), 'numpy.array_equal', 'np.array_equal', (['self.copy', 'self.original'], {}), '(self.copy, self.original)\n', (1644, 1670), True, 'import numpy as np\n'), ((1815, 1834), 'numpy.ones', 'np.ones', (['[dim, dim]'], {}), '([dim, dim])\n', (1822, 1834), True, 'import numpy as np\n'), ((2006, 2128), 'numpy.bmat', 'np.bmat', (['[[b - 2 * i, a, z, z, z], [c, b - i, a, z, z], [z, c, b, a, z], [z, z, c, b +\n i, a], [z, z, z, c, b + 2 * i]]'], {}), 
'([[b - 2 * i, a, z, z, z], [c, b - i, a, z, z], [z, c, b, a, z], [z,\n z, c, b + i, a], [z, z, z, c, b + 2 * i]])\n', (2013, 2128), True, 'import numpy as np\n'), ((2252, 2271), 'numpy.array', 'np.array', (['[a, b, c]'], {}), '([a, b, c])\n', (2260, 2271), True, 'import numpy as np\n'), ((2834, 2850), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (2842, 2850), True, 'import numpy as np\n'), ((2897, 2916), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (2905, 2916), True, 'import numpy as np\n'), ((2963, 2982), 'numpy.array', 'np.array', (['[2, 1, 3]'], {}), '([2, 1, 3])\n', (2971, 2982), True, 'import numpy as np\n'), ((3234, 3253), 'numpy.ones', 'np.ones', (['[dim, dim]'], {}), '([dim, dim])\n', (3241, 3253), True, 'import numpy as np\n'), ((3418, 3516), 'numpy.bmat', 'np.bmat', (['[[b, a, z, z, z], [c, b, a, z, z], [z, c, b, a, z], [z, z, c, b, a], [z, z,\n z, c, b]]'], {}), '([[b, a, z, z, z], [c, b, a, z, z], [z, c, b, a, z], [z, z, c, b, a],\n [z, z, z, c, b]])\n', (3425, 3516), True, 'import numpy as np\n'), ((3635, 3733), 'numpy.bmat', 'np.bmat', (['[[b, b, z, z, z], [a, b, b, z, z], [z, a, b, b, z], [z, z, a, b, b], [z, z,\n z, a, b]]'], {}), '([[b, b, z, z, z], [a, b, b, z, z], [z, a, b, b, z], [z, z, a, b, b],\n [z, z, z, a, b]])\n', (3642, 3733), True, 'import numpy as np\n'), ((3840, 3874), 'floq.evolution._dense_to_sparse', 'floq.evolution._dense_to_sparse', (['x'], {}), '(x)\n', (3871, 3874), False, 'import floq\n'), ((6589, 6626), 'numpy.array', 'np.array', (['[1, 2.001, 2.003, 1.999, 3]'], {}), '([1, 2.001, 2.003, 1.999, 3])\n', (6597, 6626), True, 'import numpy as np\n'), ((6660, 6694), 'floq.evolution._find_duplicates', 'floq.evolution._find_duplicates', (['a'], {}), '(a)\n', (6691, 6694), False, 'import floq\n'), ((6841, 6879), 'numpy.array', 'np.array', (['[1, 2.001, 4.003, 8.999, 10]'], {}), '([1, 2.001, 4.003, 8.999, 10])\n', (6849, 6879), True, 'import numpy as np\n'), ((6913, 6947), 
'floq.evolution._find_duplicates', 'floq.evolution._find_duplicates', (['a'], {}), '(a)\n', (6944, 6947), False, 'import floq\n'), ((7095, 7129), 'floq.evolution._find_duplicates', 'floq.evolution._find_duplicates', (['a'], {}), '(a)\n', (7126, 7129), False, 'import floq\n'), ((356, 397), 'numpy.ones', 'np.ones', (['[self.dim_block, self.dim_block]'], {}), '([self.dim_block, self.dim_block])\n', (363, 397), True, 'import numpy as np\n'), ((3951, 3970), 'numpy.array', 'np.array', (['[a, b, c]'], {}), '([a, b, c])\n', (3959, 3970), True, 'import numpy as np\n'), ((4028, 4047), 'numpy.array', 'np.array', (['[b, b, a]'], {}), '([b, b, a])\n', (4036, 4047), True, 'import numpy as np\n'), ((3032, 3044), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3041, 3044), True, 'import numpy as np\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash' from datetime import datetime items = [ '23/03/2007', '05/12/2007', '22/08/2008', '02/10/2009', ] for i in range(len(items) - 1): date_str_1, date_str_2 = items[i], items[i + 1] date_1 = datetime.strptime(date_str_1, '%d/%m/%Y') date_2 = datetime.strptime(date_str_2, '%d/%m/%Y') days = (date_2 - date_1).days print('{} - {} -> {} days'.format(date_str_1, date_str_2, days))
[ "datetime.datetime.strptime" ]
[((289, 330), 'datetime.datetime.strptime', 'datetime.strptime', (['date_str_1', '"""%d/%m/%Y"""'], {}), "(date_str_1, '%d/%m/%Y')\n", (306, 330), False, 'from datetime import datetime\n'), ((344, 385), 'datetime.datetime.strptime', 'datetime.strptime', (['date_str_2', '"""%d/%m/%Y"""'], {}), "(date_str_2, '%d/%m/%Y')\n", (361, 385), False, 'from datetime import datetime\n')]
from smbus import SMBus import time import csv import serial #import thread LIS3DH = False MPL3115A2 = False GPS = False deg = u'\N{DEGREE SIGN}' apo = u"\u0027" #apostrophe serialport = serial.Serial( port="/dev/ttyACM0", baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout = 1 ) serialport.flushInput() try: ## Import LIS3DH Accelerometer import lib_LIS3DH except ImportError: print('Error importing LIS3DH Sensor') else: LIS3DH = True print('LIS3DH Accelerometer Connected') try: ## Import MPL3115A2 Barometeric Altimeter import lib_MPL3115A2 except ImportError: print('Error importing MPL3115A2 Sensor') else: MPL3115A2 = True print('MPL3115A2 Altimeter Connected') try: ## Import GPS import lib_GPS except ImportError: print('Error importing GPS Sensor') else: GPS = True print('Adafruit GPS Connected') datarows = [ 'Time', #0 'Pressure (kPa)', #1 'Temperature ('+deg.encode("utf8")+'C)', #2 'Altitude Estimation (m)', #3 'Acceleration (X)', #4 'Acceleration (Y)', #5 'Acceleration (Z)', #6 'Fix Timestamp (Hours)', #7 'Fix Timestamp (Minutes)', #8 'Fix Timestamp (Seconds)', #9 'Fix Type', #10 '# Satellites', #11 'Latitude ('+deg.encode("utf8")+')', #12 'Latitude ('+apo.encode("utf8")+')', #13 'Latitude (Direction)', #14 'Longitude ('+deg.encode("utf8")+')', #15 'Longitude ('+apo.encode("utf8")+')', #16 'Longitude (Direction)', #17 'Altitude GPS (m)', #18 'Speed (kph)', #19 ] if (LIS3DH==False): datarows[4] = 'LIS3DH N/A', datarows[5] = 'LIS3DH N/A', datarows[6] = 'LIS3DH N/A', if (MPL3115A2==False): datarows[1] = 'MPL3115A2 N/A', datarows[2] = 'MPL3115A2 N/A', datarows[3] = 'MPL3115A2 N/A', if (GPS==False): datarows[7] = 'GPS N/A', datarows[8] = 'GPS N/A', datarows[9] = 'GPS N/A', datarows[10] = 'GPS N/A', datarows[11] = 'GPS N/A', datarows[12] = 'GPS N/A', datarows[13] = 'GPS N/A', datarows[14] = 'GPS N/A', datarows[15] = 'GPS N/A', datarows[16] = 'GPS N/A', datarows[17] = 'GPS N/A', datarows[18] = 'GPS N/A', 
datarows[19] = 'GPS N/A', csv_filename = 'Data: '+time.strftime('%mm%dd%yy_%Hh%Mm%Ss')+'.csv' with open(csv_filename, 'w') as dataInit: dataInit = csv.writer(dataInit, delimiter=',', lineterminator='\n') dataInit.writerow(datarows) while True: if (MPL3115A2==True): MPL3115A2_Data = lib_MPL3115A2.Get_Data() #MPL3115A2_Packet = "kPa:%.2f, C:%.1f, approx m:%.1d" % (MPL3115A2_Data[0], MPL3115A2_Data[1], MPL3115A2_Data[2]) MPL3115A2_Packet = '{pressure:2.1f}{temperature:=+3.0f}{approxm:5.0f}'.format( pressure = MPL3115A2_Data[0], temperature = MPL3115A2_Data[1], approxm = MPL3115A2_Data[2] ) # MPL3115A2_Packet will take up 11 characters. # Saved 5 character spaces for approximate altitude, in case. else: MPL3115A2_Data = [0, 0, 0, 0] MPL3115A2_Packet = "" if (LIS3DH==True): LIS3DH_Data = lib_LIS3DH.Get_Data() LIS3DH_Packet = "" else: LIS3DH_Data = [0, 0, 0] LIS3DH_Packet = "" if (GPS==True): GPS_Data = lib_GPS.Get_Data() #GPS_Packet_fix = 'gps-fix:'+str(GPS_Data[1])+', ' #GPS_Packet_lat = 'lat:'+str(GPS_Data[3][0])+''+deg.encode("utf8")+''+str(GPS_Data[3][1])+''+apo.encode("utf8")+''+str(GPS_Data[3][2])+', ' #GPS_Packet_lon = 'lon:'+str(GPS_Data[4][0])+''+deg.encode("utf8")+''+str(GPS_Data[4][1])+''+apo.encode("utf8")+''+str(GPS_Data[4][2])+', ' #GPS_Packet_altitude = 'm:'+str(GPS_Data[5])+', ' #GPS_Packet_speed = 'kph:'+str(GPS_Data[6])+'' #GPS_Packet = GPS_Packet_fix + GPS_Packet_lat + GPS_Packet_lon + GPS_Packet_altitude + GPS_Packet_speed GPS_Packet = '{fix:d}{altitude:5.0f}{speed:2.0f}{lat_deg:2.0f}{lat_min:2.0f}{lat_dir}{lon_deg:2.0f}{lon_min:2.0f}{lon_dir}'.format( fix = GPS_Data[1], altitude = GPS_Data[5], speed = GPS_Data[6], lat_deg = GPS_Data[3][0], lat_min = GPS_Data[3][1], lat_dir = GPS_Data[3][2], lon_deg = GPS_Data[4][0], lon_min = GPS_Data[4][1], lon_dir = GPS_Data[4][2] ) # GPS_Packet will take up 18 characters. 
else: GPS_Data = [[0,0,0], 0, 0, [0,0,0], [0,0,0], 0, 0] GPS_Packet = "" with open(csv_filename, 'a') as csvFile: dataLogger = csv.writer(csvFile, delimiter=',', lineterminator='\n') dataLogger.writerow([time.strftime('%m/%d/%Y %H:%M:%S%z'), str(MPL3115A2_Data[0]), # pressure kPa str(MPL3115A2_Data[1]), # temperature C str(MPL3115A2_Data[2]), # altitude m str(LIS3DH_Data[0]), # accel X str(LIS3DH_Data[1]), # accel Y str(LIS3DH_Data[2]), # accel Z str(GPS_Data[0][0]), # fix timestamp hours str(GPS_Data[0][1]), # fix timestamp minutes str(GPS_Data[0][2]), # fix timestamp seconds str(GPS_Data[1]), # fix type integer str(GPS_Data[2]), # satellites integer str(GPS_Data[3][0]), # latitude degrees str(GPS_Data[3][1]), # latitude minutes str(GPS_Data[3][2]), # latitude string direction (S,N) str(GPS_Data[4][0]), # longitude degrees str(GPS_Data[4][1]), # longitude minutes str(GPS_Data[4][2]), # longitude string direction (W,E) str(GPS_Data[5]), # altitude m str(GPS_Data[6]), # speed ]) serialdata = serialport.readline() serialdatacheck = serialdata[0 : 2] #print(serialdatacheck) # Check first 2 characters of serial line. If 'XC' then does not send data and executes a given command, so to not congest serial line. if (serialdatacheck=="XC"): ## Special Command Mode if (serialdata=="XC test"): serialport.write("Received Command - Test") time.sleep(0.25) elif (serialdata == "XC hello"): serialport.write("Hello back to you!") time.sleep(0.25) else: serialport.write("Unknown Command") time.sleep(0.25) else: #If anything else other than 'XC' code, continue sending data packet Packet = ''+MPL3115A2_Packet+''+GPS_Packet+''+LIS3DH_Packet+'' #print(Packet) serialport.write(Packet) #print('Serialport written') time.sleep(0.25) time.sleep(0.75)
[ "lib_GPS.Get_Data", "csv.writer", "time.strftime", "lib_LIS3DH.Get_Data", "time.sleep", "serial.Serial", "lib_MPL3115A2.Get_Data" ]
[((190, 338), 'serial.Serial', 'serial.Serial', ([], {'port': '"""/dev/ttyACM0"""', 'baudrate': '(9600)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS', 'timeout': '(1)'}), "(port='/dev/ttyACM0', baudrate=9600, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1)\n", (203, 338), False, 'import serial\n'), ((3014, 3070), 'csv.writer', 'csv.writer', (['dataInit'], {'delimiter': '""","""', 'lineterminator': '"""\n"""'}), "(dataInit, delimiter=',', lineterminator='\\n')\n", (3024, 3070), False, 'import csv\n'), ((7721, 7737), 'time.sleep', 'time.sleep', (['(0.75)'], {}), '(0.75)\n', (7731, 7737), False, 'import time\n'), ((2913, 2949), 'time.strftime', 'time.strftime', (['"""%mm%dd%yy_%Hh%Mm%Ss"""'], {}), "('%mm%dd%yy_%Hh%Mm%Ss')\n", (2926, 2949), False, 'import time\n'), ((3167, 3191), 'lib_MPL3115A2.Get_Data', 'lib_MPL3115A2.Get_Data', ([], {}), '()\n', (3189, 3191), False, 'import lib_MPL3115A2\n'), ((3787, 3808), 'lib_LIS3DH.Get_Data', 'lib_LIS3DH.Get_Data', ([], {}), '()\n', (3806, 3808), False, 'import lib_LIS3DH\n'), ((3945, 3963), 'lib_GPS.Get_Data', 'lib_GPS.Get_Data', ([], {}), '()\n', (3961, 3963), False, 'import lib_GPS\n'), ((5236, 5291), 'csv.writer', 'csv.writer', (['csvFile'], {'delimiter': '""","""', 'lineterminator': '"""\n"""'}), "(csvFile, delimiter=',', lineterminator='\\n')\n", (5246, 5291), False, 'import csv\n'), ((7700, 7716), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (7710, 7716), False, 'import time\n'), ((7218, 7234), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (7228, 7234), False, 'import time\n'), ((5322, 5358), 'time.strftime', 'time.strftime', (['"""%m/%d/%Y %H:%M:%S%z"""'], {}), "('%m/%d/%Y %H:%M:%S%z')\n", (5335, 5358), False, 'import time\n'), ((7339, 7355), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (7349, 7355), False, 'import time\n'), ((7430, 7446), 'time.sleep', 'time.sleep', (['(0.25)'], 
{}), '(0.25)\n', (7440, 7446), False, 'import time\n')]
# -*- coding: utf-8 -*- u"""MAD-X execution template. :copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function from pykern import pkio from pykern import pkjinja from pykern.pkcollections import PKDict from pykern.pkdebug import pkdp, pkdc, pkdlog from sirepo import simulation_db from sirepo.template import code_variable from sirepo.template import lattice from sirepo.template import sdds_util from sirepo.template import template_common from sirepo.template.lattice import LatticeUtil import math import numpy as np import re import sirepo.sim_data _SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals() _PI = 4 * math.atan(1) _MADX_CONSTANTS = PKDict( pi=_PI, twopi=_PI * 2.0, raddeg=180.0 / _PI, degrad=_PI / 180.0, e=math.exp(1), emass=0.510998928e-03, pmass=0.938272046e+00, nmass=0.931494061+00, mumass=0.1056583715, clight=299792458.0, qelect=1.602176565e-19, hbar=6.58211928e-25, erad=2.8179403267e-15, ) class MadxElementIterator(lattice.ElementIterator): def is_ignore_field(self, field): return field == 'name' def madx_code_var(variables): return _code_var(variables) def python_source_for_model(data, model): return _generate_parameters_file(data) def _code_var(variables): return code_variable.CodeVar( variables, code_variable.PurePythonEval(_MADX_CONSTANTS), case_insensitive=True, ) def _format_field_value(state, model, field, el_type): value = model[field] return [field, value] def _generate_lattice(util): filename_map = PKDict() return util.render_lattice_and_beamline( lattice.LatticeIterator(filename_map, _format_field_value), want_semicolon=True) def _generate_parameters_file(data): res, v = template_common.generate_parameters_file(data) util = LatticeUtil(data, _SCHEMA) code_var = _code_var(data.models.rpnVariables) v.lattice = _generate_lattice(util) v.variables = _generate_variables(code_var, data) if 
data.models.simulation.visualizationBeamlineId: v.useBeamline = util.id_map[data.models.simulation.visualizationBeamlineId].name return template_common.render_jinja(SIM_TYPE, v, 'parameters.madx') def _generate_variable(name, variables, visited): res = '' if name not in visited: res += 'REAL {} = {};\n'.format(name, variables[name]) visited[name] = True return res def _generate_variables(code_var, data): res = '' visited = PKDict() for name in sorted(code_var.variables): for dependency in code_var.get_expr_dependencies(code_var.postfix_variables[name]): res += _generate_variable(dependency, code_var.variables, visited) res += _generate_variable(name, code_var.variables, visited) return res
[ "sirepo.template.code_variable.PurePythonEval", "sirepo.template.template_common.generate_parameters_file", "sirepo.template.template_common.render_jinja", "sirepo.template.lattice.LatticeUtil", "sirepo.template.lattice.LatticeIterator", "pykern.pkcollections.PKDict", "math.exp", "math.atan" ]
[((771, 783), 'math.atan', 'math.atan', (['(1)'], {}), '(1)\n', (780, 783), False, 'import math\n'), ((1726, 1734), 'pykern.pkcollections.PKDict', 'PKDict', ([], {}), '()\n', (1732, 1734), False, 'from pykern.pkcollections import PKDict\n'), ((1929, 1975), 'sirepo.template.template_common.generate_parameters_file', 'template_common.generate_parameters_file', (['data'], {}), '(data)\n', (1969, 1975), False, 'from sirepo.template import template_common\n'), ((1987, 2013), 'sirepo.template.lattice.LatticeUtil', 'LatticeUtil', (['data', '_SCHEMA'], {}), '(data, _SCHEMA)\n', (1998, 2013), False, 'from sirepo.template.lattice import LatticeUtil\n'), ((2314, 2374), 'sirepo.template.template_common.render_jinja', 'template_common.render_jinja', (['SIM_TYPE', 'v', '"""parameters.madx"""'], {}), "(SIM_TYPE, v, 'parameters.madx')\n", (2342, 2374), False, 'from sirepo.template import template_common\n'), ((2645, 2653), 'pykern.pkcollections.PKDict', 'PKDict', ([], {}), '()\n', (2651, 2653), False, 'from pykern.pkcollections import PKDict\n'), ((897, 908), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (905, 908), False, 'import math\n'), ((1484, 1529), 'sirepo.template.code_variable.PurePythonEval', 'code_variable.PurePythonEval', (['_MADX_CONSTANTS'], {}), '(_MADX_CONSTANTS)\n', (1512, 1529), False, 'from sirepo.template import code_variable\n'), ((1788, 1846), 'sirepo.template.lattice.LatticeIterator', 'lattice.LatticeIterator', (['filename_map', '_format_field_value'], {}), '(filename_map, _format_field_value)\n', (1811, 1846), False, 'from sirepo.template import lattice\n')]