content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
import itertools import logging import math import random from collections import Counter from typing import Any, Dict, List, Optional, Set, Tuple, Union # mypy type checking from .cache import property_cache_forever, property_cache_once_per_frame from .data import ActionResult, Alert, Race, Result, Target, race_gas, race_townhalls, race_worker from .data import ActionResult, Attribute, Race, race_worker, race_townhalls, race_gas, Target, Result from .game_data import AbilityData, GameData # imports for mypy and pycharm autocomplete from .game_state import GameState from .game_data import GameData, AbilityData from .ids.ability_id import AbilityId from .ids.unit_typeid import UnitTypeId from .ids.upgrade_id import UpgradeId from .pixel_map import PixelMap from .position import Point2, Point3 from .unit import Unit from .units import Units logger = logging.getLogger(__name__) def _correct_zerg_supply(self): """ The client incorrectly rounds zerg supply down instead of up (see https://github.com/Blizzard/s2client-proto/issues/123), so self.supply_used and friends return the wrong value when there are an odd number of zerglings and banelings. This function corrects the bad values. """ # TODO: remove when Blizzard/sc2client-proto#123 gets fixed. half_supply_units = { UnitTypeId.ZERGLING, UnitTypeId.ZERGLINGBURROWED, UnitTypeId.BANELING, UnitTypeId.BANELINGBURROWED, UnitTypeId.BANELINGCOCOON, } correction = self.units(half_supply_units).amount % 2 self.supply_used += correction self.supply_army += correction self.supply_left -= correction async def get_available_abilities( self, units: Union[List[Unit], Units], ignore_resource_requirements=False ) -> List[List[AbilityId]]: """ Returns available abilities of one or more units. Right know only checks cooldown, energy cost, and whether the ability has been researched. Example usage: units_abilities = await self.get_available_abilities(self.units) or units_abilities = await self.get_available_abilities([self.units.random]) """ return await self._client.query_available_abilities(units, ignore_resource_requirements) def can_feed(self, unit_type: UnitTypeId) -> bool: """ Checks if you have enough free supply to build the unit """ required = self._game_data.units[unit_type.value]._proto.food_required return required == 0 or self.supply_left >= required def can_afford( self, item_id: Union[UnitTypeId, UpgradeId, AbilityId], check_supply_cost: bool = True ) -> "CanAffordWrapper": """Tests if the player has enough resources to build a unit or cast an ability.""" enough_supply = True if isinstance(item_id, UnitTypeId): unit = self._game_data.units[item_id.value] cost = self._game_data.calculate_ability_cost(unit.creation_ability) if check_supply_cost: enough_supply = self.can_feed(item_id) elif isinstance(item_id, UpgradeId): cost = self._game_data.upgrades[item_id.value].cost else: cost = self._game_data.calculate_ability_cost(item_id) return CanAffordWrapper(cost.minerals <= self.minerals, cost.vespene <= self.vespene, enough_supply) def select_build_worker(self, pos: Union[Unit, Point2, Point3], force: bool = False) -> Optional[Unit]: """Select a worker to build a building with.""" workers = ( self.workers.filter(lambda w: (w.is_gathering or w.is_idle) and w.distance_to(pos) < 20) or self.workers ) if workers: for worker in workers.sorted_by_distance_to(pos).prefer_idle: if ( not worker.orders or len(worker.orders) == 1 and worker.orders[0].ability.id in {AbilityId.MOVE, AbilityId.HARVEST_GATHER} ): return worker return workers.random if force else None def already_pending_upgrade(self, upgrade_type: UpgradeId) -> Union[int, float]: """ Check if an upgrade is being researched Return values: 0: not started 0 < x < 1: researching 1: finished """ assert isinstance(upgrade_type, UpgradeId) if upgrade_type in self.state.upgrades: return 1 level = None if "LEVEL" in upgrade_type.name: level = upgrade_type.name[-1] creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.id for structure in self.units.filter(lambda unit: unit.is_structure and unit.is_ready): for order in structure.orders: if order.ability.id is creationAbilityID: if level and order.ability.button_name[-1] != level: return 0 return order.progress return 0 def already_pending(self, unit_type: Union[UpgradeId, UnitTypeId], all_units: bool = True) -> int: """ Returns a number of buildings or units already in progress, or if a worker is en route to build it. This also includes queued orders for workers and build queues of buildings. If all_units==True, then build queues of other units (such as Carriers (Interceptors) or Oracles (Stasis Ward)) are also included. """ # TODO / FIXME: SCV building a structure might be counted as two units if isinstance(unit_type, UpgradeId): return self.already_pending_upgrade(unit_type) ability = self._game_data.units[unit_type.value].creation_ability amount = len(self.units(unit_type).not_ready) if all_units: amount += sum([o.ability == ability for u in self.units for o in u.orders]) else: amount += sum([o.ability == ability for w in self.workers for o in w.orders]) amount += sum([egg.orders[0].ability == ability for egg in self.units(UnitTypeId.EGG)]) return amount async def build(self, building: UnitTypeId, near: Union[Point2, Point3], max_distance: int=20, unit: Optional[Unit]=None, random_alternative: bool=True, placement_step: int=2): """Build a building.""" if isinstance(near, Unit): near = near.position.to2 elif near is not None: near = near.to2 else: return p = await self.find_placement(building, near.rounded, max_distance, random_alternative, placement_step) if p is None: return ActionResult.CantFindPlacementLocation unit = unit or self.select_build_worker(p) if unit is None or not self.can_afford(building): return ActionResult.Error return await self.do(unit.build(building, p)) def prevent_double_actions(self, action): # always add actions if queued if action.queue: return True if action.unit.orders: # action: UnitCommand # current_action: UnitOrder current_action = action.unit.orders[0] if current_action.ability.id != action.ability: # different action, return true return True try: if current_action.target == action.target.tag: # same action, remove action if same target unit return False except AttributeError: pass try: if action.target.x == current_action.target.x and action.target.y == current_action.target.y: # same action, remove action if same target position return False except AttributeError: pass return True return True # For the functions below, make sure you are inside the boundries of the map size. def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int: """ Returns terrain height at a position. Caution: terrain height is different from a unit's z-coordinate. """ assert isinstance(pos, (Point2, Point3, Unit)), f"pos is not of type Point2, Point3 or Unit" pos = pos.position.to2.rounded return self._game_info.terrain_height[pos] # returns int def get_terrain_z_height(self, pos: Union[Point2, Point3, Unit]) -> int: """ Returns terrain z-height at a position. """ assert isinstance(pos, (Point2, Point3, Unit)), f"pos is not of type Point2, Point3 or Unit" pos = pos.position.to2.rounded return -16 + 32 * self._game_info.terrain_height[pos] / 255 def in_placement_grid(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if you can place something at a position. Remember, buildings usually use 2x2, 3x3 or 5x5 of these grid points. Caution: some x and y offset might be required, see ramp code: https://github.com/Dentosal/python-sc2/blob/master/sc2/game_info.py#L17-L18 """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self._game_info.placement_grid[pos] == 1 def in_pathing_grid(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if a unit can pass through a grid point. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self._game_info.pathing_grid[pos] == 1 def is_visible(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if you have vision on a grid point. """ # more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19 assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self.state.visibility[pos] == 2 def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if there is creep on the grid point. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self.state.creep[pos] == 1 def _prepare_start(self, client, player_id, game_info, game_data): """Ran until game start to set game and player data.""" self._client: "Client" = client self._game_info: "GameInfo" = game_info self._game_data: GameData = game_data self.player_id: int = player_id self.race: Race = Race(self._game_info.player_races[self.player_id]) self._units_previous_map: dict = dict() self._previous_upgrades: Set[UpgradeId] = set() self.units: Units = Units([]) def _prepare_first_step(self): """First step extra preparations. Must not be called before _prepare_step.""" if self.townhalls: self._game_info.player_start_location = self.townhalls.first.position self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers() def _prepare_step(self, state, proto_game_info): # Set attributes from new state before on_step.""" self.state: GameState = state # See game_state.py # update pathing grid self._game_info.pathing_grid: PixelMap = PixelMap( proto_game_info.game_info.start_raw.pathing_grid, in_bits=True, mirrored=False ) # Required for events self._units_previous_map: Dict = {unit.tag: unit for unit in self.units} self.units: Units = state.own_units self.workers: Units = self.units(race_worker[self.race]) self.townhalls: Units = self.units(race_townhalls[self.race]) self.geysers: Units = self.units(race_gas[self.race]) self.minerals: int = state.common.minerals self.vespene: int = state.common.vespene self.supply_army: int = state.common.food_army self.supply_workers: int = state.common.food_workers # Doesn't include workers in production self.supply_cap: int = state.common.food_cap self.supply_used: int = state.common.food_used self.supply_left: int = self.supply_cap - self.supply_used if self.race == Race.Zerg: self.larva_count: int = state.common.larva_count # Workaround Zerg supply rounding bug self._correct_zerg_supply() elif self.race == Race.Protoss: self.warp_gate_count: int = state.common.warp_gate_count self.idle_worker_count: int = state.common.idle_worker_count self.army_count: int = state.common.army_count # reset cached values self.cached_known_enemy_structures = None self.cached_known_enemy_units = None def on_start(self): """ Allows initializing the bot when the game data is available. """ def on_end(self, game_result: Result): """ Triggered at the end of a game. """ class CanAffordWrapper: def __init__(self, can_afford_minerals, can_afford_vespene, have_enough_supply): self.can_afford_minerals = can_afford_minerals self.can_afford_vespene = can_afford_vespene self.have_enough_supply = have_enough_supply
[ 11748, 340, 861, 10141, 198, 11748, 18931, 198, 11748, 10688, 198, 11748, 4738, 198, 6738, 17268, 1330, 15034, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 11, 32233, 11, 5345, 11, 309, 29291, 11, 4479, 220, 1303, 616, 9078, ...
2.426134
5,510
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations from dataclasses import dataclass from enum import Enum from typing import Any, Iterable, cast from pants.core.util_rules.lockfile_metadata import ( LockfileMetadata, LockfileMetadataValidation, LockfileScope, _get_metadata, lockfile_metadata_registrar, ) from pants.jvm.resolve.common import ArtifactRequirement from pants.util.ordered_set import FrozenOrderedSet _jvm_lockfile_metadata = lockfile_metadata_registrar(LockfileScope.JVM)
[ 2, 15069, 33160, 41689, 1628, 20420, 357, 3826, 27342, 9865, 3843, 20673, 13, 9132, 737, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 3826, 38559, 24290, 737, 198, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, ...
3.264249
193
#!/usr/bin/env python3 import os import re cur_path = os.path.dirname(os.path.realpath(__file__)) opendbc_root = os.path.join(cur_path, '../') include_pattern = re.compile(r'CM_ "IMPORT (.*?)";') if __name__ == "__main__": create_all(opendbc_root)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 28686, 198, 11748, 302, 198, 198, 22019, 62, 6978, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 5305, 6978, 7, 834, 7753, 834, 4008, 198, 404, 437, 15630, ...
2.383178
107
from django.contrib import admin from django.utils.safestring import mark_safe from customer.models import Owner, Dog, Breed, SubBreed admin.site.register(Dog, DogAdmin) admin.site.register(Owner, OwnerAdmin) admin.site.register(Breed, BreedAdmin) admin.site.register(SubBreed, SubBreedAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 26791, 13, 49585, 395, 1806, 1330, 1317, 62, 21230, 198, 6738, 6491, 13, 27530, 1330, 23853, 11, 8532, 11, 45958, 11, 3834, 12679, 276, 628, 628, 198, 198, 2...
3.193548
93
from __future__ import absolute_import from redis import Redis from rq.decorators import job from kaneda.utils import get_backend backend = get_backend()
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 6738, 2266, 271, 1330, 2297, 271, 198, 6738, 374, 80, 13, 12501, 273, 2024, 1330, 1693, 198, 198, 6738, 43998, 18082, 13, 26791, 1330, 651, 62, 1891, 437, 198, 198, 1891, 437, ...
3.22449
49
from dotenv import load_dotenv from PyPDF2 import PdfFileReader, PdfFileWriter import os import json if __name__ == "__main__": load_dotenv() ripper = CertRipper( start_page_index=os.getenv("START_PAGE_INDEX"), master_pdf_path=os.getenv("MASTER_PDF_PATH"), json_points_path=os.getenv("JSON_POINTS_PATH"), ripped_certs_path=os.getenv("RIPPED_CERTS_PATH"), ripped_cert_file_name=os.getenv("RIPPED_CERT_FILE_NAME"), ) ripper.process()
[ 6738, 16605, 24330, 1330, 3440, 62, 26518, 24330, 198, 6738, 9485, 20456, 17, 1330, 350, 7568, 8979, 33634, 11, 350, 7568, 8979, 34379, 198, 11748, 28686, 198, 11748, 33918, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 8...
2.240909
220
# # Licensed Materials - Property of IBM # # (c) Copyright IBM Corp. 2007-2008 # import unittest, sys import ibm_db import config from testfunctions import IbmDbTestFunctions #__END__ #__LUW_EXPECTED__ #False #int(0) #int(1) #False #False #False #int(1) #False #__ZOS_EXPECTED__ #False #int(0) #int(1) #False #False #False #int(1) #False #__SYSTEMI_EXPECTED__ #False #int(0) #int(1) #False #False #False #int(1) #False #__IDS_EXPECTED__ #False #int(0) #int(1) #False #False #False #int(1) #False
[ 2, 220, 198, 2, 220, 49962, 24310, 532, 14161, 286, 19764, 198, 2, 198, 2, 220, 357, 66, 8, 15069, 19764, 11421, 13, 4343, 12, 11528, 198, 2, 198, 198, 11748, 555, 715, 395, 11, 25064, 198, 11748, 24283, 76, 62, 9945, 198, 11748, ...
2.122881
236
from cms.api import get_page_draft from cms.toolbar_pool import toolbar_pool from cms.toolbar_base import CMSToolbar from cms.utils import get_cms_setting from cms.utils.permissions import has_page_change_permission from django.core.urlresolvers import reverse, NoReverseMatch from django.utils.translation import ugettext_lazy as _ from .models import PageBannerExtension _banner_change_url = 'admin:djangocms_pagebanner_pagebannerextension_change' _banner_add_url = 'admin:djangocms_pagebanner_pagebannerextension_add'
[ 6738, 269, 907, 13, 15042, 1330, 651, 62, 7700, 62, 35679, 198, 6738, 269, 907, 13, 25981, 5657, 62, 7742, 1330, 50149, 62, 7742, 198, 6738, 269, 907, 13, 25981, 5657, 62, 8692, 1330, 16477, 2257, 970, 5657, 198, 6738, 269, 907, 13,...
2.971591
176
import math import torch import torch.nn as nn import torch.nn.functional as F # Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
[ 11748, 10688, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 198, 2, 5825, 1819, 3093, 2666, 18574, 1859, 27195, 7656, 8265, 994, 13, 2561, 307, 3888, 7382, 2073, ...
3.767442
43
main()
[ 12417, 3419 ]
3
2
from scrapper.playstation.spider import main
[ 6738, 19320, 2848, 13, 1759, 17529, 13, 2777, 1304, 1330, 1388 ]
4
11
import slackbot.bot import random answer = random.randint(1, 50) max = 50 def number(num): '''number Args: num (int): Returns: str: num answer : 'Too large' num answer : 'Too small' num answer : 'Correct!' : 'Can I kick you?.' 0 max121 ''' global answer global max # if num == 0: return ' is a mysterious number...' elif num < max + 1: if num > answer: return ' is too large. The answer is more small.' elif num < answer: return ' is too small. The answer is more large.' elif num == answer: answer = random.randint(1, max) return ' is correct! :tada: Now, start a new game.' elif max == 1: return '? Can I kick you? Only 1.' return '? Can I kick you? 1 to %d.' % max def number_set(num): '''number set Args: num (int): Returns: str: max50 1 0 ''' global answer global max # max if num == 0: return 'There is a mysterious number... It is ' elif num == 1: max = 1 answer = random.randint(1, max) return '1? Really? Then, the maximum of the answer is ' max = num answer = random.randint(1, max) return 'OK. Then, the maximum of the answer is '
[ 11748, 30740, 13645, 13, 13645, 198, 11748, 4738, 198, 198, 41484, 796, 4738, 13, 25192, 600, 7, 16, 11, 2026, 8, 198, 9806, 796, 2026, 198, 198, 4299, 1271, 7, 22510, 2599, 198, 220, 220, 220, 705, 7061, 17618, 220, 198, 220, 220, ...
2.101515
660
"""Code for checking and inferring types.""" import collections import logging import re import subprocess from typing import Any, Dict, Union from pytype import abstract from pytype import abstract_utils from pytype import convert_structural from pytype import debug from pytype import function from pytype import metrics from pytype import output from pytype import special_builtins from pytype import state as frame_state from pytype import vm from pytype.overlays import typing_overlay from pytype.pytd import builtins from pytype.pytd import escape from pytype.pytd import optimize from pytype.pytd import pytd from pytype.pytd import pytd_utils from pytype.pytd import visitors from pytype.typegraph import cfg log = logging.getLogger(__name__) # Most interpreter functions (including lambdas) need to be analyzed as # stand-alone functions. The exceptions are comprehensions and generators, which # have names like "<listcomp>" and "<genexpr>". _SKIP_FUNCTION_RE = re.compile("<(?!lambda).+>$") CallRecord = collections.namedtuple( "CallRecord", ["node", "function", "signatures", "positional_arguments", "keyword_arguments", "return_value"]) # How deep to follow call chains: INIT_MAXIMUM_DEPTH = 4 # during module loading MAXIMUM_DEPTH = 3 # during non-quick analysis QUICK_CHECK_MAXIMUM_DEPTH = 2 # during quick checking QUICK_INFER_MAXIMUM_DEPTH = 1 # during quick inference def check_types(src, filename, errorlog, options, loader, deep=True, init_maximum_depth=INIT_MAXIMUM_DEPTH, maximum_depth=None, **kwargs): """Verify the Python code.""" tracer = CallTracer(errorlog=errorlog, options=options, generate_unknowns=False, loader=loader, **kwargs) loc, defs = tracer.run_program(src, filename, init_maximum_depth) snapshotter = metrics.get_metric("memory", metrics.Snapshot) snapshotter.take_snapshot("analyze:check_types:tracer") if deep: if maximum_depth is None: maximum_depth = ( QUICK_CHECK_MAXIMUM_DEPTH if options.quick else MAXIMUM_DEPTH) tracer.analyze(loc, defs, maximum_depth=maximum_depth) snapshotter.take_snapshot("analyze:check_types:post") _maybe_output_debug(options, tracer.program) def infer_types(src, errorlog, options, loader, filename=None, deep=True, init_maximum_depth=INIT_MAXIMUM_DEPTH, show_library_calls=False, maximum_depth=None, tracer_vm=None, **kwargs): """Given Python source return its types. Args: src: A string containing Python source code. errorlog: Where error messages go. Instance of errors.ErrorLog. options: config.Options object loader: A load_pytd.Loader instance to load PYI information. filename: Filename of the program we're parsing. deep: If True, analyze all functions, even the ones not called by the main execution flow. init_maximum_depth: Depth of analysis during module loading. show_library_calls: If True, call traces are kept in the output. maximum_depth: Depth of the analysis. Default: unlimited. tracer_vm: An instance of CallTracer, in case the caller wants to instantiate and retain the vm used for type inference. **kwargs: Additional parameters to pass to vm.VirtualMachine Returns: A tuple of (ast: TypeDeclUnit, builtins: TypeDeclUnit) Raises: AssertionError: In case of a bad parameter combination. """ # If the caller has passed in a vm, use that. if tracer_vm: assert isinstance(tracer_vm, CallTracer) tracer = tracer_vm else: tracer = CallTracer(errorlog=errorlog, options=options, generate_unknowns=options.protocols, store_all_calls=not deep, loader=loader, **kwargs) loc, defs = tracer.run_program(src, filename, init_maximum_depth) log.info("===Done running definitions and module-level code===") snapshotter = metrics.get_metric("memory", metrics.Snapshot) snapshotter.take_snapshot("analyze:infer_types:tracer") if deep: if maximum_depth is None: if not options.quick: maximum_depth = MAXIMUM_DEPTH elif options.analyze_annotated: # Since there's no point in analyzing annotated functions for inference, # the presence of this option means that the user wants checking, too. maximum_depth = QUICK_CHECK_MAXIMUM_DEPTH else: maximum_depth = QUICK_INFER_MAXIMUM_DEPTH tracer.exitpoint = tracer.analyze(loc, defs, maximum_depth) else: tracer.exitpoint = loc snapshotter.take_snapshot("analyze:infer_types:post") ast = tracer.compute_types(defs) ast = tracer.loader.resolve_ast(ast) if tracer.has_unknown_wildcard_imports or any( a in defs for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS): if "__getattr__" not in ast: ast = pytd_utils.Concat( ast, builtins.GetDefaultAst(options.python_version)) # If merged with other if statement, triggers a ValueError: Unresolved class # when attempts to load from the protocols file if options.protocols: protocols_pytd = tracer.loader.import_name("protocols") else: protocols_pytd = None builtins_pytd = tracer.loader.concat_all() # Insert type parameters, where appropriate ast = ast.Visit(visitors.CreateTypeParametersForSignatures()) if options.protocols: log.info("=========== PyTD to solve =============\n%s", pytd_utils.Print(ast)) ast = convert_structural.convert_pytd(ast, builtins_pytd, protocols_pytd) elif not show_library_calls: log.info("Solving is turned off. Discarding call traces.") # Rename remaining "~unknown" to "?" ast = ast.Visit(visitors.RemoveUnknownClasses()) # Remove "~list" etc.: ast = convert_structural.extract_local(ast) _maybe_output_debug(options, tracer.program) return ast, builtins_pytd def _maybe_output_debug(options, program): """Maybe emit debugging output.""" if options.output_cfg or options.output_typegraph: dot = debug.program_to_dot(program, set([]), bool(options.output_cfg)) svg_file = options.output_cfg or options.output_typegraph proc = subprocess.Popen(["/usr/bin/dot", "-T", "svg", "-o", svg_file], stdin=subprocess.PIPE, universal_newlines=True) (_, stderr) = proc.communicate(dot) if stderr: log.info("Failed to create %s: %s", svg_file, stderr) if options.output_debug: text = debug.program_to_text(program) if options.output_debug == "-": log.info("=========== Program Dump =============\n%s", text) else: with options.open_function(options.output_debug, "w") as fi: fi.write(text)
[ 37811, 10669, 329, 10627, 290, 13249, 1806, 3858, 526, 15931, 198, 198, 11748, 17268, 198, 11748, 18931, 198, 11748, 302, 198, 11748, 850, 14681, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 4479, 198, 198, 6738, 12972, 4906, 1330, 1...
2.779675
2,401
#!/usr/bin/env python __author__ = "etseng@pacb.com" """ Given a pooled input GFF + demux CSV file, write out per-{barcode group} GFFs If input fasta/fastq is given, optionally also output per-{barcode group} FASTA/FASTQ """ import re from collections import defaultdict from csv import DictReader from typing import Optional import typer from Bio import SeqIO import cupcake.sequence.GFF as GFF from cupcake import version_callback from cupcake import cupcake_logger as logger rex_pbid = re.compile(r"(PB.\d+.\d+)(|\S+)") app = typer.Typer(name="cupcake.post_isoseq_cluster.demux_by_barcode_groups") def regroup_gff( pooled_gff, demux_count_file, output_prefix, out_group_dict, in_fafq=None ): """ :param pooled_sam: SAM file :param demux_count_file: comma-delimited per-barcode count file :param output_prefix: output prefix for GFF :param out_group_dict: dict of barcode name --> group to be long in (ex: {'EM1':'EM', 'EM2':'EM'}) :param in_fafq: optional fasta/fastq that was input to SAM """ if in_fafq is not None: type_fafq = get_type_fafq(in_fafq) in_tissue = defaultdict( lambda: set() ) # pbid --> list of tissue it is in (EM, END, R) for r in DictReader(open(demux_count_file), delimiter=","): for k, v in r.items(): if k != "id" and int(v) > 0: in_tissue[r["id"]].add(k) # in_tissue = dict(in_tissue) handles = {} handles_fafq = {} for g in out_group_dict.values(): handles[g] = open(f"{output_prefix}_{g}_only.gff", "w") if in_fafq is not None: handles_fafq[g] = open(f"{output_prefix}_{g}_only.{type_fafq}", "w") if in_fafq is not None: fafq_dict = SeqIO.to_dict(SeqIO.parse(open(in_fafq), type_fafq)) fafq_dict_keys = list(fafq_dict.keys()) for k in fafq_dict_keys: m = rex_pbid.match(k) if m is not None: fafq_dict[m.group(1)] = fafq_dict[k] reader = GFF.collapseGFFReader(pooled_gff) for r in reader: groups_to_write_in = set() pbid = r.seqid if pbid not in in_tissue: logger.info( f"WARNING: {pbid} does not belong to any group indicated by outgroup_dict" ) for tissue in in_tissue[pbid]: groups_to_write_in.add(out_group_dict[tissue]) for g in groups_to_write_in: GFF.write_collapseGFF_format(handles[g], r) if in_fafq is not None: SeqIO.write(fafq_dict[pbid], handles_fafq[g], type_fafq) if __name__ == "__main__": typer.run(main)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 834, 9800, 834, 796, 366, 1039, 1516, 31, 33587, 65, 13, 785, 1, 198, 198, 37811, 198, 15056, 257, 44762, 5128, 402, 5777, 1343, 1357, 2821, 44189, 2393, 11, 3551, 503, 583, 12, 90, ...
2.117742
1,240
import random import numpy as np import math from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa from skimage.io import imsave from skimage.util import random_noise maxSlope = 10 # restrict the maximum slope of generated lines for stability minLength = 20 # restrict the minimum length of line segments
[ 11748, 4738, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 10688, 198, 198, 6738, 1341, 9060, 13, 19334, 1330, 1627, 11, 1627, 62, 7252, 11, 9197, 11, 900, 62, 8043, 11, 9197, 62, 525, 16912, 62, 7252, 198, 6738, 1341, 9060, 13, 9...
3.772727
88
# -*- coding: utf-8 -*- """ Created on Wed Apr 1 17:14:19 2020 @author: Mitchell nesm_generator.py ~~~~~~~~~~~~~~~~~ This file serves as a script for using our pre-trained VAE model to generate brand new NES music soundtracks. NOTE - using the reduced model we only generate the first melodic voice for each track rather than each of the four voices present in an NESM track. To do so we first reconstruct our model using the file VAE class defined in `VAE.py` and the same parameters used in `model_training`. Then we use functions from the file `generation_utils` to have our trained model create entirely new and original NES music. """ # Imports #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # NOTE - nesmdb folder manually added to environment libraries from dataset_utils import load_training from VAE import VAE from generation_utils import generate_seprsco, latent_SVD, get_latent_vecs,\ plot_track, filter_tracks import nesmdb from nesmdb.vgm.vgm_to_wav import save_vgmwav import tensorflow as tf import numpy as np import os, json ### Load Mappings #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Parameters for shape of dataset (note these are also used for model def.) measures = 8 measure_len = 96 # load data training_foldername = '../../nesmdb24_seprsco/train/' train_save_filename = 'transformed_dataset.json' dataset , labels2int_map , int2labels_map = \ load_training(training_foldername, train_save_filename, measures = measures, measure_len = measure_len) ### Reinitiate Model #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ### Model Parameters latent_dim = 124 input_dim = len(int2labels_map) - 1 dropout = .1 maxnorm = None vae_b1 , vae_b2 = .02 , .1 print('Reinitiating VAE Model') # Build Model model = VAE(latent_dim, input_dim, measures, measure_len, dropout, maxnorm, vae_b1 , vae_b2) # Reload Saved Weights checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt") model.load_weights(checkpoint_prefix) model.build(tf.TensorShape([None, measures, measure_len, ])) # Print Summary of Model model.summary() ### Sample Latent Variable Distributions #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Here we use SVD to more effectively sample from the orthogonal components # of our latent space # Parameters for sampling num_songs = 10 print('Generating Latent Samples to Generate {} New Tracks'.format(num_songs)) # Grab distributions of dataset over latent space # Have to run in batches due to size of the dataset batch_size = 300 latent_vecs = get_latent_vecs(model, dataset, batch_size) # Sample from normal distribution rand_vecs = np.random.normal(0.0, 1.0, (num_songs, latent_dim)) # perform SVD plot_eigenvalues = True sample_vecs = latent_SVD(latent_vecs, rand_vecs, plot_eigenvalues) ### Generate New Tracks #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Create new seprsco tracks using our model and the random samples # Seprsco files can later be converted to valid NES music format # Parameters for track generation (specifically filtering) p_min = .5 print('Generating New Tracks from Latent Samples') # Decode samples using VAE decoded_tracks = model.decoder(sample_vecs) # Plot first decoded track print("Example Model Generated Track") plot_track(decoded_tracks[0]) # Filter Track decoded_tracks = filter_tracks(decoded_tracks, p_min) # Plot first filtered track print("Example Filtered Track") plot_track(decoded_tracks[0]) # Convert tracks to seprsco format print('Converting Model Output to Seprsco') seprsco_tracks = generate_seprsco(decoded_tracks, int2labels_map) ### Convert to WAV #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Convert seprsco tracks to WAV files so we can listen!!! print('Converting Seprsco to WAV Audio') wav_tracks = [] for track in seprsco_tracks: wav = nesmdb.convert.seprsco_to_wav(track) wav_tracks.append(wav) ### Save WAV Files #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Save our wav tracks to appropriate files (be sure not to overwrite existing) # Also save latent variables so we can reproduce songs we like # Save WAV tracks save_wav = False if save_wav: print('Saving Generated WAV Audio Tracks') wav_folder = 'model_gen_files/' for i in range(len(wav_tracks)): wav_file = wav_folder+'VAE_NESM_{}.wav'.format(i) save_vgmwav(wav_file, wav_tracks[i]) # Save Latent Variables save_latent_var = False if save_latent_var: print('Saving Latent Variables for Generated Tracks') latent_filename = os.path.join(wav_folder, "latent_variables.json") with open(latent_filename, 'w') as f: json.dump({ 'VAE_NESM_{}.wav'.format(i): sample_vecs[i].tolist() for i in range(sample_vecs.shape[0]) }, f) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #----------------------------------END FILE------------------------------------ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 2758, 220, 352, 1596, 25, 1415, 25, 1129, 12131, 198, 198, 31, 9800, 25, 14526, 198, 198, 2516, 76, 62, 8612, 1352, 13, 9078, 198, 27156,...
3.262832
1,617
import logging import logging.handlers import os
[ 11748, 18931, 198, 11748, 18931, 13, 4993, 8116, 198, 11748, 28686, 198 ]
4.083333
12
#Author Theodosis Paidakis print("Hello World") hello_list = ["Hello World"] print(hello_list[0]) for i in hello_list: print(i)
[ 2, 13838, 383, 375, 5958, 47355, 27321, 198, 198, 4798, 7203, 15496, 2159, 4943, 198, 198, 31373, 62, 4868, 796, 14631, 15496, 2159, 8973, 198, 4798, 7, 31373, 62, 4868, 58, 15, 12962, 198, 1640, 1312, 287, 23748, 62, 4868, 25, 198, ...
2.714286
49
from collections import namedtuple from unittest.mock import MagicMock _fake_ext = namedtuple('_', ['qtype', 'kb_ident'])
[ 6738, 17268, 1330, 3706, 83, 29291, 220, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 6139, 44, 735, 628, 198, 62, 30706, 62, 2302, 796, 3706, 83, 29291, 10786, 62, 3256, 37250, 80, 4906, 3256, 705, 32812, 62, 738, 6, 12962, 220, ...
2.863636
44
from abc import ABCMeta, abstractmethod from typing import Tuple, Callable
[ 6738, 450, 66, 1330, 9738, 48526, 11, 12531, 24396, 198, 6738, 19720, 1330, 309, 29291, 11, 4889, 540, 628 ]
4
19
# Book04_1.py b1 = Book(); print(b1.category) b2 = b1; print(b2.category) print(Book.category) Book.category = '' print(b2.category); print(b1.category) ; print(Book.category) b2.category = 'IT' print(b2.category); print(b1.category) ; print(Book.category)
[ 2, 4897, 3023, 62, 16, 13, 9078, 198, 198, 65, 16, 796, 4897, 9783, 3601, 7, 65, 16, 13, 22872, 8, 198, 65, 17, 796, 275, 16, 26, 3601, 7, 65, 17, 13, 22872, 8, 198, 4798, 7, 10482, 13, 22872, 8, 198, 198, 10482, 13, 22872, ...
2.514563
103
# Copyright (C) 2015-2021 Virgil Security, Inc. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # (1) Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # (2) Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # (3) Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com> from ctypes import * from ._c_bridge import VscfKdf1 from ._c_bridge import VscfImplTag from ._c_bridge import VscfStatus from virgil_crypto_lib.common._c_bridge import Data from virgil_crypto_lib.common._c_bridge import Buffer from .alg import Alg from .kdf import Kdf
[ 2, 15069, 357, 34, 8, 1853, 12, 1238, 2481, 16310, 37718, 4765, 11, 3457, 13, 198, 2, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, ...
3.334513
565
#!/usr/bin/env python # encoding: utf-8 from django.test import TestCase from zoo import models
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 21004, 25, 3384, 69, 12, 23, 628, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 198, 6738, 26626, 1330, 4981, 628 ]
3.030303
33
print('Digite seu nome completo: ') nome = input().strip().upper() print('Seu nome tem "Silva"?') print('SILVA' in nome)
[ 4798, 10786, 19511, 578, 384, 84, 299, 462, 1224, 1462, 25, 705, 8, 198, 198, 77, 462, 796, 5128, 22446, 36311, 22446, 45828, 3419, 198, 198, 4798, 10786, 4653, 84, 299, 462, 2169, 366, 15086, 6862, 1, 8348, 8, 198, 4798, 10786, 50,...
2.411765
51
dataset_type = 'STVQADATASET' data_root = '/home/datasets/mix_data/iMIX/' feature_path = 'data/datasets/stvqa/defaults/features/' ocr_feature_path = 'data/datasets/stvqa/defaults/ocr_features/' annotation_path = 'data/datasets/stvqa/defaults/annotations/' vocab_path = 'data/datasets/stvqa/defaults/extras/vocabs/' train_datasets = ['train'] test_datasets = ['val'] reader_train_cfg = dict( type='STVQAREADER', card='default', mix_features=dict( train=data_root + feature_path + 'detectron.lmdb', val=data_root + feature_path + 'detectron.lmdb', test=data_root + feature_path + 'detectron.lmdb', ), mix_ocr_features=dict( train=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb', val=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb', test=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb', ), mix_annotations=dict( train=data_root + annotation_path + 'imdb_subtrain.npy', val=data_root + annotation_path + 'imdb_subval.npy', test=data_root + annotation_path + 'imdb_test_task3.npy', ), datasets=train_datasets) reader_test_cfg = dict( type='STVQAREADER', card='default', mix_features=dict( train=data_root + feature_path + 'detectron.lmdb', val=data_root + feature_path + 'detectron.lmdb', test=data_root + feature_path + 'detectron.lmdb', ), mix_ocr_features=dict( train=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb', val=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb', test=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb', ), mix_annotations=dict( train=data_root + annotation_path + 'imdb_subtrain.npy', val=data_root + annotation_path + 'imdb_subval.npy', test=data_root + annotation_path + 'imdb_test_task3.npy', ), datasets=train_datasets) info_cpler_cfg = dict( type='STVQAInfoCpler', glove_weights=dict( glove6b50d=data_root + 'glove/glove.6B.50d.txt.pt', glove6b100d=data_root + 'glove/glove.6B.100d.txt.pt', glove6b200d=data_root + 'glove/glove.6B.200d.txt.pt', glove6b300d=data_root + 'glove/glove.6B.300d.txt.pt', ), fasttext_weights=dict( wiki300d1m=data_root + 'fasttext/wiki-news-300d-1M.vec', wiki300d1msub=data_root + 'fasttext/wiki-news-300d-1M-subword.vec', wiki_bin=data_root + 'fasttext/wiki.en.bin', ), tokenizer='/home/datasets/VQA/bert/' + 'bert-base-uncased-vocab.txt', mix_vocab=dict( answers_st_5k=data_root + vocab_path + 'fixed_answer_vocab_stvqa_5k.txt', vocabulary_100k=data_root + vocab_path + 'vocabulary_100k.txt', ), max_seg_lenth=20, max_ocr_lenth=10, word_mask_ratio=0.0, vocab_name='vocabulary_100k', vocab_answer_name='answers_st_5k', glove_name='glove6b300d', fasttext_name='wiki_bin', if_bert=True, ) train_data = dict( samples_per_gpu=16, workers_per_gpu=1, data=dict(type=dataset_type, reader=reader_train_cfg, info_cpler=info_cpler_cfg, limit_nums=800)) test_data = dict( samples_per_gpu=16, workers_per_gpu=1, data=dict(type=dataset_type, reader=reader_test_cfg, info_cpler=info_cpler_cfg), )
[ 19608, 292, 316, 62, 4906, 796, 705, 2257, 53, 48, 2885, 1404, 1921, 2767, 6, 198, 7890, 62, 15763, 796, 31051, 11195, 14, 19608, 292, 1039, 14, 19816, 62, 7890, 14, 72, 8895, 55, 14, 6, 198, 30053, 62, 6978, 796, 705, 7890, 14, ...
2.09375
1,568
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-27 13:17 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import mezzanine.core.fields
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 940, 319, 1584, 12, 2931, 12, 1983, 1511, 25, 1558, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738,...
2.805195
77
#LineSensor test from gpiozero import LineSensor from time import sleep from signal import pause sensor = LineSensor(14) sensor.when_line = lineDetected sensor.when_no_line = noLineDetected pause() sensor.close()
[ 2, 13949, 47864, 1332, 198, 198, 6738, 27809, 952, 22570, 1330, 6910, 47864, 198, 6738, 640, 1330, 3993, 198, 6738, 6737, 1330, 14985, 628, 198, 82, 22854, 796, 6910, 47864, 7, 1415, 8, 198, 198, 82, 22854, 13, 12518, 62, 1370, 796, ...
3.084507
71
# # This file is part of LiteX-Boards. # # Copyright (c) 2021 Gwenhael Goavec-Merou <gwenhael.goavec-merou@trabucayre.com> # SPDX-License-Identifier: BSD-2-Clause from migen import * from litex.build.generic_platform import * from litex.build.gowin.platform import GowinPlatform from litex.build.openfpgaloader import OpenFPGALoader # IOs ---------------------------------------------------------------------------------------------- _io = [ # Clk / Rst ("clk12", 0, Pins("4"), IOStandard("LVCMOS33")), # Leds ("user_led", 0, Pins("23"), IOStandard("LVCMOS33")), ("user_led", 1, Pins("24"), IOStandard("LVCMOS33")), ("user_led", 2, Pins("25"), IOStandard("LVCMOS33")), ("user_led", 3, Pins("26"), IOStandard("LVCMOS33")), ("user_led", 4, Pins("27"), IOStandard("LVCMOS33")), ("user_led", 5, Pins("28"), IOStandard("LVCMOS33")), ("user_led", 6, Pins("29"), IOStandard("LVCMOS33")), ("user_led", 7, Pins("30"), IOStandard("LVCMOS33")), # RGB led, active-low ("rgb_led", 0, Subsignal("r", Pins("112")), Subsignal("g", Pins("114")), Subsignal("b", Pins("113")), IOStandard("LVCMOS33"), ), ("rgb_led", 1, Subsignal("r", Pins("106")), Subsignal("g", Pins("111")), Subsignal("b", Pins("110")), IOStandard("LVCMOS33"), ), ("rgb_led", 2, Subsignal("r", Pins("101")), Subsignal("g", Pins("104")), Subsignal("b", Pins("102")), IOStandard("LVCMOS33"), ), ("rgb_led", 3, Subsignal("r", Pins("98")), Subsignal("g", Pins("100")), Subsignal("b", Pins("99")), IOStandard("LVCMOS33"), ), # Switches ("user_sw", 0, Pins("75"), IOStandard("LVCMOS33")), ("user_sw", 1, Pins("76"), IOStandard("LVCMOS33")), ("user_sw", 2, Pins("78"), IOStandard("LVCMOS33")), ("user_sw", 3, Pins("79"), IOStandard("LVCMOS33")), ("user_sw", 4, Pins("80"), IOStandard("LVCMOS33")), ("user_sw", 5, Pins("81"), IOStandard("LVCMOS33")), ("user_sw", 6, Pins("82"), IOStandard("LVCMOS33")), ("user_sw", 7, Pins("83"), IOStandard("LVCMOS33")), # Buttons. ("user_btn", 0, Pins("58"), IOStandard("LVCMOS33")), ("user_btn", 1, Pins("59"), IOStandard("LVCMOS33")), ("user_btn", 2, Pins("60"), IOStandard("LVCMOS33")), ("user_btn", 3, Pins("61"), IOStandard("LVCMOS33")), ("user_btn", 4, Pins("62"), IOStandard("LVCMOS33")), ("user_btn", 5, Pins("63"), IOStandard("LVCMOS33")), ("user_btn", 6, Pins("64"), IOStandard("LVCMOS33")), ("user_btn", 7, Pins("65"), IOStandard("LVCMOS33")), # Serial. # FT232H has only one interface -> use (arbitrary) two pins from J2 to # connect an external USB<->serial adapter ("serial", 0, Subsignal("tx", Pins("116")), # J2.17 Subsignal("rx", Pins("115")), # J2.18 IOStandard("LVCMOS33") ), # Seven Segment ("seven_seg_dig", 0, Pins("137"), IOStandard("LVCMOS33")), ("seven_seg_dig", 1, Pins("140"), IOStandard("LVCMOS33")), ("seven_seg_dig", 2, Pins("141"), IOStandard("LVCMOS33")), ("seven_seg_dig", 3, Pins("7"), IOStandard("LVCMOS33")), ("seven_seg", 0, Pins("138 142 9 11 12 139 8 10"), IOStandard("LVCMOS33")), ] # Connectors --------------------------------------------------------------------------------------- _connectors = [ ["J1", "- 38 39 40 41 42 43 44 66 67 68 69 70 71 72 96 95 94 93 -"], ["J2", "- 136 135 134 133 132 131 130 129 128 123 122 121 120 119 118 117 116 115 -"], ] # Platform -----------------------------------------------------------------------------------------
[ 2, 198, 2, 770, 2393, 318, 636, 286, 27395, 55, 12, 16635, 1371, 13, 198, 2, 198, 2, 15069, 357, 66, 8, 33448, 39661, 16550, 3010, 1514, 1015, 66, 12, 13102, 280, 1279, 70, 86, 16550, 3010, 13, 2188, 1015, 66, 12, 647, 280, 31, ...
2.284204
1,608
import numpy as np import copy import combo.misc import cPickle as pickle from results import history from .. import utility from ...variable import variable from ..call_simulator import call_simulator from ... import predictor from ...gp import predictor as gp_predictor from ...blm import predictor as blm_predictor import combo.search.score MAX_SEACH = int(20000)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 4866, 198, 11748, 14831, 13, 44374, 198, 11748, 269, 31686, 293, 355, 2298, 293, 198, 6738, 2482, 1330, 2106, 198, 6738, 11485, 1330, 10361, 198, 6738, 2644, 45286, 1330, 7885, 198, 6738, 11485, ...
3.717172
99
_disable_linecache()
[ 198, 198, 62, 40223, 62, 1370, 23870, 3419, 198 ]
2.555556
9
import requests from bs4 import BeautifulSoup as bs import os #source url = '' # the source you want the bot take images from #down page page = requests.get(url) html = bs(page.text, 'html.parser') #locate image_loc = html.findAll('img') #create folder for located imgs if not os.path.exists('imgs'): os.makedirs('imgs') #open the new folder os.chdir('imgs') image0 = 0 #img name #get images for image in image_loc: try: url = image['src'] source = requests.get(url) if source.status_code == 200: with open('img-' + str(image0) + '.jpg', 'png') as mkimg: mkimg.write(requests.get(url).content) mkimg.close() image0 += 1 except: pass
[ 11748, 7007, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 355, 275, 82, 198, 11748, 28686, 198, 198, 2, 10459, 198, 6371, 796, 10148, 1303, 262, 2723, 345, 765, 262, 10214, 1011, 4263, 422, 198, 198, 2, 2902, 2443, 220, 198, 7700...
2.492806
278
''' 1appium subproccess 1.1 1.2 2driver ''' from lib.tools import Tool import subprocess from lib.path import SYSTEMPATH, ERRORPATH import time from appium import webdriver import queue # python driver_queue = queue.Queue() if __name__ == '__main__': controller = Controller() controller.start_server() if controller.test_server(): controller.start_driver()
[ 7061, 6, 198, 16, 1324, 1505, 198, 220, 220, 220, 850, 1676, 1591, 198, 220, 220, 220, 220, 198, 16, 13, 16, 198, 16, 13, 17, 198, 17, 26230, 198, 198, 7061, 6, 198, 198, 6738, 9195, 13, 31391, 1330, 16984, 198, 11748, 850, 1468...
2.847826
138
# -*- encoding: utf-8 -*- ''' @project : LeetCode @File : pondSizes.py @Contact : 9824373@qq.com @Desc : land0 [ [0,2,1,0], [0,1,0,1], [1,1,0,1], [0,1,0,1] ] [1,2,4] 0 < len(land) <= 1000 0 < len(land[i]) <= 1000 LeetCode https://leetcode-cn.com/problems/pond-sizes-lcci @Modify Time @Author @Version @Desciption ------------ ------- -------- ----------- 2020-03-07 zhan 1.0 None ''' from typing import List from collections import deque if __name__ == '__main__': a = [ [0,2,1,0], [0,1,0,1], [1,1,0,1], [0,1,0,1] ] ans = Solution().pondSizes(a) print(ans)
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 31, 16302, 1058, 220, 220, 1004, 316, 10669, 198, 31, 8979, 220, 220, 220, 1058, 220, 220, 16723, 50, 4340, 13, 9078, 198, 31, 17829, 1058, 220, 220, 9...
1.691466
457
# -*- coding: utf-8 -*- from decimal import Decimal, setcontext, ExtendedContext from geolucidate.links.google import google_maps_link from geolucidate.links.tools import MapLink from geolucidate.parser import parser_re setcontext(ExtendedContext) def _cleanup(parts): """ Normalize up the parts matched by :obj:`parser.parser_re` to degrees, minutes, and seconds. >>> _cleanup({'latdir': 'south', 'longdir': 'west', ... 'latdeg':'60','latmin':'30', ... 'longdeg':'50','longmin':'40'}) ['S', '60', '30', '00', 'W', '50', '40', '00'] >>> _cleanup({'latdir': 'south', 'longdir': 'west', ... 'latdeg':'60','latmin':'30', 'latdecsec':'.50', ... 'longdeg':'50','longmin':'40','longdecsec':'.90'}) ['S', '60', '30.50', '00', 'W', '50', '40.90', '00'] """ latdir = (parts['latdir'] or parts['latdir2']).upper()[0] longdir = (parts['longdir'] or parts['longdir2']).upper()[0] latdeg = parts.get('latdeg') longdeg = parts.get('longdeg') latmin = parts.get('latmin', '00') or '00' longmin = parts.get('longmin', '00') or '00' latdecsec = parts.get('latdecsec', '') longdecsec = parts.get('longdecsec', '') if (latdecsec and longdecsec): latmin += latdecsec longmin += longdecsec latsec = '00' longsec = '00' else: latsec = parts.get('latsec', '') or '00' longsec = parts.get('longsec', '') or '00' return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec] def _convert(latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec): """ Convert normalized degrees, minutes, and seconds to decimal degrees. Quantize the converted value based on the input precision and return a 2-tuple of strings. >>> _convert('S','50','30','30','W','50','30','30') ('-50.508333', '-50.508333') >>> _convert('N','50','27','55','W','127','27','65') ('50.459167', '-127.460833') """ if (latsec != '00' or longsec != '00'): precision = Decimal('0.000001') elif (latmin != '00' or longmin != '00'): precision = Decimal('0.001') else: precision = Decimal('1') latitude = Decimal(latdeg) latmin = Decimal(latmin) latsec = Decimal(latsec) longitude = Decimal(longdeg) longmin = Decimal(longmin) longsec = Decimal(longsec) if latsec > 59 or longsec > 59: # Assume that 'seconds' greater than 59 are actually a decimal # fraction of minutes latitude += (latmin + (latsec / Decimal('100'))) / Decimal('60') longitude += (longmin + (longsec / Decimal('100'))) / Decimal('60') else: latitude += (latmin + (latsec / Decimal('60'))) / Decimal('60') longitude += (longmin + (longsec / Decimal('60'))) / Decimal('60') if latdir == 'S': latitude *= Decimal('-1') if longdir == 'W': longitude *= Decimal('-1') lat_str = str(latitude.quantize(precision)) long_str = str(longitude.quantize(precision)) return (lat_str, long_str) def replace(string, sub_function=google_maps_link()): """ Replace detected coordinates with a map link, using the given substitution function. The substitution function will be passed a :class:`~.MapLink` instance, and should return a string which will be substituted by :func:`re.sub` in place of the detected coordinates. >>> replace("58147N/07720W") '<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>' >>> replace("5814N/07720W", google_maps_link('satellite')) '<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>' >>> from geolucidate.links.bing import bing_maps_link >>> replace("58N/077W", bing_maps_link('map')) '<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>' """ return parser_re.sub(do_replace, string) def get_replacements(string, sub_function=google_maps_link()): """ Return a dict whose keys are instances of :class:`re.Match` and whose values are the corresponding replacements. Use :func:`get_replacements` when the replacement cannot be performed through ordinary string substitution by :func:`re.sub`, as in :func:`replace`. >>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W") ... #doctest: +ELLIPSIS {<re.Match object...>: '<a href="..." title="...">4630 NORTH 5705 WEST</a>', <re.Match object...>: '<a href="..." title="...">58147N/07720W</a>'} >>> test_string = "4630 NORTH 5705 WEST 58147N/07720W" >>> replacements = get_replacements(test_string) >>> offset = 0 >>> out = bytearray(test_string, encoding="ascii", errors="replace") >>> for (match, link) in replacements.items(): ... start = match.start() + offset ... end = match.end() + offset ... out[start:end] = bytearray(link, encoding="ascii", errors="replace") ... offset += (len(link) - len(match.group())) >>> out.decode(encoding="ascii") == replace(test_string) True """ substitutions = {} matches = parser_re.finditer(string) for match in matches: (latitude, longitude) = _convert(*_cleanup(match.groupdict())) substitutions[match] = sub_function(MapLink(match.group(), latitude, longitude)) return substitutions
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 32465, 1330, 4280, 4402, 11, 900, 22866, 11, 24204, 21947, 198, 198, 6738, 4903, 349, 1229, 20540, 13, 28751, 13, 13297, 1330, 23645, 62, 31803, 62, 8726, 198, 673...
2.347013
2,461
from setuptools import setup, find_packages setup( name='Pokedex', version='0.1', zip_safe=False, packages=find_packages(), package_data={ 'pokedex': ['data/csv/*.csv'] }, install_requires=[ 'SQLAlchemy>=1.0,<2.0', 'whoosh>=2.5,<2.7', 'markdown==2.4.1', 'construct==2.5.3', 'six>=1.9.0', ], entry_points={ 'console_scripts': [ 'pokedex = pokedex.main:setuptools_entry', ], }, classifiers=[ "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.7", ] )
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 11639, 47, 6545, 1069, 3256, 198, 220, 220, 220, 2196, 11639, 15, 13, 16, 3256, 198, 220, 220, 220, 19974, 62, 21230, 28, 25101,...
2.019774
354
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Encoders for the speech model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from six.moves import range from six.moves import zip import tensorflow as tf from tensorflow.python.ops import inplace_ops from lingvo.core import base_encoder from lingvo.core import base_layer from lingvo.core import layers from lingvo.core import plot from lingvo.core import py_utils from lingvo.core import rnn_cell from lingvo.core import rnn_layers from lingvo.core import summary_utils from lingvo.core import model_helper ConvLSTMBlock = collections.namedtuple('ConvLSTMBlock', ('rnn', 'cnn')) def FProp(self, theta, batch, state0=None): """Encodes source as represented by 'inputs' and 'paddings'. Args: theta: A NestedMap object containing weights' values of this layer and its children layers. batch: A NestedMap with fields: src_inputs - The inputs tensor. It is expected to be of shape [batch, time, feature_dim, channels]. paddings - The paddings tensor. It is expected to be of shape [batch, time]. state0: Recurrent input state. Not supported/ignored by this encoder. Returns: (outputs, out_paddings, state1) tuple. Outputs is of the shape [time, batch, depth], and out_paddings is of the shape [time, batch] """ p = self.params inputs, paddings = batch.src_inputs, batch.paddings with tf.name_scope(p.name): # Add a few extra padded timesteps at the end. This is for ensuring the # correctness of the conv-layers at the edges. if p.pad_steps > 0: # inplace_update() is not supported by TPU for now. Since we have done # padding on the input_generator, we may avoid this additional padding. assert not py_utils.use_tpu() inputs_pad = tf.zeros( inplace_ops.inplace_update(tf.shape(inputs), 1, p.pad_steps), inputs.dtype) paddings_pad = tf.ones( inplace_ops.inplace_update(tf.shape(paddings), 1, p.pad_steps), paddings.dtype) inputs = tf.concat([inputs, inputs_pad], 1, name='inputs') paddings = tf.concat([paddings, paddings_pad], 1) def ReshapeForPlot(tensor, padding, name): """Transposes and flattens channels to [batch, dim, seq_len] shape.""" # Flatten any dimensions beyond the third into the third. batch_size = tf.shape(tensor)[0] max_len = tf.shape(tensor)[1] plot_tensor = tf.reshape(tensor, [batch_size, max_len, -1]) plot_tensor = tf.transpose(plot_tensor, [0, 2, 1], name=name) return (plot_tensor, summary_utils.SequenceLength(padding)) plots = [ ReshapeForPlot( tf.transpose(inputs, [0, 1, 3, 2]), paddings, 'inputs') ] conv_out = inputs out_padding = paddings for i, conv_layer in enumerate(self.conv): conv_out, out_padding = conv_layer.FProp(theta.conv[i], conv_out, out_padding) plots.append( ReshapeForPlot( tf.transpose(conv_out, [0, 1, 3, 2]), out_padding, 'conv_%d_out' % i)) # Now the conv-lstm part. conv_lstm_out = conv_out conv_lstm_out_padding = out_padding for i, (rnn, cnn) in enumerate( zip(self.conv_lstm_rnn, self.conv_lstm_cnn)): conv_lstm_in = conv_lstm_out # Move time dimension to be the first. conv_lstm_in = TransposeFirstTwoDims(conv_lstm_in) conv_lstm_in = tf.expand_dims(conv_lstm_in, 2) conv_lstm_in_padding = tf.expand_dims( tf.transpose(conv_lstm_out_padding), 2) lstm_out = rnn.FProp(theta.conv_lstm_rnn[i], conv_lstm_in, conv_lstm_in_padding) # Move time dimension to be the second. cnn_in = TransposeFirstTwoDims(lstm_out) cnn_in = tf.squeeze(cnn_in, 2) cnn_in_padding = conv_lstm_out_padding cnn_out, cnn_out_padding = cnn.FProp(theta.conv_lstm_cnn[i], cnn_in, cnn_in_padding) conv_lstm_out, conv_lstm_out_padding = cnn_out, cnn_out_padding plots.append( ReshapeForPlot(conv_lstm_out, conv_lstm_out_padding, 'conv_lstm_%d_out' % i)) # Need to do a reshape before starting the rnn layers. conv_lstm_out = py_utils.HasRank(conv_lstm_out, 4) conv_lstm_out_shape = tf.shape(conv_lstm_out) new_shape = tf.concat([conv_lstm_out_shape[:2], [-1]], 0) conv_lstm_out = tf.reshape(conv_lstm_out, new_shape) if self._first_lstm_input_dim_pad: conv_lstm_out = tf.pad( conv_lstm_out, [[0, 0], [0, 0], [0, self._first_lstm_input_dim_pad]]) conv_lstm_out = py_utils.HasShape(conv_lstm_out, [-1, -1, self._first_lstm_input_dim]) # Transpose to move the time dimension to be the first. rnn_in = tf.transpose(conv_lstm_out, [1, 0, 2]) rnn_padding = tf.expand_dims(tf.transpose(conv_lstm_out_padding), 2) # rnn_in is of shape [time, batch, depth] # rnn_padding is of shape [time, batch, 1] # Now the rnn layers. num_skips = 0 for i in range(p.num_lstm_layers): rnn_out = self.rnn[i].FProp(theta.rnn[i], rnn_in, rnn_padding) residual_index = i - p.residual_start + 1 if p.residual_start > 0 and residual_index >= 0: if residual_index % p.residual_stride == 0: residual_in = rnn_in if residual_index % p.residual_stride == p.residual_stride - 1: # Highway skip connection. if p.highway_skip: rnn_out = self.highway_skip[num_skips].FProp( theta.highway_skip[num_skips], residual_in, rnn_out) num_skips += 1 else: # Residual skip connection. rnn_out += py_utils.HasShape(residual_in, tf.shape(rnn_out)) if p.project_lstm_output and (i < p.num_lstm_layers - 1): # Projection layers. rnn_out = self.proj[i].FProp(theta.proj[i], rnn_out, rnn_padding) if i == p.num_lstm_layers - 1: rnn_out *= (1.0 - rnn_padding) plots.append( ReshapeForPlot( tf.transpose(rnn_out, [1, 0, 2]), tf.transpose(rnn_padding, [1, 0, 2]), 'rnn_%d_out' % i)) rnn_in = rnn_out final_out = rnn_in if self.cluster.add_summary: fig = plot.MatplotlibFigureSummary( 'encoder_example', figsize=(8, len(plots) * 3.5)) # Order layers from bottom to top. plots.reverse() for tensor, seq_len in plots: fig.AddSubplot( [tensor, seq_len], summary_utils.TrimPaddingAndPlotSequence, title=tensor.name, xlabel='Time') fig.Finalize() rnn_padding = tf.squeeze(rnn_padding, [2]) return final_out, rnn_padding, py_utils.NestedMap()
[ 2, 15069, 2864, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
2.135734
3,610
import matplotlib.font_manager as fm import matplotlib.pyplot as plt import numpy as np font_location = './wordcloud_file/malgun.ttf' # For Windows font_name = fm.FontProperties(fname=font_location).get_name() plt.rc('font', family=font_name)
[ 11748, 2603, 29487, 8019, 13, 10331, 62, 37153, 355, 277, 76, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 10331, 62, 24886, 796, 705, 19571, 4775, 17721, 62, 7753, 14, 7617, 7...
2.793103
87
from django.shortcuts import render from django.views.generic import TemplateView
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14208, 13, 33571, 13, 41357, 1330, 37350, 7680 ]
4.263158
19
from django.shortcuts import render from .forms import * from django.shortcuts import redirect,get_object_or_404 from django.contrib.auth.decorators import login_required from . models import * from django.views import generic def myloc_details(request,myloc_id): activities=Activity.objects.filter(myloc=myloc_id) posts=Post.objects.filter(myloc=myloc_id) myloc=Myloc.objects.get(pk=myloc_id) return render(request,'details.html',{'myloc':myloc,'activities':activities,'posts':posts})
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 764, 23914, 1330, 1635, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 18941, 11, 1136, 62, 15252, 62, 273, 62, 26429, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13,...
2.875706
177
""" Day 1 Main Module """ from day01 import parse_input, part1, part2 if __name__ == "__main__": # trying out the new walrus[:=] oprtr in python if (part := int(input("Enter Part: "))) == 1: print(part1(parse_input("input.txt"))) elif part == 2: print(part2(parse_input("input.txt"))) else: print("Wrong choice [1|2]")
[ 37811, 198, 12393, 352, 8774, 19937, 198, 37811, 198, 198, 6738, 1110, 486, 1330, 21136, 62, 15414, 11, 636, 16, 11, 636, 17, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1303, 2111, 503, 26...
2.344156
154
""" Django settings for quiz_app project. Generated by 'django-admin startproject' using Django 2.1.2. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ SITE_ID = 1 import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get('DEBUG', False) ALLOWED_HOSTS = ['ignas-quiz.herokuapp.com','localhost','127.0.0.1'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.sites', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'storages', 'quiz', 'multichoice', 'true_false', 'essay', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'quiz_app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.media', ], }, }, ] WSGI_APPLICATION = 'quiz_app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases # DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # } # } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' # STATIC_ROOT = 'staticfiles' STATICFILES_DIRS= ( os.path.join(BASE_DIR, "static"), ) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' DATABASES = {'default': dj_database_url.parse(os.environ.get('DATABASE_URL')) } AWS_HEADERS = { # see http://developer.yahoo.com/performance/rules.html#expires 'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT', 'Cache-Control': 'max-age=94608000', } AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME") AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID") AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY") AWS_S3_HOST = 's3-eu-west-1.amazonaws.com' AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME STATICFILES_LOCATION = 'static' STATICFILES_STORAGE = 'custom_storages.StaticStorage' STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION) MEDIAFILES_LOCATION = 'media' MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION) DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
[ 37811, 198, 35, 73, 14208, 6460, 329, 38964, 62, 1324, 1628, 13, 198, 198, 8645, 515, 416, 705, 28241, 14208, 12, 28482, 923, 16302, 6, 1262, 37770, 362, 13, 16, 13, 17, 13, 198, 198, 1890, 517, 1321, 319, 428, 2393, 11, 766, 198,...
2.316509
1,902
import numpy as np import pickle from os.path import exists, realpath import sys import math from topple_data_loader import ToppleData, ToppleDataLoader import transforms3d if __name__=='__main__': # norm_info = ToppleNormalizationInfo() # norm_info.load_from('../../data/sim/normalization_info/cube_train.pkl') # norm_info.print_out() topple_data = ToppleDataset(roots=['./data/sim/Cube/Cube30k_ObjSplit/Cube30kVal'], norm_info_file='./data/sim/normalization_info/cube_30k.pkl', \ batch_size=5, num_steps=10, shuffle=True, num_pts=None, perturb_pts=0.01) count = 0 while topple_data.has_next_batch(): batch = topple_data.next_batch(random_window=True, focus_toppling=False) count += 1 # print(batch.lin_vel[0]) # print(batch.toppled[0]) # print(batch.delta_rot_split[0]) # print(batch.delta_rot[0]) # print(batch.topple_label[0]) # print(batch.pos) # print(batch.body_friction) # print(batch.delta_quat[0]) # print(np.degrees(2*np.arccos(batch.delta_quat[0, :, 0]))) print('Total num batches: ' + str(count)) topple_data.reset() count = 0 while topple_data.has_next_batch(): batch = topple_data.next_batch() count += 1 print(batch.size) print('Total num batches: ' + str(count))
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2298, 293, 198, 6738, 28686, 13, 6978, 1330, 7160, 11, 1103, 6978, 198, 11748, 25064, 198, 11748, 10688, 198, 6738, 49377, 62, 7890, 62, 29356, 1330, 1675, 381, 293, 6601, 11, 1675, 381, 293, ...
2.232143
616
import numpy as np from operator import truediv
[ 11748, 299, 32152, 355, 45941, 201, 198, 6738, 10088, 1330, 491, 1739, 452, 201 ]
3.5
14
#!/usr/bin/env python ######################################################################################### # # Apply transformations. This function is a wrapper for sct_WarpImageMultiTransform # # --------------------------------------------------------------------------------------- # Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca> # Authors: Julien Cohen-Adad, Olivier Comtois # Modified: 2014-07-20 # # About the license: see the file LICENSE.TXT ######################################################################################### # TODO: display message at the end # TODO: interpolation methods from __future__ import division, absolute_import import sys, io, os, time, functools from msct_parser import Parser import sct_utils as sct import sct_convert import sct_image import spinalcordtoolbox.image as msct_image from sct_crop_image import ImageCropper # PARSER # ========================================================================================== def get_parser(): # parser initialisation parser = Parser(__file__) parser.usage.set_description('Apply transformations. This function is a wrapper for antsApplyTransforms (ANTs).') parser.add_option(name="-i", type_value="file", description="input image", mandatory=True, example="t2.nii.gz") parser.add_option(name="-d", type_value="file", description="destination image", mandatory=True, example="out.nii.gz") parser.add_option(name="-w", type_value=[[','], "file"], description="Transformation, which can be a warping field (nifti image) or an affine transformation matrix (text file).", mandatory=True, example="warp1.nii.gz,warp2.nii.gz") parser.add_option(name="-crop", type_value="multiple_choice", description="Crop Reference. 0 : no reference. 1 : sets background to 0. 2 : use normal background", mandatory=False, default_value='0', example=['0', '1', '2']) parser.add_option(name="-c", type_value=None, description="Crop Reference. 0 : no reference. 1 : sets background to 0. 2 : use normal background", mandatory=False, deprecated_by='-crop') parser.add_option(name="-o", type_value="file_output", description="registered source.", mandatory=False, default_value='', example="dest.nii.gz") parser.add_option(name="-x", type_value="multiple_choice", description="interpolation method", mandatory=False, default_value='spline', example=['nn', 'linear', 'spline']) parser.add_option(name="-r", type_value="multiple_choice", description="""Remove temporary files.""", mandatory=False, default_value='1', example=['0', '1']) parser.add_option(name="-v", type_value="multiple_choice", description="""Verbose.""", mandatory=False, default_value='1', example=['0', '1', '2']) return parser # MAIN # ========================================================================================== def main(args=None): # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) input_filename = arguments["-i"] fname_dest = arguments["-d"] warp_filename = arguments["-w"] transform = Transform(input_filename=input_filename, fname_dest=fname_dest, warp=warp_filename) if "-crop" in arguments: transform.crop = arguments["-crop"] if "-o" in arguments: transform.output_filename = arguments["-o"] if "-x" in arguments: transform.interp = arguments["-x"] if "-r" in arguments: transform.remove_temp_files = int(arguments["-r"]) transform.verbose = int(arguments.get('-v')) sct.init_sct(log_level=transform.verbose, update=True) # Update log level transform.apply() # START PROGRAM # ========================================================================================== if __name__ == "__main__": sct.init_sct() # # initialize parameters param = Param() # call main function main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 29113, 29113, 14468, 7804, 2, 198, 2, 198, 2, 27967, 38226, 13, 770, 2163, 318, 257, 29908, 329, 264, 310, 62, 54, 5117, 5159, 29800, 41762, 198, 2, 198, 2, 16529, 19351, 6329, 198, ...
2.325481
2,080
# Copyright 2013-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests methods in plugin_base.py """ import copy import sys sys.path[0:0] = [""] from mongo_connector.plugins.plugin_base import PluginBase from tests import unittest from tests.plugins.helpers import (BAD_PLUGIN_CONFIGS, get_test_namespace) if __name__ == '__main__': unittest.main()
[ 2, 15069, 2211, 12, 4967, 42591, 11012, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 1...
3.40856
257
import logging import sys from django.core.exceptions import ValidationError from django.db import models from django.db.models.fields.related import ForeignObject from django.utils.encoding import python_2_unicode_compatible try: from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor except ImportError: from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor logger = logging.getLogger(__name__) if sys.version > '3': long = int basestring = (str, bytes) unicode = str __all__ = ['Country', 'State', 'Locality', 'Address', 'AddressField'] ## # Convert a dictionary to an address. ## ## # A country. ## ## # A state. Google refers to this as `administration_level_1`. ## ## # A locality (suburb). ## ## # An address. If for any reason we are unable to find a matching # decomposed address we will store the raw address string in `raw`. ## class AddressDescriptor(ForwardManyToOneDescriptor): def __set__(self, inst, value): super(AddressDescriptor, self).__set__(inst, to_python(value)) ## # A field for addresses in other models. ##
[ 11748, 18931, 198, 11748, 25064, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 3254, 24765, 12331, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 25747, 13, 5363, 1330, ...
3.103175
378
# -*- coding: utf-8 -*- """ Modules for exposing functions that can be run as tasks. """ from __future__ import (absolute_import, division, print_function, unicode_literals)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 5841, 5028, 329, 21294, 5499, 326, 460, 307, 1057, 355, 8861, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 357, 48546, 62, 11748, 11, 7297, 11, 198, ...
2.538462
78
#-*- coding:utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models, _ from odoo.exceptions import UserError from odoo.tools import float_compare, float_is_zero
[ 2, 12, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 2142, 286, 10529, 2238, 13, 4091, 38559, 24290, 2393, 329, 1336, 6634, 290, 15665, 3307, 13, 198, 198, 6738, 16298, 2238, 1330, 40391, 11, 7032, 11, 4981, 11, 4808, 198, ...
3.194444
72
import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import sys import numpy as np from matplotlib.colors import LinearSegmentedColormap from matplotlib.colors import BoundaryNorm
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2603, 29487, 8019, 13, 2164, 2340, 43106, 355, 50000, 43106, 198, 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 2603, 29487, 8019, 13, 4033, 669, 1330, ...
3.35
60
import logging import os import random from abc import ABC, abstractmethod from random import randint from time import sleep, strftime HOSTNAME = ['defence-first.rs', 'defence-first.de', 'defence-first.ru'] HOSTIP = ['78.218.236.218', '87.236.11.212', '54.147.165.86'] SOURCEIP = ['163.189.141.53', '204.164.10.7', '213.166.160.236', '123.197.235.233', '77.28.21.14'] USERNAMES = ['user1', 'user2', 'user3', 'user4', 'user5'] FACILITY = ['KERN', 'USER', 'MAIL', 'DAEMON', 'AUTH', 'SYSLOG', 'LPR', 'NEWS', 'UUCP', 'CLOCK_DAEMON', 'AUTHPRIV', 'FTP', 'NTP', 'LOGAUDIT', 'LOGALERT', 'CRON', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3', 'LOCAL4', 'LOCAL5', 'LOCAL6', 'LOCAL7'] SEVERITY = ['DEBUG', 'INFORMATIONAL', 'NOTICE', 'WARNING', 'ERROR', 'CRITICAL', 'ALERT', 'EMERGENCY'] FORMAT = '%(asctime)s %(hostname)s-Application-%(hostip)s-%(sourceip)s %(severity)s-%(facility)s %(' \ 'message)s ' RESOURCES = ['index.html', 'document.xml', 'dashboard.html'] LOGS_PATH = 'logs' if __name__ == '__main__': sm = Context() sm.run()
[ 11748, 18931, 198, 11748, 28686, 198, 11748, 4738, 198, 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 6738, 4738, 1330, 43720, 600, 198, 6738, 640, 1330, 3993, 11, 965, 31387, 198, 198, 39, 10892, 20608, 796, 37250, 4299, 594, 12, ...
2.215606
487
from bayes_race.pp.pure_pursuit import purePursuit
[ 6738, 15489, 274, 62, 16740, 13, 381, 13, 37424, 62, 79, 1834, 5013, 1330, 5899, 47, 1834, 5013 ]
2.777778
18
from typing import Optional import numpy as np from . import Evaluator from ..state import VectorSpaceStateVar
[ 6738, 19720, 1330, 32233, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 764, 1330, 26439, 84, 1352, 198, 6738, 11485, 5219, 1330, 20650, 14106, 9012, 19852, 198 ]
4
28
from .knn_interpolate import knn_interpolate __all__ = [ 'knn_interpolate', ]
[ 6738, 764, 15418, 77, 62, 3849, 16104, 378, 1330, 638, 77, 62, 3849, 16104, 378, 198, 198, 834, 439, 834, 796, 685, 198, 220, 220, 220, 705, 15418, 77, 62, 3849, 16104, 378, 3256, 198, 60, 198 ]
2.243243
37
from bc4py_extension import PyAddress import hashlib def is_address(ck: PyAddress, hrp, ver): """check bech32 format and version""" try: if ck.hrp != hrp: return False if ck.version != ver: return False except ValueError: return False return True def get_address(pk, hrp, ver) -> PyAddress: """get address from public key""" identifier = hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest() return PyAddress.from_param(hrp, ver, identifier) def convert_address(ck: PyAddress, hrp, ver) -> PyAddress: """convert address's version""" return PyAddress.from_param(hrp, ver, ck.identifier()) __all__ = [ "is_address", "get_address", "convert_address", "dummy_address", ]
[ 6738, 47125, 19, 9078, 62, 2302, 3004, 1330, 9485, 20231, 198, 11748, 12234, 8019, 628, 198, 4299, 318, 62, 21975, 7, 694, 25, 9485, 20231, 11, 39436, 79, 11, 3326, 2599, 198, 220, 220, 220, 37227, 9122, 307, 354, 2624, 5794, 290, 2...
2.445483
321
"""``cubi-tk snappy kickoff``: kickoff SNAPPY pipeline.""" import argparse import os import subprocess import typing from logzero import logger from toposort import toposort from . import common from cubi_tk.exceptions import ParseOutputException def setup_argparse(parser: argparse.ArgumentParser) -> None: """Setup argument parser for ``cubi-tk snappy pull-sheet``.""" parser.add_argument("--hidden-cmd", dest="snappy_cmd", default=run, help=argparse.SUPPRESS) parser.add_argument( "--dry-run", "-n", default=False, action="store_true", help="Perform dry-run, do not do anything.", ) parser.add_argument( "--timeout", default=10, type=int, help="Number of seconds to wait for commands." ) parser.add_argument( "path", nargs="?", help="Path into SNAPPY directory (below a directory containing .snappy_pipeline).", )
[ 37811, 15506, 66, 29603, 12, 30488, 3013, 7774, 40532, 15506, 25, 40532, 11346, 24805, 56, 11523, 526, 15931, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 850, 14681, 198, 11748, 19720, 198, 198, 6738, 2604, 22570, 1330, 4...
2.675287
348
from __future__ import absolute_import from functools import reduce from operator import mul try: from StringIO import StringIO except ImportError: # Python3 compatibility from io import StringIO import pytest from conftest import skipif_yask import numpy as np from devito import Grid, Function, TimeFunction, Eq, Operator, configuration, silencio from devito.logger import logger, logging
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 6738, 1257, 310, 10141, 1330, 4646, 198, 6738, 10088, 1330, 35971, 198, 28311, 25, 198, 220, 220, 220, 422, 10903, 9399, 1330, 10903, 9399, 198, 16341, 17267, 12331, 25, 198, 220...
3.556522
115
from data.data_reader import BIZCARD_LABEL_MAP, BizcardDataParser import argparse from pathlib import Path import os import json import cv2 import numpy as np if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--img_dir', type=str) parser.add_argument('--gt_dir', type=str) parser.add_argument('--data_list', type=str) parser.add_argument('--out_dir', type=str) parser.add_argument('--out_name', type=str) args = parser.parse_args() if not Path(args.out_dir).exists(): Path(args.out_dir).mkdir() convert_bizcard_to_coco_format( Path(args.img_dir), Path(args.gt_dir), args.data_list, args.out_dir, args.out_name)
[ 6738, 1366, 13, 7890, 62, 46862, 1330, 347, 14887, 34, 9795, 62, 48780, 3698, 62, 33767, 11, 347, 528, 9517, 6601, 46677, 198, 11748, 1822, 29572, 198, 6738, 3108, 8019, 1330, 10644, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 269...
2.372168
309
from pathlib import Path from typing import List, Optional from typer import Argument from deckz.cli import app from deckz.paths import Paths from deckz.running import run as running_run
[ 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 7343, 11, 32233, 198, 198, 6738, 1259, 525, 1330, 45751, 198, 198, 6738, 6203, 89, 13, 44506, 1330, 598, 198, 6738, 6203, 89, 13, 6978, 82, 1330, 10644, 82, 198, 6738, 6203, 89, ...
3.72549
51
from django import template register = template.Library()
[ 6738, 42625, 14208, 1330, 11055, 198, 198, 30238, 796, 11055, 13, 23377, 3419, 628, 198 ]
4.066667
15
import asyncio import contextvars import aioredis import uvloop from aioredis import Redis from fastapi import FastAPI from starlette.middleware.base import BaseHTTPMiddleware from starlette.staticfiles import StaticFiles from RLog import rprint from routers import apirest, websockets REDIS_HOST = 'redis' REDIS_PORT = 6379 PORT = 9080 HOST = "0.0.0.0" cvar_redis = contextvars.ContextVar('redis', default=None) # uvloop is written in Cython and is built on top of libuv http://magic.io/blog/uvloop-blazing-fast-python-networking/ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) app = FastAPI() app.mount("/static", StaticFiles(directory="static"), name="static") app.add_middleware(CustomHeaderMiddleware) app.include_router(apirest.router) app.include_router(websockets.router) if __name__ == "__main__": import uvicorn rprint("Starting app") rprint(dir(app)) rprint(app.url_path_for('websocket_endpoint')) uvicorn.run('chat:app', host=HOST, port=PORT, log_level='info', reload=True)#, uds='uvicorn.sock')
[ 11748, 30351, 952, 198, 11748, 4732, 85, 945, 198, 11748, 257, 72, 1850, 271, 198, 11748, 334, 85, 26268, 198, 6738, 257, 72, 1850, 271, 1330, 2297, 271, 198, 6738, 3049, 15042, 1330, 12549, 17614, 198, 6738, 3491, 21348, 13, 27171, 1...
2.767196
378
import argparse import autoencoder parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest="action") encode_parser = subparsers.add_parser('encode') addTrainablesArg(encode_parser) encode_parser.add_argument('--input', dest='input', help='Input image file name', required=True) addExchangeArg(encode_parser) decode_parser = subparsers.add_parser('decode') addTrainablesArg(decode_parser) addExchangeArg(decode_parser) decode_parser.add_argument('--output', dest='output', help='Output image file name', required=True) opts = parser.parse_args() if opts.action == 'encode': autoencoder.encode(opts.model, opts.input, opts.exchange) elif opts.action == 'decode': autoencoder.decode(opts.model, opts.exchange, opts.output)
[ 11748, 1822, 29572, 198, 11748, 1960, 6571, 66, 12342, 628, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 3419, 198, 198, 7266, 79, 945, 364, 796, 30751, 13, 2860, 62, 7266, 79, 945, 364, 7, 16520, 2625, 2673, 4943, 198, ...
2.806691
269
# -*- coding: utf-8; test-case-name: bridgedb.test.test_email_request; -*- #_____________________________________________________________________________ # # This file is part of BridgeDB, a Tor bridge distribution system. # # :authors: Nick Mathewson <nickm@torproject.org> # Isis Lovecruft <isis@torproject.org> 0xA3ADB67A2CDB8B35 # Matthew Finkel <sysrqb@torproject.org> # please also see AUTHORS file # :copyright: (c) 2007-2015, The Tor Project, Inc. # (c) 2013-2015, Isis Lovecruft # :license: see LICENSE for licensing information #_____________________________________________________________________________ """ .. py:module:: bridgedb.email.request :synopsis: Classes for parsing and storing information about requests for bridges which are sent to the email distributor. bridgedb.email.request ====================== Classes for parsing and storing information about requests for bridges which are sent to the email distributor. :: bridgedb.email.request | |_ determineBridgeRequestOptions - Figure out which filters to apply, or | offer help. |_ EmailBridgeRequest - A request for bridges which was received through the email distributor. .. """ from __future__ import print_function from __future__ import unicode_literals import logging import re from bridgedb import bridgerequest from bridgedb.Dist import EmailRequestedHelp from bridgedb.Dist import EmailRequestedKey #: A regular expression for matching the Pluggable Transport method TYPE in #: emailed requests for Pluggable Transports. TRANSPORT_REGEXP = ".*transport ([a-z][_a-z0-9]*)" TRANSPORT_PATTERN = re.compile(TRANSPORT_REGEXP) #: A regular expression that matches country codes in requests for unblocked #: bridges. UNBLOCKED_REGEXP = ".*unblocked ([a-z]{2,4})" UNBLOCKED_PATTERN = re.compile(UNBLOCKED_REGEXP) def determineBridgeRequestOptions(lines): """Figure out which :class:`Bridges.BridgeFilter`s to apply, or offer help. .. note:: If any ``'transport TYPE'`` was requested, or bridges not blocked in a specific CC (``'unblocked CC'``), then the ``TYPE`` and/or ``CC`` will *always* be stored as a *lowercase* string. :param list lines: A list of lines from an email, including the headers. :raises EmailRequestedHelp: if the client requested help. :raises EmailRequestedKey: if the client requested our GnuPG key. :rtype: :class:`EmailBridgeRequest` :returns: A :class:`~bridgerequst.BridgeRequest` with all of the requested parameters set. The returned ``BridgeRequest`` will have already had its filters generated via :meth:`~EmailBridgeRequest.generateFilters`. """ request = EmailBridgeRequest() skippedHeaders = False for line in lines: line = line.strip().lower() # Ignore all lines before the first empty line: if not line: skippedHeaders = True if not skippedHeaders: continue if ("help" in line) or ("halp" in line): raise EmailRequestedHelp("Client requested help.") if "get" in line: request.isValid(True) logging.debug("Email request was valid.") if "key" in line: request.wantsKey(True) raise EmailRequestedKey("Email requested a copy of our GnuPG key.") if "ipv6" in line: request.withIPv6() if "transport" in line: request.withPluggableTransportType(line) if "unblocked" in line: request.withoutBlockInCountry(line) logging.debug("Generating hashring filters for request.") request.generateFilters() return request
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 26, 1332, 12, 7442, 12, 3672, 25, 38265, 2004, 65, 13, 9288, 13, 9288, 62, 12888, 62, 25927, 26, 532, 9, 12, 198, 2, 27193, 2602, 29343, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 1...
2.82816
1,321
""" Module docstring """ def _impl(_ctx): """ Function docstring """ pass some_rule = rule( attrs = { "attr1": attr.int( default = 2, mandatory = False, ), "attr2": 5, }, implementation = _impl, )
[ 37811, 19937, 2205, 8841, 37227, 198, 198, 4299, 4808, 23928, 28264, 49464, 2599, 198, 220, 220, 220, 37227, 15553, 2205, 8841, 37227, 198, 220, 220, 220, 1208, 198, 198, 11246, 62, 25135, 796, 3896, 7, 198, 220, 220, 220, 708, 3808, ...
2.022727
132
from __future__ import print_function from connection import * from jinja2 import Environment, FileSystemLoader import webbrowser # self.entry_text(self.entry_name, result['firstName']+" "+result['lastName'] ) # self.entry_text(self.entry_EmpID, result['empID']) # self.entry_text(self.entry_EmpName, result['firstName']+" "+result['lastName']) # self.entry_text(self.entry_personalno, result['empID']) # self.entry_text(self.entry_address,result['address'] ) # self.entry_text(self.entry_pin, result['pin']) # self.entry_text(self.entry_state, result['state']) # self.entry_text(self.entry_adhar, result['adharID']) # self.entry_text(self.entry_pan, result['panID']) # self.entry_text(self.entry_designation, result['designation']) # self.entry_text(self.entry_unit, result['unit']) # self.entry_text(self.entry_emailid, result['email']) # self.entry_text(self.entry_mobile, result['mobile']) # self.entry_text(self.entry_department, result['depName']) # self.entry_text(self.entry_ifsc, result['IFSC']) # self.entry_text(self.enrtry_acno, result['ACNo']) # self.entry_text(self.entry_branch, result['BranchAdd'])
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 4637, 1330, 1635, 198, 6738, 474, 259, 6592, 17, 1330, 9344, 11, 9220, 11964, 17401, 198, 11748, 3992, 40259, 198, 198, 2, 220, 220, 220, 220, 220, 197, 944, 13, 13000, 62, 5...
2.406814
499
# -*- coding: utf-8 -*- # # michael a.g. avzis # orthologue # (c) 1998-2021 all rights reserved # # superclass from .Schema import Schema # declaration # meta-methods def __init__(self, default=object, schema=Schema(), **kwds): # adjust the default; carefully, so we don't all end up using the same global container # checking for {None} is not appropriate here; the user may want {None} as the default # value; we need a way to know that {default} was not supplied: use a TYPE (in this # case object) as the marker default = self.container() if default is object else default # chain up with my default super().__init__(default=default, **kwds) # save my schema self.schema = schema # all done return # end of file
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 285, 40302, 257, 13, 70, 13, 1196, 89, 271, 198, 2, 29617, 39795, 198, 2, 357, 66, 8, 7795, 12, 1238, 2481, 477, 2489, 10395, 198, 2, 628, 198, 2, 220...
2.683007
306
# # Copyright The NOMAD Authors. # # This file is part of NOMAD. # See https://nomad-lab.eu for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import os import re import logging from nomad.units import ureg from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser from nomad.datamodel.metainfo.simulation.run import Run, Program from nomad.datamodel.metainfo.simulation.method import ( Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet ) from nomad.datamodel.metainfo.simulation.system import ( System, Atoms ) from nomad.datamodel.metainfo.simulation.calculation import ( Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges, Forces, ForcesEntry, ScfIteration, BandGap ) from nomad.datamodel.metainfo.workflow import Workflow, GeometryOptimization from .metainfo.exciting import x_exciting_section_MT_charge_atom, x_exciting_section_MT_moment_atom,\ x_exciting_section_spin, x_exciting_section_fermi_surface,\ x_exciting_section_atoms_group re_float = r'[-+]?\d+\.\d*(?:[Ee][-+]\d+)?' class DOSXMLParser(XMLParser): def _get_dos(self, diagram): dos = np.array( [point.attrib.get(self._dos_key) for point in diagram], dtype=float) return dos def parse(self, key): if self._results is None: self._results = dict() if 'total' in key: if not self.total_dos: return res = np.zeros((self.number_of_spin_channels, self.number_of_dos)) for i in range(len(self.total_dos)): spin = self.total_dos[i].attrib.get(self._nspin_key, i) res[i] = self._get_dos(self._total_dos[i]) if self.energy_unit is not None: res = res * (1 / self.energy_unit) elif 'partial' in key: if not self.partial_dos: return res = np.zeros(( self.number_of_lm, self.number_of_spin_channels, self.number_of_atoms, self.number_of_dos)) for i in range(len(self.partial_dos)): spin = self.partial_dos[i].attrib.get(self._nspin_key, None) if spin is None: spin = (i % (self.number_of_spin_channels * self.number_of_lm)) // self.number_of_lm else: spin = int(spin) - 1 val_l = self.partial_dos[i].attrib.get(self._l_key, None) val_m = self.partial_dos[i].attrib.get(self._m_key, None) if val_l is None or val_m is None: lm = i % self.number_of_lm else: lm = int(val_l) ** 2 + int(val_m) + int(val_l) atom = i // (self.number_of_lm * self.number_of_spin_channels) res[lm][spin][atom] = self._get_dos(self.partial_dos[i]) if self.energy_unit is not None: res = res * (1 / self.energy_unit) elif key == 'energies': return self.energies else: res = None self._results[key] = res class ExcitingFermiSurfaceBxsfParser(TextParser): class ExcitingEigenvalueParser(TextParser): class ExcitingGWOutParser(TextParser): class ExcitingInfoParser(TextParser): class ExcitingParser: def __init__(self): self.info_parser = ExcitingInfoParser() self.dos_parser = DOSXMLParser(energy_unit=ureg.hartree) self.bandstructure_parser = BandstructureXMLParser(energy_unit=ureg.hartree) self.eigval_parser = ExcitingEigenvalueParser() self.fermisurf_parser = ExcitingFermiSurfaceBxsfParser() self.evalqp_parser = ExcitingEvalqpParser() self.dos_out_parser = DataTextParser() self.bandstructure_dat_parser = BandstructureDatParser(energy_unit=ureg.hartree) self.band_out_parser = BandOutParser(energy_unit=ureg.hartree) self.info_gw_parser = GWInfoParser() self.input_xml_parser = XMLParser() self.data_xs_parser = DataTextParser() self.data_clathrate_parser = DataTextParser(dtype=str) # different names for different versions of exciting self._energy_keys_mapping = { 'energy_total': ['Total energy', 'total energy'], 'x_exciting_fermi_energy': ['Fermi energy', 'Fermi'], 'energy_kinetic_electronic': ['Kinetic energy', 'electronic kinetic'], 'energy_coulomb': ['Coulomb energy', 'Coulomb'], 'x_exciting_coulomb_energy': ['Coulomb energy', 'Coulomb'], 'energy_exchange': ['Exchange energy', 'exchange'], 'x_exciting_exchange_energy': ['Exchange energy', 'exchange'], 'energy_correlation': ['Correlation energy', 'correlation'], 'x_exciting_correlation_energy': ['Correlation energy', 'correlation'], 'energy_sum_eigenvalues': ['Sum of eigenvalues', 'sum of eigenvalues'], 'x_exciting_effective_potential_energy': ['Effective potential energy'], 'x_exciting_coulomb_potential_energy': ['Coulomb potential energy', 'Coulomb potential'], 'energy_xc_potential': ['xc potential energy', 'xc potential'], 'energy_electrostatic': ['Hartree energy', 'Hartree'], 'x_exciting_hartree_energy': ['Hartree energy', 'Hartree'], 'x_exciting_electron_nuclear_energy': ['Electron-nuclear energy', 'electron-nuclear '], 'x_exciting_nuclear_nuclear_energy': ['Nuclear-nuclear energy', 'nuclear-nuclear'], 'x_exciting_madelung_energy': ['Madelung energy', 'Madelung'], 'x_exciting_core_electron_kinetic_energy': ['Core-electron kinetic energy', 'core electron kinetic'], 'x_exciting_dft_d2_dispersion_correction': ['DFT-D2 dispersion correction'] } self._electron_charge_keys_mapping = { 'x_exciting_core_charge': ['core'], 'x_exciting_core_leakage': ['core leakage'], 'x_exciting_valence_charge': ['valence'], 'x_exciting_interstitial_charge': ['interstitial'], 'x_exciting_total_MT_charge': ['total charge in muffin-tins', 'total in muffin-tins'], 'charge_total': ['total charge'], 'x_exciting_section_MT_charge_atom': ['atom_resolved'] } self._moment_keys_mapping = { 'x_exciting_interstitial_moment': ['interstitial'], 'x_exciting_total_MT_moment': ['total moment in muffin-tins'], 'x_exciting_total_moment': ['total moment'], 'x_exciting_section_MT_moment_atom': ['atom_resolved'] } def file_exists(self, filename): """Checks if a the given filename exists and is accessible in the same folder where the mainfile is stored. """ mainfile = os.path.basename(self.info_parser.mainfile) suffix = mainfile.strip('INFO.OUT') target = filename.rsplit('.', 1) filepath = '%s%s' % (target[0], suffix) if target[1:]: filepath = '%s.%s' % (filepath, target[1]) filepath = os.path.join(self.info_parser.maindir, filepath) if os.path.isfile(filepath) and os.access(filepath, os.F_OK): return True return False
[ 2, 198, 2, 15069, 383, 399, 2662, 2885, 46665, 13, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 399, 2662, 2885, 13, 198, 2, 4091, 3740, 1378, 26601, 324, 12, 23912, 13, 12496, 329, 2252, 7508, 13, 198, 2, 198, 2, 49962, 739, 262, ...
2.274259
3,442
# coding: utf-8 """ simcore-service-storage API API definition for simcore-service-storage service # noqa: E501 OpenAPI spec version: 0.1.0 Contact: support@simcore.io Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from simcore_service_storage_sdk.api_client import ApiClient
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 37811, 198, 220, 220, 220, 985, 7295, 12, 15271, 12, 35350, 7824, 628, 220, 220, 220, 7824, 6770, 329, 985, 7295, 12, 15271, 12, 35350, 2139, 220, 1303, 645, 20402, 25, 412, 33548, 628, 22...
3.014085
142
# Generated by Django 3.2.7 on 2021-10-22 14:23 from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 22, 319, 33448, 12, 940, 12, 1828, 1478, 25, 1954, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
#!/usr/bin/env python """ Demos of encoding and decoding algorithms using populations of IAF neurons. """ # Copyright (c) 2009-2015, Lev Givon # All rights reserved. # Distributed under the terms of the BSD license: # http://www.opensource.org/licenses/bsd-license import sys import numpy as np # Set matplotlib backend so that plots can be generated without a # display: import matplotlib matplotlib.use('AGG') from bionet.utils.misc import func_timer import bionet.utils.band_limited as bl import bionet.utils.plotting as pl import bionet.ted.iaf as iaf # For determining output plot file names: output_name = 'iaf_pop_demo_' output_count = 0 output_ext = '.png' # Define algorithm parameters and input signal: dur = 0.1 dt = 1e-6 f = 32 bw = 2*np.pi*f t = np.arange(0, dur, dt) np.random.seed(0) noise_power = None if noise_power == None: fig_title = 'IAF Input Signal with No Noise' else: fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power print fig_title u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power) pl.plot_signal(t, u, fig_title, output_name + str(output_count) + output_ext) # Test leaky IAF algorithms: b1 = 3.5 # bias d1 = 0.7 # threshold R1 = 10.0 # resistance C1 = 0.01 # capacitance try: iaf.iaf_recoverable(u, bw, b1, d1, R1, C1) except ValueError('reconstruction condition not satisfied'): sys.exit() b2 = 3.4 # bias d2 = 0.8 # threshold R2 = 9.0 # resistance C2 = 0.01 # capacitance try: iaf.iaf_recoverable(u, bw, b2, d2, R2, C2) except ValueError('reconstruction condition not satisfied'): sys.exit() b_list = np.array([b1, b2]) d_list = np.array([d1, d2]) R_list = np.array([R1, R2]) C_list = np.array([C1, C2]) output_count += 1 fig_title = 'Signal Encoded Using Leaky IAF Encoder' print fig_title s_list = func_timer(iaf.iaf_encode_pop)([u, u], dt, b_list, d_list, R_list, C_list) pl.plot_encoded(t, u, s_list[0], fig_title + ' #1', output_name + str(output_count) + output_ext) output_count += 1 pl.plot_encoded(t, u, s_list[1], fig_title + ' #2', output_name + str(output_count) + output_ext) output_count += 1 fig_title = 'Signal Decoded Using Leaky IAF Population Decoder' print fig_title u_rec = func_timer(iaf.iaf_decode_pop)(s_list, dur, dt, bw, b_list, d_list, R_list, C_list) pl.plot_compare(t, u, u_rec, fig_title, output_name + str(output_count) + output_ext) # Test ideal IAF algorithms: b1 = 3.5 # bias d1 = 0.7 # threshold R1 = np.inf # resistance C1 = 0.01 # capacitance try: iaf.iaf_recoverable(u, bw, b1, d1, R1, C1) except ValueError('reconstruction condition not satisfied'): sys.exit() b2 = 3.4 # bias d2 = 0.8 # threshold R2 = np.inf # resistance C2 = 0.01 # capacitance try: iaf.iaf_recoverable(u, bw, b2, d2, R2, C2) except ValueError('reconstruction condition not satisfied'): sys.exit() b_list = [b1, b2] d_list = [d1, d2] R_list = [R1, R2] C_list = [C1, C2] output_count += 1 fig_title = 'Signal Encoded Using Ideal IAF Encoder' print fig_title s_list = func_timer(iaf.iaf_encode_pop)([u, u], dt, b_list, d_list, R_list, C_list) pl.plot_encoded(t, u, s_list[0], fig_title + ' #1', output_name + str(output_count) + output_ext) output_count += 1 pl.plot_encoded(t, u, s_list[1], fig_title + ' #2', output_name + str(output_count) + output_ext) output_count += 1 fig_title = 'Signal Decoded Using Ideal IAF Population Decoder' print fig_title u_rec = func_timer(iaf.iaf_decode_pop)(s_list, dur, dt, bw, b_list, d_list, R_list, C_list) pl.plot_compare(t, u, u_rec, fig_title, output_name + str(output_count) + output_ext)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 11522, 418, 286, 21004, 290, 39938, 16113, 1262, 9684, 286, 198, 3539, 37, 16890, 13, 198, 37811, 198, 198, 2, 15069, 357, 66, 8, 3717, 12, 4626, 11, 16042, 402, 452...
2.178652
1,780
"""func expr""" F = function( x,y ): return x+y def main(): TestError( F(1,2) == 3 )
[ 37811, 20786, 44052, 37811, 198, 198, 37, 796, 2163, 7, 2124, 11, 88, 15179, 198, 197, 7783, 2124, 10, 88, 198, 198, 4299, 1388, 33529, 198, 197, 14402, 12331, 7, 376, 7, 16, 11, 17, 8, 6624, 513, 1267, 628 ]
2.25
40
#coding:utf-8 from nadmin.sites import site from nadmin.views import BaseAdminPlugin, ListAdminView SORTBY_VAR = '_sort_by' site.register_plugin(SortablePlugin, ListAdminView)
[ 2, 66, 7656, 25, 40477, 12, 23, 198, 6738, 299, 28482, 13, 49315, 1330, 2524, 198, 6738, 299, 28482, 13, 33571, 1330, 7308, 46787, 37233, 11, 7343, 46787, 7680, 198, 198, 50, 9863, 17513, 62, 53, 1503, 796, 705, 62, 30619, 62, 1525,...
2.903226
62
import logging from config import ACCOUNTING_MAIL_RECIPIENT, LOG_LEVEL, REDIS_URL, TIMEZONE from datetime import datetime, timedelta from pytz import timezone import celery import redis from charges import amount_to_charge, charge, ChargeException from npsp import Opportunity from util import send_email zone = timezone(TIMEZONE) log_level = logging.getLevelName(LOG_LEVEL) root = logging.getLogger() root.setLevel(log_level) # TODO stop sending this email and just rely on Sentry and logs? if __name__ == "__main__": charge_cards()
[ 11748, 18931, 198, 6738, 4566, 1330, 15859, 28270, 2751, 62, 5673, 4146, 62, 38827, 4061, 28495, 11, 41605, 62, 2538, 18697, 11, 23848, 1797, 62, 21886, 11, 20460, 57, 11651, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 19...
3.154286
175
import operator import os from unittest.mock import patch import pytest import requests from rotkehlchen.chain.ethereum.manager import NodeName from rotkehlchen.constants.assets import A_BTC from rotkehlchen.tests.utils.blockchain import mock_etherscan_query from rotkehlchen.typing import SupportedBlockchain
[ 11748, 10088, 198, 11748, 28686, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 11748, 12972, 9288, 198, 11748, 7007, 198, 198, 6738, 5724, 365, 18519, 6607, 13, 7983, 13, 316, 1456, 388, 13, 37153, 1330, 19081, 5376, 19...
3.461538
91
#------------------------------------------------------------------------------- # Name: module1 # Purpose: # # Author: I am # # Created: 02/11/2017 # Copyright: (c) I am 2017 # Licence: <your licence> #------------------------------------------------------------------------------- if __name__ == '__main__': main()
[ 2, 10097, 24305, 198, 2, 6530, 25, 220, 220, 220, 220, 220, 220, 220, 8265, 16, 198, 2, 32039, 25, 198, 2, 198, 2, 6434, 25, 220, 220, 220, 220, 220, 314, 716, 198, 2, 198, 2, 15622, 25, 220, 220, 220, 220, 7816, 14, 1157, 1...
3.583333
96
from flask import Flask from config import Config from sqlalchemy import MetaData from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import LoginManager from flask_moment import Moment from flask_misaka import Misaka from flask_bootstrap import Bootstrap import os import logging from logging.handlers import RotatingFileHandler from elasticsearch import Elasticsearch convention = { "ix": 'ix_%(column_0_label)s', "uq": "uq_%(table_name)s_%(column_0_name)s", "ck": "ck_%(table_name)s_%(constraint_name)s", "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(table_name)s" } metadata = MetaData(naming_convention=convention) db = SQLAlchemy(metadata=metadata) migrate = Migrate() login = LoginManager() login.login_view = "auth.login" moment = Moment() md = Misaka() bootstrap = Bootstrap()
[ 6738, 42903, 1330, 46947, 198, 6738, 4566, 1330, 17056, 198, 6738, 44161, 282, 26599, 1330, 30277, 6601, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 198, 6738, 42903, 62, 76, 42175, 1330, 337, 42175, 198, 6738, 4290...
2.803175
315
import numpy as np import random from time import time, sleep import h5py import torch import torch.nn as nn import torch.optim as optimizer import glob import os #from scipy.stats import rankdata from lstm import Model, initialize from Optim import ScheduledOptim # import _pickle as cPickle # np.set_printoptions(threshold=np.nan)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 4738, 198, 6738, 640, 1330, 640, 11, 3993, 198, 11748, 289, 20, 9078, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 40085, 355, 6436, 7509, 198, 11748, ...
3.303922
102
from c_int import Int from casting import cast from globals_consts import NAMESPACE from temps import used_temps, get_temp, get_temp_func
[ 6738, 269, 62, 600, 1330, 2558, 198, 6738, 13092, 1330, 3350, 198, 6738, 15095, 874, 62, 1102, 6448, 1330, 399, 29559, 47, 11598, 198, 6738, 2169, 862, 1330, 973, 62, 11498, 862, 11, 651, 62, 29510, 11, 651, 62, 29510, 62, 20786, 19...
3.209302
43
#!/usr/bin/python from struct import * from getopt import * import sys import os import re if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 201, 198, 6738, 2878, 1330, 1635, 201, 198, 6738, 651, 8738, 1330, 1635, 201, 198, 11748, 25064, 201, 198, 11748, 28686, 201, 198, 11748, 302, 201, 198, 201, 198, 361, 11593, 3672, 834, 6624, 366,...
2.566038
53
import os from pytest_mock import MockerFixture from robot.api import logger from Mainframe3270.x3270 import x3270
[ 11748, 28686, 198, 198, 6738, 12972, 9288, 62, 76, 735, 1330, 337, 12721, 37, 9602, 198, 6738, 9379, 13, 15042, 1330, 49706, 198, 198, 6738, 8774, 14535, 18, 20233, 13, 87, 18, 20233, 1330, 2124, 18, 20233, 628, 628 ]
3.076923
39
# -*- coding: utf-8 -*- from __future__ import print_function, division """ .. note:: These are the spectrophotometry functions for SPLAT """ # imports - internal import copy import os # imports - external import numpy from astropy import units as u # standard units from astropy import constants as const # physical constants in SI units import matplotlib.patches as patches import matplotlib.pyplot as plt from scipy.integrate import trapz # for numerical integration from scipy.interpolate import interp1d # splat functions and constants from .initialize import * from .utilities import * ##################################################### ############### SPECTROPHOTOMETRY ############### ##################################################### # this function has been obseleted def filterProfile(filt,**kwargs): ''' :Purpose: Retrieve the filter profile for a SPLAT filter. Returns two arrays: the filter wavelength and filter transmission curve. :param filter: String giving the name of one of the predefined filters listed in splat.FILTERS.keys() (required) :param filterFolder: folder containing the filter transmission files (optional, default = splat.FILTER_FOLDER) :Example: >>> import splat >>> import splat.photometry as spphot >>> sp = splat.getSpectrum(shortname='1507-1627')[0] >>> sp.fluxCalibrate('2MASS J',14.5) >>> spphot.filterMag(sp,'MKO J') (14.345894376898123, 0.027596454828421831) ''' # keyword parameters filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER) if not os.path.exists(filterFolder): filterFolder = SPLAT_URL+FILTER_FOLDER # check that requested filter is in list f0 = checkFilterName(filt, verbose=True) if f0 == False: raise ValueError filt = f0 # read in filter fwave,ftrans = numpy.genfromtxt(os.path.normpath(filterFolder+FILTERS[filt]['file']), comments='#', unpack=True, missing_values = ('NaN','nan'), filling_values = (numpy.nan)) # print(type(fwave),type(ftrans),isinstance(fwave,numpy.ndarray),isinstance(ftrans,numpy.ndarray),not isinstance(fwave,numpy.ndarray) or not isinstance(ftrans,numpy.ndarray)) if not isinstance(fwave,numpy.ndarray) or not isinstance(ftrans,numpy.ndarray): raise ValueError('\nProblem reading in {}'.format(filterFolder+FILTERS[filt]['file'])) fwave = fwave[~numpy.isnan(ftrans)]*u.micron ftrans = ftrans[~numpy.isnan(ftrans)] return fwave,ftrans def filterMag(sp,filt,*args,**kwargs): ''' :Purpose: Determine the photometric magnitude of a source based on its spectrum. Spectral fluxes are convolved with the filter profile specified by the ``filter`` input. By default this filter is also convolved with a model of Vega to extract Vega magnitudes, but the user can also specify AB magnitudes, photon flux or energy flux. :Required Parameters: **sp**: Spectrum class object, which should contain wave, flux and noise array elements. **filter**: String giving name of filter, which can either be one of the predefined filters listed in splat.FILTERS.keys() or a custom filter name :Optional Parameters: **custom** = None: A 2 x N vector array specifying the wavelengths and transmissions for a custom filter **notch** = None: A 2 element array that specifies the lower and upper wavelengths for a notch filter (100% transmission within, 0% transmission without) **vega** = True: compute Vega magnitudes (may be set by filter) **ab** = False: compute AB magnitudes (may be set by filter) **energy** = False: compute energy flux **photon** = False: compute photon flux **filterFolder** = splat.FILTER_FOLDER: folder containing the filter transmission files **vegaFile** = 'vega_kurucz.txt': name of file containing Vega flux file, must be within ``filterFolder`` **nsamples** = 100: number of samples to use in Monte Carlo error estimation **info** = False: List the predefined filter names available **verbose** = True: List the predefined filter names available :Example: >>> import splat >>> import splat.photometry as spphot >>> sp = splat.getSpectrum(shortname='1507-1627')[0] >>> sp.fluxCalibrate('2MASS J',14.5) >>> spphot.filterMag(sp,'MKO J') (14.345894376898123, 0.027596454828421831) ''' # keyword parameters filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER) if not os.path.exists(filterFolder): filterFolder = SPLAT_URL+FILTER_FOLDER vegaFile = kwargs.get('vegaFile',VEGAFILE) info = kwargs.get('info',False) custom = kwargs.get('custom',False) notch = kwargs.get('notch',False) vega = kwargs.get('vega',True) ab = kwargs.get('ab',not vega) rsr = kwargs.get('rsr',False) nsamples = kwargs.get('nsamples',100) verbose = kwargs.get('verbose',False) # check that requested filter is in list if isinstance(custom,bool) and isinstance(notch,bool): f0 = checkFilterName(filt,verbose=True) if f0 == False: return numpy.nan, numpy.nan filt = f0 # reset filter calculation methods based on filter design if 'ab' in FILTERS[filt]['method']: ab = kwargs.get('ab',True) vega = not ab if 'vega' in FILTERS[filt]['method']: vega = kwargs.get('vega',True) ab = not vega rsr = FILTERS[filt]['rsr'] # other possibilities photons = kwargs.get('photons',False) photons = kwargs.get('photon',photons) energy = kwargs.get('energy',False) energy = kwargs.get('flux',energy) if (photons or energy): vega = False ab = False if photons: energy = False if energy: photons = False # Read in filter if isinstance(custom,bool) and isinstance(notch,bool): fwave,ftrans = filterProfile(filt,**kwargs) # notch filter elif isinstance(custom,bool) and isinstance(notch,list): dn = (notch[1]-notch[0])/1000 fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn) ftrans = numpy.zeros(len(fwave)) ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1. # custom filter else: fwave,ftrans = custom[0],custom[1] # units if isinstance(fwave,u.quantity.Quantity) == True: fwave = fwave.to(u.micron) else: fwave = fwave*u.micron # check that spectrum and filter cover the same wavelength ranges if numpy.nanmax(fwave) < numpy.nanmin(sp.wave) or numpy.nanmin(fwave) > numpy.nanmax(sp.wave): if verbose==True: print('\nWarning: no overlap between spectrum for {} and filter {}'.format(sp.name,filt)) return numpy.nan, numpy.nan if numpy.nanmin(fwave) < numpy.nanmin(sp.wave) or numpy.nanmax(fwave) > numpy.nanmax(sp.wave): if verbose==True: print('\nWarning: spectrum for {} does not span full filter profile for {}'.format(sp.name,filt)) # interpolate spectrum onto filter wavelength function wgood = numpy.where(~numpy.isnan(sp.noise)) if len(sp.wave[wgood]) > 0: d = interp1d(sp.wave[wgood].value,sp.flux[wgood].value,bounds_error=False,fill_value=0.) n = interp1d(sp.wave[wgood].value,sp.noise[wgood].value,bounds_error=False,fill_value=0) # catch for models else: if verbose==True: print('\nWarning: data values in range of filter {} have no uncertainties'.format(filt)) d = interp1d(sp.wave.value,sp.flux.value,bounds_error=False,fill_value=0.) n = interp1d(sp.wave.value,sp.flux.value*1.e-9,bounds_error=False,fill_value=0.) result = [] if (vega): # Read in Vega spectrum vwave,vflux = numpy.genfromtxt(os.path.normpath(filterFolder+vegaFile), comments='#', unpack=True, \ missing_values = ('NaN','nan'), filling_values = (numpy.nan)) vwave = vwave[~numpy.isnan(vflux)]*u.micron vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron)) vflux.to(sp.flux_unit,equivalencies=u.spectral_density(vwave)) # interpolate Vega onto filter wavelength function v = interp1d(vwave.value,vflux.value,bounds_error=False,fill_value=0.) if rsr: val = -2.5*numpy.log10(trapz(ftrans*fwave.value*d(fwave.value),fwave.value)/trapz(ftrans*fwave.value*v(fwave.value),fwave.value)) else: val = -2.5*numpy.log10(trapz(ftrans*d(fwave.value),fwave.value)/trapz(ftrans*v(fwave.value),fwave.value)) for i in numpy.arange(nsamples): # result.append(-2.5*numpy.log10(trapz(ftrans*numpy.random.normal(d(fwave),n(fwave))*sp.flux_unit,fwave)/trapz(ftrans*v(fwave)*sp.flux_unit,fwave))) if rsr: result.append(-2.5*numpy.log10(trapz(ftrans*fwave.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)/trapz(ftrans*fwave.value*v(fwave.value),fwave.value))) else: result.append(-2.5*numpy.log10(trapz(ftrans*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)/trapz(ftrans*v(fwave.value),fwave.value))) outunit = 1. elif (ab): nu = sp.wave.to('Hz',equivalencies=u.spectral()) fnu = sp.flux.to('Jy',equivalencies=u.spectral_density(sp.wave)) noisenu = sp.noise.to('Jy',equivalencies=u.spectral_density(sp.wave)) filtnu = fwave.to('Hz',equivalencies=u.spectral()) fconst = 3631*u.jansky d = interp1d(nu.value,fnu.value,bounds_error=False,fill_value=0.) n = interp1d(nu.value,noisenu.value,bounds_error=False,fill_value=0.) b = trapz((ftrans/filtnu.value)*fconst.value,filtnu.value) val = -2.5*numpy.log10(trapz(ftrans*d(filtnu.value)/filtnu.value,filtnu.value)/b) for i in numpy.arange(nsamples): a = trapz(ftrans*(d(filtnu.value)+numpy.random.normal(0,1)*n(filtnu.value))/filtnu.value,filtnu.value) result.append(-2.5*numpy.log10(a/b)) outunit = 1. elif (energy): outunit = u.erg/u.s/u.cm**2 if rsr: a = trapz(ftrans*fwave.value*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit b = trapz(ftrans*fwave.value,fwave.value)*sp.wave.unit c = trapz(ftrans*fwave.value*fwave.value,fwave.value)*sp.wave.unit*sp.wave.unit val = (a/b * c/b).to(outunit).value else: a = trapz(ftrans*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit b = trapz(ftrans,fwave.value)*sp.wave.unit c = trapz(ftrans*fwave.value,fwave.value)*sp.wave.unit*sp.wave.unit val = (a/b * c/b).to(outunit).value for i in numpy.arange(nsamples): if rsr: result.append((trapz(ftrans*fwave.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit).to(outunit).value) else: result.append((trapz(ftrans*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit).to(outunit).value) elif (photons): outunit = 1./u.s/u.cm**2 convert = const.h.to('erg s')*const.c.to('micron/s') val = (trapz(ftrans*fwave.value*convert.value*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit*convert.unit).to(outunit).value for i in numpy.arange(nsamples): result.append((trapz(ftrans*fwave.value*convert.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit*convert.unit).to(outunit).value) else: raise NameError('\nfilterMag not given a correct physical quantity (vega, ab, energy, photons) to compute photometry\n\n') # val = numpy.nanmean(result)*outunit err = numpy.nanstd(result) if len(sp.wave[wgood]) == 0: err = 0. return val*outunit,err*outunit def filterInfo(*args,**kwargs): ''' :Purpose: Prints out the current list of filters in the SPLAT reference library. ''' verbose = kwargs.get('verbose',True) if len(args) > 0: fname = list(args) elif kwargs.get('filter',False) != False: fname = kwargs['filter'] else: fname = sorted(list(FILTERS.keys())) if isinstance(fname,list) == False: fname = [fname] output = {} for k in fname: f = checkFilterName(k) if f != False: output[f] = {} output[f]['description'] = FILTERS[f]['description'] output[f]['zeropoint'] = FILTERS[f]['zeropoint'] fwave,ftrans = filterProfile(f,**kwargs) try: fwave = fwave.to(u.micron) except: fwave = fwave*u.micron fw = fwave[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))] ft = ftrans[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))] fw05 = fwave[numpy.where(ftrans > 0.5*numpy.nanmax(ftrans))] output[f]['lambda_mean'] = trapz(ft*fw,fw)/trapz(ft,fw) output[f]['lambda_pivot'] = numpy.sqrt(trapz(fw*ft,fw)/trapz(ft/fw,fw)) output[f]['lambda_central'] = 0.5*(numpy.max(fw)+numpy.min(fw)) output[f]['lambda_fwhm'] = numpy.max(fw05)-numpy.min(fw05) output[f]['lambda_min'] = numpy.min(fw) output[f]['lambda_max'] = numpy.max(fw) if verbose ==True: print(f.replace('_',' ')+': '+output[f]['zeropoint']) print('Zeropoint = {} Jy'.format(output[f]['zeropoint'])) print('Central wavelength: = {:.3f}'.format(output[f]['lambda_central'])) print('Mean wavelength: = {:.3f}'.format(output[f]['lambda_mean'])) print('Pivot point: = {:.3f}'.format(output[f]['lambda_pivot'])) print('FWHM = {:.3f}'.format(output[f]['lambda_fwhm'])) print('Wavelength range = {:.3f} to {:.3f}\n'.format(output[f]['lambda_min'],output[f]['lambda_max'])) else: if verbose ==True: print(' Filter {} not in SPLAT filter list'.format(k)) kys = list(output.keys()) if len(kys) == 1: return output[kys[0]] else: return output def filterProperties(filt,**kwargs): ''' :Purpose: Returns a dictionary containing key parameters for a particular filter. :param filter: name of filter, must be one of the specifed filters given by splat.FILTERS.keys() :type filter: required :param verbose: print out information about filter to screen :type verbose: optional, default = True :Example: >>> import splat >>> data = splat.filterProperties('2MASS J') Filter 2MASS J: 2MASS J-band Zeropoint = 1594.0 Jy Pivot point: = 1.252 micron FWHM = 0.323 micron Wavelength range = 1.066 to 1.442 micron >>> data = splat.filterProperties('2MASS X') Filter 2MASS X not among the available filters: 2MASS H: 2MASS H-band 2MASS J: 2MASS J-band 2MASS KS: 2MASS Ks-band BESSEL I: Bessel I-band FOURSTAR H: FOURSTAR H-band FOURSTAR H LONG: FOURSTAR H long FOURSTAR H SHORT: FOURSTAR H short ... ''' filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER) if not os.path.exists(filterFolder): filterFolder = SPLAT_URL+FILTER_FOLDER # check that requested filter is in list filt = checkFilterName(filt) if filt == False: return None report = {} report['name'] = filt report['description'] = FILTERS[filt]['description'] report['zeropoint'] = FILTERS[filt]['zeropoint'] report['method'] = FILTERS[filt]['method'] report['rsr'] = FILTERS[filt]['rsr'] fwave,ftrans = filterProfile(filt,**kwargs) try: fwave = fwave.to(u.micron) except: fwave = fwave*u.micron fw = fwave[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))] ft = ftrans[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))] fw05 = fwave[numpy.where(ftrans > 0.5*numpy.nanmax(ftrans))] # print(trapz(ft,fw)) # print(trapz(fw*ft,fw)) report['lambda_mean'] = trapz(ft*fw,fw)/trapz(ft,fw) report['lambda_pivot'] = numpy.sqrt(trapz(fw*ft,fw)/trapz(ft/fw,fw)) report['lambda_central'] = 0.5*(numpy.max(fw)+numpy.min(fw)) report['lambda_fwhm'] = numpy.max(fw05)-numpy.min(fw05) report['lambda_min'] = numpy.min(fw) report['lambda_max'] = numpy.max(fw) report['wave'] = fwave report['transmission'] = ftrans # report values out if kwargs.get('verbose',False): print('\nFilter '+filt+': '+report['description']) print('Zeropoint = {} Jy'.format(report['zeropoint'])) print('Pivot point: = {:.3f}'.format(report['lambda_pivot'])) print('FWHM = {:.3f}'.format(report['lambda_fwhm'])) print('Wavelength range = {:.3f} to {:.3f}\n'.format(report['lambda_min'],report['lambda_max'])) return report def magToFlux(mag,filt,**kwargs): ''' :Purpose: Converts a magnitude into an energy, and vice versa. :param mag: magnitude on whatever system is defined for the filter or provided (required) :param filter: name of filter, must be one of the specifed filters given by splat.FILTERS.keys() (required) :param reverse: convert energy into magnitude instead (optional, default = False) :param ab: magnitude is on the AB system (optional, default = filter preference) :param vega: magnitude is on the Vega system (optional, default = filter preference) :param rsr: magnitude is on the Vega system (optional, default = filter preference) :param units: units for energy as an astropy.units variable; if this conversion does not work, the conversion is ignored (optional, default = erg/cm2/s) :param verbose: print out information about filter to screen (optional, default = False) WARNING: THIS CODE IS ONLY PARTIALLY COMPLETE ''' # keyword parameters filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER) if not os.path.exists(filterFolder): filterFolder = SPLAT_URL+FILTER_FOLDER vegaFile = kwargs.get('vegaFile','vega_kurucz.txt') vega = kwargs.get('vega',True) ab = kwargs.get('ab',not vega) rsr = kwargs.get('rsr',False) nsamples = kwargs.get('nsamples',100) custom = kwargs.get('custom',False) notch = kwargs.get('notch',False) base_unit = u.erg/(u.cm**2 * u.s) return_unit = kwargs.get('unit',base_unit) e_mag = kwargs.get('uncertainty',0.) e_mag = kwargs.get('unc',e_mag) e_mag = kwargs.get('e_mag',e_mag) if not isinstance(mag,u.quantity.Quantity): mag=mag*u.s/u.s if not isinstance(e_mag,u.quantity.Quantity): e_mag=e_mag*mag.unit # check that requested filter is in list filt = checkFilterName(filt) if filt == False: return numpy.nan, numpy.nan # reset filter calculation methods based on filter design if 'ab' in FILTERS[filt]['method']: ab = kwargs.get('ab',True) vega = not ab if 'vega' in FILTERS[filt]['method']: vega = kwargs.get('vega',True) ab = not vega if 'rsr' in FILTERS[filt]['method']: rsr = kwargs.get('rsr',True) # Read in filter if isinstance(custom,bool) and isinstance(notch,bool): fwave,ftrans = filterProfile(filt,**kwargs) # notch filter elif isinstance(custom,bool) and isinstance(notch,list): dn = (notch[1]-notch[0])/1000 fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)*u.micron ftrans = numpy.zeros(len(fwave)) ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1. # custom filter else: fwave,ftrans = custom[0],custom[1] if isinstance(fwave,u.quantity.Quantity) == False: fwave=fwave*u.micron if isinstance(ftrans,u.quantity.Quantity) == True: ftrans=ftrans.value fwave = fwave[~numpy.isnan(ftrans)] ftrans = ftrans[~numpy.isnan(ftrans)] result = [] err = 0. # magnitude -> energy if kwargs.get('reverse',False) == False: if vega == True: # Read in Vega spectrum vwave,vflux = numpy.genfromtxt(os.path.normpath(filterFolder+vegaFile), comments='#', unpack=True, \ missing_values = ('NaN','nan'), filling_values = (numpy.nan)) vwave = vwave[~numpy.isnan(vflux)]*u.micron vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron)) # interpolate Vega onto filter wavelength function v = interp1d(vwave.value,vflux.value,bounds_error=False,fill_value=0.) if rsr: fact = trapz(ftrans*fwave.value*v(fwave.value),fwave.value) else: fact = trapz(ftrans*v(fwave.value),fwave.value) val = 10.**(-0.4*mag.value)*fact*u.erg/(u.cm**2 * u.s) # calculate uncertainty if e_mag.value > 0.: for i in numpy.arange(nsamples): result.append(10.**(-0.4*(mag.value+numpy.random.normal(0,1.)*e_mag.value))*fact) err = (numpy.nanstd(result))*u.erg/(u.cm**2 * u.s) else: err = 0.*u.erg/(u.cm**2 * u.s) elif ab == True: fconst = 3631*u.jansky ftrans = (ftrans*fconst).to(u.erg/(u.cm**2 * u.s * u.micron),equivalencies=u.spectral_density(fwave)) if rsr: fact = trapz(ftrans.value*fwave.value,fwave.value) else: fact = trapz(ftrans.value,fwave.value) val = (10.**(-0.4*mag.value)*fact)*u.erg/(u.cm**2 * u.s) # calculate uncertainty if e_mag.value > 0.: for i in numpy.arange(nsamples): result.append(10.**(-0.4*(mag.value+numpy.random.normal(0,1.)*e_mag.value))*fact) err = (numpy.nanstd(result))*u.erg/(u.cm**2 * u.s) else: err = 0.*u.erg/(u.cm**2 * u.s) else: raise ValueError('\nmagToFlux needs vega or ab method specified') # convert to desired energy units # try: val.to(return_unit) err.to(return_unit) # except: # print('\nWarning: unit {} is not an energy flux unit'.format(return_unit)) try: val.to(base_unit) err.to(base_unit) except: print('\nWarning: cannot convert result to an energy flux unit'.format(base_unit)) return numpy.nan, numpy.nan return val, err # energy -> magnitude # THIS NEEDS TO BE COMPLETED else: print('passed') pass # check that input is an energy flux # try: # mag.to(base_unit) # e_mag.to(base_unit) # except: # raise ValueError('\nInput quantity unit {} is not a flux unit'.format(mag.unit)) def visualizeFilter(filters,verbose=True,xra=[],yra=[0,1.2],**kwargs): ''' :Purpose: Plots a filter profile or set of filter profiles, optionally on top of a spectrum WARNING: THIS CODE IS CURRENTLY UNDER DEVELOPMENT, BUGS MAY BE COMMON ''' filt = copy.deepcopy(filters) wave_unit = kwargs.get('wave_unit',DEFAULT_WAVE_UNIT) # single filter name if isinstance(filt,str): filt = [filt] if isinstance(filt,list): # list of filter names if isinstance(filt[0],str): for f in filt: fc = checkFilterName(f) filt.remove(f) if fc == False: if verbose==True: print('Removed filter {}: not included in SPLAT'.format(f)) else: filt.insert(len(filt),fc) if len(filt) == 0: raise ValueError('Did not recognize any of the input filters {}'.format(filters)) # prep parameters fwave,ftrans = filterProfile(f,**kwargs) if isUnit(fwave): wave_unit = kwargs.get('wave_unit',fwave.unit) xl = kwargs.get('xlabel','Wavelength ({})'.format(wave_unit)) yl = kwargs.get('ylabel','Transmission Curve') legend = [] fig = plt.figure(figsize=kwargs.get('figsize',[5,4])) for i,f in enumerate(filt): fwave,ftrans = filterProfile(f,**kwargs) if isUnit(fwave): fwave.to(wave_unit) else: fwave = fwave*wave_unit if kwargs.get('normalize',False): ftrans = ftrans/numpy.nanmax(ftrans) plt.plot(fwave,ftrans) if len(xra) == 0: xra = [numpy.nanmin(fwave.value),numpy.nanmax(fwave.value)] xra = [numpy.nanmin([xra[0],numpy.nanmin(fwave.value)]),numpy.nanmax([xra[1],numpy.nanmax(fwave.value)])] yra = [yra[0],numpy.nanmax([yra[1],numpy.nanmax(ftrans)])] legend.append(FILTERS[f]['description']) if FILTERS[f]['rsr'] == True: yl = kwargs.get('ylabel','Transmission Curve') # list of notch ranges if isinstance(filt[0],int) or isinstance(filt[0],float): filt = [filt] # list of notch ranges if isinstance(filt[0],list): xl = kwargs.get('xlabel','Wavelength ({})'.format(wave_unit)) yl = kwargs.get('ylabel','Transmission Curve') legend = [] fig = plt.figure(figsize=kwargs.get('figsize',[5,4])) for i,f in enumerate(filt): fwave,ftrans = numpy.linspace(f[0],f[1],1000)*wave_unit,numpy.ones(1000) plt.plot(fwave,ftrans) if len(xra) == 0: xra = [numpy.nanmin(fwave.value),numpy.nanmax(fwave.value)] xra = [numpy.nanmin([xra[0],numpy.nanmin(fwave.value)]),numpy.nanmax([xra[1],numpy.nanmax(fwave.value)])] yra = [yra[0],numpy.nanmax([yra[1],numpy.nanmax(ftrans)])] legend.append('Filter {}'.format(i+1)) else: raise ValueError('Could not parse input {}'.format(filt)) # add a comparison spectrum sp = kwargs.get('spectrum',None) sp = kwargs.get('comparison',sp) if isinstance(sp,splat.core.Spectrum) == True: print(xra) sp.normalize(xra) sp.scale(numpy.nanmax(ftrans)*kwargs.get('comparison_scale',0.8)) plt.plot(sp.wave,sp.flux,color=kwargs.get('comparison_color','k'),alpha=kwargs.get('comparison_alpha',0.5)) legend.append(sp.name) yra = [yra[0],yra[1]*1.1] # finish up plt.xlim(xra) plt.ylim(yra) plt.xlabel(xl) plt.ylabel(yl) plt.legend(legend) # save if desired file = kwargs.get('file','') file = kwargs.get('filename',file) file = kwargs.get('output',file) if file != '': plt.savefig(file) return fig ######################################### ######## SED FITTING TOOLS ######### ### WARNING: THESE ARE EXPERIMENTAL!! ### ######################################### # plan: def modelMagnitudes(verbose=True): ''' this will be a code that calculates a set of magnitudes for a model set's SED models saves to file that could be uploaded pre-save some model magnitudes ''' pass def interpolateMagnitudes(verbose=True): ''' produces an interpolated value for a grid set of model magnitudes ''' pass def compareMagnitudes(mags1,mags2,unc=None,unc2=None,ignore=[],verbose=True): ''' this code compares a set of magnitudes using one of several statistics ''' chi = 0. dm,em = [],[] for f in list(mags1.keys()): if f in list(mags2.keys()) and f in list(unc.keys()) and f not in ignore: dm.append(mags1[f]-mags2[f]) em.append(unc[f]) # find best scale factor dm = numpy.array(dm) em = numpy.array(em) offset = numpy.sum(dm/em**2)/numpy.sum (1./em**2) dmo = numpy.array([m-offset for m in dm]) return numpy.sum((dmo/em)**2), offset def SEDFitGrid(verbose=True): ''' this code will compare a set of magnitudes to a grid of model magnitudes and choose the closest match based on various statistics ''' pass def SEDFitMCMC(verbose=True): ''' this code will conduct a comparison of a set of magnitudes to model magnitudes using an MCMC wrapper, and choose best/average/distribution of parameters ''' pass def SEDFitAmoeba(verbose=True): ''' this code will conduct a comparison of a set of magnitudes to model magnitudes using an Amoeba (Nelder-Mead) wrapper, and choose the closest match ''' pass def SEDVisualize(verbose=True): ''' Visualizes magnitudes on SED scale (flux = lam x F_lam), with option of also comparing to SED spectrum ''' pass ##################################################### ############### MAGNITUDE CLASS ############### #####################################################
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 198, 198, 37811, 198, 492, 3465, 3712, 198, 220, 220, 220, 220, 220, 220, 220, 220, 2312, 389, 262, 5444, 1005...
2.247637
12,696
import datetime ONE_MINUTE = 60 ONE_HOUR = 3600 ONE_DAY = 24 * ONE_HOUR ONE_YEAR = 1 * 365 * ONE_DAY
[ 11748, 4818, 8079, 198, 198, 11651, 62, 23678, 37780, 796, 3126, 198, 11651, 62, 39, 11698, 796, 4570, 405, 198, 11651, 62, 26442, 796, 1987, 1635, 16329, 62, 39, 11698, 198, 11651, 62, 56, 17133, 796, 352, 1635, 21268, 1635, 16329, 6...
2.311111
45
""" Plot up surface or bottom (or any fixed level) errors from a profile object with no z_dim (vertical dimension). Provide an array of netcdf files and mess with the options to get a figure you like. You can define how many rows and columns the plot will have. This script will plot the provided list of netcdf datasets from left to right and top to bottom. A colorbar will be placed right of the figure. """ import xarray as xr import matplotlib.pyplot as plt import numpy as np import sys sys.path.append("/Users/dbyrne/code/COAsT") import coast import pandas as pd #%% File settings run_name = "test" # List of analysis output files. Profiles from each will be plotted # on each axis of the plot fn_list = [ "~/transfer/test_grid.nc", "~/transfer/test_grid.nc", ] # Filename for the output fn_out = "/Users/dbyrne/transfer/surface_gridded_errors_{0}.png".format(run_name) #%% General Plot Settings var_name = "abs_diff_temperature" # Variable name in analysis file to plot # If you used var modified to make gridded data # then this is where to select season etc. save_plot = False # Masking out grid cells that don't contain many points min_points_in_average = 5 name_of_count_variable = "grid_N" # Subplot axes settings n_r = 2 # Number of subplot rows n_c = 2 # Number of subplot columns figsize = (10, 5) # Figure size lonbounds = [-15, 9.5] # Longitude bounds latbounds = [45, 64] # Latitude bounds subplot_padding = 0.5 # Amount of vertical and horizontal padding between plots fig_pad = (0.075, 0.075, 0.1, 0.1) # Figure padding (left, top, right, bottom) # Leave some space on right for colorbar # Scatter opts marker_size = 3 # Marker size cmap = "bwr" # Colormap for normal points clim = (-1, 1) # Color limits for normal points discrete_cmap = True # Discretize colormap cmap_levels = 14 # Labels and Titles fig_title = "SST Errors" # Whole figure title title_fontsize = 13 # Fontsize of title title_fontweight = "bold" # Fontweight to use for title dataset_names = ["CO9p0", "CO9p0", "CO9p0"] # Names to use for labelling plots subtitle_fontsize = 11 # Fontsize for dataset subtitles subtitle_fontweight = "normal" # Fontweight for dataset subtitles # PLOT SEASONS. Make sure n_r = 2 and n_c = 2 # If this option is true, only the first dataset will be plotted, with seasonal # variables on each subplot. The season_suffixes will be added to var_name # for each subplot panel. plot_seasons = True season_suffixes = ["DJF", "MAM", "JJA", "SON"] #%% Read and plotdata # Read all datasets into list ds_list = [xr.open_dataset(dd) for dd in fn_list] n_ds = len(ds_list) n_ax = n_r * n_c # Create plot and flatten axis array f, a = coast.plot_util.create_geo_subplots(lonbounds, latbounds, n_r, n_c, figsize=figsize) a_flat = a.flatten() # Dicretize colormap maybe if discrete_cmap: cmap = plt.cm.get_cmap(cmap, cmap_levels) # Determine if we will extend the colormap or not extend_cbar = [] # Loop over dataset for ii in range(n_ax): ur_index = np.unravel_index(ii, (n_r, n_c)) # Select season if required if plot_seasons: ds = ds_list[0] var_ii = var_name + "_{0}".format(season_suffixes[ii]) N_var = "{0}_{1}".format(name_of_count_variable, season_suffixes[ii]) a_flat[ii].text(0.05, 1.02, season_suffixes[ii], transform=a_flat[ii].transAxes, fontweight="bold") else: ds = ds_list[ii] var_ii = var_name a_flat[ii].set_title(dataset_names[ii], fontsize=subtitle_fontsize, fontweight=subtitle_fontweight) N_var = name_of_count_variable data = ds[var_ii].values count_var = ds[N_var] data[count_var < min_points_in_average] = np.nan # Scatter and set title pc = a_flat[ii].pcolormesh( ds.longitude, ds.latitude, data, cmap=cmap, vmin=clim[0], vmax=clim[1], ) # Will we extend the colorbar for this dataset? extend_cbar.append(coast.plot_util.determine_colorbar_extension(data, clim[0], clim[1])) # Set Figure title f.suptitle(fig_title, fontsize=title_fontsize, fontweight=title_fontweight) # Set tight figure layout f.tight_layout(w_pad=subplot_padding, h_pad=subplot_padding) f.subplots_adjust(left=(fig_pad[0]), bottom=(fig_pad[1]), right=(1 - fig_pad[2]), top=(1 - fig_pad[3])) # Handle colorbar -- will we extend it? if "both" in extend_cbar: extend = "both" elif "max" in extend_cbar and "min" in extend_cbar: extend = "both" elif "max" in extend_cbar: extend = "max" elif "min" in extend_cbar: extend = "min" else: extend = "neither" cbar_ax = f.add_axes([(1 - fig_pad[2] + fig_pad[2] * 0.15), 0.15, 0.025, 0.7]) f.colorbar(pc, cax=cbar_ax, extend=extend) # Save plot maybe if save_plot: f.savefig(fn_out)
[ 37811, 198, 43328, 510, 4417, 393, 4220, 357, 273, 597, 5969, 1241, 8, 8563, 422, 257, 7034, 2134, 198, 4480, 645, 1976, 62, 27740, 357, 1851, 605, 15793, 737, 44290, 281, 7177, 286, 2010, 66, 7568, 3696, 290, 220, 198, 37348, 351, ...
2.590217
1,840
from sklearn.feature_extraction.text import TfidfVectorizer def compute_tf_idf(corpus): """Computing term frequency (tf) - inverse document frequency (idf). :param corpus: List of documents. :returns: tf-idf of corpus. """ return TfidfVectorizer().fit_transform(corpus) if __name__ == '__main__': sample_corpus = [ 'This is sample document.', 'another random document.', 'third sample document text' ] print(compute_tf_idf(sample_corpus))
[ 6738, 1341, 35720, 13, 30053, 62, 2302, 7861, 13, 5239, 1330, 309, 69, 312, 69, 38469, 7509, 198, 198, 4299, 24061, 62, 27110, 62, 312, 69, 7, 10215, 79, 385, 2599, 198, 220, 220, 220, 37227, 5377, 48074, 3381, 8373, 357, 27110, 8, ...
2.55102
196
#!/usr/bin/env python3 from __future__ import print_function import socket import threading try: import socketserver except ImportError: import SocketServer as socketserver import time import os import signal import sys import struct import errno from fprime.constants import DATA_ENCODING from optparse import OptionParser __version__ = 0.1 __date__ = "2015-04-03" __updated__ = "2016-04-07" # Universal server id global SERVER = None LOCK = None shutdown_event = threading.Event() FSW_clients = [] GUI_clients = [] FSW_ids = [] GUI_ids = [] def main(argv=None): global SERVER, LOCK program_name = os.path.basename(sys.argv[0]) program_license = "Copyright 2015 user_name (California Institute of Technology) \ ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged." program_version = "v0.1" program_build_date = "%s" % __updated__ program_version_string = "%%prog %s (%s)" % (program_version, program_build_date) program_longdesc = ( """""" # optional - give further explanation about what the program does ) if argv is None: argv = sys.argv[1:] try: parser = OptionParser( version=program_version_string, epilog=program_longdesc, description=program_license, ) parser.add_option( "-p", "--port", dest="port", action="store", type="int", help="Set threaded tcp socket server port [default: %default]", default=50007, ) parser.add_option( "-i", "--host", dest="host", action="store", type="string", help="Set threaded tcp socket server ip [default: %default]", default="127.0.0.1", ) # process options (opts, args) = parser.parse_args(argv) HOST = opts.host PORT = opts.port server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler) udp_server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler) # Hopefully this will allow address reuse and server to restart immediately server.allow_reuse_address = True SERVER = server LOCK = server.lock_obj ip, port = server.server_address print("TCP Socket Server listening on host addr %s, port %s" % (HOST, PORT)) # Start a thread with the server -- that thread will then start one # more thread for each request server_thread = threading.Thread(target=server.serve_forever) udp_server_thread = threading.Thread(target=udp_server.serve_forever) signal.signal(signal.SIGINT, signal_handler) server_thread.daemon = False server_thread.start() udp_server_thread.daemon = False udp_server_thread.start() while not shutdown_event.is_set(): server_thread.join(timeout=5.0) udp_server_thread.join(timeout=5.0) print("shutdown from main thread") SERVER.shutdown() SERVER.server_close() udp_server.shutdown() udp_server.server_close() time.sleep(1) except Exception as e: indent = len(program_name) * " " sys.stderr.write(program_name + ": " + repr(e) + "\n") sys.stderr.write(indent + " for help use --help\n") return 2 if __name__ == "__main__": sys.exit(main())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 17802, 198, 11748, 4704, 278, 198, 198, 28311, 25, 198, 220, 220, 220, 1330, 37037, 18497, 198, 16341, 17267, 12331, 25, ...
2.274903
1,546
import logging import sched import time ( MENU, EDIT_COIN_LIST, EDIT_USER_CONFIG, DELETE_DB, UPDATE_TG, UPDATE_BTB, PANIC_BUTTON, CUSTOM_SCRIPT, ) = range(8) BOUGHT, BUYING, SOLD, SELLING = range(4) logging.basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO ) logger = logging.getLogger("btb_manager_telegram_logger") scheduler = sched.scheduler(time.time, time.sleep)
[ 11748, 18931, 198, 11748, 6038, 198, 11748, 640, 198, 198, 7, 198, 220, 220, 220, 41597, 52, 11, 198, 220, 220, 220, 48483, 62, 8220, 1268, 62, 45849, 11, 198, 220, 220, 220, 48483, 62, 29904, 62, 10943, 16254, 11, 198, 220, 220, ...
2.199029
206
import modutil mod, __getattr__ = modutil.lazy_import(__name__, ['tests.test_data.A', '.B', '.C as still_C'])
[ 11748, 953, 22602, 628, 198, 4666, 11, 11593, 1136, 35226, 834, 796, 953, 22602, 13, 75, 12582, 62, 11748, 7, 834, 3672, 834, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
1.797619
84
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import tqdm import torch import pickle import resource import numpy as np import matplotlib.pyplot as plt from args import parse_args from modelSummary import model_dict from pytorchtools import load_from_file from torch.utils.data import DataLoader from helperfunctions import mypause, stackall_Dict from loss import get_seg2ptLoss from utils import get_nparams, get_predictions from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir))) rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (2048*10, rlimit[1])) #%% if __name__ == '__main__': args = parse_args() device=torch.device("cuda") torch.cuda.manual_seed(12) if torch.cuda.device_count() > 1: print('Moving to a multiGPU setup.') args.useMultiGPU = True else: args.useMultiGPU = False torch.backends.cudnn.deterministic=False if args.model not in model_dict: print("Model not found.") print("valid models are: {}".format(list(model_dict.keys()))) exit(1) LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname) path2model = os.path.join(LOGDIR, 'weights') path2checkpoint = os.path.join(LOGDIR, 'checkpoints') path2writer = os.path.join(LOGDIR, 'TB.lock') path2op = os.path.join(os.getcwd(), 'op', str(args.curObj)) os.makedirs(LOGDIR, exist_ok=True) os.makedirs(path2model, exist_ok=True) os.makedirs(path2checkpoint, exist_ok=True) os.makedirs(path2writer, exist_ok=True) os.makedirs(path2op, exist_ok=True) model = model_dict[args.model] netDict = load_from_file([args.loadfile, os.path.join(path2checkpoint, 'checkpoint.pt')]) startEp = netDict['epoch'] if 'epoch' in netDict.keys() else 0 if 'state_dict' in netDict.keys(): model.load_state_dict(netDict['state_dict']) print('Parameters: {}'.format(get_nparams(model))) model = model if not args.useMultiGPU else torch.nn.DataParallel(model) model = model.to(device).to(args.prec) f = open(os.path.join('curObjects', 'baseline', 'cond_'+str(args.curObj)+'.pkl'), 'rb') _, _, testObj = pickle.load(f) testObj.path2data = os.path.join(args.path2data, 'Datasets', 'All') testObj.augFlag = False testloader = DataLoader(testObj, batch_size=args.batchsize, shuffle=False, num_workers=args.workers, drop_last=False) if args.disp: fig, axs = plt.subplots(nrows=1, ncols=1) #%% accLoss = 0.0 imCounter = 0 ious = [] dists_pupil_latent = [] dists_pupil_seg = [] dists_iris_latent = [] dists_iris_seg = [] model.eval() opDict = {'id':[], 'archNum': [], 'archName': [], 'code': [], 'scores':{'iou':[], 'lat_dst':[], 'seg_dst':[]}, 'pred':{'pup_latent_c':[], 'pup_seg_c':[], 'iri_latent_c':[], 'iri_seg_c':[], 'mask':[]}, 'gt':{'pup_c':[], 'mask':[]}} with torch.no_grad(): for bt, batchdata in enumerate(tqdm.tqdm(testloader)): img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata out_tup = model(img.to(device).to(args.prec), labels.to(device).long(), pupil_center.to(device).to(args.prec), elNorm.to(device).to(args.prec), spatialWeights.to(device).to(args.prec), distMap.to(device).to(args.prec), cond.to(device).to(args.prec), imInfo[:, 2].to(device).to(torch.long), 0.5) output, elOut, latent, loss = out_tup latent_pupil_center = elOut[:, 0:2].detach().cpu().numpy() latent_iris_center = elOut[:, 5:7].detach().cpu().numpy() _, seg_pupil_center = get_seg2ptLoss(output[:, 2, ...].cpu(), pupil_center, temperature=4) _, seg_iris_center = get_seg2ptLoss(-output[:, 0, ...].cpu(), iris_center, temperature=4) loss = loss if args.useMultiGPU else loss.mean() accLoss += loss.detach().cpu().item() predict = get_predictions(output) iou, iou_bySample = getSeg_metrics(labels.numpy(), predict.numpy(), cond[:, 1].numpy())[1:] latent_pupil_dist, latent_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(), latent_pupil_center, cond[:,0].numpy(), img.shape[2:], True) # Unnormalizes the points seg_pupil_dist, seg_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(), seg_pupil_center, cond[:,1].numpy(), img.shape[2:], True) # Unnormalizes the points latent_iris_dist, latent_iris_dist_bySample = getPoint_metric(iris_center.numpy(), latent_iris_center, cond[:,1].numpy(), img.shape[2:], True) # Unnormalizes the points seg_iris_dist, seg_iris_dist_bySample = getPoint_metric(iris_center.numpy(), seg_iris_center, cond[:,1].numpy(), img.shape[2:], True) # Unnormalizes the points dists_pupil_latent.append(latent_pupil_dist) dists_iris_latent.append(latent_iris_dist) dists_pupil_seg.append(seg_pupil_dist) dists_iris_seg.append(seg_iris_dist) ious.append(iou) pup_latent_c = unnormPts(latent_pupil_center, img.shape[2:]) pup_seg_c = unnormPts(seg_pupil_center, img.shape[2:]) iri_latent_c = unnormPts(latent_iris_center, img.shape[2:]) iri_seg_c = unnormPts(seg_iris_center, img.shape[2:]) dispI = generateImageGrid(img.numpy().squeeze(), predict.numpy(), elOut.detach().cpu().numpy().reshape(-1, 2, 5), pup_seg_c, cond.numpy(), override=True, heatmaps=False) for i in range(0, img.shape[0]): archNum = testObj.imList[imCounter, 1] opDict['id'].append(testObj.imList[imCounter, 0]) opDict['code'].append(latent[i,...].detach().cpu().numpy()) opDict['archNum'].append(archNum) opDict['archName'].append(testObj.arch[archNum]) opDict['pred']['pup_latent_c'].append(pup_latent_c[i, :]) opDict['pred']['pup_seg_c'].append(pup_seg_c[i, :]) opDict['pred']['iri_latent_c'].append(iri_latent_c[i, :]) opDict['pred']['iri_seg_c'].append(iri_seg_c[i, :]) if args.test_save_op_masks: opDict['pred']['mask'].append(predict[i,...].numpy().astype(np.uint8)) opDict['scores']['iou'].append(iou_bySample[i, ...]) opDict['scores']['lat_dst'].append(latent_pupil_dist_bySample[i, ...]) opDict['scores']['seg_dst'].append(seg_pupil_dist_bySample[i, ...]) opDict['gt']['pup_c'].append(pupil_center[i,...].numpy()) if args.test_save_op_masks: opDict['gt']['mask'].append(labels[i,...].numpy().astype(np.uint8)) imCounter+=1 if args.disp: if bt == 0: h_im = plt.imshow(dispI.permute(1, 2, 0)) plt.pause(0.01) else: h_im.set_data(dispI.permute(1, 2, 0)) mypause(0.01) opDict = stackall_Dict(opDict) ious = np.stack(ious, axis=0) ious = np.nanmean(ious, axis=0) print('mIoU: {}. IoUs: {}'.format(np.mean(ious), ious)) print('Latent space PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_latent), np.nanstd(dists_pupil_latent))) print('Segmentation PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_seg), np.nanstd(dists_pupil_seg))) print('Latent space IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_latent), np.nanstd(dists_iris_latent))) print('Segmentation IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_seg), np.nanstd(dists_iris_seg))) print('--- Saving output directory ---') f = open(os.path.join(path2op, 'opDict.pkl'), 'wb') pickle.dump(opDict, f) f.close()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 256, 80, 36020, 198, 11748, 28034, 198, 11748, 2298, 293, 198, 11...
1.670149
6,221
#!/usr/bin/env python """ <Program Name> test_util.py <Author> Konstantin Andrianov. <Started> February 1, 2013. <Copyright> See LICENSE for licensing information. <Purpose> Unit test for 'util.py' """ # Help with Python 3 compatibility, where the print statement is a function, an # implicit relative import is invalid, and the '/' operator performs true # division. Example: print 'hello world' raises a 'SyntaxError' exception. from __future__ import print_function from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import os import sys import gzip import shutil import logging import tempfile import unittest import tuf import tuf.log import tuf.hash import tuf.util import tuf.unittest_toolbox as unittest_toolbox import tuf._vendor.six as six logger = logging.getLogger('tuf.test_util') # Run unit test. if __name__ == '__main__': unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 27, 15167, 6530, 29, 198, 220, 1332, 62, 22602, 13, 9078, 198, 198, 27, 13838, 29, 198, 220, 17431, 18797, 259, 843, 4484, 709, 13, 198, 198, 27, 10434, 276, 29, 1...
3.157718
298
from django import forms
[ 198, 6738, 42625, 14208, 1330, 220, 5107 ]
3.714286
7
from functools import lru_cache from math import cos, sin import scipy from scipy.ndimage import affine_transform import numpy as np def rotation_axis_by_angle(data, angles=(90, 90, 90), times=(2, 2, 2)): """ Get true data matrix(Cartesian coordinates) from relative data matrix (Non-Cartesian coordinates). Parameters ---------- data: np.ndarray data with shape (nx,ny,nz). angles:tuple 3 angle of x, y, z z angle is the intersection angle of x,y, y angle is the intersection angle of x,z, x angle is the intersection angle of y,z. times: tuple expand the multiple of the matrix. """ matrix = get_matrix(angles=angles, inverse=True) return rotation_axis_by_matrix(data, matrix, times=times) def rotation_axis_by_matrix(data, matrix, times=(2, 2, 2)): """ Get true data matrix(Cartesian coordinates) from relative data matrix (Non-Cartesian coordinates). Parameters ---------- data: np.ndarray data with shape (nx,ny,nz). matrix:tuple See Also ``get_matrix`` times: tuple expand the multiple of the matrix. """ dims_old = data.shape dims = tuple([int(i * j) for i, j in zip(dims_old, times)]) n_data = np.zeros(dims) d0s = int((dims[0] - dims_old[0]) / 2) d1s = int((dims[1] - dims_old[1]) / 2) d2s = int((dims[2] - dims_old[2]) / 2) n_data[d0s:d0s + dims_old[0], d1s:d1s + dims_old[1], d2s:d2s + dims_old[2]] = data coords = np.meshgrid(range(dims[0]), range(dims[1]), range(dims[2]), indexing="ij") xy_coords = np.vstack([coords[0].reshape(-1), coords[1].reshape(-1), coords[2].reshape(-1)]) # apply the transformation matrix # please note: the coordinates are not homogeneous. # for the 3D case, I've added code for homogeneous coordinates, you might want to look at that # please also note: rotation is always around the origin: # since I want the origin to be in the image center, I had to substract dim/2, rotate, then add it again dims2 = np.array([i / 2 for i in dims]) dims2 = dims2.reshape(-1, 1) xy_coords = np.dot(matrix, xy_coords - dims2) + dims2 # # # undo the stacking and reshaping x = xy_coords[0, :] y = xy_coords[1, :] z = xy_coords[2, :] x = x.reshape(dims, order="A") y = y.reshape(dims, order="A") z = z.reshape(dims, order="A") new_coords = [x, y, z] # use map_coordinates to sample values for the new image new_img = scipy.ndimage.map_coordinates(n_data, new_coords, order=2) return new_img def _coords(points, angles=(90, 90, 90), times=(2, 2, 2)): """ Parameters ---------- points: np.darray percent of shape. key points with shape(n_sample,3) angles:tuple 3 angle of x, y, z z angle is the intersection angle of x,y, y angle is the intersection angle of x,z, x angle is the intersection angle of y,z. times: tuple expand the multiple of the matrix. """ dims_old = [1, 1, 1] matrix = get_matrix(angles=angles) times = np.array(list(times)) times = times.reshape((-1, 1)) dims_old = np.array(dims_old) dims_old = dims_old.reshape(-1, 1) dims2 = dims_old / 2 points = points.T * dims_old xy_coords = np.dot(matrix, points - dims2) + dims2 xy_coords = xy_coords + (times / 2 - 0.5) return xy_coords def rote_index(points, data, angles=(90, 90, 90), times=(2, 2, 2), data_init=True, return_type="float"): """ Parameters ---------- points: np.darray key points with shape(n_sample,3) percent of shape. data: np.ndarray or tuple data or data.shape data_init:bool The data is the init data (relative location) or Cartesian coordinates.(rotation_axis_by_angle) angles:tuple 3 angle of x, y, z z angle is the intersection angle of x,y, y angle is the intersection angle of x,z, x angle is the intersection angle of y,z. times: tuple expand the multiple of the matrix. return_type:str "float", "int", "percent" for "float", "int" return the new index for "percent" return the new percent. """ data_shape = data.shape if isinstance(data, np.ndarray) else data if data_init: times_np = np.array([1,1,1]) else: times_np = np.array(times) dims = data_shape dims = np.array(dims).reshape((-1, 1)) xy_coords = _coords(points, angles=angles, times=times) if return_type == "percent": return xy_coords if return_type == "float": return (dims * xy_coords/times_np).T else: return np.round((dims * xy_coords/times_np).T).astype(int) # for rounding off: .4 -, .5 + def rote_value(points, data, angles=(90, 90, 90), times=(2, 2, 2), method="in", data_type="td"): """ Parameters ---------- points: np.darray key points with shape(n_sample,3) percent of shape. data: np.ndarray data angles:tuple 3 angle of x, y, z z angle is the intersection angle of x,y, y angle is the intersection angle of x,z, x angle is the intersection angle of y,z. times: tuple expand the multiple of the matrix. data_type:str if "init" the data accept init data (elfcar, chgcar). see rotation_axis_by_angle. if "td" the data accept true matrix data . see rotation_axis_by_angle. method:str if "near" , return nearest site's value. if "inter" , return the interpolation value. """ if data_type == "td": new_data = data else: new_data = rotation_axis_by_angle(data, angles=angles, times=times) if method == "near": ind = rote_index(points, data, angles=angles, times=times, return_type="int") new_value = np.array([new_data[tuple(i)] for i in ind.T]) return new_value else: ind = rote_index(points, data, angles=angles, times=times, return_type="float") new_value = scipy.ndimage.map_coordinates(new_data, ind, order=2) return new_value
[ 6738, 1257, 310, 10141, 1330, 300, 622, 62, 23870, 198, 6738, 10688, 1330, 8615, 11, 7813, 198, 198, 11748, 629, 541, 88, 198, 198, 6738, 629, 541, 88, 13, 358, 9060, 1330, 1527, 500, 62, 35636, 198, 11748, 299, 32152, 355, 45941, 6...
2.35919
2,617
#!/usr/bin/env python import sys #lolafile = open("ex-small.graph", "r") source = 0 target = 0 lowlink = 0 trans = "bla" print("digraph {") with open(sys.argv[1]) as lolafile: for line in lolafile: if len(line) == 1: continue linelist = line.split(" ") if "STATE" in linelist: source = linelist[1] lowlink = linelist[3].rstrip() if "->" in linelist: trans = linelist[0] target = linelist[2].rstrip() print(''' {} -> {} [label="{}", lowlink="{}"];'''.format(source, target, trans, lowlink)) print("}")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 25064, 198, 198, 2, 47288, 1878, 576, 796, 1280, 7203, 1069, 12, 17470, 13, 34960, 1600, 366, 81, 4943, 198, 198, 10459, 220, 796, 657, 198, 16793, 220, 796, 657, 198, 75...
2.15942
276
import re import urllib.parse from django.shortcuts import render, get_object_or_404 from django.http import HttpResponse, JsonResponse from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from .models import Definition RE_HANGUL = re.compile(r'[(]*[\uAC00-\uD7AF]+[\uAC00-\uD7AF (),;]*', re.IGNORECASE)
[ 11748, 302, 198, 11748, 2956, 297, 571, 13, 29572, 198, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 11, 651, 62, 15252, 62, 273, 62, 26429, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 11, 449, 1559, 31077, 1...
2.634921
126
import globals
[ 11748, 15095, 874, 628 ]
4
4
#!/usr/bin/env python from __future__ import print_function ''' 1\taaaa~^~bbbb~^~cccc 2\tdddd~^~EEEE~^~ffff ''' import sys ARR_DELIM = '~^~' for row in sys.stdin: row = row.strip() sent_id, lemmas = row.split('\t') lemmas = lemmas.split(ARR_DELIM) for lemma in lemmas: print('{}\t{}'.format(sent_id, lemma))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 7061, 6, 198, 16, 59, 83, 24794, 93, 61, 93, 11848, 11848, 93, 61, 93, 535, 535, 198, 17, 59, 83, 1860, 1860, 93, 61, 93,...
2.011976
167
from django.urls import path from . import views urlpatterns = [ path('', views.SupplierList.as_view(), name='supplier_list'), path('view/<int:pk>', views.SupplierView.as_view(), name='supplier_view'), path('new', views.SupplierCreate.as_view(), name='supplier_new'), path('view/<int:pk>', views.SupplierView.as_view(), name='supplier_view'), path('edit/<int:pk>', views.SupplierUpdate.as_view(), name='supplier_edit'), path('delete/<int:pk>', views.SupplierDelete.as_view(), name='supplier_delete'), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 3256, 5009, 13, 15979, 2505, 8053, 13, 292, 62, 1177, 22784, 1438, 11639, 18608, 2505...
2.64
200
import yaml __config=None
[ 11748, 331, 43695, 628, 198, 834, 11250, 28, 14202, 198 ]
2.8
10
APP_PROFILE_DIRECTORY_NAME = 'lazies-cmd' DOSKEY_FILE_NAME = 'doskey.bat' AUTO_RUN_REGISTRY_NAME = 'AutoRun'
[ 24805, 62, 31190, 25664, 62, 17931, 23988, 15513, 62, 20608, 796, 705, 75, 1031, 444, 12, 28758, 6, 198, 35178, 20373, 62, 25664, 62, 20608, 796, 705, 37427, 2539, 13, 8664, 6, 198, 39371, 46, 62, 49, 4944, 62, 31553, 1797, 40405, 6...
2.18
50
#!/usr/bin/env python import os, sys from Bio import SeqIO if __name__ == "__main__": from argparse import ArgumentParser parser = ArgumentParser("Get sequences from a fasta file from a list") parser.add_argument("fasta_filename", help="Input fasta filename to extract sequences from") parser.add_argument("list_filename", help="List of sequence IDs to extract") args = parser.parse_args() get_seqs_from_list(args.fasta_filename, args.list_filename)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 28686, 11, 25064, 198, 6738, 16024, 1330, 1001, 80, 9399, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 422, 1822, 29572, 1330, 45751, 4...
3.18
150
import time import json import base64 import msgpack from schema import Schema, And, Optional from datetime import datetime from algosdk import mnemonic from algosdk.account import address_from_private_key from algosdk.error import * from algosdk.future.transaction import PaymentTxn from inequality_indexes import * from algo_query import * def wait_for_confirmation(algod_client, transaction_id, timeout): """Wait until the transaction is confirmed or rejected, or until 'timeout' number of rounds have passed. Args: algod_client (AlgodClient): Algod Client transaction_id (str): the transaction to wait for timeout (int): maximum number of rounds to wait Returns: (dict): pending transaction information, or throws an error if the transaction is not confirmed or rejected in the next timeout rounds """ start_round = algod_client.status()["last-round"] + 1 current_round = start_round while current_round < start_round + timeout: algod_client.status_after_block(current_round) try: pending_txn = algod_client.pending_transaction_info(transaction_id) except Exception: return if pending_txn.get("confirmed-round", 0) > 0: return pending_txn elif pending_txn["pool-error"]: raise Exception( 'pool error: {}'.format(pending_txn["pool-error"])) current_round += 1 raise Exception( 'pending tx not found in timeout rounds, timeout value = : {}'.format( timeout))
[ 198, 11748, 640, 198, 11748, 33918, 198, 11748, 2779, 2414, 198, 11748, 31456, 8002, 198, 6738, 32815, 1330, 10011, 2611, 11, 843, 11, 32233, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 435, 70, 418, 34388, 1330, 285, 77, 50016,...
2.733102
577
##### # # This class is part of the Programming the Internet of Things # project, and is available via the MIT License, which can be # found in the LICENSE file at the top level of this repository. # # Copyright (c) 2020 by Andrew D. King # import logging import unittest from programmingtheiot.cda.system.SystemMemUtilTask import SystemMemUtilTask if __name__ == "__main__": unittest.main()
[ 4242, 2, 198, 2, 220, 198, 2, 770, 1398, 318, 636, 286, 262, 30297, 262, 4455, 286, 11597, 198, 2, 1628, 11, 290, 318, 1695, 2884, 262, 17168, 13789, 11, 543, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 379, 262, 1353, ...
3.25
124