max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
bomber_monkey/features/tile/tile_killer_system.py | MonkeyPatchIo/bomber-monkey | 0 | 6624051 | <filename>bomber_monkey/features/tile/tile_killer_system.py
from typing import Any, Callable
from bomber_monkey.features.board.board import Tiles
from bomber_monkey.features.physics.rigid_body import RigidBody
from bomber_monkey.features.tile.tile_killer import TileKiller
from python_ecs.ecs import System, Simulator
class TileKillerSystem(System):
    """ECS system that clears board tiles hit by entities carrying a TileKiller."""

    def __init__(self, spawner: Callable[[RigidBody], Any] = None):
        # Process only entities that have both a TileKiller and a RigidBody.
        super().__init__([TileKiller, RigidBody])
        self.spawner = spawner

    def update(self, sim: Simulator, dt: float, killer: TileKiller, body: RigidBody) -> None:
        """Empty the tile under *body* when it matches the killer's target tile."""
        target_cell = sim.context.board.by_pixel(body.pos)
        if target_cell.tile != killer.tile:
            return
        target_cell.tile = Tiles.EMPTY
        # Optional callback, e.g. to drop an item where the tile was destroyed.
        if self.spawner is not None:
            self.spawner(body)
| <filename>bomber_monkey/features/tile/tile_killer_system.py
from typing import Any, Callable
from bomber_monkey.features.board.board import Tiles
from bomber_monkey.features.physics.rigid_body import RigidBody
from bomber_monkey.features.tile.tile_killer import TileKiller
from python_ecs.ecs import System, Simulator
class TileKillerSystem(System):
def __init__(self, spawner: Callable[[RigidBody], Any] = None):
super().__init__([TileKiller, RigidBody])
self.spawner = spawner
def update(self, sim: Simulator, dt: float, killer: TileKiller, body: RigidBody) -> None:
cell = sim.context.board.by_pixel(body.pos)
if cell.tile == killer.tile:
cell.tile = Tiles.EMPTY
if self.spawner is not None:
self.spawner(body)
| none | 1 | 2.469701 | 2 | |
userinput/userinput.py | LucaCappelletti94/userinput | 1 | 6624052 | <reponame>LucaCappelletti94/userinput
from typing import Callable, Union, List
import json
import os
from inspect import isfunction
import getpass
from .utils import default_validators, closest, default_sanitizers, clear
def normalize_validators(validator: str) -> List[Callable]:
    """Resolve a validator name to its callback, suggesting a close match on a miss."""
    if validator in default_validators:
        return default_validators.get(validator)
    # Unknown name: try to find the closest known validator for the error message.
    candidate = closest(validator, default_validators.keys())
    if candidate is None:
        raise ValueError("Given validator callback {validator} is not available.".format(
            validator=validator))
    raise ValueError("Given validator callback {validator} is invalid, did you mean {candidate}?.".format(
        validator=validator,
        candidate=candidate
    ))
def normalize_sanitizers(sanitizer: str) -> List[Callable]:
    """Resolve a sanitizer name to its callback, suggesting a close match on a miss."""
    if sanitizer in default_sanitizers:
        return default_sanitizers.get(sanitizer)
    # Unknown name: try to find the closest known sanitizer for the error message.
    candidate = closest(sanitizer, default_sanitizers.keys())
    if candidate is None:
        raise ValueError("Given sanitizer callback {sanitizer} is not available.".format(
            sanitizer=sanitizer))
    raise ValueError("Given sanitizer callback {sanitizer} is invalid, did you mean {candidate}?.".format(
        sanitizer=sanitizer,
        candidate=candidate
    ))
def _is_input_valid(value: str, validators: List) -> bool:
return not validators or all([v(value) for v in validators])
def multi_line_input(label: str):
    """Read stdin lines (prompting with *label* each time) until an empty line,
    then return them joined with newlines."""
    lines = []
    while True:
        line = input(label)
        if line == '':
            break
        lines.append(line)
    return "\n".join(lines)
def userinput(
name: str,
label: str = "Please insert {name}",
default=None,
always_use_default: bool = False,
hidden: bool = False,
validator: Union[Callable, List[Union[Callable, str]], str] = None,
maximum_attempts: int = 100,
recoverer: Callable = None,
sanitizer: Union[Callable, List[Union[Callable, str]], str] = None,
cache: bool = True,
cache_path: str = ".userinput.json",
delete_cache: bool = False,
auto_clear: bool = False,
multi_line: bool = False
) -> str:
"""Default handler for uniform user experience.
Parameters
----------------------------------------------
name:str,
Name of the expected input, used for storing.
label:str="Please insert {name}",
Label shown to the user.
default=None,
Default value to use.
hidden:bool=False,
Whetever to display or not user input.
always_use_default:bool=False,
Whetever to always use the default, bypassing the user request.
validator:Union[Callable, List[Union[Callable, str]], str]=None,
Single or list of validators for the user input.
maximum_attempts:int=100,
Maximum available attempts for a given input.
By default 100 to avoid deadlocks.
recoverer: Callable, str]=None,
Single or list of recoverer to use when the input fails but could be
sanitized into something acceptable.
sanitizer:Union[Callable, List[Union[Callable, str]], str]=None,
Single or list of sanitizers for the user input.
cache:bool=True,
Whetever to load and store input values.
cache_path:str=".userinput.json",
Default path to store and load cache.
delete_cache:bool=False,
Whetever to delete cache after reading it.
auto_clear:bool=False,
Whetever to call clear stdout after user input is determined.
"""
defaults = {}
if cache and os.path.exists(cache_path):
with open(cache_path, "r") as f:
defaults = json.load(f)
default = defaults.get(name, default)
if isinstance(validator, str) or isfunction(validator):
validators = [validator]
else:
validators = []
if isinstance(sanitizer, str) or isfunction(sanitizer):
sanitizers = [sanitizer]
else:
sanitizers = []
validators = [
normalize_validators(validator) if isinstance(validator, str) else validator for validator in validators
]
sanitizers = [
normalize_sanitizers(sanitizer) if isinstance(sanitizer, str) else sanitizer for sanitizer in sanitizers
]
attempts = 0
input_function = None
if hidden:
input_function = getpass.getpass
elif multi_line:
input_function = multi_line_input
else:
input_function = input
while maximum_attempts is None or attempts < maximum_attempts:
value = None
if not always_use_default or not _is_input_valid(default, validators):
value = input_function("{label}{default}: ".format(
label=label.format(name=name),
default="" if default is None else " [{}]".format(default)
)).strip()
if not value:
value = default
if recoverer is not None and not _is_input_valid(value, validators):
recoverered = recoverer(value)
if recoverered is not None:
value = recoverered
if _is_input_valid(value, validators):
if cache and not delete_cache and (name not in defaults or value != defaults[name]):
with open(cache_path, "w") as f:
defaults[name] = value
json.dump(defaults, f, indent=4)
if delete_cache:
os.remove(cache_path)
if auto_clear:
clear()
for sanitizer in sanitizers:
value = sanitizer(value)
return value
attempts += 1
print("Given value '{value}' is not valid.".format(value=value))
raise ValueError(
"User attempted to answer query called {name} more than {attempts} times!".format(
name=name,
attempts=attempts
)
)
| from typing import Callable, Union, List
import json
import os
from inspect import isfunction
import getpass
from .utils import default_validators, closest, default_sanitizers, clear
def normalize_validators(validator: str) -> List[Callable]:
if validator not in default_validators:
candidate = closest(validator, default_validators.keys())
if candidate is None:
raise ValueError("Given validator callback {validator} is not available.".format(
validator=validator))
raise ValueError("Given validator callback {validator} is invalid, did you mean {candidate}?.".format(
validator=validator,
candidate=candidate
))
return default_validators.get(validator)
def normalize_sanitizers(sanitizer: str) -> List[Callable]:
if sanitizer not in default_sanitizers:
candidate = closest(sanitizer, default_sanitizers.keys())
if candidate is None:
raise ValueError("Given sanitizer callback {sanitizer} is not available.".format(
sanitizer=sanitizer))
raise ValueError("Given sanitizer callback {sanitizer} is invalid, did you mean {candidate}?.".format(
sanitizer=sanitizer,
candidate=candidate
))
return default_sanitizers.get(sanitizer)
def _is_input_valid(value: str, validators: List) -> bool:
return not validators or all([v(value) for v in validators])
def multi_line_input(label: str):
return "\n".join(iter(lambda: input(label), ''))
def userinput(
        name: str,
        label: str = "Please insert {name}",
        default=None,
        always_use_default: bool = False,
        hidden: bool = False,
        validator: Union[Callable, List[Union[Callable, str]], str] = None,
        maximum_attempts: int = 100,
        recoverer: Callable = None,
        sanitizer: Union[Callable, List[Union[Callable, str]], str] = None,
        cache: bool = True,
        cache_path: str = ".userinput.json",
        delete_cache: bool = False,
        auto_clear: bool = False,
        multi_line: bool = False
) -> str:
    """Default handler for uniform user experience.

    Parameters
    ----------------------------------------------
    name: str,
        Name of the expected input, used for storing.
    label: str = "Please insert {name}",
        Label shown to the user.
    default = None,
        Default value to use.
    always_use_default: bool = False,
        Whether to always use the default, bypassing the user request.
    hidden: bool = False,
        Whether to display or not user input.
    validator: Union[Callable, List[Union[Callable, str]], str] = None,
        Single or list of validators for the user input.
    maximum_attempts: int = 100,
        Maximum available attempts for a given input.
        By default 100 to avoid deadlocks.
    recoverer: Callable = None,
        Recoverer to use when the input fails validation but could be
        repaired into something acceptable.
    sanitizer: Union[Callable, List[Union[Callable, str]], str] = None,
        Single or list of sanitizers for the user input.
    cache: bool = True,
        Whether to load and store input values.
    cache_path: str = ".userinput.json",
        Default path to store and load cache.
    delete_cache: bool = False,
        Whether to delete cache after reading it.
    auto_clear: bool = False,
        Whether to clear stdout after user input is determined.
    multi_line: bool = False,
        Whether to read a multi-line value (terminated by an empty line).
    """
    # Load previously cached answers so the last stored value becomes the default.
    defaults = {}
    if cache and os.path.exists(cache_path):
        with open(cache_path, "r") as f:
            defaults = json.load(f)
        default = defaults.get(name, default)
    # Normalise validator/sanitizer arguments into lists of callables
    # (string names are resolved through the default registries below).
    if isinstance(validator, str) or isfunction(validator):
        validators = [validator]
    else:
        validators = []
    if isinstance(sanitizer, str) or isfunction(sanitizer):
        sanitizers = [sanitizer]
    else:
        sanitizers = []
    validators = [
        normalize_validators(validator) if isinstance(validator, str) else validator for validator in validators
    ]
    sanitizers = [
        normalize_sanitizers(sanitizer) if isinstance(sanitizer, str) else sanitizer for sanitizer in sanitizers
    ]
    attempts = 0
    # Choose how the raw value is read: hidden (getpass) > multi-line > plain input.
    input_function = None
    if hidden:
        input_function = getpass.getpass
    elif multi_line:
        input_function = multi_line_input
    else:
        input_function = input
    while maximum_attempts is None or attempts < maximum_attempts:
        value = None
        # Skip prompting entirely when the default is forced and already valid.
        if not always_use_default or not _is_input_valid(default, validators):
            value = input_function("{label}{default}: ".format(
                label=label.format(name=name),
                default="" if default is None else " [{}]".format(default)
            )).strip()
            if not value:
                value = default
        # Give the recoverer a chance to repair an invalid value before rejecting it.
        if recoverer is not None and not _is_input_valid(value, validators):
            recoverered = recoverer(value)
            if recoverered is not None:
                value = recoverered
        if _is_input_valid(value, validators):
            # Persist the accepted (pre-sanitization) answer for the next run.
            if cache and not delete_cache and (name not in defaults or value != defaults[name]):
                with open(cache_path, "w") as f:
                    defaults[name] = value
                    json.dump(defaults, f, indent=4)
            if delete_cache:
                os.remove(cache_path)
            if auto_clear:
                clear()
            # Sanitizers run only on accepted values, in the given order.
            for sanitizer in sanitizers:
                value = sanitizer(value)
            return value
        attempts += 1
        print("Given value '{value}' is not valid.".format(value=value))
    raise ValueError(
        "User attempted to answer query called {name} more than {attempts} times!".format(
            name=name,
            attempts=attempts
        )
    )
nus_tools/types/samurai/content_list.py | arcticdiv/nus_tools | 0 | 6624053 | <reponame>arcticdiv/nus_tools<filename>nus_tools/types/samurai/content_list.py
from typing import List
from . import movie_list, title_list
from .common import SamuraiListBaseType
class SamuraiContentsList(SamuraiListBaseType):
    """Mixed content list holding both title and movie entries from a <contents> element."""

    titles: List[title_list.SamuraiListTitle]
    movies: List[movie_list.SamuraiListMovie]

    def _read_list(self, xml):
        """Populate self.titles / self.movies from the children of *xml*."""
        assert xml.tag == 'contents'
        self.titles = []
        self.movies = []
        for content in xml.content:
            if hasattr(content, 'title'):
                parsed = title_list.SamuraiListTitle._parse(content.title)
                self.titles.append(parsed)
            elif hasattr(content, 'movie'):
                parsed = movie_list.SamuraiListMovie._parse(content.movie)
                self.movies.append(parsed)
            else:
                # Unknown child element: surface its tag so the gap is visible.
                raise ValueError(content.getchildren()[0].tag)
| from typing import List
from . import movie_list, title_list
from .common import SamuraiListBaseType
class SamuraiContentsList(SamuraiListBaseType):
titles: List[title_list.SamuraiListTitle]
movies: List[movie_list.SamuraiListMovie]
def _read_list(self, xml):
assert xml.tag == 'contents'
self.titles = []
self.movies = []
for content in xml.content:
if hasattr(content, 'title'):
self.titles.append(title_list.SamuraiListTitle._parse(content.title))
elif hasattr(content, 'movie'):
self.movies.append(movie_list.SamuraiListMovie._parse(content.movie))
else:
raise ValueError(content.getchildren()[0].tag) | none | 1 | 2.935668 | 3 | |
examples/absolute-import-rewrite/hello/hello/messages.py | olsonpm/python-vendorize | 38 | 6624054 | <reponame>olsonpm/python-vendorize
message = "hello"
| message = "hello" | none | 1 | 1.060144 | 1 | |
day07/day7_part2.py | raistlin7447/AoC2021 | 0 | 6624055 | <gh_stars>0
import statistics
# AoC 2021 day 7 part 2: cost to move distance d is triangular, 1+2+...+d.
# Brute Force: try every candidate alignment position in [min, max].
with open("day7_input.txt") as f:
    crabs = list(map(int, f.readline().strip().split(",")))
best = 2**10000  # effectively +infinity sentinel
# Triangular-number fuel cost for moving `distance` steps.
fuel = lambda distance: int(distance * (distance+1) / 2)
for i in range(min(crabs), max(crabs)+1):
    total_fuel = sum(fuel(abs(crab - i)) for crab in crabs)
    best = min(best, total_fuel)
print(f"{best=}")
# Turns out that mean is usually correct, but can sometimes vary by up to 1/2
# https://www.reddit.com/gallery/rawxad
with open("day7_input.txt") as f:
    crabs = list(map(int, f.readline().strip().split(",")))
fuel = lambda distance: int(distance * (distance+1) / 2)
total_fuel = lambda x: sum(fuel(abs(crab - x)) for crab in crabs)
mean = statistics.mean(crabs)
# Evaluate the two integers bracketing the mean and keep the cheaper one.
low = int(mean - 0.5)
high = int(mean + 0.5)
best = min(total_fuel(low), total_fuel(high))
print(f"{best=}")
| import statistics
# Brute Force
with open("day7_input.txt") as f:
crabs = list(map(int, f.readline().strip().split(",")))
best = 2**10000
fuel = lambda distance: int(distance * (distance+1) / 2)
for i in range(min(crabs), max(crabs)+1):
total_fuel = sum(fuel(abs(crab - i)) for crab in crabs)
best = min(best, total_fuel)
print(f"{best=}")
# Turns out that mean is usually correct, but can sometimes vary by up to 1/2
# https://www.reddit.com/gallery/rawxad
with open("day7_input.txt") as f:
crabs = list(map(int, f.readline().strip().split(",")))
fuel = lambda distance: int(distance * (distance+1) / 2)
total_fuel = lambda x: sum(fuel(abs(crab - x)) for crab in crabs)
mean = statistics.mean(crabs)
low = int(mean - 0.5)
high = int(mean + 0.5)
best = min(total_fuel(low), total_fuel(high))
print(f"{best=}") | en | 0.939525 | # Brute Force # Turns out that mean is usually correct, but can sometimes vary by up to 1/2 # https://www.reddit.com/gallery/rawxad | 3.509853 | 4 |
prototype/back-end/rateapi/migrations/0006_auto_20171119_0100.py | nWo-deHack/CNAP | 0 | 6624056 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-18 23:00
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Rate.numOfTicket and re-captures the
    # default for Dialog.timeStamp.

    dependencies = [
        ('rateapi', '0005_auto_20171110_0404'),
    ]
    operations = [
        migrations.AddField(
            model_name='rate',
            name='numOfTicket',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='dialog',
            name='timeStamp',
            # NOTE(review): this is a fixed datetime captured at makemigrations
            # time, not a callable -- the model presumably wants something like
            # timezone.now as the default; confirm against the model definition.
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 19, 1, 0, 57, 688000)),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-18 23:00
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rateapi', '0005_auto_20171110_0404'),
]
operations = [
migrations.AddField(
model_name='rate',
name='numOfTicket',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='dialog',
name='timeStamp',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 19, 1, 0, 57, 688000)),
),
]
| en | 0.700517 | # -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2017-11-18 23:00 | 1.617651 | 2 |
agents/loser_agent.py | dbelliss/Starcraft2AI | 2 | 6624057 | <reponame>dbelliss/Starcraft2AI
# https://chatbotslife.com/building-a-basic-pysc2-agent-b109cde1477c
import asyncio
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from pprint import pprint
from time import gmtime, strftime, localtime
import os
# For debugging purposes only
import sys
# Get strategy enums
from strategies import Strategies
from sc2.position import Point2
from sc2.data import race_townhalls
class LoserAgent(sc2.BotAI):
mainAgent = None
def __init__(self, is_logging = False, is_printing_to_console = False, isMainAgent = False, fileName = ""):
super().__init__()
if isMainAgent:
# For debugging
self.is_logging = is_logging # Setting this to true to write information to log files in the agents/logs directory
self.is_printing_to_console = is_printing_to_console # Setting this to true causes all logs to be printed to the console
# Make logs directory if it doesn't exist
if not os.path.exists("./logs"):
os.mkdir("./logs")
self.log_file_name = "./logs/" + fileName + strftime("%Y-%m-%d %H%M%S", localtime()) + ".log"
self.log_file = open(self.log_file_name, "w+") # Create log file based on the time
# Constants
self.researched = 2 # If an upgrade has been research
self.is_researching = 1 # If an upgrade is being researched
self.not_researched = 0 # If an upgrade is not being researched and has not been researched
# non-standard upgrade status
self.can_burrow = 0
self.zergling_speed = 0
self.zergling_attack_boost = 0
self.baneling_speed = 0
self.roach_speed = 0
self.roach_tunnel = 0
self.overlord_speed = 0
self.hydralisk_range = 0
self.infestor_parasite = 0
self.infestor_energy = 0
self.ultralisk_defense = 0
# standard upgrades
# Ground melee
self.melee1 = 0
self.melee2 = 0
self.melee3 = 0
# Ground ranged
self.ranged1 = 0
self.ranged2 = 0
self.ranged3 = 0
# Ground defense
self.carapace1 = 0
self.carapace2 = 0
self.carapace3 = 0
# Flyer attack
self.flyer_attack1 = 0
self.flyer_attack2 = 0
self.flyer_attack3 = 0
# Flyer defense
self.flyer_defense1 = 0
self.flyer_defense2 = 0
self.flyer_defense3 = 0
# units built
self.num_zerglings_built = 0
self.num_drones_built = 0
self.num_queens_built = 0
self.num_roaches_built = 0
self.num_hydralisks_built = 0
self.num_banelines_built = 0
self.num_lurkers_built = 0
self.num_ravagers_built = 0
self.num_mutalisks_built = 0
self.num_corrupters_built = 0
self.num_brood_lords_built = 0
self.num_swarm_hosts_built = 0
self.num_vipers_built = 0
self.num_ultralisks_built = 0
# Static defenses built
self.num_spinecrawlers_built = 0
self.num_sporecrawlers_built = 0
# Structure built
self.num_extractors_built = 0
self.num_hatcheries_built = 0
self.num_spawningpools_built = 0
self.num_roachwarrens_built = 0
self.num_hyraliskdens_built = 0
self.num_lairs_built = 0
self.num_infestation_pits_built = 0
self.num_lurkerdens_built = 0
self.num_hives_built = 0
self.num_ultralisk_caverns_built = 0
self.num_spires_built = 0
self.num_greater_spires_built = 0
# Units actively being used for things, gets set to null on strategy change
self.strike_force = None
# Previous strategy so you now when the strategy changes
self.prev_strategy = None
# True if strategy just changed in this iteration
self.did_strategy_change = False
LoserAgent.mainAgent = self
# Way point for units to move to
self.waypoint = None
self.mutalisk_waypoint = None
# Predict enemy will be in the first possible position
self.predicted_enemy_position_num = -1
# Position to search for enemy untis
self.num_enemy_positions = -1
# Position the bot begins
self.mainAgent.start_location = None
# Easier way to access map information, must be loaded in after game loads
self.map_height = None
self.map_width = None
# Top left corner of the map for mutas
self.map_corner = None
# Set to true after army is requested to prevent duplicate queries in the same iteration
# gets set to false in each perform_strategy call
self.is_army_cached = False;
# Saves army each iteration to prevent duplicate queries
self.cached_army = None
# SafeRoachAgent attributes needed that should not conflict with prior existing attributes
# Number of BUILT units, different from number of unit types
self.creeptumors_built = 0 # number of built creep tumors
self.creeptumors_built_queen = 0 # number of seed creep tumors built by queens
self.drones_built = 0 # number of built drones
self.overlords_built = 0 # number of overlords built
self.hatcheries_built = 0 # number of hatcheries built
self.rebuild_viable_tumor = 0 # number of viable tumors rebuilt
self.zerglings_built = 0 # number of zerglings built
self.queens_built = 0 # number of queens built
self.sporecrawlers_built = 0 # number of spore crawlers built
self.spinecrawlers_built = 0 # number of spine crawlers built
self.drone_gapnum = 0 # gap in missing drones
self.drone_gapnumcounter = 0 # counter for replenishing drones
self.done_gap_closing = False # closed gap boolean
self.roaches_built = 0 # Number of roaches built
self.hydralisks_built = 0 # Number of hydralisks built
self.extractors_built = 0 # number of extractors built
self.queen_gapnum = 0 # gap in missing queens
self.queen_gapnumcounter = 0 # counter for replenishing queens
# checks for true/false
self.base_build_order_complete = False # checks if base build order is complete
self.viable_tumor = True # checks if there's a tumor that can spawn other tumors
# standard upgrades
self.built_gas1 = False
self.moved_workers_to_gas1 = False # whether workers are assigned to the first vespene geyser
self.built_sp = False # whether a spawning pool was built
self.research_zmb = False # whether RESEARCH_ZERGLINGMETABOLICBOOST was performed
self.built_rwarren = False # whether the roach warren was built
self.built_lair = False # True if one Lair has been upgraded
self.built_gr = False # True if glial reconstitution has been built
self.built_hd = False # True if hydralisk den has been built
self.built_gs = False # True if grooved spines are researched
self.built_ec = False # True if evolution chamber is built
self.built_ga1 = False # True if ground armor 1 built
self.built_mw1 = False # True if missile weapon 1 built
self.OG_hatchery = 0
'''
Base on_step function
Uses basic_build and performs actions based on the current strategy
For now, strategies will change ever 100 steps
Harass strategies are not implemented yet
'''
    async def on_step(self, iteration, strategy_num):
        """Per-frame entry point: run the baseline macro build, then the
        strategy routine selected by *strategy_num*.

        A strategy_num of -1 means "no strategy chosen"; any other value is
        interpreted by perform_strategy (mapped onto the Strategies enum).
        """
        # self.log("Step: %s Overlord: %s" % (str(iteration), str(self.mainAgent.units(OVERLORD).amount)))
        # self.log("Step: " + str(iteration))
        # TEMP: Until strategy is given by Q table
        #strategy_num = (int)(iteration / 75) % 12
        # Build lings, queen, overlords, drones, and meleeattack1
        await self.basic_build(iteration)
        # Perform actions based on given strategy
        if strategy_num == -1:
            # self.mainAgent.log("No given strategy")
            pass
        else:
            await self.perform_strategy(iteration, strategy_num)
'''
Builds a ton of lings
Build drones and start gathering vespene
Build a queen
Build overlords as needed
Builds a few hydralisks
'''
async def basic_build(self, iteration):
hatchery = self.mainAgent.bases
if hatchery == None or hatchery.amount == 0:
return
else:
hatchery = self.mainAgent.bases.ready.random.ready.random
# Build overlords if close to reaching cap
if self.mainAgent.supply_used > self.mainAgent.supply_cap - 4 and self.mainAgent.num_larva > 0 and self.mainAgent.can_afford(OVERLORD):
await self.mainAgent.do(self.mainAgent.random_larva.train(OVERLORD))
else:
# Build drones
if self.mainAgent.units(DRONE).amount < 20 and self.mainAgent.can_afford(DRONE) and self.mainAgent.units(LARVA).amount > 0 and self.mainAgent.supply_used < self.mainAgent.supply_cap:
await self.mainAgent.do(self.mainAgent.random_larva.train(DRONE))
if self.mainAgent.units(SPIRE).ready.exists and self.mainAgent.units(MUTALISK).amount < 20 and self.mainAgent.supply_used < self.mainAgent.supply_cap - 3 \
and self.mainAgent.can_afford(MUTALISK) and self.mainAgent.num_larva > 0:
await self.mainAgent.do(self.mainAgent.random_larva.train(MUTALISK))
if self.mainAgent.units(HYDRALISKDEN).ready.exists and self.mainAgent.units(HYDRALISK).amount < 20 and self.mainAgent.supply_used < self.mainAgent.supply_cap - 3 \
and self.mainAgent.can_afford(HYDRALISK) and self.mainAgent.num_larva > 0:
await self.mainAgent.do(self.mainAgent.random_larva.train(HYDRALISK))
# Build lings
if self.mainAgent.units(ZERGLING).amount + self.mainAgent.already_pending(ZERGLING) < 5 and self.mainAgent.can_afford(ZERGLING) and self.mainAgent.num_larva > 0 and \
self.mainAgent.supply_used < self.mainAgent.supply_cap - 1 and self.mainAgent.units(SPAWNINGPOOL).ready.exists:
await self.mainAgent.do(self.mainAgent.random_larva.train(ZERGLING))
# Build Spawning pool
if not self.mainAgent.units(SPAWNINGPOOL).exists and self.mainAgent.can_afford(SPAWNINGPOOL):
p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
await self.mainAgent.build(SPAWNINGPOOL, near=p)
if self.mainAgent.units(EXTRACTOR).amount < 2 and self.mainAgent.can_afford(EXTRACTOR) and self.mainAgent.already_pending(EXTRACTOR) < 2:
self.mainAgent.num_extractors_built += 1
drone = self.mainAgent.workers.random
target = self.mainAgent.state.vespene_geyser.closest_to(drone.position)
await self.mainAgent.do(drone.build(EXTRACTOR, target))
# If Extractor does not have 3 drones, give it more drones
for extractor in self.mainAgent.units(EXTRACTOR):
if extractor.assigned_harvesters < extractor.ideal_harvesters and self.mainAgent.workers.amount > 0:
await self.mainAgent.do(self.mainAgent.workers.random.gather(extractor))
# # Build Evolution Chamber pool
# if not self.mainAgent.units(EVOLUTIONCHAMBER).exists and self.mainAgent.can_afford(SPAWNINGPOOL):
# p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
# await self.mainAgent.build(EVOLUTIONCHAMBER, near=p)
# elif self.mainAgent.can_afford(RESEARCH_ZERGMELEEWEAPONSLEVEL1) and self.mainAgent.melee1 == 0 and self.mainAgent.units(EVOLUTIONCHAMBER).ready.exists:
# # Get melee1 upgrade
# self.mainAgent.melee1 = 1
# await self.mainAgent.do(self.mainAgent.units(EVOLUTIONCHAMBER).first(RESEARCH_ZERGMELEEWEAPONSLEVEL1))
# Build a queen if you haven't
if self.mainAgent.num_queens_built < 1 and self.mainAgent.units(SPAWNINGPOOL).ready.exists and self.mainAgent.can_afford(QUEEN) and \
self.mainAgent.supply_used < self.mainAgent.supply_cap - 1:
base = self.mainAgent.bases.random
self.mainAgent.num_queens_built += 1
await self.mainAgent.do(base.train(QUEEN))
# Inject larva when possible
elif self.mainAgent.units(QUEEN).amount > 0:
queen = self.mainAgent.units(QUEEN).first
abilities = await self.mainAgent.get_available_abilities(queen)
if AbilityId.EFFECT_INJECTLARVA in abilities:
await self.mainAgent.do(queen(EFFECT_INJECTLARVA, hatchery))
# Upgrade to lair when possible
if self.mainAgent.num_lairs_built == 0 and self.mainAgent.units(HATCHERY).amount > 0 and self.mainAgent.can_afford(AbilityId.UPGRADETOLAIR_LAIR) \
and self.mainAgent.can_afford(UnitTypeId.LAIR) and self.mainAgent.units(SPAWNINGPOOL).ready.exists and self.mainAgent.units(QUEEN).amount > 0:
hatchery = self.mainAgent.units(HATCHERY).first
self.mainAgent.num_lairs_built += 1
err = await self.mainAgent.do(hatchery(UPGRADETOLAIR_LAIR))
if err:
self.mainAgent.num_lairs_built -= 1
# # Build hydralisk den when possible
# if not self.mainAgent.units(HYDRALISKDEN).exists and self.mainAgent.units(LAIR).amount > 0 and self.mainAgent.can_afford(HYDRALISKDEN) \
# and self.mainAgent.num_hydralisks_built == 0:
# p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
# self.mainAgent.num_hydralisks_built += 1
# await self.mainAgent.build(HYDRALISKDEN, near=p)
#
# # Build lurker den when possible
# if self.mainAgent.num_lurkerdens_built == 0 and self.mainAgent.units(HYDRALISKDEN).ready.amount > 0 and \
# self.mainAgent.can_afford(UPGRADETOLURKERDEN_LURKERDEN):
# # await self.mainAgent.do(self.mainAgent.units(HYDRALISKDEN).first(UPGRADETOLURKERDEN_LURKERDEN ))
# self.mainAgent.num_lurkerdens_built += 1
# await self.mainAgent.do(self.mainAgent.units(HYDRALISKDEN).first(MORPH_LURKERDEN))
if not self.mainAgent.units(SPIRE).exists and self.mainAgent.units(LAIR).amount > 0 and self.mainAgent.can_afford(SPIRE) \
and not self.mainAgent.already_pending(SPIRE):
p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
await self.mainAgent.build(SPIRE, near=p)
'''
Calls the correct strategy function given the strategy enum value
Strategy functions can be override in base classes
'''
async def perform_strategy(self, iteration, strategy_num):
self.mainAgent.clean_strike_force() # Clear dead units from strike force
self.mainAgent.is_army_cached = False # Must re obtain army data
if self.mainAgent.predicted_enemy_position_num == -1:
# Initializing things that are needed after game data is loaded
# Prevent game from crashing
hatchery = self.mainAgent.bases
if hatchery == None or hatchery.amount == 0:
return
else:
hatchery = self.mainAgent.bases.ready.random
# Assume first position
self.mainAgent.predicted_enemy_position = 0
self.mainAgent.num_enemy_positions = len(self.mainAgent.enemy_start_locations)
self.mainAgent.start_location = self.mainAgent.bases.ready.random.position # Should only be 1 hatchery at this time
self.mainAgent.map_width = self.mainAgent.game_info.map_size[0]
self.mainAgent.map_height = self.mainAgent.game_info.map_size[1]
# Get a point in the corner of the map
p = lambda: None # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
p.x = self.mainAgent.game_info.map_center.x * 1.9
p.y = self.mainAgent.game_info.map_center.y * 1.9
self.mainAgent.map_corner = Point2.from_proto(p)
# Make sure given strategy num is valid
if Strategies.has_value(strategy_num):
# Valid strategy num, convert int into enum value
strategy = Strategies(strategy_num)
# Mark strategy as changed or not
if strategy != self.mainAgent.prev_strategy:
self.mainAgent.log("New strategy is " + str(strategy))
self.mainAgent.did_strategy_change = True
self.mainAgent.strike_force = None
else:
self.mainAgent.did_strategy_change = False
self.mainAgent.prev_strategy = strategy # Prepare for next iteration
else:
self.log_error(f"Unknown strategy number {strategy_num}")
return
# Call the proper strategy function
# Prevent game from crashing
hatchery = self.mainAgent.bases
if hatchery == None or hatchery.amount == 0:
return
else:
hatchery = self.mainAgent.bases.ready.random
# Attack
if strategy == Strategies.HEAVY_ATTACK:
await self.heavy_attack(iteration)
elif strategy == Strategies.MEDIUM_ATTACK:
await self.medium_attack(iteration)
elif strategy == Strategies.LIGHT_ATTACK:
await self.light_attack(iteration)
# Scouting
elif strategy == Strategies.HEAVY_SCOUTING:
await self.heavy_scouting(iteration)
elif strategy == Strategies.MEDIUM_SCOUTING:
await self.medium_scouting(iteration)
elif strategy == Strategies.LIGHT_SCOUTING:
await self.light_scouting(iteration)
# Defense
elif strategy == Strategies.HEAVY_DEFENSE:
await self.heavy_defense(iteration)
elif strategy == Strategies.MEDIUM_DEFENSE:
await self.medium_defense(iteration)
elif strategy == Strategies.LIGHT_DEFENSE:
await self.light_defense(iteration)
# Harass
elif strategy == Strategies.HEAVY_HARASS:
await self.heavy_harass(iteration)
elif strategy == Strategies.MEDIUM_HARASS:
await self.medium_harass(iteration)
elif strategy == Strategies.LIGHT_HARASS:
await self.light_harass(iteration)
# Unknown
else:
self.log("Unknown strategy was given: " + str(strategy))
'''
Send all combat units (including the queen) to a known enemy position
Do NOT recall ever
'''
async def heavy_attack(self, iteration):
await self.attack_with_percentage_of_army(0, 0)
'''
Send all combat units (including the queen) to a known enemy position
Recall after a certain amount of units die
Must keep track of units being used because order of units in self.units constantly changes
'''
async def medium_attack(self, iteration):
await self.attack_with_percentage_of_army(.75, 15)
'''
Attack a known enemy position, but if you get attacked, retreat back to base
'''
async def light_attack(self, iteration):
await self.attack_with_percentage_of_army(.9, .3)
# If more than percentage_to_advance_group percent of strike force is
async def attack_with_percentage_of_army(self, percentage_to_advance_group, percentage_to_retreat_group):
army = self.mainAgent.army
if len(army) == 0:
# No army to use, don't bother trying to attack
self.mainAgent.waypoint = self.mainAgent.game_info.map_center # Restart waypoint
return
# Move army to mainAgent's waypoint and attack things on the way
percentage_units_at_waypoint = \
await self.move_and_get_percent_units_at_waypoint(army, self.mainAgent.waypoint, True)
# If all units are close to the waypoint, pick a closer one
if percentage_units_at_waypoint > percentage_to_advance_group:
target = self.mainAgent.select_target()
self.mainAgent.waypoint = self.mainAgent.waypoint.towards(target, 20)
elif percentage_units_at_waypoint < percentage_to_retreat_group:
# Move waypoint back
if (self.mainAgent.waypoint != self.mainAgent.start_location):
self.mainAgent.waypoint = self.mainAgent.waypoint.towards(self.mainAgent.start_location, 1)
async def move_and_get_percent_units_at_waypoint(self, units, waypoint, should_attack):
# Keep units together
num_units_at_waypoint = 0
# All strike force members attack to the waypoint
for unit in units:
distance_from_waypoint = unit.position.to2.distance_to(waypoint)
if distance_from_waypoint < 15:
num_units_at_waypoint += 1
if should_attack:
await self.mainAgent.do(unit.attack(waypoint))
else:
await self.mainAgent.do(unit.move(waypoint))
percentage_units_at_waypoint = num_units_at_waypoint / len(units)
return percentage_units_at_waypoint
'''
Send all military units out to different areas
Die for knowledge
'''
async def heavy_scouting(self, iteration):
await self.scout_with_percentage_of_army(1, True, False)
'''
Send a good amount of military units out
'''
async def medium_scouting(self, iteration):
await self.scout_with_percentage_of_army(.5, True, False)
'''
Send a couple of things out for scouting and pull back if damage is taken
'''
async def light_scouting(self, iteration):
await self.scout_with_percentage_of_army(.5, True, True)
async def scout_with_percentage_of_army(self, percentage, use_overlords, pull_back_if_damaged):
map_width = self.mainAgent.map_width
map_height = self.mainAgent.map_height
army = self.army
if use_overlords:
army += self.mainAgent.units(OVERLORD)
desired_strike_force_size = int(percentage * army.amount)
if self.mainAgent.strike_force is None:
self.mainAgent.strike_force = army.take(desired_strike_force_size)
# If strike force should include more members (If a unit was built)
# Do not add more units if the entire army is already in strike force
if len(self.mainAgent.strike_force) < desired_strike_force_size and len(army) > len(self.mainAgent.strike_force):
self.mainAgent.strike_force += (army - self.mainAgent.strike_force).take(desired_strike_force_size - len(self.mainAgent.strike_force))
for unit_ref in self.mainAgent.strike_force:
# Need to reacquire unit from self.mainAgent.units to see that a command has been queued
id = unit_ref.tag
unit = self.mainAgent.units.find_by_tag(id)
if unit is None:
# Unit died
self.mainAgent.strike_force.remove(unit_ref)
continue
if pull_back_if_damaged and unit.health < unit.health_max:
# If pull_back is true and unti is damaged, move to random hatchery
if (len(self.mainAgent.bases) > 0):
await self.mainAgent.do(unit.move(self.mainAgent.bases[random.randrange(0, len(self.mainAgent.bases))].position))
elif unit.noqueue:
# Go to a new random position
pos = lambda: None # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
pos.x = random.randrange(0, map_width)
pos.y = random.randrange(0, map_height)
position_to_search = Point2.from_proto(pos)
await self.mainAgent.do(unit.move(position_to_search))
'''
Complete recall back to main base
Build lots of static defenses
Build lots of lurkers
'''
async def heavy_defense(self, iteration):
# Build 5 spinecrawlers and sporecrawlers, and 10 lurkers
await self.prepare_defenses(4, 4, 10)
'''
Recall and distribute between main base and explansions
Build some defensive structures and units
'''
async def medium_defense(self, iteration):
# Build 3 spinecrawlers and sporecrawlers, and 5 lurkers
await self.prepare_defenses(3, 3, 5)
'''
Distribute forces between main base and expansions
Build a few defensive structures and units
'''
async def light_defense(self, iteration):
# Build 1 spinecrawlers and sporecrawlers, and 3 lurkers
await self.prepare_defenses(1, 1, 3)
    async def prepare_defenses(self, num_spine_crawlers_to_build, num_sporecrawlers_to_build, num_lurkers_to_build):
        """Recall the army to a base and build static defenses and lurkers up
        to the given per-type totals (tracked in the num_*_built counters).

        NOTE(review): the spine/spore counters are incremented before the build
        command is issued and are never rolled back on failure, while the
        lurker counter is rolled back -- confirm the asymmetry is intended.
        """
        hatchery = self.mainAgent.bases.ready.random
        # TODO: have some units go to expansions
        # Return all units to base
        for unit in self.mainAgent.army + self.mainAgent.overlords:
            if unit.distance_to(hatchery.position) > 20:
                await self.mainAgent.do(unit.move(hatchery.position))
        # Build spine crawlers (requires a finished spawning pool)
        if self.mainAgent.units(SPAWNINGPOOL).ready.exists and self.mainAgent.num_spinecrawlers_built < num_spine_crawlers_to_build \
                and self.mainAgent.can_afford(SPINECRAWLER):
            self.mainAgent.num_spinecrawlers_built += 1
            # Place toward the map center so the defense faces the enemy side
            p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
            await self.mainAgent.build(SPINECRAWLER, near=p)
        # Build spore crawlers (requires a finished evolution chamber)
        if self.mainAgent.units(EVOLUTIONCHAMBER).ready.exists and self.mainAgent.num_sporecrawlers_built < num_sporecrawlers_to_build \
                and self.mainAgent.can_afford(SPORECRAWLER):
            self.mainAgent.num_sporecrawlers_built += 1
            p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
            await self.mainAgent.build(SPORECRAWLER, near=p)
        # Morph lurkers from hydralisks (requires a finished lurker den)
        if self.mainAgent.units(LURKERDENMP).ready.exists and self.mainAgent.num_lurkers_built < num_lurkers_to_build \
                and self.mainAgent.can_afford(MORPH_LURKER) and self.mainAgent.num_larva > 0 and self.mainAgent.units(HYDRALISK).amount > 0:
            self.mainAgent.num_lurkers_built += 1
            hydralisk = self.mainAgent.units(HYDRALISK).random
            err = await self.mainAgent.do(hydralisk(MORPH_LURKER))
            if err:
                # Morph command failed; roll the counter back
                self.mainAgent.num_lurkers_built -= 1
        # Burrow all lurkers so they can attack
        for lurker in self.mainAgent.units(LURKERMP):
            abilities = await self.mainAgent.get_available_abilities(lurker)
            if AbilityId.BURROWDOWN_LURKER in abilities:
                await self.mainAgent.do(lurker(BURROWDOWN_LURKER))
'''
Build swarms hosts and harass with them
Build mutalisks and harass with them
If harass units are attacked, move to the next base
'''
async def heavy_harass(self, iteration):
await self.harass(0) # Die for the harass
'''
TODO
'''
async def medium_harass(self, iteration):
await self.harass(.5) # Return if damaged to half health
'''
If attacked pull back for a set time
Only use harass units if you have them
'''
async def light_harass(self, iteration):
await self.harass(1) # Return immediately if damaged
async def harass(self, percent_health_to_return):
if self.mainAgent.did_strategy_change:
self.mainAgent.mutalisk_waypoint = self.mainAgent.map_corner
if self.army.amount == 0:
# Nothing to harass with
return
harass_target = self.get_harass_target()
mutalisks = self.mainAgent.units(MUTALISK)
# Mutalisk harass is different from other things
if mutalisks.amount > 0:
if self.mainAgent.mutalisk_waypoint == self.mainAgent.enemy_start_locations[0]:
# Second phase of muta harass, when at the enemy base, begin attacking
for muta in mutalisks:
if muta.position.to2.distance_to(self.mainAgent.mutalisk_waypoint):
# Begin attacking workers or anything nearby
await self.mainAgent.do(muta.attack(harass_target))
else:
# Move to whre the workers are without attacking
await self.mainAgent.do(muta.move(self.mainAgent.mutalisk_waypoint))
else:
# Phase 1: Gather the mutas
# Move mutalisks to mutalisk waypoint, and do not attack anything else on the way
percentage_mutas_at_waypoint = await \
self.move_and_get_percent_units_at_waypoint(mutalisks, self.mainAgent.mutalisk_waypoint, False)
if percentage_mutas_at_waypoint > .75:
self.mainAgent.mutalisk_waypoint = self.mainAgent.enemy_start_locations[0] # Send them off to the enemy base
for unit in self.army - self.mainAgent.units(MUTALISK):
if unit.health < unit.health_max * percent_health_to_return:
# low on health so come back
await self.mainAgent.do(unit.move(self.mainAgent.bases.random))
else:
# still full health so keep attacking
await self.mainAgent.do(unit.attack(harass_target))
# Finds a target to harass
# Will first choose workers, and if there are no workers, then to go a known base, and in no known bases,
# Go to enemy main base
def get_harass_target(self):
# If there are known enemy expansions, harass those
enemy_workers = self.mainAgent.known_enemy_units.filter(lambda x: x.name == "Drone" or x.name == "SCV" or x.name == "Probe")
# If workers are visible, attack them
if len(enemy_workers) > 0:
harass_target = enemy_workers.random.position
else:
# If no workers are visible, find a town hall to attack
enemy_bases = self.get_known_enemy_bases()
if len(enemy_bases) > 0:
harass_target = enemy_bases[random.randint(0, len(enemy_bases) - 1)]
else:
# if no town halls are known, go to the enemy start
harass_target = self.mainAgent.enemy_start_locations[0]
return harass_target
'''
Removes dead units from strike force
'''
def clean_strike_force(self):
if self.mainAgent.strike_force is None:
# No defined strike force yet
return
for unit in self.mainAgent.strike_force:
if self.mainAgent.units.find_by_tag(unit.tag) is None:
self.mainAgent.strike_force.remove(unit)
'''
Utilities
'''
@property
def army(self):
if self.mainAgent.is_army_cached:
return self.mainAgent.cached_army
else:
self.mainAgent.is_army_cached = True
self.cached_army = self.mainAgent.units.filter(
lambda x: x.name != "Drone" and x.name != "Overlord" and x.name != "Queen" and x.name != "CreepTumorQueen"\
and x.name != "Egg" and x.name != "Larva" and not x.is_structure and x.name != "CreepTumorBurrowed") \
- self.mainAgent.units(LURKERMPBURROWED) - self.mainAgent.units(LURKERMPEGG) \
- self.mainAgent.units(BANELINGCOCOON)
return self.cached_army
@property
def overlords(self):
return self.mainAgent.units(OVERLORD)
@property
def buildings(self):
return self.mainAgent.units.filter(lambda x: x.is_structure) + self.mainAgent.units(SPINECRAWLER) + self.mainAgent.units(SPORECRAWLER)
@property
def bases(self):
return self.mainAgent.units.filter(lambda x: x.name == "Hatchery" or x.name == "Lair" or x.name == "Hive")
def get_random_worker(self):
return self.mainAgent.units(DRONE).random
@property
def game_time(self):
return self.mainAgent.state.game_loop * 0.725 * (1 / 16)
def get_known_enemy_bases(self):
# Get all enemy structures, then filter to only take townhall types
enemy_structures = self.mainAgent.known_enemy_structures
townhall_ids = [item for sublist in race_townhalls.values() for item in sublist]
return enemy_structures.filter(lambda x: x.type_id in townhall_ids)
'''
From Dentosal's proxyrax build
Targets a random known enemy building
If no known buildings, go towards to a possible enemy start position
'''
def select_target(self):
target = self.mainAgent.known_enemy_units
if target.exists:
return target.random.position
target = self.mainAgent.known_enemy_units
if target.exists:
return target.random.position
return self.mainAgent.enemy_start_locations[0]
# Code to explore more than one enemy starting position not needed because all maps are only 2 people
# Not tested
# # Explore other starting positions
# units_near_predicted_position = self.mainAgent.units.filter(lambda x: x.position.distance_to(
# self.enemy_start_locations[self.predicted_enemy_position]) < 5)
# if len(units_near_predicted_position) > 0:
# # There is a unit near the predicted position, but no visible structures or enemies
# self.predicted_enemy_position = (self.predicted_enemy_position + 1)
# # loop over starting positions if needed
# if self.predicted_enemy_position >= self.num_enemy_positions:
# self.predicted_enemy_position = 0
#
# return self.enemy_start_locations[self.predicted_enemy_position]
@property
def num_larva(self):
"""Get the current amount of larva"""
return self.mainAgent.units(LARVA).amount
@property
def random_larva(self):
"""Get a random larva"""
return self.mainAgent.units(LARVA).random
'''
Prints to console if self.is_printing_to_console
Writes to log file if self.is_logging
'''
def log(self, data):
"""Log the data to the logfile if this agent is set to log information and logfile is below 1 megabyte"""
if self.mainAgent.is_logging and os.path.getsize(self.mainAgent.log_file_name) < 1000000:
self.mainAgent.log_file.write(f"{data}\n")
if self.mainAgent.is_printing_to_console:
print(data)
def log_error(self, data):
data = f"ERROR: {data}"
self.mainAgent.log_file.write(f"{data}\n")
print(data)
def main():
    """Launch a test game: LoserAgent (Zerg, logging enabled) versus a
    medium-difficulty Protoss computer on Abyssal Reef LE."""
    sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
        Bot(Race.Zerg, LoserAgent(True, True, True)),
        Computer(Race.Protoss, Difficulty.Medium)
    ], realtime=False)


if __name__ == '__main__':
    main()
| # https://chatbotslife.com/building-a-basic-pysc2-agent-b109cde1477c
import asyncio
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from pprint import pprint
from time import gmtime, strftime, localtime
import os
# For debugging purposes only
import sys
# Get strategy enums
from strategies import Strategies
from sc2.position import Point2
from sc2.data import race_townhalls
class LoserAgent(sc2.BotAI):
mainAgent = None
    def __init__(self, is_logging = False, is_printing_to_console = False, isMainAgent = False, fileName = ""):
        """Set up the agent.

        is_logging -- write log output to ./logs/<fileName><timestamp>.log
        is_printing_to_console -- echo all log output to stdout
        isMainAgent -- only the main agent owns state; helper agents reach it
                       through the LoserAgent.mainAgent class attribute
        fileName -- prefix for the log file name

        Note: every attribute below is created only when isMainAgent is True.
        """
        super().__init__()
        if isMainAgent:
            # For debugging
            self.is_logging = is_logging # Setting this to true to write information to log files in the agents/logs directory
            self.is_printing_to_console = is_printing_to_console # Setting this to true causes all logs to be printed to the console
            # Make logs directory if it doesn't exist
            if not os.path.exists("./logs"):
                os.mkdir("./logs")
            self.log_file_name = "./logs/" + fileName + strftime("%Y-%m-%d %H%M%S", localtime()) + ".log"
            self.log_file = open(self.log_file_name, "w+") # Create log file based on the time
            # Constants encoding research state (see the upgrade fields below)
            self.researched = 2 # If an upgrade has been research
            self.is_researching = 1 # If an upgrade is being researched
            self.not_researched = 0 # If an upgrade is not being researched and has not been researched
            # non-standard upgrade status
            self.can_burrow = 0
            self.zergling_speed = 0
            self.zergling_attack_boost = 0
            self.baneling_speed = 0
            self.roach_speed = 0
            self.roach_tunnel = 0
            self.overlord_speed = 0
            self.hydralisk_range = 0
            self.infestor_parasite = 0
            self.infestor_energy = 0
            self.ultralisk_defense = 0
            # standard upgrades
            # Ground melee
            self.melee1 = 0
            self.melee2 = 0
            self.melee3 = 0
            # Ground ranged
            self.ranged1 = 0
            self.ranged2 = 0
            self.ranged3 = 0
            # Ground defense
            self.carapace1 = 0
            self.carapace2 = 0
            self.carapace3 = 0
            # Flyer attack
            self.flyer_attack1 = 0
            self.flyer_attack2 = 0
            self.flyer_attack3 = 0
            # Flyer defense
            self.flyer_defense1 = 0
            self.flyer_defense2 = 0
            self.flyer_defense3 = 0
            # units built (counters, not current unit counts)
            self.num_zerglings_built = 0
            self.num_drones_built = 0
            self.num_queens_built = 0
            self.num_roaches_built = 0
            self.num_hydralisks_built = 0
            self.num_banelines_built = 0
            self.num_lurkers_built = 0
            self.num_ravagers_built = 0
            self.num_mutalisks_built = 0
            self.num_corrupters_built = 0
            self.num_brood_lords_built = 0
            self.num_swarm_hosts_built = 0
            self.num_vipers_built = 0
            self.num_ultralisks_built = 0
            # Static defenses built
            self.num_spinecrawlers_built = 0
            self.num_sporecrawlers_built = 0
            # Structure built
            self.num_extractors_built = 0
            self.num_hatcheries_built = 0
            self.num_spawningpools_built = 0
            self.num_roachwarrens_built = 0
            self.num_hyraliskdens_built = 0
            self.num_lairs_built = 0
            self.num_infestation_pits_built = 0
            self.num_lurkerdens_built = 0
            self.num_hives_built = 0
            self.num_ultralisk_caverns_built = 0
            self.num_spires_built = 0
            self.num_greater_spires_built = 0
            # Units actively being used for things, gets set to null on strategy change
            self.strike_force = None
            # Previous strategy so you now when the strategy changes
            self.prev_strategy = None
            # True if strategy just changed in this iteration
            self.did_strategy_change = False
            # Register this instance as the shared main agent
            LoserAgent.mainAgent = self
            # Way point for units to move to
            self.waypoint = None
            self.mutalisk_waypoint = None
            # Predict enemy will be in the first possible position
            # (-1 means "perform_strategy has not run its one-time init yet")
            self.predicted_enemy_position_num = -1
            # Position to search for enemy untis
            self.num_enemy_positions = -1
            # Position the bot begins
            self.mainAgent.start_location = None
            # Easier way to access map information, must be loaded in after game loads
            self.map_height = None
            self.map_width = None
            # Top left corner of the map for mutas
            self.map_corner = None
            # Set to true after army is requested to prevent duplicate queries in the same iteration
            # gets set to false in each perform_strategy call
            self.is_army_cached = False;
            # Saves army each iteration to prevent duplicate queries
            self.cached_army = None
            # SafeRoachAgent attributes needed that should not conflict with prior existing attributes
            # Number of BUILT units, different from number of unit types
            self.creeptumors_built = 0 # number of built creep tumors
            self.creeptumors_built_queen = 0 # number of seed creep tumors built by queens
            self.drones_built = 0 # number of built drones
            self.overlords_built = 0 # number of overlords built
            self.hatcheries_built = 0 # number of hatcheries built
            self.rebuild_viable_tumor = 0 # number of viable tumors rebuilt
            self.zerglings_built = 0 # number of zerglings built
            self.queens_built = 0 # number of queens built
            self.sporecrawlers_built = 0 # number of spore crawlers built
            self.spinecrawlers_built = 0 # number of spine crawlers built
            self.drone_gapnum = 0 # gap in missing drones
            self.drone_gapnumcounter = 0 # counter for replenishing drones
            self.done_gap_closing = False # closed gap boolean
            self.roaches_built = 0 # Number of roaches built
            self.hydralisks_built = 0 # Number of hydralisks built
            self.extractors_built = 0 # number of extractors built
            self.queen_gapnum = 0 # gap in missing queens
            self.queen_gapnumcounter = 0 # counter for replenishing queens
            # checks for true/false
            self.base_build_order_complete = False # checks if base build order is complete
            self.viable_tumor = True # checks if there's a tumor that can spawn other tumors
            # standard upgrades
            self.built_gas1 = False
            self.moved_workers_to_gas1 = False # whether workers are assigned to the first vespene geyser
            self.built_sp = False # whether a spawning pool was built
            self.research_zmb = False # whether RESEARCH_ZERGLINGMETABOLICBOOST was performed
            self.built_rwarren = False # whether the roach warren was built
            self.built_lair = False # True if one Lair has been upgraded
            self.built_gr = False # True if glial reconstitution has been built
            self.built_hd = False # True if hydralisk den has been built
            self.built_gs = False # True if grooved spines are researched
            self.built_ec = False # True if evolution chamber is built
            self.built_ga1 = False # True if ground armor 1 built
            self.built_mw1 = False # True if missile weapon 1 built
            self.OG_hatchery = 0
'''
Base on_step function
Uses basic_build and performs actions based on the current strategy
For now, strategies will change ever 100 steps
Harass strategies are not implemented yet
'''
async def on_step(self, iteration, strategy_num):
# self.log("Step: %s Overlord: %s" % (str(iteration), str(self.mainAgent.units(OVERLORD).amount)))
# self.log("Step: " + str(iteration))
# TEMP: Until strategy is given by Q table
#strategy_num = (int)(iteration / 75) % 12
# Build lings, queen, overlords, drones, and meleeattack1
await self.basic_build(iteration)
# Perform actions based on given strategy
if strategy_num == -1:
# self.mainAgent.log("No given strategy")
pass
else:
await self.perform_strategy(iteration, strategy_num)
'''
Builds a ton of lings
Build drones and start gathering vespene
Build a queen
Build overlords as needed
Builds a few hydralisks
'''
async def basic_build(self, iteration):
hatchery = self.mainAgent.bases
if hatchery == None or hatchery.amount == 0:
return
else:
hatchery = self.mainAgent.bases.ready.random.ready.random
# Build overlords if close to reaching cap
if self.mainAgent.supply_used > self.mainAgent.supply_cap - 4 and self.mainAgent.num_larva > 0 and self.mainAgent.can_afford(OVERLORD):
await self.mainAgent.do(self.mainAgent.random_larva.train(OVERLORD))
else:
# Build drones
if self.mainAgent.units(DRONE).amount < 20 and self.mainAgent.can_afford(DRONE) and self.mainAgent.units(LARVA).amount > 0 and self.mainAgent.supply_used < self.mainAgent.supply_cap:
await self.mainAgent.do(self.mainAgent.random_larva.train(DRONE))
if self.mainAgent.units(SPIRE).ready.exists and self.mainAgent.units(MUTALISK).amount < 20 and self.mainAgent.supply_used < self.mainAgent.supply_cap - 3 \
and self.mainAgent.can_afford(MUTALISK) and self.mainAgent.num_larva > 0:
await self.mainAgent.do(self.mainAgent.random_larva.train(MUTALISK))
if self.mainAgent.units(HYDRALISKDEN).ready.exists and self.mainAgent.units(HYDRALISK).amount < 20 and self.mainAgent.supply_used < self.mainAgent.supply_cap - 3 \
and self.mainAgent.can_afford(HYDRALISK) and self.mainAgent.num_larva > 0:
await self.mainAgent.do(self.mainAgent.random_larva.train(HYDRALISK))
# Build lings
if self.mainAgent.units(ZERGLING).amount + self.mainAgent.already_pending(ZERGLING) < 5 and self.mainAgent.can_afford(ZERGLING) and self.mainAgent.num_larva > 0 and \
self.mainAgent.supply_used < self.mainAgent.supply_cap - 1 and self.mainAgent.units(SPAWNINGPOOL).ready.exists:
await self.mainAgent.do(self.mainAgent.random_larva.train(ZERGLING))
# Build Spawning pool
if not self.mainAgent.units(SPAWNINGPOOL).exists and self.mainAgent.can_afford(SPAWNINGPOOL):
p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
await self.mainAgent.build(SPAWNINGPOOL, near=p)
if self.mainAgent.units(EXTRACTOR).amount < 2 and self.mainAgent.can_afford(EXTRACTOR) and self.mainAgent.already_pending(EXTRACTOR) < 2:
self.mainAgent.num_extractors_built += 1
drone = self.mainAgent.workers.random
target = self.mainAgent.state.vespene_geyser.closest_to(drone.position)
await self.mainAgent.do(drone.build(EXTRACTOR, target))
# If Extractor does not have 3 drones, give it more drones
for extractor in self.mainAgent.units(EXTRACTOR):
if extractor.assigned_harvesters < extractor.ideal_harvesters and self.mainAgent.workers.amount > 0:
await self.mainAgent.do(self.mainAgent.workers.random.gather(extractor))
# # Build Evolution Chamber pool
# if not self.mainAgent.units(EVOLUTIONCHAMBER).exists and self.mainAgent.can_afford(SPAWNINGPOOL):
# p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
# await self.mainAgent.build(EVOLUTIONCHAMBER, near=p)
# elif self.mainAgent.can_afford(RESEARCH_ZERGMELEEWEAPONSLEVEL1) and self.mainAgent.melee1 == 0 and self.mainAgent.units(EVOLUTIONCHAMBER).ready.exists:
# # Get melee1 upgrade
# self.mainAgent.melee1 = 1
# await self.mainAgent.do(self.mainAgent.units(EVOLUTIONCHAMBER).first(RESEARCH_ZERGMELEEWEAPONSLEVEL1))
# Build a queen if you haven't
if self.mainAgent.num_queens_built < 1 and self.mainAgent.units(SPAWNINGPOOL).ready.exists and self.mainAgent.can_afford(QUEEN) and \
self.mainAgent.supply_used < self.mainAgent.supply_cap - 1:
base = self.mainAgent.bases.random
self.mainAgent.num_queens_built += 1
await self.mainAgent.do(base.train(QUEEN))
# Inject larva when possible
elif self.mainAgent.units(QUEEN).amount > 0:
queen = self.mainAgent.units(QUEEN).first
abilities = await self.mainAgent.get_available_abilities(queen)
if AbilityId.EFFECT_INJECTLARVA in abilities:
await self.mainAgent.do(queen(EFFECT_INJECTLARVA, hatchery))
# Upgrade to lair when possible
if self.mainAgent.num_lairs_built == 0 and self.mainAgent.units(HATCHERY).amount > 0 and self.mainAgent.can_afford(AbilityId.UPGRADETOLAIR_LAIR) \
and self.mainAgent.can_afford(UnitTypeId.LAIR) and self.mainAgent.units(SPAWNINGPOOL).ready.exists and self.mainAgent.units(QUEEN).amount > 0:
hatchery = self.mainAgent.units(HATCHERY).first
self.mainAgent.num_lairs_built += 1
err = await self.mainAgent.do(hatchery(UPGRADETOLAIR_LAIR))
if err:
self.mainAgent.num_lairs_built -= 1
# # Build hydralisk den when possible
# if not self.mainAgent.units(HYDRALISKDEN).exists and self.mainAgent.units(LAIR).amount > 0 and self.mainAgent.can_afford(HYDRALISKDEN) \
# and self.mainAgent.num_hydralisks_built == 0:
# p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
# self.mainAgent.num_hydralisks_built += 1
# await self.mainAgent.build(HYDRALISKDEN, near=p)
#
# # Build lurker den when possible
# if self.mainAgent.num_lurkerdens_built == 0 and self.mainAgent.units(HYDRALISKDEN).ready.amount > 0 and \
# self.mainAgent.can_afford(UPGRADETOLURKERDEN_LURKERDEN):
# # await self.mainAgent.do(self.mainAgent.units(HYDRALISKDEN).first(UPGRADETOLURKERDEN_LURKERDEN ))
# self.mainAgent.num_lurkerdens_built += 1
# await self.mainAgent.do(self.mainAgent.units(HYDRALISKDEN).first(MORPH_LURKERDEN))
if not self.mainAgent.units(SPIRE).exists and self.mainAgent.units(LAIR).amount > 0 and self.mainAgent.can_afford(SPIRE) \
and not self.mainAgent.already_pending(SPIRE):
p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
await self.mainAgent.build(SPIRE, near=p)
'''
Calls the correct strategy function given the strategy enum value
Strategy functions can be override in base classes
'''
async def perform_strategy(self, iteration, strategy_num):
self.mainAgent.clean_strike_force() # Clear dead units from strike force
self.mainAgent.is_army_cached = False # Must re obtain army data
if self.mainAgent.predicted_enemy_position_num == -1:
# Initializing things that are needed after game data is loaded
# Prevent game from crashing
hatchery = self.mainAgent.bases
if hatchery == None or hatchery.amount == 0:
return
else:
hatchery = self.mainAgent.bases.ready.random
# Assume first position
self.mainAgent.predicted_enemy_position = 0
self.mainAgent.num_enemy_positions = len(self.mainAgent.enemy_start_locations)
self.mainAgent.start_location = self.mainAgent.bases.ready.random.position # Should only be 1 hatchery at this time
self.mainAgent.map_width = self.mainAgent.game_info.map_size[0]
self.mainAgent.map_height = self.mainAgent.game_info.map_size[1]
# Get a point in the corner of the map
p = lambda: None # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
p.x = self.mainAgent.game_info.map_center.x * 1.9
p.y = self.mainAgent.game_info.map_center.y * 1.9
self.mainAgent.map_corner = Point2.from_proto(p)
# Make sure given strategy num is valid
if Strategies.has_value(strategy_num):
# Valid strategy num, convert int into enum value
strategy = Strategies(strategy_num)
# Mark strategy as changed or not
if strategy != self.mainAgent.prev_strategy:
self.mainAgent.log("New strategy is " + str(strategy))
self.mainAgent.did_strategy_change = True
self.mainAgent.strike_force = None
else:
self.mainAgent.did_strategy_change = False
self.mainAgent.prev_strategy = strategy # Prepare for next iteration
else:
self.log_error(f"Unknown strategy number {strategy_num}")
return
# Call the proper strategy function
# Prevent game from crashing
hatchery = self.mainAgent.bases
if hatchery == None or hatchery.amount == 0:
return
else:
hatchery = self.mainAgent.bases.ready.random
# Attack
if strategy == Strategies.HEAVY_ATTACK:
await self.heavy_attack(iteration)
elif strategy == Strategies.MEDIUM_ATTACK:
await self.medium_attack(iteration)
elif strategy == Strategies.LIGHT_ATTACK:
await self.light_attack(iteration)
# Scouting
elif strategy == Strategies.HEAVY_SCOUTING:
await self.heavy_scouting(iteration)
elif strategy == Strategies.MEDIUM_SCOUTING:
await self.medium_scouting(iteration)
elif strategy == Strategies.LIGHT_SCOUTING:
await self.light_scouting(iteration)
# Defense
elif strategy == Strategies.HEAVY_DEFENSE:
await self.heavy_defense(iteration)
elif strategy == Strategies.MEDIUM_DEFENSE:
await self.medium_defense(iteration)
elif strategy == Strategies.LIGHT_DEFENSE:
await self.light_defense(iteration)
# Harass
elif strategy == Strategies.HEAVY_HARASS:
await self.heavy_harass(iteration)
elif strategy == Strategies.MEDIUM_HARASS:
await self.medium_harass(iteration)
elif strategy == Strategies.LIGHT_HARASS:
await self.light_harass(iteration)
# Unknown
else:
self.log("Unknown strategy was given: " + str(strategy))
'''
Send all combat units (including the queen) to a known enemy position
Do NOT recall ever
'''
async def heavy_attack(self, iteration):
await self.attack_with_percentage_of_army(0, 0)
'''
Send all combat units (including the queen) to a known enemy position
Recall after a certain amount of units die
Must keep track of units being used because order of units in self.units constantly changes
'''
async def medium_attack(self, iteration):
await self.attack_with_percentage_of_army(.75, 15)
'''
Attack a known enemy position, but if you get attacked, retreat back to base
'''
async def light_attack(self, iteration):
await self.attack_with_percentage_of_army(.9, .3)
# If more than percentage_to_advance_group percent of strike force is
async def attack_with_percentage_of_army(self, percentage_to_advance_group, percentage_to_retreat_group):
army = self.mainAgent.army
if len(army) == 0:
# No army to use, don't bother trying to attack
self.mainAgent.waypoint = self.mainAgent.game_info.map_center # Restart waypoint
return
# Move army to mainAgent's waypoint and attack things on the way
percentage_units_at_waypoint = \
await self.move_and_get_percent_units_at_waypoint(army, self.mainAgent.waypoint, True)
# If all units are close to the waypoint, pick a closer one
if percentage_units_at_waypoint > percentage_to_advance_group:
target = self.mainAgent.select_target()
self.mainAgent.waypoint = self.mainAgent.waypoint.towards(target, 20)
elif percentage_units_at_waypoint < percentage_to_retreat_group:
# Move waypoint back
if (self.mainAgent.waypoint != self.mainAgent.start_location):
self.mainAgent.waypoint = self.mainAgent.waypoint.towards(self.mainAgent.start_location, 1)
async def move_and_get_percent_units_at_waypoint(self, units, waypoint, should_attack):
# Keep units together
num_units_at_waypoint = 0
# All strike force members attack to the waypoint
for unit in units:
distance_from_waypoint = unit.position.to2.distance_to(waypoint)
if distance_from_waypoint < 15:
num_units_at_waypoint += 1
if should_attack:
await self.mainAgent.do(unit.attack(waypoint))
else:
await self.mainAgent.do(unit.move(waypoint))
percentage_units_at_waypoint = num_units_at_waypoint / len(units)
return percentage_units_at_waypoint
'''
Send all military units out to different areas
Die for knowledge
'''
async def heavy_scouting(self, iteration):
await self.scout_with_percentage_of_army(1, True, False)
'''
Send a good amount of military units out
'''
async def medium_scouting(self, iteration):
await self.scout_with_percentage_of_army(.5, True, False)
'''
Send a couple of things out for scouting and pull back if damage is taken
'''
async def light_scouting(self, iteration):
await self.scout_with_percentage_of_army(.5, True, True)
async def scout_with_percentage_of_army(self, percentage, use_overlords, pull_back_if_damaged):
map_width = self.mainAgent.map_width
map_height = self.mainAgent.map_height
army = self.army
if use_overlords:
army += self.mainAgent.units(OVERLORD)
desired_strike_force_size = int(percentage * army.amount)
if self.mainAgent.strike_force is None:
self.mainAgent.strike_force = army.take(desired_strike_force_size)
# If strike force should include more members (If a unit was built)
# Do not add more units if the entire army is already in strike force
if len(self.mainAgent.strike_force) < desired_strike_force_size and len(army) > len(self.mainAgent.strike_force):
self.mainAgent.strike_force += (army - self.mainAgent.strike_force).take(desired_strike_force_size - len(self.mainAgent.strike_force))
for unit_ref in self.mainAgent.strike_force:
# Need to reacquire unit from self.mainAgent.units to see that a command has been queued
id = unit_ref.tag
unit = self.mainAgent.units.find_by_tag(id)
if unit is None:
# Unit died
self.mainAgent.strike_force.remove(unit_ref)
continue
if pull_back_if_damaged and unit.health < unit.health_max:
# If pull_back is true and unti is damaged, move to random hatchery
if (len(self.mainAgent.bases) > 0):
await self.mainAgent.do(unit.move(self.mainAgent.bases[random.randrange(0, len(self.mainAgent.bases))].position))
elif unit.noqueue:
# Go to a new random position
pos = lambda: None # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
pos.x = random.randrange(0, map_width)
pos.y = random.randrange(0, map_height)
position_to_search = Point2.from_proto(pos)
await self.mainAgent.do(unit.move(position_to_search))
    '''
    Complete recall back to main base
    Build lots of static defenses
    Build lots of lurkers
    '''
    async def heavy_defense(self, iteration):
        # Build 4 spine crawlers, 4 spore crawlers, and 10 lurkers
        await self.prepare_defenses(4, 4, 10)
    '''
    Recall and distribute between main base and explansions
    Build some defensive structures and units
    '''
    async def medium_defense(self, iteration):
        # Build 3 spine crawlers, 3 spore crawlers, and 5 lurkers
        await self.prepare_defenses(3, 3, 5)
    '''
    Distribute forces between main base and expansions
    Build a few defensive structures and units
    '''
    async def light_defense(self, iteration):
        # Build 1 spine crawler, 1 spore crawler, and 3 lurkers
        await self.prepare_defenses(1, 1, 3)
    async def prepare_defenses(self, num_spine_crawlers_to_build, num_sporecrawlers_to_build, num_lurkers_to_build):
        """Recall the army to a ready base and build static defenses and lurkers.

        The three parameters cap how many spine crawlers, spore crawlers and
        lurkers the agent will build in total (tracked by the num_*_built
        counters on mainAgent, not per call).
        """
        hatchery = self.mainAgent.bases.ready.random
        # TODO: have some units go to expansions
        # Return all units (army + overlords) that strayed more than 20 to base
        for unit in self.mainAgent.army + self.mainAgent.overlords:
            if unit.distance_to(hatchery.position) > 20:
                await self.mainAgent.do(unit.move(hatchery.position))
        # Build spine crawlers (requires a finished spawning pool)
        if self.mainAgent.units(SPAWNINGPOOL).ready.exists and self.mainAgent.num_spinecrawlers_built < num_spine_crawlers_to_build \
                and self.mainAgent.can_afford(SPINECRAWLER):
            self.mainAgent.num_spinecrawlers_built += 1
            p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
            await self.mainAgent.build(SPINECRAWLER, near=p)
        # Build spore crawlers (requires a finished evolution chamber)
        if self.mainAgent.units(EVOLUTIONCHAMBER).ready.exists and self.mainAgent.num_sporecrawlers_built < num_sporecrawlers_to_build \
                and self.mainAgent.can_afford(SPORECRAWLER):
            self.mainAgent.num_sporecrawlers_built += 1
            p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3)
            await self.mainAgent.build(SPORECRAWLER, near=p)
        # Morph lurkers from hydralisks (requires a finished lurker den)
        if self.mainAgent.units(LURKERDENMP).ready.exists and self.mainAgent.num_lurkers_built < num_lurkers_to_build \
                and self.mainAgent.can_afford(MORPH_LURKER) and self.mainAgent.num_larva > 0 and self.mainAgent.units(HYDRALISK).amount > 0:
            self.mainAgent.num_lurkers_built += 1
            hydralisk = self.mainAgent.units(HYDRALISK).random
            err = await self.mainAgent.do(hydralisk(MORPH_LURKER))
            if err:
                # Order failed: roll the counter back so we try again later
                self.mainAgent.num_lurkers_built -= 1
        # Burrow all lurkers so they can attack (lurkers only attack while burrowed)
        for lurker in self.mainAgent.units(LURKERMP):
            abilities = await self.mainAgent.get_available_abilities(lurker)
            if AbilityId.BURROWDOWN_LURKER in abilities:
                await self.mainAgent.do(lurker(BURROWDOWN_LURKER))
    '''
    Build swarms hosts and harass with them
    Build mutalisks and harass with them
    If harass units are attacked, move to the next base
    '''
    # Heavy harass: threshold 0 means units never retreat ("die for the harass").
    async def heavy_harass(self, iteration):
        await self.harass(0)  # Die for the harass
    '''
    TODO
    '''
    # Medium harass: units retreat once below half health.
    async def medium_harass(self, iteration):
        await self.harass(.5)  # Return if damaged to half health
    '''
    If attacked pull back for a set time
    Only use harass units if you have them
    '''
    # Light harass: threshold 1 makes any damage at all trigger a retreat.
    async def light_harass(self, iteration):
        await self.harass(1)  # Return immediately if damaged
async def harass(self, percent_health_to_return):
if self.mainAgent.did_strategy_change:
self.mainAgent.mutalisk_waypoint = self.mainAgent.map_corner
if self.army.amount == 0:
# Nothing to harass with
return
harass_target = self.get_harass_target()
mutalisks = self.mainAgent.units(MUTALISK)
# Mutalisk harass is different from other things
if mutalisks.amount > 0:
if self.mainAgent.mutalisk_waypoint == self.mainAgent.enemy_start_locations[0]:
# Second phase of muta harass, when at the enemy base, begin attacking
for muta in mutalisks:
if muta.position.to2.distance_to(self.mainAgent.mutalisk_waypoint):
# Begin attacking workers or anything nearby
await self.mainAgent.do(muta.attack(harass_target))
else:
# Move to whre the workers are without attacking
await self.mainAgent.do(muta.move(self.mainAgent.mutalisk_waypoint))
else:
# Phase 1: Gather the mutas
# Move mutalisks to mutalisk waypoint, and do not attack anything else on the way
percentage_mutas_at_waypoint = await \
self.move_and_get_percent_units_at_waypoint(mutalisks, self.mainAgent.mutalisk_waypoint, False)
if percentage_mutas_at_waypoint > .75:
self.mainAgent.mutalisk_waypoint = self.mainAgent.enemy_start_locations[0] # Send them off to the enemy base
for unit in self.army - self.mainAgent.units(MUTALISK):
if unit.health < unit.health_max * percent_health_to_return:
# low on health so come back
await self.mainAgent.do(unit.move(self.mainAgent.bases.random))
else:
# still full health so keep attacking
await self.mainAgent.do(unit.attack(harass_target))
# Finds a target to harass
# Will first choose workers, and if there are no workers, then to go a known base, and in no known bases,
# Go to enemy main base
def get_harass_target(self):
# If there are known enemy expansions, harass those
enemy_workers = self.mainAgent.known_enemy_units.filter(lambda x: x.name == "Drone" or x.name == "SCV" or x.name == "Probe")
# If workers are visible, attack them
if len(enemy_workers) > 0:
harass_target = enemy_workers.random.position
else:
# If no workers are visible, find a town hall to attack
enemy_bases = self.get_known_enemy_bases()
if len(enemy_bases) > 0:
harass_target = enemy_bases[random.randint(0, len(enemy_bases) - 1)]
else:
# if no town halls are known, go to the enemy start
harass_target = self.mainAgent.enemy_start_locations[0]
return harass_target
'''
Removes dead units from strike force
'''
def clean_strike_force(self):
if self.mainAgent.strike_force is None:
# No defined strike force yet
return
for unit in self.mainAgent.strike_force:
if self.mainAgent.units.find_by_tag(unit.tag) is None:
self.mainAgent.strike_force.remove(unit)
'''
Utilities
'''
@property
def army(self):
if self.mainAgent.is_army_cached:
return self.mainAgent.cached_army
else:
self.mainAgent.is_army_cached = True
self.cached_army = self.mainAgent.units.filter(
lambda x: x.name != "Drone" and x.name != "Overlord" and x.name != "Queen" and x.name != "CreepTumorQueen"\
and x.name != "Egg" and x.name != "Larva" and not x.is_structure and x.name != "CreepTumorBurrowed") \
- self.mainAgent.units(LURKERMPBURROWED) - self.mainAgent.units(LURKERMPEGG) \
- self.mainAgent.units(BANELINGCOCOON)
return self.cached_army
    @property
    def overlords(self):
        """All overlords owned by the main agent."""
        return self.mainAgent.units(OVERLORD)
    @property
    def buildings(self):
        """All own structures.

        NOTE(review): spine/spore crawlers are appended explicitly; if
        is_structure is already True for them they are counted twice — verify.
        """
        return self.mainAgent.units.filter(lambda x: x.is_structure) + self.mainAgent.units(SPINECRAWLER) + self.mainAgent.units(SPORECRAWLER)
    @property
    def bases(self):
        """All town halls at any upgrade level (Hatchery, Lair, or Hive)."""
        return self.mainAgent.units.filter(lambda x: x.name == "Hatchery" or x.name == "Lair" or x.name == "Hive")
    def get_random_worker(self):
        """Return a random drone (raises if there are no drones)."""
        return self.mainAgent.units(DRONE).random
    @property
    def game_time(self):
        """Elapsed game time in (approximate) real seconds.

        game_loop counts simulation steps at 16 steps per in-game second;
        the 0.725 factor presumably converts to wall-clock seconds on
        "faster" game speed — TODO confirm the constant.
        """
        return self.mainAgent.state.game_loop * 0.725 * (1 / 16)
def get_known_enemy_bases(self):
# Get all enemy structures, then filter to only take townhall types
enemy_structures = self.mainAgent.known_enemy_structures
townhall_ids = [item for sublist in race_townhalls.values() for item in sublist]
return enemy_structures.filter(lambda x: x.type_id in townhall_ids)
'''
From Dentosal's proxyrax build
Targets a random known enemy building
If no known buildings, go towards to a possible enemy start position
'''
def select_target(self):
target = self.mainAgent.known_enemy_units
if target.exists:
return target.random.position
target = self.mainAgent.known_enemy_units
if target.exists:
return target.random.position
return self.mainAgent.enemy_start_locations[0]
# Code to explore more than one enemy starting position not needed because all maps are only 2 people
# Not tested
# # Explore other starting positions
# units_near_predicted_position = self.mainAgent.units.filter(lambda x: x.position.distance_to(
# self.enemy_start_locations[self.predicted_enemy_position]) < 5)
# if len(units_near_predicted_position) > 0:
# # There is a unit near the predicted position, but no visible structures or enemies
# self.predicted_enemy_position = (self.predicted_enemy_position + 1)
# # loop over starting positions if needed
# if self.predicted_enemy_position >= self.num_enemy_positions:
# self.predicted_enemy_position = 0
#
# return self.enemy_start_locations[self.predicted_enemy_position]
    @property
    def num_larva(self):
        """Get the current amount of larva"""
        return self.mainAgent.units(LARVA).amount
    @property
    def random_larva(self):
        """Get a random larva (raises if there are none; check num_larva first)."""
        return self.mainAgent.units(LARVA).random
    '''
    Prints to console if self.is_printing_to_console
    Writes to log file if self.is_logging
    '''
    def log(self, data):
        """Log the data to the logfile if this agent is set to log information and logfile is below 1 megabyte"""
        # The size check caps the logfile at ~1 MB so a long game can't fill the disk.
        if self.mainAgent.is_logging and os.path.getsize(self.mainAgent.log_file_name) < 1000000:
            self.mainAgent.log_file.write(f"{data}\n")
        if self.mainAgent.is_printing_to_console:
            print(data)
def log_error(self, data):
data = f"ERROR: {data}"
self.mainAgent.log_file.write(f"{data}\n")
print(data)
def main():
    # Start game with LoserAgent as the Bot, and begin logging.
    # Single non-realtime match on Abyssal Reef LE: our Zerg bot versus a
    # medium-difficulty Protoss computer. The three True flags presumably
    # enable logging/printing options — confirm against LoserAgent.__init__.
    sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
        Bot(Race.Zerg, LoserAgent(True, True, True)),
        Computer(Race.Protoss, Difficulty.Medium)
    ], realtime=False)
if __name__ == '__main__':
main() | en | 0.808476 | # https://chatbotslife.com/building-a-basic-pysc2-agent-b109cde1477c # For debugging purposes only # Get strategy enums # For debugging # Setting this to true to write information to log files in the agents/logs directory # Setting this to true causes all logs to be printed to the console # Make logs directory if it doesn't exist # Create log file based on the time # Constants # If an upgrade has been research # If an upgrade is being researched # If an upgrade is not being researched and has not been researched # non-standard upgrade status # standard upgrades # Ground melee # Ground ranged # Ground defense # Flyer attack # Flyer defense # units built # Static defenses built # Structure built # Units actively being used for things, gets set to null on strategy change # Previous strategy so you now when the strategy changes # True if strategy just changed in this iteration # Way point for units to move to # Predict enemy will be in the first possible position # Position to search for enemy untis # Position the bot begins # Easier way to access map information, must be loaded in after game loads # Top left corner of the map for mutas # Set to true after army is requested to prevent duplicate queries in the same iteration # gets set to false in each perform_strategy call # Saves army each iteration to prevent duplicate queries # SafeRoachAgent attributes needed that should not conflict with prior existing attributes # Number of BUILT units, different from number of unit types # number of built creep tumors # number of seed creep tumors built by queens # number of built drones # number of overlords built # number of hatcheries built # number of viable tumors rebuilt # number of zerglings built # number of queens built # number of spore crawlers built # number of spine crawlers built # gap in missing drones # counter for replenishing drones # closed gap boolean # Number of roaches built # Number of hydralisks built # number of extractors built 
# gap in missing queens # counter for replenishing queens # checks for true/false # checks if base build order is complete # checks if there's a tumor that can spawn other tumors # standard upgrades # whether workers are assigned to the first vespene geyser # whether a spawning pool was built # whether RESEARCH_ZERGLINGMETABOLICBOOST was performed # whether the roach warren was built # True if one Lair has been upgraded # True if glial reconstitution has been built # True if hydralisk den has been built # True if grooved spines are researched # True if evolution chamber is built # True if ground armor 1 built # True if missile weapon 1 built Base on_step function Uses basic_build and performs actions based on the current strategy For now, strategies will change ever 100 steps Harass strategies are not implemented yet # self.log("Step: %s Overlord: %s" % (str(iteration), str(self.mainAgent.units(OVERLORD).amount))) # self.log("Step: " + str(iteration)) # TEMP: Until strategy is given by Q table #strategy_num = (int)(iteration / 75) % 12 # Build lings, queen, overlords, drones, and meleeattack1 # Perform actions based on given strategy # self.mainAgent.log("No given strategy") Builds a ton of lings Build drones and start gathering vespene Build a queen Build overlords as needed Builds a few hydralisks # Build overlords if close to reaching cap # Build drones # Build lings # Build Spawning pool # If Extractor does not have 3 drones, give it more drones # # Build Evolution Chamber pool # if not self.mainAgent.units(EVOLUTIONCHAMBER).exists and self.mainAgent.can_afford(SPAWNINGPOOL): # p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3) # await self.mainAgent.build(EVOLUTIONCHAMBER, near=p) # elif self.mainAgent.can_afford(RESEARCH_ZERGMELEEWEAPONSLEVEL1) and self.mainAgent.melee1 == 0 and self.mainAgent.units(EVOLUTIONCHAMBER).ready.exists: # # Get melee1 upgrade # self.mainAgent.melee1 = 1 # await 
self.mainAgent.do(self.mainAgent.units(EVOLUTIONCHAMBER).first(RESEARCH_ZERGMELEEWEAPONSLEVEL1)) # Build a queen if you haven't # Inject larva when possible # Upgrade to lair when possible # # Build hydralisk den when possible # if not self.mainAgent.units(HYDRALISKDEN).exists and self.mainAgent.units(LAIR).amount > 0 and self.mainAgent.can_afford(HYDRALISKDEN) \ # and self.mainAgent.num_hydralisks_built == 0: # p = hatchery.position.towards(self.mainAgent.game_info.map_center, 3) # self.mainAgent.num_hydralisks_built += 1 # await self.mainAgent.build(HYDRALISKDEN, near=p) # # # Build lurker den when possible # if self.mainAgent.num_lurkerdens_built == 0 and self.mainAgent.units(HYDRALISKDEN).ready.amount > 0 and \ # self.mainAgent.can_afford(UPGRADETOLURKERDEN_LURKERDEN): # # await self.mainAgent.do(self.mainAgent.units(HYDRALISKDEN).first(UPGRADETOLURKERDEN_LURKERDEN )) # self.mainAgent.num_lurkerdens_built += 1 # await self.mainAgent.do(self.mainAgent.units(HYDRALISKDEN).first(MORPH_LURKERDEN)) Calls the correct strategy function given the strategy enum value Strategy functions can be override in base classes # Clear dead units from strike force # Must re obtain army data # Initializing things that are needed after game data is loaded # Prevent game from crashing # Assume first position # Should only be 1 hatchery at this time # Get a point in the corner of the map # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python # Make sure given strategy num is valid # Valid strategy num, convert int into enum value # Mark strategy as changed or not # Prepare for next iteration # Call the proper strategy function # Prevent game from crashing # Attack # Scouting # Defense # Harass # Unknown Send all combat units (including the queen) to a known enemy position Do NOT recall ever Send all combat units (including the queen) to a known enemy position Recall after a certain amount of units die Must keep track of units being used because order of 
units in self.units constantly changes Attack a known enemy position, but if you get attacked, retreat back to base # If more than percentage_to_advance_group percent of strike force is # No army to use, don't bother trying to attack # Restart waypoint # Move army to mainAgent's waypoint and attack things on the way # If all units are close to the waypoint, pick a closer one # Move waypoint back # Keep units together # All strike force members attack to the waypoint Send all military units out to different areas Die for knowledge Send a good amount of military units out Send a couple of things out for scouting and pull back if damage is taken # If strike force should include more members (If a unit was built) # Do not add more units if the entire army is already in strike force # Need to reacquire unit from self.mainAgent.units to see that a command has been queued # Unit died # If pull_back is true and unti is damaged, move to random hatchery # Go to a new random position # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python Complete recall back to main base Build lots of static defenses Build lots of lurkers # Build 5 spinecrawlers and sporecrawlers, and 10 lurkers Recall and distribute between main base and explansions Build some defensive structures and units # Build 3 spinecrawlers and sporecrawlers, and 5 lurkers Distribute forces between main base and expansions Build a few defensive structures and units # Build 1 spinecrawlers and sporecrawlers, and 3 lurkers # TODO: have some units go to expansions # Return all units to base # Build spine crawlers # Build spore crawlers # Build lurkers # Burrow all lurkers so they can attack Build swarms hosts and harass with them Build mutalisks and harass with them If harass units are attacked, move to the next base # Die for the harass TODO # Return if damaged to half health If attacked pull back for a set time Only use harass units if you have them # Return immediately if damaged # Nothing 
to harass with # Mutalisk harass is different from other things # Second phase of muta harass, when at the enemy base, begin attacking # Begin attacking workers or anything nearby # Move to whre the workers are without attacking # Phase 1: Gather the mutas # Move mutalisks to mutalisk waypoint, and do not attack anything else on the way # Send them off to the enemy base # low on health so come back # still full health so keep attacking # Finds a target to harass # Will first choose workers, and if there are no workers, then to go a known base, and in no known bases, # Go to enemy main base # If there are known enemy expansions, harass those # If workers are visible, attack them # If no workers are visible, find a town hall to attack # if no town halls are known, go to the enemy start Removes dead units from strike force # No defined strike force yet Utilities # Get all enemy structures, then filter to only take townhall types From Dentosal's proxyrax build Targets a random known enemy building If no known buildings, go towards to a possible enemy start position # Code to explore more than one enemy starting position not needed because all maps are only 2 people # Not tested # # Explore other starting positions # units_near_predicted_position = self.mainAgent.units.filter(lambda x: x.position.distance_to( # self.enemy_start_locations[self.predicted_enemy_position]) < 5) # if len(units_near_predicted_position) > 0: # # There is a unit near the predicted position, but no visible structures or enemies # self.predicted_enemy_position = (self.predicted_enemy_position + 1) # # loop over starting positions if needed # if self.predicted_enemy_position >= self.num_enemy_positions: # self.predicted_enemy_position = 0 # # return self.enemy_start_locations[self.predicted_enemy_position] Get the current amount of larva Get a random larva Prints to console if self.is_printing_to_console Writes to log file if self.is_logging Log the data to the logfile if this agent is set to log 
information and logfile is below 1 megabyte # Start game with LoserAgent as the Bot, and begin logging | 2.442422 | 2 |
gmql/dataset/__init__.py | DEIB-GECO/PyGMQL | 12 | 6624058 | <reponame>DEIB-GECO/PyGMQL<filename>gmql/dataset/__init__.py
from .loaders.Loader import load_from_path
from ..FileManagment import get_resources_dir
import os
def get_example_dataset(name="Example_Dataset_1", load=False):
    """Load one of the bundled example GMQL datasets by name.

    If load is True the dataset is materialized before being returned.
    """
    dataset_dir = os.path.join(get_resources_dir(), "example_datasets", name)
    dataset = load_from_path(dataset_dir)
    return dataset.materialize() if load else dataset
| from .loaders.Loader import load_from_path
from ..FileManagment import get_resources_dir
import os
def get_example_dataset(name="Example_Dataset_1", load=False):
data_path = os.path.join(get_resources_dir(), "example_datasets", name)
res = load_from_path(data_path)
if load:
res = res.materialize()
return res | none | 1 | 2.146426 | 2 | |
code/archive/config.py | funked1/pieper_md | 0 | 6624059 | import configparser
config = configparser.ConfigParser()
# Database connection credentials
config['database'] = {'host' : 'localhost',
'user' : 'testuser',
'pswd' : 'password',
'data' : 'test',
'charset': 'utf8mb4'}
# Configure patient data
config['patient'] = {'l_name': 'Maxwell',
'f_name': 'James',
'DOB' : '06/13/1831',
'pt_id' : '123-45-678'}
# Configure sampling parameters
config['sampling'] = {'num_channels': 8,
'num_samples' : 2000,
'samp_freq' : 200,
'temp_hist' : 10}
# Configure recording channels
config['channels'] = {'ch1': 'f3c3',
'ch2': 'c3o1',
'ch3': 'f3t3',
'ch4': 't3o1',
'ch5': 'f4c4',
'ch6': 'c4o2',
'ch7': 'f4t4',
'ch8': 't4o2'}
with open('config.ini', 'w') as configfile:
config.write(configfile)
| import configparser
config = configparser.ConfigParser()
# Database connection credentials
config['database'] = {'host' : 'localhost',
                      'user' : 'testuser',
                      'pswd' : 'password',
                      'data' : 'test',
                      'charset': 'utf8mb4'}
# Configure patient data
config['patient'] = {'l_name': 'Maxwell',
                     'f_name': 'James',
                     'DOB'   : '06/13/1831',
                     'pt_id' : '123-45-678'}
# Configure sampling parameters (integers are stringified by ConfigParser)
config['sampling'] = {'num_channels': 8,
                      'num_samples' : 2000,
                      'samp_freq'   : 200,
                      'temp_hist'   : 10}
# Configure recording channels (electrode pairs)
config['channels'] = {'ch1': 'f3c3',
                      'ch2': 'c3o1',
                      'ch3': 'f3t3',
                      'ch4': 't3o1',
                      'ch5': 'f4c4',
                      'ch6': 'c4o2',
                      'ch7': 'f4t4',
                      'ch8': 't4o2'}
# Write the assembled configuration to config.ini in the working directory.
with open('config.ini', 'w') as configfile:
    config.write(configfile)
| en | 0.463474 | # Database connection credentials # Configure patient data # Configure sampling parameters # Configure recording channels | 2.269794 | 2 |
src/acconeer/exptool/__init__.py | maxijohansson/acconeer-python-exploration | 0 | 6624060 | __version__ = "3.2.19"
SDK_VERSION = "2.1.0"
| __version__ = "3.2.19"
SDK_VERSION = "2.1.0"
| none | 1 | 1.070774 | 1 | |
P25063-ChenJi/week3.py | magedu-pythons/python-25 | 1 | 6624061 | <filename>P25063-ChenJi/week3.py
#1、给出任意一个列表,请查找出x元素是否在列表里面,如果存在返回1,不存在返回0
lst = ['a', 'b', 'c', 'x', 'd']


def findx(target='x'):
    """Return 1 if target occurs anywhere in lst, else 0.

    Fixed: the original returned after inspecting only the first element
    (the else branch fired immediately when lst[0] was not a match).
    Generalized: target defaults to 'x', the originally hard-coded value.
    """
    for ch in lst:
        if ch == target:
            return 1
    return 0


print(findx())
# 如果x是一个随机字符呢?代码该怎么修改?
#2、任一个英文的纯文本文件,统计其中的单词出现的个数
import string

# Count word frequencies in the plain-text file 'english'.
word_dict = {}
with open('english') as f:
    txt = f.read()
# FIX: split() with no argument splits on any whitespace run (spaces, tabs,
# newlines) and never yields empty strings, unlike split(" "). Tokens that
# are pure punctuation strip down to '' and are skipped.
for word in txt.split():
    key = word.strip(string.punctuation)
    if key:
        word_dict[key] = word_dict.get(key, 0) + 1
print(word_dict)
# 只使用空格来分隔字符串,忽略了英文里面以, . ! ? 等各种标点符号结尾的情况。
| <filename>P25063-ChenJi/week3.py
#1、给出任意一个列表,请查找出x元素是否在列表里面,如果存在返回1,不存在返回0
lst = ['a', 'b', 'c', 'x', 'd']


def findx(target='x'):
    """Return 1 if target occurs anywhere in lst, else 0.

    Fixed: the original returned after inspecting only the first element
    (the else branch fired immediately when lst[0] was not a match).
    Generalized: target defaults to 'x', the originally hard-coded value.
    """
    for ch in lst:
        if ch == target:
            return 1
    return 0


print(findx())
# 如果x是一个随机字符呢?代码该怎么修改?
#2、任一个英文的纯文本文件,统计其中的单词出现的个数
import string
# Count word frequencies in the plain-text file 'english'.
word_dict = {}
with open('english') as f:
    txt = f.read()
# NOTE: splits on a single space only, so words separated by newlines or tabs
# stay glued together (a limitation the author acknowledges below). Leading
# and trailing punctuation is stripped from each token before counting.
for word in txt.split(" "):
    word_dict[word.strip(string.punctuation)] = word_dict.get(word.strip(string.punctuation), 0) + 1
print(word_dict)
# 只使用空格来分隔字符串,忽略了英文里面以, . ! ? 等各种标点符号结尾的情况。
| zh | 0.989334 | #1、给出任意一个列表,请查找出x元素是否在列表里面,如果存在返回1,不存在返回0 # 如果x是一个随机字符呢?代码该怎么修改? #2、任一个英文的纯文本文件,统计其中的单词出现的个数 # 只使用空格来分隔字符串,忽略了英文里面以, . ! ? 等各种标点符号结尾的情况。 | 3.918409 | 4 |
examples/makePoly.py | VictorVaquero/geneticPendulum | 0 | 6624062 | """Function allowsInteractively chose polygon vertices, ending when click above line.
Illustrates use of while loops, Text, Line, Polygon, getMouse.
"""
from graphics import *
def isBetween(x, end1, end2):
    '''Return True if x lies in the closed interval spanned by the two ends.
    The ends may be given in either order.'''
    low, high = min(end1, end2), max(end1, end2)
    return low <= x <= high
def isInside(point, rect):
    '''Return True if the point is inside the Rectangle rect.'''
    corner1 = rect.getP1()
    corner2 = rect.getP2()
    # Inside means between the two corners on both the x and y axes.
    return (isBetween(point.getX(), corner1.getX(), corner2.getX())
            and isBetween(point.getY(), corner1.getY(), corner2.getY()))
def polyHere(rect, win):
    ''' Draw a polygon interactively in Rectangle rect, in GraphWin win.
    Collect mouse clicks inside rect into the vertices of a Polygon,
    and always draw the Polygon created so far.
    When a click goes outside rect, stop and return the final polygon
    (or None if the very first click was already outside rect).
    The Polygon ends up drawn.  The method draws and undraws rect.
    '''
    rect.setOutline("red")
    rect.draw(win)
    vertices = list()
    poly = None  # BUG FIX: poly was unbound (UnboundLocalError) if the first click fell outside rect
    pt = win.getMouse()
    while isInside(pt, rect):
        vertices.append(pt)
        poly = Polygon(vertices)
        poly.draw(win)      # show the polygon built so far
        pt = win.getMouse()
        poly.undraw()       # hide it before (possibly) drawing the next version
    if poly is not None:
        poly.draw(win)      # leave the final polygon visible
    rect.undraw()
    return poly
def main():
    # 400x400 window; yUp() flips the y axis so y increases upward.
    win = GraphWin('Drawing Polygons', 400, 400)
    win.yUp()
    instructions = Text(Point(win.getWidth()/2, 30),
                        "Click vertices inside the red rectangle."+
                        "\nClick outside the rectangle to stop.")
    instructions.draw(win)
    # First polygon: drawn inside a wide, short rectangle, then filled green.
    rect1 = Rectangle(Point(5, 55), Point(200, 120))
    poly1 = polyHere(rect1, win)
    poly1.setFill('green')
    # Second polygon: taller rectangle on the right, outlined orange.
    rect2 = Rectangle(Point(210, 50), Point(350, 350))
    poly2 = polyHere(rect2, win)
    poly2.setOutline('orange')
    win.promptClose(instructions)
main()
| """Function allowsInteractively chose polygon vertices, ending when click above line.
Illustrates use of while loops, Text, Line, Polygon, getMouse.
"""
from graphics import *
def isBetween(x, end1, end2):
    '''Return True if x is between the ends or equal to either.
    The ends do not need to be in increasing order.'''
    # Checking both orderings handles ends given in either order.
    return end1 <= x <= end2 or end2 <= x <= end1
def isInside(point, rect):
    '''Return True if the point is inside the Rectangle rect.'''
    pt1 = rect.getP1()
    pt2 = rect.getP2()
    # Inside means between the two corners on both the x and y axes.
    return isBetween(point.getX(), pt1.getX(), pt2.getX()) and \
           isBetween(point.getY(), pt1.getY(), pt2.getY())
def polyHere(rect, win):
    ''' Draw a polygon interactively in Rectangle rect, in GraphWin win.
    Collect mouse clicks inside rect into the vertices of a Polygon,
    and always draw the Polygon created so far.
    When a click goes outside rect, stop and return the final polygon.
    The Polygon ends up drawn. The method draws and undraws rect.
    '''
    rect.setOutline("red")
    rect.draw(win)
    vertices = list()
    pt = win.getMouse()
    # NOTE(review): if the very first click lands outside rect, the loop body
    # never runs and `poly` is referenced below while unbound (UnboundLocalError).
    while isInside(pt, rect):
        vertices.append(pt)
        poly = Polygon(vertices)
        poly.draw(win)  # show the polygon built so far
        pt = win.getMouse()
        poly.undraw()  # hide it before (possibly) drawing the next version
    poly.draw(win)  # leave the final polygon visible
    rect.undraw()
    return poly
def main():
    # 400x400 window; yUp() flips the y axis so y increases upward.
    win = GraphWin('Drawing Polygons', 400, 400)
    win.yUp()
    instructions = Text(Point(win.getWidth()/2, 30),
                        "Click vertices inside the red rectangle."+
                        "\nClick outside the rectangle to stop.")
    instructions.draw(win)
    # First polygon: wide, short rectangle, filled green when done.
    rect1 = Rectangle(Point(5, 55), Point(200, 120))
    poly1 = polyHere(rect1, win)
    poly1.setFill('green')
    # Second polygon: taller rectangle on the right, outlined orange.
    rect2 = Rectangle(Point(210, 50), Point(350, 350))
    poly2 = polyHere(rect2, win)
    poly2.setOutline('orange')
    win.promptClose(instructions)
main()
| en | 0.833998 | Function allowsInteractively chose polygon vertices, ending when click above line. Illustrates use of while loops, Text, Line, Polygon, getMouse. Return True if x is between the ends or equal to either. The ends do not need to be in increasing order. Return True if the point is inside the Rectangle rect. Draw a polygon interactively in Rectangle rect, in GraphWin win. Collect mouse clicks inside rect into the vertices of a Polygon, and always draw the Polygon created so far. When a click goes outside rect, stop and return the final polygon. The Polygon ends up drawn. The method draws and undraws rect. | 4.104315 | 4 |
Python/Model.py | pilarlorente/Energy-expenditure-prediction-wearable-grist | 0 | 6624063 | #!/usr/bin/env python
# coding: utf-8
# In[14]:
##### Import packages
# Basic packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Modelling packages
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
# To avoid warnings
import warnings
warnings.filterwarnings("ignore")
# In[15]:
# Load the pre-processed energy-expenditure dataset.
df=pd.read_csv('energy_data.csv')
# In[16]:
df  # notebook leftover: displays the dataframe in Jupyter; no effect as a script
# In[17]:
##### Defining MAPE(Mean Absolute Percentage Error)
def mean_absolute_percentage_error(y_true, y_pred):
    """Mean Absolute Percentage Error (MAPE), in percent.

    NOTE(review): entries of y_true equal to 0 produce a division by zero,
    exactly as in the original formulation.
    """
    actual = np.asarray(y_true)
    predicted = np.asarray(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100
# In[18]:
##### Columns based on time, change the format
# Parse the timestamp columns into pandas datetimes.
for col in ['date_Hr', 'startDate_energy', 'endDate_energy']:
    df[col] = pd.to_datetime(df[col])
# In[19]:
# Derive time-based features from date_Hr (presumably the heart-rate sample
# timestamp — confirm against the data pipeline):
#   time_elapsed: seconds between date_Hr and the energy interval start
#   day / month / hour: calendar components of date_Hr
df["time_elapsed"] = (df["startDate_energy"] - df["date_Hr"]).astype('timedelta64[s]')
df["day"] = df.date_Hr.apply(lambda x: x.day)
df["month"] = df.date_Hr.apply(lambda x: x.month)
df["hour"] = df.date_Hr.apply(lambda x: x.hour)
# Drop the raw datetime columns (and totalTime_energy), superseded by the features above.
df.drop(['date_Hr', 'startDate_energy', 'endDate_energy','totalTime_energy'], axis=1, inplace=True)
df.head(10)
# ## Modelling
# In[20]:
# Target variable: measured energy expenditure.
target= "value_energy"
# Features: every column except the identifier and the target itself.
features=list(df.columns)
features.remove("id_")
features.remove("value_energy")
# Feature matrix / target vector as numpy arrays.
X = df[features].values
y = df[target].values
# Split into two datasets to train and test the models (80/20 hold-out).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# ## Decision Tree
# In[25]:
model = DecisionTreeRegressor()
# Hyper-parameter grid for the decision tree (MAE split criterion throughout).
params = {'criterion':['mae'],
          'max_depth': [4,5,6,7],
          'max_features': [7,8,9,10],
          'max_leaf_nodes': [30,40,50],
          'min_impurity_decrease' : [0.0005,0.001,0.005],
          'min_samples_split': [2,4]}
# GridSearch: 10-fold CV scored (and refit) on negative median absolute error.
grid_solver = GridSearchCV(estimator = model,
                           param_grid = params,
                           scoring = 'neg_median_absolute_error',
                           cv = 10,
                           refit = 'neg_median_absolute_error',
                           verbose = 0)
model_result = grid_solver.fit(X_train,y_train)
# Refit the best configuration on the full dataset (train + test).
reg = model_result.best_estimator_
reg.fit(X,y)
# In[26]:
print(model_result.best_params_)
# In[30]:
best_model=model_result.best_estimator_
# Refit the best model on the full dataset before reading its importances.
final_model=best_model.fit(X,y)
# Feature importances, one value per feature column.
len(df[features].columns)  # notebook leftover: bare expression, no effect as a script
len(final_model.feature_importances_)  # notebook leftover: bare expression, no effect as a script
importances=pd.DataFrame([df[features].columns,final_model.feature_importances_], index=["feature","importance"]).T
importances.sort_values('importance', ascending=False)
# ## Random Forest
#
# To do the CV of this model we will use a param_grid based on the results of the decision tree, because the random forest is born from the "trade-off" between bias and variance. The tree has a low bias but a high variance, so we will try to combine models with a low bias and that are not fully correlated to reduce the variance.
# In[ ]:
##### Activity Intensity
# FIX: the markdown commentary of this cell was exported as bare text,
# which made the whole script a SyntaxError; it is preserved as a comment:
# In addition to calculating the energy expenditure, for each time interval
# the intensity level of the activity must be classified. The classification
# is based on the metabolic equivalents or METS (kcal/kg*h) of the activity:
# light activity < 3 METS, moderate 3 - 6 METS and intense > 6 METS.
# A person of 75 kg is assumed. The model chosen is the Random Forest
# Regressor, which has the lowest MAPE.
reg = RandomForestRegressor(criterion='mae', max_depth=8, max_features=12,
                            max_leaf_nodes=30, min_impurity_decrease=0.001,
                            n_estimators=15)
reg.fit(X, y)
yhat = reg.predict(X)
# FIX: `df_acc_final` was never defined (NameError); the dataframe in scope
# is `df`, exactly as the equivalent cell at the end of this script uses.
ids = df['id_'].to_frame()
ids['yhat'] = yhat
# METs = predicted energy divided by (weight_kg * interval_s / 3600);
# 62 is presumably the sampling interval in seconds -- TODO confirm.
ids['METs'] = ids["yhat"] / (75 * 62 / 3600)
conditions = [(ids["METs"] < 3), ((3 < ids["METs"]) & (ids["METs"] < 6)), (ids["METs"] > 6)]
names = ['ligera', 'moderada', 'intensa']
ids['intensidad'] = np.select(conditions, names)
ids
# In[22]:
# Grid-search a RandomForestRegressor; the grid is centred on the best
# decision-tree settings found above.
model = RandomForestRegressor()
params = {'bootstrap': [True],
          'criterion':['mae'],
          'max_depth': [8,10],
          'max_features': [10,12],
          'max_leaf_nodes': [10,20,30],
          'min_impurity_decrease' : [0.001,0.01],
          'min_samples_split': [2,4],
          'n_estimators': [10,15]}
# GridSearch
grid_solver = GridSearchCV(estimator = model,
                           param_grid = params,
                           scoring = 'neg_median_absolute_error',
                           cv = 7,
                           refit = 'neg_median_absolute_error',
                           verbose = 0)
model_result = grid_solver.fit(X_train,y_train)
reg = model_result.best_estimator_
# FIX: fit on the training split only before scoring on X_test. The
# original refit on the full data set (X, y), leaking the held-out rows
# into the model and optimistically biasing the MAPE reported below.
reg.fit(X_train, y_train)
# In[23]:
##### Mean Absolute Percentage Error
yhat = reg.predict(X_test)
# FIX: the arguments were swapped -- MAPE divides by y_true, which must be
# the actual test targets, not the predictions.
print("Mean Absolute Percentage Error = %.2f" % mean_absolute_percentage_error(y_test, yhat), '%')
# In[24]:
##### Feature Importance
# Rank the features by the forest's impurity-based importances and plot
# them as a horizontal bar chart.
features_importance = reg.feature_importances_
features_array = np.array(features)
# argsort ascending, then reversed -> feature names ordered from most to
# least important.
features_array_ordered = features_array[(features_importance).argsort()[::-1]]
features_array_ordered
plt.figure(figsize=(16,10))
sns.barplot(y = features_array, x = features_importance, orient='h', order=features_array_ordered[:50])
plt.show()
# ## Activity Intensity
#
# In addition to calculate the energy expenditure, for each time interval, the level of intensity of the activity carried out must be calculated. The classification of the intensity level is based on the metabolic equivalents or METS (kcal/kg*h) of the activity being:
#
# light activity < 3 METS, moderate 3 - 6 METS and intense > 6 METS.
#
# To estimate it, I consider a person of 75 kg. The model chosen is the Random Forest Regressor which has the lowest MAPE.
# In[33]:
df
# In[47]:
# Final intensity classification: refit the chosen forest on all data,
# predict the energy per interval and bucket it into METs-based intensity
# levels. A 75 kg person is assumed; 62 is presumably the interval length
# in seconds -- TODO confirm.
reg = RandomForestRegressor(criterion='mae', max_depth=8, max_features=12,
                            max_leaf_nodes=30, min_impurity_decrease=0.001,
                            n_estimators=15)
reg.fit(X,y)
yhat = reg.predict(X)
ids = df['id_'].to_frame()
ids['yhat'] = yhat
# Energy -> METs conversion: value / (weight_kg * interval_s / 3600).
ids['METs'] = ids["yhat"] / (75 * 62 / 3600)
# Buckets: <3 light, 3-6 moderate, >6 intense.
# NOTE(review): values exactly equal to 3 or 6 match no condition and get
# np.select's default (0) -- confirm that is intended.
conditions = [(ids["METs"] < 3 ),((3 < ids["METs"]) & (ids["METs"] < 6)),(ids["METs"] > 6)]
names = ['light', 'moderate', 'intense']
ids['intensity'] = np.select(conditions, names)
ids
# The substantial improvement that can be seen when we introduce the non-linearity of the model invites us to deduce that the relationships between the variables and the target are not linear. More efforts should be made to collect all the information on physical activity. Additional information about individuals such as age, sex and weight would help to improve the ASM of the model in several points.
#
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# In[14]:
##### Import packages
# Basic packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Modelling packages
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
# To avoid warnings
# Silence all library warnings for the whole run; note this also hides
# deprecation warnings from pandas/scikit-learn that would flag dated usage.
import warnings
warnings.filterwarnings("ignore")
# In[15]:
# Load the raw energy data set; assumes energy_data.csv sits in the working
# directory -- TODO confirm the expected columns (id_, value_energy, the
# three date columns, totalTime_energy, ...).
df=pd.read_csv('energy_data.csv')
# In[16]:
# Bare expression: displayed the dataframe in the original notebook; it is
# a no-op when the export is run as a script.
df
# In[17]:
##### Defining MAPE(Mean Absolute Percentage Error)
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (in %) between two series.

    Note: undefined when ``y_true`` contains zeros (division by zero).
    """
    actual = np.asarray(y_true, dtype=float)
    predicted = np.asarray(y_pred, dtype=float)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100.0
# In[18]:
##### Columns based on time, change the format
# Parse the three timestamp columns from strings into datetime64 values.
for col in ['date_Hr', 'startDate_energy', 'endDate_energy']:
    df[col] = pd.to_datetime(df[col])
# In[19]:
#Creating new columns of time
# Seconds between the hourly reading and the start of the energy interval.
# NOTE(review): .astype('timedelta64[s]') is deprecated in recent pandas;
# .dt.total_seconds() is the modern equivalent -- confirm the pandas version.
df["time_elapsed"] = (df["startDate_energy"] - df["date_Hr"]).astype('timedelta64[s]')
# Calendar features derived from the reading timestamp.
df["day"] = df.date_Hr.apply(lambda x: x.day)
df["month"] = df.date_Hr.apply(lambda x: x.month)
df["hour"] = df.date_Hr.apply(lambda x: x.hour)
# Drop the raw datetime columns (and totalTime_energy) now that the derived
# numeric features exist; mutates df in place.
df.drop(['date_Hr', 'startDate_energy', 'endDate_energy','totalTime_energy'], axis=1, inplace=True)
df.head(10)
# ## Modelling
# In[20]:
#target
target= "value_energy"
#features
# Every column except the row id and the target is used as a feature.
features=list(df.columns)
features.remove("id_")
features.remove("value_energy")
#Division
X = df[features].values
y = df[target].values
# Split into two data sets to train and test the models.
# NOTE(review): no random_state is set, so the split (and every score below)
# changes between runs -- confirm whether reproducibility matters here.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# ## Decision Tree
# In[25]:
# Grid-search a DecisionTreeRegressor over depth/leaf/split settings,
# scored by median absolute error under 10-fold cross-validation.
model = DecisionTreeRegressor()
# NOTE(review): criterion 'mae' was renamed 'absolute_error' in newer
# scikit-learn releases -- confirm the pinned sklearn version.
params = {'criterion':['mae'],
          'max_depth': [4,5,6,7],
          'max_features': [7,8,9,10],
          'max_leaf_nodes': [30,40,50],
          'min_impurity_decrease' : [0.0005,0.001,0.005],
          'min_samples_split': [2,4]}
# GridSearch
grid_solver = GridSearchCV(estimator = model,
                           param_grid = params,
                           scoring = 'neg_median_absolute_error',
                           cv = 10,
                           refit = 'neg_median_absolute_error',
                           verbose = 0)
model_result = grid_solver.fit(X_train,y_train)
reg = model_result.best_estimator_
# NOTE(review): refitting the winning tree on the FULL data set (X, y)
# means any later evaluation on X_test is optimistically biased.
reg.fit(X,y)
# In[26]:
print(model_result.best_params_)
# In[30]:
best_model=model_result.best_estimator_
#best model
# Refit the best grid-search candidate on the full data set.
final_model=best_model.fit(X,y)
# features importances
# The two bare len() calls below are leftover notebook display lines; they
# have no effect when the export is run as a script.
len(df[features].columns)
len(final_model.feature_importances_)
# Pair each feature with its impurity-based importance; the sorted frame is
# displayed, not stored.
importances=pd.DataFrame([df[features].columns,final_model.feature_importances_], index=["feature","importance"]).T
importances.sort_values('importance', ascending=False)
# ## Random Forest
#
# To do the CV of this model we will use a param_grid based on the results of the decision tree, because the random forest is born from the "trade-off" between bias and variance. The tree has a low bias but a high variance, so we will try to combine models with a low bias and that are not fully correlated to reduce the variance.
# In[ ]:
##### Activity Intensity
# FIX: the markdown commentary of this cell was exported as bare text,
# which made the whole script a SyntaxError; it is preserved as a comment:
# In addition to calculating the energy expenditure, for each time interval
# the intensity level of the activity must be classified. The classification
# is based on the metabolic equivalents or METS (kcal/kg*h) of the activity:
# light activity < 3 METS, moderate 3 - 6 METS and intense > 6 METS.
# A person of 75 kg is assumed. The model chosen is the Random Forest
# Regressor, which has the lowest MAPE.
reg = RandomForestRegressor(criterion='mae', max_depth=8, max_features=12,
                            max_leaf_nodes=30, min_impurity_decrease=0.001,
                            n_estimators=15)
reg.fit(X, y)
yhat = reg.predict(X)
# FIX: `df_acc_final` was never defined (NameError); the dataframe in scope
# is `df`, exactly as the equivalent cell at the end of this script uses.
ids = df['id_'].to_frame()
ids['yhat'] = yhat
# METs = predicted energy divided by (weight_kg * interval_s / 3600);
# 62 is presumably the sampling interval in seconds -- TODO confirm.
ids['METs'] = ids["yhat"] / (75 * 62 / 3600)
conditions = [(ids["METs"] < 3), ((3 < ids["METs"]) & (ids["METs"] < 6)), (ids["METs"] > 6)]
names = ['ligera', 'moderada', 'intensa']
ids['intensidad'] = np.select(conditions, names)
ids
# In[22]:
# Grid-search a RandomForestRegressor; the grid is centred on the best
# decision-tree settings found above.
model = RandomForestRegressor()
params = {'bootstrap': [True],
          'criterion':['mae'],
          'max_depth': [8,10],
          'max_features': [10,12],
          'max_leaf_nodes': [10,20,30],
          'min_impurity_decrease' : [0.001,0.01],
          'min_samples_split': [2,4],
          'n_estimators': [10,15]}
# GridSearch
grid_solver = GridSearchCV(estimator = model,
                           param_grid = params,
                           scoring = 'neg_median_absolute_error',
                           cv = 7,
                           refit = 'neg_median_absolute_error',
                           verbose = 0)
model_result = grid_solver.fit(X_train,y_train)
reg = model_result.best_estimator_
# FIX: fit on the training split only before scoring on X_test. The
# original refit on the full data set (X, y), leaking the held-out rows
# into the model and optimistically biasing the MAPE reported below.
reg.fit(X_train, y_train)
# In[23]:
##### Mean Absolute Percentage Error
yhat = reg.predict(X_test)
# FIX: the arguments were swapped -- MAPE divides by y_true, which must be
# the actual test targets, not the predictions.
print("Mean Absolute Percentage Error = %.2f" % mean_absolute_percentage_error(y_test, yhat), '%')
# In[24]:
##### Feature Importance
# Rank the features by the forest's impurity-based importances and plot
# them as a horizontal bar chart.
features_importance = reg.feature_importances_
features_array = np.array(features)
# argsort ascending, then reversed -> feature names ordered from most to
# least important.
features_array_ordered = features_array[(features_importance).argsort()[::-1]]
features_array_ordered
plt.figure(figsize=(16,10))
sns.barplot(y = features_array, x = features_importance, orient='h', order=features_array_ordered[:50])
plt.show()
# ## Activity Intensity
#
# In addition to calculate the energy expenditure, for each time interval, the level of intensity of the activity carried out must be calculated. The classification of the intensity level is based on the metabolic equivalents or METS (kcal/kg*h) of the activity being:
#
# light activity < 3 METS, moderate 3 - 6 METS and intense > 6 METS.
#
# To estimate it, I consider a person of 75 kg. The model chosen is the Random Forest Regressor which has the lowest MAPE.
# In[33]:
df
# In[47]:
# Final intensity classification: refit the chosen forest on all data,
# predict the energy per interval and bucket it into METs-based intensity
# levels. A 75 kg person is assumed; 62 is presumably the interval length
# in seconds -- TODO confirm.
reg = RandomForestRegressor(criterion='mae', max_depth=8, max_features=12,
                            max_leaf_nodes=30, min_impurity_decrease=0.001,
                            n_estimators=15)
reg.fit(X,y)
yhat = reg.predict(X)
ids = df['id_'].to_frame()
ids['yhat'] = yhat
# Energy -> METs conversion: value / (weight_kg * interval_s / 3600).
ids['METs'] = ids["yhat"] / (75 * 62 / 3600)
# Buckets: <3 light, 3-6 moderate, >6 intense.
# NOTE(review): values exactly equal to 3 or 6 match no condition and get
# np.select's default (0) -- confirm that is intended.
conditions = [(ids["METs"] < 3 ),((3 < ids["METs"]) & (ids["METs"] < 6)),(ids["METs"] > 6)]
names = ['light', 'moderate', 'intense']
ids['intensity'] = np.select(conditions, names)
ids
# The substantial improvement that can be seen when we introduce the non-linearity of the model invites us to deduce that the relationships between the variables and the target are not linear. More efforts should be made to collect all the information on physical activity. Additional information about individuals such as age, sex and weight would help to improve the ASM of the model in several points.
#
# In[ ]:
| en | 0.829805 | #!/usr/bin/env python # coding: utf-8 # In[14]: ##### Import packages # Basic packages # Modelling packages # To avoid warnings # In[15]: # In[16]: # In[17]: ##### Defining MAPE(Mean Absolute Percentage Error) # In[18]: ##### Columns based on time, change the format # In[19]: #Creating new columns of time # ## Modelling # In[20]: #target #features #Division #Dividimos en dos conjuntos de datos para entrenar i testear los modelos # ## Decision Tree # In[25]: # GridSearch # In[26]: # In[30]: #best model # features importances # ## Random Forest # # To do the CV of this model we will use a param_grid based on the results of the decision tree, because the random forest is born from the "trade-off" between bias and variance. The tree has a low bias but a high variance, so we will try to combine models with a low bias and that are not fully correlated to reduce the variance. # In[ ]: ##### Activity Intensity # In[22]: # GridSearch # In[23]: ##### Mean Absolute Percentage Error # In[24]: ##### Feature Importance # ## Activity Intensity # # In addition to calculate the energy expenditure, for each time interval, the level of intensity of the activity carried out must be calculated. The classification of the intensity level is based on the metabolic equivalents or METS (kcal/kg*h) of the activity being: # # light activity < 3 METS, moderate 3 - 6 METS and intense > 6 METS. # # To estimate it, I consider a person of 75 kg. The model chosen is the Random Forest Regressor which has the lowest MAPE. # In[33]: # In[47]: # The substantial improvement that can be seen when we introduce the non-linearity of the model invites us to deduce that the relationships between the variables and the target are not linear. More efforts should be made to collect all the information on physical activity. Additional information about individuals such as age, sex and weight would help to improve the ASM of the model in several points. # # In[ ]: | 2.780598 | 3 |
rtk/analyses/statistics/Duane.py | rakhimov/rtk | 0 | 6624064 | #!/usr/bin/env python
"""
Contains functions for performing calculations associated with the Duane model.
"""
# -*- coding: utf-8 -*-
#
# rtk.analyses.statistics.Duane.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Add NLS support.
import gettext
# Import mathematical functions.
from math import exp, log, sqrt
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
_ = gettext.gettext
def calculate_duane_parameters(n_failures, fail_times):
    """
    Fit the Duane model by least squares on the log-log cumulative MTBF.

    Also used when the NHPP Power Law model parameters are estimated by
    regression.  The model form used in RTK:

    .. note:: cumulative failure intensity = lambda_c = (1 / b) * T^-alpha
    .. note:: cumulative MTBF = MTBFc = b * T^alpha
    .. note:: instantaneous failure intensity = lambda_i = (1 - alpha) * lambda_c
    .. note:: instantaneous MTBF = MTBFi = MTBFc / (1 - alpha)

    :param list n_failures: list of failure counts at each failure time.
    :param list fail_times: list of failure times.
    :return: the scale (b) and shape (alpha) point estimates.
    :rtype: tuple
    """
    n_points = len(n_failures)
    if n_points <= 0:
        # Nothing to fit: return the degenerate (scale, shape) pair.
        return 0.0, 1.0

    # Cumulative MTBF observed at each failure time: T_i / N(T_i).
    cumulative_mtbf = [fail_times[idx] / sum(n_failures[:idx + 1])
                       for idx in range(len(fail_times))]

    log_times = [log(t) for t in fail_times]
    log_mtbfs = [log(m) for m in cumulative_mtbf]
    sum_log_t = sum(log_times)
    sum_log_t_sq = sum(lt ** 2.0 for lt in log_times)
    sum_log_m = sum(log_mtbfs)
    sum_cross = sum(lt * lm for lt, lm in zip(log_times, log_mtbfs))

    # Shape: ordinary least-squares slope of log(MTBFc) against log(T).
    try:
        alpha_hat = (sum_cross - (sum_log_t * sum_log_m / n_points)) / \
                    (sum_log_t_sq - (sum_log_t ** 2.0 / n_points))
    except ZeroDivisionError:
        alpha_hat = 0.0
    # Scale: exponentiated intercept of the same regression line.
    try:
        b_hat = exp((1.0 / n_points) * (sum_log_m - alpha_hat * sum_log_t))
    except OverflowError:
        b_hat = 1.0
    return b_hat, alpha_hat
def calculate_duane_standard_error(n_failures, fail_times, alpha, beta):
    """
    Function to calculate the standard error of the Duane model parameters,
    beta (scale) and alpha (shape), given the failure counts, failure
    times, and point estimates of the parameters.
    :param list n_failures: list of failure counts at each failure time.
    :param list fail_times: list of failure times.
    :param float alpha: the point estimate of the Duane alpha (shape)
                        parameter.
    :param float beta: the point estimate of the Duane b (scale) parameter.
    :return: estimates of the standard error for alpha and the log of beta.
    :rtype: tuple
    """
    # Sums over the log failure times used by the regression error formulas.
    _logT = sum([log(x) for x in fail_times])
    _logT2 = sum([log(x)**2.0 for x in fail_times])
    # Sum of squared residuals of the fitted log-log line (log b + alpha*log T)
    # against the observed cumulative MTBF log(T_i / N(T_i)).
    _SSE = sum([((log(beta) + alpha * log(fail_times[i])) -
                 log(fail_times[i] / sum(n_failures[:i + 1])))**2.0
                for i in range(len(fail_times))])
    # Residual variance with n - 2 degrees of freedom (two fitted
    # parameters); with two or fewer failures fall back to the raw SSE.
    if sum(n_failures) > 2:
        _sigma2 = _SSE / (sum(n_failures) - 2)
    else:
        _sigma2 = _SSE
    # Corrected sum of squares of log(T); guard the zero-failure case.
    try:
        _Sxx = _logT2 - (_logT**2.0 / sum(n_failures))
    except ZeroDivisionError:
        _Sxx = 1.0
    # Calculate the standard error of the log of b (scale) parameter.
    try:
        _se_lnb = sqrt(_sigma2) * sqrt(_logT2 / (sum(n_failures) * _Sxx))
    except ZeroDivisionError:
        _se_lnb = 0.0
    # Standard error of the alpha (shape) parameter.
    try:
        _se_alpha = sqrt(_sigma2) / sqrt(_Sxx)
    except ZeroDivisionError:
        _se_alpha = 0.0
    return _se_alpha, _se_lnb
def calculate_duane_mean(est_time, alpha, beta):  # pylint: disable=C0103
    """
    Compute the Duane model cumulative and instantaneous mean values
    (e.g. MTBF) at a given time.  The model form used:

    .. note:: cumulative mean = cum_mean = beta * T^alpha
    .. note:: instantaneous mean = inst_mean = cum_mean / (1 - alpha)

    :param float est_time: the time at which to calculate the means.
    :param float alpha: point estimate of the Duane alpha (shape) parameter.
    :param float beta: point estimate of the Duane b (scale) parameter.
    :return: the cumulative mean and the instantaneous mean.
    :rtype: tuple
    """
    cumulative_mean = beta * est_time ** alpha
    # When alpha == 1.0 the instantaneous mean is undefined (division by
    # zero); fall back to the cumulative mean in that case.
    if alpha == 1.0:
        instantaneous_mean = cumulative_mean
    else:
        instantaneous_mean = cumulative_mean / (1.0 - alpha)
    return cumulative_mean, instantaneous_mean
| #!/usr/bin/env python
"""
Contains functions for performing calculations associated with the Duane model.
"""
# -*- coding: utf-8 -*-
#
# rtk.analyses.statistics.Duane.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Add NLS support.
import gettext
# Import mathematical functions.
from math import exp, log, sqrt
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
_ = gettext.gettext
def calculate_duane_parameters(n_failures, fail_times):
    """
    Fit the Duane model by least squares on the log-log cumulative MTBF.

    Also used when the NHPP Power Law model parameters are estimated by
    regression.  The model form used in RTK:

    .. note:: cumulative failure intensity = lambda_c = (1 / b) * T^-alpha
    .. note:: cumulative MTBF = MTBFc = b * T^alpha
    .. note:: instantaneous failure intensity = lambda_i = (1 - alpha) * lambda_c
    .. note:: instantaneous MTBF = MTBFi = MTBFc / (1 - alpha)

    :param list n_failures: list of failure counts at each failure time.
    :param list fail_times: list of failure times.
    :return: the scale (b) and shape (alpha) point estimates.
    :rtype: tuple
    """
    n_points = len(n_failures)
    if n_points <= 0:
        # Nothing to fit: return the degenerate (scale, shape) pair.
        return 0.0, 1.0

    # Cumulative MTBF observed at each failure time: T_i / N(T_i).
    cumulative_mtbf = [fail_times[idx] / sum(n_failures[:idx + 1])
                       for idx in range(len(fail_times))]

    log_times = [log(t) for t in fail_times]
    log_mtbfs = [log(m) for m in cumulative_mtbf]
    sum_log_t = sum(log_times)
    sum_log_t_sq = sum(lt ** 2.0 for lt in log_times)
    sum_log_m = sum(log_mtbfs)
    sum_cross = sum(lt * lm for lt, lm in zip(log_times, log_mtbfs))

    # Shape: ordinary least-squares slope of log(MTBFc) against log(T).
    try:
        alpha_hat = (sum_cross - (sum_log_t * sum_log_m / n_points)) / \
                    (sum_log_t_sq - (sum_log_t ** 2.0 / n_points))
    except ZeroDivisionError:
        alpha_hat = 0.0
    # Scale: exponentiated intercept of the same regression line.
    try:
        b_hat = exp((1.0 / n_points) * (sum_log_m - alpha_hat * sum_log_t))
    except OverflowError:
        b_hat = 1.0
    return b_hat, alpha_hat
def calculate_duane_standard_error(n_failures, fail_times, alpha, beta):
    """
    Function to calculate the standard error of the Duane model parameters,
    beta (scale) and alpha (shape), given the failure counts, failure
    times, and point estimates of the parameters.
    :param list n_failures: list of failure counts at each failure time.
    :param list fail_times: list of failure times.
    :param float alpha: the point estimate of the Duane alpha (shape)
                        parameter.
    :param float beta: the point estimate of the Duane b (scale) parameter.
    :return: estimates of the standard error for alpha and the log of beta.
    :rtype: tuple
    """
    # Sums over the log failure times used by the regression error formulas.
    _logT = sum([log(x) for x in fail_times])
    _logT2 = sum([log(x)**2.0 for x in fail_times])
    # Sum of squared residuals of the fitted log-log line (log b + alpha*log T)
    # against the observed cumulative MTBF log(T_i / N(T_i)).
    _SSE = sum([((log(beta) + alpha * log(fail_times[i])) -
                 log(fail_times[i] / sum(n_failures[:i + 1])))**2.0
                for i in range(len(fail_times))])
    # Residual variance with n - 2 degrees of freedom (two fitted
    # parameters); with two or fewer failures fall back to the raw SSE.
    if sum(n_failures) > 2:
        _sigma2 = _SSE / (sum(n_failures) - 2)
    else:
        _sigma2 = _SSE
    # Corrected sum of squares of log(T); guard the zero-failure case.
    try:
        _Sxx = _logT2 - (_logT**2.0 / sum(n_failures))
    except ZeroDivisionError:
        _Sxx = 1.0
    # Calculate the standard error of the log of b (scale) parameter.
    try:
        _se_lnb = sqrt(_sigma2) * sqrt(_logT2 / (sum(n_failures) * _Sxx))
    except ZeroDivisionError:
        _se_lnb = 0.0
    # Standard error of the alpha (shape) parameter.
    try:
        _se_alpha = sqrt(_sigma2) / sqrt(_Sxx)
    except ZeroDivisionError:
        _se_alpha = 0.0
    return _se_alpha, _se_lnb
def calculate_duane_mean(est_time, alpha, beta):  # pylint: disable=C0103
    """
    Compute the Duane model cumulative and instantaneous mean values
    (e.g. MTBF) at a given time.  The model form used:

    .. note:: cumulative mean = cum_mean = beta * T^alpha
    .. note:: instantaneous mean = inst_mean = cum_mean / (1 - alpha)

    :param float est_time: the time at which to calculate the means.
    :param float alpha: point estimate of the Duane alpha (shape) parameter.
    :param float beta: point estimate of the Duane b (scale) parameter.
    :return: the cumulative mean and the instantaneous mean.
    :rtype: tuple
    """
    cumulative_mean = beta * est_time ** alpha
    # When alpha == 1.0 the instantaneous mean is undefined (division by
    # zero); fall back to the cumulative mean in that case.
    if alpha == 1.0:
        instantaneous_mean = cumulative_mean
    else:
        instantaneous_mean = cumulative_mean / (1.0 - alpha)
    return cumulative_mean, instantaneous_mean
| en | 0.696792 | #!/usr/bin/env python Contains functions for performing calculations associated with the Duane model. # -*- coding: utf-8 -*- # # rtk.analyses.statistics.Duane.py is part of The RTK Project # # All rights reserved. # Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER # OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Add NLS support. # Import mathematical functions. Function to estimate the parameters of the Duane model. This is also used when regression is used to estimated the NHPP Power Law model parameters. 
The form of the Duane model used in RTK: .. note:: cumulative failure intensity = lambda_c = (1 / b) * T^-alpha .. note:: cumulative MTBF = MTBFc = b * T^alpha .. note:: instantaneous failure intensity = lambda_i = (1 - alpha) * lambda_c .. note:: instantaneous MTBF = MTBFi = MTBFc / (1 - alpha) :param list n_failures: list of failure counts at each failure time. :param list fail_times: list of failure times. :return: _b_hat, _alpha_hat :rtype: tuple # Estimate the shape parameter. # Estimate the scale parameter. Function to calculate the standard error of the Duane model parameters, beta (scale) and alpha (shape), given the failure counts, failure times, and point estimates of the parameters. :param int n_failures: list of failure counts at each failure time. :param float fail_times: list of failure times. :param float alpha: the point estimate of the Duane alpha (shape) parameter. :param float beta: the point estimate of the Duane b (scale) parameter. :return: estimates of the standard error for alpha and the log of beta. :rtype: tuple # Calculate the standard error of the log of b (scale) parameter. # pylint: disable=C0103 Method to calculate the Duane model cumulative and instantaneous mean values (e.g., MTBF) given the Duane parameters and a time. The Duane model used is: .. note:: cumulative mean = cum_mean = beta * T^alpha .. note:: instantaneous mean = inst_mean = cum_mean / (1 - alpha) :param float est_time: the time at which to calculate the means. :param float alpha: the point estimate of the Duane alpha (shape) parameter. :param float beta: the point estimate of the Duane b (scale) parameter. :return: estimate of the cumulative mean and instantaneous mean. :rtype: tuple | 1.780607 | 2 |
tests/integration/infra/api/devices/http_endpoints/test_hardware_controller.py | mirumon/mirumon-backend | 19 | 6624065 | <gh_stars>10-100
import uuid
import pytest
from fastapi import FastAPI
# Run every coroutine test in this module through pytest-asyncio.
pytestmark = [pytest.mark.asyncio]
class TestDeviceHardwareV2:
    """Integration tests for the ``devices:hardware`` HTTP endpoint."""
    @pytest.fixture
    async def response(self, app: FastAPI, client, device_factory):
        """Register a fake device and GET its hardware report.

        The request is issued while the ``device_factory`` context is still
        open, i.e. while the fake device is connected; only the finished
        HTTP response escapes the context.
        """
        async with device_factory(device_id=str(uuid.uuid4())) as device:
            url = app.url_path_for("devices:hardware", device_id=device.id)
            response = await client.get(url)
        return response
    async def test_should_return_expected_response(self, response):
        """Endpoint answers 200 with the device's full hardware payload."""
        assert response.status_code == 200
        # Expected payload mirrors the canned hardware report the fake
        # device publishes: motherboard, CPU, GPU, NICs and disks.
        assert response.json() == {
            "motherboard": {
                "status": "ok",
                "name": "MSI b450",
                "caption": "MSI b450 Tomahawk",
                "product": "MSI",
                "serial_number": "384134g141ghwg92",
            },
            "cpu": [
                {
                    "status": "ok",
                    "name": "AMD Ryzen 5",
                    "caption": "AMD Ryzen 5",
                    "current_clock_speed": "100",
                    "current_cthread_countlock_speed": 0,
                    "virtualization_firmware_enabled": True,
                    "load_percentage": 50,
                    "number_of_cores": 12,
                    "number_of_enabled_core": 6,
                    "number_of_logical_processors": 6,
                }
            ],
            "gpu": [
                {
                    "status": "ok",
                    "name": "gtx 970",
                    "caption": "Nvidea GTX 970",
                    "driver_version": "370.9",
                    "driver_date": "12.12.12",
                    "video_mode_description": "no description",
                    "current_vertical_resolution": "1024x1024",
                }
            ],
            "network": [
                {
                    "description": "eth0",
                    "mac_address": "00:1B:44:11:3A:B7",
                    "ip_addresses": ["172.16.17.32", "172.16.31.10"],
                }
            ],
            "disks": [
                {
                    "status": "ok",
                    "caption": "Disk 1",
                    "serial_number": "123123213123",
                    "size": 10000,
                    "model": "samsung",
                    "description": "HDD",
                    "partitions": 2,
                }
            ],
        }
| import uuid
import pytest
from fastapi import FastAPI
# Run every coroutine test in this module through pytest-asyncio.
pytestmark = [pytest.mark.asyncio]
class TestDeviceHardwareV2:
    """Integration tests for the ``devices:hardware`` HTTP endpoint."""
    @pytest.fixture
    async def response(self, app: FastAPI, client, device_factory):
        """Register a fake device and GET its hardware report.

        The request is issued while the ``device_factory`` context is still
        open, i.e. while the fake device is connected; only the finished
        HTTP response escapes the context.
        """
        async with device_factory(device_id=str(uuid.uuid4())) as device:
            url = app.url_path_for("devices:hardware", device_id=device.id)
            response = await client.get(url)
        return response
    async def test_should_return_expected_response(self, response):
        """Endpoint answers 200 with the device's full hardware payload."""
        assert response.status_code == 200
        # Expected payload mirrors the canned hardware report the fake
        # device publishes: motherboard, CPU, GPU, NICs and disks.
        assert response.json() == {
            "motherboard": {
                "status": "ok",
                "name": "MSI b450",
                "caption": "MSI b450 Tomahawk",
                "product": "MSI",
                "serial_number": "384134g141ghwg92",
            },
            "cpu": [
                {
                    "status": "ok",
                    "name": "AMD Ryzen 5",
                    "caption": "AMD Ryzen 5",
                    "current_clock_speed": "100",
                    "current_cthread_countlock_speed": 0,
                    "virtualization_firmware_enabled": True,
                    "load_percentage": 50,
                    "number_of_cores": 12,
                    "number_of_enabled_core": 6,
                    "number_of_logical_processors": 6,
                }
            ],
            "gpu": [
                {
                    "status": "ok",
                    "name": "gtx 970",
                    "caption": "Nvidea GTX 970",
                    "driver_version": "370.9",
                    "driver_date": "12.12.12",
                    "video_mode_description": "no description",
                    "current_vertical_resolution": "1024x1024",
                }
            ],
            "network": [
                {
                    "description": "eth0",
                    "mac_address": "00:1B:44:11:3A:B7",
                    "ip_addresses": ["172.16.17.32", "172.16.31.10"],
                }
            ],
            "disks": [
                {
                    "status": "ok",
                    "caption": "Disk 1",
                    "serial_number": "123123213123",
                    "size": 10000,
                    "model": "samsung",
                    "description": "HDD",
                    "partitions": 2,
                }
            ],
        }
telegram_bot_sdk/telegram_objects/passportData.py | myOmikron/TelegramBotSDK | 0 | 6624066 | from telegram_bot_sdk.telegram_objects.encryptedCredentials import EncryptedCredentials
from telegram_bot_sdk.telegram_objects.encryptedPassportElement import EncryptedPassportElement
class PassportData:
    """Telegram Passport data that a user shared with the bot.

    :param data: documents and other Telegram Passport elements shared with the bot
    :type data: list of :ref:`object_encrypted_passport_element`
    :param credentials: encrypted credentials required to decrypt the data
    :type credentials: :ref:`object_encrypted_credentials`
    """
    def __init__(self, *, data, credentials):
        # Falsy input (None or an empty collection) is normalised to None.
        if data:
            self.data = [EncryptedPassportElement(**element) for element in data]
        else:
            self.data = None
        if credentials:
            self.credentials = EncryptedCredentials(**credentials)
        else:
            self.credentials = None
| from telegram_bot_sdk.telegram_objects.encryptedCredentials import EncryptedCredentials
from telegram_bot_sdk.telegram_objects.encryptedPassportElement import EncryptedPassportElement
class PassportData:
    """Telegram Passport data that a user shared with the bot.

    :param data: documents and other Telegram Passport elements shared with the bot
    :type data: list of :ref:`object_encrypted_passport_element`
    :param credentials: encrypted credentials required to decrypt the data
    :type credentials: :ref:`object_encrypted_credentials`
    """
    def __init__(self, *, data, credentials):
        # Falsy input (None or an empty collection) is normalised to None.
        if data:
            self.data = [EncryptedPassportElement(**element) for element in data]
        else:
            self.data = None
        if credentials:
            self.credentials = EncryptedCredentials(**credentials)
        else:
            self.credentials = None
| en | 0.856924 | This class contains formation about Telegram Passport data shared with the bot by the user :param data: List with information about documents and other Telegram Passport elements that was shared with the bot :type data: list of :ref:`object_encrypted_passport_element` :param credentials: Encrypted credentials required to decrypt the data :type credentials: :ref:`object_encrypted_credentials` | 2.56425 | 3 |
students.py | Maurya232Abhishek/Python-repository-for-basics | 2 | 6624067 | students={1:{"name":"Pappu","address":"VNS","marks":{"phy":50,"chem":60}},2:{"name":"Dappu","address":"VNS","marks":{"phy":50,"chem":60}}}
student =students.get(1)
print(student)
address=student.get("address")
print(address)
marks=student.get("marks")
print(marks)
phy = marks.get("phy")
print(phy) | students={1:{"name":"Pappu","address":"VNS","marks":{"phy":50,"chem":60}},2:{"name":"Dappu","address":"VNS","marks":{"phy":50,"chem":60}}}
student =students.get(1)
print(student)
address=student.get("address")
print(address)
marks=student.get("marks")
print(marks)
phy = marks.get("phy")
print(phy) | none | 1 | 3.622695 | 4 | |
arekit/contrib/experiment_rusentrel/exp_ds/utils.py | nicolay-r/AREk | 18 | 6624068 | <reponame>nicolay-r/AREk<filename>arekit/contrib/experiment_rusentrel/exp_ds/utils.py
import logging
from arekit.common.utils import progress_bar_iter
from arekit.contrib.experiment_rusentrel.labels.scalers.ruattitudes import ExperimentRuAttitudesLabelConverter
from arekit.contrib.source.ruattitudes.collection import RuAttitudesCollection
from arekit.contrib.source.ruattitudes.io_utils import RuAttitudesVersions
from arekit.contrib.source.ruattitudes.news.base import RuAttitudesNews
logger = logging.getLogger(__name__)
def read_ruattitudes_in_memory(version, keep_doc_ids_only, used_doc_ids_set=None):
"""
Performs reading of ruattitude formatted documents and
selection according to 'doc_ids_set' parameter.
used_doc_ids_set: set or None
ids of documents that already used and could not be assigned
'None' corresponds to an empty set.
"""
assert(isinstance(version, RuAttitudesVersions))
assert(isinstance(keep_doc_ids_only, bool))
assert(isinstance(used_doc_ids_set, set) or used_doc_ids_set is None)
d = {}
id_offset = max(used_doc_ids_set) + 1 if used_doc_ids_set is not None else 0
it = RuAttitudesCollection.iter_news(version=version,
get_news_index_func=lambda _: id_offset + len(d),
label_convereter=ExperimentRuAttitudesLabelConverter(),
return_inds_only=keep_doc_ids_only)
it_formatted_and_logged = progress_bar_iter(
iterable=__iter_id_with_news(news_it=it,
keep_doc_ids_only=keep_doc_ids_only),
desc="Loading RuAttitudes Collection [{}]".format("doc ids only" if keep_doc_ids_only else "fully"),
unit='docs')
for doc_id, news in it_formatted_and_logged:
if used_doc_ids_set is not None:
if doc_id in used_doc_ids_set:
logger.info("Document with id='{}' already used. Skipping".format(doc_id))
continue
d[doc_id] = news
return d
def __iter_id_with_news(news_it, keep_doc_ids_only):
if keep_doc_ids_only:
for doc_id in news_it:
yield doc_id, None
else:
for news in news_it:
assert (isinstance(news, RuAttitudesNews))
yield news.ID, news
| import logging
from arekit.common.utils import progress_bar_iter
from arekit.contrib.experiment_rusentrel.labels.scalers.ruattitudes import ExperimentRuAttitudesLabelConverter
from arekit.contrib.source.ruattitudes.collection import RuAttitudesCollection
from arekit.contrib.source.ruattitudes.io_utils import RuAttitudesVersions
from arekit.contrib.source.ruattitudes.news.base import RuAttitudesNews
logger = logging.getLogger(__name__)
def read_ruattitudes_in_memory(version, keep_doc_ids_only, used_doc_ids_set=None):
"""
Performs reading of ruattitude formatted documents and
selection according to 'doc_ids_set' parameter.
used_doc_ids_set: set or None
ids of documents that already used and could not be assigned
'None' corresponds to an empty set.
"""
assert(isinstance(version, RuAttitudesVersions))
assert(isinstance(keep_doc_ids_only, bool))
assert(isinstance(used_doc_ids_set, set) or used_doc_ids_set is None)
d = {}
id_offset = max(used_doc_ids_set) + 1 if used_doc_ids_set is not None else 0
it = RuAttitudesCollection.iter_news(version=version,
get_news_index_func=lambda _: id_offset + len(d),
label_convereter=ExperimentRuAttitudesLabelConverter(),
return_inds_only=keep_doc_ids_only)
it_formatted_and_logged = progress_bar_iter(
iterable=__iter_id_with_news(news_it=it,
keep_doc_ids_only=keep_doc_ids_only),
desc="Loading RuAttitudes Collection [{}]".format("doc ids only" if keep_doc_ids_only else "fully"),
unit='docs')
for doc_id, news in it_formatted_and_logged:
if used_doc_ids_set is not None:
if doc_id in used_doc_ids_set:
logger.info("Document with id='{}' already used. Skipping".format(doc_id))
continue
d[doc_id] = news
return d
def __iter_id_with_news(news_it, keep_doc_ids_only):
if keep_doc_ids_only:
for doc_id in news_it:
yield doc_id, None
else:
for news in news_it:
assert (isinstance(news, RuAttitudesNews))
yield news.ID, news | en | 0.842655 | Performs reading of ruattitude formatted documents and selection according to 'doc_ids_set' parameter. used_doc_ids_set: set or None ids of documents that already used and could not be assigned 'None' corresponds to an empty set. | 2.046114 | 2 |
app.py | jaquielajoie/BS-Financial-Analytics | 1 | 6624069 | <filename>app.py<gh_stars>1-10
from flask import Flask, render_template
import sqlite3
import ast
db = "twit_data.db"
app = Flask(__name__)
def get_top_tweets():
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * from twit_data ORDER BY datetime DESC LIMIT 30")
result = c.fetchall()
tweets = []
datetime_toptweets = result[0]['datetime']
for tweet in result:
tweets.append(tweet['top_tweet'])
conn.close()
return tweets, datetime_toptweets
def get_trends():
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
c = conn.cursor()
trend = []
trend_tweet = []
c.execute("SELECT * from trend_data ORDER BY datetime DESC LIMIT 10")
result = c.fetchall()
datetime_trends = result[0]['datetime']
for r in result:
trend.append(r['trend'])
trend_tweet.append(r['trend_id1'])
trend_tweet.append(r['trend_id2'])
trend_tweet.append(r['trend_id3'])
conn.close()
return trend, trend_tweet, datetime_trends
def get_lang():
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * from lang_data ORDER BY datetime DESC LIMIT 1")
result = c.fetchone()
lang = ast.literal_eval(result['language'])
top_lang = ast.literal_eval(result['top_language'])
conn.close()
return lang, top_lang
@app.route("/")
def main():
language_data = []
top_language_data = []
lang, top_lang = get_lang()
tweets, datetime_toptweets = get_top_tweets()
for l in lang:
language_data.append([l[0], l[1], l[1]])
for t in top_lang:
top_language_data.append([t[0], t[1], t[1]])
return render_template("lang.html", language_data = language_data, top_language_data = top_language_data, tweets = tweets, datetime_toptweets = datetime_toptweets)
@app.route("/about")
def about():
return render_template('about.html')
@app.route("/top_tweets")
def top_tweets():
tweets, datetime_toptweets = get_top_tweets()
return render_template('top_tweets.html', tweets = tweets, datetime_toptweets = datetime_toptweets)
@app.route("/trends")
def trends():
trend, trend_tweet, datetime_trends = get_trends()
return render_template('trends.html', trend = trend, trend_tweet = trend_tweet, datetime_trends = datetime_trends)
if __name__ == "__main__":
app.run(debug = True)
| <filename>app.py<gh_stars>1-10
from flask import Flask, render_template
import sqlite3
import ast
db = "twit_data.db"
app = Flask(__name__)
def get_top_tweets():
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * from twit_data ORDER BY datetime DESC LIMIT 30")
result = c.fetchall()
tweets = []
datetime_toptweets = result[0]['datetime']
for tweet in result:
tweets.append(tweet['top_tweet'])
conn.close()
return tweets, datetime_toptweets
def get_trends():
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
c = conn.cursor()
trend = []
trend_tweet = []
c.execute("SELECT * from trend_data ORDER BY datetime DESC LIMIT 10")
result = c.fetchall()
datetime_trends = result[0]['datetime']
for r in result:
trend.append(r['trend'])
trend_tweet.append(r['trend_id1'])
trend_tweet.append(r['trend_id2'])
trend_tweet.append(r['trend_id3'])
conn.close()
return trend, trend_tweet, datetime_trends
def get_lang():
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * from lang_data ORDER BY datetime DESC LIMIT 1")
result = c.fetchone()
lang = ast.literal_eval(result['language'])
top_lang = ast.literal_eval(result['top_language'])
conn.close()
return lang, top_lang
@app.route("/")
def main():
language_data = []
top_language_data = []
lang, top_lang = get_lang()
tweets, datetime_toptweets = get_top_tweets()
for l in lang:
language_data.append([l[0], l[1], l[1]])
for t in top_lang:
top_language_data.append([t[0], t[1], t[1]])
return render_template("lang.html", language_data = language_data, top_language_data = top_language_data, tweets = tweets, datetime_toptweets = datetime_toptweets)
@app.route("/about")
def about():
return render_template('about.html')
@app.route("/top_tweets")
def top_tweets():
tweets, datetime_toptweets = get_top_tweets()
return render_template('top_tweets.html', tweets = tweets, datetime_toptweets = datetime_toptweets)
@app.route("/trends")
def trends():
trend, trend_tweet, datetime_trends = get_trends()
return render_template('trends.html', trend = trend, trend_tweet = trend_tweet, datetime_trends = datetime_trends)
if __name__ == "__main__":
app.run(debug = True)
| none | 1 | 2.771162 | 3 | |
scripts/retired/data_prep_lit_to_dict.py | tcrundall/chronostar | 0 | 6624070 | <filename>scripts/retired/data_prep_lit_to_dict.py<gh_stars>0
"""
Take (excerpt of) the lit compiled RV table and convert into standardised
format and
"""
from __future__ import print_function, division
from astropy.table import Table
import csv
import numpy as np
import sys
sys.path.insert(0, '..')
import chronostar.retired2.datatool as dt
#original_tb_file = "../data/bp_TGAS2_traceback_save.pkl"
data_file = "../data/bpmg_cand_w_gaia_dr2_astrometry_comb_binars.csv"
#astro_file = "../data/bp_astro.dat"
astr_file = "../data/bpmg_cand_w_gaia_dr2_astrometry_comb_binars.fits"
xyzuvw_file = "../data/bpmg_cand_w_gaia_dr2_astrometry_comb_binars_xyzuvw.fits"
main_rv_name = "RV"
main_erv_name = "RV error"
name_name = "Name1"
gaia_ra_name = "ra" # this column is start of the gaia contiguous data block
gaia_dec_name = "dec"
gaia_start_name = "parallax" # this column is start of the gaia contiguous data block
gaia_end_name = "pmra_pmdec_corr"
with open(data_file, 'r') as fp:
rd = csv.reader(fp)
header = rd.next()
data_str = np.zeros((0,len(header))).astype(np.str)
for row in rd:
data_str = np.vstack((data_str, row))
main_rv_ix = header.index(main_rv_name)
main_erv_ix = header.index(main_erv_name)
name_ix = header.index(name_name)
gaia_ra_ix = header.index(gaia_ra_name)
gaia_dec_ix = header.index(gaia_dec_name)
gaia_start_ix = header.index(gaia_start_name)
gaia_end_ix = header.index(gaia_end_name) + 1
NSTARS = data_str.shape[0]
# data_ordered = np.vstack((
# data_str[:,name_ix],
# data_str[:,gaia_ra_ix],
# data_str[:,gaia_dec_ix],
# data_str[:,gaia_start_ix:gaia_end_ix].T,
# data_str[:,main_rv_ix],
# data_str[:,main_erv_ix],
# ))
# data_ordered = data_ordered.T
gaia_file = "../data/all_rvs_w_ok_plx.fits"
gaia_master_table = Table.read(gaia_file)
master_dtype = gaia_master_table.dtype
new_table = Table(data=np.zeros(NSTARS, dtype=master_dtype))
for col in gaia_master_table.columns:
try:
new_table[col] = data_str[:,header.index(col)].astype(
master_dtype.fields[col][0]
)
except ValueError:
print("Column {} not present or can't convert value".format(col))
# manually insert RV
new_table['radial_velocity'] = data_str[:,main_rv_ix].astype(
master_dtype.fields['radial_velocity'][0]
)
new_table['radial_velocity_error'] = data_str[:,main_erv_ix].astype(
master_dtype.fields['radial_velocity_error'][0]
)
new_table.write(astr_file, format='fits', overwrite=True)
xyzuvw_dict = dt.convertGaiaToXYZUVWDict(astr_file, return_dict=True)
# hdu = fits.BinTableHDU(data=hdul[1].data[mask])
# new_hdul = fits.HDUList([primary_hdu, hdu])
# new_hdul.writeto(filename, overwrite=True)
#plt.plot(xyzuvw_dict['xyzuvw'][:,0], xyzuvw_dict['xyzuvw'][:,1], '.')
#plt.show()
#
# print("Done")
| <filename>scripts/retired/data_prep_lit_to_dict.py<gh_stars>0
"""
Take (excerpt of) the lit compiled RV table and convert into standardised
format and
"""
from __future__ import print_function, division
from astropy.table import Table
import csv
import numpy as np
import sys
sys.path.insert(0, '..')
import chronostar.retired2.datatool as dt
#original_tb_file = "../data/bp_TGAS2_traceback_save.pkl"
data_file = "../data/bpmg_cand_w_gaia_dr2_astrometry_comb_binars.csv"
#astro_file = "../data/bp_astro.dat"
astr_file = "../data/bpmg_cand_w_gaia_dr2_astrometry_comb_binars.fits"
xyzuvw_file = "../data/bpmg_cand_w_gaia_dr2_astrometry_comb_binars_xyzuvw.fits"
main_rv_name = "RV"
main_erv_name = "RV error"
name_name = "Name1"
gaia_ra_name = "ra" # this column is start of the gaia contiguous data block
gaia_dec_name = "dec"
gaia_start_name = "parallax" # this column is start of the gaia contiguous data block
gaia_end_name = "pmra_pmdec_corr"
with open(data_file, 'r') as fp:
rd = csv.reader(fp)
header = rd.next()
data_str = np.zeros((0,len(header))).astype(np.str)
for row in rd:
data_str = np.vstack((data_str, row))
main_rv_ix = header.index(main_rv_name)
main_erv_ix = header.index(main_erv_name)
name_ix = header.index(name_name)
gaia_ra_ix = header.index(gaia_ra_name)
gaia_dec_ix = header.index(gaia_dec_name)
gaia_start_ix = header.index(gaia_start_name)
gaia_end_ix = header.index(gaia_end_name) + 1
NSTARS = data_str.shape[0]
# data_ordered = np.vstack((
# data_str[:,name_ix],
# data_str[:,gaia_ra_ix],
# data_str[:,gaia_dec_ix],
# data_str[:,gaia_start_ix:gaia_end_ix].T,
# data_str[:,main_rv_ix],
# data_str[:,main_erv_ix],
# ))
# data_ordered = data_ordered.T
gaia_file = "../data/all_rvs_w_ok_plx.fits"
gaia_master_table = Table.read(gaia_file)
master_dtype = gaia_master_table.dtype
new_table = Table(data=np.zeros(NSTARS, dtype=master_dtype))
for col in gaia_master_table.columns:
try:
new_table[col] = data_str[:,header.index(col)].astype(
master_dtype.fields[col][0]
)
except ValueError:
print("Column {} not present or can't convert value".format(col))
# manually insert RV
new_table['radial_velocity'] = data_str[:,main_rv_ix].astype(
master_dtype.fields['radial_velocity'][0]
)
new_table['radial_velocity_error'] = data_str[:,main_erv_ix].astype(
master_dtype.fields['radial_velocity_error'][0]
)
new_table.write(astr_file, format='fits', overwrite=True)
xyzuvw_dict = dt.convertGaiaToXYZUVWDict(astr_file, return_dict=True)
# hdu = fits.BinTableHDU(data=hdul[1].data[mask])
# new_hdul = fits.HDUList([primary_hdu, hdu])
# new_hdul.writeto(filename, overwrite=True)
#plt.plot(xyzuvw_dict['xyzuvw'][:,0], xyzuvw_dict['xyzuvw'][:,1], '.')
#plt.show()
#
# print("Done")
| en | 0.292904 | Take (excerpt of) the lit compiled RV table and convert into standardised format and #original_tb_file = "../data/bp_TGAS2_traceback_save.pkl" #astro_file = "../data/bp_astro.dat" # this column is start of the gaia contiguous data block # this column is start of the gaia contiguous data block # data_ordered = np.vstack(( # data_str[:,name_ix], # data_str[:,gaia_ra_ix], # data_str[:,gaia_dec_ix], # data_str[:,gaia_start_ix:gaia_end_ix].T, # data_str[:,main_rv_ix], # data_str[:,main_erv_ix], # )) # data_ordered = data_ordered.T # manually insert RV # hdu = fits.BinTableHDU(data=hdul[1].data[mask]) # new_hdul = fits.HDUList([primary_hdu, hdu]) # new_hdul.writeto(filename, overwrite=True) #plt.plot(xyzuvw_dict['xyzuvw'][:,0], xyzuvw_dict['xyzuvw'][:,1], '.') #plt.show() # # print("Done") | 2.62853 | 3 |
example_service/core/services/CalculationService.py | artemijan/python-api | 0 | 6624071 | <filename>example_service/core/services/CalculationService.py
class Operator:
PLUS = '+'
MINUS = '+'
MUL = '*'
DIV = '/'
choices = (
(PLUS, '+'),
(MINUS, '-'),
(MUL, '*'),
(DIV, '/'),
)
# Should be model
class CalculationTask:
def __init__(self, arg1: float = None, operator: Operator = None, arg2: float = None):
super().__init__()
self.argument1 = arg1
self.argument2 = arg2
self.operator = operator
# Should be model
class CalculationResult:
def __init__(self, answer: float = None):
super().__init__()
self.answer = answer
def calculate(task: CalculationTask) -> CalculationResult:
return CalculationResult(4)
| <filename>example_service/core/services/CalculationService.py
class Operator:
PLUS = '+'
MINUS = '+'
MUL = '*'
DIV = '/'
choices = (
(PLUS, '+'),
(MINUS, '-'),
(MUL, '*'),
(DIV, '/'),
)
# Should be model
class CalculationTask:
def __init__(self, arg1: float = None, operator: Operator = None, arg2: float = None):
super().__init__()
self.argument1 = arg1
self.argument2 = arg2
self.operator = operator
# Should be model
class CalculationResult:
def __init__(self, answer: float = None):
super().__init__()
self.answer = answer
def calculate(task: CalculationTask) -> CalculationResult:
return CalculationResult(4)
| en | 0.68927 | # Should be model # Should be model | 2.722779 | 3 |
src/terminal/env_sensor_log2.py | informatiquecsud/m5-stack-utils-core | 0 | 6624072 | from m5stack import *
import time
import unit
from terminal import Terminal
env0 = unit.get(unit.ENV, unit.PORTA)
t = Terminal()
while True:
if btnA.wasPressed():
break
pressure = env0.pressure
temperature = env0.temperature
humidity = env0.humidity
t.print("P={pressure}, T={temperature}, H={humidity}".format(
pressure=pressure,
temperature=temperature,
humidity=humidity
))
wait_ms(500) | from m5stack import *
import time
import unit
from terminal import Terminal
env0 = unit.get(unit.ENV, unit.PORTA)
t = Terminal()
while True:
if btnA.wasPressed():
break
pressure = env0.pressure
temperature = env0.temperature
humidity = env0.humidity
t.print("P={pressure}, T={temperature}, H={humidity}".format(
pressure=pressure,
temperature=temperature,
humidity=humidity
))
wait_ms(500) | none | 1 | 2.614364 | 3 | |
coding/test_info.py | CrashingBrain/BSc_Project | 0 | 6624073 | <filename>coding/test_info.py
import bhvs as bv
import info as inf
import numpy as np
import prob as pr
import time
import matplotlib.pyplot as plt
# Test conditiona MutInf
if False:
P = bv.FourPDstrb()
print("Test CondMutInfo")
start = time.time()
print(inf.condMutInf(pr.marginal(P,3)))
end = time.time()
print("time Arne: %.8f" % (end - start))
start = time.time()
print(inf.condMutInf_(pr.marginal(P,3),0,1,2))
end = time.time()
print("time Mio: %.8f" % (end - start))
print("---")
# Test channels
if False:
PC = inf.randChannel(3,2)
print(PC.shape)
print(PC)
P = bv.FourPDstrb()
print(P.shape)
print(inf.applyChannel(P, PC, (2)).shape)
print(inf.applyChannel(P, PC, (3)).shape)
print(inf.applyChannel(P, PC, (2)))
print(inf.applyChannel(P, PC, (3)))
print(inf.mutInf(pr.marginal(P, (2,3))))
# Test deterministic channels
if False:
dim_in = (2,2)
dim_out = 3
for l in range(0, dim_out**(np.prod(dim_in))):
PC = inf.detChannel( dim_out, dim_in, l)
for k in range(0, np.prod(dim_in)):
coefftpl = inf.coeffOfNo(k, dim_in)
print(PC[:, coefftpl[0], coefftpl[1]])
# Check the reduced intrinsic information upper bound
if False:
P = bv.FourPDstrb()
P = pr.marginal(P,(3))
PC_UXYZ = inf.randChannelMP( (np.prod(P.shape),), P.shape)
print( np.sum( PC_UXYZ, axis=(0)))
print( PC_UXYZ.shape)
P_UXYZ = np.zeros_like(PC_UXYZ)
P_UXYZ_prime = np.zeros_like(PC_UXYZ)
print(P_UXYZ.shape)
print(P_UXYZ_prime.shape)
for u in range(0,PC_UXYZ.shape[0]):
P_UXYZ[u,:,:,:] = np.multiply( PC_UXYZ[u,:,:,:], P)
for u in range(0,np.prod(P.shape)):
for x in range(0, P.shape[0]):
for y in range(0, P.shape[1]):
for z in range(0, P.shape[2]):
P_UXYZ_prime[u,x,y,z] = PC_UXYZ[u,x,y,z]*P[x,y,z]
print("Diff between P_UXYZ_prime and P_UXYZ: %f" % np.amax(np.absolute( P_UXYZ- P_UXYZ_prime)))
print("Diff: marginal(PC_UXYZ*P_XYZ,U) - P_XYZ %f" % np.amax(np.absolute( pr.marginal( P_UXYZ, (0)) - P)))
print("Diff: marginal(PC_UXYZ*P_XYZ,U) - P_XYZ %f" % np.amax(np.absolute( pr.marginal( P_UXYZ_prime, (0)) - P)))
P_UZ = np.sum( P_UXYZ, (1,2))
print("Diff: P_Z from P_UZ and from P_XYZ %f" % np.amax(np.absolute( pr.marginal( P_UZ, (0)) - pr.marginal( P, (0,1)))))
# Compute the intrinsic information I(X;Y\d UZ)
P = bv.FourPDstrb()
I_rd = 100.
no_iter = 1
for k in range(0, no_iter):
PC_UZ = inf.randChannelMP( (P.shape[2:]), (P.shape[2:]))
P_XYZU_p = inf.applyChannel( P, PC_UZ, (2,3))
P_ZU = np.sum( P_XYZU_p, (0,1))
I = 0.
for z in range(0, P_XYZU_p.shape[2]):
for u in range(0,P_XYZU_p.shape[3]):
I += P_ZU[z,u] * inf.mutInf( np.multiply( 1./P_ZU[z,u], P_XYZU_p[:,:, z,u]))
if (I_rd > I):
I_rd = I
print("Intrinsic information I(X;Y\d ZU) = %f (should go down to zero)" % I_rd)
# Compute the intrinsic information I(X;Y\d UZ)
# Replace the channel by one that goes to joint variable
P = bv.FourPDstrb()
I_rd = 100.
for k in range(0, no_iter):
PC_UZ = inf.randChannelMP( (np.prod(P.shape[2:]),), (P.shape[2:]))
P_XYZU_p = inf.applyChannel( P, PC_UZ, (2,3))
P_ZU = np.sum( P_XYZU_p, (0,1))
I = 0.
for z in range(0, P_XYZU_p.shape[2]):
I += P_ZU[z] * inf.mutInf( np.multiply( 1./P_ZU[z], P_XYZU_p[:,:, z]))
if (I_rd > I):
I_rd = I
print("Intrinsic information I(X;Y\d ZU) = %f (should go down to zero)" % I_rd)
# Alternatively: join the parties ZU to a new one and apply MCupperBoundIntrinInf directly
P_prime = np.zeros( (P.shape[0], P.shape[1], P.shape[2]*P.shape[3]))
for x in range(0,P.shape[0]):
for y in range(0,P.shape[1]):
for zu in range(0, P.shape[2]*P.shape[3]):
P_prime[x,y,zu] = P[ (x,y)+inf.coeffOfNo(zu,(P.shape[2],P.shape[3]))]
print("Intrinsic information I(X;Y\d ZU) after joining Z and U = %f" % inf.MCupperBoundIntrinInf(P_prime, no_iter))
# Use the channel from the paper
P3 = inf.applyChannel( P, inf.zuChannel2(), (2,3))
print("Conditional mutual information I(X;Y|bar{UZ}) %f" % inf.condMutInf( P3))
P4 = inf.applyChannel( P, inf.zuChannel(), (2,3))
I = 0.
P4_ZU = pr.marginal( P4, (0,1))
for z in range(0, P4.shape[2]):
for u in range(0, P4.shape[3]):
if P4_ZU[z,u] > 0:
I += P4_ZU[z,u] * inf.mutInf( np.multiply( 1./P4_ZU[z,u], P4[:,:, z,u]))
print("Conditional mutual information I(X;Y|bar{UZ}) %f" % I)
print("Entropy of P_U %f" % inf.entropy( pr.marginal(P, (0,1,2))))
# IntrInf ThreePDstrb from FourPDstrb
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundIntrinInfMP with Marginal over U of FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMP( P, dimBZU, 20))
# IntrInf FourPDstrb
if False:
P = bv.FourPDstrb()
print( "Test MCupperBoundIntrinInfMP with FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMP( P, dimBZU, 2000))
# IntrInfDet ThreePDstrb from FourPDstrb
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundIntrinInfMPDet with Marginal over U of FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMPDet( P, dimBZU))
# IntrInfDet FourPDstrb
if False:
P = bv.FourPDstrb()
print( "Test MCupperBoundIntrinInfMPDet with FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMPDet( P, dimBZU))
# RedIntrInf
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundRedIntrinInfX(Y) with FourPDstrb()")
for dimU in range(2,5):
for dimBZU in range(2,5):
print( "dimBZU = ", dimBZU, ", dimU = ", dimU)
print( inf.MCupperBoundRedIntrinInfXY( P, dimU, dimBZU, 200, 200))
print( inf.MCupperBoundRedIntrinInfX ( P, dimU, dimBZU, 200, 200))
# RedIntrInfDet
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundRedIntrinInfX(Y)Det with FourPDstrb()")
for dimU in range(2,5):
for dimBZU in range(2,5):
print( "dimBZU = ", dimBZU, ", dimU = ", dimU)
print( inf.MCupperBoundRedIntrinInfXYDet( P, dimU, dimBZU, 200))
print( inf.MCupperBoundRedIntrinInfXDet ( P, dimU, dimBZU, 200))
# RedIntrInfDD
if True:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundRedIntrinInfX(Y)DD with FourPDstrb()")
for dimU in range(2,5):
for dimBZU in range(2,5):
print( "dimBZU = ", dimBZU, ", dimU = ", dimU)
print( inf.MCupperBoundRedIntrinInfXYDD( P, dimU, dimBZU))
print( inf.MCupperBoundRedIntrinInfXDD ( P, dimU, dimBZU))
# Loop over different random channels
if False:
P = bv.FourPDstrb()
print("*** BEGIN LOOPS ***")
for k in range(0, 10):
PC = inf.randChannel(2,2)
print(PC)
# Print P_Z after channel.
# NB: last parties are swapped after applying the channel
print( pr.marginal( inf.applyChannel( P, PC, 3), (0,1,2)))
print( inf.mutInf( pr.marginal( inf.applyChannel(P, PC, 3), (2,3))))
print( inf.MCupperBoundIntrinInf( pr.marginal(P, 3), 100))
print( inf.MCupperBoundRedIntrinInfXY(pr.marginal(P,3), 2, 2, 10, 10))
# Test the new RedIntrinInfo function
print( inf.MCupperBoundRedIntrinInf_( pr.marginal( P, 3), 10, 10))
# New deterministic function
print( inf.MCupperBoundRedIntrinInfDet_(pr.marginal( P, 3), 2, 4, 1000, 1000, True))
pass
print("*** END LOOPS ***")
# Test random bipartite channel
if False:
CMulti = inf.randChannelMP( (4,2), (2,2))
print( CMulti.shape )
print( CMulti.min())
print( np.sum( CMulti , axis=(0,1)))
print("---")
# Test deterministic and general uniform behaviors and then the respective entropy
print( bv.determBhv( (2,2), 3 ) )
print("---")
print( bv.determBhv( (2,2), 2 ) )
print("---")
print( bv.determBhv( (4,), 2 ) )
print("---")
print( inf.entropy(bv.determBhv( (4,), 2 ) ))
print("---")
print( bv.unifBhv( (2,2) ))
print("---")
print( bv.unifBhv( (4,2) ))
print("---")
print( bv.unifBhv( (2,) ))
print("---")
print( inf.entropy(bv.unifBhv( (2,4)) ))
print( inf.entropy(bv.unifBhv( (2,2)) ))
print( inf.entropy(bv.unifBhv( (2,)) ))
# Test the entropy
if False:
values = []
for p in np.linspace(0,1,num=100):
values.append( inf.entropy( bv.coin( p )))
plt.plot(values)
plt.savefig("binEntropy.pdf")
plt.gcf().clear()
values1 = []
values2 = []
for i in range(0,100):
bhv = bv.randBhv( (2,))
values1.append( bhv[0])
values2.append( inf.entropy( bhv))
plt.scatter(values1, values2)
plt.savefig("randomlySampledBinEntropy.pdf")
# Test the application of a channel
if False:
dimsChn = (4,5)
bhv = bv.randBhv( (2,2,2,2) )
rChn = inf.randChannelMP( dimsChn, (2,2))
# Apply the channel to the first two parties
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for x in range(0,dimsChn[0]):
for y in range(0, dimsChn[1]):
for z in range(0,2):
for u in range(0,2):
for xp in range(0,2):
for yp in range(0,2):
bhvAfterChn1[ z,u,x,y ] += bhv[xp,yp,z,u]*rChn[x,y,xp, yp]
bhvAfterChn = inf.applyChannel( bhv, rChn, (0,1))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Apply the channel to the first and the third party
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for x in range(0,dimsChn[0]):
for z in range(0, dimsChn[1]):
for y in range(0,2):
for u in range(0,2):
for xp in range(0,2):
for zp in range(0,2):
bhvAfterChn1[ y,u,x,z ] += bhv[xp,y,zp,u]*rChn[x, z, xp, zp]
bhvAfterChn = inf.applyChannel( bhv, rChn, (0,2))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Apply the channel to the second and the third party
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for y in range(0,dimsChn[0]):
for z in range(0, dimsChn[1]):
for x in range(0,2):
for u in range(0,2):
for yp in range(0,2):
for zp in range(0,2):
bhvAfterChn1[ x,u,y,z ] += bhv[x,yp,zp,u]*rChn[y, z, yp, zp]
bhvAfterChn = inf.applyChannel( bhv, rChn, (1,2))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Apply the channel to the first and the fourth party
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for x in range(0,dimsChn[0]):
for u in range(0, dimsChn[1]):
for y in range(0,2):
for z in range(0,2):
for xp in range(0,2):
for up in range(0,2):
bhvAfterChn1[ y,z,x,u ] += bhv[xp,y,z,up]*rChn[x, u, xp, up]
bhvAfterChn = inf.applyChannel( bhv, rChn, (0,3))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Test on binarization channel
rChnB = inf.randChannelMP((2,),(2,2))
bhvAfterChn1 = np.zeros( (2,2,2))
for x in range(0,2):
for z in range(0,2):
for u in range(0,2):
for xp in range(0,2):
for yp in range(0,2):
bhvAfterChn1[z,u,x] += bhv[xp,yp,z,u]*rChnB[x,xp,yp]
bhvAfterChn = inf.applyChannel( bhv, rChnB, (0,1))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Test as in MCupperBoundIntrInfMP
bhvFoo = bv.randBhv( (32,4,4,2) )
rChnFoo = inf.randChannelMP((2,),(32,2))
bhvAfterChn1 = np.zeros( (4,4,2))
for x in range(0,2):
for y in range(0,4):
for z in range(0,4):
for xp in range(0,32):
for up in range(0,2):
bhvAfterChn1[ y,z,x ] += bhvFoo[xp,y,z,up]*rChnFoo[x, xp, up]
bhvAfterChn = inf.applyChannel( bhvFoo, rChnFoo, (0,3))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1))) | <filename>coding/test_info.py
import bhvs as bv
import info as inf
import numpy as np
import prob as pr
import time
import matplotlib.pyplot as plt
# Test conditiona MutInf
if False:
P = bv.FourPDstrb()
print("Test CondMutInfo")
start = time.time()
print(inf.condMutInf(pr.marginal(P,3)))
end = time.time()
print("time Arne: %.8f" % (end - start))
start = time.time()
print(inf.condMutInf_(pr.marginal(P,3),0,1,2))
end = time.time()
print("time Mio: %.8f" % (end - start))
print("---")
# Test channels
if False:
PC = inf.randChannel(3,2)
print(PC.shape)
print(PC)
P = bv.FourPDstrb()
print(P.shape)
print(inf.applyChannel(P, PC, (2)).shape)
print(inf.applyChannel(P, PC, (3)).shape)
print(inf.applyChannel(P, PC, (2)))
print(inf.applyChannel(P, PC, (3)))
print(inf.mutInf(pr.marginal(P, (2,3))))
# Test deterministic channels
if False:
dim_in = (2,2)
dim_out = 3
for l in range(0, dim_out**(np.prod(dim_in))):
PC = inf.detChannel( dim_out, dim_in, l)
for k in range(0, np.prod(dim_in)):
coefftpl = inf.coeffOfNo(k, dim_in)
print(PC[:, coefftpl[0], coefftpl[1]])
# Check the reduced intrinsic information upper bound
if False:
P = bv.FourPDstrb()
P = pr.marginal(P,(3))
PC_UXYZ = inf.randChannelMP( (np.prod(P.shape),), P.shape)
print( np.sum( PC_UXYZ, axis=(0)))
print( PC_UXYZ.shape)
P_UXYZ = np.zeros_like(PC_UXYZ)
P_UXYZ_prime = np.zeros_like(PC_UXYZ)
print(P_UXYZ.shape)
print(P_UXYZ_prime.shape)
for u in range(0,PC_UXYZ.shape[0]):
P_UXYZ[u,:,:,:] = np.multiply( PC_UXYZ[u,:,:,:], P)
for u in range(0,np.prod(P.shape)):
for x in range(0, P.shape[0]):
for y in range(0, P.shape[1]):
for z in range(0, P.shape[2]):
P_UXYZ_prime[u,x,y,z] = PC_UXYZ[u,x,y,z]*P[x,y,z]
print("Diff between P_UXYZ_prime and P_UXYZ: %f" % np.amax(np.absolute( P_UXYZ- P_UXYZ_prime)))
print("Diff: marginal(PC_UXYZ*P_XYZ,U) - P_XYZ %f" % np.amax(np.absolute( pr.marginal( P_UXYZ, (0)) - P)))
print("Diff: marginal(PC_UXYZ*P_XYZ,U) - P_XYZ %f" % np.amax(np.absolute( pr.marginal( P_UXYZ_prime, (0)) - P)))
P_UZ = np.sum( P_UXYZ, (1,2))
print("Diff: P_Z from P_UZ and from P_XYZ %f" % np.amax(np.absolute( pr.marginal( P_UZ, (0)) - pr.marginal( P, (0,1)))))
# Compute the intrinsic information I(X;Y\d UZ)
P = bv.FourPDstrb()
I_rd = 100.
no_iter = 1
for k in range(0, no_iter):
PC_UZ = inf.randChannelMP( (P.shape[2:]), (P.shape[2:]))
P_XYZU_p = inf.applyChannel( P, PC_UZ, (2,3))
P_ZU = np.sum( P_XYZU_p, (0,1))
I = 0.
for z in range(0, P_XYZU_p.shape[2]):
for u in range(0,P_XYZU_p.shape[3]):
I += P_ZU[z,u] * inf.mutInf( np.multiply( 1./P_ZU[z,u], P_XYZU_p[:,:, z,u]))
if (I_rd > I):
I_rd = I
print("Intrinsic information I(X;Y\d ZU) = %f (should go down to zero)" % I_rd)
# Compute the intrinsic information I(X;Y\d UZ)
# Replace the channel by one that goes to joint variable
P = bv.FourPDstrb()
I_rd = 100.
for k in range(0, no_iter):
PC_UZ = inf.randChannelMP( (np.prod(P.shape[2:]),), (P.shape[2:]))
P_XYZU_p = inf.applyChannel( P, PC_UZ, (2,3))
P_ZU = np.sum( P_XYZU_p, (0,1))
I = 0.
for z in range(0, P_XYZU_p.shape[2]):
I += P_ZU[z] * inf.mutInf( np.multiply( 1./P_ZU[z], P_XYZU_p[:,:, z]))
if (I_rd > I):
I_rd = I
print("Intrinsic information I(X;Y\d ZU) = %f (should go down to zero)" % I_rd)
# Alternatively: join the parties ZU to a new one and apply MCupperBoundIntrinInf directly
P_prime = np.zeros( (P.shape[0], P.shape[1], P.shape[2]*P.shape[3]))
for x in range(0,P.shape[0]):
for y in range(0,P.shape[1]):
for zu in range(0, P.shape[2]*P.shape[3]):
P_prime[x,y,zu] = P[ (x,y)+inf.coeffOfNo(zu,(P.shape[2],P.shape[3]))]
print("Intrinsic information I(X;Y\d ZU) after joining Z and U = %f" % inf.MCupperBoundIntrinInf(P_prime, no_iter))
# Use the channel from the paper
P3 = inf.applyChannel( P, inf.zuChannel2(), (2,3))
print("Conditional mutual information I(X;Y|bar{UZ}) %f" % inf.condMutInf( P3))
P4 = inf.applyChannel( P, inf.zuChannel(), (2,3))
I = 0.
P4_ZU = pr.marginal( P4, (0,1))
for z in range(0, P4.shape[2]):
for u in range(0, P4.shape[3]):
if P4_ZU[z,u] > 0:
I += P4_ZU[z,u] * inf.mutInf( np.multiply( 1./P4_ZU[z,u], P4[:,:, z,u]))
print("Conditional mutual information I(X;Y|bar{UZ}) %f" % I)
print("Entropy of P_U %f" % inf.entropy( pr.marginal(P, (0,1,2))))
# IntrInf ThreePDstrb from FourPDstrb
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundIntrinInfMP with Marginal over U of FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMP( P, dimBZU, 20))
# IntrInf FourPDstrb
if False:
P = bv.FourPDstrb()
print( "Test MCupperBoundIntrinInfMP with FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMP( P, dimBZU, 2000))
# IntrInfDet ThreePDstrb from FourPDstrb
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundIntrinInfMPDet with Marginal over U of FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMPDet( P, dimBZU))
# IntrInfDet FourPDstrb
if False:
P = bv.FourPDstrb()
print( "Test MCupperBoundIntrinInfMPDet with FourPDstrb()")
for dimBZU in range(2,5):
print( dimBZU, inf.MCupperBoundIntrinInfMPDet( P, dimBZU))
# RedIntrInf
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundRedIntrinInfX(Y) with FourPDstrb()")
for dimU in range(2,5):
for dimBZU in range(2,5):
print( "dimBZU = ", dimBZU, ", dimU = ", dimU)
print( inf.MCupperBoundRedIntrinInfXY( P, dimU, dimBZU, 200, 200))
print( inf.MCupperBoundRedIntrinInfX ( P, dimU, dimBZU, 200, 200))
# RedIntrInfDet
if False:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundRedIntrinInfX(Y)Det with FourPDstrb()")
for dimU in range(2,5):
for dimBZU in range(2,5):
print( "dimBZU = ", dimBZU, ", dimU = ", dimU)
print( inf.MCupperBoundRedIntrinInfXYDet( P, dimU, dimBZU, 200))
print( inf.MCupperBoundRedIntrinInfXDet ( P, dimU, dimBZU, 200))
# RedIntrInfDD
if True:
P = bv.FourPDstrb()
P = pr.marginal(P, 3)
print( "Test MCupperBoundRedIntrinInfX(Y)DD with FourPDstrb()")
for dimU in range(2,5):
for dimBZU in range(2,5):
print( "dimBZU = ", dimBZU, ", dimU = ", dimU)
print( inf.MCupperBoundRedIntrinInfXYDD( P, dimU, dimBZU))
print( inf.MCupperBoundRedIntrinInfXDD ( P, dimU, dimBZU))
# Loop over different random channels
if False:
P = bv.FourPDstrb()
print("*** BEGIN LOOPS ***")
for k in range(0, 10):
PC = inf.randChannel(2,2)
print(PC)
# Print P_Z after channel.
# NB: last parties are swapped after applying the channel
print( pr.marginal( inf.applyChannel( P, PC, 3), (0,1,2)))
print( inf.mutInf( pr.marginal( inf.applyChannel(P, PC, 3), (2,3))))
print( inf.MCupperBoundIntrinInf( pr.marginal(P, 3), 100))
print( inf.MCupperBoundRedIntrinInfXY(pr.marginal(P,3), 2, 2, 10, 10))
# Test the new RedIntrinInfo function
print( inf.MCupperBoundRedIntrinInf_( pr.marginal( P, 3), 10, 10))
# New deterministic function
print( inf.MCupperBoundRedIntrinInfDet_(pr.marginal( P, 3), 2, 4, 1000, 1000, True))
pass
print("*** END LOOPS ***")
# Test random bipartite channel
if False:
CMulti = inf.randChannelMP( (4,2), (2,2))
print( CMulti.shape )
print( CMulti.min())
print( np.sum( CMulti , axis=(0,1)))
print("---")
# Test deterministic and general uniform behaviors and then the respective entropy
print( bv.determBhv( (2,2), 3 ) )
print("---")
print( bv.determBhv( (2,2), 2 ) )
print("---")
print( bv.determBhv( (4,), 2 ) )
print("---")
print( inf.entropy(bv.determBhv( (4,), 2 ) ))
print("---")
print( bv.unifBhv( (2,2) ))
print("---")
print( bv.unifBhv( (4,2) ))
print("---")
print( bv.unifBhv( (2,) ))
print("---")
print( inf.entropy(bv.unifBhv( (2,4)) ))
print( inf.entropy(bv.unifBhv( (2,2)) ))
print( inf.entropy(bv.unifBhv( (2,)) ))
# Test the entropy
if False:
values = []
for p in np.linspace(0,1,num=100):
values.append( inf.entropy( bv.coin( p )))
plt.plot(values)
plt.savefig("binEntropy.pdf")
plt.gcf().clear()
values1 = []
values2 = []
for i in range(0,100):
bhv = bv.randBhv( (2,))
values1.append( bhv[0])
values2.append( inf.entropy( bhv))
plt.scatter(values1, values2)
plt.savefig("randomlySampledBinEntropy.pdf")
# Test the application of a channel
if False:
dimsChn = (4,5)
bhv = bv.randBhv( (2,2,2,2) )
rChn = inf.randChannelMP( dimsChn, (2,2))
# Apply the channel to the first two parties
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for x in range(0,dimsChn[0]):
for y in range(0, dimsChn[1]):
for z in range(0,2):
for u in range(0,2):
for xp in range(0,2):
for yp in range(0,2):
bhvAfterChn1[ z,u,x,y ] += bhv[xp,yp,z,u]*rChn[x,y,xp, yp]
bhvAfterChn = inf.applyChannel( bhv, rChn, (0,1))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Apply the channel to the first and the third party
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for x in range(0,dimsChn[0]):
for z in range(0, dimsChn[1]):
for y in range(0,2):
for u in range(0,2):
for xp in range(0,2):
for zp in range(0,2):
bhvAfterChn1[ y,u,x,z ] += bhv[xp,y,zp,u]*rChn[x, z, xp, zp]
bhvAfterChn = inf.applyChannel( bhv, rChn, (0,2))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Apply the channel to the second and the third party
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for y in range(0,dimsChn[0]):
for z in range(0, dimsChn[1]):
for x in range(0,2):
for u in range(0,2):
for yp in range(0,2):
for zp in range(0,2):
bhvAfterChn1[ x,u,y,z ] += bhv[x,yp,zp,u]*rChn[y, z, yp, zp]
bhvAfterChn = inf.applyChannel( bhv, rChn, (1,2))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Apply the channel to the first and the fourth party
bhvAfterChn1 = np.zeros( (2,2)+dimsChn)
for x in range(0,dimsChn[0]):
for u in range(0, dimsChn[1]):
for y in range(0,2):
for z in range(0,2):
for xp in range(0,2):
for up in range(0,2):
bhvAfterChn1[ y,z,x,u ] += bhv[xp,y,z,up]*rChn[x, u, xp, up]
bhvAfterChn = inf.applyChannel( bhv, rChn, (0,3))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Test on binarization channel
rChnB = inf.randChannelMP((2,),(2,2))
bhvAfterChn1 = np.zeros( (2,2,2))
for x in range(0,2):
for z in range(0,2):
for u in range(0,2):
for xp in range(0,2):
for yp in range(0,2):
bhvAfterChn1[z,u,x] += bhv[xp,yp,z,u]*rChnB[x,xp,yp]
bhvAfterChn = inf.applyChannel( bhv, rChnB, (0,1))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1)))
# Test as in MCupperBoundIntrInfMP
bhvFoo = bv.randBhv( (32,4,4,2) )
rChnFoo = inf.randChannelMP((2,),(32,2))
bhvAfterChn1 = np.zeros( (4,4,2))
for x in range(0,2):
for y in range(0,4):
for z in range(0,4):
for xp in range(0,32):
for up in range(0,2):
bhvAfterChn1[ y,z,x ] += bhvFoo[xp,y,z,up]*rChnFoo[x, xp, up]
bhvAfterChn = inf.applyChannel( bhvFoo, rChnFoo, (0,3))
print( np.amax(np.absolute(bhvAfterChn-bhvAfterChn1))) | en | 0.781348 | # Test conditiona MutInf # Test channels # Test deterministic channels # Check the reduced intrinsic information upper bound # Compute the intrinsic information I(X;Y\d UZ) # Compute the intrinsic information I(X;Y\d UZ) # Replace the channel by one that goes to joint variable # Alternatively: join the parties ZU to a new one and apply MCupperBoundIntrinInf directly # Use the channel from the paper # IntrInf ThreePDstrb from FourPDstrb # IntrInf FourPDstrb # IntrInfDet ThreePDstrb from FourPDstrb # IntrInfDet FourPDstrb # RedIntrInf # RedIntrInfDet # RedIntrInfDD # Loop over different random channels # Print P_Z after channel. # NB: last parties are swapped after applying the channel # Test the new RedIntrinInfo function # New deterministic function # Test random bipartite channel # Test deterministic and general uniform behaviors and then the respective entropy # Test the entropy # Test the application of a channel # Apply the channel to the first two parties # Apply the channel to the first and the third party # Apply the channel to the second and the third party # Apply the channel to the first and the fourth party # Test on binarization channel # Test as in MCupperBoundIntrInfMP | 1.991136 | 2 |
prediction_lib/prediction_test.py | rhaertel80/model_server | 2 | 6624074 | <reponame>rhaertel80/model_server
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for local prediction."""
import base64
import itertools
import json
import os
import shutil
import unittest
import google3
import mock
import numpy as np
import tensorflow as tf
import xgboost as xgb
from google.cloud.ml import prediction as mlprediction
from google.cloud.ml.prediction import _model_test_util as model_test_util
from google.cloud.ml.prediction.testdata.user_custom_python import user_model
from google3.pyglib import flags
from google3.third_party.tensorflow.core.framework import types_pb2
from google3.third_party.tensorflow.core.protobuf import meta_graph_pb2
FLAGS = flags.FLAGS
SAVED_TESTDATA = (
"google3/third_party/py/google/cloud/ml/prediction/testdata/saved_model/"
"mnist_deployable_saved_model_prediction_input.json")
SAVED_TESTDATA_BAD = (
"google3/third_party/py/google/cloud/ml/prediction/testdata/saved_model/"
"mnist_deployable_saved_model_prediction_input_bad.json")
INPUT_MODEL = (
"google3/third_party/py/google/cloud/ml/prediction/testdata/saved_model")
# simple models that add two numbers.
XGBOOST_MODEL = "google3/third_party/py/google/cloud/ml/testdata/xgboost/"
SKLEARN_JOBLIB_MODEL = (
"google3/third_party/py/google/cloud/ml/testdata/sklearn_joblib/")
SKLEARN_PICKLE_MODEL = (
"google3/third_party/py/google/cloud/ml/testdata/sklearn_pickle/")
class PredictionBatchTest(tf.test.TestCase):
  """Tests for the columnarize/rowify batching helpers."""

  def testBatch(self):
    # Row-oriented instances with a list-valued feature "a" and a scalar
    # feature "b".
    rows = [{"a": [1.0, 2.0], "b": "a"},
            {"a": [3.0, 4.0], "b": "c"},
            {"a": [5.0, 6.0], "b": "e"}]
    # columnarize turns the rows into one column (list) per tensor name.
    self.assertEqual(
        mlprediction.columnarize(rows),
        {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
         "b": ["a", "c", "e"]})

  def testBatchSingleTensor(self):
    rows = [{"b": "a"}, {"b": "c"}, {"b": "e"}]
    self.assertEqual(mlprediction.columnarize(rows), {"b": ["a", "c", "e"]})

  def testRowify(self):
    # rowify is the inverse of columnarize: one dict per batch element.
    columns = {"prediction": np.array([1, 0, 1]),
               "scores": np.array([[0.1, 0.9],
                                   [0.7, 0.3],
                                   [0.4, 0.6]])}
    self.assertItemsEqual(
        mlprediction.rowify(columns),
        [{"prediction": 1, "scores": [0.1, 0.9]},
         {"prediction": 0, "scores": [0.7, 0.3]},
         {"prediction": 1, "scores": [0.4, 0.6]}])

  def testRowifyFailed(self):
    """rowify must reject outputs whose tensors disagree on batch size."""
    # Each case pairs an outputs dict with the per-tensor sizes that the
    # error detail is expected to report.
    cases = [
        # Two outputs with sizes 2 and 3.
        ({"prediction": np.array([1, 1]),
          "scores": np.array([[0.1, 0.9], [0.7, 0.3], [0.4, 0.6]])},
         {"prediction": 2, "scores": 3}),
        # Two outputs with sizes 0 and 3.
        ({"prediction": np.array([]),
          "scores": np.array([[0.1, 0.9], [0.7, 0.3], [0.4, 0.6]])},
         {"prediction": 0, "scores": 3}),
        # Two outputs with sizes 2 and 1.
        ({"prediction": np.array([1, 1]),
          "scores": np.array([[0.1, 0.9]])},
         {"prediction": 2, "scores": 1}),
        # Two outputs with sizes 2 and 0.
        ({"prediction": np.array([1, 1]),
          "scores": np.array([])},
         {"prediction": 2, "scores": 0}),
        # Three outputs: the first two have size 3, the last one 2.
        ({"prediction": np.array([1, 0, 1]),
          "scores": np.array([[0.1, 0.9], [0.7, 0.3], [0.4, 0.6]]),
          "third": np.array([[0.1, 0.9], [0.4, 0.6]])},
         {"prediction": 3, "scores": 3, "third": 2}),
    ]
    for outputs, expected_sizes in cases:
      with self.assertRaises(mlprediction.PredictionError) as e:
        # rowify() is a generator, so next() forces it to run.
        next(mlprediction.rowify(outputs))
      self.assertEqual(
          mlprediction.PredictionError.INVALID_OUTPUTS.code,
          e.exception.error_code)
      self.assertTrue("%s" % expected_sizes in e.exception.error_detail)
class PredictionCanonicalizeInputTest(tf.test.TestCase):
  """Tests for canonicalize_single_tensor_input."""

  def testCanonicalizeSingleInstance(self):
    # A bare value, a one-element list, and an already-canonical dict all
    # canonicalize to the same single-row form.
    expected = [{"x": "a"}]
    for raw in ("a", ["a"], [{"x": "a"}]):
      self.assertEqual(
          mlprediction.canonicalize_single_tensor_input(raw, "x"), expected)

  def testCanonicalizeBatchInstances(self):
    # Bare values and canonical dicts canonicalize identically in batches.
    expected = [{"x": "a"}, {"x": "b"}, {"x": "c"}]
    for raw in (["a", "b", "c"],
                [{"x": "a"}, {"x": "b"}, {"x": "c"}]):
      self.assertEqual(
          mlprediction.canonicalize_single_tensor_input(raw, "x"), expected)

  def testWrongTensorName(self):
    """Instances keyed by anything but the real tensor name are rejected."""
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.canonicalize_single_tensor_input([{"y": "a"}], "x")
    self.assertEqual(error.exception.error_detail,
                     ("Expected tensor name: x, got tensor name: ['y']."))
class PredictionDecodeTest(tf.test.TestCase):
  """Tests for decode_base64, which unwraps client {"b64": ...} payloads."""

  def _b64(self, s):
    # Build the wire form clients use to submit binary data.
    return {u"b64": base64.b64encode(s)}

  def testSingleRank1Utf8StringTensor(self):
    # Plain unicode strings are passed through unchanged.
    self.assertEqual(
        mlprediction.decode_base64([u"a", u"b", u"c"]), [u"a", u"b", u"c"])

  def testSingleRank1BytesTensor(self):
    # Every {"b64": ...} wrapper is replaced by the decoded string.
    encoded = [self._b64(u"a"), self._b64(u"b"), self._b64(u"c")]
    self.assertEqual(mlprediction.decode_base64(encoded), [u"a", u"b", u"c"])

  def testSingleUtf8StringTensor(self):
    nested = [[[u"a", u"b"]], [[u"c", u"d"]]]
    self.assertEqual(mlprediction.decode_base64(nested), nested)

  def testSingleBytesTensor(self):
    # Decoding applies element-wise at arbitrary nesting depth.
    encoded = [[[self._b64(u"a"), self._b64(u"b")]],
               [[self._b64(u"c"), self._b64(u"d")]]]
    self.assertEqual(mlprediction.decode_base64(encoded),
                     [[[u"a", u"b"]], [[u"c", u"d"]]])

  def testMultiTensorWithUtf8Strings(self):
    instances = [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
                  u"tensor2": [u"x", u"y", u"z"],
                  u"tensor3": [1.0, -2.0, 3.14]}]
    # Nothing is wrapped, so the instances come back unchanged.
    self.assertEqual(mlprediction.decode_base64(instances), instances)

  def testMultiTensorWithBase64Strings(self):
    instances = [{u"tensor1": [[[self._b64(u"a"), self._b64(u"b")]],
                               [[self._b64(u"c"), self._b64(u"d")]]],
                  u"tensor2": [u"x", u"y", u"z"],
                  u"tensor3": [1.0, -2.0, 3.14]}]
    # Only the wrapped values are decoded; other tensors pass through.
    self.assertEqual(mlprediction.decode_base64(instances),
                     [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
                       u"tensor2": [u"x", u"y", u"z"],
                       u"tensor3": [1.0, -2.0, 3.14]}])
class PredictionEncodeTest(tf.test.TestCase):
  """Tests for encode_base64, which wraps string outputs of *_bytes tensors."""

  def _b64(self, s):
    # The wire form for binary outputs: {"b64": <base64-encoded value>}.
    return {u"b64": base64.b64encode(s)}

  def _info(self, dtype):
    # Build a TensorInfo carrying only the dtype, which is all that the
    # encoder inspects.
    return meta_graph_pb2.TensorInfo(dtype=dtype.as_datatype_enum)

  def testSingleRank1Utf8StringTensor(self):
    # An alias that does not end in _bytes stays plain text.
    outputs_map = {"dummy": self._info(tf.string)}
    self.assertEqual(
        mlprediction.encode_base64([u"a", u"b", u"c"], outputs_map),
        [u"a", u"b", u"c"])

  def testSingleRank1BytesTensor(self):
    # A *_bytes alias triggers base64 wrapping of every element.
    outputs_map = {"dummy_bytes": self._info(tf.string)}
    self.assertEqual(
        mlprediction.encode_base64([u"a", u"b", u"c"], outputs_map),
        [self._b64(u"a"), self._b64(u"b"), self._b64(u"c")])

  def testSingleUtf8StringTensor(self):
    outputs_map = {"dummy": self._info(tf.string)}
    nested = [[[u"a", u"b"]], [[u"c", u"d"]]]
    self.assertEqual(
        mlprediction.encode_base64(nested, outputs_map), nested)

  def testSingleBytesTensor(self):
    # Wrapping is applied element-wise at arbitrary nesting depth.
    outputs_map = {"dummy_bytes": self._info(tf.string)}
    self.assertEqual(
        mlprediction.encode_base64([[[u"a", u"b"]], [[u"c", u"d"]]],
                                   outputs_map),
        [[[self._b64(u"a"), self._b64(u"b")]],
         [[self._b64(u"c"), self._b64(u"d")]]])

  def testMultiTensorWithUtf8Strings(self):
    outputs_map = {
        "tensor1": self._info(tf.string),
        "tensor2": self._info(tf.string),
        "tensor3": self._info(tf.float32),
    }
    predictions = [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
                    u"tensor2": [u"x", u"y", u"z"],
                    u"tensor3": [1.0, -2.0, 3.14]}]
    # No *_bytes aliases, so the rows come back unchanged.
    self.assertEqual(
        mlprediction.encode_base64(predictions, outputs_map), predictions)

  def testMultiTensorWithBase64Strings(self):
    outputs_map = {
        "tensor1_bytes": self._info(tf.string),
        "tensor2": self._info(tf.string),
        "tensor3": self._info(tf.float32),
    }
    actual = mlprediction.encode_base64(
        [{u"tensor1_bytes": [[[u"a", u"b"]], [[u"c", u"d"]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}],
        outputs_map)
    # Only the *_bytes tensor is wrapped; the others pass through.
    self.assertEqual(
        actual,
        [{u"tensor1_bytes": [[[self._b64(u"a"), self._b64(u"b")]],
                             [[self._b64(u"c"), self._b64(u"d")]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}])

  def testModelWithBytesBasedOutput(self):
    # End-to-end through the model: a DT_STRING output whose alias ends in
    # _bytes must come back base64-wrapped.
    mock_client = mock.Mock()
    mock_client.predict.return_value = {"x_bytes": "to_encode"}
    signature_def = meta_graph_pb2.SignatureDef()
    signature_def.outputs["x_bytes"].dtype = types_pb2.DT_STRING
    signature_def.inputs["input_key"].dtype = types_pb2.DT_STRING
    mock_client.signature_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def}
    model = mlprediction.create_model(mock_client, "gs://tmp/foo")
    _, predictions = model.predict({"input_key": "foo"})
    self.assertEqual(predictions,
                     [{"x_bytes": {"b64": base64.b64encode("to_encode")}}])
class LoadModelTest(tf.test.TestCase):
  """Tests for load_model."""

  def testConfigIsSet(self):
    """The session created by load_model honors the supplied ConfigProto."""
    config = tf.ConfigProto(inter_op_parallelism_threads=3)
    model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
    session, _ = mlprediction.load_model(
        model_path,
        tags=(tf.saved_model.tag_constants.SERVING,),
        config=config)
    # NOTE(review): peeks at the private _config attribute; the session
    # exposes no public accessor for its config.
    self.assertEqual(session._config, config)

  def testLoadCustomSignature(self):
    """A model saved under custom tags and signature name loads correctly."""
    model_dir = os.path.join(FLAGS.test_tmpdir, "identity_model")
    model_test_util.create_identity_model(
        model_dir=model_dir,
        signature_name="mysignature",
        tags=("tag1", "tag2"))
    _, signature_map = mlprediction.load_model(model_dir,
                                               tags=("tag1", "tag2"))
    signature = signature_map["mysignature"]
    self.assertEqual(list(signature.inputs), ["in"])
    self.assertEqual("Print:0", signature.inputs["in"].name)
    self.assertEqual(list(signature.outputs), ["out"])
    self.assertEqual("Print_1:0", signature.outputs["out"].name)
    # Loading with an incomplete tag set must fail with a clear message.
    with self.assertRaises(mlprediction.PredictionError) as error:
      _, _ = mlprediction.load_model(model_dir, tags=("tag1",))
    self.assertEqual(error.exception.error_detail,
                     "Failed to load the model due to bad model data. "
                     "tags: ['tag1']\nMetaGraphDef associated with tags 'tag1' "
                     "could not be found in SavedModel. To inspect available "
                     "tag-sets in the SavedModel, please use the SavedModel "
                     "CLI: `saved_model_cli`")
class LocalPredictionTest(tf.test.TestCase):
  """Tests for local_predict across frameworks (TF, scikit-learn, xgboost)."""

  def _input_dir(self, rel_path):
    # Resolve a path relative to this test module's directory.
    return os.path.join(os.path.dirname(__file__), rel_path)

  def testPredictionWithSavedModel(self):
    """The MNIST SavedModel predicts the expected class for two records."""
    data_path = os.path.join(FLAGS.test_srcdir, SAVED_TESTDATA)
    with open(data_path) as f:
      # Read two input records as strings.
      instances = [
          json.loads(next(f).rstrip("\n")),
          json.loads(next(f).rstrip("\n"))]
    model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
    predictions = mlprediction.local_predict(
        model_dir=model_path, instances=instances)
    prediction_result = [
        {"key": p["key"], "prediction": p["prediction"]}
        for p in predictions["predictions"]
    ]
    # Just check the key and prediction result, not each individual score,
    # which are floating point numbers.
    self.assertEqual(2, len(prediction_result))
    for r in prediction_result:
      if r["key"] == 0:
        self.assertEqual(3, r["prediction"])
      elif r["key"] == 9:
        self.assertEqual(4, r["prediction"])
      else:
        # Give the failure a message so unexpected keys are diagnosable.
        self.fail("Unexpected key in prediction result: %r" % r["key"])

  def testPredictWithSavedModelWithCustomSignature(self):
    """A model saved with custom tags and signature name is served."""
    model_dir = os.path.join(FLAGS.test_tmpdir, "identity_model_predict")
    model_test_util.create_identity_model(
        model_dir=model_dir,
        signature_name="mysignature",
        tags=("tag1", "tag2"))
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        signature_name="mysignature",
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": "check"}])
    # Only one signature_def in the graph, so it's optional to specify it.
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": "check"}])

  def testPredictWithSavedModelMultipleSignatures(self):
    """Signature selection: explicit name, serving default, and bad name."""
    model_dir = os.path.join(FLAGS.test_tmpdir, "constant_model_predict")
    model_test_util.create_constant_model(
        model_dir,
        "mysignature", 1, "serving_default", 2,
        tags=("tag1", "tag2"))
    # Predict with specified signature.
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        signature_name="mysignature",
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": 1}])
    # Predict without specified signature will use serving default.
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": 2}])
    # Predict with wrong specified signature.
    with self.assertRaises(mlprediction.PredictionError) as error:
      result = mlprediction.local_predict(model_dir,
                                          tags=("tag1", "tag2"),
                                          signature_name="wrongsignature",
                                          instances=[{"in": "check"}])
    self.assertEqual("No signature found for signature key wrongsignature.",
                     error.exception.error_detail)

  @mock.patch("google.cloud.ml.prediction.prediction_lib.create_client")
  def testLocalPredictionTensorflowModelWithStrings(self, mock_create_client):
    """String outputs of *_bytes tensors are base64-encoded exactly once."""
    signature_def = meta_graph_pb2.SignatureDef()
    signature_def.outputs["x_bytes"].dtype = types_pb2.DT_STRING
    signature_def.inputs["x_bytes"].dtype = types_pb2.DT_STRING
    mock_client = mock.Mock()
    mock_client.signature_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def
    }
    mock_client.predict.return_value = {"x_bytes": "to_encode"}
    mock_create_client.return_value = mock_client
    predictions = mlprediction.local_predict(
        model_dir=None, instances=[{"x_bytes": [1, 2, 3]}])
    # Validate that the output is correctly base64 encoded (and only once).
    # Uses assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(
        predictions,
        {"predictions": [{"x_bytes": {"b64": base64.b64encode("to_encode")}}]})

  def testPredictionSavedModelWithBadInput(self):
    """Inputs with unexpected tensor names produce a clear error."""
    data_path = os.path.join(FLAGS.test_srcdir, SAVED_TESTDATA_BAD)
    with open(data_path) as f:
      # Read two input records as strings.
      instances = [
          json.loads(next(f).rstrip("\n")),
          json.loads(next(f).rstrip("\n"))]
    model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.local_predict(
          model_dir=model_path, instances=instances)
    # assertIn gives a better failure message than assertTrue(x in y).
    self.assertIn("Unexpected tensor name: x", error.exception.error_detail)

  def testLocalPredictionSklearnModel(self):
    """The trained scikit-learn test model computes x + y."""
    instances = [[10, 20], [1, 2], [5, 6]]
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
    predictions = mlprediction.local_predict(
        model_dir=model_path,
        instances=instances,
        framework=mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)
    self.assertEqual(predictions, {"predictions": [30, 3, 11]})

  def testLocalPredictionXgboostModel(self):
    """The trained xgboost test model computes x + y (up to rounding)."""
    instances = [[10, 20], [1, 2], [5, 6]]
    model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
    predictions = mlprediction.local_predict(
        model_dir=model_path,
        instances=instances,
        framework=mlprediction.XGBOOST_FRAMEWORK_NAME)
    # xgboost returns floats; round before comparing to the exact sums.
    self.assertEqual([round(i)
                      for i in predictions["predictions"]], [30, 3, 11])
class PredictionXgboostModelTest(unittest.TestCase):
  """Tests for XGBoostModel: user processors, model creation, error paths."""

  def testXGBoostPredictionNoPreprocessing(self):
    # Without a user processor, instances reach the client as a numpy array
    # and the raw predictions are returned with the original instances.
    expected_output = [[1.0, 1.1, 1.2], [2.1, 2.2, 2.3]]
    instances = [1, 2]
    mock_client = mock.Mock()
    mock_client.predict.return_value = expected_output
    xgboost_model = mlprediction.XGBoostModel(mock_client)
    self.assertEqual(
        xgboost_model.predict(instances), (instances, expected_output))
    np.testing.assert_array_equal(
        np.array(instances), mock_client.predict.call_args[0][0])

  def testXGBoostPredictionWithUserProcessor(self):
    # The processor class is discovered from the create_version_request
    # JSON placed in the environment.
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserProcessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [1, 2]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      xgboost_model = mlprediction.XGBoostModel(mock_client)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          xgboost_model.predict(instances), (instances, [5, 10]))
      # Verify preprocessing (which multiplies values by 2) is applied
      # before calling predict.
      mock_client.predict.assert_has_calls(
          [mock.call([2, 4], stats=mock.ANY)])

  def testXGBoostPredictionWithInvalidProcessor(self):
    # A processor whose preprocess attribute is not callable must be
    # rejected when the model is constructed, not at predict time.
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.InvalidProcessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      mock_client = mock.Mock()
      with self.assertRaises(mlprediction.PredictionError) as error:
        mlprediction.XGBoostModel(mock_client)
      self.assertEqual(error.exception.error_detail,
                       ("The provided preprocess function in the Processor class "
                        "InvalidProcessor is not callable."))

  def testCreateXgboostModel(self):
    model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
    # model is a Xgboost booster.
    model = mlprediction.create_xgboost_model(model_path, None)
    inputs = [[10, 20], [1, 2], [5, 6]]
    stats = mlprediction.Stats()
    stats["dummy"] = 1  # So that a new stats object is not created.
    original_inputs, predictions = model.predict(inputs, stats)
    # The toy model computes x + y; xgboost returns floats, so round.
    predictions = [int(round(i)) for i in predictions]
    self.assertEqual(predictions, [30, 3, 11])
    self.assertEqual(original_inputs, inputs)
    # predict() records which framework served the request in stats.
    self.assertEqual(stats[mlprediction.ENGINE],
                     mlprediction.XGBOOST_FRAMEWORK_NAME)

  def testInvalidXgboostModel(self):
    model_path = os.path.join(FLAGS.test_srcdir, "foo")  # Model doesn't exist.
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.create_xgboost_model(model_path, None)
    self.assertTrue(
        "Could not load the model: " in error.exception.error_detail)

  def testInvalidDmatrixInput(self):
    model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
    model = mlprediction.create_xgboost_model(model_path, None)
    inputs = [1, 2]  # Requires a 2-dimensional list.
    with self.assertRaises(mlprediction.PredictionError) as error:
      model.predict(inputs, None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_RUN_MODEL.code)
    self.assertIn("Could not initialize DMatrix from inputs:",
                  error.exception.error_detail)

  @mock.patch(
      "google.cloud.ml.prediction.prediction_lib"
      "._load_joblib_or_pickle_model")
  def testInvalidPredictionWithXgboost(self, mock_load_fn):
    # Any exception raised by the underlying booster must be wrapped in a
    # PredictionError with the FAILED_TO_RUN_MODEL code.
    mock_model = mock.Mock()
    mock_model.predict.side_effect = Exception("foo")
    mock_load_fn.return_value = mock_model
    model_path = "fake_path"
    model = mlprediction.create_xgboost_model(model_path, None)
    inputs = [[1, 2], [3, 4]]
    with self.assertRaises(mlprediction.PredictionError) as error:
      model.predict(inputs, None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_RUN_MODEL.code)
    self.assertIn("Exception during xgboost prediction",
                  error.exception.error_detail)
    self.assertIn("foo",
                  error.exception.error_detail)
    mock_load_fn.assert_called_once_with(model_path)
    # NOTE(review): mock.EQUIV appears to be a google3 mock extension that
    # compares call arguments through the given projection (here: the
    # DMatrix's row/column counts) — confirm against the mock library docs.
    mock_model.predict.assert_called_once_with(
        mock.EQUIV(lambda dmatrix: (dmatrix.num_row(), dmatrix.num_col()),
                   xgb.DMatrix(inputs)))
class PredictionSklearnModelTest(unittest.TestCase):
  """Tests for SklearnModel prediction and user pre/post-processing hooks."""

  def testSklearnPredictionNoPreprocessing(self):
    """Without a processor class, inputs pass straight to client.predict."""
    expected_output = [[1.0, 1.1, 1.2], [2.1, 2.2, 2.3]]
    instances = [1, 2]
    mock_client = mock.Mock()
    mock_client.predict.return_value = expected_output
    sklearn_model = mlprediction.SklearnModel(mock_client)
    self.assertEqual(
        sklearn_model.predict(instances), (instances, expected_output))
    # The client receives the instances converted to a numpy array.
    np.testing.assert_array_equal(
        np.array(instances), mock_client.predict.call_args[0][0])

  def testTransformerPreprocessing(self):
    """A FunctionTransformer processor preprocesses inputs before predict."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.FunctionTransformerPreprocessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [[1, 2], [3, 4]]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      sklearn_model = mlprediction.SklearnModel(mock_client)
      sklearn_model.predict(instances)
      # The first feature is dropped, and log1p is applied on the rest.
      expected_preprocessed_input = np.log1p(np.array([[2], [4]]))
      np.testing.assert_array_equal(expected_preprocessed_input,
                                    mock_client.predict.call_args[0][0])

  def testSklearnPredictionWithUserProcessor(self):
    """User preprocess/postprocess hooks wrap the client's predict call."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserProcessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [1, 2]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      sklearn_model = mlprediction.SklearnModel(mock_client)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          sklearn_model.predict(instances), (instances, [5, 10]))
      # Verify preprocessing(which multiplies values by 2) is applied before
      # calling predict.
      mock_client.predict.assert_has_calls(
          [mock.call([2, 4], stats=mock.ANY)])

  def testSklearnPredictionWithBadPostProcessor(self):
    """A postprocessor returning an unsupported type raises INVALID_OUTPUTS."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.InvalidPostProcessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [1, 2]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      sklearn_model = mlprediction.SklearnModel(mock_client)
      with self.assertRaises(mlprediction.PredictionError) as e:
        # Only the call matters here; the original line also built an unused
        # tuple with the expected result, a copy-paste remnant of assertEqual.
        sklearn_model.predict(instances)
      # postprocessing returns an invalid type, which we should raise.
      self.assertEqual(mlprediction.PredictionError.INVALID_OUTPUTS.code,
                       e.exception.error_code)
      self.assertIn(
          "The post-processing function should return either "
          "a numpy ndarray or a list.", e.exception.error_detail)

  def testCreateSklearnModelFromJoblib(self):
    """create_sklearn_model loads a .joblib model and predicts x + y."""
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
    # model is a Scikit-Learn classifier.
    model = mlprediction.create_sklearn_model(
        model_path, None)
    inputs = [[10, 20], [1, 2], [5, 6]]
    stats = mlprediction.Stats()
    stats["dummy"] = 1  # So that a new stats object is not created.
    original_inputs, predictions = model.predict(inputs, stats=stats)
    self.assertEqual(predictions, [30, 3, 11])
    self.assertEqual(original_inputs, inputs)
    self.assertEqual(stats[mlprediction.ENGINE],
                     mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)

  def testCreateSklearnModelFromPickle(self):
    """create_sklearn_model loads a pickled (.pkl) model and predicts x + y."""
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_PICKLE_MODEL)
    # model is a Scikit-Learn classifier.
    model = mlprediction.create_sklearn_model(
        model_path, None)
    inputs = [[10, 20], [1, 2], [5, 6]]
    stats = mlprediction.Stats()
    stats["dummy"] = 1  # So that a new stats object is not created.
    original_inputs, predictions = model.predict(inputs, stats=stats)
    self.assertEqual(predictions, [30, 3, 11])
    self.assertEqual(original_inputs, inputs)
    self.assertEqual(stats[mlprediction.ENGINE],
                     mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)

  def testCreateSklearnInvalidModel(self):
    """A model file that cannot be deserialized raises FAILED_TO_LOAD_MODEL."""
    model_path = os.path.join(FLAGS.test_tmpdir)
    # Copying a .joblib model with incorrect suffix (.pkl), so that it cannot be
    # loaded.
    shutil.copy2(
        os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL, "model.joblib"),
        os.path.join(model_path, "model.pkl"))
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.create_sklearn_model(model_path, None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_LOAD_MODEL.code)
    self.assertIn(
        "Could not load the model", error.exception.error_detail)

  def testSklearnModelNotFound(self):
    """A path containing no model file raises a 'Could not find' error."""
    model_path = os.path.join(FLAGS.test_srcdir, "non_existent_path")
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.create_sklearn_model(model_path, None)
    self.assertIn("Could not find ", error.exception.error_detail)

  def testInvalidPredictionWithSklearn(self):
    """Shape-mismatched inputs are wrapped as FAILED_TO_RUN_MODEL."""
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
    # model is a Scikit-Learn classifier.
    model = mlprediction.create_sklearn_model(
        model_path, None)
    # The shape doesn't match the expected shape of: (2,)
    inputs = [[10, 20, 30]]
    with self.assertRaises(mlprediction.PredictionError) as error:
      model.predict(inputs, stats=None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_RUN_MODEL.code)
    self.assertIn("Exception during sklearn prediction",
                  error.exception.error_detail)
class TensorFlowCustomModelTest(unittest.TestCase):
  """Tests for create_model dispatch and user model_class validation."""

  def testTensorFlowModelCreationNoCreateVersionRequest(self):
    """Without create_version_request in the env, a TensorFlowModel is built."""
    client = mock.Mock()
    client.signature_map = {"serving_default": None}
    dummy_model_path = "gs://dummy/model/path"
    model = mlprediction.create_model(client, dummy_model_path)
    self.assertIsInstance(model, mlprediction.TensorFlowModel)

  def testModelCreationNoCustomCode(self):
    """An empty version stanza also yields the default TensorFlowModel."""
    create_version_json = """
        {
          "version": {}
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      client = mock.Mock()
      client.signature_map = {"serving_default": None}
      dummy_model_path = "gs://dummy/model/path"
      model = mlprediction.create_model(client, dummy_model_path)
      self.assertIsInstance(model, mlprediction.TensorFlowModel)

  def testUserModelCreationFromClassMethod(self):
    """model_class pointing at a user class instantiates that class."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserModel"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      client = None
      dummy_model_path = "gs://dummy/model/path"
      model = mlprediction.create_model(client, dummy_model_path)
      self.assertIsInstance(model, user_model.UserModel)

  def testUserModelCreationFromNestedClass(self):
    """model_class may reference a class nested inside another class."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserModelOuter.UserModelInner"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      client = None
      dummy_model_path = "gs://dummy/model/path"
      model = mlprediction.create_model(client, dummy_model_path)
      self.assertIsInstance(model, user_model.UserModelOuter.UserModelInner)

  def testUserModelMissingCustomMethod(self):
    """A model_class path that doesn't resolve reports the package_uris hint."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.wrong_user_model",
            "package_uris": ["gs://test_package"]
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("google.cloud.ml.prediction.testdata.user_custom_python."
                        "wrong_user_model cannot be found. Please make "
                        "sure (1) model_class is the fully qualified function "
                        "name, and (2) model_class uses the correct package name "
                        "as provided by the package_uris: ['gs://test_package']"))

  def testMissingUserModelClassModule(self):
    """A model_class in a nonexistent module produces the same lookup error."""
    create_version_json = """
        {
          "version": {
            "model_class": "wrong_module.UserModel",
            "package_uris": ["gs://test_package"]
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       "wrong_module.UserModel cannot be found. "
                       "Please make sure (1) model_class is the fully qualified "
                       "function name, and (2) model_class uses the correct "
                       "package name as provided by the package_uris: "
                       "['gs://test_package']")

  def testMissingPredictAndProcessMethod(self):
    """A user model class without a predict method is rejected."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.MissingPredict"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("The provided model class, MissingPredict, is missing "
                        "the required predict method."))

  def testUserModelTooManyPredictArgs(self):
    """A predict method with extra parameters fails signature validation."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.PredictMethodTooManyArgs"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("The provided model class, PredictMethodTooManyArgs, "
                        "has a predict method with an invalid signature. "
                        "Expected signature: ['self', 'instances'] "
                        "User signature: ['self', 'instances', 'another']"))

  def testUserModelTooFewPredictArgs(self):
    """A predict method with missing parameters fails signature validation."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.PredictMethodTooFewArgs"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("The provided model class, PredictMethodTooFewArgs, has "
                        "a predict method with an invalid signature. "
                        "Expected signature: ['self', 'instances'] "
                        "User signature: ['self']"))
class TensorflowCustomProcessingTest(unittest.TestCase):
  """Tests for user pre/post-processing around TensorFlowModel.predict."""

  def setUp(self):
    """Builds a mock client with an int32 a/b -> c default serving signature."""
    self._instances = [{"a": 1, "b": 2}, {"a": 2, "b": 4}]
    self._model_path = "gs://dummy/model/path"
    signature_def = meta_graph_pb2.SignatureDef()
    signature_def.inputs["a"].dtype = types_pb2.DT_INT32
    signature_def.inputs["b"].dtype = types_pb2.DT_INT32
    signature_def.outputs["c"].dtype = types_pb2.DT_INT32
    self._mock_client = mock.Mock()
    # The client always "predicts" c = [10, 20] regardless of inputs.
    self._mock_client.predict.return_value = {"c": np.array([10, 20])}
    self._mock_client.signature_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def}
    self._kwargs = {
        "signature_name":
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY}

  def testNoUserProcessor(self):
    """Without a processor class, inputs and outputs pass through untouched."""
    model = mlprediction.create_model(self._mock_client, self._model_path)
    self.assertIsInstance(model, mlprediction.TensorFlowModel)
    self.assertEqual(
        model.predict(self._instances, **self._kwargs),
        (self._instances, [{
            "c": 10
        }, {
            "c": 20
        }]))
    self._mock_client.predict.assert_has_calls(
        [mock.call({
            "a": [1, 2],
            "b": [2, 4]
        }, stats=mock.ANY, signature_name=mock.ANY)])

  def testUserProcessor(self):
    """A processor with both hooks transforms inputs and outputs."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserProcessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      model = mlprediction.create_model(self._mock_client, self._model_path)
      # Verify the default TensorFlowModel is instantiated.
      self.assertIsInstance(model, mlprediction.TensorFlowModel)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          model.predict(self._instances, **self._kwargs),
          (self._instances, [{
              "c": 5
          }, {
              "c": 10
          }]))
      # Verify preprocessing(which multiplies values by 2) is applied
      # before calling predict.
      self._mock_client.predict.assert_has_calls(
          [mock.call({
              "a": [2, 4],
              "b": [4, 8]
          }, stats=mock.ANY, signature_name=mock.ANY)])

  def testUserPreprocessOnly(self):
    """A preprocess-only processor transforms inputs but not outputs."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserPreprocessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      model = mlprediction.create_model(self._mock_client, self._model_path)
      # Verify the default TensorFlowModel is instantiated.
      self.assertIsInstance(model, mlprediction.TensorFlowModel)
      # Verify no postprocessing performed.
      self.assertEqual(
          model.predict(self._instances, **self._kwargs),
          (self._instances, [{
              "c": 10
          }, {
              "c": 20
          }]))
      # Verify preprocessing(which multiplies values by 2) is applied
      # before calling predict.
      self._mock_client.predict.assert_has_calls(
          [mock.call({
              "a": [2, 4],
              "b": [4, 8]
          }, stats=mock.ANY, signature_name=mock.ANY)])

  def testUserPostprocessOnly(self):
    """A postprocess-only processor transforms outputs but not inputs."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserPostprocessor"
          }
        }
    """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      model = mlprediction.create_model(self._mock_client, self._model_path)
      # Verify the default TensorFlowModel is instantiated.
      self.assertIsInstance(model, mlprediction.TensorFlowModel)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          model.predict(self._instances, **self._kwargs),
          (self._instances, [{
              "c": 5
          }, {
              "c": 10
          }]))
      # Verify no preprocessing performed.
      self._mock_client.predict.assert_has_calls(
          [mock.call({
              "a": [1, 2],
              "b": [2, 4]
          }, stats=mock.ANY, signature_name=mock.ANY)])
def make_timer_fn(start_time, end_time):
  """Returns a function that returns start_time, then end_time, then -1."""
  readings = itertools.chain([start_time, end_time], itertools.repeat(-1))
  return mock.Mock(side_effect=readings)
class TestTimer(unittest.TestCase):
  """Tests for the mlprediction.Timer context manager."""

  def testStandardUsage(self):
    """Checks duration math, unit conversion, and that the timer stops."""
    with mlprediction.Timer(make_timer_fn(314, 315)) as timer:
      pass
    frozen = timer.seconds
    # Elapsed time is 315 - 314 = 1 second.
    self.assertEqual(1, timer.seconds)
    # Millisecond/microsecond views are derived from the seconds value.
    self.assertEqual(int(timer.seconds * 1000), timer.milliseconds)
    self.assertEqual(int(timer.seconds * 1000000), timer.microseconds)
    # Reading .seconds again yields the same value: the timer has stopped.
    self.assertEqual(frozen, timer.seconds)
class TestStats(unittest.TestCase):
  """Tests for the mlprediction.Stats dict-like timing collector."""

  def testStandardUsage(self):
    """Stats behaves like a dict and records timed sections in microseconds."""
    stats = mlprediction.Stats()
    self.assertEqual({}, stats)
    stats["foo"] = 1
    with stats.time("bar", make_timer_fn(314, 315)):
      pass
    self.assertEqual(1, stats["foo"])
    # One second elapsed, recorded as 1e6 microseconds.
    self.assertEqual(1000000, stats["bar"])
class TestException(unittest.TestCase):
  """Tests for the PredictionError exception type."""

  def testOneException(self):
    """PredictionError exposes its code, detail, and a formatted message."""
    error = mlprediction.PredictionError(
        mlprediction.PredictionError.FAILED_TO_RUN_MODEL,
        "detailed description.")
    self.assertEqual(error.error_code, 2)
    self.assertEqual(error.error_detail, "detailed description.")
    expected_message = ("Failed to run the provided model: "
                        "detailed description. (Error code: 2)")
    self.assertEqual(expected_message, str(error))
# Run the test suite via the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  tf.test.main()
| # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for local prediction."""
import base64
import itertools
import json
import os
import shutil
import unittest
import google3
import mock
import numpy as np
import tensorflow as tf
import xgboost as xgb
from google.cloud.ml import prediction as mlprediction
from google.cloud.ml.prediction import _model_test_util as model_test_util
from google.cloud.ml.prediction.testdata.user_custom_python import user_model
from google3.pyglib import flags
from google3.third_party.tensorflow.core.framework import types_pb2
from google3.third_party.tensorflow.core.protobuf import meta_graph_pb2
FLAGS = flags.FLAGS
SAVED_TESTDATA = (
"google3/third_party/py/google/cloud/ml/prediction/testdata/saved_model/"
"mnist_deployable_saved_model_prediction_input.json")
SAVED_TESTDATA_BAD = (
"google3/third_party/py/google/cloud/ml/prediction/testdata/saved_model/"
"mnist_deployable_saved_model_prediction_input_bad.json")
INPUT_MODEL = (
"google3/third_party/py/google/cloud/ml/prediction/testdata/saved_model")
# simple models that add two numbers.
XGBOOST_MODEL = "google3/third_party/py/google/cloud/ml/testdata/xgboost/"
SKLEARN_JOBLIB_MODEL = (
"google3/third_party/py/google/cloud/ml/testdata/sklearn_joblib/")
SKLEARN_PICKLE_MODEL = (
"google3/third_party/py/google/cloud/ml/testdata/sklearn_pickle/")
class PredictionBatchTest(tf.test.TestCase):
  """Tests for the columnarize/rowify batching helpers."""

  def testBatch(self):
    """columnarize turns a list of instance dicts into per-tensor columns."""
    instances = [{"a": [1.0, 2.0],
                  "b": "a"},
                 {"a": [3.0, 4.0],
                  "b": "c"},
                 {"a": [5.0, 6.0],
                  "b": "e"},]
    columns = mlprediction.columnarize(instances)
    self.assertEqual(columns, {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                               "b": ["a", "c", "e"]})

  def testBatchSingleTensor(self):
    """columnarize also handles instances with a single tensor each."""
    instances = [
        {
            "b": "a"
        },
        {
            "b": "c"
        },
        {
            "b": "e"
        },
    ]
    columns = mlprediction.columnarize(instances)
    self.assertEqual(columns, {"b": ["a", "c", "e"]})

  def testRowify(self):
    """rowify splits per-tensor output columns back into per-instance rows."""
    outputs = {"prediction": np.array([1,
                                       0,
                                       1]),
               "scores": np.array([[0.1, 0.9],
                                   [0.7, 0.3],
                                   [0.4, 0.6]])}
    instances = mlprediction.rowify(outputs)
    self.assertItemsEqual(instances, [{"prediction": 1,
                                       "scores": [0.1, 0.9]},
                                      {"prediction": 0,
                                       "scores": [0.7, 0.3]},
                                      {"prediction": 1,
                                       "scores": [0.4, 0.6]}])

  def testRowifyFailed(self):
    """rowify raises INVALID_OUTPUTS when output sizes disagree."""
    # Following are cases the outputs containing items with different sizes.
    # Two outputs. One has a size of 2 and the other is 3.
    outputs1 = {"prediction": np.array([1,
                                        1]),
                "scores": np.array([[0.1, 0.9],
                                    [0.7, 0.3],
                                    [0.4, 0.6]])}
    error_msg_1 = {"prediction": 2, "scores": 3}
    # Two outputs. One has a size of 0 and the other is 3.
    outputs2 = {"prediction": np.array([]),
                "scores": np.array([[0.1, 0.9],
                                    [0.7, 0.3],
                                    [0.4, 0.6]])}
    error_msg_2 = {"prediction": 0, "scores": 3}
    # Two outputs. One has a size of 2 and the other is 1.
    outputs3 = {"prediction": np.array([1,
                                        1]),
                "scores": np.array([[0.1, 0.9]])}
    error_msg_3 = {"prediction": 2, "scores": 1}
    # Two outputs. One has a size of 2 and the other is 0.
    outputs4 = {"prediction": np.array([1,
                                        1]),
                "scores": np.array([])}
    error_msg_4 = {"prediction": 2, "scores": 0}
    # Three outputs. The first two have size of 3. And the last one is 2.
    outputs5 = {"prediction": np.array([1,
                                        0,
                                        1]),
                "scores": np.array([[0.1, 0.9],
                                    [0.7, 0.3],
                                    [0.4, 0.6]]),
                "third": np.array([[0.1, 0.9],
                                   [0.4, 0.6]])}
    error_msg_5 = {"prediction": 3, "scores": 3, "third": 2}

    for outputs, error_msg in ((outputs1, error_msg_1),
                               (outputs2, error_msg_2),
                               (outputs3, error_msg_3),
                               (outputs4, error_msg_4),
                               (outputs5, error_msg_5)):
      with self.assertRaises(mlprediction.PredictionError) as e:
        # rowify() is a generator, therefore the next() to invoke it.
        next(mlprediction.rowify(outputs))
      self.assertEqual(
          mlprediction.PredictionError.INVALID_OUTPUTS.code,
          e.exception.error_code)
      # assertIn reports the actual detail on failure, unlike assertTrue(x in y).
      self.assertIn("%s" % error_msg, e.exception.error_detail)
class PredictionCanonicalizeInputTest(tf.test.TestCase):
  """Tests for canonicalize_single_tensor_input instance normalization."""

  def testCanonicalizeSingleInstance(self):
    """Bare values and singleton lists are wrapped into {tensor_name: value}."""
    instances = "a"
    self.assertEqual(
        mlprediction.canonicalize_single_tensor_input(instances, "x"), [{
            "x": "a"
        }])

    instances = ["a"]
    self.assertEqual(
        mlprediction.canonicalize_single_tensor_input(instances, "x"), [{
            "x": "a"
        }])

    instances = [{"x": "a"}]
    self.assertEqual(
        mlprediction.canonicalize_single_tensor_input(instances, "x"), [{
            "x": "a"
        }])

  def testCanonicalizeBatchInstances(self):
    """Lists of bare values and lists of dicts both canonicalize per element."""
    instances = ["a", "b", "c"]
    self.assertEqual(
        mlprediction.canonicalize_single_tensor_input(instances, "x"), [{
            "x": "a"
        }, {
            "x": "b"
        }, {
            "x": "c"
        }])

    instances = [{"x": "a"}, {"x": "b"}, {"x": "c"}]
    self.assertEqual(
        mlprediction.canonicalize_single_tensor_input(instances, "x"), [{
            "x": "a"
        }, {
            "x": "b"
        }, {
            "x": "c"
        }])

  def testWrongTensorName(self):
    """A dict keyed by the wrong tensor name raises PredictionError."""
    with self.assertRaises(mlprediction.PredictionError) as error:
      instances = [{"y": "a"}]
      mlprediction.canonicalize_single_tensor_input(instances, "x")
    self.assertEqual(error.exception.error_detail,
                     ("Expected tensor name: x, got tensor name: ['y']."))
class PredictionDecodeTest(tf.test.TestCase):
  """Tests for decode_base64, which unwraps {"b64": ...} values in inputs."""

  def testSingleRank1Utf8StringTensor(self):
    """Plain unicode strings are passed through unchanged."""
    actual = mlprediction.decode_base64([u"a", u"b", u"c"])
    self.assertEqual(actual, [u"a", u"b", u"c"])

  def testSingleRank1BytesTensor(self):
    """{"b64": ...} wrappers in a rank-1 tensor are decoded to raw strings."""
    actual = mlprediction.decode_base64(
        [{u"b64": base64.b64encode(u"a")},
         {u"b64": base64.b64encode(u"b")},
         {u"b64": base64.b64encode(u"c")},])
    self.assertEqual(actual, [u"a", u"b", u"c"])

  def testSingleUtf8StringTensor(self):
    """Nested plain-string tensors are passed through unchanged."""
    actual = mlprediction.decode_base64(
        [[[u"a", u"b"]], [[u"c", u"d"]]])
    self.assertEqual(actual, [[[u"a", u"b"]], [[u"c", u"d"]]])

  def testSingleBytesTensor(self):
    """{"b64": ...} wrappers are decoded at any nesting depth."""
    actual = mlprediction.decode_base64(
        [[[{u"b64": base64.b64encode(u"a")},
           {u"b64": base64.b64encode(u"b")},]],
         [[{u"b64": base64.b64encode(u"c")},
           {u"b64": base64.b64encode(u"d")},]]])
    self.assertEqual(actual, [[[u"a", u"b"]], [[u"c", u"d"]]])

  def testMultiTensorWithUtf8Strings(self):
    """Multi-tensor instances without b64 wrappers are passed through."""
    actual = mlprediction.decode_base64(
        [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}]
    )
    self.assertEqual(actual,
                     [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
                       u"tensor2": [u"x", u"y", u"z"],
                       u"tensor3": [1.0, -2.0, 3.14]}])

  def testMultiTensorWithBase64Strings(self):
    """Only b64-wrapped values are decoded; other tensors stay untouched."""
    actual = mlprediction.decode_base64(
        [{u"tensor1": [[[{u"b64": base64.b64encode(u"a")},
                         {u"b64": base64.b64encode(u"b")},]],
                       [[{u"b64": base64.b64encode(u"c")},
                         {u"b64": base64.b64encode(u"d")},]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}])
    self.assertEqual(actual,
                     [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
                       u"tensor2": [u"x", u"y", u"z"],
                       u"tensor3": [1.0, -2.0, 3.14]}])
class PredictionEncodeTest(tf.test.TestCase):
  """Tests for encode_base64: outputs whose alias ends in _bytes get b64-wrapped."""

  def testSingleRank1Utf8StringTensor(self):
    """String outputs without a _bytes suffix are left as-is."""
    tensor_info = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    outputs_map = {"dummy": tensor_info}
    actual = mlprediction.encode_base64([u"a", u"b", u"c"], outputs_map)
    self.assertEqual(actual, [u"a", u"b", u"c"])

  def testSingleRank1BytesTensor(self):
    """A _bytes-suffixed output alias triggers {"b64": ...} wrapping."""
    tensor_info = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    outputs_map = {"dummy_bytes": tensor_info}
    actual = mlprediction.encode_base64([u"a", u"b", u"c"], outputs_map)
    self.assertEqual(actual, [{u"b64": base64.b64encode(u"a")},
                              {u"b64": base64.b64encode(u"b")},
                              {u"b64": base64.b64encode(u"c")},])

  def testSingleUtf8StringTensor(self):
    """Nested string outputs without a _bytes suffix are left as-is."""
    tensor_info = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    outputs_map = {"dummy": tensor_info}
    actual = mlprediction.encode_base64(
        [[[u"a", u"b"]], [[u"c", u"d"]]], outputs_map)
    self.assertEqual(actual, [[[u"a", u"b"]], [[u"c", u"d"]]])

  def testSingleBytesTensor(self):
    """_bytes outputs are wrapped at any nesting depth."""
    tensor_info = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    outputs_map = {"dummy_bytes": tensor_info}
    actual = mlprediction.encode_base64(
        [[[u"a", u"b"]], [[u"c", u"d"]]],
        outputs_map)
    self.assertEqual(actual, [[[{u"b64": base64.b64encode(u"a")},
                                {u"b64": base64.b64encode(u"b")},]],
                              [[{u"b64": base64.b64encode(u"c")},
                                {u"b64": base64.b64encode(u"d")},]]])

  def testMultiTensorWithUtf8Strings(self):
    """Multi-tensor outputs with no _bytes aliases are left untouched."""
    tensor_info_1 = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    tensor_info_2 = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    tensor_info_3 = meta_graph_pb2.TensorInfo(dtype=tf.float32.as_datatype_enum)
    outputs_map = {
        "tensor1": tensor_info_1,
        "tensor2": tensor_info_2,
        "tensor3": tensor_info_3,
    }
    actual = mlprediction.encode_base64(
        [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}],
        outputs_map)
    self.assertEqual(actual, [{u"tensor1": [[[u"a", u"b"]], [[u"c", u"d"]]],
                               u"tensor2": [u"x", u"y", u"z"],
                               u"tensor3": [1.0, -2.0, 3.14]}])

  def testMultiTensorWithBase64Strings(self):
    """Only the _bytes-suffixed tensor in a multi-tensor output is wrapped."""
    tensor_info_1 = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    tensor_info_2 = meta_graph_pb2.TensorInfo(dtype=tf.string.as_datatype_enum)
    tensor_info_3 = meta_graph_pb2.TensorInfo(dtype=tf.float32.as_datatype_enum)
    outputs_map = {
        "tensor1_bytes": tensor_info_1,
        "tensor2": tensor_info_2,
        "tensor3": tensor_info_3,
    }
    actual = mlprediction.encode_base64(
        [{u"tensor1_bytes": [[[u"a", u"b"]], [[u"c", u"d"]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}],
        outputs_map)
    self.assertEqual(
        actual,
        [{u"tensor1_bytes": [[[{u"b64": base64.b64encode(u"a")},
                               {u"b64": base64.b64encode(u"b")},]],
                             [[{u"b64": base64.b64encode(u"c")},
                               {u"b64": base64.b64encode(u"d")},]]],
          u"tensor2": [u"x", u"y", u"z"],
          u"tensor3": [1.0, -2.0, 3.14]}])

  def testModelWithBytesBasedOutput(self):
    """End-to-end: TensorFlowModel b64-encodes _bytes outputs exactly once."""
    mock_client = mock.Mock()
    mock_client.predict.return_value = {"x_bytes": "to_encode"}
    signature_def = meta_graph_pb2.SignatureDef()
    signature_def.outputs["x_bytes"].dtype = types_pb2.DT_STRING
    signature_def.inputs["input_key"].dtype = types_pb2.DT_STRING
    mock_client.signature_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def}
    model = mlprediction.create_model(mock_client, "gs://tmp/foo")
    _, predictions = model.predict({"input_key": "foo"})
    self.assertEqual(predictions, [{
        "x_bytes": {
            "b64": base64.b64encode("to_encode")
        }
    }])
class LoadModelTest(tf.test.TestCase):
  """Tests for mlprediction.load_model session and signature-map loading."""

  def testConfigIsSet(self):
    """The ConfigProto passed to load_model reaches the created session."""
    # Arrange
    test_config = tf.ConfigProto(inter_op_parallelism_threads=3)

    # Act
    model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
    session, _ = mlprediction.load_model(
        model_path,
        tags=(tf.saved_model.tag_constants.SERVING,),
        config=test_config)

    # Assert
    self.assertEqual(session._config, test_config)

  def testLoadCustomSignature(self):
    """Custom tags/signature load correctly; mismatched tags raise an error."""
    model_dir = os.path.join(FLAGS.test_tmpdir, "identity_model")
    model_test_util.create_identity_model(
        model_dir=model_dir,
        signature_name="mysignature",
        tags=("tag1", "tag2"))
    _, signature_map = mlprediction.load_model(model_dir,
                                               tags=("tag1", "tag2"))
    signature = signature_map["mysignature"]
    self.assertEqual([i for i in signature.inputs], ["in"])
    self.assertEqual("Print:0", signature.inputs["in"].name)
    self.assertEqual([i for i in signature.outputs], ["out"])
    self.assertEqual("Print_1:0", signature.outputs["out"].name)

    # Loading with only a subset of the saved tag-set must fail.
    with self.assertRaises(mlprediction.PredictionError) as error:
      _, _ = mlprediction.load_model(model_dir, tags=("tag1",))
    self.assertEqual(error.exception.error_detail,
                     "Failed to load the model due to bad model data. "
                     "tags: ['tag1']\nMetaGraphDef associated with tags 'tag1' "
                     "could not be found in SavedModel. To inspect available "
                     "tag-sets in the SavedModel, please use the SavedModel "
                     "CLI: `saved_model_cli`")
class LocalPredictionTest(tf.test.TestCase):
  """Tests for mlprediction.local_predict across all supported frameworks."""

  def _input_dir(self, rel_path):
    """Returns rel_path resolved relative to this test module's directory."""
    return os.path.join(os.path.dirname(__file__), rel_path)

  def testPredictionWithSavedModel(self):
    """local_predict on the MNIST SavedModel returns the expected labels."""
    data_path = os.path.join(FLAGS.test_srcdir, SAVED_TESTDATA)
    with open(data_path) as f:
      # Read two input records as strings.
      instances = [
          json.loads(next(f).rstrip("\n")),
          json.loads(next(f).rstrip("\n"))]
    model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
    predictions = mlprediction.local_predict(
        model_dir=model_path, instances=instances)
    prediction_result = [
        {"key": p["key"], "prediction": p["prediction"]}
        for p in predictions["predictions"]
    ]
    # Just check the key and prediction result, not each individual scores
    # that are floating numbers.
    self.assertEqual(2, len(prediction_result))
    for r in prediction_result:
      if r["key"] == 0:
        self.assertEqual(3, r["prediction"])
      elif r["key"] == 9:
        self.assertEqual(4, r["prediction"])
      else:
        self.fail()

  def testPredictWithSavedModelWithCustomSignature(self):
    """A single non-default signature is used explicitly or implicitly."""
    model_dir = os.path.join(FLAGS.test_tmpdir, "identity_model_predict")
    model_test_util.create_identity_model(
        model_dir=model_dir,
        signature_name="mysignature",
        tags=("tag1", "tag2"))
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        signature_name="mysignature",
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": "check"}])
    # Only one signature_def in the graph, so it's optional to specify it.
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": "check"}])

  def testPredictWithSavedModelMultipleSignatures(self):
    """With several signatures, the named one wins; default is the fallback."""
    model_dir = os.path.join(FLAGS.test_tmpdir, "constant_model_predict")
    model_test_util.create_constant_model(
        model_dir,
        "mysignature", 1, "serving_default", 2,
        tags=("tag1", "tag2"))
    # Predict with specified signature.
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        signature_name="mysignature",
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": 1}])
    # Predict without specified signature will use serving default.
    result = mlprediction.local_predict(model_dir,
                                        tags=("tag1", "tag2"),
                                        instances=[{"in": "check"}])
    self.assertEqual(result["predictions"], [{"out": 2}])
    # Predict with wrong specified signature.
    with self.assertRaises(mlprediction.PredictionError) as error:
      result = mlprediction.local_predict(model_dir,
                                          tags=("tag1", "tag2"),
                                          signature_name="wrongsignature",
                                          instances=[{"in": "check"}])
    self.assertEqual("No signature found for signature key wrongsignature.",
                     error.exception.error_detail)

  @mock.patch("google.cloud.ml.prediction.prediction_lib.create_client")
  def testLocalPredictionTensorflowModelWithStrings(self, mock_create_client):
    """String outputs with a _bytes alias are base64-encoded exactly once."""
    signature_def = meta_graph_pb2.SignatureDef()
    signature_def.outputs["x_bytes"].dtype = types_pb2.DT_STRING
    signature_def.inputs["x_bytes"].dtype = types_pb2.DT_STRING
    mock_client = mock.Mock()
    mock_client.signature_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def
    }
    mock_client.predict.return_value = {"x_bytes": "to_encode"}
    mock_create_client.return_value = mock_client
    predictions = mlprediction.local_predict(
        model_dir=None, instances=[{"x_bytes": [1, 2, 3]}])
    # Validate that the output is correctly base64 encoded (and only once).
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(
        predictions,
        {"predictions": [{"x_bytes": {"b64": base64.b64encode("to_encode")}}]})

  def testPredictionSavedModelWithBadInput(self):
    """Inputs with an unexpected tensor name raise PredictionError."""
    data_path = os.path.join(FLAGS.test_srcdir, SAVED_TESTDATA_BAD)
    with open(data_path) as f:
      # Read two input records as strings.
      instances = [
          json.loads(next(f).rstrip("\n")),
          json.loads(next(f).rstrip("\n"))]
    model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.local_predict(
          model_dir=model_path, instances=instances)
    # assertIn reports the actual detail on failure, unlike assertTrue(x in y).
    self.assertIn("Unexpected tensor name: x", error.exception.error_detail)

  def testLocalPredictionSklearnModel(self):
    """local_predict routes to scikit-learn when that framework is requested."""
    # Uses a trained sklearn model that computes x+y
    instances = [[10, 20], [1, 2], [5, 6]]
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
    predictions = mlprediction.local_predict(
        model_dir=model_path,
        instances=instances,
        framework=mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)
    self.assertEqual(predictions, {"predictions": [30, 3, 11]})

  def testLocalPredictionXgboostModel(self):
    """local_predict routes to xgboost when that framework is requested."""
    # Uses a trained xgboost model that computes x+y
    instances = [[10, 20], [1, 2], [5, 6]]
    model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
    predictions = mlprediction.local_predict(
        model_dir=model_path,
        instances=instances,
        framework=mlprediction.XGBOOST_FRAMEWORK_NAME)
    self.assertEqual([round(i)
                      for i in predictions["predictions"]], [30, 3, 11])
class PredictionXgboostModelTest(unittest.TestCase):
  """Unit tests for XGBoostModel and the create_xgboost_model factory."""

  def testXGBoostPredictionNoPreprocessing(self):
    """Without a user processor, instances flow to the client unmodified."""
    expected_output = [[1.0, 1.1, 1.2], [2.1, 2.2, 2.3]]
    instances = [1, 2]
    mock_client = mock.Mock()
    mock_client.predict.return_value = expected_output
    xgboost_model = mlprediction.XGBoostModel(mock_client)
    # predict() returns (original_instances, client_output).
    self.assertEqual(
        xgboost_model.predict(instances), (instances, expected_output))
    # The client should have received the instances as a numpy array.
    np.testing.assert_array_equal(
        np.array(instances), mock_client.predict.call_args[0][0])

  def testXGBoostPredictionWithUserProcessor(self):
    """A processor_class in create_version_request wraps predict calls."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserProcessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [1, 2]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      xgboost_model = mlprediction.XGBoostModel(mock_client)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          xgboost_model.predict(instances), (instances, [5, 10]))
      # Verify preprocessing(which multiplies values by 2) is applied
      # before calling predict.
      mock_client.predict.assert_has_calls(
          [mock.call([2, 4], stats=mock.ANY)])

  def testXGBoostPredictionWithInvalidProcessor(self):
    """A non-callable preprocess attribute is rejected at construction."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.InvalidProcessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      mock_client = mock.Mock()
      with self.assertRaises(mlprediction.PredictionError) as error:
        mlprediction.XGBoostModel(mock_client)
      self.assertEqual(error.exception.error_detail,
                       ("The provided preprocess function in the Processor class "
                        "InvalidProcessor is not callable."))

  def testCreateXgboostModel(self):
    """create_xgboost_model loads a booster and records the engine stat."""
    model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
    # model is a Xgboost booster.
    model = mlprediction.create_xgboost_model(model_path, None)
    inputs = [[10, 20], [1, 2], [5, 6]]
    stats = mlprediction.Stats()
    stats["dummy"] = 1  # So that a new stats object is not created.
    original_inputs, predictions = model.predict(inputs, stats)
    # Booster scores are floats approximating x + y; round before comparing.
    predictions = [int(round(i)) for i in predictions]
    self.assertEqual(predictions, [30, 3, 11])
    self.assertEqual(original_inputs, inputs)
    self.assertEqual(stats[mlprediction.ENGINE],
                     mlprediction.XGBOOST_FRAMEWORK_NAME)

  def testInvalidXgboostModel(self):
    """A nonexistent model path surfaces as a load error."""
    model_path = os.path.join(FLAGS.test_srcdir, "foo")  # Model doesn't exist.
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.create_xgboost_model(model_path, None)
    self.assertTrue(
        "Could not load the model: " in error.exception.error_detail)

  def testInvalidDmatrixInput(self):
    """Inputs that cannot form a DMatrix raise FAILED_TO_RUN_MODEL."""
    model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
    model = mlprediction.create_xgboost_model(model_path, None)
    inputs = [1, 2]  # Requires a 2-dimensional list.
    with self.assertRaises(mlprediction.PredictionError) as error:
      model.predict(inputs, None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_RUN_MODEL.code)
    self.assertIn("Could not initialize DMatrix from inputs:",
                  error.exception.error_detail)

  @mock.patch(
      "google.cloud.ml.prediction.prediction_lib"
      "._load_joblib_or_pickle_model")
  def testInvalidPredictionWithXgboost(self, mock_load_fn):
    """Exceptions thrown by booster.predict are wrapped in PredictionError."""
    mock_model = mock.Mock()
    mock_model.predict.side_effect = Exception("foo")
    mock_load_fn.return_value = mock_model
    model_path = "fake_path"
    model = mlprediction.create_xgboost_model(model_path, None)
    inputs = [[1, 2], [3, 4]]
    with self.assertRaises(mlprediction.PredictionError) as error:
      model.predict(inputs, None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_RUN_MODEL.code)
    self.assertIn("Exception during xgboost prediction",
                  error.exception.error_detail)
    self.assertIn("foo",
                  error.exception.error_detail)
    mock_load_fn.assert_called_once_with(model_path)
    # NOTE(review): mock.EQUIV appears to compare the DMatrix argument via
    # its (num_row, num_col) projection, since DMatrix lacks value equality;
    # presumably an internal mock extension -- confirm.
    mock_model.predict.assert_called_once_with(
        mock.EQUIV(lambda dmatrix: (dmatrix.num_row(), dmatrix.num_col()),
                   xgb.DMatrix(inputs)))
class PredictionSklearnModelTest(unittest.TestCase):
  """Unit tests for SklearnModel and the create_sklearn_model factory."""

  def testSklearnPredictionNoPreprocessing(self):
    """Without a user processor, instances flow to the client unmodified."""
    expected_output = [[1.0, 1.1, 1.2], [2.1, 2.2, 2.3]]
    instances = [1, 2]
    mock_client = mock.Mock()
    mock_client.predict.return_value = expected_output
    sklearn_model = mlprediction.SklearnModel(mock_client)
    # predict() returns (original_instances, client_output).
    self.assertEqual(
        sklearn_model.predict(instances), (instances, expected_output))
    # The client should have received the instances as a numpy array.
    np.testing.assert_array_equal(
        np.array(instances), mock_client.predict.call_args[0][0])

  def testTransformerPreprocessing(self):
    """A FunctionTransformer preprocessor is applied before prediction."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.FunctionTransformerPreprocessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [[1, 2], [3, 4]]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      sklearn_model = mlprediction.SklearnModel(mock_client)
      sklearn_model.predict(instances)
      # The first feature is dropped, and log1p is applied on the rest.
      expected_preprocessed_input = np.log1p(np.array([[2], [4]]))
      np.testing.assert_array_equal(expected_preprocessed_input,
                                    mock_client.predict.call_args[0][0])

  def testSklearnPredictionWithUserProcessor(self):
    """A processor_class wraps predict with pre- and postprocessing."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserProcessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [1, 2]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      sklearn_model = mlprediction.SklearnModel(mock_client)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          sklearn_model.predict(instances), (instances, [5, 10]))
      # Verify preprocessing(which multiplies values by 2) is applied before
      # calling predict.
      mock_client.predict.assert_has_calls(
          [mock.call([2, 4], stats=mock.ANY)])

  def testSklearnPredictionWithBadPostProcessor(self):
    """A postprocessor returning an unsupported type raises INVALID_OUTPUTS."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.InvalidPostProcessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      instances = [1, 2]
      mock_client = mock.Mock()
      mock_client.predict.return_value = [10, 20]
      sklearn_model = mlprediction.SklearnModel(mock_client)
      with self.assertRaises(mlprediction.PredictionError) as e:
        # Fixed: the old line built a throwaway tuple
        # `_ = sklearn_model.predict(instances), (instances, [5, 10])` --
        # copy-paste residue from an assertEqual call. Only the predict
        # call itself is needed to trigger the error.
        sklearn_model.predict(instances)
      # postprocessing returns an invalid type, which we should raise.
      self.assertEqual(mlprediction.PredictionError.INVALID_OUTPUTS.code,
                       e.exception.error_code)
      self.assertIn(
          "The post-processing function should return either "
          "a numpy ndarray or a list.", e.exception.error_detail)

  def testCreateSklearnModelFromJoblib(self):
    """create_sklearn_model loads a .joblib model and records the engine."""
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
    # model is a Scikit-Learn classifier.
    model = mlprediction.create_sklearn_model(
        model_path, None)
    inputs = [[10, 20], [1, 2], [5, 6]]
    stats = mlprediction.Stats()
    stats["dummy"] = 1  # So that a new stats object is not created.
    original_inputs, predictions = model.predict(inputs, stats=stats)
    self.assertEqual(predictions, [30, 3, 11])
    self.assertEqual(original_inputs, inputs)
    self.assertEqual(stats[mlprediction.ENGINE],
                     mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)

  def testCreateSklearnModelFromPickle(self):
    """create_sklearn_model loads a pickled (.pkl) model and records the engine."""
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_PICKLE_MODEL)
    # model is a Scikit-Learn classifier.
    model = mlprediction.create_sklearn_model(
        model_path, None)
    inputs = [[10, 20], [1, 2], [5, 6]]
    stats = mlprediction.Stats()
    stats["dummy"] = 1  # So that a new stats object is not created.
    original_inputs, predictions = model.predict(inputs, stats=stats)
    self.assertEqual(predictions, [30, 3, 11])
    self.assertEqual(original_inputs, inputs)
    self.assertEqual(stats[mlprediction.ENGINE],
                     mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)

  def testCreateSklearnInvalidModel(self):
    """A model file that fails to unpickle raises FAILED_TO_LOAD_MODEL."""
    model_path = os.path.join(FLAGS.test_tmpdir)
    # Copying a .joblib model with incorrect suffix (.pkl), so that it cannot be
    # loaded.
    shutil.copy2(
        os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL, "model.joblib"),
        os.path.join(model_path, "model.pkl"))
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.create_sklearn_model(model_path, None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_LOAD_MODEL.code)
    self.assertIn(
        "Could not load the model", error.exception.error_detail)

  def testSklearnModelNotFound(self):
    """A directory with no model file raises a not-found error."""
    model_path = os.path.join(FLAGS.test_srcdir, "non_existent_path")
    with self.assertRaises(mlprediction.PredictionError) as error:
      mlprediction.create_sklearn_model(model_path, None)
    self.assertIn("Could not find ", error.exception.error_detail)

  def testInvalidPredictionWithSklearn(self):
    """An input with the wrong feature count raises FAILED_TO_RUN_MODEL."""
    model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
    # model is a Scikit-Learn classifier.
    model = mlprediction.create_sklearn_model(
        model_path, None)
    # The shape doesn't match the expected shape of: (2,)
    inputs = [[10, 20, 30]]
    with self.assertRaises(mlprediction.PredictionError) as error:
      model.predict(inputs, stats=None)
    self.assertEqual(error.exception.error_code,
                     mlprediction.PredictionError.FAILED_TO_RUN_MODEL.code)
    self.assertIn("Exception during sklearn prediction",
                  error.exception.error_detail)
class TensorFlowCustomModelTest(unittest.TestCase):
  """Tests for create_model dispatch between default and user model classes."""

  def testTensorFlowModelCreationNoCreateVersionRequest(self):
    """Without a create_version_request env var, the default model is built."""
    client = mock.Mock()
    client.signature_map = {"serving_default": None}
    dummy_model_path = "gs://dummy/model/path"
    model = mlprediction.create_model(client, dummy_model_path)
    self.assertIsInstance(model, mlprediction.TensorFlowModel)

  def testModelCreationNoCustomCode(self):
    """A create_version_request without model_class yields the default model."""
    create_version_json = """
        {
          "version": {}
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      client = mock.Mock()
      client.signature_map = {"serving_default": None}
      dummy_model_path = "gs://dummy/model/path"
      model = mlprediction.create_model(client, dummy_model_path)
      self.assertIsInstance(model, mlprediction.TensorFlowModel)

  def testUserModelCreationFromClassMethod(self):
    """model_class naming a user class instantiates that class."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserModel"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      client = None
      dummy_model_path = "gs://dummy/model/path"
      model = mlprediction.create_model(client, dummy_model_path)
      self.assertIsInstance(model, user_model.UserModel)

  def testUserModelCreationFromNestedClass(self):
    """model_class may reference a class nested inside another class."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserModelOuter.UserModelInner"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      client = None
      dummy_model_path = "gs://dummy/model/path"
      model = mlprediction.create_model(client, dummy_model_path)
      self.assertIsInstance(model, user_model.UserModelOuter.UserModelInner)

  def testUserModelMissingCustomMethod(self):
    """An unresolvable model_class reports the package_uris in the error."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.wrong_user_model",
            "package_uris": ["gs://test_package"]
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("google.cloud.ml.prediction.testdata.user_custom_python."
                        "wrong_user_model cannot be found. Please make "
                        "sure (1) model_class is the fully qualified function "
                        "name, and (2) model_class uses the correct package name "
                        "as provided by the package_uris: ['gs://test_package']"))

  def testMissingUserModelClassModule(self):
    """A model_class whose module cannot be imported raises with guidance."""
    create_version_json = """
        {
          "version": {
            "model_class": "wrong_module.UserModel",
            "package_uris": ["gs://test_package"]
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       "wrong_module.UserModel cannot be found. "
                       "Please make sure (1) model_class is the fully qualified "
                       "function name, and (2) model_class uses the correct "
                       "package name as provided by the package_uris: "
                       "['gs://test_package']")

  def testMissingPredictAndProcessMethod(self):
    """A user model class without a predict method is rejected."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.MissingPredict"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("The provided model class, MissingPredict, is missing "
                        "the required predict method."))

  def testUserModelTooManyPredictArgs(self):
    """A predict method with extra parameters fails signature validation."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.PredictMethodTooManyArgs"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("The provided model class, PredictMethodTooManyArgs, "
                        "has a predict method with an invalid signature. "
                        "Expected signature: ['self', 'instances'] "
                        "User signature: ['self', 'instances', 'another']"))

  def testUserModelTooFewPredictArgs(self):
    """A predict method with missing parameters fails signature validation."""
    create_version_json = """
        {
          "version": {
            "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.PredictMethodTooFewArgs"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      with self.assertRaises(mlprediction.PredictionError) as error:
        client = None
        dummy_model_path = "gs://dummy/model/path"
        mlprediction.create_model(client, dummy_model_path)
      self.assertEqual(error.exception.error_detail,
                       ("The provided model class, PredictMethodTooFewArgs, has "
                        "a predict method with an invalid signature. "
                        "Expected signature: ['self', 'instances'] "
                        "User signature: ['self']"))
class TensorflowCustomProcessingTest(unittest.TestCase):
  """Tests for user pre/postprocessing hooks around TensorFlowModel.predict."""

  def setUp(self):
    """Builds a mock prediction client with one int32 signature (a, b) -> c."""
    self._instances = [{"a": 1, "b": 2}, {"a": 2, "b": 4}]
    self._model_path = "gs://dummy/model/path"
    signature_def = meta_graph_pb2.SignatureDef()
    signature_def.inputs["a"].dtype = types_pb2.DT_INT32
    signature_def.inputs["b"].dtype = types_pb2.DT_INT32
    signature_def.outputs["c"].dtype = types_pb2.DT_INT32
    self._mock_client = mock.Mock()
    self._mock_client.predict.return_value = {"c": np.array([10, 20])}
    self._mock_client.signature_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def}
    # Keyword arguments passed to every predict() call under test.
    self._kwargs = {
        "signature_name":
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY}

  def testNoUserProcessor(self):
    """Without a processor, inputs and outputs pass through unchanged."""
    model = mlprediction.create_model(self._mock_client, self._model_path)
    self.assertIsInstance(model, mlprediction.TensorFlowModel)
    self.assertEqual(
        model.predict(self._instances, **self._kwargs),
        (self._instances, [{
            "c": 10
        }, {
            "c": 20
        }]))
    # Instances were columnarized into per-tensor lists before the call.
    self._mock_client.predict.assert_has_calls(
        [mock.call({
            "a": [1, 2],
            "b": [2, 4]
        }, stats=mock.ANY, signature_name=mock.ANY)])

  def testUserProcessor(self):
    """A processor with both hooks transforms inputs and outputs."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserProcessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      model = mlprediction.create_model(self._mock_client, self._model_path)
      # Verify the default TensorFlowModel is instantiated.
      self.assertIsInstance(model, mlprediction.TensorFlowModel)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          model.predict(self._instances, **self._kwargs),
          (self._instances, [{
              "c": 5
          }, {
              "c": 10
          }]))
      # Verify preprocessing(which multiplies values by 2) is applied
      # before calling predict.
      self._mock_client.predict.assert_has_calls(
          [mock.call({
              "a": [2, 4],
              "b": [4, 8]
          }, stats=mock.ANY, signature_name=mock.ANY)])

  def testUserPreprocessOnly(self):
    """A preprocess-only processor transforms inputs but not outputs."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserPreprocessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      model = mlprediction.create_model(self._mock_client, self._model_path)
      # Verify the default TensorFlowModel is instantiated.
      self.assertIsInstance(model, mlprediction.TensorFlowModel)
      # Verify no postprocessing performed.
      self.assertEqual(
          model.predict(self._instances, **self._kwargs),
          (self._instances, [{
              "c": 10
          }, {
              "c": 20
          }]))
      # Verify preprocessing(which multiplies values by 2) is applied
      # before calling predict.
      self._mock_client.predict.assert_has_calls(
          [mock.call({
              "a": [2, 4],
              "b": [4, 8]
          }, stats=mock.ANY, signature_name=mock.ANY)])

  def testUserPostprocessOnly(self):
    """A postprocess-only processor transforms outputs but not inputs."""
    create_version_json = """
        {
          "version": {
            "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserPostprocessor"
          }
        }
        """
    env_map = {"create_version_request": create_version_json}
    with mock.patch.dict("os.environ", env_map):
      model = mlprediction.create_model(self._mock_client, self._model_path)
      # Verify the default TensorFlowModel is instantiated.
      self.assertIsInstance(model, mlprediction.TensorFlowModel)
      # Verify postprocessing (which divides values by 2) is applied to the
      # predicted results.
      self.assertEqual(
          model.predict(self._instances, **self._kwargs),
          (self._instances, [{
              "c": 5
          }, {
              "c": 10
          }]))
      # Verify no preprocessing performed.
      self._mock_client.predict.assert_has_calls(
          [mock.call({
              "a": [1, 2],
              "b": [2, 4]
          }, stats=mock.ANY, signature_name=mock.ANY)])
def make_timer_fn(start_time, end_time):
  """Returns a mock callable yielding start_time, then end_time, then -1 forever."""
  fake_clock = mock.Mock()
  fake_clock.side_effect = itertools.chain(
      [start_time, end_time], itertools.repeat(-1))
  return fake_clock
class TestTimer(unittest.TestCase):
  """Tests for the Timer context manager."""

  def testStandardUsage(self):
    """Timer captures the duration on exit and stops advancing afterwards."""
    # Fake clock reads 314 on entry and 315 on exit; later reads return -1.
    with mlprediction.Timer(make_timer_fn(314, 315)) as timer:
      pass
    duration = timer.seconds
    # Ensure that timer is correct
    self.assertEqual(timer.seconds, 1)  # 315 - 314
    # Ensure that unit conversion is correct
    self.assertEqual(timer.milliseconds, int(timer.seconds * 1000))
    self.assertEqual(timer.microseconds, int(timer.seconds * 1000000))
    # Ensure that the timer has stopped: a still-running timer would pick up
    # the fake clock's -1 sentinel and report a different duration.
    self.assertEqual(duration, timer.seconds)
class TestStats(unittest.TestCase):
  """Tests for the Stats dict-like stats container."""

  def testStandardUsage(self):
    """Stats supports plain item assignment and timed sections."""
    stats = mlprediction.Stats()
    self.assertEqual(stats, {})
    stats["foo"] = 1
    # Fake clock advances from 314s to 315s across the timed section.
    with stats.time("bar", make_timer_fn(314, 315)):
      pass
    self.assertEqual(stats["foo"], 1)
    # We slept for 1 sec = 1e6 microseconds
    self.assertEqual(stats["bar"], 1000000)
class TestException(unittest.TestCase):
  """Tests for PredictionError accessors and string formatting."""

  def testOneException(self):
    """error_code, error_detail and str() reflect the error definition."""
    e = mlprediction.PredictionError(
        mlprediction.PredictionError.FAILED_TO_RUN_MODEL,
        "detailed description.")
    # FAILED_TO_RUN_MODEL carries error code 2 (see the asserted str() below).
    self.assertEqual(2, e.error_code)
    self.assertEqual("detailed description.", e.error_detail)
    self.assertEqual("Failed to run the provided model: detailed description. "
                     "(Error code: 2)", str(e))
if __name__ == "__main__":
  # tf.test.main() discovers and runs all TestCase classes in this module.
  tf.test.main()
{ "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.InvalidProcessor" } } # model is a Xgboost booster. # So that a new stats object is not created. # Model doesn't exist. # Requires a 2-dimensional list. { "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.FunctionTransformerPreprocessor" } } # The first feature is dropped, and log1p is applied on the rest. { "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserProcessor" } } # Verify postprocessing (which divides values by 2) is applied to the # predicted results. # Verify preprocessing(which multiplies values by 2) is applied before # calling predict. { "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.InvalidPostProcessor" } } # postprocessing returns an invalid type, which we should raise. # model is a Scikit-Learn classifier. # So that a new stats object is not created. # model is a Scikit-Learn classifier. # So that a new stats object is not created. # Copying a .joblib model with incorrect suffix (.pkl), so that it cannot be # loaded. # model is a Scikit-Learn classifier. 
# The shape doesn't match the expected shape of: (2,) { "version": {} } { "version": { "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserModel" } } { "version": { "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.UserModelOuter.UserModelInner" } } { "version": { "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.wrong_user_model", "package_uris": ["gs://test_package"] } } { "version": { "model_class": "wrong_module.UserModel", "package_uris": ["gs://test_package"] } } { "version": { "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.MissingPredict" } } { "version": { "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.PredictMethodTooManyArgs" } } { "version": { "model_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.PredictMethodTooFewArgs" } } { "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserProcessor" } } # Verify the default TensorFlowModel is instantiated. # Verify postprocessing (which divides values by 2) is applied to the # predicted results. # Verify preprocessing(which multiplies values by 2) is applied # before calling predict. { "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserPreprocessor" } } # Verify the default TensorFlowModel is instantiated. # Verify no postprocessing performed. # Verify preprocessing(which multiplies values by 2) is applied # before calling predict. { "version": { "processor_class": "google.cloud.ml.prediction.testdata.user_custom_python.user_model.TfUserPostprocessor" } } # Verify the default TensorFlowModel is instantiated. # Verify postprocessing (which divides values by 2) is applied to the # predicted results. # Verify no preprocessing performed. Returns a function that returns start_time, then end_time, then -1. 
# Ensure that timer is correct # 315 - 314 # Ensure that unit conversion is correct # Ensure that the timer has stopped # We slept for 1 sec = 1e6 microseconds | 2.013642 | 2 |
internal/handlers/qatar.py | fillingthemoon/cartogram-web | 0 | 6624075 | <reponame>fillingthemoon/cartogram-web
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Qatar"
def get_gen_file(self):
return "{}/qat_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 8:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} Al Daayen
2 {} Al Khor
3 {} Al Rayyan
4 {} Al Shamal
5 {} Al Wakrah
6 {} Al-Shahaniya
7 {} Doha
8 {} Umm Salal""".format(*values)
def expect_geojson_output(self):
return True
def csv_to_area_string_and_colors(self, csvfile):
return self.order_by_example(csv.reader(csvfile), "Municipality", 0, 1, 2, 3, ["Al Daayen","<NAME>","<NAME>","<NAME>","Al Wakrah","Al-Shahaniya","Doha","Umm Salal"], [0.0 for i in range(0,8)], {"Al Daayen":"1","Al Khor":"2","Al Rayyan":"3","Al Shamal":"4","Al Wakrah":"5","Al-Shahaniya":"6","Doha":"7","Umm Salal":"8"})
| import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Qatar"
def get_gen_file(self):
return "{}/qat_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 8:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} Al Daayen
2 {} Al Khor
3 {} Al Rayyan
4 {} Al Shamal
5 {} Al Wakrah
6 {} Al-Shahaniya
7 {} Doha
8 {} Umm Salal""".format(*values)
def expect_geojson_output(self):
return True
def csv_to_area_string_and_colors(self, csvfile):
return self.order_by_example(csv.reader(csvfile), "Municipality", 0, 1, 2, 3, ["Al Daayen","<NAME>","<NAME>","<NAME>","Al Wakrah","Al-Shahaniya","Doha","Umm Salal"], [0.0 for i in range(0,8)], {"Al Daayen":"1","Al Khor":"2","Al Rayyan":"3","Al Shamal":"4","Al Wakrah":"5","Al-Shahaniya":"6","Doha":"7","Umm Salal":"8"}) | en | 0.297982 | 1 {} Al Daayen 2 {} Al Khor 3 {} Al Rayyan 4 {} Al Shamal 5 {} Al Wakrah 6 {} Al-Shahaniya 7 {} Doha 8 {} Umm Salal | 2.703695 | 3 |
students/K33421/Golub_Anna/LR_1/task 2/client.py | aytakr/ITMO_ICT_WebDevelopment_2021-2022 | 7 | 6624076 | import socket
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", 9000))
# sending message to server
a = input('Верхнее основание трапеции: ')
b = input('Нижнее основание трапеции: ')
h = input('Высота трапеции: ')
message = ' '.join([str(a), str(b), str(h)]).encode()
conn.send(message)
# receiving server's response
data = conn.recv(16384)
data = data.decode('utf-8')
print('Площадь трапеции равна', data)
conn.close()
| import socket
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", 9000))
# sending message to server
a = input('Верхнее основание трапеции: ')
b = input('Нижнее основание трапеции: ')
h = input('Высота трапеции: ')
message = ' '.join([str(a), str(b), str(h)]).encode()
conn.send(message)
# receiving server's response
data = conn.recv(16384)
data = data.decode('utf-8')
print('Площадь трапеции равна', data)
conn.close()
| en | 0.869646 | # sending message to server # receiving server's response | 3.111938 | 3 |
driver/mimasdriver/__init__.py | Jojojoppe/MimasV1Firmware | 0 | 6624077 | <filename>driver/mimasdriver/__init__.py
from .mimas_interface import MimasInterface | <filename>driver/mimasdriver/__init__.py
from .mimas_interface import MimasInterface | none | 1 | 1.145871 | 1 | |
tests/test-default.py | oyebode23/govexec-sample | 0 | 6624078 | import sys
def main():
sys.stdout.write("Hello World")
if __name__=="__main__":
main()
| import sys
def main():
sys.stdout.write("Hello World")
if __name__=="__main__":
main()
| none | 1 | 2.101519 | 2 | |
examples/setuptools-test_suite/setup.py | andreztz/SomePackage | 1 | 6624079 | from setuptools import setup
setup(
name='A_Package',
packages=['a_package'],
test_suite='a_package.load_suite'
)
| from setuptools import setup
setup(
name='A_Package',
packages=['a_package'],
test_suite='a_package.load_suite'
)
| none | 1 | 1.231544 | 1 | |
src/notelist/views/authentication.py | jajimenez/notelist | 1 | 6624080 | <filename>src/notelist/views/authentication.py
"""Authentication views module."""
from hmac import compare_digest
from datetime import timedelta
from flask import Blueprint, request
from flask_jwt_extended import (
jwt_required, create_access_token, create_refresh_token, get_jwt,
get_jwt_identity
)
from notelist.tools import get_hash
from notelist.config import get_sm
from notelist.schemas.auth import LoginSchema
from notelist.db import get_db
from notelist.responses import OK, ERROR_INVALID_CREDENTIALS, get_response_data
# Messages
USER_LOGGED_IN = "User logged in"
INVALID_CREDENTIALS = "Invalid credentials"
TOKEN_REFRESHED = "Token refreshed"
USER_LOGGED_OUT = "User logged out"
# Blueprint object
bp = Blueprint("auth", __name__)
# Schema
schema = LoginSchema()
@bp.route("/login", methods=["POST"])
def login() -> tuple[dict, int]:
"""Log in.
This operation returns a fresh access token and a refresh token. Any of the
tokens can be provided to an API request in the following header:
"Authorization: Bearer access_token"
Request data (JSON string):
- username (string): Username.
- password (string): Password.
Response status codes:
- 200 (Success)
- 400 (Bad Request)
- 401 (Unauthorized)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
- result (object): User ID, access token and refresh token.
:return: Tuple containing the response data and the response status code.
"""
# Get and validate request data (a "marshmallow.ValidationError" exception
# is raised if the data is invalid).
auth = request.get_json() or {}
auth = schema.load(auth)
# We get the hash of the request password, as passwords are stored
# encrypted in the database.
password = get_hash(auth["password"])
# Get the user from the database
db = get_db()
user = db.users.get_by_username(auth["username"])
# Check user and password
if (
not user or
not user["enabled"] or
not compare_digest(password, user["password"])
):
d = get_response_data(INVALID_CREDENTIALS, ERROR_INVALID_CREDENTIALS)
return d, 401
# Create access and refresh tokens. The user ID is the Identity of the
# tokens (not to be confused with the JTI (unique identifier) of the
# tokens).
user_id = user["id"]
acc_tok = create_access_token(user_id, fresh=True)
ref_tok = create_refresh_token(user_id)
sm = get_sm()
acc_exp = sm.get("NL_ACCESS_TOKEN_EXP") # Acc. token expiration (minutes)
ref_exp = sm.get("NL_REFRESH_TOKEN_EXP") # Ref. token expiration (minutes)
acc_exp = str(timedelta(minutes=acc_exp)) # Acc. token exp. description
ref_exp = str(timedelta(minutes=ref_exp)) # Ref. token exp. description
result = {
"user_id": user_id,
"access_token": acc_tok,
"access_token_expiration": acc_exp,
"refresh_token": ref_tok,
"refresh_token_expiration": ref_exp
}
return get_response_data(USER_LOGGED_IN, OK, result), 200
@bp.route("/refresh", methods=["GET"])
@jwt_required(refresh=True)
def refresh() -> tuple[dict, int]:
"""Get a new, not fresh, access token.
Refreshing the access token is needed when the token is expired. This
operation requires the following header with a refresh token:
"Authorization: Bearer refresh_token"
Response status codes:
- 200 (Success)
- 401 (Unauthorized)
- 422 (Unprocessable Entity)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
- result (object): New, not fresh, access token.
:return: Tuple containing the response data and the response status code.
"""
# Get the request JWT Identity, which in this application is equal to the
# ID of the request user.
user_id = get_jwt_identity()
# Create a new, not fresh, access token
acc_tok = create_access_token(user_id, fresh=False)
# Access token expiration (in minutes)
sm = get_sm()
acc_exp = sm.get("NL_ACCESS_TOKEN_EXP")
# Access token expiration description
acc_exp = str(timedelta(minutes=acc_exp))
result = {
"access_token": acc_tok,
"access_token_expiration": acc_exp
}
return get_response_data(TOKEN_REFRESHED, OK, result), 200
@bp.route("/logout", methods=["GET"])
@jwt_required()
def logout() -> tuple[dict, int]:
"""Log out.
This operation revokes an access token provided in the request. This
operation requires the following header with the access token:
"Authorization: Bearer access_token"
Response status codes:
- 200 (Success)
- 401 (Unauthorized)
- 422 (Unprocessable Entity)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
:return: Tuple containing the response data and the response status code.
"""
# JWT payload data, which contains:
# - "jti" (string): The JTI is a unique identifier of the JWT token.
# - "exp" (integer): Expiration time in seconds of the JWT token.
jwt = get_jwt()
# We add the JTI of the JWT token of the current request to the Block List
# in order to revoke the token. We use the token expiration time as the
# expiration time for the block list token (after this time, the token is
# removed from the block list.
db = get_db()
db.blocklist.put(jwt["jti"], jwt["exp"])
return get_response_data(USER_LOGGED_OUT, OK), 200
| <filename>src/notelist/views/authentication.py
"""Authentication views module."""
from hmac import compare_digest
from datetime import timedelta
from flask import Blueprint, request
from flask_jwt_extended import (
jwt_required, create_access_token, create_refresh_token, get_jwt,
get_jwt_identity
)
from notelist.tools import get_hash
from notelist.config import get_sm
from notelist.schemas.auth import LoginSchema
from notelist.db import get_db
from notelist.responses import OK, ERROR_INVALID_CREDENTIALS, get_response_data
# Messages
USER_LOGGED_IN = "User logged in"
INVALID_CREDENTIALS = "Invalid credentials"
TOKEN_REFRESHED = "Token refreshed"
USER_LOGGED_OUT = "User logged out"
# Blueprint object
bp = Blueprint("auth", __name__)
# Schema
schema = LoginSchema()
@bp.route("/login", methods=["POST"])
def login() -> tuple[dict, int]:
"""Log in.
This operation returns a fresh access token and a refresh token. Any of the
tokens can be provided to an API request in the following header:
"Authorization: Bearer access_token"
Request data (JSON string):
- username (string): Username.
- password (string): Password.
Response status codes:
- 200 (Success)
- 400 (Bad Request)
- 401 (Unauthorized)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
- result (object): User ID, access token and refresh token.
:return: Tuple containing the response data and the response status code.
"""
# Get and validate request data (a "marshmallow.ValidationError" exception
# is raised if the data is invalid).
auth = request.get_json() or {}
auth = schema.load(auth)
# We get the hash of the request password, as passwords are stored
# encrypted in the database.
password = get_hash(auth["password"])
# Get the user from the database
db = get_db()
user = db.users.get_by_username(auth["username"])
# Check user and password
if (
not user or
not user["enabled"] or
not compare_digest(password, user["password"])
):
d = get_response_data(INVALID_CREDENTIALS, ERROR_INVALID_CREDENTIALS)
return d, 401
# Create access and refresh tokens. The user ID is the Identity of the
# tokens (not to be confused with the JTI (unique identifier) of the
# tokens).
user_id = user["id"]
acc_tok = create_access_token(user_id, fresh=True)
ref_tok = create_refresh_token(user_id)
sm = get_sm()
acc_exp = sm.get("NL_ACCESS_TOKEN_EXP") # Acc. token expiration (minutes)
ref_exp = sm.get("NL_REFRESH_TOKEN_EXP") # Ref. token expiration (minutes)
acc_exp = str(timedelta(minutes=acc_exp)) # Acc. token exp. description
ref_exp = str(timedelta(minutes=ref_exp)) # Ref. token exp. description
result = {
"user_id": user_id,
"access_token": acc_tok,
"access_token_expiration": acc_exp,
"refresh_token": ref_tok,
"refresh_token_expiration": ref_exp
}
return get_response_data(USER_LOGGED_IN, OK, result), 200
@bp.route("/refresh", methods=["GET"])
@jwt_required(refresh=True)
def refresh() -> tuple[dict, int]:
"""Get a new, not fresh, access token.
Refreshing the access token is needed when the token is expired. This
operation requires the following header with a refresh token:
"Authorization: Bearer refresh_token"
Response status codes:
- 200 (Success)
- 401 (Unauthorized)
- 422 (Unprocessable Entity)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
- result (object): New, not fresh, access token.
:return: Tuple containing the response data and the response status code.
"""
# Get the request JWT Identity, which in this application is equal to the
# ID of the request user.
user_id = get_jwt_identity()
# Create a new, not fresh, access token
acc_tok = create_access_token(user_id, fresh=False)
# Access token expiration (in minutes)
sm = get_sm()
acc_exp = sm.get("NL_ACCESS_TOKEN_EXP")
# Access token expiration description
acc_exp = str(timedelta(minutes=acc_exp))
result = {
"access_token": acc_tok,
"access_token_expiration": acc_exp
}
return get_response_data(TOKEN_REFRESHED, OK, result), 200
@bp.route("/logout", methods=["GET"])
@jwt_required()
def logout() -> tuple[dict, int]:
"""Log out.
This operation revokes an access token provided in the request. This
operation requires the following header with the access token:
"Authorization: Bearer access_token"
Response status codes:
- 200 (Success)
- 401 (Unauthorized)
- 422 (Unprocessable Entity)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
:return: Tuple containing the response data and the response status code.
"""
# JWT payload data, which contains:
# - "jti" (string): The JTI is a unique identifier of the JWT token.
# - "exp" (integer): Expiration time in seconds of the JWT token.
jwt = get_jwt()
# We add the JTI of the JWT token of the current request to the Block List
# in order to revoke the token. We use the token expiration time as the
# expiration time for the block list token (after this time, the token is
# removed from the block list.
db = get_db()
db.blocklist.put(jwt["jti"], jwt["exp"])
return get_response_data(USER_LOGGED_OUT, OK), 200
| en | 0.725459 | Authentication views module. # Messages # Blueprint object # Schema Log in. This operation returns a fresh access token and a refresh token. Any of the tokens can be provided to an API request in the following header: "Authorization: Bearer access_token" Request data (JSON string): - username (string): Username. - password (string): Password. Response status codes: - 200 (Success) - 400 (Bad Request) - 401 (Unauthorized) Response data (JSON string): - message (string): Message. - message_type (string): Message type. - result (object): User ID, access token and refresh token. :return: Tuple containing the response data and the response status code. # Get and validate request data (a "marshmallow.ValidationError" exception # is raised if the data is invalid). # We get the hash of the request password, as passwords are stored # encrypted in the database. # Get the user from the database # Check user and password # Create access and refresh tokens. The user ID is the Identity of the # tokens (not to be confused with the JTI (unique identifier) of the # tokens). # Acc. token expiration (minutes) # Ref. token expiration (minutes) # Acc. token exp. description # Ref. token exp. description Get a new, not fresh, access token. Refreshing the access token is needed when the token is expired. This operation requires the following header with a refresh token: "Authorization: Bearer refresh_token" Response status codes: - 200 (Success) - 401 (Unauthorized) - 422 (Unprocessable Entity) Response data (JSON string): - message (string): Message. - message_type (string): Message type. - result (object): New, not fresh, access token. :return: Tuple containing the response data and the response status code. # Get the request JWT Identity, which in this application is equal to the # ID of the request user. # Create a new, not fresh, access token # Access token expiration (in minutes) # Access token expiration description Log out. 
This operation revokes an access token provided in the request. This operation requires the following header with the access token: "Authorization: Bearer access_token" Response status codes: - 200 (Success) - 401 (Unauthorized) - 422 (Unprocessable Entity) Response data (JSON string): - message (string): Message. - message_type (string): Message type. :return: Tuple containing the response data and the response status code. # JWT payload data, which contains: # - "jti" (string): The JTI is a unique identifier of the JWT token. # - "exp" (integer): Expiration time in seconds of the JWT token. # We add the JTI of the JWT token of the current request to the Block List # in order to revoke the token. We use the token expiration time as the # expiration time for the block list token (after this time, the token is # removed from the block list. | 2.666074 | 3 |
trading_alert/base/line_drawer.py | culdo/trading_alert | 0 | 6624081 | <reponame>culdo/trading_alert<gh_stars>0
import time
import tkinter
import numpy as np
from trading_alert.base.single_line import SingleLine
class LineDrawer:
def __init__(self, pp):
self.is_move_done = False
self.lines = []
self.fig = pp.fig
self.ax = pp.ax1
self.pp = pp
self._min_d = None
self.selected_line = None
def draw_tline(self):
xy = self.fig.ginput(n=2)
x = [p[0] for p in xy]
y = [p[1] for p in xy]
line = self.ax.plot(x, y)
self.selected_line = SingleLine(self.pp, line[0], SingleLine.TLINE, annotation_point=xy[-1])
self.lines.append(self.selected_line)
def draw_hline(self):
xy = self.fig.ginput(n=1)
y = [p[1] for p in xy]
line = self.ax.axhline(y)
self.selected_line = SingleLine(self.pp, line, SingleLine.HLINE, annotation_point=xy[-1])
self.lines.append(self.selected_line)
def draw_vline(self):
xy = self.fig.ginput(n=1)
x = [p[0] for p in xy]
line = self.ax.axvline(x)
self.selected_line = SingleLine(self.pp, line, SingleLine.VLINE)
self.lines.append(self.selected_line)
def restore_notify(self):
for line in self.lines:
if line.notify_msg:
line.set_win10_toast()
def get_clicked_line(self):
xy = self.fig.ginput(n=1)
p3 = np.array(xy[0])
for line in self.lines:
d = line.calc_point_dist(p3)
if self._min_d is None or d < self._min_d:
self._min_d = d
self.selected_line = line
self._min_d = None
return self.selected_line
def move_line_end(self):
while not self.is_move_done:
xy = self.fig.ginput(n=1)
if self.is_move_done:
print("move line done")
break
p3 = np.array(xy[0])
self.selected_line.move_line_end(p3)
self.fig.canvas.draw()
self.is_move_done = False
def remove_clicked(self):
self.unset_alert()
self.selected_line.remove()
self.selected_line = None
def set_alert(self):
if not self.selected_line:
print("Please click a line")
return
symbol_label = self.pp.symbol[:-4]+"/"+self.pp.symbol[-4:]
bookmark_list = self.pp.ta.main_window.bookmark_list
if symbol_label not in bookmark_list.get(0, tkinter.END):
bookmark_list.insert(tkinter.END, symbol_label)
self.selected_line.set_alert()
def unset_alert(self):
self.selected_line.unset_alert()
if not self.has_alert():
bookmark_list = self.pp.ta.main_window.bookmark_list
symbol_label = self.pp.symbol[:-4]+"/"+self.pp.symbol[-4:]
if symbol_label in bookmark_list.get(0, tkinter.END):
idx = bookmark_list.get(0, tkinter.END).index(symbol_label)
bookmark_list.delete(idx)
def has_alert(self):
for line in self.lines:
if line.alert_equation:
return True
return False
| import time
import tkinter
import numpy as np
from trading_alert.base.single_line import SingleLine
class LineDrawer:
def __init__(self, pp):
self.is_move_done = False
self.lines = []
self.fig = pp.fig
self.ax = pp.ax1
self.pp = pp
self._min_d = None
self.selected_line = None
def draw_tline(self):
xy = self.fig.ginput(n=2)
x = [p[0] for p in xy]
y = [p[1] for p in xy]
line = self.ax.plot(x, y)
self.selected_line = SingleLine(self.pp, line[0], SingleLine.TLINE, annotation_point=xy[-1])
self.lines.append(self.selected_line)
def draw_hline(self):
xy = self.fig.ginput(n=1)
y = [p[1] for p in xy]
line = self.ax.axhline(y)
self.selected_line = SingleLine(self.pp, line, SingleLine.HLINE, annotation_point=xy[-1])
self.lines.append(self.selected_line)
def draw_vline(self):
xy = self.fig.ginput(n=1)
x = [p[0] for p in xy]
line = self.ax.axvline(x)
self.selected_line = SingleLine(self.pp, line, SingleLine.VLINE)
self.lines.append(self.selected_line)
def restore_notify(self):
for line in self.lines:
if line.notify_msg:
line.set_win10_toast()
def get_clicked_line(self):
xy = self.fig.ginput(n=1)
p3 = np.array(xy[0])
for line in self.lines:
d = line.calc_point_dist(p3)
if self._min_d is None or d < self._min_d:
self._min_d = d
self.selected_line = line
self._min_d = None
return self.selected_line
def move_line_end(self):
while not self.is_move_done:
xy = self.fig.ginput(n=1)
if self.is_move_done:
print("move line done")
break
p3 = np.array(xy[0])
self.selected_line.move_line_end(p3)
self.fig.canvas.draw()
self.is_move_done = False
def remove_clicked(self):
self.unset_alert()
self.selected_line.remove()
self.selected_line = None
def set_alert(self):
if not self.selected_line:
print("Please click a line")
return
symbol_label = self.pp.symbol[:-4]+"/"+self.pp.symbol[-4:]
bookmark_list = self.pp.ta.main_window.bookmark_list
if symbol_label not in bookmark_list.get(0, tkinter.END):
bookmark_list.insert(tkinter.END, symbol_label)
self.selected_line.set_alert()
def unset_alert(self):
self.selected_line.unset_alert()
if not self.has_alert():
bookmark_list = self.pp.ta.main_window.bookmark_list
symbol_label = self.pp.symbol[:-4]+"/"+self.pp.symbol[-4:]
if symbol_label in bookmark_list.get(0, tkinter.END):
idx = bookmark_list.get(0, tkinter.END).index(symbol_label)
bookmark_list.delete(idx)
def has_alert(self):
for line in self.lines:
if line.alert_equation:
return True
return False | none | 1 | 2.631869 | 3 | |
tools/c7n_openstack/c7n_openstack/client.py | al3pht/cloud-custodian | 2,415 | 6624082 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import openstack
log = logging.getLogger('custodian.openstack.client')
class Session:
def __init__(self):
self.http_proxy = os.getenv('HTTPS_PROXY')
self.cloud_name = os.getenv('OS_CLOUD_NAME')
def client(self):
if self.cloud_name:
log.debug(f"Connect to OpenStack cloud {self.cloud_name}")
else:
log.debug(("OpenStack cloud name not set, "
"try to get openstack credential from environment"))
cloud = openstack.connect(cloud=self.cloud_name)
return cloud
| # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import openstack
log = logging.getLogger('custodian.openstack.client')
class Session:
def __init__(self):
self.http_proxy = os.getenv('HTTPS_PROXY')
self.cloud_name = os.getenv('OS_CLOUD_NAME')
def client(self):
if self.cloud_name:
log.debug(f"Connect to OpenStack cloud {self.cloud_name}")
else:
log.debug(("OpenStack cloud name not set, "
"try to get openstack credential from environment"))
cloud = openstack.connect(cloud=self.cloud_name)
return cloud
| en | 0.82087 | # Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 # Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.10994 | 2 |
backend/src/myCU_App/migrations/0002_auto_20200309_0348.py | citz73/myCUProject | 1 | 6624083 | # Generated by Django 2.2.7 on 2020-03-09 03:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myCU_App', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_name', models.CharField(max_length=100)),
('image_details', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message_name', models.CharField(max_length=100)),
('message_details', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='NewProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_name', models.CharField(max_length=150)),
('project_member', models.CharField(max_length=150)),
('project_detail', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('year', models.DecimalField(decimal_places=2, max_digits=4)),
('email', models.EmailField(max_length=254)),
('major', models.CharField(max_length=10)),
],
),
migrations.DeleteModel(
name='MyModelTest',
),
]
| # Generated by Django 2.2.7 on 2020-03-09 03:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myCU_App', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_name', models.CharField(max_length=100)),
('image_details', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message_name', models.CharField(max_length=100)),
('message_details', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='NewProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_name', models.CharField(max_length=150)),
('project_member', models.CharField(max_length=150)),
('project_detail', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('year', models.DecimalField(decimal_places=2, max_digits=4)),
('email', models.EmailField(max_length=254)),
('major', models.CharField(max_length=10)),
],
),
migrations.DeleteModel(
name='MyModelTest',
),
]
| en | 0.699448 | # Generated by Django 2.2.7 on 2020-03-09 03:48 | 1.855632 | 2 |
viz.py | varshiths/rmod-snn | 1 | 6624084 |
import sys
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
def smooth(x,window_len=100,window='hanning'):
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
# xlimits=1e7
# ylimits=2000
res1 = np.loadtxt(sys.argv[1])
plt.figure()
x = np.arange(res1.size)
y = np.cumsum(res1)
plt.plot(x, y)
plt.xlabel("Number of Episodes")
plt.ylabel("Cumulative Reward")
axes = plt.gca()
# axes.set_xlim([0,xlimits])
axes.set_ylim([-100,2400])
plt.show()
|
import sys
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
def smooth(x,window_len=100,window='hanning'):
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
# xlimits=1e7
# ylimits=2000
res1 = np.loadtxt(sys.argv[1])
plt.figure()
x = np.arange(res1.size)
y = np.cumsum(res1)
plt.plot(x, y)
plt.xlabel("Number of Episodes")
plt.ylabel("Cumulative Reward")
axes = plt.gca()
# axes.set_xlim([0,xlimits])
axes.set_ylim([-100,2400])
plt.show()
| en | 0.135236 | #moving average # xlimits=1e7 # ylimits=2000 # axes.set_xlim([0,xlimits]) | 3.045737 | 3 |
plot_support_jgm.py | JAGalvis/useful_code | 0 | 6624085 | def plot_categorical_bars(df, column, hue=None, colors = None, normalized = True, figsize=(10,4), display_val = True):
'''
Creates a column plot of categorical data using seaborn
'''
norm = len(df) if normalized else 1
y = 'percent' if normalized else 'count_values'
columns = [col for col in [column, hue] if col != None]
df = df[columns].copy()
df['count_values'] = 1
df = df.groupby(by = columns).count().reset_index()
df['percent'] = df['count_values']/norm * 100
fig, ax = plt.subplots(figsize=figsize); sns.set(style="darkgrid")
ax = sns.barplot(x=column, y=y, data=df, hue=hue, palette=colors, ax=ax)
if display_val:
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height + 3,
'{:1.2f}'.format(height),
ha="center")
plt.show()
def plot_faceted_categorical_bar(df, bars, group_cols, columns, rows, hue = None, colors = None):
'''
Plots faceted bar chart
'''
df = df.copy()
df = df[group_cols]
df['count_values'] = 1
df = df.groupby(by= group_cols, as_index = False).count()
g = sns.FacetGrid(data=df, col=columns, row=rows, hue=hue, palette=colors)
g = g.map(plt.bar, bars, 'count_values')
g.set_xticklabels(rotation=80)
plt.show()
def plot_unique_values_integer_dist(df):
'''
Plots the distribution of unique values in the integer columns.
'''
df.select_dtypes(np.int64).nunique().value_counts().sort_index().plot.bar(color = 'blue',
figsize = (8, 6),
edgecolor = 'k', linewidth = 2);
plt.xlabel('Number of Unique Values'); plt.ylabel('Count');
plt.title('Count of Unique Values in Integer Columns');
plt.show()
def plot_float_values_dist(df, target_col, colors_dict, category_dict):
'''
Plots distribution of all float columns
https://www.kaggle.com/willkoehrsen/a-complete-introduction-and-walkthrough
'''
from collections import OrderedDict
plt.figure(figsize = (20, 16))
plt.style.use('fivethirtyeight')
# Color mapping
colors = OrderedDict(colors_dict) #{1: 'red', 2: 'orange', 3: 'blue', 4: 'green'}
category_mapping = OrderedDict(category_dict) # {1: 'extreme', 2: 'moderate', 3: 'vulnerable', 4: 'non vulnerable'}
# Iterate through the float columns
for i, col in enumerate(df.select_dtypes('float')):
ax = plt.subplot(4, 2, i + 1)
# Iterate through the poverty levels
for category, color in colors.items():
# Plot each poverty level as a separate line
sns.kdeplot(df.loc[df[target_col] == category, col].dropna(),
ax = ax, color = color, label = category_mapping[category])
plt.title(f'{col.capitalize()} Distribution'); plt.xlabel(f'{col}'); plt.ylabel('Density')
plt.subplots_adjust(top = 2)
plt.show()
def plot_bubble_categoricals(x, y, data, annotate = True):
"""
Plot counts of two categoricals.
Size is raw count for each grouping.
Percentages are for a given value of y.
To read the plot, choose a given y-value and then read across the row.
https://www.kaggle.com/willkoehrsen/a-complete-introduction-and-walkthrough
"""
# Raw counts
raw_counts = pd.DataFrame(data.groupby(y)[x].value_counts(normalize = False))
raw_counts = raw_counts.rename(columns = {x: 'raw_count'})
# Calculate counts for each group of x and y
counts = pd.DataFrame(data.groupby(y)[x].value_counts(normalize = True))
# Rename the column and reset the index
counts = counts.rename(columns = {x: 'normalized_count'}).reset_index()
counts['percent'] = 100 * counts['normalized_count']
# Add the raw count
counts['raw_count'] = list(raw_counts['raw_count'])
plt.figure(figsize = (14, 10))
# Scatter plot sized by percent
plt.scatter(counts[x], counts[y], edgecolor = 'k', color = 'lightgreen',
s = 100 * np.sqrt(counts['raw_count']), marker = 'o',
alpha = 0.6, linewidth = 1.5)
if annotate:
# Annotate the plot with text
for i, row in counts.iterrows():
# Put text with appropriate offsets
plt.annotate(xy = (row[x] - (1 / counts[x].nunique()),
row[y] - (0.15 / counts[y].nunique())),
color = 'navy',
s = f"{round(row['percent'], 1)}%")
# Set tick marks
plt.yticks(counts[y].unique())
plt.xticks(counts[x].unique())
# Transform min and max to evenly space in square root domain
sqr_min = int(np.sqrt(raw_counts['raw_count'].min()))
sqr_max = int(np.sqrt(raw_counts['raw_count'].max()))
# 5 sizes for legend
msizes = list(range(sqr_min, sqr_max,
int(( sqr_max - sqr_min) / 5)))
markers = []
# Markers for legend
for size in msizes:
markers.append(plt.scatter([], [], s = 100 * size,
label = f'{int(round(np.square(size) / 100) * 100)}',
color = 'lightgreen',
alpha = 0.6, edgecolor = 'k', linewidth = 1.5))
# Legend and formatting
plt.legend(handles = markers, title = 'Counts',
labelspacing = 3, handletextpad = 2,
fontsize = 16,
loc = (1.10, 0.19))
plt.annotate(f'* Size represents raw count while % is for a given y value.',
xy = (0, 1), xycoords = 'figure points', size = 10)
# Adjust axes limits
plt.xlim((counts[x].min() - (6 / counts[x].nunique()),
counts[x].max() + (6 / counts[x].nunique())))
plt.ylim((counts[y].min() - (4 / counts[y].nunique()),
counts[y].max() + (4 / counts[y].nunique())))
plt.grid(None)
plt.xlabel(f"{x}"); plt.ylabel(f"{y}"); plt.title(f"{y} vs {x}");
def plot_correlation_heatmap(df, variables):
'''
'''
# variables = ['Target', 'dependency', 'warning', 'walls+roof+floor', 'meaneduc',
# 'floor', 'r4m1', 'overcrowding']
# Calculate the correlations
corr_mat = df[variables].corr().round(2)
# Draw a correlation heatmap
# plt.rcParams['font.size'] = 18
plt.figure(figsize = (12, 12))
sns.heatmap(corr_mat, vmin = -0.5, vmax = 0.8, center = 0,
cmap = plt.cm.RdYlGn_r, annot = True);
# def plot_featuresplot(df, features):
# #import warnings
# #warnings.filterwarnings('ignore')
# # Copy the data for plotting
# plot_data = df[features]
# # Create the pairgrid object
# grid = sns.PairGrid(data = plot_data, size = 4, diag_sharey=False,
# hue = 'Target', hue_order = [4, 3, 2, 1],
# vars = [x for x in list(plot_data.columns) if x != 'Target'])
# # Upper is a scatter plot
# grid.map_upper(plt.scatter, alpha = 0.8, s = 20)
# # Diagonal is a histogram
# grid.map_diag(sns.kdeplot)
# # Bottom is density plot
# grid.map_lower(sns.kdeplot, cmap = plt.cm.OrRd_r);
# grid = grid.add_legend()
# plt.suptitle('Feature Plots Colored By Target', size = 32, y = 1.05); | def plot_categorical_bars(df, column, hue=None, colors = None, normalized = True, figsize=(10,4), display_val = True):
'''
Creates a column plot of categorical data using seaborn
'''
norm = len(df) if normalized else 1
y = 'percent' if normalized else 'count_values'
columns = [col for col in [column, hue] if col != None]
df = df[columns].copy()
df['count_values'] = 1
df = df.groupby(by = columns).count().reset_index()
df['percent'] = df['count_values']/norm * 100
fig, ax = plt.subplots(figsize=figsize); sns.set(style="darkgrid")
ax = sns.barplot(x=column, y=y, data=df, hue=hue, palette=colors, ax=ax)
if display_val:
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height + 3,
'{:1.2f}'.format(height),
ha="center")
plt.show()
def plot_faceted_categorical_bar(df, bars, group_cols, columns, rows, hue = None, colors = None):
    '''
    Plots faceted bar chart of row counts per category combination.

    Args:
        df: pandas DataFrame holding the categorical columns.
        bars: column plotted on the x-axis of every facet.
        group_cols: columns used for the group-by count; assumed to contain
            `bars`, `columns`, `rows` (and `hue` if given) -- TODO confirm.
        columns: column whose values define the facet columns.
        rows: column whose values define the facet rows.
        hue: optional column used to colour the bars.
        colors: optional palette passed to seaborn for `hue`.
    '''
    # Copy first so the caller's frame is never mutated.
    df = df.copy()
    df = df[group_cols]
    # Constant column that groupby().count() turns into a per-group row count.
    df['count_values'] = 1
    df = df.groupby(by= group_cols, as_index = False).count()
    g = sns.FacetGrid(data=df, col=columns, row=rows, hue=hue, palette=colors)
    g = g.map(plt.bar, bars, 'count_values')
    # Rotate tick labels so long category names stay readable.
    g.set_xticklabels(rotation=80)
    plt.show()
def plot_unique_values_integer_dist(df):
    '''
    Plots the distribution of unique values in the integer columns.

    For every int64 column, counts its number of distinct values, then bar-plots
    how many columns share each cardinality (useful for spotting binary flags).
    '''
    # nunique() per int64 column -> value_counts() over those cardinalities.
    df.select_dtypes(np.int64).nunique().value_counts().sort_index().plot.bar(color = 'blue',
                                                                        figsize = (8, 6),
                                                                       edgecolor = 'k', linewidth = 2);
    plt.xlabel('Number of Unique Values'); plt.ylabel('Count');
    plt.title('Count of Unique Values in Integer Columns');
    plt.show()
def plot_float_values_dist(df, target_col, colors_dict, category_dict):
    '''
    Plots distribution of all float columns, one KDE line per target category.
    https://www.kaggle.com/willkoehrsen/a-complete-introduction-and-walkthrough

    Args:
        df: pandas DataFrame with float columns to plot.
        target_col: column holding the category label of each row.
        colors_dict: mapping {category value: matplotlib colour}.
        category_dict: mapping {category value: human-readable legend label}.

    NOTE(review): the subplot grid is fixed at 4x2, so this assumes at most
    eight float columns -- confirm against the calling dataset.
    '''
    from collections import OrderedDict
    plt.figure(figsize = (20, 16))
    plt.style.use('fivethirtyeight')
    # Color mapping
    colors = OrderedDict(colors_dict) #{1: 'red', 2: 'orange', 3: 'blue', 4: 'green'}
    category_mapping = OrderedDict(category_dict) # {1: 'extreme', 2: 'moderate', 3: 'vulnerable', 4: 'non vulnerable'}
    # Iterate through the float columns
    for i, col in enumerate(df.select_dtypes('float')):
        ax = plt.subplot(4, 2, i + 1)
        # Iterate through the poverty levels
        for category, color in colors.items():
            # Plot each poverty level as a separate line
            sns.kdeplot(df.loc[df[target_col] == category, col].dropna(),
                        ax = ax, color = color, label = category_mapping[category])
        plt.title(f'{col.capitalize()} Distribution'); plt.xlabel(f'{col}'); plt.ylabel('Density')
    plt.subplots_adjust(top = 2)
    plt.show()
def plot_bubble_categoricals(x, y, data, annotate = True):
    """
    Plot counts of two categoricals.
    Size is raw count for each grouping.
    Percentages are for a given value of y.
    To read the plot, choose a given y-value and then read across the row.
    https://www.kaggle.com/willkoehrsen/a-complete-introduction-and-walkthrough

    Args:
        x: name of the categorical column on the x-axis.
        y: name of the categorical column on the y-axis.
        data: pandas DataFrame containing both columns.
        annotate: when True, print the row-wise percentage next to each bubble.

    NOTE(review): `plt.annotate(s=...)` relies on the old keyword name for the
    text argument (renamed to `text` in newer matplotlib) -- confirm the pinned
    matplotlib version supports it.
    """
    # Raw counts
    raw_counts = pd.DataFrame(data.groupby(y)[x].value_counts(normalize = False))
    raw_counts = raw_counts.rename(columns = {x: 'raw_count'})
    # Calculate counts for each group of x and y (normalized within each y)
    counts = pd.DataFrame(data.groupby(y)[x].value_counts(normalize = True))
    # Rename the column and reset the index
    counts = counts.rename(columns = {x: 'normalized_count'}).reset_index()
    counts['percent'] = 100 * counts['normalized_count']
    # Add the raw count (same group order as `counts`)
    counts['raw_count'] = list(raw_counts['raw_count'])
    plt.figure(figsize = (14, 10))
    # Scatter plot sized by sqrt(raw count) so areas stay comparable
    plt.scatter(counts[x], counts[y], edgecolor = 'k', color = 'lightgreen',
                s = 100 * np.sqrt(counts['raw_count']), marker = 'o',
                alpha = 0.6, linewidth = 1.5)
    if annotate:
        # Annotate the plot with text
        for i, row in counts.iterrows():
            # Put text with appropriate offsets
            plt.annotate(xy = (row[x] - (1 / counts[x].nunique()),
                               row[y] - (0.15 / counts[y].nunique())),
                         color = 'navy',
                         s = f"{round(row['percent'], 1)}%")
    # Set tick marks
    plt.yticks(counts[y].unique())
    plt.xticks(counts[x].unique())
    # Transform min and max to evenly space in square root domain
    sqr_min = int(np.sqrt(raw_counts['raw_count'].min()))
    sqr_max = int(np.sqrt(raw_counts['raw_count'].max()))
    # 5 sizes for legend
    msizes = list(range(sqr_min, sqr_max,
                        int(( sqr_max - sqr_min) / 5)))
    markers = []
    # Markers for legend
    for size in msizes:
        markers.append(plt.scatter([], [], s = 100 * size,
                                   label = f'{int(round(np.square(size) / 100) * 100)}',
                                   color = 'lightgreen',
                                   alpha = 0.6, edgecolor = 'k', linewidth = 1.5))
    # Legend and formatting
    plt.legend(handles = markers, title = 'Counts',
               labelspacing = 3, handletextpad = 2,
               fontsize = 16,
               loc = (1.10, 0.19))
    plt.annotate(f'* Size represents raw count while % is for a given y value.',
                 xy = (0, 1), xycoords = 'figure points', size = 10)
    # Adjust axes limits
    plt.xlim((counts[x].min() - (6 / counts[x].nunique()),
              counts[x].max() + (6 / counts[x].nunique())))
    plt.ylim((counts[y].min() - (4 / counts[y].nunique()),
              counts[y].max() + (4 / counts[y].nunique())))
    plt.grid(None)
    plt.xlabel(f"{x}"); plt.ylabel(f"{y}"); plt.title(f"{y} vs {x}");
def plot_correlation_heatmap(df, variables):
    '''
    Draw a heatmap of the pairwise correlations between `variables`.

    Args:
        df: pandas DataFrame containing the columns to correlate.
        variables: list of column names to include in the correlation matrix.
    '''
    # Pairwise correlations, rounded to 2 decimals for readable annotations.
    corr_mat = df[variables].corr().round(2)
    # Draw a correlation heatmap; diverging palette centred on zero.
    plt.figure(figsize = (12, 12))
    sns.heatmap(corr_mat, vmin = -0.5, vmax = 0.8, center = 0,
                cmap = plt.cm.RdYlGn_r, annot = True);
# def plot_featuresplot(df, features):
# #import warnings
# #warnings.filterwarnings('ignore')
# # Copy the data for plotting
# plot_data = df[features]
# # Create the pairgrid object
# grid = sns.PairGrid(data = plot_data, size = 4, diag_sharey=False,
# hue = 'Target', hue_order = [4, 3, 2, 1],
# vars = [x for x in list(plot_data.columns) if x != 'Target'])
# # Upper is a scatter plot
# grid.map_upper(plt.scatter, alpha = 0.8, s = 20)
# # Diagonal is a histogram
# grid.map_diag(sns.kdeplot)
# # Bottom is density plot
# grid.map_lower(sns.kdeplot, cmap = plt.cm.OrRd_r);
# grid = grid.add_legend()
# plt.suptitle('Feature Plots Colored By Target', size = 32, y = 1.05); | en | 0.628154 | Creates a column plot of categorical data using seaborn Plots faceted bar chart Plots the distribution of unique values in the integer columns. Plots distribution of all float columns https://www.kaggle.com/willkoehrsen/a-complete-introduction-and-walkthrough # Color mapping #{1: 'red', 2: 'orange', 3: 'blue', 4: 'green'} # {1: 'extreme', 2: 'moderate', 3: 'vulnerable', 4: 'non vulnerable'} # Iterate through the float columns # Iterate through the poverty levels # Plot each poverty level as a separate line Plot counts of two categoricals. Size is raw count for each grouping. Percentages are for a given value of y. To read the plot, choose a given y-value and then read across the row. https://www.kaggle.com/willkoehrsen/a-complete-introduction-and-walkthrough # Raw counts # Calculate counts for each group of x and y # Rename the column and reset the index # Add the raw count # Scatter plot sized by percent # Annotate the plot with text # Put text with appropriate offsets # Set tick marks # Transform min and max to evenly space in square root domain # 5 sizes for legend # Markers for legend # Legend and formatting # Adjust axes limits # variables = ['Target', 'dependency', 'warning', 'walls+roof+floor', 'meaneduc', # 'floor', 'r4m1', 'overcrowding'] # Calculate the correlations # Draw a correlation heatmap # plt.rcParams['font.size'] = 18 # def plot_featuresplot(df, features): # #import warnings # #warnings.filterwarnings('ignore') # # Copy the data for plotting # plot_data = df[features] # # Create the pairgrid object # grid = sns.PairGrid(data = plot_data, size = 4, diag_sharey=False, # hue = 'Target', hue_order = [4, 3, 2, 1], # vars = [x for x in list(plot_data.columns) if x != 'Target']) # # Upper is a scatter plot # grid.map_upper(plt.scatter, alpha = 0.8, s = 20) # # Diagonal is a histogram # grid.map_diag(sns.kdeplot) # # Bottom is density plot # 
grid.map_lower(sns.kdeplot, cmap = plt.cm.OrRd_r); # grid = grid.add_legend() # plt.suptitle('Feature Plots Colored By Target', size = 32, y = 1.05); | 3.274566 | 3 |
landmapper/urls.py | Ecotrust/woodland-discovery | 1 | 6624086 | <gh_stars>1-10
from django.conf.urls import url, include
from rpc4django.views import serve_rpc_request
from . import views
from .models import AOI
from features.views import form_resources
# URL routing table. Django uses the first regex that matches, so the
# catch-all r'^' entry must remain last.
urlpatterns = [
    url(r'^rpc$', serve_rpc_request, name='rpc'),
    url(r'^about/', views.about, name='about'),
    url(r'^help/', views.help, name='help'),
    # Client-side (single-page) routes all render the same index view.
    url(r'^user/', views.index, name='user'),
    url(r'^search/', views.index, name='search'),
    url(r'^portfolio/', views.index, name='portfolio'),
    # TODO fix urls so account and visualize static text can be overwritten
    url(r'^visualize/?$', views.planner, name='planner'),
    # url(r"^visualize/", include("visualize.urls")),
    # NOTE(review): route is /account/ but the URL name is 'login' -- confirm
    # templates/reverse() really rely on the name 'login' before renaming.
    url(r'^account/', views.account, name='login'),
    url(r'^get_taxlot_json', views.get_taxlot_json, name='get taxlot json'),
    url(r'^geosearch', views.geosearch, name='geosearch'),
    # AOI feature form must be declared before the generic features include
    # so it takes precedence.
    url(r"^features/aoi/form", form_resources,
        kwargs={'model': AOI},
        name="aoi_create_form"),
    url(r"^features/", include("features.urls")),
    url(r'^', views.index, name='index'),
]
| from django.conf.urls import url, include
from rpc4django.views import serve_rpc_request
from . import views
from .models import AOI
from features.views import form_resources
urlpatterns = [
url(r'^rpc$', serve_rpc_request, name='rpc'),
url(r'^about/', views.about, name='about'),
url(r'^help/', views.help, name='help'),
url(r'^user/', views.index, name='user'),
url(r'^search/', views.index, name='search'),
url(r'^portfolio/', views.index, name='portfolio'),
# TODO fix urls so account and visualize static text can be overwritten
url(r'^visualize/?$', views.planner, name='planner'),
# url(r"^visualize/", include("visualize.urls")),
url(r'^account/', views.account, name='login'),
url(r'^get_taxlot_json', views.get_taxlot_json, name='get taxlot json'),
url(r'^geosearch', views.geosearch, name='geosearch'),
url(r"^features/aoi/form", form_resources,
kwargs={'model': AOI},
name="aoi_create_form"),
url(r"^features/", include("features.urls")),
url(r'^', views.index, name='index'),
] | en | 0.49316 | # TODO fix urls so account and visualize static text can be overwritten # url(r"^visualize/", include("visualize.urls")), | 2.046643 | 2 |
research/cv/3dcnn/src/loss.py | leelige/mindspore | 77 | 6624087 | <filename>research/cv/3dcnn/src/loss.py<gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
python loss.py
"""
import mindspore
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.numpy as mnp
from mindspore import Tensor
class LabelTransform(nn.Cell):
    """Turn an integer label map into the two ground-truth encodings used by
    the network: a binary (foreground/background) one-hot and a full
    num_classes one-hot."""
    def __init__(self, num_classes=5):
        super(LabelTransform, self).__init__()
        # 2-class one-hot for the binary "any tumour" mask.
        self.onehot1 = nn.OneHot(depth=2, axis=-1)
        # Full one-hot over all segmentation classes.
        self.onehot2 = nn.OneHot(depth=num_classes, axis=-1)
        self.num_classes = num_classes
        # Scalar constants used by mnp.where (graph mode needs Tensors).
        self.T = Tensor(1, mindspore.int32)
        self.F = Tensor(0, mindspore.int32)
    def construct(self, label):
        """Return (binary one-hot, multi-class one-hot) for `label`."""
        # Collapse every non-zero class into 1 to get the foreground mask.
        label1 = mnp.where(label > 0, self.T, self.F)
        flair_t2_gt_node = self.onehot1(label1)
        t1_t1ce_gt_node = self.onehot2(label)
        return flair_t2_gt_node, t1_t1ce_gt_node
class SegmentationLoss(nn.Cell):
    """Sum of two softmax cross-entropy losses: a binary (flair/t2) branch and
    a multi-class (t1/t1ce) branch, both computed per-voxel."""
    def __init__(self, num_classes=5):
        super(SegmentationLoss, self).__init__()
        self.label_transform = LabelTransform(num_classes)
        self.reshape = ops.Reshape()
        self.transpose = ops.Transpose()
        self.loss = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
        self.num_classes = num_classes
    def construct(self, flair_t2_score, t1_t1ce_score, label):
        """Return flair/t2 loss + t1/t1ce loss for the given label map."""
        flair_t2_gt_node, t1_t1ce_gt_node = self.label_transform(label)
        # Move axis 1 to the end (assumes scores are (N, C, D, H, W) --
        # TODO confirm), then flatten to (voxels, classes) for the loss.
        flair_t2_score = self.transpose(flair_t2_score, (0, 2, 3, 4, 1))
        flair_t2_gt_node = self.reshape(flair_t2_gt_node, (-1, 2))
        flair_t2_score = self.reshape(flair_t2_score, (-1, 2))
        t1_t1ce_score = self.transpose(t1_t1ce_score, (0, 2, 3, 4, 1))
        t1_t1ce_gt_node = self.reshape(t1_t1ce_gt_node, (-1, self.num_classes))
        t1_t1ce_score = self.reshape(t1_t1ce_score, (-1, self.num_classes))
        flair_t2_loss = self.loss(labels=flair_t2_gt_node, logits=flair_t2_score)
        t1_t1ce_loss = self.loss(labels=t1_t1ce_gt_node, logits=t1_t1ce_score)
        # Unweighted sum of the two branch losses.
        loss = flair_t2_loss + t1_t1ce_loss
        return loss
class NetWithLoss(nn.Cell):
    """Wrap a two-branch segmentation network with SegmentationLoss so the
    cell returns a scalar loss (the form MindSpore trainers expect)."""
    def __init__(self, network, num_classes=5):
        super(NetWithLoss, self).__init__()
        self.net = network
        self.loss_func = SegmentationLoss(num_classes)
        self.num_classes = num_classes
    def construct(self, flair_t2_node, t1_t1ce_node, label):
        """Forward both modality inputs through the network, return the loss."""
        flair_t2_score, t1_t1ce_score = self.net(flair_t2_node, t1_t1ce_node)
        loss = self.loss_func(flair_t2_score, t1_t1ce_score, label)
        return loss
| <filename>research/cv/3dcnn/src/loss.py<gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
python loss.py
"""
import mindspore
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.numpy as mnp
from mindspore import Tensor
class LabelTransform(nn.Cell):
""" label transform """
def __init__(self, num_classes=5):
super(LabelTransform, self).__init__()
self.onehot1 = nn.OneHot(depth=2, axis=-1)
self.onehot2 = nn.OneHot(depth=num_classes, axis=-1)
self.num_classes = num_classes
self.T = Tensor(1, mindspore.int32)
self.F = Tensor(0, mindspore.int32)
def construct(self, label):
""" Construct label transform """
label1 = mnp.where(label > 0, self.T, self.F)
flair_t2_gt_node = self.onehot1(label1)
t1_t1ce_gt_node = self.onehot2(label)
return flair_t2_gt_node, t1_t1ce_gt_node
class SegmentationLoss(nn.Cell):
""" segmentation loss """
def __init__(self, num_classes=5):
super(SegmentationLoss, self).__init__()
self.label_transform = LabelTransform(num_classes)
self.reshape = ops.Reshape()
self.transpose = ops.Transpose()
self.loss = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
self.num_classes = num_classes
def construct(self, flair_t2_score, t1_t1ce_score, label):
""" Construct segmentation loss """
flair_t2_gt_node, t1_t1ce_gt_node = self.label_transform(label)
flair_t2_score = self.transpose(flair_t2_score, (0, 2, 3, 4, 1))
flair_t2_gt_node = self.reshape(flair_t2_gt_node, (-1, 2))
flair_t2_score = self.reshape(flair_t2_score, (-1, 2))
t1_t1ce_score = self.transpose(t1_t1ce_score, (0, 2, 3, 4, 1))
t1_t1ce_gt_node = self.reshape(t1_t1ce_gt_node, (-1, self.num_classes))
t1_t1ce_score = self.reshape(t1_t1ce_score, (-1, self.num_classes))
flair_t2_loss = self.loss(labels=flair_t2_gt_node, logits=flair_t2_score)
t1_t1ce_loss = self.loss(labels=t1_t1ce_gt_node, logits=t1_t1ce_score)
loss = flair_t2_loss + t1_t1ce_loss
return loss
class NetWithLoss(nn.Cell):
""" NetWithLoss """
def __init__(self, network, num_classes=5):
super(NetWithLoss, self).__init__()
self.net = network
self.loss_func = SegmentationLoss(num_classes)
self.num_classes = num_classes
def construct(self, flair_t2_node, t1_t1ce_node, label):
""" Construct NetWithLoss """
flair_t2_score, t1_t1ce_score = self.net(flair_t2_node, t1_t1ce_node)
loss = self.loss_func(flair_t2_score, t1_t1ce_score, label)
return loss
| en | 0.799096 | # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ python loss.py label transform Construct label transform segmentation loss Construct segmentation loss NetWithLoss Construct NetWithLoss | 2.182154 | 2 |
boucanpy/db/migrate/history.py | bbhunter/boucanpy | 34 | 6624088 | from boucanpy.db.migrate.config import get_config
# late import of alembic because it destroys loggers
def history(directory=None, rev_range=None, verbose=False, indicate_current=False):
from alembic import command
"""List changeset scripts in chronological order."""
config = get_config(directory)
command.history(
config, rev_range, verbose=verbose, indicate_current=indicate_current
)
| from boucanpy.db.migrate.config import get_config
# late import of alembic because it destroys loggers
def history(directory=None, rev_range=None, verbose=False, indicate_current=False):
from alembic import command
"""List changeset scripts in chronological order."""
config = get_config(directory)
command.history(
config, rev_range, verbose=verbose, indicate_current=indicate_current
)
| en | 0.711118 | # late import of alembic because it destroys loggers List changeset scripts in chronological order. | 2.098748 | 2 |
tests/strats.py | Tinche/flattrs | 7 | 6624089 | from struct import pack, unpack
from hypothesis.strategies import composite, integers, floats
uint8s = integers(0, 2 ** 8 - 1)
uint16s = integers(0, 2 ** 16 - 1)
uint32s = integers(0, 2 ** 32 - 1)
uint64s = integers(0, 2 ** 64 - 1)
int8s = integers(-(2 ** 7), (2 ** 7) - 1)
int16s = integers(-(2 ** 15), (2 ** 15) - 1)
int32s = integers(-(2 ** 31), (2 ** 31) - 1)
int64s = integers(-(2 ** 63), (2 ** 63) - 1)
float64s = floats(allow_nan=False)
@composite
def float32s(draw):
    """Strategy for float32 values: draw a float64 and round-trip it through
    IEEE-754 binary32 so the result is exactly representable as a C float.
    """
    val = draw(float64s)
    try:
        return unpack("f", pack("f", val))[0]
    except OverflowError:
        # struct.pack("f", ...) raises OverflowError for finite float64 values
        # outside the float32 range (~3.4e38); map them to the signed infinity
        # that a float32 conversion overflows to instead of crashing the draw.
        return float("inf") if val > 0 else float("-inf")
float32s = float32s()
| from struct import pack, unpack
from hypothesis.strategies import composite, integers, floats
uint8s = integers(0, 2 ** 8 - 1)
uint16s = integers(0, 2 ** 16 - 1)
uint32s = integers(0, 2 ** 32 - 1)
uint64s = integers(0, 2 ** 64 - 1)
int8s = integers(-(2 ** 7), (2 ** 7) - 1)
int16s = integers(-(2 ** 15), (2 ** 15) - 1)
int32s = integers(-(2 ** 31), (2 ** 31) - 1)
int64s = integers(-(2 ** 63), (2 ** 63) - 1)
float64s = floats(allow_nan=False)
@composite
def float32s(draw):
val = draw(float64s)
return unpack("f", pack("f", val))[0]
float32s = float32s()
| none | 1 | 2.477077 | 2 | |
fsl-feat/interfaces.py | oesteban/misc | 2 | 6624090 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import nibabel as nb
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from scipy.stats import pearsonr
from nipype import logging
from nipype.interfaces.base import (
BaseInterfaceInputSpec, TraitedSpec, SimpleInterface,
traits, File, OutputMultiPath, isdefined,
CommandLine, CommandLineInputSpec
)
from nipype.interfaces.ants.resampling import ApplyTransforms
logger = logging.getLogger('workflow')
class FixHeaderApplyTransforms(ApplyTransforms):
    """
    A replacement for nipype.interfaces.ants.resampling.ApplyTransforms that
    fixes the resampled image header to match the xform of the reference
    image
    """
    def _run_interface(self, runtime, correct_return_codes=(0,)):
        # Run normally
        runtime = super(FixHeaderApplyTransforms, self)._run_interface(
            runtime, correct_return_codes)
        # Overwrite the qform/sform of the file antsApplyTransforms wrote so
        # they match the reference image (see _copyxform below).
        _copyxform(self.inputs.reference_image,
                   os.path.abspath(self._gen_filename('output_image')))
        return runtime
class FDRInputSpec(CommandLineInputSpec):
    """Inputs for FSL's ``fdr`` command-line tool."""
    in_file = File(exists=True, mandatory=True, argstr='-i %s',
                   desc='input pstat file')
    in_mask = File(exists=True, argstr='-m %s', desc='mask file')
    q_value = traits.Float(0.05, argstr='-q %f', usedefault=True,
                           desc='q-value (FDR) threshold')
class FDROutputSpec(TraitedSpec):
    """Output of the FDR interface: the computed probability threshold."""
    fdr_val = traits.Float()
class FDR(CommandLine):
    """Wrap FSL ``fdr`` and parse the FDR threshold from its stdout."""
    _cmd = 'fdr'
    input_spec = FDRInputSpec
    output_spec = FDROutputSpec
    def _run_interface(self, runtime):
        # Capture stdout separately so it can be parsed below.
        self.terminal_output = 'file_split'
        runtime = super(FDR, self)._run_interface(runtime)
        # NOTE(review): assumes the threshold is on the SECOND line of stdout
        # (fdr prints a header line first) -- confirm against the FSL version.
        fdr = float(runtime.stdout.splitlines()[1])
        setattr(self, 'result', fdr)
        return runtime
    def _list_outputs(self):
        return {'fdr_val': getattr(self, 'result')}
class PtoZInputSpec(CommandLineInputSpec):
    """Inputs for FSL's ``ptoz`` command-line tool."""
    p_value = traits.Float(0.05, argstr='%f', usedefault=True, position=1,
                           desc='p-value (PtoZ) threshold')
    twotail = traits.Bool(False, argstr='-2', usedefault=True, position=2,
                          desc='use 2-tailed conversion (default is 1-tailed)')
    resels = traits.Float(argstr='-g %f', position=-1,
                          desc='use GRF maximum-height theory instead of Gaussian pdf')
class PtoZOutputSpec(TraitedSpec):
    """Output of the PtoZ interface: the equivalent z-statistic."""
    z_val = traits.Float()
class PtoZ(CommandLine):
    """Wrap FSL ``ptoz`` and parse the z value from its stdout."""
    _cmd = 'ptoz'
    input_spec = PtoZInputSpec
    output_spec = PtoZOutputSpec
    def _run_interface(self, runtime):
        # Capture stdout separately so it can be parsed below.
        self.terminal_output = 'file_split'
        runtime = super(PtoZ, self)._run_interface(runtime)
        # ptoz prints the converted value on the first line of stdout.
        zval = float(runtime.stdout.splitlines()[0])
        setattr(self, 'result', zval)
        return runtime
    def _list_outputs(self):
        return {'z_val': getattr(self, 'result')}
class CorrelationInputSpec(BaseInterfaceInputSpec):
    """Inputs for the Correlation interface (two images + optional mask)."""
    in_file1 = File(exists=True, mandatory=True, desc='input file 1')
    in_file2 = File(exists=True, mandatory=True, desc='input file 2')
    in_mask = File(exists=True, desc='input mask')
    metric = traits.Enum('pearson', 'distance', usedefault=True,
                         desc='correlation metric')
    # Percentage of masked voxels to keep for the distance metric (100 = all).
    subsample = traits.Float(100.0, usedefault=True)
class CorrelationOutputSpec(TraitedSpec):
    """Output of the Correlation interface: the scalar correlation value."""
    out_corr = traits.Float()
class Correlation(SimpleInterface):
    """
    Compute a scalar similarity between two images over a mask, using either
    Pearson correlation or distance correlation (see ``distcorr`` below).
    """
    input_spec = CorrelationInputSpec
    output_spec = CorrelationOutputSpec
    def _run_interface(self, runtime):
        im1 = nb.load(self.inputs.in_file1).get_data()
        im2 = nb.load(self.inputs.in_file2).get_data()
        # Default: correlate over every voxel.
        mask = np.ones_like(im1, dtype=bool)
        if isdefined(self.inputs.in_mask):
            mask = nb.load(
                self.inputs.in_mask).get_data() > 0.0
        if self.inputs.metric == 'pearson':
            corr = float(pearsonr(im1[mask], im2[mask])[0])
        else:
            # Distance correlation is O(n^2) in voxel count, so optionally
            # work on a random subsample of the masked voxels.
            # NOTE(review): np.random.choice is unseeded here, so results are
            # not reproducible run-to-run -- confirm this is intended.
            if 0 < self.inputs.subsample < 100:
                nvox = int(mask.sum())
                logger.info('before: %d', nvox)
                size = int(nvox * self.inputs.subsample) // 100
                reshaped = np.zeros_like(mask[mask], dtype=bool)
                indexes = np.random.choice(
                    range(nvox), size=size, replace=False)
                reshaped[indexes] = True
                # Keep only the sampled voxels inside the mask.
                mask[mask] = reshaped
                logger.info('after: %d', mask.sum())
            corr = float(distcorr(im1[mask], im2[mask]))
        self._results['out_corr'] = corr
        return runtime
class EventsFilesForTaskInputSpec(BaseInterfaceInputSpec):
    """Inputs: a BIDS events file and the task it belongs to."""
    in_file = File(mandatory=True, desc='input file, part of a BIDS tree')
    task = traits.Str(mandatory=True, desc='task')
class EventsFilesForTaskOutputSpec(TraitedSpec):
    """Outputs: FSL EV text files plus the FEAT orthogonalization matrix."""
    event_files = OutputMultiPath(File(exists=True), desc='event files')
    orthogonalization = traits.Dict(int, traits.Dict(int, int),
                                    desc='orthogonalization')
class EventsFilesForTask(SimpleInterface):
    """
    Split a BIDS ``stopsignal`` events file into the per-condition EV text
    files FSL FEAT expects, and build the EV orthogonalization matrix for the
    reaction-time modulators.
    """
    input_spec = EventsFilesForTaskInputSpec
    output_spec = EventsFilesForTaskOutputSpec
    def _run_interface(self, runtime):
        if self.inputs.task != 'stopsignal':
            raise NotImplementedError(
                'This function was not designed for tasks other than "stopsignal". '
                'Task "%s" cannot be processed' % self.inputs.task)
        events = pd.read_csv(self.inputs.in_file, sep="\t", na_values='n/a')
        self._results['event_files'] = []
        # Six EVs; the matrix marks which EV is orthogonalized w.r.t. which.
        nEV = 6
        self._results['orthogonalization'] = {
            x: {y: 0 for y in range(1, nEV + 1)} for x in range(1, nEV + 1)
        }
        # EV1: successful GO trials; EV2: their reaction-time modulator.
        go_table = events[(events.TrialOutcome == "SuccessfulGo")]
        self._results['event_files'].append(
            create_ev(go_table, out_name="GO", duration=1, amplitude=1,
                      out_dir=runtime.cwd))
        self._results['event_files'].append(create_ev(
            go_table, out_name="GO_rt", duration='ReactionTime',
            amplitude=1, out_dir=runtime.cwd))
        # Orthogonalize EV2 against EV1.
        # NOTE(review): key 0 is outside the 1..nEV inner-dict range built
        # above (FEAT's "ortho wrt EV0" = all EVs?) -- confirm intended.
        self._results['orthogonalization'][2][1] = 1
        self._results['orthogonalization'][2][0] = 1
        # EV3: successful STOP trials.
        stop_success_table = events[(events.TrialOutcome == "SuccessfulStop")]
        self._results['event_files'].append(create_ev(
            stop_success_table, out_name="STOP_SUCCESS",
            duration=1, amplitude=1, out_dir=runtime.cwd))
        # EV4: unsuccessful STOP trials; EV5: their reaction-time modulator.
        stop_unsuccess_table = events[(events.TrialOutcome == "UnsuccessfulStop")]
        self._results['event_files'].append(create_ev(
            stop_unsuccess_table, out_name="STOP_UNSUCCESS",
            duration=1, amplitude=1, out_dir=runtime.cwd))
        self._results['event_files'].append(create_ev(
            stop_unsuccess_table, out_name="STOP_UNSUCCESS_rt",
            duration='ReactionTime', amplitude=1, out_dir=runtime.cwd))
        # Orthogonalize EV5 against EV4.
        self._results['orthogonalization'][5][4] = 1
        self._results['orthogonalization'][5][0] = 1
        # Optional EV6: junk trials, only written when present.
        junk_table = events[(events.TrialOutcome == "JUNK")]
        if len(junk_table) > 0:
            self._results['event_files'].append(create_ev(
                junk_table, out_name="JUNK",
                duration=1, amplitude=1, out_dir=runtime.cwd))
        return runtime
def create_ev(dataframe, out_dir, out_name, duration=1, amplitude=1):
    """
    Adapt a BIDS-compliant events file to a format compatible with FSL feat

    Args:
        dataframe: events file from BIDS spec (must have an ``onset`` column)
        out_dir: path where new events file will be stored
        out_name: filename (without extension) for the new events file
        duration: scalar value, or name of a column to take durations from
        amplitude: scalar value, or name of a column to use as a de-meaned
            parametric modulator

    Returns:
        Full path to the new events file
    """
    # Work on an explicit copy: the boolean filter returns a new frame and
    # assigning into it triggers pandas chained-assignment warnings (and is
    # unreliable under copy-on-write semantics).
    dataframe = dataframe[dataframe.onset.notnull()].copy()
    dataframe['onset'] = dataframe['onset'].round(3)

    if isinstance(duration, (float, int)):
        # Constant duration for every event.
        dataframe['duration'] = duration
    elif isinstance(duration, str):
        # Per-event duration taken from the named column.
        dataframe['duration'] = dataframe[duration].round(3)

    if isinstance(amplitude, (float, int)):
        # Constant weight for every event.
        dataframe['weights'] = amplitude
    elif isinstance(amplitude, str):
        # De-mean the modulator so it is orthogonal to the main regressor.
        dataframe['weights'] = (
            dataframe[amplitude] - dataframe[amplitude].mean()).round(3)

    # Write the three-column (onset, duration, weight) FSL EV file.
    ev_file = os.path.join(out_dir, '%s.txt' % out_name)
    dataframe[['onset', 'duration', 'weights']].to_csv(
        ev_file, sep="\t", header=False, index=False)
    return ev_file
def _copyxform(ref_image, out_image, message=None):
    """Rewrite ``out_image`` in place so its qform/sform (and affine) match
    ``ref_image``; ``message`` is recorded in the NIfTI ``descrip`` field."""
    # Read in reference and output
    resampled = nb.load(out_image)
    orig = nb.load(ref_image)
    # Copy xform infos (both the matrices and their NIfTI codes)
    qform, qform_code = orig.header.get_qform(coded=True)
    sform, sform_code = orig.header.get_sform(coded=True)
    header = resampled.header.copy()
    header.set_qform(qform, int(qform_code))
    header.set_sform(sform, int(sform_code))
    header['descrip'] = 'xform matrices modified by %s.' % (message or '(unknown)')
    # Rebuild the image with the reference affine and overwrite the file.
    newimg = resampled.__class__(resampled.get_data(), orig.affine, header)
    newimg.to_filename(out_image)
def distcorr(X, Y):
    """ Compute the distance correlation function

    >>> a = [1,2,3,4,5]
    >>> b = np.array([1,2,9,4,4])
    >>> distcorr(a, b)
    0.762676242417
    """
    u = np.atleast_1d(X).astype(float)
    v = np.atleast_1d(Y).astype(float)
    # Promote flat vectors to column vectors so pdist sees a single feature.
    if np.prod(u.shape) == len(u):
        u = u[:, None]
    if np.prod(v.shape) == len(v):
        v = v[:, None]
    u = np.atleast_2d(u)
    v = np.atleast_2d(v)
    n_samples = u.shape[0]
    if v.shape[0] != n_samples:
        raise ValueError('Number of samples must match')
    # Pairwise Euclidean distance matrices for each sample.
    dist_u = squareform(pdist(u))
    dist_v = squareform(pdist(v))
    # Double-center each distance matrix (subtract row/column means, add grand mean).
    cu = dist_u - dist_u.mean(axis=0)[None, :] - dist_u.mean(axis=1)[:, None] + dist_u.mean()
    cv = dist_v - dist_v.mean(axis=0)[None, :] - dist_v.mean(axis=1)[:, None] + dist_v.mean()
    denom = float(n_samples * n_samples)
    # Squared distance covariance / variances.
    dcov2_xy = (cu * cv).sum() / denom
    dcov2_xx = (cu * cu).sum() / denom
    dcov2_yy = (cv * cv).sum() / denom
    return np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import nibabel as nb
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from scipy.stats import pearsonr
from nipype import logging
from nipype.interfaces.base import (
BaseInterfaceInputSpec, TraitedSpec, SimpleInterface,
traits, File, OutputMultiPath, isdefined,
CommandLine, CommandLineInputSpec
)
from nipype.interfaces.ants.resampling import ApplyTransforms
logger = logging.getLogger('workflow')
class FixHeaderApplyTransforms(ApplyTransforms):
"""
A replacement for nipype.interfaces.ants.resampling.ApplyTransforms that
fixes the resampled image header to match the xform of the reference
image
"""
def _run_interface(self, runtime, correct_return_codes=(0,)):
# Run normally
runtime = super(FixHeaderApplyTransforms, self)._run_interface(
runtime, correct_return_codes)
_copyxform(self.inputs.reference_image,
os.path.abspath(self._gen_filename('output_image')))
return runtime
class FDRInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='-i %s',
desc='input pstat file')
in_mask = File(exists=True, argstr='-m %s', desc='mask file')
q_value = traits.Float(0.05, argstr='-q %f', usedefault=True,
desc='q-value (FDR) threshold')
class FDROutputSpec(TraitedSpec):
fdr_val = traits.Float()
class FDR(CommandLine):
_cmd = 'fdr'
input_spec = FDRInputSpec
output_spec = FDROutputSpec
def _run_interface(self, runtime):
self.terminal_output = 'file_split'
runtime = super(FDR, self)._run_interface(runtime)
fdr = float(runtime.stdout.splitlines()[1])
setattr(self, 'result', fdr)
return runtime
def _list_outputs(self):
return {'fdr_val': getattr(self, 'result')}
class PtoZInputSpec(CommandLineInputSpec):
p_value = traits.Float(0.05, argstr='%f', usedefault=True, position=1,
desc='p-value (PtoZ) threshold')
twotail = traits.Bool(False, argstr='-2', usedefault=True, position=2,
desc='use 2-tailed conversion (default is 1-tailed)')
resels = traits.Float(argstr='-g %f', position=-1,
desc='use GRF maximum-height theory instead of Gaussian pdf')
class PtoZOutputSpec(TraitedSpec):
z_val = traits.Float()
class PtoZ(CommandLine):
_cmd = 'ptoz'
input_spec = PtoZInputSpec
output_spec = PtoZOutputSpec
def _run_interface(self, runtime):
self.terminal_output = 'file_split'
runtime = super(PtoZ, self)._run_interface(runtime)
zval = float(runtime.stdout.splitlines()[0])
setattr(self, 'result', zval)
return runtime
def _list_outputs(self):
return {'z_val': getattr(self, 'result')}
class CorrelationInputSpec(BaseInterfaceInputSpec):
in_file1 = File(exists=True, mandatory=True, desc='input file 1')
in_file2 = File(exists=True, mandatory=True, desc='input file 2')
in_mask = File(exists=True, desc='input mask')
metric = traits.Enum('pearson', 'distance', usedefault=True,
desc='correlation metric')
subsample = traits.Float(100.0, usedefault=True)
class CorrelationOutputSpec(TraitedSpec):
out_corr = traits.Float()
class Correlation(SimpleInterface):
"""
"""
input_spec = CorrelationInputSpec
output_spec = CorrelationOutputSpec
def _run_interface(self, runtime):
im1 = nb.load(self.inputs.in_file1).get_data()
im2 = nb.load(self.inputs.in_file2).get_data()
mask = np.ones_like(im1, dtype=bool)
if isdefined(self.inputs.in_mask):
mask = nb.load(
self.inputs.in_mask).get_data() > 0.0
if self.inputs.metric == 'pearson':
corr = float(pearsonr(im1[mask], im2[mask])[0])
else:
if 0 < self.inputs.subsample < 100:
nvox = int(mask.sum())
logger.info('before: %d', nvox)
size = int(nvox * self.inputs.subsample) // 100
reshaped = np.zeros_like(mask[mask], dtype=bool)
indexes = np.random.choice(
range(nvox), size=size, replace=False)
reshaped[indexes] = True
mask[mask] = reshaped
logger.info('after: %d', mask.sum())
corr = float(distcorr(im1[mask], im2[mask]))
self._results['out_corr'] = corr
return runtime
class EventsFilesForTaskInputSpec(BaseInterfaceInputSpec):
in_file = File(mandatory=True, desc='input file, part of a BIDS tree')
task = traits.Str(mandatory=True, desc='task')
class EventsFilesForTaskOutputSpec(TraitedSpec):
event_files = OutputMultiPath(File(exists=True), desc='event files')
orthogonalization = traits.Dict(int, traits.Dict(int, int),
desc='orthogonalization')
class EventsFilesForTask(SimpleInterface):
    """
    Split a BIDS ``stopsignal`` events file into FSL FEAT EV text files
    (GO, GO_rt, STOP_SUCCESS, STOP_UNSUCCESS, STOP_UNSUCCESS_rt and, when
    junk trials exist, JUNK) and build the matching orthogonalization matrix.
    """
    input_spec = EventsFilesForTaskInputSpec
    output_spec = EventsFilesForTaskOutputSpec

    def _run_interface(self, runtime):
        if self.inputs.task != 'stopsignal':
            raise NotImplementedError(
                'This function was not designed for tasks other than "stopsignal". '
                'Task "%s" cannot be processed' % self.inputs.task)

        events = pd.read_csv(self.inputs.in_file, sep="\t", na_values='n/a')
        self._results['event_files'] = []
        # Fixed 6x6 EV grid with 1-based indices (FEAT convention);
        # entry [x][y] == 1 marks EV x as orthogonalized w.r.t. EV y.
        nEV = 6
        self._results['orthogonalization'] = {
            x: {y: 0 for y in range(1, nEV + 1)} for x in range(1, nEV + 1)
        }
        # EV1/EV2: successful go trials and their reaction-time modulation.
        go_table = events[(events.TrialOutcome == "SuccessfulGo")]
        self._results['event_files'].append(
            create_ev(go_table, out_name="GO", duration=1, amplitude=1,
                      out_dir=runtime.cwd))
        self._results['event_files'].append(create_ev(
            go_table, out_name="GO_rt", duration='ReactionTime',
            amplitude=1, out_dir=runtime.cwd))
        # Orthogonalize the RT modulator (EV2) against its parent GO EV (EV1).
        self._results['orthogonalization'][2][1] = 1
        # NOTE(review): key 0 lies outside the declared 1..nEV inner grid;
        # presumably FEAT's "orthogonalize w.r.t. all" column -- confirm.
        self._results['orthogonalization'][2][0] = 1
        # EV3: successful stop trials (fixed duration, no modulation).
        stop_success_table = events[(events.TrialOutcome == "SuccessfulStop")]
        self._results['event_files'].append(create_ev(
            stop_success_table, out_name="STOP_SUCCESS",
            duration=1, amplitude=1, out_dir=runtime.cwd))
        # EV4/EV5: unsuccessful stop trials and their RT modulation.
        stop_unsuccess_table = events[(events.TrialOutcome == "UnsuccessfulStop")]
        self._results['event_files'].append(create_ev(
            stop_unsuccess_table, out_name="STOP_UNSUCCESS",
            duration=1, amplitude=1, out_dir=runtime.cwd))
        self._results['event_files'].append(create_ev(
            stop_unsuccess_table, out_name="STOP_UNSUCCESS_rt",
            duration='ReactionTime', amplitude=1, out_dir=runtime.cwd))
        self._results['orthogonalization'][5][4] = 1
        self._results['orthogonalization'][5][0] = 1
        # EV6 (optional): junk trials, only written when any exist.
        junk_table = events[(events.TrialOutcome == "JUNK")]
        if len(junk_table) > 0:
            self._results['event_files'].append(create_ev(
                junk_table, out_name="JUNK",
                duration=1, amplitude=1, out_dir=runtime.cwd))
        return runtime
def create_ev(dataframe, out_dir, out_name, duration=1, amplitude=1):
    """
    Adapt a BIDS-compliant events file to a format compatible with FSL feat

    Args:
        dataframe: events table from the BIDS spec (``pandas.DataFrame``)
        out_dir: path where the new events file will be stored
        out_name: filename (without extension) for the new events file
        duration: constant value, or name of a column to read durations from
        amplitude: constant value, or name of a column; column values are
            mean-centered before being written
    Returns:
        Full path to the new events file
    """
    # Work on a copy: the original filtered-view assignments mutated a slice
    # of the caller's frame and triggered pandas SettingWithCopy warnings.
    dataframe = dataframe[dataframe.onset.notnull()].copy()
    dataframe['onset'] = dataframe['onset'].round(3)

    if isinstance(duration, (float, int)):
        dataframe['duration'] = duration
    elif isinstance(duration, str):
        # Single-bracket selection assigns a Series, not a 1-column DataFrame.
        dataframe['duration'] = dataframe[duration].round(3)

    if isinstance(amplitude, (float, int)):
        dataframe['weights'] = amplitude
    elif isinstance(amplitude, str):
        # Demean the modulator so it is orthogonal to the main regressor.
        dataframe['weights'] = dataframe[amplitude] - dataframe[amplitude].mean()
    dataframe['weights'] = dataframe['weights'].round(3)

    # Write the three-column (onset, duration, weight) FSL EV file.
    ev_file = os.path.join(out_dir, '%s.txt' % out_name)
    dataframe[['onset', 'duration', 'weights']].to_csv(
        ev_file, sep="\t", header=False, index=False)
    return ev_file
def _copyxform(ref_image, out_image, message=None):
    """Overwrite out_image's qform/sform and affine with those of ref_image."""
    ref = nb.load(ref_image)
    moving = nb.load(out_image)

    # Pull the coded transforms from the reference volume.
    qform, qform_code = ref.header.get_qform(coded=True)
    sform, sform_code = ref.header.get_sform(coded=True)

    hdr = moving.header.copy()
    hdr.set_qform(qform, int(qform_code))
    hdr.set_sform(sform, int(sform_code))
    hdr['descrip'] = 'xform matrices modified by %s.' % (message or '(unknown)')

    # Rebuild the image with the reference affine and save over the original.
    fixed = moving.__class__(moving.get_data(), ref.affine, hdr)
    fixed.to_filename(out_image)
def distcorr(X, Y):
    """Compute the distance correlation between two samples.

    Distance correlation (Szekely, Rizzo & Bakirov, 2007) is zero only when
    the variables are independent and 1 for exact linear relationships.

    The original doctest asserted a bare float (``0.762676242417``) whose
    repr can never match; rounding makes it verifiable:

    >>> a = [1, 2, 3, 4, 5]
    >>> b = np.array([1, 2, 9, 4, 4])
    >>> round(float(distcorr(a, b)), 12)
    0.762676242417

    Args:
        X, Y: array-likes with the same number of samples; 1-D inputs are
            treated as n single-feature observations.
    Returns:
        Distance correlation in [0, 1].
    Raises:
        ValueError: if X and Y have a different number of samples.
    """
    X = np.atleast_1d(X).astype(float)
    Y = np.atleast_1d(Y).astype(float)
    # Promote 1-D vectors to column matrices (n samples x 1 feature).
    if np.prod(X.shape) == len(X):
        X = X[:, None]
    if np.prod(Y.shape) == len(Y):
        Y = Y[:, None]
    X = np.atleast_2d(X)
    Y = np.atleast_2d(Y)
    n = X.shape[0]
    if Y.shape[0] != X.shape[0]:
        raise ValueError('Number of samples must match')

    # Doubly-centered pairwise Euclidean distance matrices.
    a = squareform(pdist(X))
    b = squareform(pdist(Y))
    A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
    B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()

    # Sample distance covariance and variances.
    dcov2_xy = (A * B).sum() / float(n * n)
    dcov2_xx = (A * A).sum() / float(n * n)
    dcov2_yy = (B * B).sum() / float(n * n)
    dcor = np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
    return dcor
| en | 0.71084 | #!/usr/bin/env python # -*- coding: utf-8 -*- A replacement for nipype.interfaces.ants.resampling.ApplyTransforms that fixes the resampled image header to match the xform of the reference image # Run normally Adapt a BIDS-compliant events file to a format compatible with FSL feat Args: dataframe: events file from BIDS spec out_dir: path where new events file will be stored out_name: filename for the new events file amplitude: value or variable name duration: value or variable name Returns: Full path to the new events file # Prepare file # Read in reference and output # Copy xform infos Compute the distance correlation function >>> a = [1,2,3,4,5] >>> b = np.array([1,2,9,4,4]) >>> distcorr(a, b) 0.762676242417 | 2.069535 | 2 |
{{cookiecutter.repo_name}}/config/settings.py | st4lk/cookiecutter-django | 2 | 6624091 | <filename>{{cookiecutter.repo_name}}/config/settings.py
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = (
('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
)
# Settings checklist https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '__{default_secret_key}__')
DEBUG = os.environ.get('DJANGO_DEBUG', False)
ALLOWED_HOSTS = [os.environ.get('DJANGO_ALLOWED_HOSTS')] if os.environ.get('DJANGO_ALLOWED_HOSTS', None) else []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# local apps
{% if cookiecutter.custom_user.lower() == 'true' %} 'apps.users',{% endif %}
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': os.environ.get('DJANGO_TEMPLATE_DEBUG', os.environ.get('DJANGO_DEBUG', False)),
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('DJANGO_DB_HOST', 'localhost'),
'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),
'NAME': os.environ.get('DJANGO_DB_NAME', '{{cookiecutter.repo_name}}'),
'USER': os.environ.get('DJANGO_DB_USER', '{{cookiecutter.repo_name}}'),
'PASSWORD': os.environ.get('DJANGO_DB_PASSWORD', '{{cookiecutter.repo_name}}'),
'TEST_CHARSET': 'utf8',
'CONN_MAX_AGE': os.environ.get('DJANGO_DB_CONN_MAX_AGE', 60),
'ATOMIC_REQUESTS': os.environ.get('DJANGO_DB_ATOMIC_REQUESTS', 'True').capitalize() == 'True',
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = '/var/www/{{cookiecutter.project_name}}/static' # dir must have corresponding access rights
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = '/var/www/{{cookiecutter.project_name}}/media' # dir must have corresponding access rights
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
{% if cookiecutter.custom_user.lower() == 'true' %}
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
{% endif %}
# Mail settings
# dummy server:
# python -m smtpd -n -c DebuggingServer localhost:1025
EMAIL_HOST = os.environ.get('DJANGO_EMAIL_HOST', "localhost")
EMAIL_PORT = os.environ.get('DJANGO_EMAIL_PORT', 1025)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'main_formatter': {
'format': '%(levelname)s:%(name)s: %(message)s '
'(%(asctime)s; %(filename)s:%(lineno)d)',
'datefmt': "%Y-%m-%d %H:%M:%S",
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'main_formatter',
},
'null': {
"class": 'logging.NullHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
'django': {
'handlers': ['null', ],
},
'py.warnings': {
'handlers': ['null', ],
},
'': {
'handlers': ['console'],
'level': "INFO",
},
}
}
{% if cookiecutter.heroku.lower() == 'true' %}
if 'DYNO' in os.environ:
# Heroku settings
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
MEDIA_ROOT = ''
STATICFILES_DIRS = ()
{% endif %}
try:
from settings_local import *
except ImportError:
pass
| <filename>{{cookiecutter.repo_name}}/config/settings.py
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = (
('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
)
# Settings checklist https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '__{default_secret_key}__')
DEBUG = os.environ.get('DJANGO_DEBUG', False)
ALLOWED_HOSTS = [os.environ.get('DJANGO_ALLOWED_HOSTS')] if os.environ.get('DJANGO_ALLOWED_HOSTS', None) else []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# local apps
{% if cookiecutter.custom_user.lower() == 'true' %} 'apps.users',{% endif %}
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': os.environ.get('DJANGO_TEMPLATE_DEBUG', os.environ.get('DJANGO_DEBUG', False)),
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('DJANGO_DB_HOST', 'localhost'),
'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),
'NAME': os.environ.get('DJANGO_DB_NAME', '{{cookiecutter.repo_name}}'),
'USER': os.environ.get('DJANGO_DB_USER', '{{cookiecutter.repo_name}}'),
'PASSWORD': os.environ.get('DJANGO_DB_PASSWORD', '{{cookiecutter.repo_name}}'),
'TEST_CHARSET': 'utf8',
'CONN_MAX_AGE': os.environ.get('DJANGO_DB_CONN_MAX_AGE', 60),
'ATOMIC_REQUESTS': os.environ.get('DJANGO_DB_ATOMIC_REQUESTS', 'True').capitalize() == 'True',
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = '/var/www/{{cookiecutter.project_name}}/static' # dir must have corresponding access rights
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = '/var/www/{{cookiecutter.project_name}}/media' # dir must have corresponding access rights
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
{% if cookiecutter.custom_user.lower() == 'true' %}
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
{% endif %}
# Mail settings
# dummy server:
# python -m smtpd -n -c DebuggingServer localhost:1025
EMAIL_HOST = os.environ.get('DJANGO_EMAIL_HOST', "localhost")
EMAIL_PORT = os.environ.get('DJANGO_EMAIL_PORT', 1025)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'main_formatter': {
'format': '%(levelname)s:%(name)s: %(message)s '
'(%(asctime)s; %(filename)s:%(lineno)d)',
'datefmt': "%Y-%m-%d %H:%M:%S",
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'main_formatter',
},
'null': {
"class": 'logging.NullHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
'django': {
'handlers': ['null', ],
},
'py.warnings': {
'handlers': ['null', ],
},
'': {
'handlers': ['console'],
'level': "INFO",
},
}
}
{% if cookiecutter.heroku.lower() == 'true' %}
if 'DYNO' in os.environ:
# Heroku settings
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
MEDIA_ROOT = ''
STATICFILES_DIRS = ()
{% endif %}
try:
from settings_local import *
except ImportError:
pass
| en | 0.606041 | Django settings for {{cookiecutter.project_name}} project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Settings checklist https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # Application definition # local apps # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root # dir must have corresponding access rights # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root # dir must have corresponding access rights # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url # Mail settings # dummy server: # python -m smtpd -n -c DebuggingServer localhost:1025 # Heroku settings # Honor the 'X-Forwarded-Proto' header for request.is_secure() # Allow all host headers # Static asset configuration | 1.742388 | 2 |
classes/EMail.py | ZikyHD/Sigma2SplunkAlert | 85 | 6624092 | <gh_stars>10-100
class EMail:
    """E-mail alert-action settings for a Splunk saved search.

    Mandatory keys (``to``, ``subject``, ``message``) are always read from
    ``email_config``; optional keys are copied onto the instance only when
    present.  Two text blocks derived from the Sigma rule (result fields
    and MITRE ATT&CK tags) are pre-rendered for the e-mail body.
    """

    def __init__(self, email_config, sigma_uc):
        # Mandatory settings.
        self.to = email_config["to"]
        self.subject = email_config["subject"]
        self.message = email_config["message"]
        # Optional settings: attribute exists only if configured.
        for option in ("result_link", "view_link", "include_search",
                       "include_trigger", "include_trigger_time", "inline",
                       "sendcsv", "sendpdf", "sendresults"):
            if option in email_config:
                setattr(self, option, email_config[option])
        # Pre-render the text blocks derived from the Sigma use case.
        self.generateFieldsBlock(sigma_uc)
        self.generateMitreTagBlock(sigma_uc)

    def generateFieldsBlock(self, sigma_uc):
        """Render a '|field: $result.field$ ' token for every listed field."""
        if 'fields' in sigma_uc:
            tokens = ['|%s: $result.%s$ ' % (name, name)
                      for name in sigma_uc['fields']]
            self.field_block = ''.join(tokens)

    def generateMitreTagBlock(self, sigma_uc):
        """Render MITRE ATT&CK technique IDs and tactic names from rule tags."""
        if 'tags' in sigma_uc:
            tags = sigma_uc['tags']
            # Techniques: attack.tNNNN entries, 'attack.' prefix stripped.
            techniques = [t[7:] for t in tags if t.startswith('attack.t')]
            # Tactics: remaining attack.* entries, excluding groups (g)
            # and software (s) identifiers.
            tactics = [t[7:] for t in tags
                       if t.startswith('attack.')
                       and not t.startswith(('attack.t', 'attack.g', 'attack.s'))]
            block = '|Mitre ATT&CK ID: '
            for technique in techniques:
                block = block + technique + ' '
            block = block + '|Mitre ATT&CK Tactic: '
            for tactic in tactics:
                block = block + tactic + ' '
            self.mitre_block = block + '|'
| class EMail:
def __init__(self, email_config, sigma_uc):
# mandatory values
self.to = email_config["to"]
self.subject = email_config["subject"]
self.message = email_config["message"]
# optional values
if "result_link" in email_config:
self.result_link = email_config["result_link"]
if "view_link" in email_config:
self.view_link = email_config["view_link"]
if "include_search" in email_config:
self.include_search = email_config["include_search"]
if "include_trigger" in email_config:
self.include_trigger = email_config["include_trigger"]
if "include_trigger_time" in email_config:
self.include_trigger_time = email_config["include_trigger_time"]
if "inline" in email_config:
self.inline = email_config["inline"]
if "sendcsv" in email_config:
self.sendcsv = email_config["sendcsv"]
if "sendpdf" in email_config:
self.sendpdf = email_config["sendpdf"]
if "sendresults" in email_config:
self.sendresults = email_config["sendresults"]
# Generate text block based on fields value in Sigma Use Case
self.generateFieldsBlock(sigma_uc)
# Generate tag block based on tags in Sigma Use Case
self.generateMitreTagBlock(sigma_uc)
def generateFieldsBlock(self, sigma_uc):
if 'fields' in sigma_uc:
field_block = ''
for field_value in sigma_uc['fields']:
field_block = field_block + '|' + field_value + ': $result.' + field_value + '$ '
self.field_block = field_block
def generateMitreTagBlock(self, sigma_uc):
if 'tags' in sigma_uc:
mitre_block = '|Mitre ATT&CK ID: '
for tag_value in sigma_uc['tags']:
if tag_value.startswith('attack.t'):
mitre_block = mitre_block + tag_value[7:] + ' '
mitre_block = mitre_block + '|Mitre ATT&CK Tactic: '
for tag_value in sigma_uc['tags']:
if not (tag_value.startswith('attack.t') or tag_value.startswith('attack.g') or tag_value.startswith('attack.s')) and tag_value.startswith('attack.'):
mitre_block = mitre_block + tag_value[7:] + ' '
mitre_block = mitre_block + '|'
self.mitre_block = mitre_block | en | 0.453474 | # mandatory values # optional values # Generate text block based on fields value in Sigma Use Case # Generate tag block based on tags in Sigma Use Case | 2.401919 | 2 |
apphub/meta_learning/MAML/maml.py | rajesh1226/fastestimator | 1 | 6624093 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tempfile
import numpy as np
import tensorflow as tf
import fastestimator as fe
from tensorflow.keras import layers, losses
from tensorflow.keras import Sequential
from fastestimator.op import TensorOp
from fastestimator.op.tensorop import ModelOp, UpdateOp, Gradients
from fastestimator.op.tensorop.loss import MeanSquaredError, Loss
from fastestimator.trace import ModelSaver
class MetaModelOp(ModelOp):
    # Runs the model once per task in the meta-batch: `data` carries an
    # extra leading task dimension, so map over it rather than doing a
    # single batched forward pass.
    def _single_forward(self, data):
        return self.model(data, training=True)

    def forward(self, data, state):
        out = tf.map_fn(fn=self._single_forward, elems=data, dtype=tf.float32)
        return out
class MetaMSE(MeanSquaredError):
    # Per-task MSE: averages over the sample axis (axis=1) but keeps the
    # leading task axis so every task contributes its own loss value.
    def forward(self, data, state):
        true, pred = data
        out = self.loss_obj(true, pred)
        return tf.reduce_mean(out, axis=1)
class InnerGradientOp(TensorOp):
    # Computes the inner-loop (task-adaptation) gradients of the meta model.
    def __init__(self, loss, model, outputs):
        super().__init__(inputs=loss, outputs=outputs, mode="train")
        self.model = model

    def forward(self, data, state):
        loss = data
        # Reuse the persistent GradientTape that the framework recorded.
        tape = state['tape']
        gradients = tape.gradient(loss, self.model.trainable_variables)
        return gradients, self.model.trainable_variables
class InnerUpdateOp(TensorOp):
    # One explicit SGD step; returns the adapted weights as plain tensors
    # (instead of assigning variables) so the meta-gradient can
    # differentiate through the update.
    def __init__(self, inputs, outputs, inner_lr):
        super().__init__(inputs=inputs, outputs=outputs, mode="train")
        self.inner_lr = inner_lr

    def forward(self, data, state):
        g, v = data
        return [v_ - self.inner_lr * g_ for g_, v_ in zip(g, v)]
class MetaForwardOp(TensorOp):
    """Forward pass of the MLP using an explicit (adapted) weight list.

    ``model_var`` is the flat [kernel0, bias0, kernel1, bias1, ...] list
    produced by the inner-loop update, so the outer loss is evaluated with
    the adapted weights while remaining differentiable w.r.t. the originals.
    """

    def forward(self, data, state):
        x0, model_var = data

        def _single_forward(x):
            out = tf.nn.relu(tf.matmul(x, model_var[0]) + model_var[1])
            # BUG FIX: the original indexed model_var[2]/model_var[3] on every
            # iteration, re-applying the first hidden layer instead of walking
            # through the remaining hidden layers (it only worked by accident
            # for the 3-layer example network, where the loop runs once).
            for i in range(2, len(model_var) - 2, 2):
                out = tf.nn.relu(tf.matmul(out, model_var[i]) + model_var[i + 1])
            out = tf.matmul(out, model_var[-2]) + model_var[-1]
            return out

        return tf.map_fn(_single_forward, elems=x0, dtype=tf.float32)
def generate_random_sine(amp_range=[0.1, 5.0], phase_range=[0, np.pi], x_range=[-5.0, 5.0], K=10):
    """Yield endless K-shot sine-regression tasks for MAML training.

    Each task draws a random amplitude and phase, samples 2*K points on the
    curve a*sin(x + b), and splits them into meta-train and meta-test halves
    of K points each (float32 arrays of shape (K, 1)).
    """
    while True:
        amp = np.random.uniform(amp_range[0], amp_range[1])
        phase = np.random.uniform(phase_range[0], phase_range[1])
        xs = np.random.uniform(x_range[0], x_range[1], 2 * K).astype(np.float32)
        ys = amp * np.sin(xs + phase).astype(np.float32)
        yield {
            "x_meta_train": xs[:K, None],
            "x_meta_test": xs[K:, None],
            "y_meta_train": ys[:K, None],
            "y_meta_test": ys[K:, None],
            "amp": amp,
            "phase": phase,
        }
def build_sine_model():
    # Small MLP (1 -> 40 -> 40 -> 1) used as the MAML base learner.
    mdl = Sequential()
    mdl.add(layers.Dense(40, input_shape=(1, ), activation="relu"))
    mdl.add(layers.Dense(40, activation="relu"))
    mdl.add(layers.Dense(1))
    return mdl
def get_estimator(batch_size=25, epochs=1, steps_per_epoch=20000, validation_steps=None, model_dir=tempfile.mkdtemp()):
    # NOTE(review): tempfile.mkdtemp() in the default is evaluated once at
    # import time, so every call shares the same directory -- confirm intended.
    pipeline = fe.Pipeline(data={"train": generate_random_sine}, batch_size=batch_size)
    meta_model = fe.build(model_def=build_sine_model, model_name="meta_model", loss_name="meta_loss", optimizer="adam")
    # MAML wiring: adapt on the meta-train split with one inner SGD step,
    # evaluate the adapted weights on the meta-test split, then update the
    # meta model with the gradient of that outer ("meta") loss.
    network = fe.Network(ops=[
        MetaModelOp(inputs="x_meta_train", outputs="y_meta_pred", model=meta_model),
        MetaMSE(inputs=("y_meta_train", "y_meta_pred"), outputs="inner_loss"),
        InnerGradientOp(loss="inner_loss", model=meta_model, outputs=("inner_grad", "model_var")),
        InnerUpdateOp(inputs=("inner_grad", "model_var"), outputs="model_var", inner_lr=1e-3),
        MetaForwardOp(inputs=("x_meta_test", "model_var"), outputs="y_pred"),
        MetaMSE(inputs=("y_meta_test", "y_pred"), outputs="meta_loss"),
        Gradients(loss="meta_loss", models=meta_model, outputs="meta_grad"),
        UpdateOp(model=meta_model, gradients="meta_grad")
    ])
    traces = [ModelSaver(model_name="meta_model", save_dir=model_dir, save_best=False)]
    estimator = fe.Estimator(network=network,
                             pipeline=pipeline,
                             traces=traces,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch)
    return estimator
if __name__ == "__main__":
    # Train the MAML sine-regression example with default settings.
    est = get_estimator()
    est.fit()
| # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tempfile
import numpy as np
import tensorflow as tf
import fastestimator as fe
from tensorflow.keras import layers, losses
from tensorflow.keras import Sequential
from fastestimator.op import TensorOp
from fastestimator.op.tensorop import ModelOp, UpdateOp, Gradients
from fastestimator.op.tensorop.loss import MeanSquaredError, Loss
from fastestimator.trace import ModelSaver
class MetaModelOp(ModelOp):
def _single_forward(self, data):
return self.model(data, training=True)
def forward(self, data, state):
out = tf.map_fn(fn=self._single_forward, elems=data, dtype=tf.float32)
return out
class MetaMSE(MeanSquaredError):
def forward(self, data, state):
true, pred = data
out = self.loss_obj(true, pred)
return tf.reduce_mean(out, axis=1)
class InnerGradientOp(TensorOp):
def __init__(self, loss, model, outputs):
super().__init__(inputs=loss, outputs=outputs, mode="train")
self.model = model
def forward(self, data, state):
loss = data
tape = state['tape']
gradients = tape.gradient(loss, self.model.trainable_variables)
return gradients, self.model.trainable_variables
class InnerUpdateOp(TensorOp):
def __init__(self, inputs, outputs, inner_lr):
super().__init__(inputs=inputs, outputs=outputs, mode="train")
self.inner_lr = inner_lr
def forward(self, data, state):
g, v = data
return [v_ - self.inner_lr * g_ for g_, v_ in zip(g, v)]
class MetaForwardOp(TensorOp):
def forward(self, data, state):
x0, model_var = data
def _single_forward(x):
out = tf.nn.relu(tf.matmul(x, model_var[0]) + model_var[1])
for i in range(2, len(model_var) - 2, 2):
out = tf.nn.relu(tf.matmul(out, model_var[2]) + model_var[3])
out = tf.matmul(out, model_var[-2]) + model_var[-1]
return out
return tf.map_fn(_single_forward, elems=x0, dtype=tf.float32)
def generate_random_sine(amp_range=[0.1, 5.0], phase_range=[0, np.pi], x_range=[-5.0, 5.0], K=10):
while True:
a = np.random.uniform(amp_range[0], amp_range[1])
b = np.random.uniform(phase_range[0], phase_range[1])
x = np.random.uniform(x_range[0], x_range[1], 2 * K).astype(np.float32)
y = a * np.sin(x + b).astype(np.float32)
yield {
"x_meta_train": np.expand_dims(x[:K], axis=-1),
"x_meta_test": np.expand_dims(x[K:], axis=-1),
"y_meta_train": np.expand_dims(y[:K], axis=-1),
"y_meta_test": np.expand_dims(y[K:], axis=-1),
"amp": a,
"phase": b
}
def build_sine_model():
mdl = Sequential()
mdl.add(layers.Dense(40, input_shape=(1, ), activation="relu"))
mdl.add(layers.Dense(40, activation="relu"))
mdl.add(layers.Dense(1))
return mdl
def get_estimator(batch_size=25, epochs=1, steps_per_epoch=20000, validation_steps=None, model_dir=tempfile.mkdtemp()):
pipeline = fe.Pipeline(data={"train": generate_random_sine}, batch_size=batch_size)
meta_model = fe.build(model_def=build_sine_model, model_name="meta_model", loss_name="meta_loss", optimizer="adam")
network = fe.Network(ops=[
MetaModelOp(inputs="x_meta_train", outputs="y_meta_pred", model=meta_model),
MetaMSE(inputs=("y_meta_train", "y_meta_pred"), outputs="inner_loss"),
InnerGradientOp(loss="inner_loss", model=meta_model, outputs=("inner_grad", "model_var")),
InnerUpdateOp(inputs=("inner_grad", "model_var"), outputs="model_var", inner_lr=1e-3),
MetaForwardOp(inputs=("x_meta_test", "model_var"), outputs="y_pred"),
MetaMSE(inputs=("y_meta_test", "y_pred"), outputs="meta_loss"),
Gradients(loss="meta_loss", models=meta_model, outputs="meta_grad"),
UpdateOp(model=meta_model, gradients="meta_grad")
])
traces = [ModelSaver(model_name="meta_model", save_dir=model_dir, save_best=False)]
estimator = fe.Estimator(network=network,
pipeline=pipeline,
traces=traces,
epochs=epochs,
steps_per_epoch=steps_per_epoch)
return estimator
if __name__ == "__main__":
est = get_estimator()
est.fit()
| en | 0.807271 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== | 2.03527 | 2 |
bouncer/authentication/models.py | sourcelair/bouncer-api | 0 | 6624094 | <reponame>sourcelair/bouncer-api
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import get_random_string
def generate_key():
    """Produce a random 32-character token key."""
    return get_random_string(32)
class AuthToken(models.Model):
    """Opaque API authentication token owned by a single user.

    The random 32-character ``key`` doubles as the primary key, so a token
    is looked up directly by its value.
    """

    # Token value; generated once on creation, never user-editable.
    key = models.CharField(
        max_length=32, default=generate_key, primary_key=True, editable=False
    )
    # Owning user; deleting the user cascades and removes their tokens.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE
    )
    # Creation timestamp, set automatically on first save.
    created_at = models.DateTimeField(_("Created"), auto_now_add=True)
    def __str__(self):
        """Return the raw key so admin/log displays show the token value."""
        return self.key
| from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import get_random_string
def generate_key():
    """Produce a random 32-character token key."""
    return get_random_string(32)
class AuthToken(models.Model):
    """Opaque API authentication token owned by a single user.

    The random 32-character ``key`` doubles as the primary key, so a token
    is looked up directly by its value.
    """

    key = models.CharField(primary_key=True, max_length=32,
                           default=generate_key, editable=False)
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             verbose_name=_("User"),
                             on_delete=models.CASCADE)
    created_at = models.DateTimeField(_("Created"), auto_now_add=True)

    def __str__(self):
        return self.key
examples/stack_examples.py | jtmorrell/curie | 1 | 6624095 | import curie as ci
import numpy as np
def basic_examples():
    """Demonstrate basic curie usage: stopping power plots and Stack setup."""
    el = ci.Element('Fe')
    print(el.S(20.0))
    print(el.S(20.0, particle='a'))
    el.plot_S()

    # Bug fix: the original read `stack = stack=[...]`, a redundant chained
    # assignment binding the same list twice.
    stack = [{'compound':'Ni', 'name':'Ni01', 'thickness':0.025},   # Thickness only (mm)
             {'compound':'Kapton', 'thickness':0.05},               # No name - will not be tallied
             {'compound':'Ti', 'name':'Ti01', 'thickness':1.025},   # Very thick: should see straggle
             {'compound':'Inconel','ad':1.0,'name':'test'},
             {'compound':'SrCO3', 'name':'SrCO3', 'area':0.785, 'mass':4.8E-3}]
    st = ci.Stack(stack, E0=45, particle='d', compounds=[{'Inconel':{'Fe':33, 'Ni':55}}])
    st.summarize()
    st.plot()

    ### Import stack design from .csv file
    st = ci.Stack('test_stack.csv', particle='a', E0=70, min_steps=20, accuracy=1E-4)
    st.plot()
def extended_examples():
    """Walk through the wider curie API: Element, Compound and Stack features."""
    # --- Element properties and photon / charged-particle data ---
    el = ci.Element('Hf')
    print(el.mass)
    print(el.density)
    print(el.isotopes)
    print(el.abundances)
    f,ax = el.plot_mass_coeff(return_plot=True)
    el.plot_mass_coeff_en(energy=10.0**(np.arange(0,4,0.01)), f=f, ax=ax)
    el.plot_mass_coeff_en()
    el = ci.Element('Fe')
    print(el.attenuation(511, x=0.3))
    print(el.attenuation(300, x=0.5, density=8))
    print(el.S(20.0, particle='Ca-40'))
    print(el.S(60.0))
    el.plot_S(particle='40CA')
    print(el.range(60.0))
    el.plot_range()
    el.plot_mass_coeff()
    el.plot_mass_coeff(style='poster')
    ci.set_style()
    el = ci.Element('La')
    print(el.S(60.0))
    print(el.S(55.0, density=1E-3)) ### S in MeV/(mg/cm^2)
    el = ci.Element('Fe')
    print(el.range(60.0))
    el = ci.Element('U')
    print(el.range(60.0))
    el = ci.Element('Hg')
    print(el.mu(200))
    print(el.mu_en(200))
    # --- Compounds: built-ins, chemical formulas and custom weights ---
    cm = ci.Compound('Silicone')
    print(cm.weights)
    for c in ['H2C3.2RbHeCe4Pb','H2O','NHO2','H2.5O1.5','SrCO3']:
        cm = ci.Compound(c)
        print(cm.weights)
    print('Silicone' in ci.COMPOUND_LIST)
    cm = ci.Compound('Silicone')
    print(list(map(str, cm.elements)))
    cm = ci.Compound('H2O', density=1.0)
    print(cm.mu(200))
    print(cm.mu_en(200))
    print(cm.weights)
    cm = ci.Compound('SS_316') # preset compound for 316 Stainless
    print(cm.attenuation(511, x=0.3))
    print(cm.attenuation(300, x=1.0, density=5.0))
    cm = ci.Compound('Fe')
    print(cm.range(60.0))
    cm = ci.Compound('SS_316')
    print(cm.range(60.0))
    # Negative weights denote percentages by mass (by curie convention here;
    # confirm against the curie documentation).
    cm = ci.Compound('Brass', weights={'Zn':-33,'Cu':-66})
    print(cm.weights)
    cm.saveas('compounds.csv')
    cm = ci.Compound('Bronze', weights={'Cu':-80, 'Sn':-20}, density=8.9)
    f,ax = cm.plot_range(return_plot=True)
    cm.plot_range(particle='d', f=f, ax=ax)
    cm = ci.Compound('Bronze', weights='example_compounds.json')
    print(cm.weights)
    cm.saveas('compounds.csv')
    cm = ci.Compound('Bronze', weights='example_compounds.csv', density=8.9)
    cm.plot_mass_coeff()
    cm.plot_S()
    cm.plot_range()
    cm = ci.Compound('SrCO3', density=3.5)
    print(cm.S(60.0))
    print(cm.S(55.0, density=1E-3)) ### S in MeV/(mg/cm^2)
    # --- Stacks: construction, saving in several formats, and plotting ---
    stack = [{'cm':'H2O', 'ad':800.0, 'name':'water'},
             {'cm':'RbCl', 'density':3.0, 't':0.03, 'name':'salt'},
             {'cm':'Kapton', 't':0.025},
             {'cm':'Brass','ad':350, 'name':'metal'}]
    st = ci.Stack(stack, compounds={'Brass':{'Cu':-66, 'Zn':-33}}, E0=60.0)
    st.saveas('example_stack.csv')
    st.saveas('example_stack.json', filter_name=False)
    st.saveas('example_stack.db', save_fluxes=False)
    st.summarize()
    st.summarize(filter_name=False)
    st.plot()
    st.plot(filter_name='salt')
    st = ci.Stack(stack, compounds='example_compounds.json')
    print(st.stack)
    st.saveas('stack_calc.csv')
    print(st.fluxes)
    st.saveas('test.csv')
    st.saveas('test.db')
    st.summarize()
    st.plot()
    st = ci.Stack('test_stack.csv')
    print(st.stack)
    st.plot()
basic_examples()
extended_examples() | import curie as ci
import numpy as np
def basic_examples():
    """Demonstrate basic curie usage: stopping power plots and Stack setup."""
    el = ci.Element('Fe')
    print(el.S(20.0))
    print(el.S(20.0, particle='a'))
    el.plot_S()

    # Bug fix: the original read `stack = stack=[...]`, a redundant chained
    # assignment binding the same list twice.
    stack = [{'compound':'Ni', 'name':'Ni01', 'thickness':0.025},   # Thickness only (mm)
             {'compound':'Kapton', 'thickness':0.05},               # No name - will not be tallied
             {'compound':'Ti', 'name':'Ti01', 'thickness':1.025},   # Very thick: should see straggle
             {'compound':'Inconel','ad':1.0,'name':'test'},
             {'compound':'SrCO3', 'name':'SrCO3', 'area':0.785, 'mass':4.8E-3}]
    st = ci.Stack(stack, E0=45, particle='d', compounds=[{'Inconel':{'Fe':33, 'Ni':55}}])
    st.summarize()
    st.plot()

    ### Import stack design from .csv file
    st = ci.Stack('test_stack.csv', particle='a', E0=70, min_steps=20, accuracy=1E-4)
    st.plot()
def extended_examples():
    """Walk through the wider curie API: Element, Compound and Stack features."""
    # --- Element properties and photon / charged-particle data ---
    el = ci.Element('Hf')
    print(el.mass)
    print(el.density)
    print(el.isotopes)
    print(el.abundances)
    f,ax = el.plot_mass_coeff(return_plot=True)
    el.plot_mass_coeff_en(energy=10.0**(np.arange(0,4,0.01)), f=f, ax=ax)
    el.plot_mass_coeff_en()
    el = ci.Element('Fe')
    print(el.attenuation(511, x=0.3))
    print(el.attenuation(300, x=0.5, density=8))
    print(el.S(20.0, particle='Ca-40'))
    print(el.S(60.0))
    el.plot_S(particle='40CA')
    print(el.range(60.0))
    el.plot_range()
    el.plot_mass_coeff()
    el.plot_mass_coeff(style='poster')
    ci.set_style()
    el = ci.Element('La')
    print(el.S(60.0))
    print(el.S(55.0, density=1E-3)) ### S in MeV/(mg/cm^2)
    el = ci.Element('Fe')
    print(el.range(60.0))
    el = ci.Element('U')
    print(el.range(60.0))
    el = ci.Element('Hg')
    print(el.mu(200))
    print(el.mu_en(200))
    # --- Compounds: built-ins, chemical formulas and custom weights ---
    cm = ci.Compound('Silicone')
    print(cm.weights)
    for c in ['H2C3.2RbHeCe4Pb','H2O','NHO2','H2.5O1.5','SrCO3']:
        cm = ci.Compound(c)
        print(cm.weights)
    print('Silicone' in ci.COMPOUND_LIST)
    cm = ci.Compound('Silicone')
    print(list(map(str, cm.elements)))
    cm = ci.Compound('H2O', density=1.0)
    print(cm.mu(200))
    print(cm.mu_en(200))
    print(cm.weights)
    cm = ci.Compound('SS_316') # preset compound for 316 Stainless
    print(cm.attenuation(511, x=0.3))
    print(cm.attenuation(300, x=1.0, density=5.0))
    cm = ci.Compound('Fe')
    print(cm.range(60.0))
    cm = ci.Compound('SS_316')
    print(cm.range(60.0))
    # Negative weights denote percentages by mass (by curie convention here;
    # confirm against the curie documentation).
    cm = ci.Compound('Brass', weights={'Zn':-33,'Cu':-66})
    print(cm.weights)
    cm.saveas('compounds.csv')
    cm = ci.Compound('Bronze', weights={'Cu':-80, 'Sn':-20}, density=8.9)
    f,ax = cm.plot_range(return_plot=True)
    cm.plot_range(particle='d', f=f, ax=ax)
    cm = ci.Compound('Bronze', weights='example_compounds.json')
    print(cm.weights)
    cm.saveas('compounds.csv')
    cm = ci.Compound('Bronze', weights='example_compounds.csv', density=8.9)
    cm.plot_mass_coeff()
    cm.plot_S()
    cm.plot_range()
    cm = ci.Compound('SrCO3', density=3.5)
    print(cm.S(60.0))
    print(cm.S(55.0, density=1E-3)) ### S in MeV/(mg/cm^2)
    # --- Stacks: construction, saving in several formats, and plotting ---
    stack = [{'cm':'H2O', 'ad':800.0, 'name':'water'},
             {'cm':'RbCl', 'density':3.0, 't':0.03, 'name':'salt'},
             {'cm':'Kapton', 't':0.025},
             {'cm':'Brass','ad':350, 'name':'metal'}]
    st = ci.Stack(stack, compounds={'Brass':{'Cu':-66, 'Zn':-33}}, E0=60.0)
    st.saveas('example_stack.csv')
    st.saveas('example_stack.json', filter_name=False)
    st.saveas('example_stack.db', save_fluxes=False)
    st.summarize()
    st.summarize(filter_name=False)
    st.plot()
    st.plot(filter_name='salt')
    st = ci.Stack(stack, compounds='example_compounds.json')
    print(st.stack)
    st.saveas('stack_calc.csv')
    print(st.fluxes)
    st.saveas('test.csv')
    st.saveas('test.db')
    st.summarize()
    st.plot()
    st = ci.Stack('test_stack.csv')
    print(st.stack)
    st.plot()
basic_examples()
extended_examples() | en | 0.7131 | # Thickness only (mm) # No name - will not be tallied # Very thick: should see straggle ### Import stack design from .csv file ### S in MeV/(mg/cm^2) # preset compound for 316 Stainless ### S in MeV/(mg/cm^2) | 2.709723 | 3 |
migrations/versions/4ea72a17a3f_.py | anniejw6/numb3rs_randomizer | 0 | 6624096 | <reponame>anniejw6/numb3rs_randomizer
"""empty message
Revision ID: 4<PASSWORD>
Revises: None
Create Date: 2015-07-05 12:43:37.912793
"""
# revision identifiers, used by Alembic.
revision = '4<PASSWORD>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| """empty message
Revision ID: 4<PASSWORD>
Revises: None
Create Date: 2015-07-05 12:43:37.912793
"""
# revision identifiers, used by Alembic.
revision = '4<PASSWORD>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ### | en | 0.495954 | empty message Revision ID: 4<PASSWORD> Revises: None Create Date: 2015-07-05 12:43:37.912793 # revision identifiers, used by Alembic. ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### | 1.25928 | 1 |
npstreams/tests/test_flow.py | LaurentRDC/npstreams | 30 | 6624097 | <filename>npstreams/tests/test_flow.py
# -*- coding: utf-8 -*-
import numpy as np
from pathlib import Path
from npstreams import array_stream, ipipe, last, iload, pload, isum
@array_stream
def iden(arrays):
    """Identity stream: yield each array from the stream unchanged."""
    yield from arrays
def test_ipipe_order():
    """Test that ipipe(f, g, h, arrays) -> f(g(h(arr))) for arr in arrays"""
    arrays = [np.random.random((15, 7, 2, 1)) for _ in range(10)]
    expected = [np.cbrt(np.square(arr)) for arr in arrays]
    piped = ipipe(np.cbrt, np.square, arrays)
    for out, ref in zip(piped, expected):
        assert np.allclose(out, ref)
def test_ipipe_multiprocessing():
    """Test that ipipe(f, g, h, arrays) -> f(g(h(arr))) for arr in arrays"""
    arrays = [np.random.random((15, 7, 2, 1)) for _ in range(10)]
    expected = [np.cbrt(np.square(arr)) for arr in arrays]
    piped = ipipe(np.cbrt, np.square, arrays, processes=2)
    for out, ref in zip(piped, expected):
        assert np.allclose(out, ref)
def test_iload_glob():
    """Test that iload works on glob-like patterns"""
    pattern = Path(__file__).parent / "data" / "test_data*.npy"
    total = last(isum(iload(pattern, load_func=np.load)))
    total = total.astype(float)  # cast to float for np.allclose
    assert np.allclose(total, np.zeros_like(total))
def test_iload_file_list():
    """Test that iload works on iterable of filenames"""
    data_dir = Path(__file__).parent / "data"
    files = [data_dir / "test_data{}.npy".format(i) for i in range(1, 4)]
    total = last(isum(iload(files, load_func=np.load))).astype(float)  # float for np.allclose
    assert np.allclose(total, np.zeros_like(total))
def test_pload_glob():
    """Test that pload works on glob-like patterns (serial and 2-process).

    The original duplicated the whole body for the ``processes=2`` case;
    both configurations now run through the same loop.
    """
    pattern = Path(__file__).parent / "data" / "test_data*.npy"
    for extra in ({}, {"processes": 2}):
        stream = pload(pattern, load_func=np.load, **extra)
        s = last(isum(stream)).astype(float)  # Cast to float for np.allclose
        assert np.allclose(s, np.zeros_like(s))
def test_pload_file_list():
    """Test that pload works on an iterable of filenames (serial and 2-process).

    The original built the identical ``files`` list twice and duplicated the
    body for ``processes=2``; both configurations now share one fixture list.
    """
    data_dir = Path(__file__).parent / "data"
    files = [data_dir / "test_data{}.npy".format(i) for i in range(1, 4)]
    for extra in ({}, {"processes": 2}):
        stream = pload(files, load_func=np.load, **extra)
        s = last(isum(stream)).astype(float)  # Cast to float for np.allclose
        assert np.allclose(s, np.zeros_like(s))
| <filename>npstreams/tests/test_flow.py
# -*- coding: utf-8 -*-
import numpy as np
from pathlib import Path
from npstreams import array_stream, ipipe, last, iload, pload, isum
@array_stream
def iden(arrays):
    """Identity stream: yield each array from the stream unchanged."""
    yield from arrays
def test_ipipe_order():
    """Test that ipipe(f, g, h, arrays) -> f(g(h(arr))) for arr in arrays"""
    arrays = [np.random.random((15, 7, 2, 1)) for _ in range(10)]
    expected = [np.cbrt(np.square(arr)) for arr in arrays]
    piped = ipipe(np.cbrt, np.square, arrays)
    for out, ref in zip(piped, expected):
        assert np.allclose(out, ref)
def test_ipipe_multiprocessing():
    """Test that ipipe(f, g, h, arrays) -> f(g(h(arr))) for arr in arrays"""
    arrays = [np.random.random((15, 7, 2, 1)) for _ in range(10)]
    expected = [np.cbrt(np.square(arr)) for arr in arrays]
    piped = ipipe(np.cbrt, np.square, arrays, processes=2)
    for out, ref in zip(piped, expected):
        assert np.allclose(out, ref)
def test_iload_glob():
    """Test that iload works on glob-like patterns"""
    pattern = Path(__file__).parent / "data" / "test_data*.npy"
    total = last(isum(iload(pattern, load_func=np.load)))
    total = total.astype(float)  # cast to float for np.allclose
    assert np.allclose(total, np.zeros_like(total))
def test_iload_file_list():
    """Test that iload works on iterable of filenames"""
    data_dir = Path(__file__).parent / "data"
    files = [data_dir / "test_data{}.npy".format(i) for i in range(1, 4)]
    total = last(isum(iload(files, load_func=np.load))).astype(float)  # float for np.allclose
    assert np.allclose(total, np.zeros_like(total))
def test_pload_glob():
    """Test that pload works on glob-like patterns (serial and 2-process).

    The original duplicated the whole body for the ``processes=2`` case;
    both configurations now run through the same loop.
    """
    pattern = Path(__file__).parent / "data" / "test_data*.npy"
    for extra in ({}, {"processes": 2}):
        stream = pload(pattern, load_func=np.load, **extra)
        s = last(isum(stream)).astype(float)  # Cast to float for np.allclose
        assert np.allclose(s, np.zeros_like(s))
def test_pload_file_list():
    """Test that pload works on an iterable of filenames (serial and 2-process).

    The original built the identical ``files`` list twice and duplicated the
    body for ``processes=2``; both configurations now share one fixture list.
    """
    data_dir = Path(__file__).parent / "data"
    files = [data_dir / "test_data{}.npy".format(i) for i in range(1, 4)]
    for extra in ({}, {"processes": 2}):
        stream = pload(files, load_func=np.load, **extra)
        s = last(isum(stream)).astype(float)  # Cast to float for np.allclose
        assert np.allclose(s, np.zeros_like(s))
| en | 0.719061 | # -*- coding: utf-8 -*- Test that ipipe(f, g, h, arrays) -> f(g(h(arr))) for arr in arrays Test that ipipe(f, g, h, arrays) -> f(g(h(arr))) for arr in arrays Test that iload works on glob-like patterns # Cast to float for np.allclose Test that iload works on iterable of filenames # Cast to float for np.allclose Test that pload works on glob-like patterns # Cast to float for np.allclose # Cast to float for np.allclose Test that pload works on iterable of filenames # Cast to float for np.allclose # Cast to float for np.allclose | 2.579848 | 3 |
colour/examples/recovery/examples_smits1999.py | BPearlstine/colour | 2 | 6624098 | # -*- coding: utf-8 -*-
"""
Showcases reflectance recovery computations using *Smits (1999)* method.
"""
import numpy as np
import colour
from colour.recovery.smits1999 import XYZ_to_RGB_Smits1999
from colour.utilities import message_box
message_box('"Smits (1999)" - Reflectance Recovery Computations')
XYZ = np.array([1.14176346, 1.00000000, 0.49815206])
RGB = XYZ_to_RGB_Smits1999(XYZ)
message_box(('Recovering reflectance using "Smits (1999)" method from '
'given "RGB" colourspace array:\n'
'\n\tRGB: {0}'.format(RGB)))
print(colour.XYZ_to_sd(XYZ, method='Smits 1999'))
print(colour.recovery.RGB_to_sd_Smits1999(RGB))
print('\n')
message_box(
('An analysis of "Smits (1999)" method is available at the '
'following url : '
'http://nbviewer.jupyter.org/github/colour-science/colour-website/'
'blob/master/ipython/about_reflectance_recovery.ipynb'))
| # -*- coding: utf-8 -*-
"""
Showcases reflectance recovery computations using *Smits (1999)* method.
"""
import numpy as np
import colour
from colour.recovery.smits1999 import XYZ_to_RGB_Smits1999
from colour.utilities import message_box
message_box('"Smits (1999)" - Reflectance Recovery Computations')
XYZ = np.array([1.14176346, 1.00000000, 0.49815206])
RGB = XYZ_to_RGB_Smits1999(XYZ)
message_box(('Recovering reflectance using "Smits (1999)" method from '
'given "RGB" colourspace array:\n'
'\n\tRGB: {0}'.format(RGB)))
print(colour.XYZ_to_sd(XYZ, method='Smits 1999'))
print(colour.recovery.RGB_to_sd_Smits1999(RGB))
print('\n')
message_box(
('An analysis of "Smits (1999)" method is available at the '
'following url : '
'http://nbviewer.jupyter.org/github/colour-science/colour-website/'
'blob/master/ipython/about_reflectance_recovery.ipynb'))
| en | 0.578172 | # -*- coding: utf-8 -*- Showcases reflectance recovery computations using *Smits (1999)* method. | 2.993708 | 3 |
experiment.py | adammoss/vae | 0 | 6624099 | <gh_stars>0
import math
import torch
from torch import optim
from models import BaseVAE
from models.types_ import *
import pytorch_lightning as pl
from torchvision import transforms
import torchvision.utils as vutils
from torchvision.datasets import CelebA, MNIST, CIFAR10
from astrovision.datasets import LensChallengeSpace1
from torch.utils.data import DataLoader
class VAEXperiment(pl.LightningModule):
    """Lightning experiment wrapper around a ``BaseVAE`` model.

    ``params`` is the experiment configuration dict; keys read here include
    'dataset', 'data_path', 'img_size', 'batch_size', 'LR', 'weight_decay'
    and the optional 'LR_2'/'submodel', 'scheduler_gamma',
    'scheduler_gamma_2' and 'retain_first_backpass'.
    """

    def __init__(self,
                 vae_model: BaseVAE,
                 params: dict) -> None:
        super(VAEXperiment, self).__init__()

        self.model = vae_model
        self.params = params
        # Retain the first backward graph only when explicitly requested
        # (used by some adversarial / multi-optimizer variants).
        # Replaces the original bare `try/except: pass` around a dict lookup.
        self.hold_graph = self.params.get('retain_first_backpass', False)

    def forward(self, input: Tensor, **kwargs) -> Tensor:
        """Delegate directly to the wrapped VAE."""
        return self.model(input, **kwargs)

    def training_step(self, batch, batch_idx, optimizer_idx=0):
        """Run one training batch and log per-step loss components."""
        real_img, labels = batch
        results = self.forward(real_img, labels=labels)
        # M_N re-weights the KL term by batch size / dataset size.
        loss, logs = self.model.loss_function(*results,
                                              M_N=self.params['batch_size'] / self.num_train_imgs,
                                              optimizer_idx=optimizer_idx,
                                              batch_idx=batch_idx)

        self.log_dict({f"train_{k}": v.detach() for k, v in logs.items()}, on_step=True, on_epoch=False)
        return loss

    def validation_step(self, batch, batch_idx, optimizer_idx=0):
        """Run one validation batch and log per-step loss components."""
        real_img, labels = batch
        results = self.forward(real_img, labels=labels)
        loss, logs = self.model.loss_function(*results,
                                              M_N=self.params['batch_size'] / self.num_val_imgs,
                                              optimizer_idx=optimizer_idx,
                                              batch_idx=batch_idx)

        self.log_dict({f"val_{k}": v.detach() for k, v in logs.items()}, on_step=True, on_epoch=False)
        return loss

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation losses."""
        avg_loss = torch.stack([x for x in outputs]).mean()
        return {'val_loss': avg_loss}

    def configure_optimizers(self):
        """Build Adam optimizer(s) and optional exponential LR scheduler(s).

        Returns the optimizer list, or ``(optimizers, schedulers)`` when
        'scheduler_gamma' is configured.
        """
        optims = []
        scheds = []

        optimizer = optim.Adam(self.model.parameters(),
                               lr=self.params['LR'],
                               weight_decay=self.params['weight_decay'])
        optims.append(optimizer)

        # Check if more than one optimizer is required (adversarial training).
        # Narrowed from the original bare `except:` — only "not configured"
        # (missing key) or "submodel attribute absent" are silently skipped.
        try:
            if self.params['LR_2'] is not None:
                optimizer2 = optim.Adam(
                    getattr(self.model, self.params['submodel']).parameters(),
                    lr=self.params['LR_2'])
                optims.append(optimizer2)
        except (KeyError, AttributeError):
            pass

        try:
            if self.params['scheduler_gamma'] is not None:
                scheduler = optim.lr_scheduler.ExponentialLR(
                    optims[0], gamma=self.params['scheduler_gamma'])
                scheds.append(scheduler)

                # Check if another scheduler is required for the second optimizer.
                try:
                    if self.params['scheduler_gamma_2'] is not None:
                        scheduler2 = optim.lr_scheduler.ExponentialLR(
                            optims[1], gamma=self.params['scheduler_gamma_2'])
                        scheds.append(scheduler2)
                except (KeyError, IndexError):
                    pass
                return optims, scheds
        except KeyError:
            pass
        # Bug fix: the original returned None when 'scheduler_gamma' was
        # present but set to None; always fall back to the optimizer list.
        return optims

    def _build_dataset(self, train: bool):
        """Instantiate the configured dataset split with the standard transforms.

        Factored out of train_dataloader/val_dataloader, which duplicated
        this dispatch verbatim.
        """
        transform = self.data_transforms()
        name = self.params['dataset']
        root = self.params['data_path']

        if name == 'celeba':
            return CelebA(root=root, split="train" if train else "test",
                          transform=transform, download=True)
        if name == 'cifar10':
            return CIFAR10(root=root, train=train, transform=transform, download=True)
        if name == 'mnist':
            return MNIST(root=root, train=train, transform=transform, download=True)
        if name == 'lens':
            return LensChallengeSpace1(root=root, train=train,
                                       transform=transform, download=True)
        raise ValueError('Undefined dataset type')

    def train_dataloader(self):
        """Return the training DataLoader and record the dataset size."""
        dataset = self._build_dataset(train=True)
        self.num_train_imgs = len(dataset)
        return DataLoader(dataset,
                          batch_size=self.params['batch_size'],
                          shuffle=True,
                          drop_last=True)

    def val_dataloader(self):
        """Return the validation DataLoader and record the dataset size."""
        dataset = self._build_dataset(train=False)
        self.num_val_imgs = len(dataset)
        # 144 = 12x12 — presumably sized for square sample-image grids; confirm.
        return DataLoader(dataset,
                          batch_size=144,
                          shuffle=True,
                          drop_last=True)

    def data_transforms(self):
        """Build the torchvision preprocessing pipeline for the configured dataset."""
        # Map [0, 1] image tensors to [-1, 1].
        SetRange = transforms.Lambda(lambda X: 2 * X - 1.)
        # Compress the large dynamic range of lensing maps.
        ArcSinh = transforms.Lambda(lambda X: torch.asinh(X))

        name = self.params['dataset']
        if name == 'celeba':
            transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                            transforms.CenterCrop(148),
                                            transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            SetRange])
        elif name == 'cifar10':
            transform = transforms.Compose([transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            SetRange])
        elif name == 'mnist':
            transform = transforms.Compose([transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            SetRange])
        elif name == 'lens':
            # Normalisation constants look tuned to the lensing-map value
            # scale (~1e-13 mean, ~1e-12 std) — TODO confirm against data.
            transform = transforms.Compose([transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            transforms.Normalize(1.0E-13, 1.0E-12),
                                            ArcSinh])
        else:
            raise ValueError('Undefined dataset type')
        return transform
| import math
import torch
from torch import optim
from models import BaseVAE
from models.types_ import *
import pytorch_lightning as pl
from torchvision import transforms
import torchvision.utils as vutils
from torchvision.datasets import CelebA, MNIST, CIFAR10
from astrovision.datasets import LensChallengeSpace1
from torch.utils.data import DataLoader
class VAEXperiment(pl.LightningModule):
    """Lightning experiment wrapper around a ``BaseVAE`` model.

    ``params`` is the experiment configuration dict; keys read here include
    'dataset', 'data_path', 'img_size', 'batch_size', 'LR', 'weight_decay'
    and the optional 'LR_2'/'submodel', 'scheduler_gamma',
    'scheduler_gamma_2' and 'retain_first_backpass'.
    """

    def __init__(self,
                 vae_model: BaseVAE,
                 params: dict) -> None:
        super(VAEXperiment, self).__init__()

        self.model = vae_model
        self.params = params
        # Retain the first backward graph only when explicitly requested
        # (used by some adversarial / multi-optimizer variants).
        # Replaces the original bare `try/except: pass` around a dict lookup.
        self.hold_graph = self.params.get('retain_first_backpass', False)

    def forward(self, input: Tensor, **kwargs) -> Tensor:
        """Delegate directly to the wrapped VAE."""
        return self.model(input, **kwargs)

    def training_step(self, batch, batch_idx, optimizer_idx=0):
        """Run one training batch and log per-step loss components."""
        real_img, labels = batch
        results = self.forward(real_img, labels=labels)
        # M_N re-weights the KL term by batch size / dataset size.
        loss, logs = self.model.loss_function(*results,
                                              M_N=self.params['batch_size'] / self.num_train_imgs,
                                              optimizer_idx=optimizer_idx,
                                              batch_idx=batch_idx)

        self.log_dict({f"train_{k}": v.detach() for k, v in logs.items()}, on_step=True, on_epoch=False)
        return loss

    def validation_step(self, batch, batch_idx, optimizer_idx=0):
        """Run one validation batch and log per-step loss components."""
        real_img, labels = batch
        results = self.forward(real_img, labels=labels)
        loss, logs = self.model.loss_function(*results,
                                              M_N=self.params['batch_size'] / self.num_val_imgs,
                                              optimizer_idx=optimizer_idx,
                                              batch_idx=batch_idx)

        self.log_dict({f"val_{k}": v.detach() for k, v in logs.items()}, on_step=True, on_epoch=False)
        return loss

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation losses."""
        avg_loss = torch.stack([x for x in outputs]).mean()
        return {'val_loss': avg_loss}

    def configure_optimizers(self):
        """Build Adam optimizer(s) and optional exponential LR scheduler(s).

        Returns the optimizer list, or ``(optimizers, schedulers)`` when
        'scheduler_gamma' is configured.
        """
        optims = []
        scheds = []

        optimizer = optim.Adam(self.model.parameters(),
                               lr=self.params['LR'],
                               weight_decay=self.params['weight_decay'])
        optims.append(optimizer)

        # Check if more than one optimizer is required (adversarial training).
        # Narrowed from the original bare `except:` — only "not configured"
        # (missing key) or "submodel attribute absent" are silently skipped.
        try:
            if self.params['LR_2'] is not None:
                optimizer2 = optim.Adam(
                    getattr(self.model, self.params['submodel']).parameters(),
                    lr=self.params['LR_2'])
                optims.append(optimizer2)
        except (KeyError, AttributeError):
            pass

        try:
            if self.params['scheduler_gamma'] is not None:
                scheduler = optim.lr_scheduler.ExponentialLR(
                    optims[0], gamma=self.params['scheduler_gamma'])
                scheds.append(scheduler)

                # Check if another scheduler is required for the second optimizer.
                try:
                    if self.params['scheduler_gamma_2'] is not None:
                        scheduler2 = optim.lr_scheduler.ExponentialLR(
                            optims[1], gamma=self.params['scheduler_gamma_2'])
                        scheds.append(scheduler2)
                except (KeyError, IndexError):
                    pass
                return optims, scheds
        except KeyError:
            pass
        # Bug fix: the original returned None when 'scheduler_gamma' was
        # present but set to None; always fall back to the optimizer list.
        return optims

    def _build_dataset(self, train: bool):
        """Instantiate the configured dataset split with the standard transforms.

        Factored out of train_dataloader/val_dataloader, which duplicated
        this dispatch verbatim.
        """
        transform = self.data_transforms()
        name = self.params['dataset']
        root = self.params['data_path']

        if name == 'celeba':
            return CelebA(root=root, split="train" if train else "test",
                          transform=transform, download=True)
        if name == 'cifar10':
            return CIFAR10(root=root, train=train, transform=transform, download=True)
        if name == 'mnist':
            return MNIST(root=root, train=train, transform=transform, download=True)
        if name == 'lens':
            return LensChallengeSpace1(root=root, train=train,
                                       transform=transform, download=True)
        raise ValueError('Undefined dataset type')

    def train_dataloader(self):
        """Return the training DataLoader and record the dataset size."""
        dataset = self._build_dataset(train=True)
        self.num_train_imgs = len(dataset)
        return DataLoader(dataset,
                          batch_size=self.params['batch_size'],
                          shuffle=True,
                          drop_last=True)

    def val_dataloader(self):
        """Return the validation DataLoader and record the dataset size."""
        dataset = self._build_dataset(train=False)
        self.num_val_imgs = len(dataset)
        # 144 = 12x12 — presumably sized for square sample-image grids; confirm.
        return DataLoader(dataset,
                          batch_size=144,
                          shuffle=True,
                          drop_last=True)

    def data_transforms(self):
        """Build the torchvision preprocessing pipeline for the configured dataset."""
        # Map [0, 1] image tensors to [-1, 1].
        SetRange = transforms.Lambda(lambda X: 2 * X - 1.)
        # Compress the large dynamic range of lensing maps.
        ArcSinh = transforms.Lambda(lambda X: torch.asinh(X))

        name = self.params['dataset']
        if name == 'celeba':
            transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                            transforms.CenterCrop(148),
                                            transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            SetRange])
        elif name == 'cifar10':
            transform = transforms.Compose([transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            SetRange])
        elif name == 'mnist':
            transform = transforms.Compose([transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            SetRange])
        elif name == 'lens':
            # Normalisation constants look tuned to the lensing-map value
            # scale (~1e-13 mean, ~1e-12 std) — TODO confirm against data.
            transform = transforms.Compose([transforms.Resize(self.params['img_size']),
                                            transforms.ToTensor(),
                                            transforms.Normalize(1.0E-13, 1.0E-12),
                                            ArcSinh])
        else:
            raise ValueError('Undefined dataset type')
        return transform
pinax/messages/api/serializers.py | thetruefuss/theoctopuslibrary | 4 | 6624100 | <reponame>thetruefuss/theoctopuslibrary<filename>pinax/messages/api/serializers.py
from accounts.api.serializers import UserPublicSerializer
from pinax.messages.models import Message, Thread
from rest_framework import serializers
class ThreadListSerializer(serializers.ModelSerializer):
    """List-view representation of a message thread.

    Adds a detail URL, the body of the most recent message, and an
    unread flag computed for the requesting user.
    """

    url = serializers.HyperlinkedIdentityField(
        view_name='messages-api:thread_retrieve',
        lookup_field='pk'
    )
    latest_message_content = serializers.SerializerMethodField()
    is_unread = serializers.SerializerMethodField()

    class Meta:
        model = Thread
        fields = (
            'url', 'subject', 'users', 'latest_message_content', 'is_unread',
        )

    def get_latest_message_content(self, obj):
        """Return the text of the thread's most recent message."""
        return obj.latest_message.content

    def get_is_unread(self, obj):
        """Return True if the requesting user has unread messages in this thread."""
        request = self.context.get('request')
        # .exists() issues an EXISTS query instead of materialising rows
        # (the original used bool(queryset), which fetches them).
        return obj.userthread_set.filter(user=request.user, unread=True).exists()
class MessageSerializer(serializers.ModelSerializer):
    """Single message: body text, read-only sender profile and timestamp."""

    sender = UserPublicSerializer(read_only=True)
    class Meta:
        model = Message
        fields = (
            'content', 'sender', 'sent_at'
        )
class ThreadRetrieveSerializer(serializers.ModelSerializer):
    """Detail view of a thread including its full message history."""

    messages = MessageSerializer(many=True)
    class Meta:
        model = Thread
        fields = (
            'id', 'messages',
        )
| from accounts.api.serializers import UserPublicSerializer
from pinax.messages.models import Message, Thread
from rest_framework import serializers
class ThreadListSerializer(serializers.ModelSerializer):
    """List-view representation of a message thread.

    Adds a detail URL, the body of the most recent message, and an
    unread flag computed for the requesting user.
    """

    url = serializers.HyperlinkedIdentityField(
        view_name='messages-api:thread_retrieve',
        lookup_field='pk'
    )
    latest_message_content = serializers.SerializerMethodField()
    is_unread = serializers.SerializerMethodField()

    class Meta:
        model = Thread
        fields = (
            'url', 'subject', 'users', 'latest_message_content', 'is_unread',
        )

    def get_latest_message_content(self, obj):
        """Return the text of the thread's most recent message."""
        return obj.latest_message.content

    def get_is_unread(self, obj):
        """Return True if the requesting user has unread messages in this thread."""
        request = self.context.get('request')
        # .exists() issues an EXISTS query instead of materialising rows
        # (the original used bool(queryset), which fetches them).
        return obj.userthread_set.filter(user=request.user, unread=True).exists()
class MessageSerializer(serializers.ModelSerializer):
    """Single message: body text, read-only sender profile and timestamp."""

    sender = UserPublicSerializer(read_only=True)
    class Meta:
        model = Message
        fields = (
            'content', 'sender', 'sent_at'
        )
class ThreadRetrieveSerializer(serializers.ModelSerializer):
    """Detail view of a thread including its full message history."""

    messages = MessageSerializer(many=True)

    class Meta:
        model = Thread
        fields = ('id', 'messages')
higlass_schema/utils.py | higlass/higlass-schema | 1 | 6624101 | <gh_stars>1-10
from typing import Any, Dict, TypeVar
from pydantic import BaseModel, schema_of
def simplify_schema(root_schema: Dict[str, Any]) -> Dict[str, Any]:
    """Lift the definition reference to the root if it is the only definition."""
    # Root type is not a reference to a definition: nothing to lift.
    if "$ref" not in root_schema:
        return root_schema
    defs = list(root_schema["definitions"].values())
    if len(defs) != 1:
        return root_schema
    return defs[0]
## Schema modifiers
ModelT = TypeVar("ModelT", bound=BaseModel)
def exclude_properties_titles(schema: Dict[str, Any]) -> None:
    """Strip pydantic's auto-generated ``title`` from every property, in place."""
    props = schema.get("properties", {})
    for prop in props.values():
        prop.pop("title", None)
def get_schema_of(type_: Any):
    """Return a simplified JSON schema for ``type_`` with auto-titles stripped."""
    schema = simplify_schema(schema_of(type_))
    exclude_properties_titles(schema)
    # remove the autogenerated root title
    schema.pop("title")
    return schema
def simplify_enum_schema(schema: Dict[str, Any]):
    """Collapse enum schemas: merge a union of enums; single-entry enum -> const."""
    if "anyOf" in schema:
        # A union of enums reduces to one enum with all the entries.
        merged = []
        for option in schema["anyOf"]:
            assert "enum" in option
            merged.extend(option["enum"])
        return {"enum": merged}
    values = schema["enum"]
    if len(values) == 1:
        # A one-entry enum is just a constant.
        return {"const": values[0]}
    return {"enum": values}
| from typing import Any, Dict, TypeVar
from pydantic import BaseModel, schema_of
def simplify_schema(root_schema: Dict[str, Any]) -> Dict[str, Any]:
"""Lift defintion reference to root if only definition"""
# type of root is not a reference to a definition
if not "$ref" in root_schema:
return root_schema
defs = list(root_schema["definitions"].values())
if len(defs) != 1:
return root_schema
return defs[0]
## Schema modifiers
ModelT = TypeVar("ModelT", bound=BaseModel)
def exclude_properties_titles(schema: Dict[str, Any]) -> None:
"""Remove automatically generated tiles for pydantic classes."""
for prop in schema.get("properties", {}).values():
prop.pop("title", None)
def get_schema_of(type_: Any):
schema = schema_of(type_)
schema = simplify_schema(schema)
exclude_properties_titles(schema)
# remove autogenerated title
schema.pop("title")
return schema
def simplify_enum_schema(schema: Dict[str, Any]):
# reduce union of enums into single enum
if "anyOf" in schema:
enum = []
for entry in schema["anyOf"]:
assert "enum" in entry
enum.extend(entry["enum"])
return {"enum": enum}
enum = schema["enum"]
# if there is only one enum entry, make it a const
if len(enum) == 1:
return {"const": enum[0]}
return {"enum": enum} | en | 0.720761 | Lift defintion reference to root if only definition # type of root is not a reference to a definition ## Schema modifiers Remove automatically generated tiles for pydantic classes. # remove autogenerated title # reduce union of enums into single enum # if there is only one enum entry, make it a const | 2.703411 | 3 |
BEGIN/DAY_03/001.flow-if-else-conditional.py | thakopian/100-DAYS-OF-PYTHON-PROJECT | 0 | 6624102 | <filename>BEGIN/DAY_03/001.flow-if-else-conditional.py
print("Can you the rollercoaster!?")
height = int(input("What is your height in cm? "))
'''
#pseudo code
if condition:
do this
else:
do this
'''
if height >= 125:
print("Get on board and ridet he rollercoaster!!!")
else:
print("sorry not good enough kid!")
| <filename>BEGIN/DAY_03/001.flow-if-else-conditional.py
print("Can you the rollercoaster!?")
height = int(input("What is your height in cm? "))
'''
#pseudo code
if condition:
do this
else:
do this
'''
if height >= 125:
print("Get on board and ridet he rollercoaster!!!")
else:
print("sorry not good enough kid!")
| en | 0.418989 | #pseudo code if condition: do this else: do this | 4.246231 | 4 |
src/mcare_backend/patients/urls.py | BuildForSDG/Team-108-Backend | 0 | 6624103 | <gh_stars>0
from django.urls import path, include
from rest_framework import routers
from patients.views import (
PatientGroupViewSet,
MessagesViewSet
)
router = routers.DefaultRouter()
router.register(r'groups', PatientGroupViewSet)
router.register(r'messages', MessagesViewSet)
urlpatterns = [
path('', include(router.urls)),
]
| from django.urls import path, include
from rest_framework import routers
from patients.views import (
PatientGroupViewSet,
MessagesViewSet
)
router = routers.DefaultRouter()
router.register(r'groups', PatientGroupViewSet)
router.register(r'messages', MessagesViewSet)
urlpatterns = [
path('', include(router.urls)),
] | none | 1 | 1.751749 | 2 | |
preprocessing/data_normalize.py | xxcheng0708/AudioEmbeddingExtraction | 1 | 6624104 | # coding:utf-8
"""
Created by <NAME> at 2022/1/16 20:10
@email : <EMAIL>
"""
import torch
def get_dataset_mean_and_std(dataset, batch_size):
"""
计算数据集的均值和方差,用来做归一化
:param dataset:
:param batch_size:
:return:
"""
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8)
mean = torch.zeros(1)
std = torch.zeros(1)
total_count = 0
for inputs, targets in dataloader:
# 音频特征数据是1通道的,所以这里是1
for i in range(1):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
total_count += inputs.shape
mean.div_(total_count)
std.div_(total_count)
return mean, std
| # coding:utf-8
"""
Created by <NAME> at 2022/1/16 20:10
@email : <EMAIL>
"""
import torch
def get_dataset_mean_and_std(dataset, batch_size):
"""
计算数据集的均值和方差,用来做归一化
:param dataset:
:param batch_size:
:return:
"""
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8)
mean = torch.zeros(1)
std = torch.zeros(1)
total_count = 0
for inputs, targets in dataloader:
# 音频特征数据是1通道的,所以这里是1
for i in range(1):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
total_count += inputs.shape
mean.div_(total_count)
std.div_(total_count)
return mean, std
| en | 0.372041 | # coding:utf-8 Created by <NAME> at 2022/1/16 20:10 @email : <EMAIL> 计算数据集的均值和方差,用来做归一化 :param dataset: :param batch_size: :return: # 音频特征数据是1通道的,所以这里是1 | 2.634179 | 3 |
aws/amazonia/amazonia/classes/hosted_zone.py | linz/Geodesy-Web-Services | 2 | 6624105 | <gh_stars>1-10
#!/usr/bin/python3
from troposphere import route53, Ref, Join
from troposphere.route53 import HostedZoneVPCs
class HostedZone(object):
def __init__(self, template, domain, vpcs):
"""
Creates a troposphere HostedZoneVPC object from a troposphere vpc object.
AWS: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html
Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/route53.py
:param template: The cloud formation template to add this hosted zone to.
:param domain: The domain you would like for your hosted zone. MUST be 'something.something' (eg 'example.com')
:param vpcs: A list of VPCs to associate this hosted zone with (if none, a public hosted zone is created)
"""
self.template = template
self.domain = domain
self.trop_hosted_zone = self.create_hosted_zone(self.domain, vpcs)
def create_hosted_zone(self, domain, vpcs):
"""
Creates a route53 hosted zone object either public (vpcs=None) or private (vpcs=[vpc1,...])
AWS: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html
Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/route53.py
:param domain: The domain you would like for your hosted zone. MUST be 'something.something' (eg 'example.com')
:param vpcs: A list of VPCs to associate this hosted zone with (if none, a public hosted zone is created)
"""
hz_type = 'private' if vpcs else 'public'
hz_config = route53.HostedZoneConfiguration(
Comment=Join('', [hz_type,
' hosted zone created by Amazonia for stack: ',
Ref('AWS::StackName')])
)
hz = self.template.add_resource(route53.HostedZone(
hz_type+'HostedZone',
HostedZoneConfig=hz_config,
Name=domain
))
if vpcs:
hz.VPCs = []
for vpc in vpcs:
hz.VPCs.append(HostedZoneVPCs(VPCId=vpc, VPCRegion=Ref('AWS::Region')))
return hz
| #!/usr/bin/python3
from troposphere import route53, Ref, Join
from troposphere.route53 import HostedZoneVPCs
class HostedZone(object):
def __init__(self, template, domain, vpcs):
"""
Creates a troposphere HostedZoneVPC object from a troposphere vpc object.
AWS: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html
Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/route53.py
:param template: The cloud formation template to add this hosted zone to.
:param domain: The domain you would like for your hosted zone. MUST be 'something.something' (eg 'example.com')
:param vpcs: A list of VPCs to associate this hosted zone with (if none, a public hosted zone is created)
"""
self.template = template
self.domain = domain
self.trop_hosted_zone = self.create_hosted_zone(self.domain, vpcs)
def create_hosted_zone(self, domain, vpcs):
"""
Creates a route53 hosted zone object either public (vpcs=None) or private (vpcs=[vpc1,...])
AWS: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html
Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/route53.py
:param domain: The domain you would like for your hosted zone. MUST be 'something.something' (eg 'example.com')
:param vpcs: A list of VPCs to associate this hosted zone with (if none, a public hosted zone is created)
"""
hz_type = 'private' if vpcs else 'public'
hz_config = route53.HostedZoneConfiguration(
Comment=Join('', [hz_type,
' hosted zone created by Amazonia for stack: ',
Ref('AWS::StackName')])
)
hz = self.template.add_resource(route53.HostedZone(
hz_type+'HostedZone',
HostedZoneConfig=hz_config,
Name=domain
))
if vpcs:
hz.VPCs = []
for vpc in vpcs:
hz.VPCs.append(HostedZoneVPCs(VPCId=vpc, VPCRegion=Ref('AWS::Region')))
return hz | en | 0.793054 | #!/usr/bin/python3 Creates a troposphere HostedZoneVPC object from a troposphere vpc object. AWS: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/route53.py :param template: The cloud formation template to add this hosted zone to. :param domain: The domain you would like for your hosted zone. MUST be 'something.something' (eg 'example.com') :param vpcs: A list of VPCs to associate this hosted zone with (if none, a public hosted zone is created) Creates a route53 hosted zone object either public (vpcs=None) or private (vpcs=[vpc1,...]) AWS: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/route53.py :param domain: The domain you would like for your hosted zone. MUST be 'something.something' (eg 'example.com') :param vpcs: A list of VPCs to associate this hosted zone with (if none, a public hosted zone is created) | 2.830776 | 3 |
mininet/mininet/wifi.py | gustavo978/helpful | 0 | 6624106 | <filename>mininet/mininet/wifi.py
import socket
from time import sleep
import mininet.node
import mininet.link
from mininet.log import info
from mininet.util import moveIntf
from mininet.cluster.link import RemoteLink
class WIFI (object):
def __init__ (self, enableQos=True, rootSwitch=None, agentIP=None, agentPort=53724):
self.rootSwitch = rootSwitch
self.startAgent ()
self.csock = None
while self.csock == None:
self.csock = self.connectAgent (agentIP, agentPort)
self.tapBridgeIntfs = []
self.csock.sendall ('GlobalValue.Bind ("SimulatorImplementationType", StringValue ("ns3::RealtimeSimulatorImpl"))\n')
self.csock.sendall ('GlobalValue.Bind ("ChecksumEnabled", BooleanValue (True))\n')
self.csock.sendall ('wifihelper = WifiHelper.Default()\n')
self.csock.sendall ('wifihelper.SetStandard (WIFI_PHY_STANDARD_80211g)\n')
self.csock.sendall ('phyhelper = YansWifiPhyHelper.Default()\n')
self.csock.sendall ('channelhelper = YansWifiChannelHelper.Default()\n')
self.csock.sendall ('phyhelper.SetChannel (channelhelper.Create())\n')
if enableQos:
self.csock.sendall ('machelper = QosWifiMacHelper.Default()\n')
else:
self.csock.sendall ('machelper = NqosWifiMacHelper.Default()\n')
self.csock.sendall ('mobilityhelper = MobilityHelper ()\n')
self.csock.sendall ('def run ():\n')
self.csock.sendall (' Simulator.Stop (Seconds (86400))\n')
self.csock.sendall (' Simulator.Run ()\n')
self.csock.sendall ('nsThread = Thread (target = run)\n')
self.csock.sendall ('tapBridges = []\n')
def startAgent (self):
self.rootSwitch.cmd ("/usr/bin/opennet-agent.py start")
def stopAgent (self):
self.rootSwitch.rcmd ("/usr/bin/opennet-agent.py stop")
def connectAgent (self, ip, port):
csock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
try:
info ('*** Connecting to opennet-agent... ')
csock.connect ((ip, port))
except socket.error, exc:
info ('Failed\n')
return None
else:
info ('Successed\n')
return csock
def start (self):
self.csock.sendall ('if nsThread.isAlive ():\n csock.sendall ("True")\nelse:\n csock.sendall ("False")\n')
while True:
data = self.csock.recv (1024)
if data == "True":
info ('*** NS-3 thread is already running\n')
return
elif data == "False":
info ('*** Starting NS-3 thread\n')
break
self.csock.sendall ('nsThread.start ()\n')
info ('*** moveIntoNamespace\n')
for tbIntf in self.tapBridgeIntfs:
info ('{0} '.format (tbIntf.name))
tbIntf.moveIntoNamespace ()
info ('\n')
def stop (self):
self.csock.sendall ('Simulator.Stop (Seconds (1))\n')
self.csock.sendall ('while nsThread.isAlive ():\n sleep (0.1)\n')
def clear (self):
self.csock.sendall ('Simulator.Destroy ()\n')
self.csock.sendall ('exit ()\n')
self.csock.close ()
self.stopAgent ()
def addAdhoc (self, node, mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.csock.sendall ('machelper.SetType ("ns3::AdhocWifiMac")\n')
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobilityhelper.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobilityhelper.Install (nsNode)\n')
if position != None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
if velocity != None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('wifiDev = wifihelper.Install (phyhelper, machelper, nsNode).Get(0)\n')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
tbIntf = self.TapBridgeIntf (intfName, node, port, self.rootSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
def addAP (self, node, channelNumber=1, ssid="default-ssid", mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.csock.sendall ('machelper.SetType ("ns3::ApWifiMac", "Ssid", SsidValue (Ssid("{0}")), "BeaconGeneration", BooleanValue(True), "BeaconInterval", TimeValue(Seconds(2.5)))\n'.format (ssid))
self.csock.sendall ('phyhelper.Set ("ChannelNumber", UintegerValue ({0}))\n'.format (channelNumber))
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobilityhelper.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobilityhelper.Install (nsNode)\n')
if position != None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
if velocity != None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('wifiDev = wifihelper.Install (phyhelper, machelper, nsNode).Get(0)\n')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
tbIntf = self.TapBridgeIntf (intfName, node, port, self.rootSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
def addSta (self, node, channelNumber=1, ssid="default-ssid", mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.csock.sendall ('machelper.SetType ("ns3::StaWifiMac", "Ssid", SsidValue (Ssid("{0}")), "ScanType", EnumValue (StaWifiMac.ACTIVE))\n'.format (ssid))
self.csock.sendall ('phyhelper.Set ("ChannelNumber", UintegerValue ({0}))\n'.format (channelNumber))
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobilityhelper.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobilityhelper.Install (nsNode)\n')
if position != None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
if velocity != None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('wifiDev = wifihelper.Install (phyhelper, machelper, nsNode).Get(0)\n')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
tbIntf = self.TapBridgeIntf (intfName, node, port, self.rootSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
class TapBridgeIntf (mininet.link.Intf):
"""
TapBridgeIntf is a Linux TAP interface, which is bridged with an NS-3 NetDevice.
"""
def __init__ (self, name=None, node=None, port=None, localNode=None, csock=None, **params):
self.name = name
self.node = node
self.localNode = localNode
self.csock = csock
self.createTap (self.name)
self.delayedMove = True
if node.inNamespace == True:
self.inRightNamespace = False
else:
self.inRightNamespace = True
mininet.link.Intf.__init__ (self, name, node, port, **params)
self.csock.sendall ('nsDevice = wifiDev\n')
self.csock.sendall ('tapBridgeHelper = TapBridgeHelper ()\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("Mode", StringValue ("UseLocal"))\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("DeviceName", StringValue ("{0}"))\n'.format (self.name))
self.csock.sendall ('macAddress = Mac48Address.Allocate ()\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("MacAddress", Mac48AddressValue (macAddress))\n')
self.csock.sendall ('tb = tapBridgeHelper.Install (nsNode, nsDevice)\n')
self.csock.sendall ('tapBridges.append (tb)\n')
def moveIntoNamespace (self):
while True:
self.csock.sendall ('if tapBridges[-1].IsLinkUp():\n csock.sendall ("True")\nelse:\n csock.sendall ("False")\n')
data = self.csock.recv (1024)
if data == "True":
break
else:
sleep (0.1)
RemoteLink.moveIntf (self.name, self.node)
self.node.cmd ('ip link set dev {0} up'.format (self.name))
self.node.cmd ('ip addr add dev {0} {1}/{2}'.format (self.name, self.ip, self.prefixLen))
def cmd (self, *args, **kwargs):
if self.inRightNamespace == True:
return self.node.cmd (*args, **kwargs)
else:
return self.localNode.cmd (*args, **kwargs)
def createTap (self, name):
self.node.cmd ('ip tuntap add {0} mode tap'.format (name))
self.node.cmd ('ip link set dev {0} netns 1'.format (name))
| <filename>mininet/mininet/wifi.py
import socket
from time import sleep
import mininet.node
import mininet.link
from mininet.log import info
from mininet.util import moveIntf
from mininet.cluster.link import RemoteLink
class WIFI (object):
def __init__ (self, enableQos=True, rootSwitch=None, agentIP=None, agentPort=53724):
self.rootSwitch = rootSwitch
self.startAgent ()
self.csock = None
while self.csock == None:
self.csock = self.connectAgent (agentIP, agentPort)
self.tapBridgeIntfs = []
self.csock.sendall ('GlobalValue.Bind ("SimulatorImplementationType", StringValue ("ns3::RealtimeSimulatorImpl"))\n')
self.csock.sendall ('GlobalValue.Bind ("ChecksumEnabled", BooleanValue (True))\n')
self.csock.sendall ('wifihelper = WifiHelper.Default()\n')
self.csock.sendall ('wifihelper.SetStandard (WIFI_PHY_STANDARD_80211g)\n')
self.csock.sendall ('phyhelper = YansWifiPhyHelper.Default()\n')
self.csock.sendall ('channelhelper = YansWifiChannelHelper.Default()\n')
self.csock.sendall ('phyhelper.SetChannel (channelhelper.Create())\n')
if enableQos:
self.csock.sendall ('machelper = QosWifiMacHelper.Default()\n')
else:
self.csock.sendall ('machelper = NqosWifiMacHelper.Default()\n')
self.csock.sendall ('mobilityhelper = MobilityHelper ()\n')
self.csock.sendall ('def run ():\n')
self.csock.sendall (' Simulator.Stop (Seconds (86400))\n')
self.csock.sendall (' Simulator.Run ()\n')
self.csock.sendall ('nsThread = Thread (target = run)\n')
self.csock.sendall ('tapBridges = []\n')
def startAgent (self):
self.rootSwitch.cmd ("/usr/bin/opennet-agent.py start")
def stopAgent (self):
self.rootSwitch.rcmd ("/usr/bin/opennet-agent.py stop")
def connectAgent (self, ip, port):
csock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
try:
info ('*** Connecting to opennet-agent... ')
csock.connect ((ip, port))
except socket.error, exc:
info ('Failed\n')
return None
else:
info ('Successed\n')
return csock
def start (self):
self.csock.sendall ('if nsThread.isAlive ():\n csock.sendall ("True")\nelse:\n csock.sendall ("False")\n')
while True:
data = self.csock.recv (1024)
if data == "True":
info ('*** NS-3 thread is already running\n')
return
elif data == "False":
info ('*** Starting NS-3 thread\n')
break
self.csock.sendall ('nsThread.start ()\n')
info ('*** moveIntoNamespace\n')
for tbIntf in self.tapBridgeIntfs:
info ('{0} '.format (tbIntf.name))
tbIntf.moveIntoNamespace ()
info ('\n')
def stop (self):
self.csock.sendall ('Simulator.Stop (Seconds (1))\n')
self.csock.sendall ('while nsThread.isAlive ():\n sleep (0.1)\n')
def clear (self):
self.csock.sendall ('Simulator.Destroy ()\n')
self.csock.sendall ('exit ()\n')
self.csock.close ()
self.stopAgent ()
def addAdhoc (self, node, mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.csock.sendall ('machelper.SetType ("ns3::AdhocWifiMac")\n')
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobilityhelper.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobilityhelper.Install (nsNode)\n')
if position != None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
if velocity != None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('wifiDev = wifihelper.Install (phyhelper, machelper, nsNode).Get(0)\n')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
tbIntf = self.TapBridgeIntf (intfName, node, port, self.rootSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
def addAP (self, node, channelNumber=1, ssid="default-ssid", mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.csock.sendall ('machelper.SetType ("ns3::ApWifiMac", "Ssid", SsidValue (Ssid("{0}")), "BeaconGeneration", BooleanValue(True), "BeaconInterval", TimeValue(Seconds(2.5)))\n'.format (ssid))
self.csock.sendall ('phyhelper.Set ("ChannelNumber", UintegerValue ({0}))\n'.format (channelNumber))
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobilityhelper.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobilityhelper.Install (nsNode)\n')
if position != None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
if velocity != None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('wifiDev = wifihelper.Install (phyhelper, machelper, nsNode).Get(0)\n')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
tbIntf = self.TapBridgeIntf (intfName, node, port, self.rootSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
def addSta (self, node, channelNumber=1, ssid="default-ssid", mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.csock.sendall ('machelper.SetType ("ns3::StaWifiMac", "Ssid", SsidValue (Ssid("{0}")), "ScanType", EnumValue (StaWifiMac.ACTIVE))\n'.format (ssid))
self.csock.sendall ('phyhelper.Set ("ChannelNumber", UintegerValue ({0}))\n'.format (channelNumber))
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobilityhelper.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobilityhelper.Install (nsNode)\n')
if position != None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
if velocity != None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('wifiDev = wifihelper.Install (phyhelper, machelper, nsNode).Get(0)\n')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
tbIntf = self.TapBridgeIntf (intfName, node, port, self.rootSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
class TapBridgeIntf (mininet.link.Intf):
"""
TapBridgeIntf is a Linux TAP interface, which is bridged with an NS-3 NetDevice.
"""
def __init__ (self, name=None, node=None, port=None, localNode=None, csock=None, **params):
self.name = name
self.node = node
self.localNode = localNode
self.csock = csock
self.createTap (self.name)
self.delayedMove = True
if node.inNamespace == True:
self.inRightNamespace = False
else:
self.inRightNamespace = True
mininet.link.Intf.__init__ (self, name, node, port, **params)
self.csock.sendall ('nsDevice = wifiDev\n')
self.csock.sendall ('tapBridgeHelper = TapBridgeHelper ()\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("Mode", StringValue ("UseLocal"))\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("DeviceName", StringValue ("{0}"))\n'.format (self.name))
self.csock.sendall ('macAddress = Mac48Address.Allocate ()\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("MacAddress", Mac48AddressValue (macAddress))\n')
self.csock.sendall ('tb = tapBridgeHelper.Install (nsNode, nsDevice)\n')
self.csock.sendall ('tapBridges.append (tb)\n')
def moveIntoNamespace (self):
while True:
self.csock.sendall ('if tapBridges[-1].IsLinkUp():\n csock.sendall ("True")\nelse:\n csock.sendall ("False")\n')
data = self.csock.recv (1024)
if data == "True":
break
else:
sleep (0.1)
RemoteLink.moveIntf (self.name, self.node)
self.node.cmd ('ip link set dev {0} up'.format (self.name))
self.node.cmd ('ip addr add dev {0} {1}/{2}'.format (self.name, self.ip, self.prefixLen))
def cmd (self, *args, **kwargs):
if self.inRightNamespace == True:
return self.node.cmd (*args, **kwargs)
else:
return self.localNode.cmd (*args, **kwargs)
def createTap (self, name):
self.node.cmd ('ip tuntap add {0} mode tap'.format (name))
self.node.cmd ('ip link set dev {0} netns 1'.format (name))
| en | 0.964399 | TapBridgeIntf is a Linux TAP interface, which is bridged with an NS-3 NetDevice. | 2.210024 | 2 |
feasability_study/con_utils.py | totonga/wodson | 1 | 6624107 | #!/usr/bin/env python
"""
Access ASAM Ods python using omniorb and wrap it using swagger.
Copyright (c) 2015, <NAME>
License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html)
"""
__author__ = "<NAME>"
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
def add(config, conI, params):
if not config.has_key('cons'):
config['cons'] = {}
config['cons'][conI] = params
def list(config):
rv = []
if config.has_key('cons'):
for conI in config['cons']:
rv.append(conI)
return rv
def delete(config, conI):
if config.has_key('cons'):
if config['cons'].has_key(conI):
del config['cons'][conI]
def update(config, conI, params):
if config.has_key('cons'):
if config['cons'].has_key(conI):
for param in params:
config['cons'][conI][param] = params[param]
return
add(config, conI, params)
def get_params(config, conI):
if config.has_key('cons'):
if config['cons'].has_key(conI):
return config['cons'][conI]
raise SyntaxError('Con "' + conI + '" not defined')
def exists(config, conI):
if config.has_key('cons'):
if config['cons'].has_key(conI):
return True
return False
| #!/usr/bin/env python
"""
Access ASAM Ods python using omniorb and wrap it using swagger.
Copyright (c) 2015, <NAME>
License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html)
"""
__author__ = "<NAME>"
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
def add(config, conI, params):
if not config.has_key('cons'):
config['cons'] = {}
config['cons'][conI] = params
def list(config):
rv = []
if config.has_key('cons'):
for conI in config['cons']:
rv.append(conI)
return rv
def delete(config, conI):
if config.has_key('cons'):
if config['cons'].has_key(conI):
del config['cons'][conI]
def update(config, conI, params):
if config.has_key('cons'):
if config['cons'].has_key(conI):
for param in params:
config['cons'][conI][param] = params[param]
return
add(config, conI, params)
def get_params(config, conI):
if config.has_key('cons'):
if config['cons'].has_key(conI):
return config['cons'][conI]
raise SyntaxError('Con "' + conI + '" not defined')
def exists(config, conI):
if config.has_key('cons'):
if config['cons'].has_key(conI):
return True
return False
| en | 0.431107 | #!/usr/bin/env python Access ASAM Ods python using omniorb and wrap it using swagger. Copyright (c) 2015, <NAME> License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html) | 2.273715 | 2 |
pyowb/tasks.py | fifoforlifo/pyowb | 0 | 6624108 | import sys
import xml.sax.saxutils
from .keywords import *
def xml_escape_attr(string):
return xml.sax.saxutils.quoteattr(string)
def xml_escape_elem(string):
return xml.sax.saxutils.escape(string)
_global_auto_id = 100
def _next_global_auto_id():
global _global_auto_id
auto_id = '_auto' + str(_global_auto_id)
_global_auto_id += 1
return auto_id
def parse_category(name):
index_of_dash = name.find('-')
if index_of_dash == -1:
return ''
return name[0:index_of_dash].rstrip()
def has_children(task):
return (CHILDREN in task) and (len(task[CHILDREN]) != 0)
def sanitize_tasks(plan, id_to_task, add_child_dependencies):
def _sanitize_recursive(task, auto_predecessor_stack):
if ID not in task:
task[ID] = _next_global_auto_id()
id_to_task[task[ID]] = task
if DEPS not in task:
task[DEPS] = []
if add_child_dependencies:
for auto_predecessor_id in auto_predecessor_stack:
if auto_predecessor_id:
task[DEPS].append(auto_predecessor_id)
elif len(auto_predecessor_stack):
auto_predecessor_id = auto_predecessor_stack[-1]
if auto_predecessor_id:
task[DEPS].append(auto_predecessor_id)
children = task.get(CHILDREN, None)
if children:
auto_predecessor_stack.append(None)
in_sequence = False
for child in children:
if child == SEQUENCE:
auto_predecessor_stack[-1] = None
in_sequence = True
elif child == PARALLEL:
auto_predecessor_stack[-1] = None
in_sequence = False
else:
_sanitize_recursive(child, auto_predecessor_stack)
if add_child_dependencies:
task[DEPS].append(child[ID])
if in_sequence:
auto_predecessor_stack[-1] = child[ID]
auto_predecessor_stack.pop()
auto_predecessor_stack = []
_sanitize_recursive(plan, auto_predecessor_stack)
| import sys
import xml.sax.saxutils
from .keywords import *
def xml_escape_attr(string):
return xml.sax.saxutils.quoteattr(string)
def xml_escape_elem(string):
return xml.sax.saxutils.escape(string)
_global_auto_id = 100
def _next_global_auto_id():
global _global_auto_id
auto_id = '_auto' + str(_global_auto_id)
_global_auto_id += 1
return auto_id
def parse_category(name):
index_of_dash = name.find('-')
if index_of_dash == -1:
return ''
return name[0:index_of_dash].rstrip()
def has_children(task):
return (CHILDREN in task) and (len(task[CHILDREN]) != 0)
def sanitize_tasks(plan, id_to_task, add_child_dependencies):
def _sanitize_recursive(task, auto_predecessor_stack):
if ID not in task:
task[ID] = _next_global_auto_id()
id_to_task[task[ID]] = task
if DEPS not in task:
task[DEPS] = []
if add_child_dependencies:
for auto_predecessor_id in auto_predecessor_stack:
if auto_predecessor_id:
task[DEPS].append(auto_predecessor_id)
elif len(auto_predecessor_stack):
auto_predecessor_id = auto_predecessor_stack[-1]
if auto_predecessor_id:
task[DEPS].append(auto_predecessor_id)
children = task.get(CHILDREN, None)
if children:
auto_predecessor_stack.append(None)
in_sequence = False
for child in children:
if child == SEQUENCE:
auto_predecessor_stack[-1] = None
in_sequence = True
elif child == PARALLEL:
auto_predecessor_stack[-1] = None
in_sequence = False
else:
_sanitize_recursive(child, auto_predecessor_stack)
if add_child_dependencies:
task[DEPS].append(child[ID])
if in_sequence:
auto_predecessor_stack[-1] = child[ID]
auto_predecessor_stack.pop()
auto_predecessor_stack = []
_sanitize_recursive(plan, auto_predecessor_stack)
| none | 1 | 2.45612 | 2 | |
useful-scripts/seg2mesh.py | hci-unihd/plant-seg-tools | 0 | 6624109 | import argparse
import glob
import os
import time
from datetime import datetime
import numpy as np
from plantsegtools.meshes.meshes import seg2mesh, seg2mesh_ray
from plantsegtools.meshes.vtkutils import CreateMeshVTK, create_ply
from plantsegtools.utils import TIFF_FORMATS, H5_FORMATS, get_largest_object
def parse():
    """Build and parse the command-line arguments for the seg2mesh pipeline.

    Returns the argparse.Namespace produced by ``parse_args()``.
    """
    parser = argparse.ArgumentParser()
    # Required
    parser.add_argument("--path", type=str, required=True, help='path to a segmentation file or'
                                                                ' to a directory for batch processing'
                                                                ' of multiple stacks')
    # Optional - path setup
    parser.add_argument("--new-base", type=str, help='optional custom saving directory. '
                                                     'If not given the ply will be saved in the same dir as the source')
    parser.add_argument("--h5-dataset", help='h5 internal dataset name. Default: segmentation',
                        default='segmentation')
    parser.add_argument("--labels", help='List of labels to process. By default the script will process all labels',
                        default=None, nargs='+', type=int)
    # Optional - pipeline parameters
    parser.add_argument('--step-size', help='Step size for the marching cube algorithm, '
                                            'larger steps yield a coarser but faster result.'
                                            ' Default 2.', type=int, default=2)
    parser.add_argument("--crop", default=[0, 0, 0, -1, -1, -1], nargs='+', type=int,
                        help='Crop the dataset, takes as input a bounding box. eg --crop 10, 0, 0 15, -1, -1.')
    parser.add_argument("--voxel-size", default=None, nargs='+', type=float,
                        help='Voxel size [Z, Y, X] of the segmentation stack.'
                             ' By default voxel size is read from the source file,'
                             ' if this is not possible voxel-size is set to [1, 1, 1].')
    parser.add_argument('--min-size', help='Minimum cell size. Default 50.', type=int, default=50)
    parser.add_argument('--max-size', help='Maximum cell size. Default inf.', type=int, default=-1)
    parser.add_argument('--relabel', help='If this argument is passed the pipeline will relabel the segmentation.'
                                          ' This will ensure the contiguity of each segment but will change the '
                                          'labels.', action='store_true')
    parser.add_argument('--check-cc', help='If this argument is passed the pipeline will check if each label is'
                                           ' has a single connected component (cc).'
                                           ' If multiple cc are present only the largest will be processed.',
                        action='store_true')
    parser.add_argument('--ignore-labels', help='List of labels to ignore.'
                                                ' By default only background (label 0) is ignored',
                        default=[0], nargs='+', type=int)
    # Optional - mesh processing parameters
    parser.add_argument('--reduction', type=float,
                        help='If reduction > 0 a decimation filter is applied.' ' MaxValue: 1.0 (100%reduction).',
                        default=-.0)
    parser.add_argument('--smoothing', help='To apply a Laplacian smoothing filter.', action='store_true')
    # Optional - multiprocessing
    parser.add_argument('--use-ray', help='If use ray flag is used the multiprocessing flag is managed by ray',
                        action='store_true')
    parser.add_argument('--multiprocessing', help='Define the number of cores to use for parallel processing.'
                                                  ' Default value (-1) will try to parallelize over'
                                                  ' all available processors.', default=-1, type=int)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse()
    print(f"[{datetime.now().strftime('%d-%m-%y %H:%M:%S')}] start pipeline setup, parameters: {vars(args)}")
    # Setup path
    if os.path.isfile(args.path):
        all_files = [args.path]
    elif os.path.isdir(args.path):
        all_files = glob.glob(os.path.join(args.path, f'*{TIFF_FORMATS}'))
        all_files += glob.glob(os.path.join(args.path, f'*{H5_FORMATS}'))
    else:
        raise NotImplementedError
    # Optional - path setups
    list_labels = None if args.labels is None else args.labels
    default_prefix = 'meshes'
    # Optional - pipeline parameters
    min_size = 0 if args.min_size < 0 else args.min_size
    max_size = np.inf if args.max_size < 0 else args.max_size
    preprocessing = get_largest_object if args.check_cc else None
    # NOTE(review): args.crop is parsed but never used below — confirm whether
    # cropping was meant to be applied before meshing.
    # Optional - multiprocessing
    _seg2mesh = seg2mesh_ray if args.use_ray else seg2mesh
    multiprocessing = args.multiprocessing
    # Setup mesh specific utils
    # Seg2mesh mesh backend is completely independent options available are
    # 'from plantsegtools.meshes.vtkutils import CreateMeshVTK, create_ply'
    # 'from plantsegtools.meshes.trimeshutils import CreateTriMesh, create_ply'
    # * trimesh support is experimental, is easier to use than vtk but results are worst
    mesh_processor = CreateMeshVTK(reduction=args.reduction, smoothing=args.smoothing)
    file_writer = create_ply  # NOTE(review): unused — the call below passes create_ply directly
    # main loop
    for i, file_path in enumerate(all_files, 1):
        # just use the same name as original file as base
        base_name = os.path.splitext(os.path.split(file_path)[1])[0]
        if args.new_base is None:
            base_path = os.path.join(os.path.split(file_path)[0], f'{default_prefix}_{base_name}')
        else:
            base_path = os.path.join(args.new_base, f'{default_prefix}_{base_name}')
        print(f"base path: {base_path}")
        print(f"[{datetime.now().strftime('%d-%m-%y %H:%M:%S')}]"
              f" start processing file: {os.path.split(file_path)[1]} ({i}/{len(all_files)})")
        timer = time.time()
        _seg2mesh(file_path,
                  mesh_processing=mesh_processor,
                  file_writer=create_ply,
                  base_name=base_name,
                  base_path=base_path,
                  n_process=multiprocessing,
                  step_size=args.step_size,
                  h5_key=args.h5_dataset,
                  voxel_size=args.voxel_size,
                  preprocessing=preprocessing,
                  min_size=min_size,
                  max_size=max_size,
                  idx_list=list_labels,
                  relabel_cc=args.relabel,
                  ignore_labels=args.ignore_labels
                  )
        print(f"[{datetime.now().strftime('%d-%m-%y %H:%M:%S')}]"
              f" process complete in {time.time() - timer: .2f}s,"
              f" number of ply generated {len(glob.glob(os.path.join(base_path, '*.ply')))}")
        i += 1  # NOTE(review): redundant — enumerate already advances i each iteration
| import argparse
import glob
import os
import time
from datetime import datetime
import numpy as np
from plantsegtools.meshes.meshes import seg2mesh, seg2mesh_ray
from plantsegtools.meshes.vtkutils import CreateMeshVTK, create_ply
from plantsegtools.utils import TIFF_FORMATS, H5_FORMATS, get_largest_object
def parse():
parser = argparse.ArgumentParser()
# Required
parser.add_argument("--path", type=str, required=True, help='path to a segmentation file or'
' to a directory for batch processing'
' of multiple stacks')
# Optional - path setup
parser.add_argument("--new-base", type=str, help='optional custom saving directory. '
'If not given the ply will be saved in the same dir as the source')
parser.add_argument("--h5-dataset", help='h5 internal dataset name. Default: segmentation',
default='segmentation')
parser.add_argument("--labels", help='List of labels to process. By default the script will process all labels',
default=None, nargs='+', type=int)
# Optional - pipeline parameters
parser.add_argument('--step-size', help='Step size for the marching cube algorithm, '
'larger steps yield a coarser but faster result.'
' Default 2.', type=int, default=2)
parser.add_argument("--crop", default=[0, 0, 0, -1, -1, -1], nargs='+', type=int,
help='Crop the dataset, takes as input a bounding box. eg --crop 10, 0, 0 15, -1, -1.')
parser.add_argument("--voxel-size", default=None, nargs='+', type=float,
help='Voxel size [Z, Y, X] of the segmentation stack.'
' By default voxel size is read from the source file,'
' if this is not possible voxel-size is set to [1, 1, 1].')
parser.add_argument('--min-size', help='Minimum cell size. Default 50.', type=int, default=50)
parser.add_argument('--max-size', help='Maximum cell size. Default inf.', type=int, default=-1)
parser.add_argument('--relabel', help='If this argument is passed the pipeline will relabel the segmentation.'
' This will ensure the contiguity of each segment but will change the '
'labels.', action='store_true')
parser.add_argument('--check-cc', help='If this argument is passed the pipeline will check if each label is'
' has a single connected component (cc).'
' If multiple cc are present only the largest will be processed.',
action='store_true')
parser.add_argument('--ignore-labels', help='List of labels to ignore.'
' By default only background (label 0) is ignored',
default=[0], nargs='+', type=int)
# Optional - mesh processing parameters
parser.add_argument('--reduction', type=float,
help='If reduction > 0 a decimation filter is applied.' ' MaxValue: 1.0 (100%reduction).',
default=-.0)
parser.add_argument('--smoothing', help='To apply a Laplacian smoothing filter.', action='store_true')
# Optional - multiprocessing
parser.add_argument('--use-ray', help='If use ray flag is used the multiprocessing flag is managed by ray',
action='store_true')
parser.add_argument('--multiprocessing', help='Define the number of cores to use for parallel processing.'
' Default value (-1) will try to parallelize over'
' all available processors.', default=-1, type=int)
return parser.parse_args()
if __name__ == '__main__':
args = parse()
print(f"[{datetime.now().strftime('%d-%m-%y %H:%M:%S')}] start pipeline setup, parameters: {vars(args)}")
# Setup path
if os.path.isfile(args.path):
all_files = [args.path]
elif os.path.isdir(args.path):
all_files = glob.glob(os.path.join(args.path, f'*{TIFF_FORMATS}'))
all_files += glob.glob(os.path.join(args.path, f'*{H5_FORMATS}'))
else:
raise NotImplementedError
# Optional - path setups
list_labels = None if args.labels is None else args.labels
default_prefix = 'meshes'
# Optional - pipeline parameters
min_size = 0 if args.min_size < 0 else args.min_size
max_size = np.inf if args.max_size < 0 else args.max_size
preprocessing = get_largest_object if args.check_cc else None
# Optional - multiprocessing
_seg2mesh = seg2mesh_ray if args.use_ray else seg2mesh
multiprocessing = args.multiprocessing
# Setup mesh specific utils
# Seg2mesh mesh backend is completely independent options available are
# 'from plantsegtools.meshes.vtkutils import CreateMeshVTK, create_ply'
# 'from plantsegtools.meshes.trimeshutils import CreateTriMesh, create_ply'
# * trimesh support is experimental, is easier to use than vtk but results are worst
mesh_processor = CreateMeshVTK(reduction=args.reduction, smoothing=args.smoothing)
file_writer = create_ply
# main loop
for i, file_path in enumerate(all_files, 1):
# just use the same name as original file as base
base_name = os.path.splitext(os.path.split(file_path)[1])[0]
if args.new_base is None:
base_path = os.path.join(os.path.split(file_path)[0], f'{default_prefix}_{base_name}')
else:
base_path = os.path.join(args.new_base, f'{default_prefix}_{base_name}')
print(f"base path: {base_path}")
print(f"[{datetime.now().strftime('%d-%m-%y %H:%M:%S')}]"
f" start processing file: {os.path.split(file_path)[1]} ({i}/{len(all_files)})")
timer = time.time()
_seg2mesh(file_path,
mesh_processing=mesh_processor,
file_writer=create_ply,
base_name=base_name,
base_path=base_path,
n_process=multiprocessing,
step_size=args.step_size,
h5_key=args.h5_dataset,
voxel_size=args.voxel_size,
preprocessing=preprocessing,
min_size=min_size,
max_size=max_size,
idx_list=list_labels,
relabel_cc=args.relabel,
ignore_labels=args.ignore_labels
)
print(f"[{datetime.now().strftime('%d-%m-%y %H:%M:%S')}]"
f" process complete in {time.time() - timer: .2f}s,"
f" number of ply generated {len(glob.glob(os.path.join(base_path, '*.ply')))}")
i += 1
| en | 0.613348 | # Required # Optional - path setup # Optional - pipeline parameters # Optional - mesh processing parameters # Optional - multiprocessing # Setup path # Optional - path setups # Optional - pipeline parameters # Optional - multiprocessing # Setup mesh specific utils # Seg2mesh mesh backend is completely independent options available are # 'from plantsegtools.meshes.vtkutils import CreateMeshVTK, create_ply' # 'from plantsegtools.meshes.trimeshutils import CreateTriMesh, create_ply' # * trimesh support is experimental, is easier to use than vtk but results are worst # main loop # just use the same name as original file as base | 2.447163 | 2 |
cookdemo/urls.py | ChenJnHui/git_demo | 0 | 6624110 | <filename>cookdemo/urls.py
from django.urls import re_path
from . import views
# Cookie demo routes: one endpoint writes a cookie, the other reads it back.
urlpatterns = [
    re_path(r'^setcook/$', views.setcookfunc),
    re_path(r'^getcook/$', views.getcookfunc),
] | <filename>cookdemo/urls.py
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r'^setcook/$', views.setcookfunc),
re_path(r'^getcook/$', views.getcookfunc),
] | none | 1 | 1.610374 | 2 | |
Projects/CS_VQE/old_scripts/myriad_script_NEW_implementation.py | AlexisRalli/VQE-code | 1 | 6624111 | <reponame>AlexisRalli/VQE-code<filename>Projects/CS_VQE/old_scripts/myriad_script_NEW_implementation.py
import numpy as np
import cs_vqe as c
from copy import deepcopy as copy
from tqdm import tqdm
import pickle
import datetime
import quchem.Misc_functions.conversion_scripts as conv_scr
import cs_vqe_with_LCU as c_LCU
import ast
import matplotlib.pyplot as plt
import os
# working_dir = os.getcwd()
working_dir = os.path.dirname(os.path.abspath(__file__))  # gets directory where running python file is!
data_dir = os.path.join(working_dir, 'data')
data_hamiltonians_file = os.path.join(data_dir, 'hamiltonians.txt')
print('start time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')))
print('working directory:', working_dir)
# hamiltonians.txt holds a dict literal mapping species name to a tuple whose
# entries [1]..[4] are used below as n_qubits, ham, ham_noncon, true_gs.
with open(data_hamiltonians_file, 'r') as input_file:
    hamiltonians = ast.literal_eval(input_file.read())
for key in hamiltonians.keys():
    print(f"{key: <25} n_qubits: {hamiltonians[key][1]:<5.0f}")
##### OLD WAY ####
# Baseline CS-VQE approximation for every molecule in the data file.
csvqe_results = {}
for speciesname in hamiltonians.keys():
    n_qubits = hamiltonians[speciesname][1]
    ham = hamiltonians[speciesname][2]
    ham_noncon = hamiltonians[speciesname][3]
    true_gs = hamiltonians[speciesname][4]
    print(speciesname,n_qubits)
    csvqe_out = c.csvqe_approximations_heuristic(ham, ham_noncon, n_qubits, true_gs)
    csvqe_results[speciesname] = csvqe_out
    print(' best order:',csvqe_out[3])
    print(' resulting errors:',csvqe_out[2],'\n')
####### SAVE OUTPUT details
unique_file_time = datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')
output_dir = os.path.join(working_dir, 'Pickle_out')
# NOTE(review): assumes Pickle_out/ already exists — open() below fails otherwise.
########
####### SAVE OUTPUT
file_name1 = 'standard_CS_VQE_exp__{}.pickle'.format(unique_file_time)
file_out1=os.path.join(output_dir, file_name1)
with open(file_out1, 'wb') as outfile:
    pickle.dump(csvqe_results, outfile)
##### NEW IMPLEMENTATION ####
# Same sweep with the LCU-based implementation, for comparison with the baseline.
N_index=0
csvqe_results_NEW = {}
for speciesname in hamiltonians.keys():
    n_qubits = hamiltonians[speciesname][1]
    ham = hamiltonians[speciesname][2]
    ham_noncon = hamiltonians[speciesname][3]
    true_gs = hamiltonians[speciesname][4]
    print(speciesname,n_qubits)
    csvqe_out = c_LCU.csvqe_approximations_heuristic_LCU(
        ham,
        ham_noncon,
        n_qubits,
        true_gs,
        N_index,
        check_reduction=False) ### <--- change for paper!
    csvqe_results_NEW[speciesname] = csvqe_out
    print(' best order:',csvqe_out[3])
    print(' resulting errors:',csvqe_out[2],'\n')
####### SAVE OUTPUT
file_name2 = 'NEW_method_CS_VQE_exp__{}.pickle'.format(unique_file_time)
file_out2=os.path.join(output_dir, file_name2)
with open(file_out2, 'wb') as outfile:
    pickle.dump(csvqe_results_NEW, outfile)
print('pickle files dumped unqiue time id: {}'.format(unique_file_time))
print('end time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f'))) | import numpy as np
import cs_vqe as c
from copy import deepcopy as copy
from tqdm import tqdm
import pickle
import datetime
import quchem.Misc_functions.conversion_scripts as conv_scr
import cs_vqe_with_LCU as c_LCU
import ast
import matplotlib.pyplot as plt
import os
# working_dir = os.getcwd()
working_dir = os.path.dirname(os.path.abspath(__file__)) # gets directory where running python file is!
data_dir = os.path.join(working_dir, 'data')
data_hamiltonians_file = os.path.join(data_dir, 'hamiltonians.txt')
print('start time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')))
print('working directory:', working_dir)
with open(data_hamiltonians_file, 'r') as input_file:
hamiltonians = ast.literal_eval(input_file.read())
for key in hamiltonians.keys():
print(f"{key: <25} n_qubits: {hamiltonians[key][1]:<5.0f}")
##### OLD WAY ####
csvqe_results = {}
for speciesname in hamiltonians.keys():
n_qubits = hamiltonians[speciesname][1]
ham = hamiltonians[speciesname][2]
ham_noncon = hamiltonians[speciesname][3]
true_gs = hamiltonians[speciesname][4]
print(speciesname,n_qubits)
csvqe_out = c.csvqe_approximations_heuristic(ham, ham_noncon, n_qubits, true_gs)
csvqe_results[speciesname] = csvqe_out
print(' best order:',csvqe_out[3])
print(' resulting errors:',csvqe_out[2],'\n')
####### SAVE OUTPUT details
unique_file_time = datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')
output_dir = os.path.join(working_dir, 'Pickle_out')
########
####### SAVE OUTPUT
file_name1 = 'standard_CS_VQE_exp__{}.pickle'.format(unique_file_time)
file_out1=os.path.join(output_dir, file_name1)
with open(file_out1, 'wb') as outfile:
pickle.dump(csvqe_results, outfile)
##### NEW IMPLEMENTATION ####
N_index=0
csvqe_results_NEW = {}
for speciesname in hamiltonians.keys():
n_qubits = hamiltonians[speciesname][1]
ham = hamiltonians[speciesname][2]
ham_noncon = hamiltonians[speciesname][3]
true_gs = hamiltonians[speciesname][4]
print(speciesname,n_qubits)
csvqe_out = c_LCU.csvqe_approximations_heuristic_LCU(
ham,
ham_noncon,
n_qubits,
true_gs,
N_index,
check_reduction=False) ### <--- change for paper!
csvqe_results_NEW[speciesname] = csvqe_out
print(' best order:',csvqe_out[3])
print(' resulting errors:',csvqe_out[2],'\n')
####### SAVE OUTPUT
file_name2 = 'NEW_method_CS_VQE_exp__{}.pickle'.format(unique_file_time)
file_out2=os.path.join(output_dir, file_name2)
with open(file_out2, 'wb') as outfile:
pickle.dump(csvqe_results_NEW, outfile)
print('pickle files dumped unqiue time id: {}'.format(unique_file_time))
print('end time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f'))) | en | 0.23116 | # working_dir = os.getcwd() # gets directory where running python file is! ##### OLD WAY #### ####### SAVE OUTPUT details ######## ####### SAVE OUTPUT ##### NEW IMPLEMENTATION #### ### <--- change for paper! ####### SAVE OUTPUT | 2.065171 | 2 |
drf_handlers/__version__.py | thomas545/drf-errors-formatter | 1 | 6624112 | <filename>drf_handlers/__version__.py
# Package metadata constants for drf-errors-formatter.
__title__ = 'drf-errors-formatter'
__description__ = 'Format errors in Django Rest Framework.'
__url__ = 'https://github.com/thomas545/drf-errors-formatter'
__version__ = '1.0.1'
__author__ = '@thomas545 https://github.com/thomas545'
__author_email__ = '<EMAIL>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021 @thomas545 https://github.com/thomas545'
| <filename>drf_handlers/__version__.py
__title__ = 'drf-errors-formatter'
__description__ = 'Format errors in Django Rest Framework.'
__url__ = 'https://github.com/thomas545/drf-errors-formatter'
__version__ = '1.0.1'
__author__ = '@thomas545 https://github.com/thomas545'
__author_email__ = '<EMAIL>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021 @thomas545 https://github.com/thomas545'
| none | 1 | 1.50477 | 2 | |
tests/04_parser/test_parser.py | krystophny/pytchfort | 6 | 6624113 | import os
import pytest
from shutil import copy
from fffi import FortranModule
dtypes = ['logical', 'integer', 'real', 'complex']  # Fortran intrinsic types exercised by the parser tests
@pytest.fixture(scope='module')
def cwd():
    """Directory containing this test file (where the Fortran sources live)."""
    return os.path.dirname(__file__)
@pytest.fixture
def fort_mod(tmp_path, cwd):
    """Build the Fortran test library in a temp dir and wrap it as a FortranModule."""
    copy(os.path.join(cwd, 'test_parser.f90'), tmp_path)
    copy(os.path.join(cwd, 'Makefile'), tmp_path)
    os.chdir(tmp_path)
    os.system('make')  # NOTE(review): build failures are not checked; consider subprocess.run(..., check=True)
    return FortranModule('test_parser', 'test_parser_mod', path=tmp_path)
def test_module(fort_mod):
    """A module whose subroutines use every legal 'end [subroutine [name]]' spelling parses."""
    fort_mod.fdef("""\
module mod_test
contains
subroutine test(x)
real :: x
x = 1.
end subroutine test
subroutine test2(x)
real :: x
x = 1.
end subroutine
subroutine test3(x)
real :: x
x = 1.
end
subroutine test4(x)
real :: x
x = 1.
end subroutine test4
subroutine test5(x)
real :: x
x = 1.
end
end module mod_test
""")
def test_module_end(fort_mod):
    """A module terminated by a bare 'end' (and bare-'end' subroutines) parses."""
    fort_mod.fdef("""\
module mod_test
contains
subroutine test(x)
real :: x
x = 1.
end
subroutine test2(x)
real :: x
x = 1.
end
subroutine test3(x)
real :: x
x = 1.
end
subroutine test4(x)
real :: x
x = 1.
end
subroutine test5(x)
real :: x
x = 1.
end
end
""")
def test_subroutine(fort_mod):
    """Free-standing subroutines (no enclosing module) parse and emit C source."""
    fort_mod.fdef("""\
subroutine test(x)
real :: x
end subroutine test
subroutine test2(x)
real :: x
end subroutine
subroutine test3(x)
real :: x
end
subroutine test4(x)
real :: x
end subroutine test4
subroutine test5(x)
real :: x
end
""")
    assert fort_mod.csource
def test_scalar_types(fort_mod):
    """Each intrinsic scalar type is accepted as a dummy argument."""
    for dtype in dtypes:
        fort_mod.fdef("""\
subroutine test_{0}(x)
{0} :: x
end subroutine
""".format(dtype))
    assert fort_mod.csource
def test_scalar_types_kind(fort_mod):
    """Kind-4 scalars are accepted; the unsupported logical(8) raises KeyError."""
    for dtype in dtypes:
        fort_mod.fdef("""\
subroutine test_{0}(x)
{0}(4) :: x
end subroutine
""".format(dtype))
    assert fort_mod.csource
    # This should throw a KeyError for logical(8) and pass otherwise
    with pytest.raises(KeyError, match=r"('logical', 8)"):
        for dtype in dtypes:
            fort_mod.fdef("""\
subroutine test_{0}(x)
{0}(8) :: x
end subroutine
""".format(dtype))
def test_array_types(fort_mod):
    """Assumed-shape arrays parse, both inline and via dimension attributes."""
    # NOTE(review): the subroutine is named test_logical for every dtype — confirm intent.
    for dtype in dtypes:
        fort_mod.fdef("""\
subroutine test_logical(x, y, z)
{0} :: x(:)
{0}, dimension(:) :: y, z
end subroutine
""".format(dtype))
    assert fort_mod.csource
def test_comment(fort_mod):
    """Full-line and trailing '!' comments are tolerated by the parser."""
    fort_mod.fdef("""\
! Resonant transport regimes in tokamaks
! in the action-angle formalism
! <NAME>, 2015-2017
integer :: i ! This is an integer
""")
    assert fort_mod.csource
def test_use(fort_mod):
    """A 'use' statement inside a subroutine body parses."""
    fort_mod.fdef("""\
subroutine a
use mod_tets
end subroutine
""")
    assert fort_mod.csource
| import os
import pytest
from shutil import copy
from fffi import FortranModule
dtypes = ['logical', 'integer', 'real', 'complex']
@pytest.fixture(scope='module')
def cwd():
return os.path.dirname(__file__)
@pytest.fixture
def fort_mod(tmp_path, cwd):
copy(os.path.join(cwd, 'test_parser.f90'), tmp_path)
copy(os.path.join(cwd, 'Makefile'), tmp_path)
os.chdir(tmp_path)
os.system('make')
return FortranModule('test_parser', 'test_parser_mod', path=tmp_path)
def test_module(fort_mod):
fort_mod.fdef("""\
module mod_test
contains
subroutine test(x)
real :: x
x = 1.
end subroutine test
subroutine test2(x)
real :: x
x = 1.
end subroutine
subroutine test3(x)
real :: x
x = 1.
end
subroutine test4(x)
real :: x
x = 1.
end subroutine test4
subroutine test5(x)
real :: x
x = 1.
end
end module mod_test
""")
def test_module_end(fort_mod):
fort_mod.fdef("""\
module mod_test
contains
subroutine test(x)
real :: x
x = 1.
end
subroutine test2(x)
real :: x
x = 1.
end
subroutine test3(x)
real :: x
x = 1.
end
subroutine test4(x)
real :: x
x = 1.
end
subroutine test5(x)
real :: x
x = 1.
end
end
""")
def test_subroutine(fort_mod):
fort_mod.fdef("""\
subroutine test(x)
real :: x
end subroutine test
subroutine test2(x)
real :: x
end subroutine
subroutine test3(x)
real :: x
end
subroutine test4(x)
real :: x
end subroutine test4
subroutine test5(x)
real :: x
end
""")
assert fort_mod.csource
def test_scalar_types(fort_mod):
for dtype in dtypes:
fort_mod.fdef("""\
subroutine test_{0}(x)
{0} :: x
end subroutine
""".format(dtype))
assert fort_mod.csource
def test_scalar_types_kind(fort_mod):
for dtype in dtypes:
fort_mod.fdef("""\
subroutine test_{0}(x)
{0}(4) :: x
end subroutine
""".format(dtype))
assert fort_mod.csource
# This should throw a KeyError for logical(8) and pass otherwise
with pytest.raises(KeyError, match=r"('logical', 8)"):
for dtype in dtypes:
fort_mod.fdef("""\
subroutine test_{0}(x)
{0}(8) :: x
end subroutine
""".format(dtype))
def test_array_types(fort_mod):
for dtype in dtypes:
fort_mod.fdef("""\
subroutine test_logical(x, y, z)
{0} :: x(:)
{0}, dimension(:) :: y, z
end subroutine
""".format(dtype))
assert fort_mod.csource
def test_comment(fort_mod):
fort_mod.fdef("""\
! Resonant transport regimes in tokamaks
! in the action-angle formalism
! <NAME>, 2015-2017
integer :: i ! This is an integer
""")
assert fort_mod.csource
def test_use(fort_mod):
fort_mod.fdef("""\
subroutine a
use mod_tets
end subroutine
""")
assert fort_mod.csource
| en | 0.473279 | \ module mod_test contains subroutine test(x) real :: x x = 1. end subroutine test subroutine test2(x) real :: x x = 1. end subroutine subroutine test3(x) real :: x x = 1. end subroutine test4(x) real :: x x = 1. end subroutine test4 subroutine test5(x) real :: x x = 1. end end module mod_test \ module mod_test contains subroutine test(x) real :: x x = 1. end subroutine test2(x) real :: x x = 1. end subroutine test3(x) real :: x x = 1. end subroutine test4(x) real :: x x = 1. end subroutine test5(x) real :: x x = 1. end end \ subroutine test(x) real :: x end subroutine test subroutine test2(x) real :: x end subroutine subroutine test3(x) real :: x end subroutine test4(x) real :: x end subroutine test4 subroutine test5(x) real :: x end \ subroutine test_{0}(x) {0} :: x end subroutine \ subroutine test_{0}(x) {0}(4) :: x end subroutine # This should throw a KeyError for logical(8) and pass otherwise \ subroutine test_{0}(x) {0}(8) :: x end subroutine \ subroutine test_logical(x, y, z) {0} :: x(:) {0}, dimension(:) :: y, z end subroutine \ ! Resonant transport regimes in tokamaks ! in the action-angle formalism ! <NAME>, 2015-2017 integer :: i ! This is an integer \ subroutine a use mod_tets end subroutine | 1.974597 | 2 |
monitor/cube.py | UWCubeSat/bairing | 0 | 6624114 | <reponame>UWCubeSat/bairing<gh_stars>0
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
# Unit-cube corner coordinates (indices 0-7).  Entries 8 and 9 lie on the +z
# axis and form a separate line segment — see the (8, 9) edge below — that
# marks the cube's facing direction when rendered.
vertices = (
    (1, -1, -1),
    (1, 1, -1),
    (-1, 1, -1),
    (-1, -1, -1),
    (1, -1, 1),
    (1, 1, 1),
    (-1, -1, 1),
    (-1, 1, 1),
    (0, 0, 1),
    (0, 0, 2)
    )
# Vertex-index pairs: the 12 cube edges plus the (8, 9) direction indicator.
edges = (
    (0,1),
    (0,3),
    (0,4),
    (2,1),
    (2,3),
    (2,7),
    (6,3),
    (6,4),
    (6,7),
    (5,1),
    (5,4),
    (5,7),
    (8,9)
    )
def cube():
    """Draw every edge in ``edges`` (the wireframe cube plus its direction
    indicator) as GL line segments using the current modelview matrix."""
    glBegin(GL_LINES)
    for edge in edges:
        for vertex in edge:
            glVertex3fv(vertices[vertex])
    glEnd()
def q_to_mat4(q):
    """Build a 4x4 homogeneous rotation matrix (float32) from the
    quaternion q = (w, x, y, z)."""
    w, x, y, z = q
    xx, yy, zz = x * x, y * y, z * z
    xy, xz, yz = x * y, x * z, y * z
    wx, wy, wz = w * x, w * y, w * z
    return np.array(
        [[1 - 2 * yy - 2 * zz, 2 * xy - 2 * wz,     2 * xz + 2 * wy,     0],
         [2 * xy + 2 * wz,     1 - 2 * xx - 2 * zz, 2 * yz - 2 * wx,     0],
         [2 * xz - 2 * wy,     2 * yz + 2 * wx,     1 - 2 * xx - 2 * yy, 0],
         [0, 0, 0, 1]], 'f')
class CubeRenderer:
    """Owns the pygame/OpenGL window and redraws the wireframe cube on demand."""
    def __init__(self):
        # Open a double-buffered OpenGL window.
        pygame.init()
        self.display = (1280, 768)  # window size in pixels (width, height)
        pygame.display.set_mode(self.display, DOUBLEBUF | OPENGL)
    def render(self, q):
        """Draw one frame with the cube oriented by quaternion q = (w, x, y, z)."""
        # Let a window-close event terminate the program cleanly.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        # Rebuild the current matrix from scratch each frame: perspective
        # projection, pull the camera back 5 units, flip 180° about x, then
        # apply the quaternion rotation.
        glLoadIdentity();
        gluPerspective(45, (self.display[0]/self.display[1]), 0.1, 50.0)
        glTranslatef(0.0,0.0, -5)
        glRotatef(180, 1, 0, 0)
        glMultMatrixf(q_to_mat4(q))
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        cube()
        pygame.display.flip()
def main():
    """Render a fixed orientation (quaternion w=y≈0.7071, i.e. ~90° about y)
    in an endless loop, pausing 10 ms per frame."""
    renderer = CubeRenderer()
    while True:
        renderer.render([0.7071, 0.0, 0.7071, 0.0])
        pygame.time.wait(10)
if __name__ == '__main__':
main() | import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
vertices = (
(1, -1, -1),
(1, 1, -1),
(-1, 1, -1),
(-1, -1, -1),
(1, -1, 1),
(1, 1, 1),
(-1, -1, 1),
(-1, 1, 1),
(0, 0, 1),
(0, 0, 2)
)
edges = (
(0,1),
(0,3),
(0,4),
(2,1),
(2,3),
(2,7),
(6,3),
(6,4),
(6,7),
(5,1),
(5,4),
(5,7),
(8,9)
)
def cube():
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(vertices[vertex])
glEnd()
def q_to_mat4(q):
w, x, y, z = q
return np.array(
[[1 - 2*y*y - 2*z*z, 2*x*y - 2*z*w, 2*x*z + 2*y*w, 0],
[2*x*y + 2*z*w, 1 - 2*x*x - 2*z*z, 2*y*z - 2*x*w, 0],
[2*x*z - 2*y*w, 2*y*z + 2*x*w, 1 - 2*x*x - 2*y*y, 0],
[0, 0, 0, 1] ],'f')
class CubeRenderer:
def __init__(self):
pygame.init()
self.display = (1280, 768)
pygame.display.set_mode(self.display, DOUBLEBUF | OPENGL)
def render(self, q):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
glLoadIdentity();
gluPerspective(45, (self.display[0]/self.display[1]), 0.1, 50.0)
glTranslatef(0.0,0.0, -5)
glRotatef(180, 1, 0, 0)
glMultMatrixf(q_to_mat4(q))
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
cube()
pygame.display.flip()
def main():
renderer = CubeRenderer()
while True:
renderer.render([0.7071, 0.0, 0.7071, 0.0])
pygame.time.wait(10)
if __name__ == '__main__':
main() | none | 1 | 2.832299 | 3 | |
GR_BASS/launchBASS.py | oliviermirat/BASS | 1 | 6624115 | import sys
sys.path.insert(1, './BASS_only_original/')
import bass as md
import numpy as np
import os
import pickle
def getPosteriorProbabilities(data_explo_hmm, lengths_explo_hmm, model_fit):
    """Return entropy-normalized emission likelihoods for each frame.

    Truncates the data to the total of the given sequence lengths, estimates
    the per-sample entropy from the model score, and returns
    exp(log-likelihood) / exp(-entropy) per frame and state.
    NOTE(review): calls the model's private ``_compute_log_likelihood`` —
    may break across library versions; confirm against the HMM backend used.
    """
    lengths_explo = lengths_explo_hmm[:]
    data_explo = data_explo_hmm[:np.sum(lengths_explo)]
    Hexplo = -model_fit.score(data_explo,0)/len(data_explo) #entropy
    Yexplo = np.exp(model_fit._compute_log_likelihood(data_explo))/np.exp(-Hexplo)
    return Yexplo
def launchBASS(Yexplo, lengths_explo_hmm, model_fit, eps, p_d, Jthr, seed):
    """Run BASS dictionary learning on the normalized likelihood sequences.

    Yexplo -- per-frame values (samples x states) as produced by
        getPosteriorProbabilities
    lengths_explo_hmm -- lengths of the individual sequences in Yexplo
    model_fit -- fitted model forwarded to the BASS solver
    eps, p_d, Jthr -- BASS hyper-parameters passed into the params vector
    seed -- not referenced in this body; presumably kept for interface
        compatibility (NOTE(review): confirm)

    Returns [probabilities, instance counts, motifs], sorted by decreasing
    motif probability.
    """
    lengths_explo = lengths_explo_hmm[:]
    # Fixed solver settings packed into the params vector below.
    w_thr = 1e-4
    p_ins = 0.2
    mu = 1.0
    H_beta_fac = 0
    Sigma = Yexplo.shape[1]
    std = 0.05
    params = np.array([eps,p_d,p_ins, mu, w_thr,H_beta_fac, Jthr, Sigma, std], dtype =float)
    lengths_explo = lengths_explo.astype(int)
    print("launching solve dictionary")
    P_w_explo, w_dict_explo = md.solve_dictionary(Yexplo,lengths_explo,params,model_fit,7)
    print("FINISH solve dictionary")
    print("Explo dictionary")
    md.print_dict(Yexplo,w_dict_explo,P_w_explo)
    # transmat_, stationary_probs_ = md.compute_transmat(Yexplo)
    # a,b,c = md.test_for_markovianity(Yexplo,w_dict_explo,eps,p_d,transmat_, stationary_probs_)
    P_w_explo2 = []
    nbInstances = []
    w_dict_explo2 = []
    # Sort motifs by decreasing probability; estimate occurrence counts as
    # probability mass * total frames / mean motif length.
    sorted_ = np.argsort(-P_w_explo)
    lengths = [len(w) for w in w_dict_explo]
    lmean = np.mean(lengths)
    for i in sorted_[:]:
        P_w_explo2.append(P_w_explo[i])
        nbInstances.append(int(P_w_explo[i]*len(Yexplo)/lmean))
        w_dict_explo2.append(w_dict_explo[i])
    return [P_w_explo2, nbInstances, w_dict_explo2]
def saveBASSresults(dataframeOptions, P_w_explo_All, nbInstances_All, w_dict_explo_All):
    """Persist BASS results both as a pickle and as a readable text file.

    Writes results/<nameOfFile>/BASSresults (pickle of the three lists) and
    results/<nameOfFile>/BASSresults.txt (one "prob , count , motif" line per
    dictionary entry, grouped by condition index).  Creates the output
    directory (including the 'results' parent) if needed.
    """
    out_dir = os.path.join('results', dataframeOptions['nameOfFile'])
    # makedirs with exist_ok also creates a missing 'results/' parent, which
    # the original os.mkdir could not, and avoids a check-then-create race.
    os.makedirs(out_dir, exist_ok=True)
    # Context managers guarantee the files are closed even if a write fails.
    with open(os.path.join(out_dir, 'BASSresults'), 'wb') as outfile:
        pickle.dump([P_w_explo_All, nbInstances_All, w_dict_explo_All], outfile)
    with open(os.path.join(out_dir, 'BASSresults.txt'), "w+") as f:
        for i in range(0, len(P_w_explo_All)):
            f.write("Condition:" + str(i) + "\n")
            for j in range(0, len(P_w_explo_All[i])):
                f.write(str(P_w_explo_All[i][j]) + " , " + str(nbInstances_All[i][j]) + " , " + str(w_dict_explo_All[i][j]) + "\n")
            f.write("\n")
def saveBASSresultsAddClassNames(dataframeOptions, P_w_explo_All, nbInstances_All, w_dict_explo_All, classNamesConvertion):
    """Write BASS results as text with motif entries translated to class names.

    Produces results/<nameOfFile>/BASSresultsClassNames.txt, mirroring the
    layout of saveBASSresults but mapping each motif element through
    ``classNamesConvertion``.  Creates the output directory (including the
    'results' parent) if needed.
    """
    out_dir = os.path.join('results', dataframeOptions['nameOfFile'])
    # Also creates a missing 'results/' parent (the original os.mkdir could not).
    os.makedirs(out_dir, exist_ok=True)
    # Context manager guarantees the file is closed even if a write fails.
    with open(os.path.join(out_dir, 'BASSresultsClassNames.txt'), "w+") as f:
        for i in range(0, len(P_w_explo_All)):
            f.write("Condition:" + str(i) + "\n")
            for j in range(0, len(P_w_explo_All[i])):
                named_motif = [classNamesConvertion[c] for c in w_dict_explo_All[i][j]]
                f.write(str(P_w_explo_All[i][j]) + " , " + str(nbInstances_All[i][j]) + " , " + str(named_motif) + "\n")
            f.write("\n")
def reloadBASSresults(dataframeOptions):
    """Load the pickled BASS results written by saveBASSresults.

    Returns the stored [P_w_explo_All, nbInstances_All, w_dict_explo_All].
    """
    path = os.path.join('results', dataframeOptions['nameOfFile'], 'BASSresults')
    # Context manager closes the handle even if unpickling raises
    # (the original left the file open on error).
    with open(path, 'rb') as infile:
        results = pickle.load(infile)
    # res = {"P_w_explo_All" : results[0], "nbInstances_All" : results[1], "w_dict_explo_All" : results[2]}
    return results
| import sys
sys.path.insert(1, './BASS_only_original/')
import bass as md
import numpy as np
import os
import pickle
def getPosteriorProbabilities(data_explo_hmm, lengths_explo_hmm, model_fit):
lengths_explo = lengths_explo_hmm[:]
data_explo = data_explo_hmm[:np.sum(lengths_explo)]
Hexplo = -model_fit.score(data_explo,0)/len(data_explo) #entropy
Yexplo = np.exp(model_fit._compute_log_likelihood(data_explo))/np.exp(-Hexplo)
return Yexplo
def launchBASS(Yexplo, lengths_explo_hmm, model_fit, eps, p_d, Jthr, seed):
lengths_explo = lengths_explo_hmm[:]
w_thr = 1e-4
p_ins = 0.2
mu = 1.0
H_beta_fac = 0
Sigma = Yexplo.shape[1]
std = 0.05
params = np.array([eps,p_d,p_ins, mu, w_thr,H_beta_fac, Jthr, Sigma, std], dtype =float)
lengths_explo = lengths_explo.astype(int)
print("launching solve dictionary")
P_w_explo, w_dict_explo = md.solve_dictionary(Yexplo,lengths_explo,params,model_fit,7)
print("FINISH solve dictionary")
print("Explo dictionary")
md.print_dict(Yexplo,w_dict_explo,P_w_explo)
# transmat_, stationary_probs_ = md.compute_transmat(Yexplo)
# a,b,c = md.test_for_markovianity(Yexplo,w_dict_explo,eps,p_d,transmat_, stationary_probs_)
P_w_explo2 = []
nbInstances = []
w_dict_explo2 = []
sorted_ = np.argsort(-P_w_explo)
lengths = [len(w) for w in w_dict_explo]
lmean = np.mean(lengths)
for i in sorted_[:]:
P_w_explo2.append(P_w_explo[i])
nbInstances.append(int(P_w_explo[i]*len(Yexplo)/lmean))
w_dict_explo2.append(w_dict_explo[i])
return [P_w_explo2, nbInstances, w_dict_explo2]
def saveBASSresults(dataframeOptions, P_w_explo_All, nbInstances_All, w_dict_explo_All):
if not(os.path.exists('results/'+dataframeOptions['nameOfFile'])):
os.mkdir('results/'+dataframeOptions['nameOfFile'])
outfile = open('results/'+dataframeOptions['nameOfFile']+'/BASSresults','wb')
pickle.dump([P_w_explo_All, nbInstances_All, w_dict_explo_All],outfile)
outfile.close()
f = open('results/'+dataframeOptions['nameOfFile']+'/BASSresults.txt',"w+")
for i in range(0, len(P_w_explo_All)):
f.write("Condition:" + str(i) + "\n")
for j in range(0, len(P_w_explo_All[i])):
f.write(str(P_w_explo_All[i][j]) + " , " + str(nbInstances_All[i][j]) + " , " + str(w_dict_explo_All[i][j]) + "\n")
f.write("\n")
f.close()
def saveBASSresultsAddClassNames(dataframeOptions, P_w_explo_All, nbInstances_All, w_dict_explo_All, classNamesConvertion):
if not(os.path.exists('results/'+dataframeOptions['nameOfFile'])):
os.mkdir('results/'+dataframeOptions['nameOfFile'])
f = open('results/'+dataframeOptions['nameOfFile']+'/BASSresultsClassNames.txt',"w+")
for i in range(0, len(P_w_explo_All)):
f.write("Condition:" + str(i) + "\n")
for j in range(0, len(P_w_explo_All[i])):
tab = []
for c in w_dict_explo_All[i][j]:
tab.append(classNamesConvertion[c])
f.write(str(P_w_explo_All[i][j]) + " , " + str(nbInstances_All[i][j]) + " , " + str(tab) + "\n")
f.write("\n")
f.close()
def reloadBASSresults(dataframeOptions):
infile = open('results/'+dataframeOptions['nameOfFile']+'/BASSresults','rb')
results = pickle.load(infile)
infile.close()
# res = {"P_w_explo_All" : results[0], "nbInstances_All" : results[1], "w_dict_explo_All" : results[2]}
return results
| en | 0.407638 | #entropy # transmat_, stationary_probs_ = md.compute_transmat(Yexplo) # a,b,c = md.test_for_markovianity(Yexplo,w_dict_explo,eps,p_d,transmat_, stationary_probs_) # res = {"P_w_explo_All" : results[0], "nbInstances_All" : results[1], "w_dict_explo_All" : results[2]} | 2.186306 | 2 |
plist/__init__.py | ziank/plist2json | 4 | 6624116 | # -*- coding:utf-8 -*-
#authour:ziank | # -*- coding:utf-8 -*-
#authour:ziank | en | 0.584103 | # -*- coding:utf-8 -*- #authour:ziank | 0.979771 | 1 |
ad_api/api/sd/creatives.py | mkdir700/python-amazon-ad-api | 12 | 6624117 | <gh_stars>10-100
from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class Creatives(Client):
@sp_endpoint('/sd/creatives', method='GET')
def list_creatives(self, **kwargs) -> ApiResponse:
return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/sd/creatives', method='PUT')
def edit_creatives(self, **kwargs) -> ApiResponse:
# print(kwargs.get('body'))
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
@sp_endpoint('/sd/creatives', method='POST')
def create_creatives(self, **kwargs) -> ApiResponse:
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
@sp_endpoint('/sd/moderation/creatives', method='GET')
def list_moderation_creatives(self, **kwargs) -> ApiResponse:
return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/sd/creatives/preview', method='POST')
def show_creative_preview(self, **kwargs) -> ApiResponse:
#'not a valid key=value pair (missing equal-sign) in '
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs) | from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class Creatives(Client):
@sp_endpoint('/sd/creatives', method='GET')
def list_creatives(self, **kwargs) -> ApiResponse:
return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/sd/creatives', method='PUT')
def edit_creatives(self, **kwargs) -> ApiResponse:
# print(kwargs.get('body'))
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
@sp_endpoint('/sd/creatives', method='POST')
def create_creatives(self, **kwargs) -> ApiResponse:
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
@sp_endpoint('/sd/moderation/creatives', method='GET')
def list_moderation_creatives(self, **kwargs) -> ApiResponse:
return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/sd/creatives/preview', method='POST')
def show_creative_preview(self, **kwargs) -> ApiResponse:
#'not a valid key=value pair (missing equal-sign) in '
return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs) | en | 0.110607 | # print(kwargs.get('body')) #'not a valid key=value pair (missing equal-sign) in ' | 2.01977 | 2 |
build/lib/vicedtools/vce/schoolscores.py | gregbreese/vicedtools | 2 | 6624118 | # Copyright 2021 VicEdTools authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for working with VCE school scores."""
import re
from bs4 import BeautifulSoup
import pandas as pd
def parse_vass_school_scores_file(file_name: str) -> pd.DataFrame:
"""Converts the data in VASS school score export into a pandas DataFrame.
VASS school score exports contain scores in a HTML table. This function
reads the relevant values into a DataFrame.
Args:
file_name: The path of the school scores export.
Returns:
A DataFrame containing the total score results from the VASS export.
"""
#open file and parse HTML
html = open(file_name).read()
soup = BeautifulSoup(html)
table = soup.find("table")
#extract table data as csv
output_rows = []
for table_row in table.findAll('tr'):
columns = table_row.findAll('td')
output_row = []
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
# import results rows from CSV into dataframe
df = pd.DataFrame(data=output_rows[7:], columns=output_rows[5])
df.drop(columns=df.columns[4:], inplace=True)
# drop NAs and make all scores out of 1.
df = df[df.iloc[:, 3] != "NA"]
mark_total_pattern = "Total GA Score / (?P<total>[0-9]+)"
mark_total_str = df.columns[3]
m = re.search(mark_total_pattern, mark_total_str)
total = m.group('total')
scores = df.iloc[:, 3].astype(int) / int(total)
df.drop(columns=df.columns[3], inplace=True)
df["Score"] = scores
# add column to dataframe with year
year_str = output_rows[0][0]
year_pattern = "(?P<year>[0-9][0-9][0-9][0-9])"
m = re.search(year_pattern, year_str)
year = m.group('year')
# add column to dataframe with subject and unit
subject_str = output_rows[1][0]
subject_pattern = "- (?P<subject>[A-Z():. ]+) (?P<unit>[34])"
m = re.search(subject_pattern, subject_str)
subject = m.group('subject')
unit = m.group('unit')
# add column to dataframe with assessment data type
assessment_type_pattern = "- (?P<type>[A-Z34 -/]+)"
assessment_type_str = output_rows[2][0]
m = re.search(assessment_type_pattern, assessment_type_str)
assessment_type = m.group('type')
df["Year"] = year
df["Subject"] = subject
#df["Unit"] = unit
df["Graded Assessment"] = output_rows[2][0][6:-1]
return df
| # Copyright 2021 VicEdTools authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for working with VCE school scores."""
import re
from bs4 import BeautifulSoup
import pandas as pd
def parse_vass_school_scores_file(file_name: str) -> pd.DataFrame:
"""Converts the data in VASS school score export into a pandas DataFrame.
VASS school score exports contain scores in a HTML table. This function
reads the relevant values into a DataFrame.
Args:
file_name: The path of the school scores export.
Returns:
A DataFrame containing the total score results from the VASS export.
"""
#open file and parse HTML
html = open(file_name).read()
soup = BeautifulSoup(html)
table = soup.find("table")
#extract table data as csv
output_rows = []
for table_row in table.findAll('tr'):
columns = table_row.findAll('td')
output_row = []
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
# import results rows from CSV into dataframe
df = pd.DataFrame(data=output_rows[7:], columns=output_rows[5])
df.drop(columns=df.columns[4:], inplace=True)
# drop NAs and make all scores out of 1.
df = df[df.iloc[:, 3] != "NA"]
mark_total_pattern = "Total GA Score / (?P<total>[0-9]+)"
mark_total_str = df.columns[3]
m = re.search(mark_total_pattern, mark_total_str)
total = m.group('total')
scores = df.iloc[:, 3].astype(int) / int(total)
df.drop(columns=df.columns[3], inplace=True)
df["Score"] = scores
# add column to dataframe with year
year_str = output_rows[0][0]
year_pattern = "(?P<year>[0-9][0-9][0-9][0-9])"
m = re.search(year_pattern, year_str)
year = m.group('year')
# add column to dataframe with subject and unit
subject_str = output_rows[1][0]
subject_pattern = "- (?P<subject>[A-Z():. ]+) (?P<unit>[34])"
m = re.search(subject_pattern, subject_str)
subject = m.group('subject')
unit = m.group('unit')
# add column to dataframe with assessment data type
assessment_type_pattern = "- (?P<type>[A-Z34 -/]+)"
assessment_type_str = output_rows[2][0]
m = re.search(assessment_type_pattern, assessment_type_str)
assessment_type = m.group('type')
df["Year"] = year
df["Subject"] = subject
#df["Unit"] = unit
df["Graded Assessment"] = output_rows[2][0][6:-1]
return df
| en | 0.822181 | # Copyright 2021 VicEdTools authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Functions for working with VCE school scores. Converts the data in VASS school score export into a pandas DataFrame. VASS school score exports contain scores in a HTML table. This function reads the relevant values into a DataFrame. Args: file_name: The path of the school scores export. Returns: A DataFrame containing the total score results from the VASS export. #open file and parse HTML #extract table data as csv # import results rows from CSV into dataframe # drop NAs and make all scores out of 1. # add column to dataframe with year # add column to dataframe with subject and unit # add column to dataframe with assessment data type #df["Unit"] = unit | 3.610427 | 4 |
machine-learning/coursera_exercises/ex1/in_python/exercises/warmUpExercise.py | pk-ai/training | 1 | 6624119 | import numpy as np
def get5by5IdentityMatrix():
return np.identity(5) | import numpy as np
def get5by5IdentityMatrix():
return np.identity(5) | none | 1 | 2.400223 | 2 | |
src/diffie_hellman.py | Calder10/Secure-Chat-with-AES-and-Diffiie-Hellman-Key-Exchange | 0 | 6624120 | <filename>src/diffie_hellman.py
"""
Università degli Studi Di Palermo
Corso di Laurea Magistrale in Informatica
Anno Accademico 2020/2021
Cybersecurity
@author: <NAME>
DH-AES256 - Chatter
"""
from Crypto.Util.number import getPrime,getNRandomBitInteger
from Crypto.Random import get_random_bytes
SIZE_PG=1024
SIZE_PK=512
"""
Funzione che ritorna un numero primo p che ha una dimensione in bit
uguale a quella del parametro dato in input e un intero g compreso
tra 1 e p
"""
def create_p_g():
p=getPrime(SIZE_PG, randfunc=get_random_bytes)
while True:
g=getRandomNBitInteger(SIZE_PG,randfunc=get_random_bytes)
if g<p:
break
parameters=[p,g]
return parameters
"""
Algorimto quadra e moltiplica
"""
def exponentiation(bas, exp,N):
if (exp == 0):
return 1
if (exp == 1):
return bas % N
t = exponentiation(bas, int(exp / 2),N)
t = (t * t) % N
if (exp % 2 == 0):
return t
else:
return ((bas % N) * t) % N
"""
Funzione che calcola la chiave privata
scegliendo un intero a compreso tra 1 e p-1
e a partire da quest'ultima la chiave pubblica.
"""
def create_private_key(p):
private_key=getRandomNBitInteger(SIZE_PK,randfunc=get_random_bytes)
return private_key
"""
Funzione per la creazione della chiave pubblica
"""
def create_public_key(g,p,a):
public_key=exponentiation(g,a,p)
return public_key
"""
Funzione per la creazione della chiave condivisa
"""
def create_shared_key(x,y,p):
shared_key=exponentiation(x,y,p)
return shared_key
| <filename>src/diffie_hellman.py
"""
Università degli Studi Di Palermo
Corso di Laurea Magistrale in Informatica
Anno Accademico 2020/2021
Cybersecurity
@author: <NAME>
DH-AES256 - Chatter
"""
from Crypto.Util.number import getPrime,getNRandomBitInteger
from Crypto.Random import get_random_bytes
SIZE_PG=1024
SIZE_PK=512
"""
Funzione che ritorna un numero primo p che ha una dimensione in bit
uguale a quella del parametro dato in input e un intero g compreso
tra 1 e p
"""
def create_p_g():
p=getPrime(SIZE_PG, randfunc=get_random_bytes)
while True:
g=getRandomNBitInteger(SIZE_PG,randfunc=get_random_bytes)
if g<p:
break
parameters=[p,g]
return parameters
"""
Algorimto quadra e moltiplica
"""
def exponentiation(bas, exp,N):
if (exp == 0):
return 1
if (exp == 1):
return bas % N
t = exponentiation(bas, int(exp / 2),N)
t = (t * t) % N
if (exp % 2 == 0):
return t
else:
return ((bas % N) * t) % N
"""
Funzione che calcola la chiave privata
scegliendo un intero a compreso tra 1 e p-1
e a partire da quest'ultima la chiave pubblica.
"""
def create_private_key(p):
private_key=getRandomNBitInteger(SIZE_PK,randfunc=get_random_bytes)
return private_key
"""
Funzione per la creazione della chiave pubblica
"""
def create_public_key(g,p,a):
public_key=exponentiation(g,a,p)
return public_key
"""
Funzione per la creazione della chiave condivisa
"""
def create_shared_key(x,y,p):
shared_key=exponentiation(x,y,p)
return shared_key
| it | 0.971864 | Università degli Studi Di Palermo Corso di Laurea Magistrale in Informatica Anno Accademico 2020/2021 Cybersecurity @author: <NAME> DH-AES256 - Chatter Funzione che ritorna un numero primo p che ha una dimensione in bit uguale a quella del parametro dato in input e un intero g compreso tra 1 e p Algorimto quadra e moltiplica Funzione che calcola la chiave privata scegliendo un intero a compreso tra 1 e p-1 e a partire da quest'ultima la chiave pubblica. Funzione per la creazione della chiave pubblica Funzione per la creazione della chiave condivisa | 3.003412 | 3 |
contentcuration/contentcuration/db/models/manager.py | DXCanas/content-curation | 0 | 6624121 | import contextlib
from django.db import transaction
from django.db.models import Manager
from django.db.models import Q
from django_cte import CTEQuerySet
from mptt.managers import TreeManager
from contentcuration.db.models.query import CustomTreeQuerySet
class CustomManager(Manager.from_queryset(CTEQuerySet)):
"""
The CTEManager improperly overrides `get_queryset`
"""
pass
class CustomContentNodeTreeManager(TreeManager.from_queryset(CustomTreeQuerySet)):
# Added 7-31-2018. We can remove this once we are certain we have eliminated all cases
# where root nodes are getting prepended rather than appended to the tree list.
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
if target_tree_id == -1:
raise Exception(
"ERROR: Calling _create_tree_space with -1! Something is attempting to sort all MPTT trees root nodes!"
)
return super(CustomContentNodeTreeManager, self)._create_tree_space(
target_tree_id, num_trees
)
def _get_next_tree_id(self, *args, **kwargs):
from contentcuration.models import MPTTTreeIDManager
new_id = MPTTTreeIDManager.objects.create().id
return new_id
@contextlib.contextmanager
def lock_mptt(self, *tree_ids):
# If this is not inside the context of a delay context manager
# or updates are not disabled set a lock on the tree_ids.
if not self.model._mptt_is_tracking and self.model._mptt_updates_enabled:
with transaction.atomic():
# Lock only MPTT columns for updates on any of the tree_ids specified
# until the end of this transaction
query = Q()
for tree_id in tree_ids:
query |= Q(tree_id=tree_id)
self.select_for_update().order_by().filter(query).values(
"tree_id", "lft", "rght"
)
yield
else:
# Otherwise just let it carry on!
yield
def partial_rebuild(self, tree_id):
with self.lock_mptt(tree_id):
return super(CustomContentNodeTreeManager, self).partial_rebuild(tree_id)
def _move_child_to_new_tree(self, node, target, position):
from contentcuration.models import PrerequisiteContentRelationship
super(CustomContentNodeTreeManager, self)._move_child_to_new_tree(
node, target, position
)
PrerequisiteContentRelationship.objects.filter(
Q(prerequisite_id=node.id) | Q(target_node_id=node.id)
).delete()
@contextlib.contextmanager
def _update_changes(self, node, save):
original_parent_id = node.parent_id
yield
ids = [original_parent_id, node.parent_id] + [node.id] if save else []
# Always write to the database for the parent change updates, as we have
# no persistent object references for the original and new parent to modify
if ids:
self.filter(id__in=ids).update(changed=True)
node.changed = True
def _move_node(
self, node, target, position="last-child", save=True, refresh_target=True
):
# If we are delaying updates, then _move_node defers to insert_node
# we are already wrapping for parent changes below, so no need to
# add our parent changes context manager.
if self.tree_model._mptt_is_tracking:
return super(CustomContentNodeTreeManager, self)._move_node(
node, target, position, save, refresh_target
)
with self._update_changes(node, save):
return super(CustomContentNodeTreeManager, self)._move_node(
node, target, position, save, refresh_target
)
def insert_node(
self,
node,
target,
position="last-child",
save=False,
allow_existing_pk=False,
refresh_target=True,
):
with self._update_changes(node, save):
if save:
with self.lock_mptt(target.tree_id):
return super(CustomContentNodeTreeManager, self).insert_node(
node, target, position, save, allow_existing_pk, refresh_target
)
return super(CustomContentNodeTreeManager, self).insert_node(
node, target, position, save, allow_existing_pk, refresh_target
)
def move_node(self, node, target, position="first-child"):
with self.lock_mptt(node.tree_id, target.tree_id):
super(CustomContentNodeTreeManager, self).move_node(node, target, position)
def build_tree_nodes(self, data, target=None, position="last-child"):
"""
vendored from:
https://github.com/django-mptt/django-mptt/blob/fe2b9cc8cfd8f4b764d294747dba2758147712eb/mptt/managers.py#L614
"""
opts = self.model._mptt_meta
if target:
tree_id = target.tree_id
if position in ("left", "right"):
level = getattr(target, opts.level_attr)
if position == "left":
cursor = getattr(target, opts.left_attr)
else:
cursor = getattr(target, opts.right_attr) + 1
else:
level = getattr(target, opts.level_attr) + 1
if position == "first-child":
cursor = getattr(target, opts.left_attr) + 1
else:
cursor = getattr(target, opts.right_attr)
else:
tree_id = self._get_next_tree_id()
cursor = 1
level = 0
stack = []
def treeify(data, cursor=1, level=0):
data = dict(data)
children = data.pop("children", [])
node = self.model(**data)
stack.append(node)
setattr(node, opts.tree_id_attr, tree_id)
setattr(node, opts.level_attr, level)
setattr(node, opts.left_attr, cursor)
for child in children:
cursor = treeify(child, cursor=cursor + 1, level=level + 1)
cursor += 1
setattr(node, opts.right_attr, cursor)
return cursor
treeify(data, cursor=cursor, level=level)
if target:
self._create_space(2 * len(stack), cursor - 1, tree_id)
return stack
| import contextlib
from django.db import transaction
from django.db.models import Manager
from django.db.models import Q
from django_cte import CTEQuerySet
from mptt.managers import TreeManager
from contentcuration.db.models.query import CustomTreeQuerySet
class CustomManager(Manager.from_queryset(CTEQuerySet)):
"""
The CTEManager improperly overrides `get_queryset`
"""
pass
class CustomContentNodeTreeManager(TreeManager.from_queryset(CustomTreeQuerySet)):
# Added 7-31-2018. We can remove this once we are certain we have eliminated all cases
# where root nodes are getting prepended rather than appended to the tree list.
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
if target_tree_id == -1:
raise Exception(
"ERROR: Calling _create_tree_space with -1! Something is attempting to sort all MPTT trees root nodes!"
)
return super(CustomContentNodeTreeManager, self)._create_tree_space(
target_tree_id, num_trees
)
def _get_next_tree_id(self, *args, **kwargs):
from contentcuration.models import MPTTTreeIDManager
new_id = MPTTTreeIDManager.objects.create().id
return new_id
@contextlib.contextmanager
def lock_mptt(self, *tree_ids):
# If this is not inside the context of a delay context manager
# or updates are not disabled set a lock on the tree_ids.
if not self.model._mptt_is_tracking and self.model._mptt_updates_enabled:
with transaction.atomic():
# Lock only MPTT columns for updates on any of the tree_ids specified
# until the end of this transaction
query = Q()
for tree_id in tree_ids:
query |= Q(tree_id=tree_id)
self.select_for_update().order_by().filter(query).values(
"tree_id", "lft", "rght"
)
yield
else:
# Otherwise just let it carry on!
yield
def partial_rebuild(self, tree_id):
with self.lock_mptt(tree_id):
return super(CustomContentNodeTreeManager, self).partial_rebuild(tree_id)
def _move_child_to_new_tree(self, node, target, position):
from contentcuration.models import PrerequisiteContentRelationship
super(CustomContentNodeTreeManager, self)._move_child_to_new_tree(
node, target, position
)
PrerequisiteContentRelationship.objects.filter(
Q(prerequisite_id=node.id) | Q(target_node_id=node.id)
).delete()
@contextlib.contextmanager
def _update_changes(self, node, save):
original_parent_id = node.parent_id
yield
ids = [original_parent_id, node.parent_id] + [node.id] if save else []
# Always write to the database for the parent change updates, as we have
# no persistent object references for the original and new parent to modify
if ids:
self.filter(id__in=ids).update(changed=True)
node.changed = True
def _move_node(
self, node, target, position="last-child", save=True, refresh_target=True
):
# If we are delaying updates, then _move_node defers to insert_node
# we are already wrapping for parent changes below, so no need to
# add our parent changes context manager.
if self.tree_model._mptt_is_tracking:
return super(CustomContentNodeTreeManager, self)._move_node(
node, target, position, save, refresh_target
)
with self._update_changes(node, save):
return super(CustomContentNodeTreeManager, self)._move_node(
node, target, position, save, refresh_target
)
def insert_node(
self,
node,
target,
position="last-child",
save=False,
allow_existing_pk=False,
refresh_target=True,
):
with self._update_changes(node, save):
if save:
with self.lock_mptt(target.tree_id):
return super(CustomContentNodeTreeManager, self).insert_node(
node, target, position, save, allow_existing_pk, refresh_target
)
return super(CustomContentNodeTreeManager, self).insert_node(
node, target, position, save, allow_existing_pk, refresh_target
)
def move_node(self, node, target, position="first-child"):
with self.lock_mptt(node.tree_id, target.tree_id):
super(CustomContentNodeTreeManager, self).move_node(node, target, position)
def build_tree_nodes(self, data, target=None, position="last-child"):
"""
vendored from:
https://github.com/django-mptt/django-mptt/blob/fe2b9cc8cfd8f4b764d294747dba2758147712eb/mptt/managers.py#L614
"""
opts = self.model._mptt_meta
if target:
tree_id = target.tree_id
if position in ("left", "right"):
level = getattr(target, opts.level_attr)
if position == "left":
cursor = getattr(target, opts.left_attr)
else:
cursor = getattr(target, opts.right_attr) + 1
else:
level = getattr(target, opts.level_attr) + 1
if position == "first-child":
cursor = getattr(target, opts.left_attr) + 1
else:
cursor = getattr(target, opts.right_attr)
else:
tree_id = self._get_next_tree_id()
cursor = 1
level = 0
stack = []
def treeify(data, cursor=1, level=0):
data = dict(data)
children = data.pop("children", [])
node = self.model(**data)
stack.append(node)
setattr(node, opts.tree_id_attr, tree_id)
setattr(node, opts.level_attr, level)
setattr(node, opts.left_attr, cursor)
for child in children:
cursor = treeify(child, cursor=cursor + 1, level=level + 1)
cursor += 1
setattr(node, opts.right_attr, cursor)
return cursor
treeify(data, cursor=cursor, level=level)
if target:
self._create_space(2 * len(stack), cursor - 1, tree_id)
return stack
| en | 0.766582 | The CTEManager improperly overrides `get_queryset` # Added 7-31-2018. We can remove this once we are certain we have eliminated all cases # where root nodes are getting prepended rather than appended to the tree list. Creates space for a new tree by incrementing all tree ids greater than ``target_tree_id``. # If this is not inside the context of a delay context manager # or updates are not disabled set a lock on the tree_ids. # Lock only MPTT columns for updates on any of the tree_ids specified # until the end of this transaction # Otherwise just let it carry on! # Always write to the database for the parent change updates, as we have # no persistent object references for the original and new parent to modify # If we are delaying updates, then _move_node defers to insert_node # we are already wrapping for parent changes below, so no need to # add our parent changes context manager. vendored from: https://github.com/django-mptt/django-mptt/blob/fe2b9cc8cfd8f4b764d294747dba2758147712eb/mptt/managers.py#L614 | 2.110534 | 2 |
tools/mqppep/mqppep_mrgfltr.py | eschen42/mqppep | 0 | 6624122 | <filename>tools/mqppep/mqppep_mrgfltr.py
#!/usr/bin/env python
# Import the packages needed
import argparse
import operator # for operator.itemgetter
import os.path
import re
import shutil # for shutil.copyfile(src, dest)
import sqlite3 as sql
import sys # import the sys module for exc_info
import time
import traceback # for formatting stack-trace
from codecs import getreader as cx_getreader
import numpy as np
import pandas
# global constants
N_A = "N/A"
# ref: https://stackoverflow.com/a/8915613/15509512
#   answers: "How to handle exceptions in a list comprehensions"
#   usage:
#       from math import log
#       eggs = [1,3,0,3,2]
#       print([x for x in [catch(log, egg) for egg in eggs] if x is not None])
#   Note: unlike the referenced recipe, this catch() aborts the whole
#   process on the first exception (here log(0)), after printing:
#       for <built-in function log>
#         with args (0,)
#         caught exception: math domain error
#   so the final list is printed only when no element raises.
def catch(func, *args, handle=lambda e: e, **kwargs):
    """Call ``func(*args, **kwargs)``; abort the process on any exception.

    On success, the callable's return value is passed through unchanged.
    On failure, the offending callable, its positional arguments, the
    exception, and a formatted stack trace are printed, and the process
    exits with status -1 (errors here are considered fatal).

    Note: ``handle`` is accepted for signature compatibility with the
    recipe this is adapted from (and with ``whine``) but is never invoked,
    because this variant does not recover from errors.
    """
    try:
        return func(*args, **kwargs)
    except Exception as e:
        print("For %s" % str(func))
        print(" with args %s" % str(args))
        print(" caught exception: %s" % str(e))
        (ty, va, tb) = sys.exc_info()
        print(" stack trace: " + str(traceback.format_exception(ty, va, tb)))
        # sys.exit raises SystemExit; the original's trailing `return None`
        # after exit(-1) was unreachable and has been removed.
        sys.exit(-1)
def whine(func, *args, handle=lambda e: e, **kwargs):
    """Best-effort call of ``func(*args, **kwargs)``.

    Returns the callable's result on success.  If any exception escapes,
    a warning (including a formatted stack trace) is printed and None is
    returned so the caller can continue.  ``handle`` is accepted for
    signature compatibility with ``catch`` but is not used here.
    """
    try:
        result = func(*args, **kwargs)
    except Exception as err:
        print("Warning: For %s" % str(func))
        print(" with args %s" % str(args))
        print(" caught exception: %s" % str(err))
        exc_type, exc_value, exc_tb = sys.exc_info()
        print(
            " stack trace: "
            + str(traceback.format_exception(exc_type, exc_value, exc_tb))
        )
        return None
    else:
        return result
def ppep_join(x, na="N/A"):
    """Join the non-missing entries of *x* with " | " separators.

    Entries equal to *na* are dropped before joining.  The default
    sentinel mirrors the module-level ``N_A`` constant ("N/A"); it is a
    keyword parameter so callers can supply a different missing-value
    marker without changing existing call sites.

    If the joined result is empty, *na* itself is returned so downstream
    consumers always see a non-empty field.
    """
    kept = [item for item in x if item != na]
    result = " | ".join(kept)
    # An all-missing (or empty) input yields "" here; report it as missing.
    return result if result != "" else na
def melt_join(x):
    """Join *x* with " | ", collapsing case-insensitive duplicates.

    Entries are deduplicated by their lower-cased form.  The casing of
    the LAST occurrence wins, while ordering follows the first occurrence
    of each key (standard dict insertion-order semantics).
    """
    dedup = {}
    for item in x:
        dedup[item.lower()] = item
    return " | ".join(dedup.values())
def __main__():
# Parse Command Line
parser = argparse.ArgumentParser(
description="Phopsphoproteomic Enrichment Pipeline Merge and Filter."
)
# inputs:
# Phosphopeptide data for experimental results, including the intensities
# and the mapping to kinase domains, in tabular format.
parser.add_argument(
"--phosphopeptides",
"-p",
nargs=1,
required=True,
dest="phosphopeptides",
help="Phosphopeptide data for experimental results, including the intensities and the mapping to kinase domains, in tabular format",
)
# UniProtKB/SwissProt DB input, SQLite
parser.add_argument(
"--ppep_mapping_db",
"-d",
nargs=1,
required=True,
dest="ppep_mapping_db",
help="UniProtKB/SwissProt SQLite Database",
)
# species to limit records chosed from PhosPhositesPlus
parser.add_argument(
"--species",
"-x",
nargs=1,
required=False,
default=[],
dest="species",
help="limit PhosphoSitePlus records to indicated species (field may be empty)",
)
# outputs:
# tabular output
parser.add_argument(
"--mrgfltr_tab",
"-o",
nargs=1,
required=True,
dest="mrgfltr_tab",
help="Tabular output file for results",
)
# CSV output
parser.add_argument(
"--mrgfltr_csv",
"-c",
nargs=1,
required=True,
dest="mrgfltr_csv",
help="CSV output file for results",
)
# SQLite output
parser.add_argument(
"--mrgfltr_sqlite",
"-S",
nargs=1,
required=True,
dest="mrgfltr_sqlite",
help="SQLite output file for results",
)
# "Make it so!" (parse the arguments)
options = parser.parse_args()
print("options: " + str(options))
# determine phosphopeptide ("upstream map") input tabular file access
if options.phosphopeptides is None:
exit('Argument "phosphopeptides" is required but not supplied')
try:
upstream_map_filename_tab = os.path.abspath(options.phosphopeptides[0])
input_file = open(upstream_map_filename_tab, "r")
input_file.close()
except Exception as e:
exit("Error parsing phosphopeptides argument: %s" % str(e))
# determine input SQLite access
if options.ppep_mapping_db is None:
exit('Argument "ppep_mapping_db" is required but not supplied')
try:
uniprot_sqlite = os.path.abspath(options.ppep_mapping_db[0])
input_file = open(uniprot_sqlite, "rb")
input_file.close()
except Exception as e:
exit("Error parsing ppep_mapping_db argument: %s" % str(e))
# copy input SQLite dataset to output SQLite dataset
if options.mrgfltr_sqlite is None:
exit('Argument "mrgfltr_sqlite" is required but not supplied')
try:
output_sqlite = os.path.abspath(options.mrgfltr_sqlite[0])
shutil.copyfile(uniprot_sqlite, output_sqlite)
except Exception as e:
exit("Error copying ppep_mapping_db to mrgfltr_sqlite: %s" % str(e))
# determine species to limit records from PSP_Regulatory_Sites
if options.species is None:
exit(
'Argument "species" is required (and may be empty) but not supplied'
)
try:
if len(options.species) > 0:
species = options.species[0]
else:
species = ""
except Exception as e:
exit("Error parsing species argument: %s" % str(e))
# determine tabular output destination
if options.mrgfltr_tab is None:
exit('Argument "mrgfltr_tab" is required but not supplied')
try:
output_filename_tab = os.path.abspath(options.mrgfltr_tab[0])
output_file = open(output_filename_tab, "w")
output_file.close()
except Exception as e:
exit("Error parsing mrgfltr_tab argument: %s" % str(e))
# determine CSV output destination
if options.mrgfltr_csv is None:
exit('Argument "mrgfltr_csv" is required but not supplied')
try:
output_filename_csv = os.path.abspath(options.mrgfltr_csv[0])
output_file = open(output_filename_csv, "w")
output_file.close()
except Exception as e:
exit("Error parsing mrgfltr_csv argument: %s" % str(e))
def mqpep_getswissprot():
#
# copied from Excel Output Script.ipynb BEGIN #
#
# String Constants #################
DEPHOSPHOPEP = "DephosphoPep"
DESCRIPTION = "Description"
FUNCTION_PHOSPHORESIDUE = (
"Function Phosphoresidue(PSP=PhosphoSitePlus.org)"
)
GENE_NAME = "Gene_Name" # Gene Name from UniProtKB
ON_FUNCTION = (
"ON_FUNCTION" # ON_FUNCTION column from PSP_Regulatory_Sites
)
ON_NOTES = "NOTES" # NOTES column from PSP_Regulatory_Sites
ON_OTHER_INTERACT = "ON_OTHER_INTERACT" # ON_OTHER_INTERACT column from PSP_Regulatory_Sites
ON_PROCESS = (
"ON_PROCESS" # ON_PROCESS column from PSP_Regulatory_Sites
)
ON_PROT_INTERACT = "ON_PROT_INTERACT" # ON_PROT_INTERACT column from PSP_Regulatory_Sites
PHOSPHOPEPTIDE = "Phosphopeptide"
PHOSPHOPEPTIDE_MATCH = "Phosphopeptide_match"
PHOSPHORESIDUE = "Phosphoresidue"
PUTATIVE_UPSTREAM_DOMAINS = "Putative Upstream Kinases(PSP=PhosphoSitePlus.org)/Phosphatases/Binding Domains"
SEQUENCE = "Sequence"
SEQUENCE10 = "Sequence10"
SEQUENCE7 = "Sequence7"
SITE_PLUSMINUS_7AA_SQL = "SITE_PLUSMINUS_7AA"
UNIPROT_ID = "UniProt_ID"
UNIPROT_SEQ_AND_META_SQL = """
select Uniprot_ID, Description, Gene_Name, Sequence,
Organism_Name, Organism_ID, PE, SV
from UniProtKB
order by Sequence, UniProt_ID
"""
UNIPROT_UNIQUE_SEQ_SQL = """
select distinct Sequence
from UniProtKB
group by Sequence
"""
PPEP_PEP_UNIPROTSEQ_SQL = """
select distinct phosphopeptide, peptide, sequence
from uniprotkb_pep_ppep_view
order by sequence
"""
PPEP_MELT_SQL = """
SELECT DISTINCT
phospho_peptide AS 'p_peptide',
kinase_map AS 'characterization',
'X' AS 'X'
FROM ppep_gene_site_view
"""
# CREATE TABLE PSP_Regulatory_site (
# site_plusminus_7AA TEXT PRIMARY KEY ON CONFLICT IGNORE,
# domain TEXT,
# ON_FUNCTION TEXT,
# ON_PROCESS TEXT,
# ON_PROT_INTERACT TEXT,
# ON_OTHER_INTERACT TEXT,
# notes TEXT,
# organism TEXT
# );
PSP_REGSITE_SQL = """
SELECT DISTINCT
SITE_PLUSMINUS_7AA ,
DOMAIN ,
ON_FUNCTION ,
ON_PROCESS ,
ON_PROT_INTERACT ,
ON_OTHER_INTERACT ,
NOTES ,
ORGANISM
FROM PSP_Regulatory_site
"""
PPEP_ID_SQL = """
SELECT
id AS 'ppep_id',
seq AS 'ppep_seq'
FROM ppep
"""
MRGFLTR_DDL = """
DROP VIEW IF EXISTS mrgfltr_metadata_view;
DROP TABLE IF EXISTS mrgfltr_metadata;
CREATE TABLE mrgfltr_metadata
( ppep_id INTEGER REFERENCES ppep(id)
, Sequence10 TEXT
, Sequence7 TEXT
, GeneName TEXT
, Phosphoresidue TEXT
, UniProtID TEXT
, Description TEXT
, FunctionPhosphoresidue TEXT
, PutativeUpstreamDomains TEXT
, PRIMARY KEY (ppep_id) ON CONFLICT IGNORE
)
;
CREATE VIEW mrgfltr_metadata_view AS
SELECT DISTINCT
ppep.seq AS phospho_peptide
, Sequence10
, Sequence7
, GeneName
, Phosphoresidue
, UniProtID
, Description
, FunctionPhosphoresidue
, PutativeUpstreamDomains
FROM
ppep, mrgfltr_metadata
WHERE
mrgfltr_metadata.ppep_id = ppep.id
ORDER BY
ppep.seq
;
"""
CITATION_INSERT_STMT = """
INSERT INTO Citation (
ObjectName,
CitationData
) VALUES (?,?)
"""
CITATION_INSERT_PSP = 'PhosphoSitePlus(R) (PSP) was created by Cell Signaling Technology Inc. It is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. When using PSP data or analyses in printed publications or in online resources, the following acknowledgements must be included: (a) the words "PhosphoSitePlus(R), www.phosphosite.org" must be included at appropriate places in the text or webpage, and (b) the following citation must be included in the bibliography: "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Skrzypek E PhosphoSitePlus, 2014: mutations, PTMs and recalibrations. Nucleic Acids Res. 2015 43:D512-20. PMID: 25514926."'
CITATION_INSERT_PSP_REF = 'Hornbeck, 2014, "PhosphoSitePlus, 2014: mutations, PTMs and recalibrations.", https://pubmed.ncbi.nlm.nih.gov/22135298, https://doi.org/10.1093/nar/gkr1122'
MRGFLTR_METADATA_COLUMNS = [
"ppep_id",
"Sequence10",
"Sequence7",
"GeneName",
"Phosphoresidue",
"UniProtID",
"Description",
"FunctionPhosphoresidue",
"PutativeUpstreamDomains",
]
# String Constants (end) ############
        class Error(Exception):
            """Base class for exceptions raised by this merge/filter step."""

            pass
        class PreconditionError(Error):
            """Raised when a lookup-table precondition is not met.

            Attributes:
                expression -- the input value (e.g. a phosphopeptide) for
                    which the precondition failed
                message -- explanation of the failed precondition
            """

            def __init__(self, expression, message):
                self.expression = expression
                self.message = message
# start_time = time.clock() #timer
start_time = time.process_time() # timer
# get keys from upstream tabular file using readline()
# ref: https://stackoverflow.com/a/16713581/15509512
# answer to "Use codecs to read file with correct encoding"
file1_encoded = open(upstream_map_filename_tab, "rb")
file1 = cx_getreader("latin-1")(file1_encoded)
count = 0
upstream_map_p_peptide_list = []
re_tab = re.compile("^[^\t]*")
while True:
count += 1
# Get next line from file
line = file1.readline()
# if line is empty
# end of file is reached
if not line:
break
if count > 1:
m = re_tab.match(line)
upstream_map_p_peptide_list.append(m[0])
file1.close()
file1_encoded.close()
# Get the list of phosphopeptides with the p's that represent the phosphorylation sites removed
re_phos = re.compile("p")
end_time = time.process_time() # timer
print(
"%0.6f pre-read-SwissProt [0.1]" % (end_time - start_time,),
file=sys.stderr,
)
# ----------- Get SwissProt data from SQLite database (start) -----------
# build UniProt sequence LUT and list of unique SwissProt sequences
# Open SwissProt SQLite database
conn = sql.connect(uniprot_sqlite)
cur = conn.cursor()
# Set up structures to hold SwissProt data
uniprot_Sequence_List = []
UniProtSeqLUT = {}
# Execute query for unique seqs without fetching the results yet
uniprot_unique_seq_cur = cur.execute(UNIPROT_UNIQUE_SEQ_SQL)
while 1:
batch = uniprot_unique_seq_cur.fetchmany(size=50)
if not batch:
# handle case where no records are returned
break
for row in batch:
Sequence = row[0]
UniProtSeqLUT[(Sequence, DESCRIPTION)] = []
UniProtSeqLUT[(Sequence, GENE_NAME)] = []
UniProtSeqLUT[(Sequence, UNIPROT_ID)] = []
UniProtSeqLUT[Sequence] = []
# Execute query for seqs and metadata without fetching the results yet
uniprot_seq_and_meta = cur.execute(UNIPROT_SEQ_AND_META_SQL)
while 1:
batch = uniprot_seq_and_meta.fetchmany(size=50)
if not batch:
# handle case where no records are returned
break
for (
UniProt_ID,
Description,
Gene_Name,
Sequence,
OS,
OX,
PE,
SV,
) in batch:
uniprot_Sequence_List.append(Sequence)
UniProtSeqLUT[Sequence] = Sequence
UniProtSeqLUT[(Sequence, UNIPROT_ID)].append(UniProt_ID)
UniProtSeqLUT[(Sequence, GENE_NAME)].append(Gene_Name)
if OS != N_A:
Description += " OS=" + OS
if OX != -1:
Description += " OX=" + str(OX)
if Gene_Name != N_A:
Description += " GN=" + Gene_Name
if PE != N_A:
Description += " PE=" + PE
if SV != N_A:
Description += " SV=" + SV
UniProtSeqLUT[(Sequence, DESCRIPTION)].append(Description)
# Close SwissProt SQLite database; clean up local variables
conn.close()
Sequence = ""
UniProt_ID = ""
Description = ""
Gene_Name = ""
# ----------- Get SwissProt data from SQLite database (finish) -----------
end_time = time.process_time() # timer
print(
"%0.6f post-read-SwissProt [0.2]" % (end_time - start_time,),
file=sys.stderr,
)
# ----------- Get SwissProt data from SQLite database (start) -----------
# Open SwissProt SQLite database
conn = sql.connect(uniprot_sqlite)
cur = conn.cursor()
# Set up dictionary to aggregate results for phosphopeptides correspounding to dephosphoeptide
DephosphoPep_UniProtSeq_LUT = {}
# Set up dictionary to accumulate results
PhosphoPep_UniProtSeq_LUT = {}
# Execute query for tuples without fetching the results yet
ppep_pep_uniprotseq_cur = cur.execute(PPEP_PEP_UNIPROTSEQ_SQL)
while 1:
batch = ppep_pep_uniprotseq_cur.fetchmany(size=50)
if not batch:
# handle case where no records are returned
break
for (phospho_pep, dephospho_pep, sequence) in batch:
# do interesting stuff here...
PhosphoPep_UniProtSeq_LUT[phospho_pep] = phospho_pep
PhosphoPep_UniProtSeq_LUT[
(phospho_pep, DEPHOSPHOPEP)
] = dephospho_pep
if dephospho_pep not in DephosphoPep_UniProtSeq_LUT:
DephosphoPep_UniProtSeq_LUT[dephospho_pep] = set()
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, DESCRIPTION)
] = []
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, GENE_NAME)
] = []
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
] = []
DephosphoPep_UniProtSeq_LUT[(dephospho_pep, SEQUENCE)] = []
DephosphoPep_UniProtSeq_LUT[dephospho_pep].add(phospho_pep)
if (
sequence
not in DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, SEQUENCE)
]
):
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, SEQUENCE)
].append(sequence)
for phospho_pep in DephosphoPep_UniProtSeq_LUT[dephospho_pep]:
if phospho_pep != phospho_pep:
print(
"phospho_pep:'%s' phospho_pep:'%s'"
% (phospho_pep, phospho_pep)
)
if phospho_pep not in PhosphoPep_UniProtSeq_LUT:
PhosphoPep_UniProtSeq_LUT[phospho_pep] = phospho_pep
PhosphoPep_UniProtSeq_LUT[
(phospho_pep, DEPHOSPHOPEP)
] = dephospho_pep
r = list(
zip(
[s for s in UniProtSeqLUT[(sequence, UNIPROT_ID)]],
[s for s in UniProtSeqLUT[(sequence, GENE_NAME)]],
[
s
for s in UniProtSeqLUT[(sequence, DESCRIPTION)]
],
)
)
# Sort by `UniProt_ID`
# ref: https://stackoverflow.com/a/4174955/15509512
r = sorted(r, key=operator.itemgetter(0))
# Get one tuple for each `phospho_pep`
# in DephosphoPep_UniProtSeq_LUT[dephospho_pep]
for (upid, gn, desc) in r:
# Append pseudo-tuple per UniProt_ID but only when it is not present
if (
upid
not in DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
]
):
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
].append(upid)
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, DESCRIPTION)
].append(desc)
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, GENE_NAME)
].append(gn)
# Close SwissProt SQLite database; clean up local variables
conn.close()
# wipe local variables
phospho_pep = dephospho_pep = sequence = 0
upid = gn = desc = r = ""
# ----------- Get SwissProt data from SQLite database (finish) -----------
end_time = time.process_time() # timer
print(
"%0.6f finished reading and decoding '%s' [0.4]"
% (end_time - start_time, upstream_map_filename_tab),
file=sys.stderr,
)
print(
"{:>10} unique upstream phosphopeptides tested".format(
str(len(upstream_map_p_peptide_list))
)
)
# Read in Upstream tabular file
# We are discarding the intensity data; so read it as text
upstream_data = pandas.read_table(
upstream_map_filename_tab, dtype="str", index_col=0
)
end_time = time.process_time() # timer
print(
"%0.6f read Upstream Map from file [1g_1]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
upstream_data.index = upstream_map_p_peptide_list
end_time = time.process_time() # timer
print(
"%0.6f added index to Upstream Map [1g_2]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# trim upstream_data to include only the upstream map columns
old_cols = upstream_data.columns.tolist()
i = 0
first_intensity = -1
last_intensity = -1
intensity_re = re.compile("Intensity.*")
for col_name in old_cols:
m = intensity_re.match(col_name)
if m:
last_intensity = i
if first_intensity == -1:
first_intensity = i
i += 1
# print('last intensity = %d' % last_intensity)
col_PKCalpha = last_intensity + 2
data_in_cols = [old_cols[0]] + old_cols[
first_intensity: last_intensity + 1
]
if upstream_data.empty:
print("upstream_data is empty")
exit(0)
data_in = upstream_data.copy(deep=True)[data_in_cols]
# Convert floating-point integers to int64 integers
# ref: https://stackoverflow.com/a/68497603/15509512
data_in[list(data_in.columns[1:])] = (
data_in[list(data_in.columns[1:])]
.astype("float64")
.apply(np.int64)
)
# create another phosphopeptide column that will be used to join later;
# MAY need to change depending on Phosphopeptide column position
# data_in[PHOSPHOPEPTIDE_MATCH] = data_in[data_in.columns.tolist()[0]]
data_in[PHOSPHOPEPTIDE_MATCH] = data_in.index
end_time = time.process_time() # timer
print(
"%0.6f set data_in[PHOSPHOPEPTIDE_MATCH] [A]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Produce a dictionary of metadata for a single phosphopeptide.
# This is a replacement of `UniProtInfo_subdict` in the original code.
        def pseq_to_subdict(phospho_pep):
            """Build the SwissProt metadata dictionary for one phosphopeptide.

            Replacement for `UniProtInfo_subdict` in the original notebook.
            Returns a two-element list ``[phospho_pep, result]`` where
            ``result`` maps the column-name constants (SEQUENCE, UNIPROT_ID,
            DESCRIPTION, GENE_NAME, PHOSPHORESIDUE, SEQUENCE7, SEQUENCE10)
            to joined strings.  Raises PreconditionError when the peptide or
            its dephosphorylated form is missing from the closure LUTs
            (PhosphoPep_UniProtSeq_LUT / DephosphoPep_UniProtSeq_LUT);
            callers may catch it and report ``pe.expression``/``pe.message``.
            """
            # Strip "p" from phosphopeptide sequence
            dephospho_pep = re_phos.sub("", phospho_pep)
            # Determine number of phosphoresidues in phosphopeptide
            numps = len(phospho_pep) - len(dephospho_pep)
            # Determine location(s) of phosphoresidue(s) in phosphopeptide
            # (used later for Phosphoresidue, Sequence7, and Sequence10).
            # Each found "p" is removed before searching for the next, so
            # the recorded positions refer to the dephosphorylated string.
            ploc = []  # list of p locations
            i = 0
            p = phospho_pep
            while i < numps:
                ploc.append(p.find("p"))
                p = p[: p.find("p")] + p[p.find("p") + 1:]
                i += 1
            # Establish nested dictionary
            result = {}
            result[SEQUENCE] = []
            result[UNIPROT_ID] = []
            result[DESCRIPTION] = []
            result[GENE_NAME] = []
            result[PHOSPHORESIDUE] = []
            result[SEQUENCE7] = []
            result[SEQUENCE10] = []
            # Add stripped sequence to dictionary
            result[SEQUENCE].append(dephospho_pep)
            # Precondition checks against the closure lookup tables
            if phospho_pep not in PhosphoPep_UniProtSeq_LUT:
                raise PreconditionError(
                    phospho_pep,
                    "no matching phosphopeptide found in PhosphoPep_UniProtSeq_LUT",
                )
            if dephospho_pep not in DephosphoPep_UniProtSeq_LUT:
                raise PreconditionError(
                    dephospho_pep,
                    "dephosphorylated phosphopeptide not found in DephosphoPep_UniProtSeq_LUT",
                )
            if (
                dephospho_pep != PhosphoPep_UniProtSeq_LUT[(phospho_pep, DEPHOSPHOPEP)]
            ):
                my_err_msg = "dephosphorylated phosphopeptide does not match "
                my_err_msg += "PhosphoPep_UniProtSeq_LUT[(phospho_pep,DEPHOSPHOPEP)] = "
                my_err_msg += PhosphoPep_UniProtSeq_LUT[(phospho_pep, DEPHOSPHOPEP)]
                raise PreconditionError(dephospho_pep, my_err_msg)
            # Copy per-dephosphopeptide annotation lists from the LUT
            result[SEQUENCE] = [dephospho_pep]
            result[UNIPROT_ID] = DephosphoPep_UniProtSeq_LUT[
                (dephospho_pep, UNIPROT_ID)
            ]
            result[DESCRIPTION] = DephosphoPep_UniProtSeq_LUT[
                (dephospho_pep, DESCRIPTION)
            ]
            result[GENE_NAME] = DephosphoPep_UniProtSeq_LUT[
                (dephospho_pep, GENE_NAME)
            ]
            if (dephospho_pep, SEQUENCE) not in DephosphoPep_UniProtSeq_LUT:
                raise PreconditionError(
                    dephospho_pep,
                    "no matching phosphopeptide found in DephosphoPep_UniProtSeq_LUT",
                )
            UniProtSeqList = DephosphoPep_UniProtSeq_LUT[
                (dephospho_pep, SEQUENCE)
            ]
            if len(UniProtSeqList) < 1:
                # best-effort: warn and fall through with an empty list
                print(
                    "Skipping DephosphoPep_UniProtSeq_LUT[('%s',SEQUENCE)] because value has zero length"
                    % dephospho_pep
                )
            for UniProtSeq in UniProtSeqList:
                i = 0
                phosphoresidues = []
                seq7s_set = set()
                seq7s = []
                seq10s_set = set()
                seq10s = []
                while i < len(ploc):
                    start = UniProtSeq.find(dephospho_pep)
                    # handle case where no sequence was found for dep-pep
                    if start < 0:
                        i += 1
                        continue
                    psite = (
                        start + ploc[i]
                    )  # location of phosphoresidue on protein sequence
                    # add Phosphoresidue, e.g. "pS123" (1-based position)
                    phosphosite = "p" + str(UniProtSeq)[psite] + str(psite + 1)
                    phosphoresidues.append(phosphosite)
                    # Add Sequence7: +/-7 residues around the site, padded
                    # with "_" to 15 chars; phosphoresidue lower-cased
                    if psite < 7:  # phospho_pep at N terminus
                        seq7 = str(UniProtSeq)[: psite + 8]
                        if seq7[psite] == "S":  # if phosphosresidue is serine
                            pres = "s"
                        elif (
                            seq7[psite] == "T"
                        ):  # if phosphosresidue is threonine
                            pres = "t"
                        elif (
                            seq7[psite] == "Y"
                        ):  # if phosphoresidue is tyrosine
                            pres = "y"
                        else:  # if not pSTY
                            pres = "?"
                        seq7 = (
                            seq7[:psite] + pres + seq7[psite + 1: psite + 8]
                        )
                        while (
                            len(seq7) < 15
                        ):  # add appropriate number of "_" to the front
                            seq7 = "_" + seq7
                    elif (
                        len(UniProtSeq) - psite < 8
                    ):  # phospho_pep at C terminus
                        seq7 = str(UniProtSeq)[psite - 7:]
                        if seq7[7] == "S":
                            pres = "s"
                        elif seq7[7] == "T":
                            pres = "t"
                        elif seq7[7] == "Y":
                            pres = "y"
                        else:
                            pres = "?"
                        seq7 = seq7[:7] + pres + seq7[8:]
                        while (
                            len(seq7) < 15
                        ):  # add appropriate number of "_" to the back
                            seq7 = seq7 + "_"
                    else:
                        seq7 = str(UniProtSeq)[psite - 7: psite + 8]
                        pres = ""  # phosphoresidue
                        if seq7[7] == "S":  # if phosphosresidue is serine
                            pres = "s"
                        elif seq7[7] == "T":  # if phosphosresidue is threonine
                            pres = "t"
                        elif seq7[7] == "Y":  # if phosphoresidue is tyrosine
                            pres = "y"
                        else:  # if not pSTY
                            pres = "?"
                        seq7 = seq7[:7] + pres + seq7[8:]
                    if seq7 not in seq7s_set:
                        seq7s.append(seq7)
                        seq7s_set.add(seq7)
                    # add Sequence10: +/-10 residues with "p" inserted
                    # before the phosphorylated position (no padding)
                    if psite < 10:  # phospho_pep at N terminus
                        seq10 = (
                            str(UniProtSeq)[:psite] + "p" + str(UniProtSeq)[psite: psite + 11]
                        )
                    elif (
                        len(UniProtSeq) - psite < 11
                    ):  # phospho_pep at C terminus
                        seq10 = (
                            str(UniProtSeq)[psite - 10: psite] + "p" + str(UniProtSeq)[psite:]
                        )
                    else:
                        seq10 = str(UniProtSeq)[psite - 10: psite + 11]
                        seq10 = seq10[:10] + "p" + seq10[10:]
                    if seq10 not in seq10s_set:
                        seq10s.append(seq10)
                        seq10s_set.add(seq10)
                    i += 1
                result[PHOSPHORESIDUE].append(phosphoresidues)
                result[SEQUENCE7].append(seq7s)
                # result[SEQUENCE10] is a list of lists of strings
                result[SEQUENCE10].append(seq10s)
            r = list(
                zip(
                    result[UNIPROT_ID],
                    result[GENE_NAME],
                    result[DESCRIPTION],
                    result[PHOSPHORESIDUE],
                )
            )
            # Sort by `UniProt_ID`
            # ref: https://stackoverflow.com/a/4174955/15509512
            s = sorted(r, key=operator.itemgetter(0))
            result[UNIPROT_ID] = []
            result[GENE_NAME] = []
            result[DESCRIPTION] = []
            result[PHOSPHORESIDUE] = []
            for r in s:
                result[UNIPROT_ID].append(r[0])
                result[GENE_NAME].append(r[1])
                result[DESCRIPTION].append(r[2])
                result[PHOSPHORESIDUE].append(r[3])
            # convert lists to strings in the dictionary
            for key, value in result.items():
                if key not in [PHOSPHORESIDUE, SEQUENCE7, SEQUENCE10]:
                    result[key] = "; ".join(map(str, value))
                elif key in [SEQUENCE10]:
                    # result[SEQUENCE10] is a list of lists of strings;
                    # flatten with "; " while de-duplicating
                    joined_value = ""
                    joined_set = set()
                    sep = ""
                    for valL in value:
                        # valL is a list of strings
                        for val in valL:
                            # val is a string
                            if val not in joined_set:
                                joined_set.add(val)
                                joined_value += sep + val
                                sep = "; "
                    # joined_value is a string
                    result[key] = joined_value
            # isoforms within one protein are comma-separated;
            # different proteins are separated by "; "
            newstring = "; ".join(
                [", ".join(prez) for prez in result[PHOSPHORESIDUE]]
            )
            result[PHOSPHORESIDUE] = newstring
            # separate sequence7's by |
            oldstring = result[SEQUENCE7]
            oldlist = oldstring
            newstring = ""
            for ol in oldlist:
                for e in ol:
                    if e == ";":
                        newstring = newstring + " |"
                    elif len(newstring) > 0 and 1 > newstring.count(e):
                        newstring = newstring + " | " + e
                    elif 1 > newstring.count(e):
                        newstring = newstring + e
            result[SEQUENCE7] = newstring
            return [phospho_pep, result]
# Construct list of [string, dictionary] lists
# where the dictionary provides the SwissProt metadata
# for a phosphopeptide
result_list = [
whine(pseq_to_subdict, psequence)
for psequence in data_in[PHOSPHOPEPTIDE_MATCH]
]
end_time = time.process_time() # timer
print(
"%0.6f added SwissProt annotations to phosphopeptides [B]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Construct dictionary from list of lists
# ref: https://www.8bitavenue.com/how-to-convert-list-of-lists-to-dictionary-in-python/
UniProt_Info = {
result[0]: result[1]
for result in result_list
if result is not None
}
end_time = time.process_time() # timer
print(
"%0.6f create dictionary mapping phosphopeptide to metadata dictionary [C]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# cosmetic: add N_A to phosphopeptide rows with no hits
p_peptide_list = []
for key in UniProt_Info:
p_peptide_list.append(key)
for nestedKey in UniProt_Info[key]:
if UniProt_Info[key][nestedKey] == "":
UniProt_Info[key][nestedKey] = N_A
end_time = time.process_time() # timer
print(
"%0.6f performed cosmetic clean-up [D]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# convert UniProt_Info dictionary to dataframe
uniprot_df = pandas.DataFrame.transpose(
pandas.DataFrame.from_dict(UniProt_Info)
)
# reorder columns to match expected output file
uniprot_df[
PHOSPHOPEPTIDE
] = uniprot_df.index # make index a column too
cols = uniprot_df.columns.tolist()
# cols = [cols[-1]]+cols[4:6]+[cols[1]]+[cols[2]]+[cols[6]]+[cols[0]]
# uniprot_df = uniprot_df[cols]
uniprot_df = uniprot_df[
[
PHOSPHOPEPTIDE,
SEQUENCE10,
SEQUENCE7,
GENE_NAME,
PHOSPHORESIDUE,
UNIPROT_ID,
DESCRIPTION,
]
]
end_time = time.process_time() # timer
print(
"%0.6f reordered columns to match expected output file [1]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# concat to split then groupby to collapse
seq7_df = pandas.concat(
[
pandas.Series(row[PHOSPHOPEPTIDE], row[SEQUENCE7].split(" | "))
for _, row in uniprot_df.iterrows()
]
).reset_index()
seq7_df.columns = [SEQUENCE7, PHOSPHOPEPTIDE]
# --- -------------- begin read PSP_Regulatory_sites ---------------------------------
# read in PhosphoSitePlus Regulatory Sites dataset
# ----------- Get PhosphoSitePlus Regulatory Sites data from SQLite database (start) -----------
conn = sql.connect(uniprot_sqlite)
regsites_df = pandas.read_sql_query(PSP_REGSITE_SQL, conn)
# Close SwissProt SQLite database
conn.close()
# ... -------------- end read PSP_Regulatory_sites ------------------------------------
# keep only the human entries in dataframe
if len(species) > 0:
print(
'Limit PhosphoSitesPlus records to species "' + species + '"'
)
regsites_df = regsites_df[regsites_df.ORGANISM == species]
# merge the seq7 df with the regsites df based off of the sequence7
merge_df = seq7_df.merge(
regsites_df,
left_on=SEQUENCE7,
right_on=SITE_PLUSMINUS_7AA_SQL,
how="left",
)
# after merging df, select only the columns of interest;
# note that PROTEIN is absent here
merge_df = merge_df[
[
PHOSPHOPEPTIDE,
SEQUENCE7,
ON_FUNCTION,
ON_PROCESS,
ON_PROT_INTERACT,
ON_OTHER_INTERACT,
ON_NOTES,
]
]
# combine column values of interest
# into one FUNCTION_PHOSPHORESIDUE column"
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[ON_FUNCTION].str.cat(
merge_df[ON_PROCESS], sep="; ", na_rep=""
)
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[
FUNCTION_PHOSPHORESIDUE
].str.cat(merge_df[ON_PROT_INTERACT], sep="; ", na_rep="")
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[
FUNCTION_PHOSPHORESIDUE
].str.cat(merge_df[ON_OTHER_INTERACT], sep="; ", na_rep="")
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[
FUNCTION_PHOSPHORESIDUE
].str.cat(merge_df[ON_NOTES], sep="; ", na_rep="")
# remove the columns that were combined
merge_df = merge_df[
[PHOSPHOPEPTIDE, SEQUENCE7, FUNCTION_PHOSPHORESIDUE]
]
end_time = time.process_time() # timer
print(
"%0.6f merge regsite metadata [1a]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# cosmetic changes to Function Phosphoresidue column
fp_series = pandas.Series(merge_df[FUNCTION_PHOSPHORESIDUE])
end_time = time.process_time() # timer
print(
"%0.6f more cosmetic changes [1b]" % (end_time - start_time,),
file=sys.stderr,
) # timer
i = 0
while i < len(fp_series):
# remove the extra ";" so that it looks more professional
if fp_series[i] == "; ; ; ; ": # remove ; from empty hits
fp_series[i] = ""
while fp_series[i].endswith("; "): # remove ; from the ends
fp_series[i] = fp_series[i][:-2]
while fp_series[i].startswith("; "): # remove ; from the beginning
fp_series[i] = fp_series[i][2:]
fp_series[i] = fp_series[i].replace("; ; ; ; ", "; ")
fp_series[i] = fp_series[i].replace("; ; ; ", "; ")
fp_series[i] = fp_series[i].replace("; ; ", "; ")
# turn blanks into N_A to signify the info was searched for but cannot be found
if fp_series[i] == "":
fp_series[i] = N_A
i += 1
merge_df[FUNCTION_PHOSPHORESIDUE] = fp_series
end_time = time.process_time() # timer
print(
"%0.6f cleaned up semicolons [1c]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# merge uniprot df with merge df
uniprot_regsites_merged_df = uniprot_df.merge(
merge_df,
left_on=PHOSPHOPEPTIDE,
right_on=PHOSPHOPEPTIDE,
how="left",
)
# collapse the merged df
uniprot_regsites_collapsed_df = pandas.DataFrame(
uniprot_regsites_merged_df.groupby(PHOSPHOPEPTIDE)[
FUNCTION_PHOSPHORESIDUE
].apply(lambda x: ppep_join(x))
)
# .apply(lambda x: "%s" % ' | '.join(x)))
end_time = time.process_time() # timer
print(
"%0.6f collapsed pandas dataframe [1d]" % (end_time - start_time,),
file=sys.stderr,
) # timer
uniprot_regsites_collapsed_df[
PHOSPHOPEPTIDE
] = (
uniprot_regsites_collapsed_df.index
) # add df index as its own column
# rename columns
uniprot_regsites_collapsed_df.columns = [
FUNCTION_PHOSPHORESIDUE,
"ppp",
]
end_time = time.process_time() # timer
print(
"%0.6f selected columns to be merged to uniprot_df [1e]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# add columns based on Sequence7 matching site_+/-7_AA
uniprot_regsite_df = pandas.merge(
left=uniprot_df,
right=uniprot_regsites_collapsed_df,
how="left",
left_on=PHOSPHOPEPTIDE,
right_on="ppp",
)
end_time = time.process_time() # timer
print(
"%0.6f added columns based on Sequence7 matching site_+/-7_AA [1f]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
data_in.rename(
{"Protein description": PHOSPHOPEPTIDE},
axis="columns",
inplace=True,
)
# data_in.sort_values(PHOSPHOPEPTIDE_MATCH, inplace=True, kind='mergesort')
res2 = sorted(
data_in[PHOSPHOPEPTIDE_MATCH].tolist(), key=lambda s: s.casefold()
)
data_in = data_in.loc[res2]
end_time = time.process_time() # timer
print(
"%0.6f sorting time [1f]" % (end_time - start_time,),
file=sys.stderr,
) # timer
cols = [old_cols[0]] + old_cols[col_PKCalpha - 1:]
upstream_data = upstream_data[cols]
end_time = time.process_time() # timer
print(
"%0.6f refactored columns for Upstream Map [1g]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# #rename upstream columns in new list
# new_cols = []
# for name in cols:
# if "_NetworKIN" in name:
# name = name.split("_")[0]
# if " motif" in name:
# name = name.split(" motif")[0]
# if " sequence " in name:
# name = name.split(" sequence")[0]
# if "_Phosida" in name:
# name = name.split("_")[0]
# if "_PhosphoSite" in name:
# name = name.split("_")[0]
# new_cols.append(name)
# rename upstream columns in new list
def col_rename(name):
if "_NetworKIN" in name:
name = name.split("_")[0]
if " motif" in name:
name = name.split(" motif")[0]
if " sequence " in name:
name = name.split(" sequence")[0]
if "_Phosida" in name:
name = name.split("_")[0]
if "_PhosphoSite" in name:
name = name.split("_")[0]
return name
new_cols = [col_rename(col) for col in cols]
upstream_data.columns = new_cols
end_time = time.process_time() # timer
print(
"%0.6f renamed columns for Upstream Map [1h_1]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Create upstream_data_cast as a copy of upstream_data
# but with first column substituted by the phosphopeptide sequence
upstream_data_cast = upstream_data.copy()
new_cols_cast = new_cols
new_cols_cast[0] = "p_peptide"
upstream_data_cast.columns = new_cols_cast
upstream_data_cast["p_peptide"] = upstream_data.index
# --- -------------- begin read upstream_data_melt ------------------------------------
# ----------- Get melted kinase mapping data from SQLite database (start) -----------
conn = sql.connect(uniprot_sqlite)
upstream_data_melt_df = pandas.read_sql_query(PPEP_MELT_SQL, conn)
# Close SwissProt SQLite database
conn.close()
upstream_data_melt = upstream_data_melt_df.copy()
upstream_data_melt.columns = ["p_peptide", "characterization", "X"]
upstream_data_melt["characterization"] = [
col_rename(s) for s in upstream_data_melt["characterization"]
]
print(
"%0.6f upstream_data_melt_df initially has %d rows"
% (end_time - start_time, len(upstream_data_melt.axes[0])),
file=sys.stderr,
)
# ref: https://stackoverflow.com/a/27360130/15509512
# e.g. df.drop(df[df.score < 50].index, inplace=True)
upstream_data_melt.drop(
upstream_data_melt[upstream_data_melt.X != "X"].index, inplace=True
)
print(
"%0.6f upstream_data_melt_df pre-dedup has %d rows"
% (end_time - start_time, len(upstream_data_melt.axes[0])),
file=sys.stderr,
)
# ----------- Get melted kinase mapping data from SQLite database (finish) -----------
# ... -------------- end read upstream_data_melt --------------------------------------
end_time = time.process_time() # timer
print(
"%0.6f melted and minimized Upstream Map dataframe [1h_2]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# ... end read upstream_data_melt
end_time = time.process_time() # timer
print(
"%0.6f indexed melted Upstream Map [1h_2a]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
upstream_delta_melt_LoL = upstream_data_melt.values.tolist()
melt_dict = {}
for key in upstream_map_p_peptide_list:
melt_dict[key] = []
for el in upstream_delta_melt_LoL:
(p_peptide, characterization, X) = tuple(el)
if p_peptide in melt_dict:
melt_dict[p_peptide].append(characterization)
else:
exit(
'Phosphopeptide %s not found in ppep_mapping_db: "phopsphopeptides" and "ppep_mapping_db" must both originate from the same run of mqppep_kinase_mapping'
% (p_peptide)
)
end_time = time.process_time() # timer
print(
"%0.6f appended peptide characterizations [1h_2b]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# for key in upstream_map_p_peptide_list:
# melt_dict[key] = ' | '.join(melt_dict[key])
for key in upstream_map_p_peptide_list:
melt_dict[key] = melt_join(melt_dict[key])
end_time = time.process_time() # timer
print(
"%0.6f concatenated multiple characterizations [1h_2c]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# map_dict is a dictionary of dictionaries
map_dict = {}
for key in upstream_map_p_peptide_list:
map_dict[key] = {}
map_dict[key][PUTATIVE_UPSTREAM_DOMAINS] = melt_dict[key]
end_time = time.process_time() # timer
print(
"%0.6f instantiated map dictionary [2]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# convert map_dict to dataframe
map_df = pandas.DataFrame.transpose(
pandas.DataFrame.from_dict(map_dict)
)
map_df["p-peptide"] = map_df.index # make index a column too
cols_map_df = map_df.columns.tolist()
cols_map_df = [cols_map_df[1]] + [cols_map_df[0]]
map_df = map_df[cols_map_df]
# join map_df to uniprot_regsite_df
output_df = uniprot_regsite_df.merge(
map_df, how="left", left_on=PHOSPHOPEPTIDE, right_on="p-peptide"
)
output_df = output_df[
[
PHOSPHOPEPTIDE,
SEQUENCE10,
SEQUENCE7,
GENE_NAME,
PHOSPHORESIDUE,
UNIPROT_ID,
DESCRIPTION,
FUNCTION_PHOSPHORESIDUE,
PUTATIVE_UPSTREAM_DOMAINS,
]
]
# cols_output_prelim = output_df.columns.tolist()
#
# print("cols_output_prelim")
# print(cols_output_prelim)
#
# cols_output = cols_output_prelim[:8]+[cols_output_prelim[9]]+[cols_output_prelim[10]]
#
# print("cols_output with p-peptide")
# print(cols_output)
#
# cols_output = [col for col in cols_output if not col == "p-peptide"]
#
# print("cols_output")
# print(cols_output)
#
# output_df = output_df[cols_output]
# join output_df back to quantitative columns in data_in df
quant_cols = data_in.columns.tolist()
quant_cols = quant_cols[1:]
quant_data = data_in[quant_cols]
# ----------- Write merge/filter metadata to SQLite database (start) -----------
# Open SwissProt SQLite database
conn = sql.connect(output_sqlite)
cur = conn.cursor()
cur.executescript(MRGFLTR_DDL)
cur.execute(
CITATION_INSERT_STMT,
("mrgfltr_metadata_view", CITATION_INSERT_PSP),
)
cur.execute(
CITATION_INSERT_STMT, ("mrgfltr_metadata", CITATION_INSERT_PSP)
)
cur.execute(
CITATION_INSERT_STMT,
("mrgfltr_metadata_view", CITATION_INSERT_PSP_REF),
)
cur.execute(
CITATION_INSERT_STMT, ("mrgfltr_metadata", CITATION_INSERT_PSP_REF)
)
# Read ppep-to-sequence LUT
ppep_lut_df = pandas.read_sql_query(PPEP_ID_SQL, conn)
# write only metadata for merged/filtered records to SQLite
mrgfltr_metadata_df = output_df.copy()
# replace phosphopeptide seq with ppep.id
mrgfltr_metadata_df = ppep_lut_df.merge(
mrgfltr_metadata_df,
left_on="ppep_seq",
right_on=PHOSPHOPEPTIDE,
how="inner",
)
mrgfltr_metadata_df.drop(
columns=[PHOSPHOPEPTIDE, "ppep_seq"], inplace=True
)
# rename columns
mrgfltr_metadata_df.columns = MRGFLTR_METADATA_COLUMNS
mrgfltr_metadata_df.to_sql(
"mrgfltr_metadata",
con=conn,
if_exists="append",
index=False,
method="multi",
)
# Close SwissProt SQLite database
conn.close()
# ----------- Write merge/filter metadata to SQLite database (finish) -----------
output_df = output_df.merge(
quant_data,
how="right",
left_on=PHOSPHOPEPTIDE,
right_on=PHOSPHOPEPTIDE_MATCH,
)
output_cols = output_df.columns.tolist()
output_cols = output_cols[:-1]
output_df = output_df[output_cols]
# cosmetic changes to Upstream column
output_df[PUTATIVE_UPSTREAM_DOMAINS] = output_df[
PUTATIVE_UPSTREAM_DOMAINS
].fillna(
""
) # fill the NaN with "" for those Phosphopeptides that got a "WARNING: Failed match for " in the upstream mapping
us_series = pandas.Series(output_df[PUTATIVE_UPSTREAM_DOMAINS])
i = 0
while i < len(us_series):
# turn blanks into N_A to signify the info was searched for but cannot be found
if us_series[i] == "":
us_series[i] = N_A
i += 1
output_df[PUTATIVE_UPSTREAM_DOMAINS] = us_series
end_time = time.process_time() # timer
print(
"%0.6f establisheed output [3]" % (end_time - start_time,),
file=sys.stderr,
) # timer
(output_rows, output_cols) = output_df.shape
output_df = output_df.convert_dtypes(convert_integer=True)
# Output onto Final CSV file
output_df.to_csv(output_filename_csv, index=False)
output_df.to_csv(
output_filename_tab, quoting=None, sep="\t", index=False
)
end_time = time.process_time() # timer
print(
"%0.6f wrote output [4]" % (end_time - start_time,),
file=sys.stderr,
) # timer
print(
"{:>10} phosphopeptides written to output".format(str(output_rows))
)
end_time = time.process_time() # timer
print(
"%0.6f seconds of non-system CPU time were consumed"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Rev. 7/1/2016
# Rev. 7/3/2016 : fill NaN in Upstream column to replace to N/A's
# Rev. 7/3/2016: renamed Upstream column to PUTATIVE_UPSTREAM_DOMAINS
# Rev. 12/2/2021: Converted to Python from ipynb; use fast Aho-Corasick searching; \
# read from SwissProt SQLite database
# Rev. 12/9/2021: Transfer code to Galaxy tool wrapper
#
# copied from Excel Output Script.ipynb END #
#
try:
catch(
mqpep_getswissprot,
)
exit(0)
except Exception as e:
exit("Internal error running mqpep_getswissprot(): %s" % (e))
if __name__ == "__main__":
__main__()
# tools/mqppep/mqppep_mrgfltr.py
#!/usr/bin/env python
# Import the packages needed
import argparse
import operator # for operator.itemgetter
import os.path
import re
import shutil # for shutil.copyfile(src, dest)
import sqlite3 as sql
import sys # import the sys module for exc_info
import time
import traceback # for formatting stack-trace
from codecs import getreader as cx_getreader
import numpy as np
import pandas
# global constants
# Sentinel marking metadata that was searched for but could not be found
# (blank lookup results are later rewritten to this value before output).
N_A = "N/A"
# ref: https://stackoverflow.com/a/8915613/15509512
# answers: "How to handle exceptions in a list comprehensions"
# usage:
# from math import log
# eggs = [1,3,0,3,2]
# print([x for x in [catch(log, egg) for egg in eggs] if x is not None])
# producing:
# for <built-in function log>
# with args (0,)
# exception: math domain error
# [0.0, 1.0986122886681098, 1.0986122886681098, 0.6931471805599453]
def catch(func, *args, handle=lambda e: e, **kwargs):
    """Call *func* with the given arguments; on any exception, report and abort.

    Returns ``func(*args, **kwargs)`` on success.  On failure, prints the
    function, its arguments, the exception, and a formatted stack trace,
    then terminates the process with exit status -1 (fatal counterpart of
    ``whine``, which merely warns and returns ``None``).

    Note: *handle* is accepted for interface compatibility with callers
    but is currently unused.
    """
    try:
        return func(*args, **kwargs)
    except Exception as e:
        print("For %s" % str(func))
        print(" with args %s" % str(args))
        print(" caught exception: %s" % str(e))
        (ty, va, tb) = sys.exc_info()
        print(" stack trace: " + str(traceback.format_exception(ty, va, tb)))
        # exit() raises SystemExit, so nothing after this line can execute;
        # the original's trailing `return None` was unreachable and is removed.
        exit(-1)
def whine(func, *args, handle=lambda e: e, **kwargs):
    """Invoke *func* and return its result; on failure, warn and return None.

    Non-fatal counterpart of ``catch``: diagnostics (function, arguments,
    exception, stack trace) are printed but the process keeps running and
    the caller receives ``None``.

    Note: *handle* is accepted for interface compatibility but unused.
    """
    try:
        result = func(*args, **kwargs)
    except Exception as exc:
        print("Warning: For %s" % str(func))
        print(" with args %s" % str(args))
        print(" caught exception: %s" % str(exc))
        exc_type, exc_value, exc_tb = sys.exc_info()
        print(
            " stack trace: "
            + str(traceback.format_exception(exc_type, exc_value, exc_tb))
        )
        return None
    else:
        return result
def ppep_join(x):
    """Concatenate the non-N/A entries of *x* with ' | '.

    Entries equal to the N_A sentinel are dropped first; if nothing
    remains (or only empty strings remain), N_A is returned instead of
    an empty result.
    """
    kept = [entry for entry in x if entry != N_A]
    joined = " | ".join(kept)
    if joined == "":
        return N_A
    return joined
def melt_join(x):
    """Join the entries of *x* with ' | ', de-duplicated case-insensitively.

    For entries that differ only in letter case, the casing of the LAST
    occurrence wins, while the output order follows the first appearance
    of each case-folded key (dict insertion order).
    """
    by_lower = {}
    for entry in x:
        by_lower[entry.lower()] = entry
    return " | ".join(by_lower.values())
def __main__():
# Parse Command Line
parser = argparse.ArgumentParser(
description="Phopsphoproteomic Enrichment Pipeline Merge and Filter."
)
# inputs:
# Phosphopeptide data for experimental results, including the intensities
# and the mapping to kinase domains, in tabular format.
parser.add_argument(
"--phosphopeptides",
"-p",
nargs=1,
required=True,
dest="phosphopeptides",
help="Phosphopeptide data for experimental results, including the intensities and the mapping to kinase domains, in tabular format",
)
# UniProtKB/SwissProt DB input, SQLite
parser.add_argument(
"--ppep_mapping_db",
"-d",
nargs=1,
required=True,
dest="ppep_mapping_db",
help="UniProtKB/SwissProt SQLite Database",
)
# species to limit records chosed from PhosPhositesPlus
parser.add_argument(
"--species",
"-x",
nargs=1,
required=False,
default=[],
dest="species",
help="limit PhosphoSitePlus records to indicated species (field may be empty)",
)
# outputs:
# tabular output
parser.add_argument(
"--mrgfltr_tab",
"-o",
nargs=1,
required=True,
dest="mrgfltr_tab",
help="Tabular output file for results",
)
# CSV output
parser.add_argument(
"--mrgfltr_csv",
"-c",
nargs=1,
required=True,
dest="mrgfltr_csv",
help="CSV output file for results",
)
# SQLite output
parser.add_argument(
"--mrgfltr_sqlite",
"-S",
nargs=1,
required=True,
dest="mrgfltr_sqlite",
help="SQLite output file for results",
)
# "Make it so!" (parse the arguments)
options = parser.parse_args()
print("options: " + str(options))
# determine phosphopeptide ("upstream map") input tabular file access
if options.phosphopeptides is None:
exit('Argument "phosphopeptides" is required but not supplied')
try:
upstream_map_filename_tab = os.path.abspath(options.phosphopeptides[0])
input_file = open(upstream_map_filename_tab, "r")
input_file.close()
except Exception as e:
exit("Error parsing phosphopeptides argument: %s" % str(e))
# determine input SQLite access
if options.ppep_mapping_db is None:
exit('Argument "ppep_mapping_db" is required but not supplied')
try:
uniprot_sqlite = os.path.abspath(options.ppep_mapping_db[0])
input_file = open(uniprot_sqlite, "rb")
input_file.close()
except Exception as e:
exit("Error parsing ppep_mapping_db argument: %s" % str(e))
# copy input SQLite dataset to output SQLite dataset
if options.mrgfltr_sqlite is None:
exit('Argument "mrgfltr_sqlite" is required but not supplied')
try:
output_sqlite = os.path.abspath(options.mrgfltr_sqlite[0])
shutil.copyfile(uniprot_sqlite, output_sqlite)
except Exception as e:
exit("Error copying ppep_mapping_db to mrgfltr_sqlite: %s" % str(e))
# determine species to limit records from PSP_Regulatory_Sites
if options.species is None:
exit(
'Argument "species" is required (and may be empty) but not supplied'
)
try:
if len(options.species) > 0:
species = options.species[0]
else:
species = ""
except Exception as e:
exit("Error parsing species argument: %s" % str(e))
# determine tabular output destination
if options.mrgfltr_tab is None:
exit('Argument "mrgfltr_tab" is required but not supplied')
try:
output_filename_tab = os.path.abspath(options.mrgfltr_tab[0])
output_file = open(output_filename_tab, "w")
output_file.close()
except Exception as e:
exit("Error parsing mrgfltr_tab argument: %s" % str(e))
# determine CSV output destination
if options.mrgfltr_csv is None:
exit('Argument "mrgfltr_csv" is required but not supplied')
try:
output_filename_csv = os.path.abspath(options.mrgfltr_csv[0])
output_file = open(output_filename_csv, "w")
output_file.close()
except Exception as e:
exit("Error parsing mrgfltr_csv argument: %s" % str(e))
def mqpep_getswissprot():
#
# copied from Excel Output Script.ipynb BEGIN #
#
# String Constants #################
DEPHOSPHOPEP = "DephosphoPep"
DESCRIPTION = "Description"
FUNCTION_PHOSPHORESIDUE = (
"Function Phosphoresidue(PSP=PhosphoSitePlus.org)"
)
GENE_NAME = "Gene_Name" # Gene Name from UniProtKB
ON_FUNCTION = (
"ON_FUNCTION" # ON_FUNCTION column from PSP_Regulatory_Sites
)
ON_NOTES = "NOTES" # NOTES column from PSP_Regulatory_Sites
ON_OTHER_INTERACT = "ON_OTHER_INTERACT" # ON_OTHER_INTERACT column from PSP_Regulatory_Sites
ON_PROCESS = (
"ON_PROCESS" # ON_PROCESS column from PSP_Regulatory_Sites
)
ON_PROT_INTERACT = "ON_PROT_INTERACT" # ON_PROT_INTERACT column from PSP_Regulatory_Sites
PHOSPHOPEPTIDE = "Phosphopeptide"
PHOSPHOPEPTIDE_MATCH = "Phosphopeptide_match"
PHOSPHORESIDUE = "Phosphoresidue"
PUTATIVE_UPSTREAM_DOMAINS = "Putative Upstream Kinases(PSP=PhosphoSitePlus.org)/Phosphatases/Binding Domains"
SEQUENCE = "Sequence"
SEQUENCE10 = "Sequence10"
SEQUENCE7 = "Sequence7"
SITE_PLUSMINUS_7AA_SQL = "SITE_PLUSMINUS_7AA"
UNIPROT_ID = "UniProt_ID"
UNIPROT_SEQ_AND_META_SQL = """
select Uniprot_ID, Description, Gene_Name, Sequence,
Organism_Name, Organism_ID, PE, SV
from UniProtKB
order by Sequence, UniProt_ID
"""
UNIPROT_UNIQUE_SEQ_SQL = """
select distinct Sequence
from UniProtKB
group by Sequence
"""
PPEP_PEP_UNIPROTSEQ_SQL = """
select distinct phosphopeptide, peptide, sequence
from uniprotkb_pep_ppep_view
order by sequence
"""
PPEP_MELT_SQL = """
SELECT DISTINCT
phospho_peptide AS 'p_peptide',
kinase_map AS 'characterization',
'X' AS 'X'
FROM ppep_gene_site_view
"""
# CREATE TABLE PSP_Regulatory_site (
# site_plusminus_7AA TEXT PRIMARY KEY ON CONFLICT IGNORE,
# domain TEXT,
# ON_FUNCTION TEXT,
# ON_PROCESS TEXT,
# ON_PROT_INTERACT TEXT,
# ON_OTHER_INTERACT TEXT,
# notes TEXT,
# organism TEXT
# );
PSP_REGSITE_SQL = """
SELECT DISTINCT
SITE_PLUSMINUS_7AA ,
DOMAIN ,
ON_FUNCTION ,
ON_PROCESS ,
ON_PROT_INTERACT ,
ON_OTHER_INTERACT ,
NOTES ,
ORGANISM
FROM PSP_Regulatory_site
"""
PPEP_ID_SQL = """
SELECT
id AS 'ppep_id',
seq AS 'ppep_seq'
FROM ppep
"""
MRGFLTR_DDL = """
DROP VIEW IF EXISTS mrgfltr_metadata_view;
DROP TABLE IF EXISTS mrgfltr_metadata;
CREATE TABLE mrgfltr_metadata
( ppep_id INTEGER REFERENCES ppep(id)
, Sequence10 TEXT
, Sequence7 TEXT
, GeneName TEXT
, Phosphoresidue TEXT
, UniProtID TEXT
, Description TEXT
, FunctionPhosphoresidue TEXT
, PutativeUpstreamDomains TEXT
, PRIMARY KEY (ppep_id) ON CONFLICT IGNORE
)
;
CREATE VIEW mrgfltr_metadata_view AS
SELECT DISTINCT
ppep.seq AS phospho_peptide
, Sequence10
, Sequence7
, GeneName
, Phosphoresidue
, UniProtID
, Description
, FunctionPhosphoresidue
, PutativeUpstreamDomains
FROM
ppep, mrgfltr_metadata
WHERE
mrgfltr_metadata.ppep_id = ppep.id
ORDER BY
ppep.seq
;
"""
CITATION_INSERT_STMT = """
INSERT INTO Citation (
ObjectName,
CitationData
) VALUES (?,?)
"""
CITATION_INSERT_PSP = 'PhosphoSitePlus(R) (PSP) was created by Cell Signaling Technology Inc. It is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. When using PSP data or analyses in printed publications or in online resources, the following acknowledgements must be included: (a) the words "PhosphoSitePlus(R), www.phosphosite.org" must be included at appropriate places in the text or webpage, and (b) the following citation must be included in the bibliography: "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Skrzypek E PhosphoSitePlus, 2014: mutations, PTMs and recalibrations. Nucleic Acids Res. 2015 43:D512-20. PMID: 25514926."'
CITATION_INSERT_PSP_REF = 'Hornbeck, 2014, "PhosphoSitePlus, 2014: mutations, PTMs and recalibrations.", https://pubmed.ncbi.nlm.nih.gov/22135298, https://doi.org/10.1093/nar/gkr1122'
MRGFLTR_METADATA_COLUMNS = [
"ppep_id",
"Sequence10",
"Sequence7",
"GeneName",
"Phosphoresidue",
"UniProtID",
"Description",
"FunctionPhosphoresidue",
"PutativeUpstreamDomains",
]
# String Constants (end) ############
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class PreconditionError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
# start_time = time.clock() #timer
start_time = time.process_time() # timer
# get keys from upstream tabular file using readline()
# ref: https://stackoverflow.com/a/16713581/15509512
# answer to "Use codecs to read file with correct encoding"
file1_encoded = open(upstream_map_filename_tab, "rb")
file1 = cx_getreader("latin-1")(file1_encoded)
count = 0
upstream_map_p_peptide_list = []
re_tab = re.compile("^[^\t]*")
while True:
count += 1
# Get next line from file
line = file1.readline()
# if line is empty
# end of file is reached
if not line:
break
if count > 1:
m = re_tab.match(line)
upstream_map_p_peptide_list.append(m[0])
file1.close()
file1_encoded.close()
# Get the list of phosphopeptides with the p's that represent the phosphorylation sites removed
re_phos = re.compile("p")
end_time = time.process_time() # timer
print(
"%0.6f pre-read-SwissProt [0.1]" % (end_time - start_time,),
file=sys.stderr,
)
# ----------- Get SwissProt data from SQLite database (start) -----------
# build UniProt sequence LUT and list of unique SwissProt sequences
# Open SwissProt SQLite database
conn = sql.connect(uniprot_sqlite)
cur = conn.cursor()
# Set up structures to hold SwissProt data
uniprot_Sequence_List = []
UniProtSeqLUT = {}
# Execute query for unique seqs without fetching the results yet
uniprot_unique_seq_cur = cur.execute(UNIPROT_UNIQUE_SEQ_SQL)
while 1:
batch = uniprot_unique_seq_cur.fetchmany(size=50)
if not batch:
# handle case where no records are returned
break
for row in batch:
Sequence = row[0]
UniProtSeqLUT[(Sequence, DESCRIPTION)] = []
UniProtSeqLUT[(Sequence, GENE_NAME)] = []
UniProtSeqLUT[(Sequence, UNIPROT_ID)] = []
UniProtSeqLUT[Sequence] = []
# Execute query for seqs and metadata without fetching the results yet
uniprot_seq_and_meta = cur.execute(UNIPROT_SEQ_AND_META_SQL)
while 1:
batch = uniprot_seq_and_meta.fetchmany(size=50)
if not batch:
# handle case where no records are returned
break
for (
UniProt_ID,
Description,
Gene_Name,
Sequence,
OS,
OX,
PE,
SV,
) in batch:
uniprot_Sequence_List.append(Sequence)
UniProtSeqLUT[Sequence] = Sequence
UniProtSeqLUT[(Sequence, UNIPROT_ID)].append(UniProt_ID)
UniProtSeqLUT[(Sequence, GENE_NAME)].append(Gene_Name)
if OS != N_A:
Description += " OS=" + OS
if OX != -1:
Description += " OX=" + str(OX)
if Gene_Name != N_A:
Description += " GN=" + Gene_Name
if PE != N_A:
Description += " PE=" + PE
if SV != N_A:
Description += " SV=" + SV
UniProtSeqLUT[(Sequence, DESCRIPTION)].append(Description)
# Close SwissProt SQLite database; clean up local variables
conn.close()
Sequence = ""
UniProt_ID = ""
Description = ""
Gene_Name = ""
# ----------- Get SwissProt data from SQLite database (finish) -----------
end_time = time.process_time() # timer
print(
"%0.6f post-read-SwissProt [0.2]" % (end_time - start_time,),
file=sys.stderr,
)
# ----------- Get SwissProt data from SQLite database (start) -----------
# Open SwissProt SQLite database
conn = sql.connect(uniprot_sqlite)
cur = conn.cursor()
# Set up dictionary to aggregate results for phosphopeptides correspounding to dephosphoeptide
DephosphoPep_UniProtSeq_LUT = {}
# Set up dictionary to accumulate results
PhosphoPep_UniProtSeq_LUT = {}
# Execute query for tuples without fetching the results yet
ppep_pep_uniprotseq_cur = cur.execute(PPEP_PEP_UNIPROTSEQ_SQL)
while 1:
batch = ppep_pep_uniprotseq_cur.fetchmany(size=50)
if not batch:
# handle case where no records are returned
break
for (phospho_pep, dephospho_pep, sequence) in batch:
# do interesting stuff here...
PhosphoPep_UniProtSeq_LUT[phospho_pep] = phospho_pep
PhosphoPep_UniProtSeq_LUT[
(phospho_pep, DEPHOSPHOPEP)
] = dephospho_pep
if dephospho_pep not in DephosphoPep_UniProtSeq_LUT:
DephosphoPep_UniProtSeq_LUT[dephospho_pep] = set()
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, DESCRIPTION)
] = []
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, GENE_NAME)
] = []
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
] = []
DephosphoPep_UniProtSeq_LUT[(dephospho_pep, SEQUENCE)] = []
DephosphoPep_UniProtSeq_LUT[dephospho_pep].add(phospho_pep)
if (
sequence
not in DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, SEQUENCE)
]
):
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, SEQUENCE)
].append(sequence)
for phospho_pep in DephosphoPep_UniProtSeq_LUT[dephospho_pep]:
if phospho_pep != phospho_pep:
print(
"phospho_pep:'%s' phospho_pep:'%s'"
% (phospho_pep, phospho_pep)
)
if phospho_pep not in PhosphoPep_UniProtSeq_LUT:
PhosphoPep_UniProtSeq_LUT[phospho_pep] = phospho_pep
PhosphoPep_UniProtSeq_LUT[
(phospho_pep, DEPHOSPHOPEP)
] = dephospho_pep
r = list(
zip(
[s for s in UniProtSeqLUT[(sequence, UNIPROT_ID)]],
[s for s in UniProtSeqLUT[(sequence, GENE_NAME)]],
[
s
for s in UniProtSeqLUT[(sequence, DESCRIPTION)]
],
)
)
# Sort by `UniProt_ID`
# ref: https://stackoverflow.com/a/4174955/15509512
r = sorted(r, key=operator.itemgetter(0))
# Get one tuple for each `phospho_pep`
# in DephosphoPep_UniProtSeq_LUT[dephospho_pep]
for (upid, gn, desc) in r:
# Append pseudo-tuple per UniProt_ID but only when it is not present
if (
upid
not in DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
]
):
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
].append(upid)
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, DESCRIPTION)
].append(desc)
DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, GENE_NAME)
].append(gn)
# Close SwissProt SQLite database; clean up local variables
conn.close()
# wipe local variables
phospho_pep = dephospho_pep = sequence = 0
upid = gn = desc = r = ""
# ----------- Get SwissProt data from SQLite database (finish) -----------
end_time = time.process_time() # timer
print(
"%0.6f finished reading and decoding '%s' [0.4]"
% (end_time - start_time, upstream_map_filename_tab),
file=sys.stderr,
)
print(
"{:>10} unique upstream phosphopeptides tested".format(
str(len(upstream_map_p_peptide_list))
)
)
# Read in Upstream tabular file
# We are discarding the intensity data; so read it as text
upstream_data = pandas.read_table(
upstream_map_filename_tab, dtype="str", index_col=0
)
end_time = time.process_time() # timer
print(
"%0.6f read Upstream Map from file [1g_1]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
upstream_data.index = upstream_map_p_peptide_list
end_time = time.process_time() # timer
print(
"%0.6f added index to Upstream Map [1g_2]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# trim upstream_data to include only the upstream map columns
old_cols = upstream_data.columns.tolist()
i = 0
first_intensity = -1
last_intensity = -1
intensity_re = re.compile("Intensity.*")
for col_name in old_cols:
m = intensity_re.match(col_name)
if m:
last_intensity = i
if first_intensity == -1:
first_intensity = i
i += 1
# print('last intensity = %d' % last_intensity)
col_PKCalpha = last_intensity + 2
data_in_cols = [old_cols[0]] + old_cols[
first_intensity: last_intensity + 1
]
if upstream_data.empty:
print("upstream_data is empty")
exit(0)
data_in = upstream_data.copy(deep=True)[data_in_cols]
# Convert floating-point integers to int64 integers
# ref: https://stackoverflow.com/a/68497603/15509512
data_in[list(data_in.columns[1:])] = (
data_in[list(data_in.columns[1:])]
.astype("float64")
.apply(np.int64)
)
# create another phosphopeptide column that will be used to join later;
# MAY need to change depending on Phosphopeptide column position
# data_in[PHOSPHOPEPTIDE_MATCH] = data_in[data_in.columns.tolist()[0]]
data_in[PHOSPHOPEPTIDE_MATCH] = data_in.index
end_time = time.process_time() # timer
print(
"%0.6f set data_in[PHOSPHOPEPTIDE_MATCH] [A]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Produce a dictionary of metadata for a single phosphopeptide.
# This is a replacement of `UniProtInfo_subdict` in the original code.
def pseq_to_subdict(phospho_pep):
# Strip "p" from phosphopeptide sequence
dephospho_pep = re_phos.sub("", phospho_pep)
# Determine number of phosphoresidues in phosphopeptide
numps = len(phospho_pep) - len(dephospho_pep)
# Determine location(s) of phosphoresidue(s) in phosphopeptide
# (used later for Phosphoresidue, Sequence7, and Sequence10)
ploc = [] # list of p locations
i = 0
p = phospho_pep
while i < numps:
ploc.append(p.find("p"))
p = p[: p.find("p")] + p[p.find("p") + 1:]
i += 1
# Establish nested dictionary
result = {}
result[SEQUENCE] = []
result[UNIPROT_ID] = []
result[DESCRIPTION] = []
result[GENE_NAME] = []
result[PHOSPHORESIDUE] = []
result[SEQUENCE7] = []
result[SEQUENCE10] = []
# Add stripped sequence to dictionary
result[SEQUENCE].append(dephospho_pep)
# Locate phospho_pep in PhosphoPep_UniProtSeq_LUT
# Caller may elect to:
# try:
# ...
# except PreconditionError as pe:
# print("'{expression}': {message}".format(
# expression = pe.expression,
# message = pe.message))
# )
# )
if phospho_pep not in PhosphoPep_UniProtSeq_LUT:
raise PreconditionError(
phospho_pep,
"no matching phosphopeptide found in PhosphoPep_UniProtSeq_LUT",
)
if dephospho_pep not in DephosphoPep_UniProtSeq_LUT:
raise PreconditionError(
dephospho_pep,
"dephosphorylated phosphopeptide not found in DephosphoPep_UniProtSeq_LUT",
)
if (
dephospho_pep != PhosphoPep_UniProtSeq_LUT[(phospho_pep, DEPHOSPHOPEP)]
):
my_err_msg = "dephosphorylated phosphopeptide does not match "
my_err_msg += "PhosphoPep_UniProtSeq_LUT[(phospho_pep,DEPHOSPHOPEP)] = "
my_err_msg += PhosphoPep_UniProtSeq_LUT[(phospho_pep, DEPHOSPHOPEP)]
raise PreconditionError(dephospho_pep, my_err_msg)
result[SEQUENCE] = [dephospho_pep]
result[UNIPROT_ID] = DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, UNIPROT_ID)
]
result[DESCRIPTION] = DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, DESCRIPTION)
]
result[GENE_NAME] = DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, GENE_NAME)
]
if (dephospho_pep, SEQUENCE) not in DephosphoPep_UniProtSeq_LUT:
raise PreconditionError(
dephospho_pep,
"no matching phosphopeptide found in DephosphoPep_UniProtSeq_LUT",
)
UniProtSeqList = DephosphoPep_UniProtSeq_LUT[
(dephospho_pep, SEQUENCE)
]
if len(UniProtSeqList) < 1:
print(
"Skipping DephosphoPep_UniProtSeq_LUT[('%s',SEQUENCE)] because value has zero length"
% dephospho_pep
)
# raise PreconditionError(
# "DephosphoPep_UniProtSeq_LUT[('" + dephospho_pep + ",SEQUENCE)",
# 'value has zero length'
# )
for UniProtSeq in UniProtSeqList:
i = 0
phosphoresidues = []
seq7s_set = set()
seq7s = []
seq10s_set = set()
seq10s = []
while i < len(ploc):
start = UniProtSeq.find(dephospho_pep)
# handle case where no sequence was found for dep-pep
if start < 0:
i += 1
continue
psite = (
start + ploc[i]
) # location of phosphoresidue on protein sequence
# add Phosphoresidue
phosphosite = "p" + str(UniProtSeq)[psite] + str(psite + 1)
phosphoresidues.append(phosphosite)
# Add Sequence7
if psite < 7: # phospho_pep at N terminus
seq7 = str(UniProtSeq)[: psite + 8]
if seq7[psite] == "S": # if phosphosresidue is serine
pres = "s"
elif (
seq7[psite] == "T"
): # if phosphosresidue is threonine
pres = "t"
elif (
seq7[psite] == "Y"
): # if phosphoresidue is tyrosine
pres = "y"
else: # if not pSTY
pres = "?"
seq7 = (
seq7[:psite] + pres + seq7[psite + 1: psite + 8]
)
while (
len(seq7) < 15
): # add appropriate number of "_" to the front
seq7 = "_" + seq7
elif (
len(UniProtSeq) - psite < 8
): # phospho_pep at C terminus
seq7 = str(UniProtSeq)[psite - 7:]
if seq7[7] == "S":
pres = "s"
elif seq7[7] == "T":
pres = "t"
elif seq7[7] == "Y":
pres = "y"
else:
pres = "?"
seq7 = seq7[:7] + pres + seq7[8:]
while (
len(seq7) < 15
): # add appropriate number of "_" to the back
seq7 = seq7 + "_"
else:
seq7 = str(UniProtSeq)[psite - 7: psite + 8]
pres = "" # phosphoresidue
if seq7[7] == "S": # if phosphosresidue is serine
pres = "s"
elif seq7[7] == "T": # if phosphosresidue is threonine
pres = "t"
elif seq7[7] == "Y": # if phosphoresidue is tyrosine
pres = "y"
else: # if not pSTY
pres = "?"
seq7 = seq7[:7] + pres + seq7[8:]
if seq7 not in seq7s_set:
seq7s.append(seq7)
seq7s_set.add(seq7)
# add Sequence10
if psite < 10: # phospho_pep at N terminus
seq10 = (
str(UniProtSeq)[:psite] + "p" + str(UniProtSeq)[psite: psite + 11]
)
elif (
len(UniProtSeq) - psite < 11
): # phospho_pep at C terminus
seq10 = (
str(UniProtSeq)[psite - 10: psite] + "p" + str(UniProtSeq)[psite:]
)
else:
seq10 = str(UniProtSeq)[psite - 10: psite + 11]
seq10 = seq10[:10] + "p" + seq10[10:]
if seq10 not in seq10s_set:
seq10s.append(seq10)
seq10s_set.add(seq10)
i += 1
result[PHOSPHORESIDUE].append(phosphoresidues)
result[SEQUENCE7].append(seq7s)
# result[SEQUENCE10] is a list of lists of strings
result[SEQUENCE10].append(seq10s)
r = list(
zip(
result[UNIPROT_ID],
result[GENE_NAME],
result[DESCRIPTION],
result[PHOSPHORESIDUE],
)
)
# Sort by `UniProt_ID`
# ref: https://stackoverflow.com//4174955/15509512
s = sorted(r, key=operator.itemgetter(0))
result[UNIPROT_ID] = []
result[GENE_NAME] = []
result[DESCRIPTION] = []
result[PHOSPHORESIDUE] = []
for r in s:
result[UNIPROT_ID].append(r[0])
result[GENE_NAME].append(r[1])
result[DESCRIPTION].append(r[2])
result[PHOSPHORESIDUE].append(r[3])
# convert lists to strings in the dictionary
for key, value in result.items():
if key not in [PHOSPHORESIDUE, SEQUENCE7, SEQUENCE10]:
result[key] = "; ".join(map(str, value))
elif key in [SEQUENCE10]:
# result[SEQUENCE10] is a list of lists of strings
joined_value = ""
joined_set = set()
sep = ""
for valL in value:
# valL is a list of strings
for val in valL:
# val is a string
if val not in joined_set:
joined_set.add(val)
joined_value += sep + val
sep = "; "
# joined_value is a string
result[key] = joined_value
newstring = "; ".join(
[", ".join(prez) for prez in result[PHOSPHORESIDUE]]
)
# #separate the isoforms in PHOSPHORESIDUE column with ";"
# oldstring = result[PHOSPHORESIDUE]
# oldlist = list(oldstring)
# newstring = ""
# i = 0
# for e in oldlist:
# if e == ";":
# if numps > 1:
# if i%numps:
# newstring = newstring + ";"
# else:
# newstring = newstring + ","
# else:
# newstring = newstring + ";"
# i +=1
# else:
# newstring = newstring + e
result[PHOSPHORESIDUE] = newstring
# separate sequence7's by |
oldstring = result[SEQUENCE7]
oldlist = oldstring
newstring = ""
for ol in oldlist:
for e in ol:
if e == ";":
newstring = newstring + " |"
elif len(newstring) > 0 and 1 > newstring.count(e):
newstring = newstring + " | " + e
elif 1 > newstring.count(e):
newstring = newstring + e
result[SEQUENCE7] = newstring
return [phospho_pep, result]
# Construct list of [string, dictionary] lists
# where the dictionary provides the SwissProt metadata
# for a phosphopeptide
result_list = [
whine(pseq_to_subdict, psequence)
for psequence in data_in[PHOSPHOPEPTIDE_MATCH]
]
end_time = time.process_time() # timer
print(
"%0.6f added SwissProt annotations to phosphopeptides [B]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Construct dictionary from list of lists
# ref: https://www.8bitavenue.com/how-to-convert-list-of-lists-to-dictionary-in-python/
UniProt_Info = {
result[0]: result[1]
for result in result_list
if result is not None
}
end_time = time.process_time() # timer
print(
"%0.6f create dictionary mapping phosphopeptide to metadata dictionary [C]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# cosmetic: add N_A to phosphopeptide rows with no hits
p_peptide_list = []
for key in UniProt_Info:
p_peptide_list.append(key)
for nestedKey in UniProt_Info[key]:
if UniProt_Info[key][nestedKey] == "":
UniProt_Info[key][nestedKey] = N_A
end_time = time.process_time() # timer
print(
"%0.6f performed cosmetic clean-up [D]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# convert UniProt_Info dictionary to dataframe
uniprot_df = pandas.DataFrame.transpose(
pandas.DataFrame.from_dict(UniProt_Info)
)
# reorder columns to match expected output file
uniprot_df[
PHOSPHOPEPTIDE
] = uniprot_df.index # make index a column too
cols = uniprot_df.columns.tolist()
# cols = [cols[-1]]+cols[4:6]+[cols[1]]+[cols[2]]+[cols[6]]+[cols[0]]
# uniprot_df = uniprot_df[cols]
uniprot_df = uniprot_df[
[
PHOSPHOPEPTIDE,
SEQUENCE10,
SEQUENCE7,
GENE_NAME,
PHOSPHORESIDUE,
UNIPROT_ID,
DESCRIPTION,
]
]
end_time = time.process_time() # timer
print(
"%0.6f reordered columns to match expected output file [1]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# concat to split then groupby to collapse
seq7_df = pandas.concat(
[
pandas.Series(row[PHOSPHOPEPTIDE], row[SEQUENCE7].split(" | "))
for _, row in uniprot_df.iterrows()
]
).reset_index()
seq7_df.columns = [SEQUENCE7, PHOSPHOPEPTIDE]
# --- -------------- begin read PSP_Regulatory_sites ---------------------------------
# read in PhosphoSitePlus Regulatory Sites dataset
# ----------- Get PhosphoSitePlus Regulatory Sites data from SQLite database (start) -----------
conn = sql.connect(uniprot_sqlite)
regsites_df = pandas.read_sql_query(PSP_REGSITE_SQL, conn)
# Close SwissProt SQLite database
conn.close()
# ... -------------- end read PSP_Regulatory_sites ------------------------------------
# keep only the human entries in dataframe
if len(species) > 0:
print(
'Limit PhosphoSitesPlus records to species "' + species + '"'
)
regsites_df = regsites_df[regsites_df.ORGANISM == species]
# merge the seq7 df with the regsites df based off of the sequence7
merge_df = seq7_df.merge(
regsites_df,
left_on=SEQUENCE7,
right_on=SITE_PLUSMINUS_7AA_SQL,
how="left",
)
# after merging df, select only the columns of interest;
# note that PROTEIN is absent here
merge_df = merge_df[
[
PHOSPHOPEPTIDE,
SEQUENCE7,
ON_FUNCTION,
ON_PROCESS,
ON_PROT_INTERACT,
ON_OTHER_INTERACT,
ON_NOTES,
]
]
# combine column values of interest
# into one FUNCTION_PHOSPHORESIDUE column"
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[ON_FUNCTION].str.cat(
merge_df[ON_PROCESS], sep="; ", na_rep=""
)
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[
FUNCTION_PHOSPHORESIDUE
].str.cat(merge_df[ON_PROT_INTERACT], sep="; ", na_rep="")
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[
FUNCTION_PHOSPHORESIDUE
].str.cat(merge_df[ON_OTHER_INTERACT], sep="; ", na_rep="")
merge_df[FUNCTION_PHOSPHORESIDUE] = merge_df[
FUNCTION_PHOSPHORESIDUE
].str.cat(merge_df[ON_NOTES], sep="; ", na_rep="")
# remove the columns that were combined
merge_df = merge_df[
[PHOSPHOPEPTIDE, SEQUENCE7, FUNCTION_PHOSPHORESIDUE]
]
end_time = time.process_time() # timer
print(
"%0.6f merge regsite metadata [1a]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# cosmetic changes to Function Phosphoresidue column
fp_series = pandas.Series(merge_df[FUNCTION_PHOSPHORESIDUE])
end_time = time.process_time() # timer
print(
"%0.6f more cosmetic changes [1b]" % (end_time - start_time,),
file=sys.stderr,
) # timer
i = 0
while i < len(fp_series):
# remove the extra ";" so that it looks more professional
if fp_series[i] == "; ; ; ; ": # remove ; from empty hits
fp_series[i] = ""
while fp_series[i].endswith("; "): # remove ; from the ends
fp_series[i] = fp_series[i][:-2]
while fp_series[i].startswith("; "): # remove ; from the beginning
fp_series[i] = fp_series[i][2:]
fp_series[i] = fp_series[i].replace("; ; ; ; ", "; ")
fp_series[i] = fp_series[i].replace("; ; ; ", "; ")
fp_series[i] = fp_series[i].replace("; ; ", "; ")
# turn blanks into N_A to signify the info was searched for but cannot be found
if fp_series[i] == "":
fp_series[i] = N_A
i += 1
merge_df[FUNCTION_PHOSPHORESIDUE] = fp_series
end_time = time.process_time() # timer
print(
"%0.6f cleaned up semicolons [1c]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# merge uniprot df with merge df
uniprot_regsites_merged_df = uniprot_df.merge(
merge_df,
left_on=PHOSPHOPEPTIDE,
right_on=PHOSPHOPEPTIDE,
how="left",
)
# collapse the merged df
uniprot_regsites_collapsed_df = pandas.DataFrame(
uniprot_regsites_merged_df.groupby(PHOSPHOPEPTIDE)[
FUNCTION_PHOSPHORESIDUE
].apply(lambda x: ppep_join(x))
)
# .apply(lambda x: "%s" % ' | '.join(x)))
end_time = time.process_time() # timer
print(
"%0.6f collapsed pandas dataframe [1d]" % (end_time - start_time,),
file=sys.stderr,
) # timer
uniprot_regsites_collapsed_df[
PHOSPHOPEPTIDE
] = (
uniprot_regsites_collapsed_df.index
) # add df index as its own column
# rename columns
uniprot_regsites_collapsed_df.columns = [
FUNCTION_PHOSPHORESIDUE,
"ppp",
]
end_time = time.process_time() # timer
print(
"%0.6f selected columns to be merged to uniprot_df [1e]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# add columns based on Sequence7 matching site_+/-7_AA
uniprot_regsite_df = pandas.merge(
left=uniprot_df,
right=uniprot_regsites_collapsed_df,
how="left",
left_on=PHOSPHOPEPTIDE,
right_on="ppp",
)
end_time = time.process_time() # timer
print(
"%0.6f added columns based on Sequence7 matching site_+/-7_AA [1f]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
data_in.rename(
{"Protein description": PHOSPHOPEPTIDE},
axis="columns",
inplace=True,
)
# data_in.sort_values(PHOSPHOPEPTIDE_MATCH, inplace=True, kind='mergesort')
res2 = sorted(
data_in[PHOSPHOPEPTIDE_MATCH].tolist(), key=lambda s: s.casefold()
)
data_in = data_in.loc[res2]
end_time = time.process_time() # timer
print(
"%0.6f sorting time [1f]" % (end_time - start_time,),
file=sys.stderr,
) # timer
cols = [old_cols[0]] + old_cols[col_PKCalpha - 1:]
upstream_data = upstream_data[cols]
end_time = time.process_time() # timer
print(
"%0.6f refactored columns for Upstream Map [1g]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# #rename upstream columns in new list
# new_cols = []
# for name in cols:
# if "_NetworKIN" in name:
# name = name.split("_")[0]
# if " motif" in name:
# name = name.split(" motif")[0]
# if " sequence " in name:
# name = name.split(" sequence")[0]
# if "_Phosida" in name:
# name = name.split("_")[0]
# if "_PhosphoSite" in name:
# name = name.split("_")[0]
# new_cols.append(name)
# rename upstream columns in new list
def col_rename(name):
if "_NetworKIN" in name:
name = name.split("_")[0]
if " motif" in name:
name = name.split(" motif")[0]
if " sequence " in name:
name = name.split(" sequence")[0]
if "_Phosida" in name:
name = name.split("_")[0]
if "_PhosphoSite" in name:
name = name.split("_")[0]
return name
new_cols = [col_rename(col) for col in cols]
upstream_data.columns = new_cols
end_time = time.process_time() # timer
print(
"%0.6f renamed columns for Upstream Map [1h_1]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Create upstream_data_cast as a copy of upstream_data
# but with first column substituted by the phosphopeptide sequence
upstream_data_cast = upstream_data.copy()
new_cols_cast = new_cols
new_cols_cast[0] = "p_peptide"
upstream_data_cast.columns = new_cols_cast
upstream_data_cast["p_peptide"] = upstream_data.index
# --- -------------- begin read upstream_data_melt ------------------------------------
# ----------- Get melted kinase mapping data from SQLite database (start) -----------
conn = sql.connect(uniprot_sqlite)
upstream_data_melt_df = pandas.read_sql_query(PPEP_MELT_SQL, conn)
# Close SwissProt SQLite database
conn.close()
upstream_data_melt = upstream_data_melt_df.copy()
upstream_data_melt.columns = ["p_peptide", "characterization", "X"]
upstream_data_melt["characterization"] = [
col_rename(s) for s in upstream_data_melt["characterization"]
]
print(
"%0.6f upstream_data_melt_df initially has %d rows"
% (end_time - start_time, len(upstream_data_melt.axes[0])),
file=sys.stderr,
)
# ref: https://stackoverflow.com/a/27360130/15509512
# e.g. df.drop(df[df.score < 50].index, inplace=True)
upstream_data_melt.drop(
upstream_data_melt[upstream_data_melt.X != "X"].index, inplace=True
)
print(
"%0.6f upstream_data_melt_df pre-dedup has %d rows"
% (end_time - start_time, len(upstream_data_melt.axes[0])),
file=sys.stderr,
)
# ----------- Get melted kinase mapping data from SQLite database (finish) -----------
# ... -------------- end read upstream_data_melt --------------------------------------
end_time = time.process_time() # timer
print(
"%0.6f melted and minimized Upstream Map dataframe [1h_2]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# ... end read upstream_data_melt
end_time = time.process_time() # timer
print(
"%0.6f indexed melted Upstream Map [1h_2a]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
upstream_delta_melt_LoL = upstream_data_melt.values.tolist()
melt_dict = {}
for key in upstream_map_p_peptide_list:
melt_dict[key] = []
for el in upstream_delta_melt_LoL:
(p_peptide, characterization, X) = tuple(el)
if p_peptide in melt_dict:
melt_dict[p_peptide].append(characterization)
else:
exit(
'Phosphopeptide %s not found in ppep_mapping_db: "phopsphopeptides" and "ppep_mapping_db" must both originate from the same run of mqppep_kinase_mapping'
% (p_peptide)
)
end_time = time.process_time() # timer
print(
"%0.6f appended peptide characterizations [1h_2b]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# for key in upstream_map_p_peptide_list:
# melt_dict[key] = ' | '.join(melt_dict[key])
for key in upstream_map_p_peptide_list:
melt_dict[key] = melt_join(melt_dict[key])
end_time = time.process_time() # timer
print(
"%0.6f concatenated multiple characterizations [1h_2c]"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# map_dict is a dictionary of dictionaries
map_dict = {}
for key in upstream_map_p_peptide_list:
map_dict[key] = {}
map_dict[key][PUTATIVE_UPSTREAM_DOMAINS] = melt_dict[key]
end_time = time.process_time() # timer
print(
"%0.6f instantiated map dictionary [2]" % (end_time - start_time,),
file=sys.stderr,
) # timer
# convert map_dict to dataframe
map_df = pandas.DataFrame.transpose(
pandas.DataFrame.from_dict(map_dict)
)
map_df["p-peptide"] = map_df.index # make index a column too
cols_map_df = map_df.columns.tolist()
cols_map_df = [cols_map_df[1]] + [cols_map_df[0]]
map_df = map_df[cols_map_df]
# join map_df to uniprot_regsite_df
output_df = uniprot_regsite_df.merge(
map_df, how="left", left_on=PHOSPHOPEPTIDE, right_on="p-peptide"
)
output_df = output_df[
[
PHOSPHOPEPTIDE,
SEQUENCE10,
SEQUENCE7,
GENE_NAME,
PHOSPHORESIDUE,
UNIPROT_ID,
DESCRIPTION,
FUNCTION_PHOSPHORESIDUE,
PUTATIVE_UPSTREAM_DOMAINS,
]
]
# cols_output_prelim = output_df.columns.tolist()
#
# print("cols_output_prelim")
# print(cols_output_prelim)
#
# cols_output = cols_output_prelim[:8]+[cols_output_prelim[9]]+[cols_output_prelim[10]]
#
# print("cols_output with p-peptide")
# print(cols_output)
#
# cols_output = [col for col in cols_output if not col == "p-peptide"]
#
# print("cols_output")
# print(cols_output)
#
# output_df = output_df[cols_output]
# join output_df back to quantitative columns in data_in df
quant_cols = data_in.columns.tolist()
quant_cols = quant_cols[1:]
quant_data = data_in[quant_cols]
# ----------- Write merge/filter metadata to SQLite database (start) -----------
# Open SwissProt SQLite database
conn = sql.connect(output_sqlite)
cur = conn.cursor()
cur.executescript(MRGFLTR_DDL)
cur.execute(
CITATION_INSERT_STMT,
("mrgfltr_metadata_view", CITATION_INSERT_PSP),
)
cur.execute(
CITATION_INSERT_STMT, ("mrgfltr_metadata", CITATION_INSERT_PSP)
)
cur.execute(
CITATION_INSERT_STMT,
("mrgfltr_metadata_view", CITATION_INSERT_PSP_REF),
)
cur.execute(
CITATION_INSERT_STMT, ("mrgfltr_metadata", CITATION_INSERT_PSP_REF)
)
# Read ppep-to-sequence LUT
ppep_lut_df = pandas.read_sql_query(PPEP_ID_SQL, conn)
# write only metadata for merged/filtered records to SQLite
mrgfltr_metadata_df = output_df.copy()
# replace phosphopeptide seq with ppep.id
mrgfltr_metadata_df = ppep_lut_df.merge(
mrgfltr_metadata_df,
left_on="ppep_seq",
right_on=PHOSPHOPEPTIDE,
how="inner",
)
mrgfltr_metadata_df.drop(
columns=[PHOSPHOPEPTIDE, "ppep_seq"], inplace=True
)
# rename columns
mrgfltr_metadata_df.columns = MRGFLTR_METADATA_COLUMNS
mrgfltr_metadata_df.to_sql(
"mrgfltr_metadata",
con=conn,
if_exists="append",
index=False,
method="multi",
)
# Close SwissProt SQLite database
conn.close()
# ----------- Write merge/filter metadata to SQLite database (finish) -----------
output_df = output_df.merge(
quant_data,
how="right",
left_on=PHOSPHOPEPTIDE,
right_on=PHOSPHOPEPTIDE_MATCH,
)
output_cols = output_df.columns.tolist()
output_cols = output_cols[:-1]
output_df = output_df[output_cols]
# cosmetic changes to Upstream column
output_df[PUTATIVE_UPSTREAM_DOMAINS] = output_df[
PUTATIVE_UPSTREAM_DOMAINS
].fillna(
""
) # fill the NaN with "" for those Phosphopeptides that got a "WARNING: Failed match for " in the upstream mapping
us_series = pandas.Series(output_df[PUTATIVE_UPSTREAM_DOMAINS])
i = 0
while i < len(us_series):
# turn blanks into N_A to signify the info was searched for but cannot be found
if us_series[i] == "":
us_series[i] = N_A
i += 1
output_df[PUTATIVE_UPSTREAM_DOMAINS] = us_series
end_time = time.process_time() # timer
print(
"%0.6f establisheed output [3]" % (end_time - start_time,),
file=sys.stderr,
) # timer
(output_rows, output_cols) = output_df.shape
output_df = output_df.convert_dtypes(convert_integer=True)
# Output onto Final CSV file
output_df.to_csv(output_filename_csv, index=False)
output_df.to_csv(
output_filename_tab, quoting=None, sep="\t", index=False
)
end_time = time.process_time() # timer
print(
"%0.6f wrote output [4]" % (end_time - start_time,),
file=sys.stderr,
) # timer
print(
"{:>10} phosphopeptides written to output".format(str(output_rows))
)
end_time = time.process_time() # timer
print(
"%0.6f seconds of non-system CPU time were consumed"
% (end_time - start_time,),
file=sys.stderr,
) # timer
# Rev. 7/1/2016
# Rev. 7/3/2016 : fill NaN in Upstream column to replace to N/A's
# Rev. 7/3/2016: renamed Upstream column to PUTATIVE_UPSTREAM_DOMAINS
# Rev. 12/2/2021: Converted to Python from ipynb; use fast Aho-Corasick searching; \
# read from SwissProt SQLite database
# Rev. 12/9/2021: Transfer code to Galaxy tool wrapper
#
# copied from Excel Output Script.ipynb END #
#
try:
catch(
mqpep_getswissprot,
)
exit(0)
except Exception as e:
exit("Internal error running mqpep_getswissprot(): %s" % (e))
if __name__ == "__main__":
    # Script entry point: delegate to the module-level __main__() driver.
    __main__()
| en | 0.624435 | #!/usr/bin/env python # Import the packages needed # for operator.itemgetter # for shutil.copyfile(src, dest) # import the sys module for exc_info # for formatting stack-trace # global constants # ref: https://stackoverflow.com/a/8915613/15509512 # answers: "How to handle exceptions in a list comprehensions" # usage: # from math import log # eggs = [1,3,0,3,2] # print([x for x in [catch(log, egg) for egg in eggs] if x is not None]) # producing: # for <built-in function log> # with args (0,) # exception: math domain error # [0.0, 1.0986122886681098, 1.0986122886681098, 0.6931471805599453] # Parse Command Line # inputs: # Phosphopeptide data for experimental results, including the intensities # and the mapping to kinase domains, in tabular format. # UniProtKB/SwissProt DB input, SQLite # species to limit records chosed from PhosPhositesPlus # outputs: # tabular output # CSV output # SQLite output # "Make it so!" (parse the arguments) # determine phosphopeptide ("upstream map") input tabular file access # determine input SQLite access # copy input SQLite dataset to output SQLite dataset # determine species to limit records from PSP_Regulatory_Sites # determine tabular output destination # determine CSV output destination # # copied from Excel Output Script.ipynb BEGIN # # # String Constants ################# # Gene Name from UniProtKB # ON_FUNCTION column from PSP_Regulatory_Sites # NOTES column from PSP_Regulatory_Sites # ON_OTHER_INTERACT column from PSP_Regulatory_Sites # ON_PROCESS column from PSP_Regulatory_Sites # ON_PROT_INTERACT column from PSP_Regulatory_Sites select Uniprot_ID, Description, Gene_Name, Sequence, Organism_Name, Organism_ID, PE, SV from UniProtKB order by Sequence, UniProt_ID select distinct Sequence from UniProtKB group by Sequence select distinct phosphopeptide, peptide, sequence from uniprotkb_pep_ppep_view order by sequence SELECT DISTINCT phospho_peptide AS 'p_peptide', kinase_map AS 'characterization', 'X' AS 'X' FROM 
ppep_gene_site_view # CREATE TABLE PSP_Regulatory_site ( # site_plusminus_7AA TEXT PRIMARY KEY ON CONFLICT IGNORE, # domain TEXT, # ON_FUNCTION TEXT, # ON_PROCESS TEXT, # ON_PROT_INTERACT TEXT, # ON_OTHER_INTERACT TEXT, # notes TEXT, # organism TEXT # ); SELECT DISTINCT SITE_PLUSMINUS_7AA , DOMAIN , ON_FUNCTION , ON_PROCESS , ON_PROT_INTERACT , ON_OTHER_INTERACT , NOTES , ORGANISM FROM PSP_Regulatory_site SELECT id AS 'ppep_id', seq AS 'ppep_seq' FROM ppep DROP VIEW IF EXISTS mrgfltr_metadata_view; DROP TABLE IF EXISTS mrgfltr_metadata; CREATE TABLE mrgfltr_metadata ( ppep_id INTEGER REFERENCES ppep(id) , Sequence10 TEXT , Sequence7 TEXT , GeneName TEXT , Phosphoresidue TEXT , UniProtID TEXT , Description TEXT , FunctionPhosphoresidue TEXT , PutativeUpstreamDomains TEXT , PRIMARY KEY (ppep_id) ON CONFLICT IGNORE ) ; CREATE VIEW mrgfltr_metadata_view AS SELECT DISTINCT ppep.seq AS phospho_peptide , Sequence10 , Sequence7 , GeneName , Phosphoresidue , UniProtID , Description , FunctionPhosphoresidue , PutativeUpstreamDomains FROM ppep, mrgfltr_metadata WHERE mrgfltr_metadata.ppep_id = ppep.id ORDER BY ppep.seq ; INSERT INTO Citation ( ObjectName, CitationData ) VALUES (?,?) # String Constants (end) ############ Base class for exceptions in this module. Exception raised for errors in the input. 
Attributes: expression -- input expression in which the error occurred message -- explanation of the error # start_time = time.clock() #timer # timer # get keys from upstream tabular file using readline() # ref: https://stackoverflow.com/a/16713581/15509512 # answer to "Use codecs to read file with correct encoding" # Get next line from file # if line is empty # end of file is reached # Get the list of phosphopeptides with the p's that represent the phosphorylation sites removed # timer # ----------- Get SwissProt data from SQLite database (start) ----------- # build UniProt sequence LUT and list of unique SwissProt sequences # Open SwissProt SQLite database # Set up structures to hold SwissProt data # Execute query for unique seqs without fetching the results yet # handle case where no records are returned # Execute query for seqs and metadata without fetching the results yet # handle case where no records are returned # Close SwissProt SQLite database; clean up local variables # ----------- Get SwissProt data from SQLite database (finish) ----------- # timer # ----------- Get SwissProt data from SQLite database (start) ----------- # Open SwissProt SQLite database # Set up dictionary to aggregate results for phosphopeptides correspounding to dephosphoeptide # Set up dictionary to accumulate results # Execute query for tuples without fetching the results yet # handle case where no records are returned # do interesting stuff here... 
# Sort by `UniProt_ID` # ref: https://stackoverflow.com/a/4174955/15509512 # Get one tuple for each `phospho_pep` # in DephosphoPep_UniProtSeq_LUT[dephospho_pep] # Append pseudo-tuple per UniProt_ID but only when it is not present # Close SwissProt SQLite database; clean up local variables # wipe local variables # ----------- Get SwissProt data from SQLite database (finish) ----------- # timer # Read in Upstream tabular file # We are discarding the intensity data; so read it as text # timer # timer # timer # timer # trim upstream_data to include only the upstream map columns # print('last intensity = %d' % last_intensity) # Convert floating-point integers to int64 integers # ref: https://stackoverflow.com/a/68497603/15509512 # create another phosphopeptide column that will be used to join later; # MAY need to change depending on Phosphopeptide column position # data_in[PHOSPHOPEPTIDE_MATCH] = data_in[data_in.columns.tolist()[0]] # timer # timer # Produce a dictionary of metadata for a single phosphopeptide. # This is a replacement of `UniProtInfo_subdict` in the original code. # Strip "p" from phosphopeptide sequence # Determine number of phosphoresidues in phosphopeptide # Determine location(s) of phosphoresidue(s) in phosphopeptide # (used later for Phosphoresidue, Sequence7, and Sequence10) # list of p locations # Establish nested dictionary # Add stripped sequence to dictionary # Locate phospho_pep in PhosphoPep_UniProtSeq_LUT # Caller may elect to: # try: # ... 
# except PreconditionError as pe: # print("'{expression}': {message}".format( # expression = pe.expression, # message = pe.message)) # ) # ) # raise PreconditionError( # "DephosphoPep_UniProtSeq_LUT[('" + dephospho_pep + ",SEQUENCE)", # 'value has zero length' # ) # handle case where no sequence was found for dep-pep # location of phosphoresidue on protein sequence # add Phosphoresidue # Add Sequence7 # phospho_pep at N terminus # if phosphosresidue is serine # if phosphosresidue is threonine # if phosphoresidue is tyrosine # if not pSTY # add appropriate number of "_" to the front # phospho_pep at C terminus # add appropriate number of "_" to the back # phosphoresidue # if phosphosresidue is serine # if phosphosresidue is threonine # if phosphoresidue is tyrosine # if not pSTY # add Sequence10 # phospho_pep at N terminus # phospho_pep at C terminus # result[SEQUENCE10] is a list of lists of strings # Sort by `UniProt_ID` # ref: https://stackoverflow.com//4174955/15509512 # convert lists to strings in the dictionary # result[SEQUENCE10] is a list of lists of strings # valL is a list of strings # val is a string # joined_value is a string # #separate the isoforms in PHOSPHORESIDUE column with ";" # oldstring = result[PHOSPHORESIDUE] # oldlist = list(oldstring) # newstring = "" # i = 0 # for e in oldlist: # if e == ";": # if numps > 1: # if i%numps: # newstring = newstring + ";" # else: # newstring = newstring + "," # else: # newstring = newstring + ";" # i +=1 # else: # newstring = newstring + e # separate sequence7's by | # Construct list of [string, dictionary] lists # where the dictionary provides the SwissProt metadata # for a phosphopeptide # timer # timer # Construct dictionary from list of lists # ref: https://www.8bitavenue.com/how-to-convert-list-of-lists-to-dictionary-in-python/ # timer # timer # cosmetic: add N_A to phosphopeptide rows with no hits # timer # timer # convert UniProt_Info dictionary to dataframe # reorder columns to match expected output 
file # make index a column too # cols = [cols[-1]]+cols[4:6]+[cols[1]]+[cols[2]]+[cols[6]]+[cols[0]] # uniprot_df = uniprot_df[cols] # timer # timer # concat to split then groupby to collapse # --- -------------- begin read PSP_Regulatory_sites --------------------------------- # read in PhosphoSitePlus Regulatory Sites dataset # ----------- Get PhosphoSitePlus Regulatory Sites data from SQLite database (start) ----------- # Close SwissProt SQLite database # ... -------------- end read PSP_Regulatory_sites ------------------------------------ # keep only the human entries in dataframe # merge the seq7 df with the regsites df based off of the sequence7 # after merging df, select only the columns of interest; # note that PROTEIN is absent here # combine column values of interest # into one FUNCTION_PHOSPHORESIDUE column" # remove the columns that were combined # timer # timer # cosmetic changes to Function Phosphoresidue column # timer # timer # remove the extra ";" so that it looks more professional # remove ; from empty hits # remove ; from the ends # remove ; from the beginning # turn blanks into N_A to signify the info was searched for but cannot be found # timer # timer # merge uniprot df with merge df # collapse the merged df # .apply(lambda x: "%s" % ' | '.join(x))) # timer # timer # add df index as its own column # rename columns # timer # timer # add columns based on Sequence7 matching site_+/-7_AA # timer # timer # data_in.sort_values(PHOSPHOPEPTIDE_MATCH, inplace=True, kind='mergesort') # timer # timer # timer # timer # #rename upstream columns in new list # new_cols = [] # for name in cols: # if "_NetworKIN" in name: # name = name.split("_")[0] # if " motif" in name: # name = name.split(" motif")[0] # if " sequence " in name: # name = name.split(" sequence")[0] # if "_Phosida" in name: # name = name.split("_")[0] # if "_PhosphoSite" in name: # name = name.split("_")[0] # new_cols.append(name) # rename upstream columns in new list # timer # timer # Create 
upstream_data_cast as a copy of upstream_data # but with first column substituted by the phosphopeptide sequence # --- -------------- begin read upstream_data_melt ------------------------------------ # ----------- Get melted kinase mapping data from SQLite database (start) ----------- # Close SwissProt SQLite database # ref: https://stackoverflow.com/a/27360130/15509512 # e.g. df.drop(df[df.score < 50].index, inplace=True) # ----------- Get melted kinase mapping data from SQLite database (finish) ----------- # ... -------------- end read upstream_data_melt -------------------------------------- # timer # timer # ... end read upstream_data_melt # timer # timer # timer # timer # for key in upstream_map_p_peptide_list: # melt_dict[key] = ' | '.join(melt_dict[key]) # timer # timer # map_dict is a dictionary of dictionaries # timer # timer # convert map_dict to dataframe # make index a column too # join map_df to uniprot_regsite_df # cols_output_prelim = output_df.columns.tolist() # # print("cols_output_prelim") # print(cols_output_prelim) # # cols_output = cols_output_prelim[:8]+[cols_output_prelim[9]]+[cols_output_prelim[10]] # # print("cols_output with p-peptide") # print(cols_output) # # cols_output = [col for col in cols_output if not col == "p-peptide"] # # print("cols_output") # print(cols_output) # # output_df = output_df[cols_output] # join output_df back to quantitative columns in data_in df # ----------- Write merge/filter metadata to SQLite database (start) ----------- # Open SwissProt SQLite database # Read ppep-to-sequence LUT # write only metadata for merged/filtered records to SQLite # replace phosphopeptide seq with ppep.id # rename columns # Close SwissProt SQLite database # ----------- Write merge/filter metadata to SQLite database (finish) ----------- # cosmetic changes to Upstream column # fill the NaN with "" for those Phosphopeptides that got a "WARNING: Failed match for " in the upstream mapping # turn blanks into N_A to signify the info was 
searched for but cannot be found # timer # timer # Output onto Final CSV file # timer # timer # timer # timer # Rev. 7/1/2016 # Rev. 7/3/2016 : fill NaN in Upstream column to replace to N/A's # Rev. 7/3/2016: renamed Upstream column to PUTATIVE_UPSTREAM_DOMAINS # Rev. 12/2/2021: Converted to Python from ipynb; use fast Aho-Corasick searching; \ # read from SwissProt SQLite database # Rev. 12/9/2021: Transfer code to Galaxy tool wrapper # # copied from Excel Output Script.ipynb END # # | 2.538256 | 3 |
calculate_fitness.py | atr10116068/Genetic_Algorithm_python | 0 | 6624123 | <gh_stars>0
# function fitness
def fitness(gen, target):
    """Return the fitness of *gen* against *target* as a percentage string.

    Compares the two strings position by position over ``len(target)``
    positions and counts the matches; the result is
    ``matches / len(target) * 100`` rendered with ``str`` (e.g. ``"100.0"``
    for a perfect match), matching what callers of this GA helper expect.

    :param gen: candidate string (an individual's genes).
    :param target: reference string being evolved towards.
    :return: percentage of matching positions, as a string.
    """
    if not target:
        # Guard the division below: an empty target is vacuously matched.
        return str(100.0)
    # Slicing (s[x:x+1]) instead of indexing keeps the comparison safe when
    # gen is shorter than target: an out-of-range slice yields "" and simply
    # counts as a mismatch.
    matches = sum(1 for x in range(len(target)) if target[x:x + 1] == gen[x:x + 1])
    return str((matches / len(target)) * 100)
| #function fitnes
def fitness(gen,target):
fitnes=0
for x in range(len(target)):
if(target[x:x+1] == gen[x:x+1]):
#print("-> {} sama".format(gen[x:x+1]))
fitnes += 1
fitness = str((fitnes/len(target))*100)
return fitness | en | 0.148303 | #function fitnes #print("-> {} sama".format(gen[x:x+1])) | 3.434464 | 3 |
main.py | haselmann/dev-tracker-reddit | 0 | 6624124 | import praw
import re
from data import ThreadData
from config import config
from collections import deque
from datetime import datetime
from urllib.parse import quote_plus
if __name__ == '__main__':
    # Authenticate against reddit with the script-app credentials from config.
    r = praw.Reddit(client_id=config["client_id"],
                    client_secret=config["client_secret"],
                    password=config["password"],
                    user_agent=config["user_agent"],
                    username=config["username"])
    # the most recent threads a dev has replied in
    # (bounded ring buffer: oldest entries fall off once store_size is hit)
    recent_threads = deque(maxlen=config["store_size"])
    # Watch all configured subreddits at once via a combined "a+b+c" feed.
    subreddit = r.subreddit("+".join(config["subreddit"]))
    for comment in subreddit.stream.comments():
        try:
            comment_author = comment.author.name.lower()
        except AttributeError:
            # author is deleted, don't care about this comment
            continue
        # Render the configurable intro line of a potential tracker comment.
        intro_template = config["intro"]
        intro = intro_template.format(bot_name=r.config.username,
                                      subreddit=str(comment.subreddit),
                                      users=", ".join(config["users"]))
        # URL-encode "/r/<subreddit>" for use inside the modmail compose link.
        subreddit_url_encoded = quote_plus("/r/{subreddit}".format(subreddit=str(comment.subreddit)))
        outtro_template = "[source](https://github.com/NNTin/dev-tracker-reddit) on GitHub, " \
                          "[message](https://www.reddit.com/message/compose?to={url}) " \
                          "the moderators"
        outtro = outtro_template.format(bot_name=r.config.username,
                                        subreddit=str(comment.subreddit),
                                        users=", ".join(config["users"]),
                                        url=subreddit_url_encoded)
        # Prefix every word with "^^" (renders as superscript on reddit).
        outtro = " " + outtro
        outtro = " ^^".join(outtro.split(" "))
        # Only comments written by one of the tracked users are mirrored.
        if comment_author in config["users"]:
            submission_id = comment.submission.fullname
            # Did the bot already post a tracker comment in this submission?
            bot_comment_exist = submission_id in map(lambda x: x.submission_id, recent_threads)
            if bot_comment_exist:
                # [A]mend an existing tracker comment.
                print("[A] {author}: {link}".format(author=comment.author.name,
                                                    link="https://dm.reddit.com" + comment.permalink))
            else:
                # [N]ew tracker comment.
                print("[N] {author}: {link}".format(author=comment.author.name,
                                                    link="https://dm.reddit.com" + comment.permalink))
            old_comments = ""
            if bot_comment_exist:
                # Recover the previously quoted dev comments by parsing the
                # bot's own markdown ("* [Comment by ...](...):\n\n > ...").
                regex = r"(?P<fullmatch>\* \[Comment by (?P<redditname>\w+)\]\(\/r\/(?P<subreddit>\w+)\/comments" \
                        r"\/(?P<submissionid>\w+)\/\w*\/(?P<commentid>\w+)\/\?context=\d+(?P<hasnote> \"(?P<note>.+)" \
                        r"\")?\):\n\n(?P<message>( >.+\n)+))"
                # Look up the stored ThreadData for this submission and fetch
                # the bot comment it references.
                data = next(filter(lambda x: x.submission_id == submission_id, recent_threads)).dict()
                comment_id = data["comment_id"]
                bot_comment = next(r.info([comment_id]))
                matches = re.finditer(regex, bot_comment.body, re.MULTILINE)
                for match in matches:
                    old_comments += match["fullmatch"] + "\n"
            # Header links to the dev comment; hover text shows the UTC time.
            header_template = '* [Comment by {user_name}]({permalink}?context=9 "posted on {datetime}"):\n'
            header = header_template.format(user_name=comment.author.name,
                                            permalink=comment.permalink,
                                            datetime=str(datetime.utcfromtimestamp(
                                                comment.created_utc)) + " UTC")
            # Quote the dev comment body as a markdown blockquote (" > ").
            quote = "\n" + comment.body
            quote = "\n > ".join(quote.split("\n"))
            new_comment = header + quote
            reply = intro + "\n\n" + old_comments + new_comment + "\n\n---\n\n" + outtro
            if bot_comment_exist:
                # Amend: rewrite the existing bot comment in place.
                bot_comment.edit(reply)
                continue
            else:
                # New thread: reply to the submission and remember the pairing
                # so later dev comments in the same thread amend this comment.
                bot_comment = comment.submission.reply(reply)
                comment_id = bot_comment.fullname
                recent_threads.append(ThreadData(submission_id=submission_id,
                                                 comment_id=comment_id))
                continue
| import praw
import re
from data import ThreadData
from config import config
from collections import deque
from datetime import datetime
from urllib.parse import quote_plus
if __name__ == '__main__':
r = praw.Reddit(client_id=config["client_id"],
client_secret=config["client_secret"],
password=config["password"],
user_agent=config["user_agent"],
username=config["username"])
# the most recent threads a dev has replied in
recent_threads = deque(maxlen=config["store_size"])
subreddit = r.subreddit("+".join(config["subreddit"]))
for comment in subreddit.stream.comments():
try:
comment_author = comment.author.name.lower()
except AttributeError:
# author is deleted, don't care about this comment
continue
intro_template = config["intro"]
intro = intro_template.format(bot_name=r.config.username,
subreddit=str(comment.subreddit),
users=", ".join(config["users"]))
subreddit_url_encoded = quote_plus("/r/{subreddit}".format(subreddit=str(comment.subreddit)))
outtro_template = "[source](https://github.com/NNTin/dev-tracker-reddit) on GitHub, " \
"[message](https://www.reddit.com/message/compose?to={url}) " \
"the moderators"
outtro = outtro_template.format(bot_name=r.config.username,
subreddit=str(comment.subreddit),
users=", ".join(config["users"]),
url=subreddit_url_encoded)
outtro = " " + outtro
outtro = " ^^".join(outtro.split(" "))
if comment_author in config["users"]:
submission_id = comment.submission.fullname
bot_comment_exist = submission_id in map(lambda x: x.submission_id, recent_threads)
if bot_comment_exist:
print("[A] {author}: {link}".format(author=comment.author.name,
link="https://dm.reddit.com" + comment.permalink))
else:
print("[N] {author}: {link}".format(author=comment.author.name,
link="https://dm.reddit.com" + comment.permalink))
old_comments = ""
if bot_comment_exist:
regex = r"(?P<fullmatch>\* \[Comment by (?P<redditname>\w+)\]\(\/r\/(?P<subreddit>\w+)\/comments" \
r"\/(?P<submissionid>\w+)\/\w*\/(?P<commentid>\w+)\/\?context=\d+(?P<hasnote> \"(?P<note>.+)" \
r"\")?\):\n\n(?P<message>( >.+\n)+))"
data = next(filter(lambda x: x.submission_id == submission_id, recent_threads)).dict()
comment_id = data["comment_id"]
bot_comment = next(r.info([comment_id]))
matches = re.finditer(regex, bot_comment.body, re.MULTILINE)
for match in matches:
old_comments += match["fullmatch"] + "\n"
header_template = '* [Comment by {user_name}]({permalink}?context=9 "posted on {datetime}"):\n'
header = header_template.format(user_name=comment.author.name,
permalink=comment.permalink,
datetime=str(datetime.utcfromtimestamp(
comment.created_utc)) + " UTC")
quote = "\n" + comment.body
quote = "\n > ".join(quote.split("\n"))
new_comment = header + quote
reply = intro + "\n\n" + old_comments + new_comment + "\n\n---\n\n" + outtro
if bot_comment_exist:
bot_comment.edit(reply)
continue
else:
bot_comment = comment.submission.reply(reply)
comment_id = bot_comment.fullname
recent_threads.append(ThreadData(submission_id=submission_id,
comment_id=comment_id))
continue
| en | 0.987716 | # the most recent threads a dev has replied in # author is deleted, don't care about this comment | 2.556285 | 3 |
django_archive/apps.py | nathan-osman/django-archive | 28 | 6624125 | <gh_stars>10-100
from django.apps import AppConfig
class DjangoArchiveConfig(AppConfig):
    """
    Configuration for the django_archive app
    """
    # Dotted Python path to the application; used by Django's app registry.
    name = 'django_archive'
    # Human-readable name, e.g. for display in the Django admin.
    verbose_name = "Django Archive"
| from django.apps import AppConfig
class DjangoArchiveConfig(AppConfig):
"""
Configuration for the django_archive app
"""
name = 'django_archive'
verbose_name = "Django Archive" | en | 0.514786 | Configuration for the django_archive app | 1.516111 | 2 |
saga/resource/__init__.py | nikmagini/pilot | 13 | 6624126 |
__author__ = "<NAME>"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
from saga.resource.constants import *
from saga.resource.description import *
from saga.resource.manager import *
from saga.resource.resource import *
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
from saga.resource.constants import *
from saga.resource.description import *
from saga.resource.manager import *
from saga.resource.resource import *
| none | 1 | 1.07107 | 1 | |
varfilter/types.py | gomibaya/pyVarfilter | 0 | 6624127 | <reponame>gomibaya/pyVarfilter
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Definiciones de tipos básicos habituales.
Definición de clases para Db,Query y Datos usuarios.
Funciones de conversión a int,bool,float
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME> ,EBP"
__license__ = "MIT"
__email__ = "<EMAIL>"
__status__ = "Alpha"
__version__ = "1.0.0a10"
class UserAuthInfo:
    """Holds the authentication data (name and password) of one user."""

    def __init__(self, uname, upass):
        # Keep both fields in a private mapping, like the other *Info
        # containers in this module.
        self._data = dict(zip(('user', 'pass'), (uname, upass)))

    def _field(self, key):
        # Single access point for the internal mapping.
        return self._data.get(key)

    def getName(self):
        """Return the user name."""
        return self._field('user')

    def getPass(self):
        """Return the password."""
        return self._field('pass')
class DbInfo:
    """Connection details for a database: credentials, name, host and port."""

    def __init__(self, uinfo, dbname, host, port):
        # Private mapping mirroring the constructor arguments one-to-one.
        self._data = {
            'uinfo': uinfo,
            'dbname': dbname,
            'host': host,
            'port': port,
        }

    def _field(self, key):
        # Single access point for the internal mapping.
        return self._data.get(key)

    def getUinfo(self):
        """Return the user credentials object passed to the constructor."""
        return self._field('uinfo')

    def getDbname(self):
        """Return the database name."""
        return self._field('dbname')

    def getHost(self):
        """Return the database host."""
        return self._field('host')

    def getPort(self):
        """Return the database port."""
        return self._field('port')
class SQLQuery:
    """Pairs an SQL statement with the parameters to bind to it."""

    def __init__(self, query, params):
        # Keep both parts as plain private attributes.
        self._query, self._params = query, params

    def getQuery(self):
        """Return the SQL statement text."""
        return self._query

    def getParams(self):
        """Return the bind parameters for the statement."""
        return self._params
if __name__ == "__main__":
print("Este fichero pertenece a un módulo, "
"no es operativo como aplicación independiente.")
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Definiciones de tipos básicos habituales.
Definición de clases para Db,Query y Datos usuarios.
Funciones de conversión a int,bool,float
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME> ,EBP"
__license__ = "MIT"
__email__ = "<EMAIL>"
__status__ = "Alpha"
__version__ = "1.0.0a10"
class UserAuthInfo:
def __init__(self, uname, upass):
self._data = {'user': uname,
'pass': upass}
def getName(self):
return self._data.get('user')
def getPass(self):
return self._data.get('pass')
class DbInfo:
def __init__(self, uinfo, dbname, host, port):
self._data = {'uinfo': uinfo,
'dbname': dbname,
'host': host,
'port': port}
def getUinfo(self):
return self._data.get('uinfo')
def getDbname(self):
return self._data.get('dbname')
def getHost(self):
return self._data.get('host')
def getPort(self):
return self._data.get('port')
class SQLQuery:
def __init__(self, query, params):
self._query = query
self._params = params
def getQuery(self):
return self._query
def getParams(self):
return self._params
if __name__ == "__main__":
print("Este fichero pertenece a un módulo, "
"no es operativo como aplicación independiente.") | es | 0.839567 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Definiciones de tipos básicos habituales. Definición de clases para Db,Query y Datos usuarios. Funciones de conversión a int,bool,float | 3.343529 | 3 |
zenduty/api/incidents_api.py | Zenduty/zenduty-python-sdk | 0 | 6624128 | from zenduty.api_client import ApiClient
class IncidentsApi(object):
    """Thin client for the Zenduty incident REST endpoints.

    Every method delegates to ``ApiClient.call_api`` and returns its
    result unchanged.
    """

    def __init__(self, api_client=None):
        """Keep the given ApiClient, or build a default one when omitted."""
        self.api_client = ApiClient() if api_client is None else api_client

    @staticmethod
    def _incident_path(incident_number, suffix=''):
        # Build '/api/incidents/<number>/<suffix>', shared by the
        # per-incident endpoints below.
        return '/api/incidents/' + str(incident_number) + '/' + suffix

    def get_incidents(self, body):
        """List the incidents of the Zenduty account.

        ``body`` is a dict of filters, e.g.::

            {'page': 1,
             'status': 5,
             'team_id': ['a2c6322b-4c1b-4884-8f7a-a7f270de98cb'],
             'service_ids': [],
             'user_ids': []}
        """
        return self.api_client.call_api('GET', '/api/incidents/', body=body)

    def get_incidents_by_number(self, incident_number):
        """Fetch a single incident by its incident number."""
        return self.api_client.call_api('GET', self._incident_path(incident_number))

    def get_incident_alerts(self, incident_number):
        """Fetch every alert of the given incident."""
        return self.api_client.call_api('GET', self._incident_path(incident_number, 'alerts/'))

    def get_incident_notes(self, incident_number):
        """Fetch the notes attached to the given incident."""
        return self.api_client.call_api('GET', self._incident_path(incident_number, 'note/'))

    def acknowledge_or_resolve_incidents(self, incident_number, body):
        """Acknowledge or resolve an incident, identified by its number.

        ``body`` carries the changed values, e.g.
        ``{'status': 3, 'incident_number': 12}``.
        """
        return self.api_client.call_api('PATCH', self._incident_path(incident_number), body=body)

    def create_incident(self, body):
        """Create an incident for a particular service.

        ``body`` must contain ``service``, ``title``, ``summary`` and either
        an ``escalation_policy`` or (when that is null) an ``assigned_to``
        user, e.g.::

            {"service": "c7fff4c5-2def-41e8-9120-c63f649a825c",
             "escalation_policy": "a70244c8-e343-4dd0-8d87-2f767115568a",
             "user": None,
             "title": "Name of trial",
             "summary": "summary of trial"}
        """
        return self.api_client.call_api('POST', '/api/incidents/', body=body)
class IncidentsApi(object):
    """Thin wrapper around the Zenduty incidents REST endpoints."""
    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        self.api_client = ApiClient() if api_client is None else api_client
    def get_incidents(self, body):
        """List incidents for the account.
        body example:
            {'page': 1, 'status': 5,
             'team_id': ['a2c6322b-4c1b-4884-8f7a-a7f270de98cb'],
             'service_ids': [], 'user_ids': []}
        """
        return self.api_client.call_api('GET', '/api/incidents/', body=body)
    def get_incidents_by_number(self, incident_number):
        """Fetch a single incident by its incident number."""
        url = '/api/incidents/{}/'.format(incident_number)
        return self.api_client.call_api('GET', url)
    def get_incident_alerts(self, incident_number):
        """List every alert attached to the given incident."""
        url = '/api/incidents/{}/alerts/'.format(incident_number)
        return self.api_client.call_api('GET', url)
    def get_incident_notes(self, incident_number):
        """List the notes recorded on the given incident."""
        url = '/api/incidents/{}/note/'.format(incident_number)
        return self.api_client.call_api('GET', url)
    def acknowledge_or_resolve_incidents(self, incident_number, body):
        """Acknowledge or resolve an incident.
        body example: {'status': 3, 'incident_number': 12}
        """
        url = '/api/incidents/{}/'.format(incident_number)
        return self.api_client.call_api('PATCH', url, body=body)
    def create_incident(self, body):
        """Create an incident on a service.
        Required keys: service, escalation_policy, title, summary. When
        escalation_policy is None, an assigned_to user id must be given
        instead, e.g.:
            {"service": "b1559a26-c51f-45a1-886d-f6caeaf0fc7e",
             "escalation_policy": None,
             "assigned_to": "826032d6-7ccd-4d58-b114-f",
             "title": "Name of trial", "summary": "Summary of trial"}
        """
        return self.api_client.call_api('POST', '/api/incidents/', body=body)
return self.api_client.call_api('POST','/api/incidents/',body=body) | en | 0.69742 | #Returns the incidents from your zenduty account #params dict body: contains all the required details of your account # Sample body: # {'page':1, # 'status':5, # 'team_id':['a2c6322b-4c1b-4884-8f7a-a7f270de98cb'], # 'service_ids':[], # 'user_ids':[]} #Returns the incidents belonging to a given incident number #params int incident_number: incident number of event #Returns all alerts of a particular incident #params int incident_number: incident number of event #Gets the notes regarding an incident, identified by incident number #params int incident_number: incident number of event #Used to acknowledge or resolve incident, identified by incident number #params str incident_number: incident number of event #params dict body: contains the changed values of incident # Sample body: # {'status':3, # 'incident_number':12} #Used to create an incident for a particular service, identified by id #params dict body: contains necessary details for creating incident # Sample body: # {"service":"c7fff4c5-2def-41e8-9120-c63f649a825c", # "escalation_policy":"a70244c8-e343-4dd0-8d87-2f767115568a", # "user":null, # "title":"Name of trial", # "summary":"summary of trial"} # escalation_policy,service, title and summary are required fields. # if escalation_policy is not set (set to None then), then assigned_to is required, as follows # {"service":"b1559a26-c51f-45a1-886d-f6caeaf0fc7e", # "escalation_policy":null, # "assigned_to":"826032d6-7ccd-4d58-b114-f", # "title":"Name of trial", # "summary":"Summary of trial"} | 2.53649 | 3 |
references/segmentation/utils.py | burro-robotics/vision | 0 | 6624129 | from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import errno
import os
import cv2
import numpy as np
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """
    def __init__(self, window_size=20, fmt=None):
        # fmt is a str.format template; it may reference median, avg,
        # global_avg, max and value (see __str__ below).
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # last `window_size` raw values
        self.total = 0.0  # running sum over the whole series, not just the window
        self.count = 0    # number of samples folded into `total`
        self.fmt = fmt
    def update(self, value, n=1):
        # Record `value`; it counts `n` times toward the global average but
        # enters the sliding window only once (value may be a pre-averaged batch).
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # Sum count/total across all ranks so global_avg agrees everywhere;
        # a no-op outside distributed mode. Uses a CUDA tensor for the reduce.
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]
    @property
    def median(self):
        # Median of the values currently in the window.
        d = torch.tensor(list(self.deque))
        return d.median().item()
    @property
    def avg(self):
        # Mean of the values currently in the window.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()
    @property
    def global_avg(self):
        # Average over every value ever recorded.
        return self.total / self.count
    @property
    def max(self):
        # Largest value currently in the window.
        return max(self.deque)
    @property
    def value(self):
        # Most recently recorded value.
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
class ConfusionMatrix(object):
    """Accumulates a num_classes x num_classes confusion matrix for
    segmentation evaluation (rows = ground truth, columns = prediction)."""
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.mat = None  # allocated lazily on first update(), on the input's device
    def update(self, a, b):
        # a: ground-truth labels, b: predicted labels (same shape).
        n = self.num_classes
        if self.mat is None:
            self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
        with torch.no_grad():
            # Ignore out-of-range labels (e.g. an "ignore" index like 255).
            k = (a >= 0) & (a < n)
            # Encode each (truth, pred) pair as one index, then histogram.
            inds = n * a[k].to(torch.int64) + b[k]
            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
    def reset(self):
        self.mat.zero_()
    def compute(self):
        # Returns (global pixel accuracy, per-class accuracy, per-class IoU).
        h = self.mat.float()
        acc_global = torch.diag(h).sum() / h.sum()
        acc = torch.diag(h) / h.sum(1)
        # IoU = TP / (TP + FP + FN)
        iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
        return acc_global, acc, iu
    def reduce_from_all_processes(self):
        # Element-wise sum of matrices across ranks; no-op when not distributed.
        if not torch.distributed.is_available():
            return
        if not torch.distributed.is_initialized():
            return
        torch.distributed.barrier()
        torch.distributed.all_reduce(self.mat)
    def __str__(self):
        acc_global, acc, iu = self.compute()
        return (
            'global correct: {:.1f}\n'
            'average row correct: {}\n'
            'IoU: {}\n'
            'mean IoU: {:.1f}').format(
                acc_global.item() * 100,
                ['{:.1f}'.format(i) for i in (acc * 100).tolist()],
                ['{:.1f}'.format(i) for i in (iu * 100).tolist()],
                iu.mean().item() * 100)
class MetricLogger(object):
    """Collects named SmoothedValue meters and pretty-prints training progress."""
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        # Record one scalar per keyword argument into its named meter.
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Lets `logger.loss` resolve to the meter named "loss".
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # Reduce every meter's global statistics across ranks.
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing progress every `print_freq`
        iterations: ETA, meter values, per-step data/compute time and (when
        CUDA is available) peak GPU memory. `iterable` must support len().
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')  # full step time
        data_time = SmoothedValue(fmt='{avg:.4f}')  # data-loading portion
        # Pad the iteration counter to the width of the total count.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time from the previous yield to obtaining the next item = data time;
            # time across the whole loop body (including caller's work during
            # the yield) = iteration time.
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))
def cat_list(images, fill_value=0):
    """Stack variably-sized tensors into one batch tensor.
    The batch is padded (with fill_value) to the element-wise maximum shape;
    each source tensor is copied into the top-left corner of its slot
    (only the last two dims are partially filled).
    """
    dims = zip(*(img.shape for img in images))
    max_size = tuple(max(d) for d in dims)
    batched = images[0].new_full((len(images),) + max_size, fill_value)
    for src, dst in zip(images, batched):
        dst[..., : src.shape[-2], : src.shape[-1]].copy_(src)
    return batched


def collate_fn(batch):
    """Collate (image, target) pairs into padded batch tensors.
    Images are padded with 0, targets with 255 (the usual ignore index).
    """
    images, targets = zip(*batch)
    return cat_list(images, 0), cat_list(targets, 255)
def mkdir(path):
    """Create directory `path`, including any missing parents.
    A pre-existing directory is not an error; any other failure propagates.
    """
    # exist_ok=True replaces the old try/except-EEXIST pattern. It is also a
    # correctness fix: the old code swallowed EEXIST even when `path` existed
    # as a regular file, whereas exist_ok=True raises FileExistsError then.
    os.makedirs(path, exist_ok=True)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # `force=True` lets any rank print (e.g. for errors); the kwarg is
        # stripped before delegating to the real built-in print.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # Monkey-patch the global built-in so every subsequent print() call in
    # the program goes through the rank-aware wrapper.
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True iff torch.distributed can be used and a process group exists."""
    return dist.is_available() and dist.is_initialized()


def get_world_size():
    """Number of participating processes (1 outside distributed mode)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1


def get_rank():
    """Rank of this process in the default group (0 outside distributed mode)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0


def is_main_process():
    """True on rank 0 — the process that should write checkpoints and logs."""
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    """torch.save(...) that is a no-op on every rank except the main one."""
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from the environment and mutate `args`.
    Sets args.rank / args.world_size / args.gpu / args.distributed /
    args.dist_backend, reading torchrun-style env vars or SLURM variables;
    falls back to single-process mode when no launcher is detected.
    Expects args.dist_url to be set by the caller.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # Launched via torch.distributed.launch / torchrun.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # Launched under SLURM: derive the local GPU from the global rank.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        # Caller pre-populated the rank; trust it as-is.
        pass
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Silence print() on all non-zero ranks from here on.
    setup_for_distributed(args.rank == 0)
def viz_output_tensor(output_tensor, input_tensor):
    """Build overlay visualizations for a batch of segmentation outputs.
    output_tensor: class scores with the class dimension at dim 1 (argmax'd
    per pixel); input_tensor: ImageNet-normalized CHW images — assumed
    (N, 3, H, W), TODO confirm against callers. Returns a list of N blended
    RGB images (numpy uint8, HWC).
    """
    unorm = UnNormalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
    # m = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
    # s = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)
    vis = []
    # Per-pixel argmax over the class dimension -> hard label masks.
    masks = torch.argmax(output_tensor, 1)
    # import ipdb; ipdb.set_trace()
    # input_tensor = unorm(input_tensor)
    # import ipdb; ipdb.set_trace()
    for single_mask, single_input in zip(masks, input_tensor):
        single_mask = single_mask.cpu().numpy()
        # Unnormalize input_tensor
        # single_input = single_input.sub_(m).div_(s)
        # NOTE(review): UnNormalize mutates its argument in place, so
        # input_tensor is modified by this loop — verify that is intended.
        single_input_denorm = unorm(single_input)*255
        single_input_denorm = single_input_denorm.cpu().numpy().astype(np.uint8)
        single_image = single_input_denorm.transpose(1, 2, 0)  # CHW -> HWC
        single_vis = mask_to_overlaid_vis(single_mask, single_image)
        vis.append(single_vis)
    return vis
def mask_to_overlaid_vis(mask, image, alpha=0.4):
    """Blend a color-coded segmentation mask onto `image`.
    `alpha` is the weight of the mask overlay. Returns the blended RGB uint8
    image; the blend is written in place into `image` (cv2.addWeighted dst).
    """
    seg_image = label_to_color_image(mask.astype(np.int64)).astype(np.uint8)
    # Promote grayscale input to 3 channels so it can blend with the RGB mask.
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) if len(image.shape) < 3 else image
    # addWeighted requires contiguous uint8 buffers.
    image = np.ascontiguousarray(image, dtype=np.uint8)
    seg_image = np.ascontiguousarray(seg_image, dtype=np.uint8)
    # import ipdb; ipdb.set_trace()
    cv2.addWeighted(seg_image, alpha, image, 1 - alpha, 0, image)
    return image
def create_pascal_label_colormap():
    """Build the 256-entry colormap used by the PASCAL VOC segmentation
    benchmark.
    Returns:
      (256, 3) integer array mapping label index -> RGB color.
    """
    colormap = np.zeros((256, 3), dtype=int)
    bits = np.arange(256, dtype=int)
    # Each label's color is assembled bit by bit: bit 3k+c of the label
    # index contributes to bit (7 - k) of channel c.
    for shift in range(7, -1, -1):
        for channel in range(3):
            colormap[:, channel] |= ((bits >> channel) & 1) << shift
        bits >>= 3
    return colormap


def label_to_color_image(label):
    """Map a 2-D integer label image to RGB colors via the PASCAL colormap.
    Args:
      label: 2-D integer array of segmentation labels.
    Returns:
      (H, W, 3) array of per-pixel colors.
    Raises:
      ValueError: for non-2-D input or labels outside the 256-entry map.
    """
    if label.ndim != 2:
        raise ValueError('Expect 2-D input label')
    colormap = create_pascal_label_colormap()
    if np.max(label) >= colormap.shape[0]:
        raise ValueError('label value too large.')
    return colormap[label]
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor | from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import errno
import os
import cv2
import numpy as np
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class ConfusionMatrix(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self.mat = None
def update(self, a, b):
n = self.num_classes
if self.mat is None:
self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
with torch.no_grad():
k = (a >= 0) & (a < n)
inds = n * a[k].to(torch.int64) + b[k]
self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
def reset(self):
self.mat.zero_()
def compute(self):
h = self.mat.float()
acc_global = torch.diag(h).sum() / h.sum()
acc = torch.diag(h) / h.sum(1)
iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
return acc_global, acc, iu
def reduce_from_all_processes(self):
if not torch.distributed.is_available():
return
if not torch.distributed.is_initialized():
return
torch.distributed.barrier()
torch.distributed.all_reduce(self.mat)
def __str__(self):
acc_global, acc, iu = self.compute()
return (
'global correct: {:.1f}\n'
'average row correct: {}\n'
'IoU: {}\n'
'mean IoU: {:.1f}').format(
acc_global.item() * 100,
['{:.1f}'.format(i) for i in (acc * 100).tolist()],
['{:.1f}'.format(i) for i in (iu * 100).tolist()],
iu.mean().item() * 100)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {}'.format(header, total_time_str))
def cat_list(images, fill_value=0):
max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
batch_shape = (len(images),) + max_size
batched_imgs = images[0].new(*batch_shape).fill_(fill_value)
for img, pad_img in zip(images, batched_imgs):
pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img)
return batched_imgs
def collate_fn(batch):
images, targets = list(zip(*batch))
batched_imgs = cat_list(images, fill_value=0)
batched_targets = cat_list(targets, fill_value=255)
return batched_imgs, batched_targets
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
setup_for_distributed(args.rank == 0)
def viz_output_tensor(output_tensor, input_tensor):
unorm = UnNormalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
# m = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
# s = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)
vis = []
masks = torch.argmax(output_tensor, 1)
# import ipdb; ipdb.set_trace()
# input_tensor = unorm(input_tensor)
# import ipdb; ipdb.set_trace()
for single_mask, single_input in zip(masks, input_tensor):
single_mask = single_mask.cpu().numpy()
# Unnormalize input_tensor
# single_input = single_input.sub_(m).div_(s)
single_input_denorm = unorm(single_input)*255
single_input_denorm = single_input_denorm.cpu().numpy().astype(np.uint8)
single_image = single_input_denorm.transpose(1, 2, 0)
single_vis = mask_to_overlaid_vis(single_mask, single_image)
vis.append(single_vis)
return vis
def mask_to_overlaid_vis(mask, image, alpha=0.4):
seg_image = label_to_color_image(mask.astype(np.int64)).astype(np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) if len(image.shape) < 3 else image
image = np.ascontiguousarray(image, dtype=np.uint8)
seg_image = np.ascontiguousarray(seg_image, dtype=np.uint8)
# import ipdb; ipdb.set_trace()
cv2.addWeighted(seg_image, alpha, image, 1 - alpha, 0, image)
return image
def create_pascal_label_colormap():
"""Creates a label colormap used in PASCAL VOC segmentation benchmark.
Returns:
A Colormap for visualizing segmentation results.
"""
colormap = np.zeros((256, 3), dtype=int)
ind = np.arange(256, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= ((ind >> channel) & 1) << shift
ind >>= 3
return colormap
def label_to_color_image(label):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
Returns:
result: A 2D array with floating type. The element of the array
is the color indexed by the corresponding element in the input label
to the PASCAL color map.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label')
colormap = create_pascal_label_colormap()
if np.max(label) >= len(colormap):
raise ValueError('label value too large.')
return colormap[label]
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor | en | 0.578494 | Track a series of values and provide access to smoothed values over a window or the global series average. Warning: does not synchronize the deque! This function disables printing when not in master process # m = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32) # s = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32) # import ipdb; ipdb.set_trace() # input_tensor = unorm(input_tensor) # import ipdb; ipdb.set_trace() # Unnormalize input_tensor # single_input = single_input.sub_(m).div_(s) # import ipdb; ipdb.set_trace() Creates a label colormap used in PASCAL VOC segmentation benchmark. Returns: A Colormap for visualizing segmentation results. Adds color defined by the dataset colormap to the label. Args: label: A 2D array with integer type, storing the segmentation label. Returns: result: A 2D array with floating type. The element of the array is the color indexed by the corresponding element in the input label to the PASCAL color map. Raises: ValueError: If label is not of rank 2 or its value is larger than color map maximum entry. Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. Returns: Tensor: Normalized image. # The normalize code -> t.sub_(m).div_(s) | 2.491872 | 2 |
tools/yolov3roi.py | tanishq1g/visual-grounding-NMTree | 0 | 6624130 | import random
import numpy as np
import torch
import pickle
from functools import partial
import sys
import time
from PIL import Image, ImageDraw
sys.path.append(sys.path[0] + "/..")
sys.path.append(sys.path[0] + "/../yolov3")
print(sys.path)
from yolov3.utils import *
from yolov3.image import letterbox_image, correct_yolo_boxes
from yolov3.darknet import Darknet
from ROI import *
class YOLOROI:
    """Runs YOLOv3 detection on an image and ROI-pools backbone features
    (layer 74) for each detected box.
    Requires the yolov3 cfg/weights files on disk and a CUDA device.
    """
    def __init__(self):
        print("Loading Yolov3 model")
        self.cfgfile = "yolov3/cfg/yolo_v3.cfg"
        self.weightfile = "yolov3/yolov3.weights"
        self.yolo = Darknet(self.cfgfile)
        # # self.yolo.print_network()
        self.yolo.load_weights(self.weightfile)
        print('Loading weights from %s... Done!' % (self.weightfile))
        self.yolo.cuda()
        print("Loading ROI Pool")
        # ROI pool producing fixed 2x2 outputs at spatial scale 1.0 (boxes
        # are already in feature-map coordinates — see get_coordinates).
        self.roi_pool = TorchROIPool(2, 1.0)
    # get reverse coordinates for yolov3 model
    def get_coordinates(self, boxes, im_w, im_h):
        """Rescale one box, in place, from image pixels (x1, y1, x2, y2) to a
        target_size x target_size grid. target_size == 13 — presumably the
        spatial size of the layer-74 feature map; TODO confirm against cfg.
        """
        # im_w, im_h = float(im_w), float(im_h)
        # net_w, net_h = float(net_w), float(net_h)
        # if net_w/im_w < net_h/im_h:
        # new_w = net_w
        # new_h = (im_h * net_w)/im_w
        # else:
        # new_w = (im_w * net_h)/im_h
        # new_h = net_h
        # xo, xs = (net_w - new_w)/(2*net_w), net_w/new_w
        # yo, ys = (net_h - new_h)/(2*net_h), net_h/new_h
        b = boxes
        # from x1, y1, w, h to x1, y1, x2, y2
        # b[2] = b[0] + b[2]
        # b[3] = b[1] + b[3]
        target_size = 13
        x_scale = target_size / im_w
        y_scale = target_size / im_h
        b[0] = int(np.round(b[0] * x_scale))
        b[1] = int(np.round(b[1] * y_scale))
        b[2] = int(np.round(b[2] * x_scale))
        b[3] = int(np.round(b[3] * y_scale))
        # from x1, y1, x2, y2 to correct yolo coord
        # b[0] = (b[2] + b[0]) / (im_w * 2.0)
        # b[2] = (b[2] - b[0]) / im_w
        # b[1] = (b[3] + b[1]) / (im_h * 2.0)
        # b[3] = (b[3] - b[1]) / im_h
        # from correct yolo coord to mode layers coord
        # b[0] = b[0] / xs + xo
        # b[1] = b[1] / ys + yo
        # b[2] /= xs
        # b[3] /= ys
        return b
    def run_yolo(self, img_path):
        """Detect objects in the image at `img_path` and return ROI-pooled
        layer-74 features for every detection, reshaped to
        (1, num_boxes, channels * 2 * 2), on the GPU.
        """
        img = Image.open(img_path).convert('RGB')
        sized = letterbox_image(img, self.yolo.width, self.yolo.height)
        use_cuda = True
        # do_detect returns candidate boxes plus the layer-74 feature map.
        boxes, layer74 = do_detect(self.yolo, sized, 0.1, 0.4, use_cuda)
        print("Layer 74", np.shape(layer74))
        correct_yolo_boxes(boxes, img.width, img.height, self.yolo.width, self.yolo.height)
        print("boxes ", boxes)
        width = img.width
        height = img.height
        # Convert (cx, cy, w, h) in relative units to absolute pixel corners.
        for box in boxes:
            box[0], box[1], box[2], box[3] = (box[0] - box[2]/2.0) * width, (box[1] - box[3]/2.0) * height, (box[0] + box[2]/2.0) * width, (box[1] + box[3]/2.0) * height
        # print(x1, y1, x2, y2)
        # box = [x1, y1, x2, y2]
        # print(boxes)
        for box in boxes:
            box = self.get_coordinates(box, img.width, img.height)
            # Widen degenerate (zero-width/height) boxes to span at least
            # one grid cell so ROI pooling has a non-empty region.
            if box[0] == box[2]:
                if box[2] != 13:
                    box[2] += 1
                else:
                    box[0] -= 1
            if box[1] == box[3]:
                if box[3] != 13:
                    box[3] += 1
                else:
                    box[1] -= 1
        # print(boxes)
        ann_boxes = torch.Tensor(boxes)
        # print(ann_boxes, "********")
        v = self.roi_pool(layer74, ann_boxes)
        # print(np.shape(v))
        # Flatten each box's pooled (C, 2, 2) block into one feature vector.
        v = v.reshape(1, v.size(0), v.size(1) * v.size(2) * v.size(3))
        print(np.shape(v))
        vis = v.detach()
        # convert to tensor
        # NOTE(review): np.asarray on a CUDA tensor raises; this round-trip
        # only works if `v` already lives on the CPU — verify.
        vis = torch.from_numpy(np.asarray(vis)).cuda()
        return vis
if __name__ == '__main__':
    # Smoke test: run detection + ROI pooling on a bundled sample image
    # and print the resulting feature tensor.
    yo = YOLOROI()
    img_path = "yolov3/data/person.jpg"
    vis = yo.run_yolo(img_path)
    print(vis)
| import random
import numpy as np
import torch
import pickle
from functools import partial
import sys
import time
from PIL import Image, ImageDraw
sys.path.append(sys.path[0] + "/..")
sys.path.append(sys.path[0] + "/../yolov3")
print(sys.path)
from yolov3.utils import *
from yolov3.image import letterbox_image, correct_yolo_boxes
from yolov3.darknet import Darknet
from ROI import *
class YOLOROI:
    """Runs a Darknet YOLOv3 detector on an image and ROI-pools an
    intermediate feature map over the detected boxes.

    Attributes:
        cfgfile:    path to the Darknet network config.
        weightfile: path to the pretrained YOLOv3 weights.
        yolo:       the Darknet model, moved to GPU in __init__.
        roi_pool:   TorchROIPool(2, 1.0) - pools each ROI to a 2x2 cell
                    (semantics per the project's ROI module - confirm).
    """
    def __init__(self):
        # Build the detector from hard-coded config/weight paths relative
        # to the working directory, then move it to the default GPU.
        print("Loading Yolov3 model")
        self.cfgfile = "yolov3/cfg/yolo_v3.cfg"
        self.weightfile = "yolov3/yolov3.weights"
        self.yolo = Darknet(self.cfgfile)
        # # self.yolo.print_network()
        self.yolo.load_weights(self.weightfile)
        print('Loading weights from %s... Done!' % (self.weightfile))
        self.yolo.cuda()
        print("Loading ROI Pool")
        self.roi_pool = TorchROIPool(2, 1.0)
    # get reverse coordinates for yolov3 model
    def get_coordinates(self, boxes, im_w, im_h):
        """Rescale one pixel-space box [x1, y1, x2, y2] onto the 13x13
        feature-map grid, mutating `boxes` in place and returning it.

        NOTE(review): despite the plural name, callers pass a single box
        (see run_yolo). The commented-out code below documents earlier,
        discarded coordinate conventions and is kept for reference.
        """
        # im_w, im_h = float(im_w), float(im_h)
        # net_w, net_h = float(net_w), float(net_h)
        # if net_w/im_w < net_h/im_h:
        #    new_w = net_w
        #    new_h = (im_h * net_w)/im_w
        # else:
        #    new_w = (im_w * net_h)/im_h
        #    new_h = net_h
        # xo, xs = (net_w - new_w)/(2*net_w), net_w/new_w
        # yo, ys = (net_h - new_h)/(2*net_h), net_h/new_h
        b = boxes
        # from x1, y1, w, h to x1, y1, x2, y2
        # b[2] = b[0] + b[2]
        # b[3] = b[1] + b[3]
        # 13 matches the spatial size of the pooled feature map
        # (presumably layer 74's 13x13 grid - TODO confirm against Darknet cfg)
        target_size = 13
        x_scale = target_size / im_w
        y_scale = target_size / im_h
        b[0] = int(np.round(b[0] * x_scale))
        b[1] = int(np.round(b[1] * y_scale))
        b[2] = int(np.round(b[2] * x_scale))
        b[3] = int(np.round(b[3] * y_scale))
        # from x1, y1, x2, y2 to correct yolo coord
        # b[0] = (b[2] + b[0]) / (im_w * 2.0)
        # b[2] = (b[2] - b[0]) / im_w
        # b[1] = (b[3] + b[1]) / (im_h * 2.0)
        # b[3] = (b[3] - b[1]) / im_h
        # from correct yolo coord to mode layers coord
        # b[0] = b[0] / xs + xo
        # b[1] = b[1] / ys + yo
        # b[2] /= xs
        # b[3] /= ys
        return b
    def run_yolo(self, img_path):
        """Detect objects in the image at `img_path` and return the
        ROI-pooled features for all detections, flattened per box.

        Returns a CUDA tensor of shape (1, num_boxes, C*2*2) given the
        2x2 pooling configured in __init__.
        """
        img = Image.open(img_path).convert('RGB')
        # letterbox to the network's input resolution, preserving aspect ratio
        sized = letterbox_image(img, self.yolo.width, self.yolo.height)
        use_cuda = True
        # conf threshold 0.1, NMS threshold 0.4; layer74 is an intermediate
        # feature map returned by the project's do_detect - confirm layout
        boxes, layer74 = do_detect(self.yolo, sized, 0.1, 0.4, use_cuda)
        print("Layer 74", np.shape(layer74))
        correct_yolo_boxes(boxes, img.width, img.height, self.yolo.width, self.yolo.height)
        print("boxes ", boxes)
        width = img.width
        height = img.height
        # convert normalized center-format (cx, cy, w, h) boxes into
        # absolute pixel corner coordinates (x1, y1, x2, y2), in place
        for box in boxes:
            box[0], box[1], box[2], box[3] = (box[0] - box[2]/2.0) * width, (box[1] - box[3]/2.0) * height, (box[0] + box[2]/2.0) * width, (box[1] + box[3]/2.0) * height
            # print(x1, y1, x2, y2)
            # box = [x1, y1, x2, y2]
        # print(boxes)
        # project each box onto the 13x13 grid; get_coordinates mutates the
        # box in place, so the reassignment to `box` is redundant but harmless
        for box in boxes:
            box = self.get_coordinates(box, img.width, img.height)
            # widen degenerate (zero-area) boxes by one cell so ROI pooling
            # always sees at least a 1x1 region, shifting inward at the edge
            if box[0] == box[2]:
                if box[2] != 13:
                    box[2] += 1
                else:
                    box[0] -= 1
            if box[1] == box[3]:
                if box[3] != 13:
                    box[3] += 1
                else:
                    box[1] -= 1
        # print(boxes)
        ann_boxes = torch.Tensor(boxes)
        # print(ann_boxes, "********")
        v = self.roi_pool(layer74, ann_boxes)
        # print(np.shape(v))
        # flatten each pooled ROI: (num_boxes, C, 2, 2) -> (1, num_boxes, C*4)
        v = v.reshape(1, v.size(0), v.size(1) * v.size(2) * v.size(3))
        print(np.shape(v))
        vis = v.detach()
        # convert to tensor
        # NOTE(review): np.asarray on a CUDA tensor raises; this round-trip
        # assumes the pooled tensor is on CPU at this point - confirm
        vis = torch.from_numpy(np.asarray(vis)).cuda()
        return vis
if __name__ == '__main__':
yo = YOLOROI()
img_path = "yolov3/data/person.jpg"
vis = yo.run_yolo(img_path)
print(vis)
| en | 0.423241 | # # self.yolo.print_network() # get reverse coordinates for yolov3 model # im_w, im_h = float(im_w), float(im_h) # net_w, net_h = float(net_w), float(net_h) # if net_w/im_w < net_h/im_h: # new_w = net_w # new_h = (im_h * net_w)/im_w # else: # new_w = (im_w * net_h)/im_h # new_h = net_h # xo, xs = (net_w - new_w)/(2*net_w), net_w/new_w # yo, ys = (net_h - new_h)/(2*net_h), net_h/new_h # from x1, y1, w, h to x1, y1, x2, y2 # b[2] = b[0] + b[2] # b[3] = b[1] + b[3] # from x1, y1, x2, y2 to correct yolo coord # b[0] = (b[2] + b[0]) / (im_w * 2.0) # b[2] = (b[2] - b[0]) / im_w # b[1] = (b[3] + b[1]) / (im_h * 2.0) # b[3] = (b[3] - b[1]) / im_h # from correct yolo coord to mode layers coord # b[0] = b[0] / xs + xo # b[1] = b[1] / ys + yo # b[2] /= xs # b[3] /= ys # print(x1, y1, x2, y2) # box = [x1, y1, x2, y2] # print(boxes) # print(boxes) # print(ann_boxes, "********") # print(np.shape(v)) # convert to tensor | 2.2179 | 2 |
src/main.py | VU-BEAM-Lab/DNNBeamforming | 1 | 6624131 | # Copyright 2020 <NAME>, <NAME>, and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the license at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import torch
import os
import numpy as np
import warnings
import time
import argparse
import json
import sys
from pprint import pprint
from utils import read_model_params, save_model_params, ensure_dir, add_suffix_to_path
from dataloader import ApertureDataset
from model import FullyConnectedNet
from logger import Logger
from trainer import Trainer
if __name__ == '__main__':
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model_params_path', help='Option to load model params from a file. Values in this file take precedence.')
parser.add_argument('-k', default=4, help='Integer value. DFT frequency to analyze.', type=int)
parser.add_argument('-s', '--save_dir', default=None, help='Directory to save the model.')
parser.add_argument('-b', '--batch_size', default=100, help='Option to specify batch size.', type=int, )
parser.add_argument('--data_noise_gaussian', help='Option to enable gaussian noise in channel data during training.', default=0, type=int)
parser.add_argument('--dropout_input', help='Specify dropout probability for hidden nodes', default=0, type=int)
parser.add_argument('-p', '--patience', type=int, default=20, help='Option to patience.')
parser.add_argument('-c', '--cuda', help='Option to use GPU.', default=0, type=int)
parser.add_argument('--save_initial', help='Option to save initial checkpoint of the model.', default=0, type=int)
parser.add_argument('--input_dim', help='Specify input dimension for the network', default=130, type=int)
parser.add_argument('--output_dim', help='Specify output dimension for the network', default=130, type=int)
parser.add_argument('--layer_width', help='Specify layer_width for the network', default=260, type=int)
parser.add_argument('--dropout', help='Specify dropout probability for hidden nodes', default=0, type=float)
parser.add_argument('--weight_decay', help='Specify weight decay for hidden nodes', default=0, type=float)
parser.add_argument('--num_hidden', help='Specify number of hidden layers', default=1, type=int)
parser.add_argument('--data_is_target', help='Specify if targets are input data (autoencoder option).', default=0, type=int)
parser.add_argument('--num_samples_train', help='Specify number of samples to use during training.', default=1000, type=int)
parser.add_argument('--num_samples_val', help='Specify number of samples to use during validation.', default=10000, type=int)
parser.add_argument('--starting_weights', help='Specify path/file for model starting weights.', default=None)
parser.add_argument('--loss_function', help='Specify loss function.', default='MSELoss')
args = parser.parse_args()
# load model params if it is specified
if args.model_params_path:
model_params = read_model_params(args.model_params_path)
else:
model_params = {}
# merge model_params and input args, giving preference to model_params
model_params = {**vars(args), **model_params}
# cuda flag
print('torch.cuda.is_available(): ' + str(torch.cuda.is_available()))
if model_params['cuda'] and torch.cuda.is_available():
print('Using ' + str(torch.cuda.get_device_name(0)))
else:
print('Not using CUDA')
model_params['cuda']=False
# set device based on cuda flag
device = torch.device("cuda:0" if model_params['cuda'] else "cpu")
# load training data specification file
with open(model_params['training_data_file'], 'r') as f:
data_json = json.load(f)
# Load primary training data
dat_list = []
for item in data_json['train']:
fname = item['file']
N = int( item['N'])
dat_list.append( ApertureDataset(fname, N, model_params['k'], model_params['data_is_target']) )
# print datasets
print('\nTrain Data:')
for dat in dat_list:
print(dat)
print('\n')
dat_train = torch.utils.data.ConcatDataset(dat_list)
# Load eval training data
dat_list = []
for item in data_json['train_eval']:
fname = item['file']
N = int( item['N'])
dat_list.append( ApertureDataset(fname, N, model_params['k'], model_params['data_is_target']) )
# print datasets
print('\nTrain Eval Data:')
for dat in dat_list:
print(dat)
print('\n')
dat_eval = torch.utils.data.ConcatDataset(dat_list)
# Load val data
dat_list = []
for item in data_json['val']:
fname = item['file']
N = int( item['N'])
dat_list.append( ApertureDataset(fname, N, model_params['k'], model_params['data_is_target']) )
# print datasets
print('\nValidation Data:')
for dat in dat_list:
print(dat)
print('\n')
dat_val = torch.utils.data.ConcatDataset(dat_list)
# setup data loaders
last_batch_size = (len(dat_train) % model_params['batch_size'])
last_batch_size = model_params['batch_size'] if (last_batch_size == 0) else last_batch_size
print(f"\nLast batch size for train data: {last_batch_size}\n")
drop_last = True if ( last_batch_size == 1) else False
print(f"Drop last batch: {drop_last}")
loader_train = torch.utils.data.DataLoader(dat_train, batch_size=model_params['batch_size'],
shuffle=True, num_workers=1, drop_last=drop_last)
drop_last=False
loader_train_eval = torch.utils.data.DataLoader(dat_eval, batch_size=len(dat_eval), shuffle=False,
num_workers=1, drop_last=drop_last)
drop_last=False
loader_val = torch.utils.data.DataLoader(dat_val, batch_size=len(dat_val), shuffle=False,
num_workers=1, drop_last=drop_last)
# create model
model = FullyConnectedNet(input_dim=model_params['input_dim'],
output_dim=model_params['output_dim'],
layer_width=model_params['layer_width'],
dropout=model_params['dropout'],
dropout_input=model_params['dropout_input'],
num_hidden=model_params['num_hidden'],
starting_weights=model_params['starting_weights'],
batch_norm_enable=model_params['batch_norm_enable'])
# save initial weights
if model_params['save_initial'] and model_params['save_dir']:
suffix = '_initial'
path = add_suffix_to_path(model_parmas['save_dir'], suffix)
print('Saving model weights in : ' + path)
ensure_dir(path)
torch.save(model.state_dict(), os.path.join(path, 'model.dat'))
save_model_params(os.path.join(path, 'model_params.txt'), model_params)
# loss
if model_params['loss_function'] == 'L1Loss':
loss = torch.nn.L1Loss()
elif model_params['loss_function'] == 'MSELoss':
loss = torch.nn.MSELoss()
elif model_params['loss_function'] == 'SmoothL1Loss':
loss = torch.nn.SmoothL1Loss()
# optimizer
#optimizer = torch.optim.SGD(model.parameters(),
# lr=model_params['lr'],
# momentum=model_params['momentum'])
optimizer = torch.optim.Adam(model.parameters(),
lr=model_params['lr'],
betas=(model_params['beta1'], model_params['beta2']),
weight_decay=model_params['weight_decay'])
# scheduler
#scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
# mode='min',
# factor=0.1,
# patience=model_params['lr_patience'],
# min_lr=10**-7,
# verbose=True)
scheduler = None
# logger
logger = Logger()
# send things to gpu if enabled
loss = loss.to(device)
model = model.to(device)
# update model params
model_params['num_samples_train'] = len(dat_train)
model_params['num_samples_train_eval'] = len(dat_eval)
model_params['num_samples_val'] = len(dat_val)
model_params_path = os.path.join(model_params['save_dir'], 'model_params.txt')
model_params['model_params_path'] = model_params_path
if model_params['save_dir']:
ensure_dir(model_params['save_dir'])
save_model_params(model_params_path, model_params)
# display input arguments
print('\n')
pprint(model_params)
print('\n')
# trainer
trainer = Trainer(model=model,
loss=loss,
optimizer=optimizer,
scheduler=scheduler,
patience=model_params['patience'],
loader_train=loader_train,
loader_train_eval=loader_train_eval,
loader_val=loader_val,
cuda=model_params['cuda'],
logger=logger,
data_noise_gaussian=model_params['data_noise_gaussian'],
save_dir=model_params['save_dir'])
# run training
trainer.train()
| # Copyright 2020 <NAME>, <NAME>, and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the license at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import torch
import os
import numpy as np
import warnings
import time
import argparse
import json
import sys
from pprint import pprint
from utils import read_model_params, save_model_params, ensure_dir, add_suffix_to_path
from dataloader import ApertureDataset
from model import FullyConnectedNet
from logger import Logger
from trainer import Trainer
if __name__ == '__main__':
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model_params_path', help='Option to load model params from a file. Values in this file take precedence.')
parser.add_argument('-k', default=4, help='Integer value. DFT frequency to analyze.', type=int)
parser.add_argument('-s', '--save_dir', default=None, help='Directory to save the model.')
parser.add_argument('-b', '--batch_size', default=100, help='Option to specify batch size.', type=int, )
parser.add_argument('--data_noise_gaussian', help='Option to enable gaussian noise in channel data during training.', default=0, type=int)
parser.add_argument('--dropout_input', help='Specify dropout probability for hidden nodes', default=0, type=int)
parser.add_argument('-p', '--patience', type=int, default=20, help='Option to patience.')
parser.add_argument('-c', '--cuda', help='Option to use GPU.', default=0, type=int)
parser.add_argument('--save_initial', help='Option to save initial checkpoint of the model.', default=0, type=int)
parser.add_argument('--input_dim', help='Specify input dimension for the network', default=130, type=int)
parser.add_argument('--output_dim', help='Specify output dimension for the network', default=130, type=int)
parser.add_argument('--layer_width', help='Specify layer_width for the network', default=260, type=int)
parser.add_argument('--dropout', help='Specify dropout probability for hidden nodes', default=0, type=float)
parser.add_argument('--weight_decay', help='Specify weight decay for hidden nodes', default=0, type=float)
parser.add_argument('--num_hidden', help='Specify number of hidden layers', default=1, type=int)
parser.add_argument('--data_is_target', help='Specify if targets are input data (autoencoder option).', default=0, type=int)
parser.add_argument('--num_samples_train', help='Specify number of samples to use during training.', default=1000, type=int)
parser.add_argument('--num_samples_val', help='Specify number of samples to use during validation.', default=10000, type=int)
parser.add_argument('--starting_weights', help='Specify path/file for model starting weights.', default=None)
parser.add_argument('--loss_function', help='Specify loss function.', default='MSELoss')
args = parser.parse_args()
# load model params if it is specified
if args.model_params_path:
model_params = read_model_params(args.model_params_path)
else:
model_params = {}
# merge model_params and input args, giving preference to model_params
model_params = {**vars(args), **model_params}
# cuda flag
print('torch.cuda.is_available(): ' + str(torch.cuda.is_available()))
if model_params['cuda'] and torch.cuda.is_available():
print('Using ' + str(torch.cuda.get_device_name(0)))
else:
print('Not using CUDA')
model_params['cuda']=False
# set device based on cuda flag
device = torch.device("cuda:0" if model_params['cuda'] else "cpu")
# load training data specification file
with open(model_params['training_data_file'], 'r') as f:
data_json = json.load(f)
# Load primary training data
dat_list = []
for item in data_json['train']:
fname = item['file']
N = int( item['N'])
dat_list.append( ApertureDataset(fname, N, model_params['k'], model_params['data_is_target']) )
# print datasets
print('\nTrain Data:')
for dat in dat_list:
print(dat)
print('\n')
dat_train = torch.utils.data.ConcatDataset(dat_list)
# Load eval training data
dat_list = []
for item in data_json['train_eval']:
fname = item['file']
N = int( item['N'])
dat_list.append( ApertureDataset(fname, N, model_params['k'], model_params['data_is_target']) )
# print datasets
print('\nTrain Eval Data:')
for dat in dat_list:
print(dat)
print('\n')
dat_eval = torch.utils.data.ConcatDataset(dat_list)
# Load val data
dat_list = []
for item in data_json['val']:
fname = item['file']
N = int( item['N'])
dat_list.append( ApertureDataset(fname, N, model_params['k'], model_params['data_is_target']) )
# print datasets
print('\nValidation Data:')
for dat in dat_list:
print(dat)
print('\n')
dat_val = torch.utils.data.ConcatDataset(dat_list)
# setup data loaders
last_batch_size = (len(dat_train) % model_params['batch_size'])
last_batch_size = model_params['batch_size'] if (last_batch_size == 0) else last_batch_size
print(f"\nLast batch size for train data: {last_batch_size}\n")
drop_last = True if ( last_batch_size == 1) else False
print(f"Drop last batch: {drop_last}")
loader_train = torch.utils.data.DataLoader(dat_train, batch_size=model_params['batch_size'],
shuffle=True, num_workers=1, drop_last=drop_last)
drop_last=False
loader_train_eval = torch.utils.data.DataLoader(dat_eval, batch_size=len(dat_eval), shuffle=False,
num_workers=1, drop_last=drop_last)
drop_last=False
loader_val = torch.utils.data.DataLoader(dat_val, batch_size=len(dat_val), shuffle=False,
num_workers=1, drop_last=drop_last)
# create model
model = FullyConnectedNet(input_dim=model_params['input_dim'],
output_dim=model_params['output_dim'],
layer_width=model_params['layer_width'],
dropout=model_params['dropout'],
dropout_input=model_params['dropout_input'],
num_hidden=model_params['num_hidden'],
starting_weights=model_params['starting_weights'],
batch_norm_enable=model_params['batch_norm_enable'])
# save initial weights
if model_params['save_initial'] and model_params['save_dir']:
suffix = '_initial'
path = add_suffix_to_path(model_parmas['save_dir'], suffix)
print('Saving model weights in : ' + path)
ensure_dir(path)
torch.save(model.state_dict(), os.path.join(path, 'model.dat'))
save_model_params(os.path.join(path, 'model_params.txt'), model_params)
# loss
if model_params['loss_function'] == 'L1Loss':
loss = torch.nn.L1Loss()
elif model_params['loss_function'] == 'MSELoss':
loss = torch.nn.MSELoss()
elif model_params['loss_function'] == 'SmoothL1Loss':
loss = torch.nn.SmoothL1Loss()
# optimizer
#optimizer = torch.optim.SGD(model.parameters(),
# lr=model_params['lr'],
# momentum=model_params['momentum'])
optimizer = torch.optim.Adam(model.parameters(),
lr=model_params['lr'],
betas=(model_params['beta1'], model_params['beta2']),
weight_decay=model_params['weight_decay'])
# scheduler
#scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
# mode='min',
# factor=0.1,
# patience=model_params['lr_patience'],
# min_lr=10**-7,
# verbose=True)
scheduler = None
# logger
logger = Logger()
# send things to gpu if enabled
loss = loss.to(device)
model = model.to(device)
# update model params
model_params['num_samples_train'] = len(dat_train)
model_params['num_samples_train_eval'] = len(dat_eval)
model_params['num_samples_val'] = len(dat_val)
model_params_path = os.path.join(model_params['save_dir'], 'model_params.txt')
model_params['model_params_path'] = model_params_path
if model_params['save_dir']:
ensure_dir(model_params['save_dir'])
save_model_params(model_params_path, model_params)
# display input arguments
print('\n')
pprint(model_params)
print('\n')
# trainer
trainer = Trainer(model=model,
loss=loss,
optimizer=optimizer,
scheduler=scheduler,
patience=model_params['patience'],
loader_train=loader_train,
loader_train_eval=loader_train_eval,
loader_val=loader_val,
cuda=model_params['cuda'],
logger=logger,
data_noise_gaussian=model_params['data_noise_gaussian'],
save_dir=model_params['save_dir'])
# run training
trainer.train()
| en | 0.61609 | # Copyright 2020 <NAME>, <NAME>, and <NAME> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the license at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python # parse input arguments # load model params if it is specified # merge model_params and input args, giving preference to model_params # cuda flag # set device based on cuda flag # load training data specification file # Load primary training data # print datasets # Load eval training data # print datasets # Load val data # print datasets # setup data loaders # create model # save initial weights # loss # optimizer #optimizer = torch.optim.SGD(model.parameters(), # lr=model_params['lr'], # momentum=model_params['momentum']) # scheduler #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, # mode='min', # factor=0.1, # patience=model_params['lr_patience'], # min_lr=10**-7, # verbose=True) # logger # send things to gpu if enabled # update model params # display input arguments # trainer # run training | 1.97731 | 2 |
preacher/compilation/yaml/__init__.py | lasta/preacher | 0 | 6624132 | """
YAML handling.
"""
from .error import YamlError
from .loader import load, load_all, load_from_path, load_all_from_path
__all__ = [
'YamlError',
'load',
'load_from_path',
'load_all',
'load_all_from_path',
]
| """
YAML handling.
"""
from .error import YamlError
from .loader import load, load_all, load_from_path, load_all_from_path
__all__ = [
'YamlError',
'load',
'load_from_path',
'load_all',
'load_all_from_path',
]
| en | 0.641523 | YAML handling. | 1.521476 | 2 |
migrations/versions/535906879d1f_db_small_fixes.py | asidlare/todos | 0 | 6624133 | """db small fixes
Revision ID: 535906879d1f
Revises: <KEY>
Create Date: 2019-08-13 18:52:54.607312
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '535906879d1f'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add an index on Task.status to speed up
    filtering tasks by status."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_Task_status'), 'Task', ['status'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the Task.status index created in
    upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_Task_status'), table_name='Task')
    # ### end Alembic commands ###
| """db small fixes
Revision ID: 535906879d1f
Revises: <KEY>
Create Date: 2019-08-13 18:52:54.607312
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '535906879d1f'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add an index on Task.status to speed up
    filtering tasks by status."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_Task_status'), 'Task', ['status'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the Task.status index created in
    upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_Task_status'), table_name='Task')
    # ### end Alembic commands ###
| en | 0.506282 | db small fixes Revision ID: 535906879d1f Revises: <KEY> Create Date: 2019-08-13 18:52:54.607312 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.303475 | 1 |
resources/gubbana.py | leondz/bornholmsk | 0 | 6624134 | <reponame>leondz/bornholmsk<gh_stars>0
#!/usr/bin/env python3
import bs4
# Parse the saved gubbana.dk page source; each table row pairs a Bornholmsk
# entry (column-1 word, column-3 sentence) with its Danish counterpart
# (column-2 word, column-4 sentence).
# Fix: the input handle was previously passed straight to BeautifulSoup and
# never closed; the output files were closed manually. Context managers now
# guarantee every handle is flushed and closed even on error.
with open('view-source_gubbana.dk_borrinjholmsk_.html', 'r') as page:
    soup = bs4.BeautifulSoup(page, features='html.parser')
c1 = soup.find_all('td', {'class': 'column-1'})
c2 = soup.find_all('td', {'class': 'column-2'})
c3 = soup.find_all('td', {'class': 'column-3'})
c4 = soup.find_all('td', {'class': 'column-4'})
c5 = soup.find_all('td', {'class': 'column-5'})  # unused downstream; kept for parity with the page layout
with open('bo_da_word_gubbana.tsv', 'w') as tokpair_file, \
     open('bo_da_sent_gubbana.txt', 'w') as sentpair_file:
    # zip replaces range(len(c1)) indexing; it also tolerates ragged columns
    # by stopping at the shortest list instead of raising IndexError
    for bo_td, da_td, bo_sent_td, da_sent_td in zip(c1, c2, c3, c4):
        tokpair_file.write(bo_td.text.lower() + "\t" + da_td.text.lower() + "\n")
        sentpair_file.write("\n".join([bo_sent_td.text, da_sent_td.text, '', '']))
import bs4
soup = bs4.BeautifulSoup(open('view-source_gubbana.dk_borrinjholmsk_.html','r'), features='html.parser')
c1 = soup.find_all('td', {'class':'column-1'})
c2 = soup.find_all('td', {'class':'column-2'})
c3 = soup.find_all('td', {'class':'column-3'})
c4 = soup.find_all('td', {'class':'column-4'})
c5 = soup.find_all('td', {'class':'column-5'})
tokpair_file = open('bo_da_word_gubbana.tsv', 'w')
sentpair_file = open('bo_da_sent_gubbana.txt', 'w')
for i in range(len(c1)):
tokpair_file.write(c1[i].text.lower() + "\t" + c2[i].text.lower() + "\n")
sentpair_file.write("\n".join([c3[i].text, c4[i].text, '', '']) )
tokpair_file.close()
sentpair_file.close() | fr | 0.221828 | #!/usr/bin/env python3 | 2.69381 | 3 |
task_viewer/urls.py | farahaulita/pbp-tk | 0 | 6624135 | from django.urls import path
from .views import view_task, view_subject_task
urlpatterns = [
path('view-task/<str:name>/<int:identitas>/<str:tambahan>', view_task, name='view-task'),
path('view-subject-task/<str:name>/<int:identitas>/<str:tambahan>/<int:id>', view_subject_task, name='view-subject-task'),
] | from django.urls import path
from .views import view_task, view_subject_task
urlpatterns = [
path('view-task/<str:name>/<int:identitas>/<str:tambahan>', view_task, name='view-task'),
path('view-subject-task/<str:name>/<int:identitas>/<str:tambahan>/<int:id>', view_subject_task, name='view-subject-task'),
] | none | 1 | 1.611288 | 2 | |
lcdfonteditor/ui/ui.py | KiLLAAA/LCD_Font_Editor | 1 | 6624136 | <filename>lcdfonteditor/ui/ui.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, <NAME> aka KiLLA
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
################
# IMPORTS
import os
import sys
import wx
import dataprocessing.core
from glyphwidget import GlyphWidget
from fontwidget import FontWidget
from ui_options import OptionsFrame
################
# DEBUG
# Master debug switch: enables extra stdout diagnostics throughout the UI.
DEBUG = False # True / False
# When True (together with DEBUG), the wx widget-inspection tool is opened.
showInspectionWindow = False # True / False
# Import the inspection tool only when actually requested, to avoid the cost
# and side effects of the import in normal runs.
if DEBUG and showInspectionWindow: import wx.lib.inspection # import widgets inspection tool
################################################################
######################### MAIN WINDOW ##########################
class MainFrame(wx.Frame):
    def __init__(self, *args, **kwargs):
        """Create the main editor window and build its UI.

        All positional and keyword arguments are forwarded unchanged to
        the wx.Frame constructor.
        """
        super(MainFrame, self).__init__(*args, **kwargs)
        # platform identifier string, e.g. "__WXMSW__" on Windows; used by
        # InitUI for platform-specific workarounds
        self.platform = wx.Platform
        self.InitUI()
################################
# INIT UI
def InitUI(self):
################
# BASIC SETUP
self.debugInfo("================================================================") # print wx version to stdout
self.debugInfo("python version ",sys.version)
self.debugInfo("wx version ", wx.version())
self.debugInfo("================================================================")
if self.platform == "__WXMSW__": self.locale = wx.Locale(wx.LANGUAGE_ENGLISH) # /wx.LANGUAGE_DEFAULT/ -> Windows hack!
self.basePath = self.getBasePath()
self.clipboard = None
################
# DATA PROCESSING
DEFAULT_BYTEWIDTH = 5 # DEFAULT CONSTANT VALUE > for fonts 5 bytes/pixels wide
self.processing = dataprocessing.core.DataProcessing(self, DEFAULT_BYTEWIDTH) # pass self - main window
################
# WINDOW with OPTIONS & SETTINGS
self.optionsWindow = None
################
# LAYOUT PANELS
self.mainPanel = wx.Panel(self)
self.leftPanel = wx.Panel(self.mainPanel)
self.leftPanel.SetBackgroundColour('#4f5049')
self.buttonPanel = wx.Panel(self.leftPanel)
self.buttonPanel.SetBackgroundColour('#cccccc')
################
# GLYPH WIDGET
self.screenWidth, self.screenHeight = wx.GetDisplaySize() # Get screen size and select smaller modes for netbooks
self.debugInfo("Screen size:", self.screenWidth, "x", self.screenHeight)
if self.screenHeight < 768:
glyphWidgetMode = 2
fonthPanelMode = 2
else:
glyphWidgetMode = 0
fonthPanelMode = 0
self.glyphWidget = GlyphWidget(self, self.leftPanel, DEFAULT_BYTEWIDTH, glyphWidgetMode)
self.glyphWidget.Bind(wx.EVT_LEFT_UP, self.onGlyphWidgetMouseUp)
self.glyphWidget.Bind(wx.EVT_LEFT_DOWN, self.onGlyphWidgetMouseDown)
self.glyphWidget.Bind(wx.EVT_MOTION, self.onGlyphWidgetMouseMove)
################
# FONT WIDGET
ColourActiveSelected = "#cc0000" #OPTIONAL "#cc0033" "#a800a8" #FF0033
ColourActiveHighlighted = "#00cc00" # OPTIONAL "#00cc99" "#00A8A8" #00FFCC
self.fontWidget = FontWidget(self, self.leftPanel, DEFAULT_BYTEWIDTH, fonthPanelMode)
#self.fontWidget.setActiveColours("#FFFFFF", ColourActiveSelected, ColourActiveHighlighted)
self.fontWidget.Bind(wx.EVT_LEFT_UP, self.onFontWidgetMouseUp)
self.fontWidget.Bind(wx.EVT_MOTION, self.onFontWidgetMouseMove)
self.fontWidget.Bind(wx.EVT_LEAVE_WINDOW, self.onFontWidgetMouseLeave)
################
# BUTTON PANEL SIZER
self.buttonsGridSizer = wx.GridBagSizer(1, 1)
################
# MOVE BUTTONS
self.bmpArrowUp = wx.Bitmap(os.path.join(self.basePath, "icons", "up-arrow.png"), wx.BITMAP_TYPE_PNG)
self.bmpArrowLeft = wx.Bitmap(os.path.join(self.basePath, "icons", "left-arrow.png"), wx.BITMAP_TYPE_PNG)
self.bmpArrowRight = wx.Bitmap(os.path.join(self.basePath, "icons", "right-arrow.png"), wx.BITMAP_TYPE_PNG)
self.bmpArrowDown = wx.Bitmap(os.path.join(self.basePath, "icons", "down-arrow.png"), wx.BITMAP_TYPE_PNG)
self.moveUpButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowUp, size=wx.DefaultSize)
self.moveUpButton.identifier = "moveup"
self.moveUpButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveUpButton.SetToolTip(wx.ToolTip("Move up"))
self.buttonsGridSizer.Add(self.moveUpButton, pos = (0, 1), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # ↑
self.moveDownButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowDown, size=wx.DefaultSize)
self.moveDownButton.identifier = "movedown"
self.moveDownButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveDownButton.SetToolTip(wx.ToolTip("Move down"))
self.buttonsGridSizer.Add(self.moveDownButton, pos = (2, 1), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # ↓
self.moveLeftButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowLeft, size=wx.DefaultSize)
self.moveLeftButton.identifier = "moveleft"
self.moveLeftButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveLeftButton.SetToolTip(wx.ToolTip("Move left"))
self.buttonsGridSizer.Add(self.moveLeftButton, pos = (1, 0), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # ←
self.moveRightButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowRight, size=wx.DefaultSize)
self.moveRightButton.identifier = "moveright"
self.moveRightButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveRightButton.SetToolTip(wx.ToolTip("Move right"))
self.buttonsGridSizer.Add(self.moveRightButton, pos = (1, 2), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # →
################
# ACTION BUTTONS
self.copyButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "copy-square.png"), wx.BITMAP_TYPE_PNG)
self.copyButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.copyButtonBmp, size=wx.DefaultSize)
self.copyButton.identifier = "copy"
self.copyButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.copyButton.SetToolTip(wx.ToolTip("Copy glyph"))
self.buttonsGridSizer.Add(self.copyButton, pos = (2, 0), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.pasteButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "download-square.png"), wx.BITMAP_TYPE_PNG)
self.pasteButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.pasteButtonBmp, size=wx.DefaultSize)
self.pasteButton.identifier = "paste"
self.pasteButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.pasteButton.SetToolTip(wx.ToolTip("Paste glyph"))
self.buttonsGridSizer.Add(self.pasteButton, pos = (2, 2), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.clearButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "sun-light-theme.png"), wx.BITMAP_TYPE_PNG)
self.clearButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.clearButtonBmp, size=wx.DefaultSize)
self.clearButton.identifier = "clear"
self.clearButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.clearButton.SetToolTip(wx.ToolTip("Clear glyph"))
self.buttonsGridSizer.Add(self.clearButton, pos = (1, 1), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.moreButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "more.png"), wx.BITMAP_TYPE_PNG)
self.moreButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.moreButtonBmp, size=wx.DefaultSize)
self.moreButton.identifier = "more"
self.moreButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moreButton.SetToolTip(wx.ToolTip("Open Options & Settings window"))
self.buttonsGridSizer.Add(self.moreButton, pos = (0, 2), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.buttonPanel.SetSizer(self.buttonsGridSizer)
################
# INDICATOR PANEL
self.indicatorLabelModes = [{"id" : 0, "name" : "index", "selectedtootip" : "Selected glyph index", "hovertooltip" : "Highlighted glyph index"}, {"id" : 0, "name" : "hexindex", "selectedtootip" : "Selected glyph base 16 index", "hovertooltip" : "Highlighted glyph base 16 index"}, {"id" : 0, "name" : "character", "selectedtootip" : "Selected glyph char in ascii encoding", "hovertooltip" : "Highlighted glyph char in ascii encoding"}] # ascii hardcoded > gets changed with encoding selection
self.indicatorSelectedLabelMode = 0 # DEFAULT
self.indicatorPanelEncodings = [{"name":"ascii"}, {"name":"utf8"}, {"name":"iso-8859-1"}, {"name":"iso-8859-2"}, {"name":"iso-8859-3"}, {"name":"iso-8859-4"}, {"name":"iso-8859-5"}, {"name":"iso-8859-6"}, {"name":"iso-8859-7"}, {"name":"iso-8859-8"}, {"name":"iso-8859-9"}, {"name":"iso-8859-10"}, {"name":"iso-8859-13"}, {"name":"iso-8859-14"}, {"name":"iso-8859-15"}, {"name":"windows-1250"}, {"name":"windows-1251"}, {"name":"windows-1252"}, {"name":"windows-1253"}, {"name":"windows-1254"}, {"name":"windows-1255"}, {"name":"windows-1256"}, {"name":"windows-1257"}, {"name":"windows-1258"}]
self.selectedIndicatorPanelEncoding = 0 # DEFAULT
# colours
self.indicatorPanelColourActiveSelected = ColourActiveSelected # OPTIONAL "#cc0033" "#a800a8"
self.indicatorPanelColourActiveHighlighted = ColourActiveHighlighted # OPTIONAL "#00cc99" "#00A8A8"
self.indicatorPanel = wx.Panel(self.leftPanel)
self.indicatorPanel.Bind(wx.EVT_LEFT_UP, self.onIndicatorPanelMouseUp)
self.indicatorPanel.SetBackgroundColour('#cccccc') # HARDCODED COLOUR
self.indicatorPanel.SetToolTip(wx.ToolTip("Indicator, click to toggle mode"))
self.indicatorSizer = wx.BoxSizer(wx.HORIZONTAL)
self.selectedLabel = wx.StaticText(self.indicatorPanel)
self.hoverLabel = wx.StaticText(self.indicatorPanel, style=wx.ALIGN_RIGHT)
font = wx.Font(12, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD) # wx.FONTFAMILY_DEFAULT
self.selectedLabel.SetFont(font)
self.selectedLabel.SetForegroundColour(self.indicatorPanelColourActiveSelected) # DEFAULT "#00cc99"
self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
self.selectedLabel.SetToolTip(wx.ToolTip("Selected glyph index, click to toggle mode"))
self.selectedLabel.Bind(wx.EVT_LEFT_UP, self.onIndicatorPanelMouseUp)
self.selectedLabel.SetMinSize ((60, -1)) # set min width only to prevent too small click area
self.hoverLabel.SetFont(font)
self.hoverLabel.SetForegroundColour(self.indicatorPanelColourActiveHighlighted) # DEFAULT "#cc0033"
self.hoverLabel.SetLabel(self.indicatorPanelLabelFormat(self.fontWidget.cellToIndex(self.fontWidget.highlightedCell)))
self.hoverLabel.SetToolTip(wx.ToolTip("Highlighted glyph index, click to toggle mode"))
self.hoverLabel.Bind(wx.EVT_LEFT_UP, self.onIndicatorPanelMouseUp)
self.hoverLabel.SetMinSize ((60, -1)) # set min width only to prevent too small click area
self.indicatorSizer.Add(self.selectedLabel, 0,wx.ALL|wx.ALIGN_CENTER_VERTICAL, 10)
self.indicatorSizer.Add(self.hoverLabel, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 10)
#self.indicatorSizer.SetMinSize ((160, 32))
# https://wxpython.org/Phoenix/docs/html/wx.Sizer.html#wx.Sizer.GetMinSize
self.debugInfo("ui", "IndicatorPanel", "> indicator sizer > GetMinSize", self.indicatorSizer.GetMinSize())
self.indicatorPanel.SetSizer(self.indicatorSizer)
################
# LEFT PANEL SIZER
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.mainSizer.Add(self.glyphWidget, 0, wx.ALL|wx.CENTER, 20)
self.mainSizer.Add(self.buttonPanel, 0, wx.BOTTOM | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_HORIZONTAL, 20)
self.mainSizer.Add(self.indicatorPanel, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 6)
self.mainSizer.Add(self.fontWidget, 0, wx.BOTTOM | wx.LEFT |wx.RIGHT | wx.ALIGN_CENTER_HORIZONTAL, 20)
self.leftPanel.SetSizer(self.mainSizer)
################
# TEXT FIELD
self.textCtrlModes = self.modes = [{"id" : 0, "name" : "Smart (fast)", "method" : 0}, {"id" : 1, "name" : "Simple (failsafe)", "method" : 1}, {"id" : 2, "name" : "Full redraw (slow)", "method" : 2}]
self.selectedTextCtrlMode = 0 # DEFAULT mode > Smart
self.ignoreTextEvent = False
self.textCtrl = wx.TextCtrl(self.mainPanel, size = (320,320), style = wx.TE_MULTILINE | wx.TE_RICH) # another windows hack -> wx.TE_RICH
textCtrlFont = wx.Font(10, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) # wx.FONTFAMILY_TELETYPE -> monospace
self.textCtrl.SetFont(textCtrlFont)
self.textCtrl.Bind(wx.EVT_TEXT,self.OnKeyTyped) # EVT_TEXT_ENTER, EVT_TEXT, wx.EVT_CHAR
################
# MAIN SIZER
self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
self.mainSizer.Add(self.leftPanel, 0, wx.LEFT | wx.TOP, 20)
self.mainSizer.Add(self.textCtrl, 1, wx.TOP | wx.BOTTOM | wx.LEFT | wx.EXPAND, 20)
self.mainPanel.SetSizer(self.mainSizer)
################
# WINDOW RELATED STUFF
self.SetSize((640, 700))
self.SetTitle("LCD Font Editor")
self.icon = wx.IconFromBitmap(wx.Bitmap(os.path.join(self.basePath, "icons", "edit-square.png"), wx.BITMAP_TYPE_PNG))
self.SetIcon(self.icon)
self.Centre()
#self.mainSizer.Fit(self) # make sizer resize parent window to best size # optional, hardcoded size looks better, on 600px screen height window size fits itself
################################################################
########################### METHODS ############################
################################
# EVENTS
################
# BUTTON EVENTS
def onButtons(self, event):
"""Process events of all buttons including Options window"""
btn = event.GetEventObject()
self.debugInfo("ui", "Event", "Button > %s" % (event.GetEventObject().identifier))
# recognize button and performa action
if event.GetEventObject().identifier == "copy":
self.debugInfo("ui", "info:", "Button", "copy data >", self.glyphWidget.data)
self.clipboard = list(self.glyphWidget.data) # copy data
elif event.GetEventObject().identifier == "paste":
if self.clipboard == None: return
#self.glyphWidget.data = list(self.clipboard) # copy data with no check - future use with SetData
self.glyphWidget.data = list(self.clipboard) + [0] * (self.processing.getFontByteWidth() - len(list(self.clipboard))) # add missing data if font byte width changed between copy/paste
self.debugInfo("ui", "info:", "Button", "paste data >", self.clipboard, "> new data", self.glyphWidget.data)
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "clear":
self.glyphWidget.data = [0] * self.processing.getFontByteWidth() # set zero
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "more":
if not self.optionsWindow:
buttonsPanelPositionX, buttonsPanelPositionY = self.buttonPanel.GetScreenPosition()
buttonsPanelSizeX, buttonsPanelSizeY = self.buttonPanel.GetSize()
position = (buttonsPanelPositionX + buttonsPanelSizeX, buttonsPanelPositionY) # position next to buttonsPanel
self.optionsWindow = OptionsFrame(self, position) #
self.optionsWindow.Show()
else:
self.optionsWindow.Close()
elif event.GetEventObject().identifier == "moveup":
self.glyphWidget.data = [ (byte>>1) for byte in self.glyphWidget.data] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "movedown":
self.glyphWidget.data = [ ((byte<<1)& 0xFF) for byte in self.glyphWidget.data] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "moveleft":
#self.glyphWidget.data = self.glyphWidget.data[1:] + [self.glyphWidget.data[0]] # NONDESTRUCTIVE
self.glyphWidget.data = self.glyphWidget.data[1:] + [0] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "moveright":
#self.glyphWidget.data = [self.glyphWidget.data[-1]] + self.glyphWidget.data[:-1] # NONDESTRUCTIVE
self.glyphWidget.data = [0] + self.glyphWidget.data[:-1] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "insertright":
self.processing.insertToRight()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
elif event.GetEventObject().identifier == "insertleft":
self.processing.insertToLeft()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
elif event.GetEventObject().identifier == "removeright":
self.processing.eraseFromRight()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
elif event.GetEventObject().identifier == "removeleft":
self.processing.eraseFromLeft()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
################
# MOUSE EVENTS
def onGlyphWidgetMouseDown(self, event):
"""onMouseDown-parent"""
self.glyphWidget.onMouseDown(event)
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
def onGlyphWidgetMouseUp(self, event):
"""onMouseUp-parent"""
self.glyphWidget.onMouseUp(event)
def onGlyphWidgetMouseMove(self, event):
"""onMouseMove-parent"""
if self.glyphWidget.onMouseMove(event):
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
    def onFontWidgetMouseUp(self, event):
        """Select the clicked glyph and load it into the editor.

        Forwards the click to the font widget, stores the new selection in
        the data layer, updates the indicator label and redraws both widgets.
        """
        self.fontWidget.onMouseUp(event)
        self.processing.setSelectedGlyphIndex(self.fontWidget.getSelectedIndex()) # persist selection in the data layer
        self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
        self.loadGlyphWidgetImageData() # load glyph image
        self.fontWidget.Refresh()
        self.glyphWidget.Refresh()
        # re-layout so the indicator label resizes to fit its new text
        self.selectedLabel.GetParent().GetContainingSizer().Layout()
        self.fontWidget.GetContainingSizer().Layout()
def onFontWidgetMouseMove(self, event):
"""onFontWidgetMouseMove"""
self.fontWidget.onMouseMove(event)
# Hover label refresh
self.hoverLabel.SetLabel(self.indicatorPanelLabelFormat(self.fontWidget.cellToIndex(self.fontWidget.highlightedCell)))
self.hoverLabel.GetParent().GetContainingSizer().Layout()
#self.fontWidget.Refresh() # unnecessary refresh > handled in FontWidget
def onFontWidgetMouseLeave(self, event):
""" """
self.fontWidget.onMouseLeave(event)
self.hoverLabel.SetLabel("") # Empty
self.hoverLabel.GetParent().GetContainingSizer().Layout()
self.fontWidget.Refresh()
def onIndicatorPanelMouseUp(self, event):
"""Toggle mode of indicator panel"""
if self.indicatorSelectedLabelMode == 2: self.indicatorSelectedLabelMode = 0
else: self.indicatorSelectedLabelMode += 1
self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
#self.selectedLabel.Refresh()
# Update ToolTips - those are tied to mode selected
self.selectedLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["selectedtootip"]))
self.hoverLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["hovertooltip"]))
################
# TEXT FIELD EVENTS
    def OnKeyTyped(self, event):
        """Re-parse the whole text field after the user edited it.

        Fired on every wx.EVT_TEXT change. Skipped while ignoreTextEvent is
        set, i.e. while the program itself is rewriting the TextCtrl, to
        avoid re-parsing our own output.
        """
        #self.debugInfo("ui", "Event", "OnKeyTyped") # ultra verbose while text updates
        if not self.ignoreTextEvent:
            tempData = self.textCtrl.GetValue() # get string from TextCtrl
            self.debugInfo("New textfield input\n", tempData, "\n")
            # parse the raw text into glyph data
            self.processing.importData(tempData) # <-------------------------------------------------------- import -> parse data
            self.setWidgetsByteWidth()
            self.fontWidget.setFieldSize(self.processing.getGlyphCount()) # SET FONT WIDGET SIZE
            self.fontWidget.setSelectedIndex(self.processing.getSelectedGlyphIndex())
            self.loadGlyphWidgetImageData()
            self.glyphWidget.Refresh()
            self.loadFontWidgetImageData()
            self.fontWidget.Refresh()
            self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
            self.selectedLabel.GetParent().GetContainingSizer().Layout()
        else:
            pass
            #self.debugInfo("Text event skip!") # very verbose while TextCtrl updates
################################
# SETTERS AND GETTERS
def setWidgetsByteWidth(self):
"""Sets byte width to all widgets using it to match data"""
self.glyphWidget.setByteWidth(self.processing.getFontByteWidth())
self.fontWidget.setByteWidth(self.processing.getFontByteWidth())
# FontWidget
def getFontWidgetModesAvailable(self):
"""For purpose of Options window - returns list of dicts"""
return self.fontWidget.getModesAvailable()
def setFontWidgetMode(self, mode):
"""Set mode of font widget"""
self.fontWidget.setMode(mode)
def getFontWidgetMode(self):
"""Returns int mode of font widget"""
return self.fontWidget.getMode()
# IndicatorPanel
def setIndicatorPanelActiveColours(self, selected, highlighted):
"""Set colours of indicator panel"""
self.indicatorPanelColourActiveSelected = selected
self.indicatorPanelColourActiveHighlighted = highlighted
def getIndicatorPanelEncodingsAvailable(self):
"""For purpose of Options window - returns list of dicts"""
return self.indicatorPanelEncodings
    def setIndicatorPanelLabelsEncoding(self, encoding):
        """Set the indicator panel labels' encoding.

        encoding is an int index into self.indicatorPanelEncodings.
        Refreshes both labels and rewrites the character-mode tooltips so
        they name the newly selected encoding.
        """
        self.selectedIndicatorPanelEncoding = encoding
        # Selected label refresh
        self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
        # Update ToolTips (mode 2 is the character mode - its tooltip names the encoding)
        self.indicatorLabelModes[2]["selectedtootip"] = "Selected glyph char in %s encoding" % self.indicatorPanelEncodings[self.selectedIndicatorPanelEncoding]["name"] # update dict with new value
        self.indicatorLabelModes[2]["hovertooltip"] = "Highlighted glyph char in %s encoding" % self.indicatorPanelEncodings[self.selectedIndicatorPanelEncoding]["name"] # update dict with new value
        self.selectedLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["selectedtootip"]))
        self.hoverLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["hovertooltip"]))
        #self.selectedLabel.Refresh()
        # Hover label refresh
        self.hoverLabel.SetLabel(self.indicatorPanelLabelFormat(self.fontWidget.cellToIndex(self.fontWidget.highlightedCell)))
        self.hoverLabel.GetParent().GetContainingSizer().Layout()
# TextCtrl
def getTextCtrlModesAvailable(self):
"""For purpose of Options window - returns list of dicts"""
return self.textCtrlModes
def setTextCtrlMode(self, mode):
"""Set TextCtrl Mode"""
self.selectedTextCtrlMode = mode
def getTextCtrlMode(self):
"""Returns int TextCtrl Mode"""
return self.selectedTextCtrlMode
# GlyphWidget
def getGlyphWidgetModesAvailable(self):
"""For purpose of Options window - returns list of dicts"""
return self.glyphWidget.getModesAvailable()
def setGlyphWidgetMode(self, mode):
"""Set mode of Font Panel"""
self.glyphWidget.setMode(mode)
def getGlyphWidgetMode(self):
"""Returns int mode of glyph widget """
return self.glyphWidget.getMode()
################################
# DATA UPDATERS
def indicatorPanelLabelFormat(self, data):
"""Format data - returns string according to mode selected"""
if data is not None: pass
else: return " " # return space as placeholder if no data
""" Param int Returns str """
if self.indicatorSelectedLabelMode == 0: return str(data)
elif self.indicatorSelectedLabelMode == 1: return "0x%02X" % data
elif self.indicatorSelectedLabelMode == 2:
controlCharacters = ["NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL", "BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI", "DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB", "CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US"]
if data < 32: return controlCharacters[data]
else: return chr(data).decode(self.indicatorPanelEncodings[self.selectedIndicatorPanelEncoding]["name"], "replace") #
    def updateSelectedGlyph(self):
        """Update the selected glyph in both the textfield and the parsed data.

        Pushes every byte of self.glyphWidget.data into the data layer and
        updates the TextCtrl with the currently selected update method.
        ignoreTextEvent is raised while writing so OnKeyTyped does not
        re-parse the program's own output.
        """
        completeGlyphList = self.processing.getCompleteGlyphList()
        selectedGlyphIndex = self.processing.getSelectedGlyphIndex()
        if len(completeGlyphList) > 0:
            # need at least one glyph
            pass
        else:
            # if no glyphs on list - nothing to update
            return
        self.debugInfo("\n\n================================= DATA UPDATE START =======================================")
        showPosition = completeGlyphList[selectedGlyphIndex][0]["start"] # move textfield cursor to first byte of selected glyph
        self.ignoreTextEvent = True
        for byteindex in range(0,len(completeGlyphList[selectedGlyphIndex])):
            if len(completeGlyphList) > 0:
                tempDict = completeGlyphList[selectedGlyphIndex][byteindex]
            else: break
            startpos = tempDict["start"]
            endpos = tempDict["end"]
            data = self.glyphWidget.data[byteindex]
            self.debugInfo("ui", "UPDATE DATA > startpos", startpos, "> endpos", endpos, "> data", data, "> base 16 > 0x%02X" % (data)) #
            self.processing.updateCurrentDataset(startpos, endpos, data)
            # Smart method - in-place replace of just the changed hex value
            if self.textCtrlModes[self.selectedTextCtrlMode]["method"] == 0: self.updateTextCtrlDataSmart(startpos, endpos, data)
            if self.textCtrlModes[self.selectedTextCtrlMode]["method"] == 1:
                self.textCtrl.ChangeValue(self.processing.getCompleteString())
                #self.textCtrl.ShowPosition(0) # move to start, append leaves cursor at the end
                self.textCtrl.ShowPosition(self.processing.startOffset + showPosition) # move to selected position + add offset
            elif self.textCtrlModes[self.selectedTextCtrlMode]["method"] == 2:
                self.recreateTextfieldFromCurrentData()
                #self.textCtrl.ShowPosition(0) # move to start, append leaves cursor at the end
                self.textCtrl.ShowPosition(self.processing.startOffset + showPosition) # move to selected position + add offset
        self.debugInfo("====================================== DATA UPDATE END ===================================\n\n")
        self.ignoreTextEvent = False
# FASTEST - most preferred way
def updateTextCtrlDataSmart(self, startpos, endpos, data):
"""Fastest method, -> newline chars must be fixed before"""
textfieldStartpos = startpos + self.processing.startOffset
textfieldEndpos = endpos + self.processing.startOffset
word_colour = wx.TextAttr(wx.RED, wx.LIGHT_GREY) # optional bg: wx.NullColour
self.textCtrl.Replace(textfieldStartpos, textfieldEndpos, "0x%02X" % (data))
self.textCtrl.SetStyle(textfieldStartpos, textfieldEndpos, word_colour)
# OPTIONAL Super SLOW - most featured
    def recreateTextfieldFromCurrentData(self):
        """Fully recreate the textfield from the parsed data.

        Slowest update method, but every single value can carry its own
        colour depending on its state ("inserted" -> blue, "modified" ->
        red on grey); currently two states are used, more are possible.
        """
        self.textCtrl.SetValue("")
        self.textCtrl.SetDefaultStyle(wx.TextAttr(wx.NullColour))
        self.textCtrl.AppendText(self.processing.getStartText()) # append data from start of textfield up to the beginning of parsedText which is startOffset
        peviousEnd = 0
        completeGlyphList = self.processing.getCompleteGlyphList() # list of glyphs, each a list of per-byte dicts
        for glyph in completeGlyphList:
            for glyphData in glyph:
                startpos = glyphData.get('start')
                endpos = glyphData.get('end')
                text = (glyphData.get('hexdata'))
                self.textCtrl.AppendText(self.processing.parsedText[peviousEnd:startpos])# append the plain text between the previous hex value and this one
                peviousEnd = endpos
                # Select colour by item state
                style = wx.TextAttr(wx.NullColour)
                state = (glyphData.get('state'))
                if state == "inserted":
                    style = wx.TextAttr(wx.BLUE) # wx.NullColour
                elif state == "modified":
                    style = wx.TextAttr(wx.RED, wx.LIGHT_GREY) #
                self.textCtrl.SetDefaultStyle(style)
                self.textCtrl.AppendText(text)
        self.textCtrl.SetDefaultStyle(wx.TextAttr(wx.NullColour))
        self.textCtrl.AppendText(self.processing.parsedText[peviousEnd:]) # append rest of string
        self.textCtrl.AppendText(self.processing.getEndText()) # append rest of textfield
################################
# WIDGET IMAGE DATA LOADERS
def loadGlyphWidgetImageData(self):
if not self.processing.glyphList:
self.debugInfo("ui", "Warning:", "self.processing.glyphList is empty!")
pass # return
else:
self.glyphWidget.data = [ int(sub['hexdata'], 16) for sub in self.processing.glyphList[self.processing.getSelectedGlyphIndex()] ]
self.debugInfo("ui", "info:", "self.glyphWidget.data loaded with >", self.glyphWidget.data) #
def loadFontWidgetImageData(self):
if not self.processing.glyphList:
self.debugInfo("ui", "Warning:", "self.processing.glyphList is empty!")
pass # return
else:
self.fontWidget.data = [ int(sub['hexdata'], 16) for glyph in self.processing.glyphList for sub in glyph ]
self.debugInfo("ui", "info:", "self.fontWidget.data loaded with >", len(self.fontWidget.data), "items.") #
################################
# MISC. OTHER
def getBasePath(self):
"""Returns str path to launch script"""
return os.path.dirname(os.path.realpath(__file__)) # return dir containing this file
def debugInfo(self, *text):
"""Prints debug messages to stdout"""
if DEBUG:
if self.platform != "__WXMSW__":
# COLOURS NOT WORKING ON WINDOWS 7
if ("Event") in text : self.printToStdout("\033[1;32;1m", "\033[0m", *text) # highlight event
elif ("info:") in text : self.printToStdout("\033[1;34;1m", "\033[0m", *text) # highlight messgae
elif ("Error") in text : self.printToStdout("\033[1;31;1m", "\033[0m", *text) # highlight error
elif ("Warning:") in text : self.printToStdout("\033[1;31;1m", "\033[0m", *text) # highlight error
else: self.printToStdout("", "", *text) # other info
else:
self.printToStdout("", "", *text) # no colours available
def printToStdout(self, header, footer, *text):
"""Replaces print"""
sys.stdout.write(header)
for string in text:
#all to string
if not isinstance(string, str):
try:
string = str(string)
except: pass
try:
sys.stdout.write(string + " ")
except: pass
sys.stdout.write(footer + "\n")
################################################################
# MAIN , wx.App
def main():
    """Create the wx application, show the main editor frame and run the loop."""
    app = wx.App()
    frame = MainFrame(None)
    frame.Show()
    if DEBUG and showInspectionWindow:
        wx.lib.inspection.InspectionTool().Show()  # DEBUG widget inspector
    app.MainLoop()
# run the editor only when executed directly, not when imported
if __name__ == '__main__':
    main()
################################################################
# lcdfonteditor/ui/ui.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, <NAME> aka KiLLA
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
################
# IMPORTS
import os
import sys
import wx
import dataprocessing.core
from glyphwidget import GlyphWidget
from fontwidget import FontWidget
from ui_options import OptionsFrame
################
# DEBUG
DEBUG = False # True / False
showInspectionWindow = False # True / False
if DEBUG and showInspectionWindow: import wx.lib.inspection # import widgets inspection tool
################################################################
######################### MAIN WINDOW ##########################
class MainFrame(wx.Frame):
def __init__(self, *args, **kwargs):
super(MainFrame, self).__init__(*args, **kwargs)
self.platform = wx.Platform
self.InitUI()
################################
# INIT UI
def InitUI(self):
################
# BASIC SETUP
self.debugInfo("================================================================") # print wx version to stdout
self.debugInfo("python version ",sys.version)
self.debugInfo("wx version ", wx.version())
self.debugInfo("================================================================")
if self.platform == "__WXMSW__": self.locale = wx.Locale(wx.LANGUAGE_ENGLISH) # /wx.LANGUAGE_DEFAULT/ -> Windows hack!
self.basePath = self.getBasePath()
self.clipboard = None
################
# DATA PROCESSING
DEFAULT_BYTEWIDTH = 5 # DEFAULT CONSTANT VALUE > for fonts 5 bytes/pixels wide
self.processing = dataprocessing.core.DataProcessing(self, DEFAULT_BYTEWIDTH) # pass self - main window
################
# WINDOW with OPTIONS & SETTINGS
self.optionsWindow = None
################
# LAYOUT PANELS
self.mainPanel = wx.Panel(self)
self.leftPanel = wx.Panel(self.mainPanel)
self.leftPanel.SetBackgroundColour('#4f5049')
self.buttonPanel = wx.Panel(self.leftPanel)
self.buttonPanel.SetBackgroundColour('#cccccc')
################
# GLYPH WIDGET
self.screenWidth, self.screenHeight = wx.GetDisplaySize() # Get screen size and select smaller modes for netbooks
self.debugInfo("Screen size:", self.screenWidth, "x", self.screenHeight)
if self.screenHeight < 768:
glyphWidgetMode = 2
fonthPanelMode = 2
else:
glyphWidgetMode = 0
fonthPanelMode = 0
self.glyphWidget = GlyphWidget(self, self.leftPanel, DEFAULT_BYTEWIDTH, glyphWidgetMode)
self.glyphWidget.Bind(wx.EVT_LEFT_UP, self.onGlyphWidgetMouseUp)
self.glyphWidget.Bind(wx.EVT_LEFT_DOWN, self.onGlyphWidgetMouseDown)
self.glyphWidget.Bind(wx.EVT_MOTION, self.onGlyphWidgetMouseMove)
################
# FONT WIDGET
ColourActiveSelected = "#cc0000" #OPTIONAL "#cc0033" "#a800a8" #FF0033
ColourActiveHighlighted = "#00cc00" # OPTIONAL "#00cc99" "#00A8A8" #00FFCC
self.fontWidget = FontWidget(self, self.leftPanel, DEFAULT_BYTEWIDTH, fonthPanelMode)
#self.fontWidget.setActiveColours("#FFFFFF", ColourActiveSelected, ColourActiveHighlighted)
self.fontWidget.Bind(wx.EVT_LEFT_UP, self.onFontWidgetMouseUp)
self.fontWidget.Bind(wx.EVT_MOTION, self.onFontWidgetMouseMove)
self.fontWidget.Bind(wx.EVT_LEAVE_WINDOW, self.onFontWidgetMouseLeave)
################
# BUTTON PANEL SIZER
self.buttonsGridSizer = wx.GridBagSizer(1, 1)
################
# MOVE BUTTONS
self.bmpArrowUp = wx.Bitmap(os.path.join(self.basePath, "icons", "up-arrow.png"), wx.BITMAP_TYPE_PNG)
self.bmpArrowLeft = wx.Bitmap(os.path.join(self.basePath, "icons", "left-arrow.png"), wx.BITMAP_TYPE_PNG)
self.bmpArrowRight = wx.Bitmap(os.path.join(self.basePath, "icons", "right-arrow.png"), wx.BITMAP_TYPE_PNG)
self.bmpArrowDown = wx.Bitmap(os.path.join(self.basePath, "icons", "down-arrow.png"), wx.BITMAP_TYPE_PNG)
self.moveUpButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowUp, size=wx.DefaultSize)
self.moveUpButton.identifier = "moveup"
self.moveUpButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveUpButton.SetToolTip(wx.ToolTip("Move up"))
self.buttonsGridSizer.Add(self.moveUpButton, pos = (0, 1), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # ↑
self.moveDownButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowDown, size=wx.DefaultSize)
self.moveDownButton.identifier = "movedown"
self.moveDownButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveDownButton.SetToolTip(wx.ToolTip("Move down"))
self.buttonsGridSizer.Add(self.moveDownButton, pos = (2, 1), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # ↓
self.moveLeftButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowLeft, size=wx.DefaultSize)
self.moveLeftButton.identifier = "moveleft"
self.moveLeftButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveLeftButton.SetToolTip(wx.ToolTip("Move left"))
self.buttonsGridSizer.Add(self.moveLeftButton, pos = (1, 0), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # ←
self.moveRightButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.bmpArrowRight, size=wx.DefaultSize)
self.moveRightButton.identifier = "moveright"
self.moveRightButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moveRightButton.SetToolTip(wx.ToolTip("Move right"))
self.buttonsGridSizer.Add(self.moveRightButton, pos = (1, 2), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4) # →
################
# ACTION BUTTONS
self.copyButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "copy-square.png"), wx.BITMAP_TYPE_PNG)
self.copyButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.copyButtonBmp, size=wx.DefaultSize)
self.copyButton.identifier = "copy"
self.copyButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.copyButton.SetToolTip(wx.ToolTip("Copy glyph"))
self.buttonsGridSizer.Add(self.copyButton, pos = (2, 0), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.pasteButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "download-square.png"), wx.BITMAP_TYPE_PNG)
self.pasteButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.pasteButtonBmp, size=wx.DefaultSize)
self.pasteButton.identifier = "paste"
self.pasteButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.pasteButton.SetToolTip(wx.ToolTip("Paste glyph"))
self.buttonsGridSizer.Add(self.pasteButton, pos = (2, 2), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.clearButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "sun-light-theme.png"), wx.BITMAP_TYPE_PNG)
self.clearButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.clearButtonBmp, size=wx.DefaultSize)
self.clearButton.identifier = "clear"
self.clearButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.clearButton.SetToolTip(wx.ToolTip("Clear glyph"))
self.buttonsGridSizer.Add(self.clearButton, pos = (1, 1), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.moreButtonBmp = wx.Bitmap(os.path.join(self.basePath, "icons", "more.png"), wx.BITMAP_TYPE_PNG)
self.moreButton = wx.BitmapButton(self.buttonPanel, id = wx.ID_ANY, bitmap = self.moreButtonBmp, size=wx.DefaultSize)
self.moreButton.identifier = "more"
self.moreButton.Bind(wx.EVT_BUTTON, self.onButtons)
self.moreButton.SetToolTip(wx.ToolTip("Open Options & Settings window"))
self.buttonsGridSizer.Add(self.moreButton, pos = (0, 2), span = (1, 1), flag = wx.ALL|wx.CENTER, border = 4)
self.buttonPanel.SetSizer(self.buttonsGridSizer)
################
# INDICATOR PANEL
self.indicatorLabelModes = [{"id" : 0, "name" : "index", "selectedtootip" : "Selected glyph index", "hovertooltip" : "Highlighted glyph index"}, {"id" : 0, "name" : "hexindex", "selectedtootip" : "Selected glyph base 16 index", "hovertooltip" : "Highlighted glyph base 16 index"}, {"id" : 0, "name" : "character", "selectedtootip" : "Selected glyph char in ascii encoding", "hovertooltip" : "Highlighted glyph char in ascii encoding"}] # ascii hardcoded > gets changed with encoding selection
self.indicatorSelectedLabelMode = 0 # DEFAULT
self.indicatorPanelEncodings = [{"name":"ascii"}, {"name":"utf8"}, {"name":"iso-8859-1"}, {"name":"iso-8859-2"}, {"name":"iso-8859-3"}, {"name":"iso-8859-4"}, {"name":"iso-8859-5"}, {"name":"iso-8859-6"}, {"name":"iso-8859-7"}, {"name":"iso-8859-8"}, {"name":"iso-8859-9"}, {"name":"iso-8859-10"}, {"name":"iso-8859-13"}, {"name":"iso-8859-14"}, {"name":"iso-8859-15"}, {"name":"windows-1250"}, {"name":"windows-1251"}, {"name":"windows-1252"}, {"name":"windows-1253"}, {"name":"windows-1254"}, {"name":"windows-1255"}, {"name":"windows-1256"}, {"name":"windows-1257"}, {"name":"windows-1258"}]
self.selectedIndicatorPanelEncoding = 0 # DEFAULT
# colours
self.indicatorPanelColourActiveSelected = ColourActiveSelected # OPTIONAL "#cc0033" "#a800a8"
self.indicatorPanelColourActiveHighlighted = ColourActiveHighlighted # OPTIONAL "#00cc99" "#00A8A8"
self.indicatorPanel = wx.Panel(self.leftPanel)
self.indicatorPanel.Bind(wx.EVT_LEFT_UP, self.onIndicatorPanelMouseUp)
self.indicatorPanel.SetBackgroundColour('#cccccc') # HARDCODED COLOUR
self.indicatorPanel.SetToolTip(wx.ToolTip("Indicator, click to toggle mode"))
self.indicatorSizer = wx.BoxSizer(wx.HORIZONTAL)
self.selectedLabel = wx.StaticText(self.indicatorPanel)
self.hoverLabel = wx.StaticText(self.indicatorPanel, style=wx.ALIGN_RIGHT)
font = wx.Font(12, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD) # wx.FONTFAMILY_DEFAULT
self.selectedLabel.SetFont(font)
self.selectedLabel.SetForegroundColour(self.indicatorPanelColourActiveSelected) # DEFAULT "#00cc99"
self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
self.selectedLabel.SetToolTip(wx.ToolTip("Selected glyph index, click to toggle mode"))
self.selectedLabel.Bind(wx.EVT_LEFT_UP, self.onIndicatorPanelMouseUp)
self.selectedLabel.SetMinSize ((60, -1)) # set min width only to prevent too small click area
self.hoverLabel.SetFont(font)
self.hoverLabel.SetForegroundColour(self.indicatorPanelColourActiveHighlighted) # DEFAULT "#cc0033"
self.hoverLabel.SetLabel(self.indicatorPanelLabelFormat(self.fontWidget.cellToIndex(self.fontWidget.highlightedCell)))
self.hoverLabel.SetToolTip(wx.ToolTip("Highlighted glyph index, click to toggle mode"))
self.hoverLabel.Bind(wx.EVT_LEFT_UP, self.onIndicatorPanelMouseUp)
self.hoverLabel.SetMinSize ((60, -1)) # set min width only to prevent too small click area
self.indicatorSizer.Add(self.selectedLabel, 0,wx.ALL|wx.ALIGN_CENTER_VERTICAL, 10)
self.indicatorSizer.Add(self.hoverLabel, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 10)
#self.indicatorSizer.SetMinSize ((160, 32))
# https://wxpython.org/Phoenix/docs/html/wx.Sizer.html#wx.Sizer.GetMinSize
self.debugInfo("ui", "IndicatorPanel", "> indicator sizer > GetMinSize", self.indicatorSizer.GetMinSize())
self.indicatorPanel.SetSizer(self.indicatorSizer)
################
# LEFT PANEL SIZER
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.mainSizer.Add(self.glyphWidget, 0, wx.ALL|wx.CENTER, 20)
self.mainSizer.Add(self.buttonPanel, 0, wx.BOTTOM | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_HORIZONTAL, 20)
self.mainSizer.Add(self.indicatorPanel, 0, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 6)
self.mainSizer.Add(self.fontWidget, 0, wx.BOTTOM | wx.LEFT |wx.RIGHT | wx.ALIGN_CENTER_HORIZONTAL, 20)
self.leftPanel.SetSizer(self.mainSizer)
################
# TEXT FIELD
self.textCtrlModes = self.modes = [{"id" : 0, "name" : "Smart (fast)", "method" : 0}, {"id" : 1, "name" : "Simple (failsafe)", "method" : 1}, {"id" : 2, "name" : "Full redraw (slow)", "method" : 2}]
self.selectedTextCtrlMode = 0 # DEFAULT mode > Smart
self.ignoreTextEvent = False
self.textCtrl = wx.TextCtrl(self.mainPanel, size = (320,320), style = wx.TE_MULTILINE | wx.TE_RICH) # another windows hack -> wx.TE_RICH
textCtrlFont = wx.Font(10, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) # wx.FONTFAMILY_TELETYPE -> monospace
self.textCtrl.SetFont(textCtrlFont)
self.textCtrl.Bind(wx.EVT_TEXT,self.OnKeyTyped) # EVT_TEXT_ENTER, EVT_TEXT, wx.EVT_CHAR
################
# MAIN SIZER
self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
self.mainSizer.Add(self.leftPanel, 0, wx.LEFT | wx.TOP, 20)
self.mainSizer.Add(self.textCtrl, 1, wx.TOP | wx.BOTTOM | wx.LEFT | wx.EXPAND, 20)
self.mainPanel.SetSizer(self.mainSizer)
################
# WINDOW RELATED STUFF
self.SetSize((640, 700))
self.SetTitle("LCD Font Editor")
self.icon = wx.IconFromBitmap(wx.Bitmap(os.path.join(self.basePath, "icons", "edit-square.png"), wx.BITMAP_TYPE_PNG))
self.SetIcon(self.icon)
self.Centre()
#self.mainSizer.Fit(self) # make sizer resize parent window to best size # optional, hardcoded size looks better, on 600px screen height window size fits itself
################################################################
########################### METHODS ############################
################################
# EVENTS
################
# BUTTON EVENTS
def onButtons(self, event):
"""Process events of all buttons including Options window"""
btn = event.GetEventObject()
self.debugInfo("ui", "Event", "Button > %s" % (event.GetEventObject().identifier))
# recognize button and performa action
if event.GetEventObject().identifier == "copy":
self.debugInfo("ui", "info:", "Button", "copy data >", self.glyphWidget.data)
self.clipboard = list(self.glyphWidget.data) # copy data
elif event.GetEventObject().identifier == "paste":
if self.clipboard == None: return
#self.glyphWidget.data = list(self.clipboard) # copy data with no check - future use with SetData
self.glyphWidget.data = list(self.clipboard) + [0] * (self.processing.getFontByteWidth() - len(list(self.clipboard))) # add missing data if font byte width changed between copy/paste
self.debugInfo("ui", "info:", "Button", "paste data >", self.clipboard, "> new data", self.glyphWidget.data)
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "clear":
self.glyphWidget.data = [0] * self.processing.getFontByteWidth() # set zero
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "more":
if not self.optionsWindow:
buttonsPanelPositionX, buttonsPanelPositionY = self.buttonPanel.GetScreenPosition()
buttonsPanelSizeX, buttonsPanelSizeY = self.buttonPanel.GetSize()
position = (buttonsPanelPositionX + buttonsPanelSizeX, buttonsPanelPositionY) # position next to buttonsPanel
self.optionsWindow = OptionsFrame(self, position) #
self.optionsWindow.Show()
else:
self.optionsWindow.Close()
elif event.GetEventObject().identifier == "moveup":
self.glyphWidget.data = [ (byte>>1) for byte in self.glyphWidget.data] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "movedown":
self.glyphWidget.data = [ ((byte<<1)& 0xFF) for byte in self.glyphWidget.data] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "moveleft":
#self.glyphWidget.data = self.glyphWidget.data[1:] + [self.glyphWidget.data[0]] # NONDESTRUCTIVE
self.glyphWidget.data = self.glyphWidget.data[1:] + [0] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "moveright":
#self.glyphWidget.data = [self.glyphWidget.data[-1]] + self.glyphWidget.data[:-1] # NONDESTRUCTIVE
self.glyphWidget.data = [0] + self.glyphWidget.data[:-1] # DESTRUCTIVE
self.glyphWidget.Refresh()
self.updateSelectedGlyph()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
elif event.GetEventObject().identifier == "insertright":
self.processing.insertToRight()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
elif event.GetEventObject().identifier == "insertleft":
self.processing.insertToLeft()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
elif event.GetEventObject().identifier == "removeright":
self.processing.eraseFromRight()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
elif event.GetEventObject().identifier == "removeleft":
self.processing.eraseFromLeft()
self.setWidgetsByteWidth()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
newString = self.processing.getCompleteString()
self.textCtrl.ChangeValue(newString)
################
# MOUSE EVENTS
    def onGlyphWidgetMouseDown(self, event):
        """Mouse-down on the glyph editor: forward to the widget, then
        propagate the (possibly toggled) pixel into textfield and overview."""
        self.glyphWidget.onMouseDown(event)
        self.updateSelectedGlyph()        # sync textfield + parsed dataset
        self.loadFontWidgetImageData()    # rebuild overview pixel data
        self.fontWidget.Refresh()
    def onGlyphWidgetMouseUp(self, event):
        """Mouse-up on the glyph editor: simply forwarded to the widget."""
        self.glyphWidget.onMouseUp(event)
    def onGlyphWidgetMouseMove(self, event):
        """Mouse-move on the glyph editor: forward to the widget; only when
        the widget reports a change (truthy return) sync the other views."""
        if self.glyphWidget.onMouseMove(event):
            self.updateSelectedGlyph()        # sync textfield + parsed dataset
            self.loadFontWidgetImageData()    # rebuild overview pixel data
            self.fontWidget.Refresh()
    def onFontWidgetMouseUp(self, event):
        """Click in the font overview: make the clicked glyph the selection,
        load it into the glyph editor and refresh the indicator label."""
        self.fontWidget.onMouseUp(event)
        self.processing.setSelectedGlyphIndex(self.fontWidget.getSelectedIndex()) # widget selection -> model
        self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
        self.loadGlyphWidgetImageData() # load the selected glyph's pixel data
        self.fontWidget.Refresh()
        self.glyphWidget.Refresh()
        # re-layout so the label resizes to its new text
        self.selectedLabel.GetParent().GetContainingSizer().Layout()
        self.fontWidget.GetContainingSizer().Layout()
    def onFontWidgetMouseMove(self, event):
        """Hover in the font overview: update the hover indicator label."""
        self.fontWidget.onMouseMove(event)
        # show the index of the glyph currently under the pointer
        self.hoverLabel.SetLabel(self.indicatorPanelLabelFormat(self.fontWidget.cellToIndex(self.fontWidget.highlightedCell)))
        self.hoverLabel.GetParent().GetContainingSizer().Layout()
        #self.fontWidget.Refresh() # unnecessary refresh > handled in FontWidget
    def onFontWidgetMouseLeave(self, event):
        """Pointer left the font overview: clear hover label and highlight."""
        self.fontWidget.onMouseLeave(event)
        self.hoverLabel.SetLabel("") # empty label while nothing is hovered
        self.hoverLabel.GetParent().GetContainingSizer().Layout()
        self.fontWidget.Refresh()
def onIndicatorPanelMouseUp(self, event):
"""Toggle mode of indicator panel"""
if self.indicatorSelectedLabelMode == 2: self.indicatorSelectedLabelMode = 0
else: self.indicatorSelectedLabelMode += 1
self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
#self.selectedLabel.Refresh()
# Update ToolTips - those are tied to mode selected
self.selectedLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["selectedtootip"]))
self.hoverLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["hovertooltip"]))
################
# TEXT FIELD EVENTS
def OnKeyTyped(self, event):
"""TextCtrl changed event, parse new data"""
#self.debugInfo("ui", "Event", "OnKeyTyped") # ultra verbose while text updates
if not self.ignoreTextEvent:
tempData = self.textCtrl.GetValue() # get string from TextCtrl
self.debugInfo("New textfield input\n", tempData, "\n")
# process import
self.processing.importData(tempData) # <-------------------------------------------------------- import -> parse data
self.setWidgetsByteWidth()
self.fontWidget.setFieldSize(self.processing.getGlyphCount()) # SET FONT WIDGET SIZE
self.fontWidget.setSelectedIndex(self.processing.getSelectedGlyphIndex())
self.loadGlyphWidgetImageData()
self.glyphWidget.Refresh()
self.loadFontWidgetImageData()
self.fontWidget.Refresh()
self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
self.selectedLabel.GetParent().GetContainingSizer().Layout()
else:
pass
#self.debugInfo("Text event skip!") # very verbose while TextCtrl updates
################################
# SETTERS AND GETTERS
def setWidgetsByteWidth(self):
"""Sets byte width to all widgets using it to match data"""
self.glyphWidget.setByteWidth(self.processing.getFontByteWidth())
self.fontWidget.setByteWidth(self.processing.getFontByteWidth())
# FontWidget
    def getFontWidgetModesAvailable(self):
        """Return the font widget's display modes (list of dicts) for the Options window."""
        return self.fontWidget.getModesAvailable()
    def setFontWidgetMode(self, mode):
        """Set the display mode (int index) of the font overview widget."""
        self.fontWidget.setMode(mode)
    def getFontWidgetMode(self):
        """Return the int display mode of the font overview widget."""
        return self.fontWidget.getMode()
# IndicatorPanel
def setIndicatorPanelActiveColours(self, selected, highlighted):
"""Set colours of indicator panel"""
self.indicatorPanelColourActiveSelected = selected
self.indicatorPanelColourActiveHighlighted = highlighted
    def getIndicatorPanelEncodingsAvailable(self):
        """Return the supported label encodings (list of dicts) for the Options window."""
        return self.indicatorPanelEncodings
    def setIndicatorPanelLabelsEncoding(self, encoding):
        """Select the character encoding (index into self.indicatorPanelEncodings)
        used by indicator mode 2, then refresh both labels and their tooltips."""
        self.selectedIndicatorPanelEncoding = encoding
        # Selected label refresh
        self.selectedLabel.SetLabel(self.indicatorPanelLabelFormat(self.processing.getSelectedGlyphIndex()))
        # Update ToolTips - mode 2 tooltips embed the encoding name
        self.indicatorLabelModes[2]["selectedtootip"] = "Selected glyph char in %s encoding" % self.indicatorPanelEncodings[self.selectedIndicatorPanelEncoding]["name"] # update dict with new value
        self.indicatorLabelModes[2]["hovertooltip"] = "Highlighted glyph char in %s encoding" % self.indicatorPanelEncodings[self.selectedIndicatorPanelEncoding]["name"] # update dict with new value
        self.selectedLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["selectedtootip"]))
        self.hoverLabel.SetToolTip(wx.ToolTip(self.indicatorLabelModes[self.indicatorSelectedLabelMode]["hovertooltip"]))
        #self.selectedLabel.Refresh()
        # Hover label refresh
        self.hoverLabel.SetLabel(self.indicatorPanelLabelFormat(self.fontWidget.cellToIndex(self.fontWidget.highlightedCell)))
        self.hoverLabel.GetParent().GetContainingSizer().Layout()
# TextCtrl
    def getTextCtrlModesAvailable(self):
        """Return the textfield update strategies (list of dicts) for the Options window."""
        return self.textCtrlModes
def setTextCtrlMode(self, mode):
"""Set TextCtrl Mode"""
self.selectedTextCtrlMode = mode
def getTextCtrlMode(self):
"""Returns int TextCtrl Mode"""
return self.selectedTextCtrlMode
# GlyphWidget
    def getGlyphWidgetModesAvailable(self):
        """Return the glyph widget's display modes (list of dicts) for the Options window."""
        return self.glyphWidget.getModesAvailable()
    def setGlyphWidgetMode(self, mode):
        """Set the display mode (int index) of the glyph editor widget."""
        self.glyphWidget.setMode(mode)
    def getGlyphWidgetMode(self):
        """Return the int display mode of the glyph editor widget."""
        return self.glyphWidget.getMode()
################################
# DATA UPDATERS
def indicatorPanelLabelFormat(self, data):
"""Format data - returns string according to mode selected"""
if data is not None: pass
else: return " " # return space as placeholder if no data
""" Param int Returns str """
if self.indicatorSelectedLabelMode == 0: return str(data)
elif self.indicatorSelectedLabelMode == 1: return "0x%02X" % data
elif self.indicatorSelectedLabelMode == 2:
controlCharacters = ["NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL", "BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI", "DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB", "CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US"]
if data < 32: return controlCharacters[data]
else: return chr(data).decode(self.indicatorPanelEncodings[self.selectedIndicatorPanelEncoding]["name"], "replace") #
    def updateSelectedGlyph(self):
        """Write the glyph editor's current byte data back into both the
        parsed dataset and the textfield, using the selected update method.

        self.ignoreTextEvent is raised for the duration so the programmatic
        textfield change does not retrigger OnKeyTyped re-parsing.
        """
        completeGlyphList = self.processing.getCompleteGlyphList()
        selectedGlyphIndex = self.processing.getSelectedGlyphIndex()
        if len(completeGlyphList) > 0:
            # need at least one glyph
            pass
        else:
            # no glyphs parsed - nothing to update
            return
        self.debugInfo("\n\n================================= DATA UPDATE START =======================================")
        showPosition = completeGlyphList[selectedGlyphIndex][0]["start"] # textfield cursor -> first byte of the selected glyph
        self.ignoreTextEvent = True
        for byteindex in range(0,len(completeGlyphList[selectedGlyphIndex])):
            if len(completeGlyphList) > 0:
                tempDict = completeGlyphList[selectedGlyphIndex][byteindex]
            else: break
            startpos = tempDict["start"]
            endpos = tempDict["end"]
            data = self.glyphWidget.data[byteindex]
            self.debugInfo("ui", "UPDATE DATA > startpos", startpos, "> endpos", endpos, "> data", data, "> base 16 > 0x%02X" % (data)) #
            self.processing.updateCurrentDataset(startpos, endpos, data)
            # method 0 "Smart": replace only the changed hex token in place (fast, no colours)
            if self.textCtrlModes[self.selectedTextCtrlMode]["method"] == 0: self.updateTextCtrlDataSmart(startpos, endpos, data)
            if self.textCtrlModes[self.selectedTextCtrlMode]["method"] == 1:
                # method 1 "Simple": replace the whole textfield content (failsafe)
                self.textCtrl.ChangeValue(self.processing.getCompleteString())
                #self.textCtrl.ShowPosition(0) # move to start, append leaves cursor at the end
                self.textCtrl.ShowPosition(self.processing.startOffset + showPosition) # scroll to the edited glyph (plus header offset)
            elif self.textCtrlModes[self.selectedTextCtrlMode]["method"] == 2:
                # method 2 "Full redraw": rebuild with per-byte colouring (slow)
                self.recreateTextfieldFromCurrentData()
                #self.textCtrl.ShowPosition(0) # move to start, append leaves cursor at the end
                self.textCtrl.ShowPosition(self.processing.startOffset + showPosition) # scroll to the edited glyph (plus header offset)
        self.debugInfo("====================================== DATA UPDATE END ===================================\n\n")
        self.ignoreTextEvent = False
# FASTEST - most preferred way
def updateTextCtrlDataSmart(self, startpos, endpos, data):
"""Fastest method, -> newline chars must be fixed before"""
textfieldStartpos = startpos + self.processing.startOffset
textfieldEndpos = endpos + self.processing.startOffset
word_colour = wx.TextAttr(wx.RED, wx.LIGHT_GREY) # optional bg: wx.NullColour
self.textCtrl.Replace(textfieldStartpos, textfieldEndpos, "0x%02X" % (data))
self.textCtrl.SetStyle(textfieldStartpos, textfieldEndpos, word_colour)
# OPTIONAL Super SLOW - most featured
    def recreateTextfieldFromCurrentData(self):
        """Fully rebuild the textfield, colouring each hex byte by its state.

        Slowest update method, but each value can carry its own colour:
        "inserted" bytes blue, "modified" bytes red on grey, rest default.
        The plain text between hex tokens is copied from
        self.processing.parsedText using the recorded start/end offsets.
        """
        self.textCtrl.SetValue("")
        self.textCtrl.SetDefaultStyle(wx.TextAttr(wx.NullColour))
        self.textCtrl.AppendText(self.processing.getStartText()) # text preceding the parsed section (startOffset chars)
        peviousEnd = 0  # (sic) end offset of the previously appended hex token
        completeGlyphList = self.processing.getCompleteGlyphList() #
        for glyph in completeGlyphList:
            for glyphData in glyph:
                startpos = glyphData.get('start')
                endpos = glyphData.get('end')
                text = (glyphData.get('hexdata'))
                self.textCtrl.AppendText(self.processing.parsedText[peviousEnd:startpos])# plain text between the previous and this hex token
                peviousEnd = endpos
                # pick the colour for this byte from its state
                style = wx.TextAttr(wx.NullColour)
                state = (glyphData.get('state'))
                if state == "inserted":
                    style = wx.TextAttr(wx.BLUE) # wx.NullColour
                elif state == "modified":
                    style = wx.TextAttr(wx.RED, wx.LIGHT_GREY) #
                self.textCtrl.SetDefaultStyle(style)
                self.textCtrl.AppendText(text)
        self.textCtrl.SetDefaultStyle(wx.TextAttr(wx.NullColour))
        self.textCtrl.AppendText(self.processing.parsedText[peviousEnd:]) # remainder of the parsed section
        self.textCtrl.AppendText(self.processing.getEndText()) # text following the parsed section
################################
# WIDGET IMAGE DATA LOADERS
def loadGlyphWidgetImageData(self):
if not self.processing.glyphList:
self.debugInfo("ui", "Warning:", "self.processing.glyphList is empty!")
pass # return
else:
self.glyphWidget.data = [ int(sub['hexdata'], 16) for sub in self.processing.glyphList[self.processing.getSelectedGlyphIndex()] ]
self.debugInfo("ui", "info:", "self.glyphWidget.data loaded with >", self.glyphWidget.data) #
def loadFontWidgetImageData(self):
if not self.processing.glyphList:
self.debugInfo("ui", "Warning:", "self.processing.glyphList is empty!")
pass # return
else:
self.fontWidget.data = [ int(sub['hexdata'], 16) for glyph in self.processing.glyphList for sub in glyph ]
self.debugInfo("ui", "info:", "self.fontWidget.data loaded with >", len(self.fontWidget.data), "items.") #
################################
# MISC. OTHER
def getBasePath(self):
"""Returns str path to launch script"""
return os.path.dirname(os.path.realpath(__file__)) # return dir containing this file
def debugInfo(self, *text):
"""Prints debug messages to stdout"""
if DEBUG:
if self.platform != "__WXMSW__":
# COLOURS NOT WORKING ON WINDOWS 7
if ("Event") in text : self.printToStdout("\033[1;32;1m", "\033[0m", *text) # highlight event
elif ("info:") in text : self.printToStdout("\033[1;34;1m", "\033[0m", *text) # highlight messgae
elif ("Error") in text : self.printToStdout("\033[1;31;1m", "\033[0m", *text) # highlight error
elif ("Warning:") in text : self.printToStdout("\033[1;31;1m", "\033[0m", *text) # highlight error
else: self.printToStdout("", "", *text) # other info
else:
self.printToStdout("", "", *text) # no colours available
def printToStdout(self, header, footer, *text):
"""Replaces print"""
sys.stdout.write(header)
for string in text:
#all to string
if not isinstance(string, str):
try:
string = str(string)
except: pass
try:
sys.stdout.write(string + " ")
except: pass
sys.stdout.write(footer + "\n")
################################################################
# MAIN , wx.App
def main():
    """Create the wx application and main window, then enter the event loop."""
    app = wx.App()
    window = MainFrame(None)
    window.Show()
    if DEBUG and showInspectionWindow: wx.lib.inspection.InspectionTool().Show() # DEBUG: open the wx widget-inspection tool
    app.MainLoop()
if __name__ == '__main__':
    # start the GUI only when executed as a script (not on import)
    main()
################################################################
| en | 0.444347 | #!/usr/bin/env python # -*- coding: utf-8 -*- Copyright (c) 2020, <NAME> aka KiLLA All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ################ # IMPORTS ################ # DEBUG # True / False # True / False # import widgets inspection tool ################################################################ ######################### MAIN WINDOW ########################## ################################ # INIT UI ################ # BASIC SETUP # print wx version to stdout # /wx.LANGUAGE_DEFAULT/ -> Windows hack! 
################ # DATA PROCESSING # DEFAULT CONSTANT VALUE > for fonts 5 bytes/pixels wide # pass self - main window ################ # WINDOW with OPTIONS & SETTINGS ################ # LAYOUT PANELS ################ # GLYPH WIDGET # Get screen size and select smaller modes for netbooks ################ # FONT WIDGET #OPTIONAL "#cc0033" "#a800a8" #FF0033 # OPTIONAL "#00cc99" "#00A8A8" #00FFCC #self.fontWidget.setActiveColours("#FFFFFF", ColourActiveSelected, ColourActiveHighlighted) ################ # BUTTON PANEL SIZER ################ # MOVE BUTTONS # ↑ # ↓ # ← # → ################ # ACTION BUTTONS ################ # INDICATOR PANEL # ascii hardcoded > gets changed with encoding selection # DEFAULT # DEFAULT # colours # OPTIONAL "#cc0033" "#a800a8" # OPTIONAL "#00cc99" "#00A8A8" # HARDCODED COLOUR # wx.FONTFAMILY_DEFAULT # DEFAULT "#00cc99" # set min width only to prevent too small click area # DEFAULT "#cc0033" # set min width only to prevent too small click area #self.indicatorSizer.SetMinSize ((160, 32)) # https://wxpython.org/Phoenix/docs/html/wx.Sizer.html#wx.Sizer.GetMinSize ################ # LEFT PANEL SIZER ################ # TEXT FIELD # DEFAULT mode > Smart # another windows hack -> wx.TE_RICH # wx.FONTFAMILY_TELETYPE -> monospace # EVT_TEXT_ENTER, EVT_TEXT, wx.EVT_CHAR ################ # MAIN SIZER ################ # WINDOW RELATED STUFF #self.mainSizer.Fit(self) # make sizer resize parent window to best size # optional, hardcoded size looks better, on 600px screen height window size fits itself ################################################################ ########################### METHODS ############################ ################################ # EVENTS ################ # BUTTON EVENTS Process events of all buttons including Options window # recognize button and performa action # copy data #self.glyphWidget.data = list(self.clipboard) # copy data with no check - future use with SetData # add missing data if font byte width changed between 
copy/paste # set zero # position next to buttonsPanel # # DESTRUCTIVE # DESTRUCTIVE #self.glyphWidget.data = self.glyphWidget.data[1:] + [self.glyphWidget.data[0]] # NONDESTRUCTIVE # DESTRUCTIVE #self.glyphWidget.data = [self.glyphWidget.data[-1]] + self.glyphWidget.data[:-1] # NONDESTRUCTIVE # DESTRUCTIVE ################ # MOUSE EVENTS onMouseDown-parent onMouseUp-parent onMouseMove-parent onFontWidgetMouseUp # # load glyph image onFontWidgetMouseMove # Hover label refresh #self.fontWidget.Refresh() # unnecessary refresh > handled in FontWidget # Empty Toggle mode of indicator panel #self.selectedLabel.Refresh() # Update ToolTips - those are tied to mode selected ################ # TEXT FIELD EVENTS TextCtrl changed event, parse new data #self.debugInfo("ui", "Event", "OnKeyTyped") # ultra verbose while text updates # get string from TextCtrl # process import # <-------------------------------------------------------- import -> parse data # SET FONT WIDGET SIZE #self.debugInfo("Text event skip!") # very verbose while TextCtrl updates ################################ # SETTERS AND GETTERS Sets byte width to all widgets using it to match data # FontWidget For purpose of Options window - returns list of dicts Set mode of font widget Returns int mode of font widget # IndicatorPanel Set colours of indicator panel For purpose of Options window - returns list of dicts Set indicator panel labels encoding # Selected label refresh # Update ToolTips # updatae dict with new value # updatae dict with new value #self.selectedLabel.Refresh() # Hover label refresh # TextCtrl For purpose of Options window - returns list of dicts Set TextCtrl Mode Returns int TextCtrl Mode # GlyphWidget For purpose of Options window - returns list of dicts Set mode of Font Panel Returns int mode of glyph widget ################################ # DATA UPDATERS Format data - returns string according to mode selected # return space as placeholder if no data Param int Returns str # UPDATES SLECTED 
GLYPH IN BOTH TEXTFIELD AND PARSED DATA # need at least one glyph # if no glyphs on list - nothing to update # move textfield cursor to first byte of selected glyph # # Simple method - no colours #self.textCtrl.ShowPosition(0) # move to start, append leaves cursor at the end # move to selected position + add offset #self.textCtrl.ShowPosition(0) # move to start, append leaves cursor at the end # move to selected position + add offset # FASTEST - most preferred way Fastest method, -> newline chars must be fixed before # optional bg: wx.NullColour # OPTIONAL Super SLOW - most featured Fully recreates the textfield, every single value can have its colour depending on state -> using two states, futureproof # append data from start of textfield up to the beginning of parsedText which is startOffset # # append start of textfield - before the hex values # Select colour by item state # wx.NullColour # # append rest of string # append rest of textfield ################################ # WIDGET IMAGE DATA LOADERS # return # # return # ################################ # MISC. OTHER Returns str path to launch script # return dir containing this file Prints debug messages to stdout # COLOURS NOT WORKING ON WINDOWS 7 # highlight event # highlight messgae # highlight error # highlight error # other info # no colours available Replaces print #all to string ################################################################ # MAIN , wx.App # DEBUG ################################################################ | 1.122421 | 1 |
berliner/utils/machine.py | hypergravity/berliner | 4 | 6624137 | import numpy as np
from emcee import EnsembleSampler
from .ls import wls_simple
class TGMMachine():
    """Kernel-smoothing predictor over a stacked isochrone table.

    For a test point in (Teff, logg, [M/H]_ini) space it returns the
    Gaussian-kernel-weighted mean of the requested columns, weights being
    the table's own weight column times a Gaussian in TGM distance.
    """

    def __init__(self, isoc_stacked, tgm_cols=("teff", "logg", "_mhini"),
                 tgm_sigma=(100, 0.2, 0.1), w_sigma=(100, 0.2, 0.1),
                 pred_cols=("teff", "logg"), wcol="w"):
        """ the input should be a stacked table of isochrones """
        # NOTE(review): isoc_stacked must support column selection and
        # .to_pandas() - presumably an astropy Table; confirm with callers.
        self.data = isoc_stacked  # stacked isochrones
        self.tgm = np.array(isoc_stacked[tgm_cols].to_pandas())  # (n, 3) TGM array
        self.tgm_sigma = np.array(tgm_sigma).reshape(1, -1)  # kernel widths for prediction
        self.pred_array = np.array(
            self.data[pred_cols].to_pandas())  # quantities to predict
        self.w = isoc_stacked[wcol].data  # per-row weight array
        self.w_sigma = np.array(w_sigma).reshape(1, -1)  # (wider) kernel widths for the weight sum

    def predict(self, test_tgm):
        """Return (weighted-mean prediction, smoothed total weight) at test_tgm.

        test_tgm -- sequence of 3 values matching tgm_cols.
        """
        test_tgm = np.array(test_tgm).reshape(1, -1)
        # Gaussian kernel weights in TGM space, scaled by the table weights
        test_w = self.w * np.exp(-0.5 * np.sum(((self.tgm - test_tgm) / self.tgm_sigma) ** 2., axis=1))
        pred_result = np.sum(self.pred_array * test_w.reshape(-1, 1), axis=0) / np.sum(test_w)
        # total weight recomputed with the (typically wider) w_sigma kernel
        test_w = self.w * np.exp(-0.5 * np.sum(((self.tgm - test_tgm) / self.w_sigma) ** 2., axis=1))
        return pred_result, np.sum(test_w)
class SED2TG():
    """Estimate p = [Teff, logg, Av, DM] from an observed SED (+ parallax).

    r : callable interpolator (Regli-like); r([teff, logg]) returns the model
        SED with the ln-prior appended as the last element.
    Alambda : per-band extinction coefficients A_lambda/Av.
    p_bounds : parameter bounds forwarded to the MCMC cost function.
    phot_bands : band names (bookkeeping only).
    """

    def __init__(self, r, Alambda, p_bounds=None, phot_bands=None):
        # bug fix: the original default was a mutable list (phot_bands=[]),
        # shared across all instances; None + per-instance fallback is safe
        self.r = r
        self.Alambda = Alambda
        self.phot_bands = [] if phot_bands is None else phot_bands
        self.p_bounds = p_bounds

    def predict(self, *args, **kwargs):
        """Run the sampler; return (median, std) of the flattened chain."""
        sampler = self.runsample(*args, **kwargs)
        return np.median(sampler.flatchain, axis=0), np.std(sampler.flatchain, axis=0)

    def runsample(self, sed_obs, sed_obs_err, vpi_obs, vpi_obs_err,
                  Lvpi=1.0, Lprior=1.0, nsteps=(1000, 1000, 2000), p0try=None):
        """Run emcee in len(nsteps) stages and return the final sampler.

        p0try -- iterable of trial [Teff, logg, ...] positions; its length
        sets the number of walkers.  Between stages the walkers are re-drawn
        (random_p) and the chain is reset, so only the last stage survives.
        """
        ndim = 4  # [Teff, logg, Av, DM]
        nwalkers = len(p0try)  # number of chains
        for i in range(len(nsteps)):
            if i == 0:
                # stage 0: build the sampler and refine the trial positions
                sampler = EnsembleSampler(nwalkers, ndim, costfun,
                                          args=(self.r, self.p_bounds,
                                                self.Alambda, sed_obs,
                                                sed_obs_err, vpi_obs,
                                                vpi_obs_err, Lvpi, Lprior))
                # OLS-guess Av and DM for each trial (Teff, logg)
                p0try = np.array([initial_guess(_, self.r, self.Alambda, sed_obs, sed_obs_err) for _ in p0try])
                sampler.run_mcmc(p0try, nsteps[i])
            else:
                # NOTE(review): random_p is not defined/imported in this
                # module's visible scope - confirm it exists at runtime
                p_rand = random_p(sampler, nloopmax=1000, method="mle",
                                  costfun=costfun, args=(self.r, self.p_bounds,
                                                         self.Alambda, sed_obs,
                                                         sed_obs_err, vpi_obs,
                                                         vpi_obs_err,
                                                         Lvpi, Lprior))
                sampler.reset()
                sampler.run_mcmc(p_rand, nsteps[i])
        return sampler

    def grid_search(self, test_sed_obs, test_sed_obs_err=None,
                    test_vpi_obs=None, test_vpi_obs_err=None,
                    Lvpi=1.0, Lprior=1.0, sed_err_typical=0.1,
                    cost_order=2, av_llim=0., return_est=False):
        """Grid estimate of p plus the best-fit SED's RMSE.

        Returns (p_mle, p_mean, p_std, sed_rmse); with return_est=True the
        raw per-node estimates from grid_search2 are returned instead.
        """
        result = grid_search2(self.r, self.Alambda,
                              test_sed_obs, test_sed_obs_err,
                              test_vpi_obs, test_vpi_obs_err,
                              Lvpi, Lprior, sed_err_typical,
                              cost_order, av_llim, return_est)
        if return_est:
            # bug fix: grid_search2 returns 6 values in this mode; the
            # original's unconditional 3-way unpack would have crashed
            return result
        p_mle, p_mean, p_std = result
        # reconstruct the mean-solution model SED and its RMSE vs observation
        sed_mean = self.r(p_mean[:2])[:-1] + self.Alambda * p_mean[2] + p_mean[3]
        sed_rmse = np.sqrt(np.nanmean(np.square(sed_mean - test_sed_obs)))
        return p_mle, p_mean, p_std, sed_rmse
def grid_search2(r2, Alambda,
                 test_sed_obs, test_sed_obs_err=None,
                 test_vpi_obs=None, test_vpi_obs_err=None,
                 Lvpi=1.0, Lprior=1.0, sed_err_typical=0.1, cost_order=2,
                 av_llim=0., return_est=False):
    """Grid search over (Teff, logg) with per-node WLS estimates of Av, DM.

    Parameters
    ----------
    r2 : Regli-like interpolator; .values is (n_node, n_band+1) with the
        ln-prior in the last column, .flats is (n_node, 2) = (Teff, logg).
    Alambda : per-band extinction coefficients.
    test_sed_obs, test_sed_obs_err : observed SED and its errors.
    test_vpi_obs, test_vpi_obs_err : observed parallax and error (optional).
    Lvpi, Lprior : weights of the parallax and prior terms.
    sed_err_typical : if not None, overrides the per-band SED errors.
    cost_order : exponent of the SED residual cost (2 -> chi-square-like).
    av_llim : nodes with estimated Av below this get zero probability.
    return_est : if True, return raw per-node estimates instead of summaries.

    Returns
    -------
    (p_mle, p_mean, p_std) with p = [Teff, logg, Av, DM]; or, when
    return_est=True, (t_est, g_est, av_est, dm_est, cost_sed, lnprob).
    (The original code had this docstring misplaced after the first comment.)
    """
    # select usable bands
    if test_sed_obs_err is None:
        ind_good_band = np.isfinite(test_sed_obs)
    else:
        ind_good_band = np.isfinite(test_sed_obs) & (test_sed_obs_err > 0)
    n_good_band = np.sum(ind_good_band)
    if n_good_band < 5:
        # too few bands to constrain 4 parameters -> all-NaN results
        return [np.full((4,), np.nan) for i in range(3)]
    # ln-prior is stored in the last column of the interpolation values
    lnprior = r2.values[:, -1]
    # (Teff, logg) grid nodes
    t_est, g_est = r2.flats.T
    # model SEDs on the grid, observed SED and errors (good bands only)
    sed_mod = r2.values[:, :-1][:, ind_good_band]
    sed_obs = test_sed_obs[ind_good_band]
    if sed_err_typical is not None:
        sed_obs_err = np.ones_like(sed_obs, float) * sed_err_typical
    else:
        sed_obs_err = test_sed_obs_err[ind_good_band]
    # per-node WLS estimates of Av and DM
    av_est, dm_est = guess_avdm_wls(
        sed_mod, sed_obs, sed_obs_err, Alambda[ind_good_band])
    # SED residual cost at each node
    res_sed = sed_mod + av_est.reshape(-1, 1) * Alambda[ind_good_band] + dm_est.reshape(-1, 1) - sed_obs
    if sed_err_typical is not None:
        cost_sed = np.nansum(np.abs(res_sed / sed_err_typical) ** cost_order, axis=1)
    else:
        cost_sed = np.nansum(np.abs(res_sed / sed_obs_err) ** cost_order, axis=1)
    lnprob = -0.5 * cost_sed
    # parallax term: vpi[mas] = 10**(2 - 0.2*DM)
    if test_vpi_obs is not None and test_vpi_obs_err is not None and Lvpi > 0:
        vpi_mod = 10 ** (2 - 0.2 * dm_est)
        cost_vpi = ((vpi_mod - test_vpi_obs) / test_vpi_obs_err) ** 2.
        if np.all(np.isfinite(cost_vpi)):
            lnprob -= 0.5 * cost_vpi
    # prior term
    if Lprior > 0:
        lnprob += lnprior * Lprior
    # eliminate nodes with non-physical (too small / negative) Av
    lnprob[av_est < av_llim] = -np.inf
    lnprob -= np.nanmax(lnprob)
    if return_est:
        return t_est, g_est, av_est, dm_est, cost_sed, lnprob
    # normalized posterior over grid nodes
    prob = np.exp(lnprob)
    prob /= np.sum(prob)
    # MLE = node with maximum posterior
    imax = np.argmax(lnprob)
    p_mle = np.array([t_est[imax], g_est[imax], av_est[imax], dm_est[imax]])
    # posterior-weighted means
    t_mean = np.sum(t_est * prob)
    g_mean = np.sum(g_est * prob)
    av_mean = np.sum(av_est * prob)
    dm_mean = np.sum(dm_est * prob)
    p_mean = np.array([t_mean, g_mean, av_mean, dm_mean])
    # posterior-weighted standard deviations
    # bug fix: the original returned the weighted *variance* under the name
    # p_std (no square root); take sqrt so the values really are std devs
    t_std = np.sqrt(np.sum((t_est - t_mean) ** 2 * prob))
    g_std = np.sqrt(np.sum((g_est - g_mean) ** 2 * prob))
    av_std = np.sqrt(np.sum((av_est - av_mean) ** 2 * prob))
    dm_std = np.sqrt(np.sum((dm_est - dm_mean) ** 2 * prob))
    p_std = np.array([t_std, g_std, av_std, dm_std])
    return p_mle, p_mean, p_std
def model_sed_abs(x, r):
    """Absolute model SED at parameter position ``x``.

    Interpolates with ``r`` (Regli-like callable) at the (teff, logg)
    position ``x[:2]`` and drops the trailing lnprior element.
    """
    position = x[:2]
    prediction = r(position)
    # last element of the interpolated vector is lnprior, not a band
    return prediction[:-1]
def initial_guess(x, r, Alambda, sed_obs, sed_obs_err):
    """ initial guess of Av and DM with OLS method

    Given a starting position x = [teff, logg, ...], interpolate the
    absolute model SED with `r`, then solve Av and DM by ordinary least
    squares.  Returns [teff, logg, av_est, dm_est] as an MCMC start point.
    """
    # select good bands: finite observation and positive error only
    ind_good_band = np.isfinite(sed_obs) & (sed_obs_err > 0)
    sed_mod = model_sed_abs(x, r).reshape(1, -1)[:, ind_good_band]
    sed_obs = sed_obs.reshape(1, -1)[:, ind_good_band]
    # solve Av and DM
    av_est, dm_est = guess_avdm_ols(sed_mod, sed_obs, Alambda[ind_good_band])
    # neg Av --> 0.001
    # NOTE(review): av_est is a length-1 array here; this comparison relies
    # on NumPy size-1 truthiness -- confirm inputs stay single-sample
    if av_est <= 0:
        av_est = 0.001
    return np.array([x[0], x[1], av_est, dm_est])
def guess_avdm_ols(sed_mod, sed_obs, Alambda):
    """Ordinary-least-squares solution for extinction Av and distance modulus DM.

    Solves d_mag = Av * Alambda + DM in matrix form, where
    d_mag = sed_obs - sed_mod per band.

    Parameters
    ----------
    sed_mod : array_like, shape (1, n_band)
        Model (absolute) SED magnitudes.
    sed_obs : array_like, shape (1, n_band)
        Observed SED magnitudes.
    Alambda : array_like, shape (n_band,)
        Extinction coefficients A_lambda/Av per band.

    Returns
    -------
    av_est, dm_est : ndarray, shape (1,)
        OLS estimates of Av and DM.
    """
    sed_mod = np.asarray(sed_mod)
    sed_obs = np.asarray(sed_obs)
    assert sed_mod.ndim == 2
    assert sed_obs.ndim == 2
    # design matrix: one column for Av (Alambda) and one for DM (constant)
    X = np.array([Alambda, np.ones_like(Alambda)]).T
    y = (sed_obs - sed_mod).T
    # np.linalg.lstsq replaces the deprecated np.matrix normal-equation
    # solution inv(X'X) X' y and is numerically more stable
    coef, *_ = np.linalg.lstsq(X, y, rcond=None)
    av_est, dm_est = coef
    return av_est, dm_est
def guess_avdm_wls(sed_mod, sed_obs, sed_obs_err, Alambda):
    """Weighted-least-squares estimate of Av and DM for each model SED.

    Builds the [Alambda, 1] design matrix (columns for Av and DM) and one
    residual column per model, then delegates to ``wls_simple``.
    """
    sed_mod = np.array(sed_mod)
    sed_obs = np.array(sed_obs)
    sed_obs_err = np.array(sed_obs_err)
    assert sed_mod.ndim == 2
    # design matrix: Av coefficient column and constant (DM) column
    design = np.array([Alambda, np.ones_like(Alambda, float)]).T
    # magnitude residuals, one column per model
    residuals = (sed_obs.reshape(1, -1) - sed_mod).T
    # solve Av & DM with weighted least squares
    av_est, dm_est = wls_simple(design, residuals, sed_obs_err)
    return av_est, dm_est
def costfun(x, r, p_bounds, Alambda, sed_obs, sed_obs_err, vpi_obs, vpi_obs_err, Lvpi, Lprior):
    """Log-probability for the MCMC sampler.

    Returns
    -------
    -0.5*(chi2_sed + chi2_vpi) + lnprior*Lprior, with the parallax term
    included only when Lvpi > 0 and it evaluates finite; -inf outside the
    bounds or when the SED chi-square is not finite.
    """
    # reject proposals outside the allowed parameter box
    if p_bounds is not None and not check_bounds(x, p_bounds):
        return -np.inf
    # unpack p = [teff, logg, Av, DM]
    theta, av, dm = x[:2], x[2], x[3]
    prediction = r(theta)
    ln_prior = prediction[-1]
    # redden and shift the absolute model SED to apparent magnitudes
    sed_pred = prediction[:-1] + Alambda * av + dm
    good = np.isfinite(sed_obs) & (sed_obs_err > 0)
    resid = (sed_obs - sed_pred) / sed_obs_err
    chi2_sed = np.nansum((resid ** 2.)[good])
    if not np.isfinite(chi2_sed):
        return -np.inf
    total_chi2 = chi2_sed
    if Lvpi > 0:
        # DM -> model parallax in mas
        vpi_pred = 10 ** (2 - 0.2 * dm)
        chi2_vpi = ((vpi_obs - vpi_pred) / vpi_obs_err) ** 2.
        if np.isfinite(chi2_vpi):
            total_chi2 = chi2_sed + chi2_vpi
    return -0.5 * total_chi2 + ln_prior * Lprior
def mcostfun(*args):
    """Negated :func:`costfun`, suitable for scipy-style minimizers."""
    value = costfun(*args)
    return -value
def generate_p(p0, pstd, shrink=0.5):
    """Draw a Gaussian-perturbed parameter vector around ``p0``.

    The per-parameter scale is ``shrink * pstd``.
    """
    noise = np.random.randn(len(p0))
    return p0 + shrink * pstd * noise
def check_bounds(p, p_bounds=None):
    """Return True when every parameter lies strictly inside its bounds.

    ``p_bounds`` is an (n_dim, 2) array of (low, high) pairs; ``None``
    disables the check (always True).  Values equal to a bound fail.
    """
    if p_bounds is None:
        return True
    bounds = np.array(p_bounds)
    values = np.array(p)
    # same comparisons as touching-or-outside, then negated
    violates = np.any(values <= bounds[:, 0]) or np.any(values >= bounds[:, 1])
    return not violates
def random_p(sampler, nloopmax=1000, method="mle", costfun=None, args=()):
    """ given a sampler, generate new random p

    Re-seeds walker positions around the chain's MLE (or median) point,
    keeping only draws for which ``costfun`` is finite, until one position
    per walker has been collected.

    Raises
    ------
    ValueError
        If nloopmax draws are exhausted before n_walkers valid points are
        found.
    """
    n_walkers, _, n_dim = sampler.chain.shape
    # MLE p
    if method == "mle":
        p_mle = sampler.flatchain[np.nanargmax(sampler.flatlnprobability)]
    else:
        p_mle = np.median(sampler.flatchain, axis=0)
    # STD p
    p_std = np.std(sampler.flatchain, axis=0)
    # generate new p: rejection-sample until every walker has a valid start
    p_rand = []
    for i in range(nloopmax):
        p_new = generate_p(p_mle, p_std, shrink=0.6)
        if not np.isfinite(costfun(p_new, *args)):
            continue
        else:
            p_rand.append(p_new)
        # ran out of attempts before filling all walkers
        if i == nloopmax - 1:
            raise (ValueError("Unable to get good random ps..."))
        if len(p_rand) >= n_walkers:
            break
    if len(p_rand) == n_walkers:
        return np.array(p_rand)
    else:
        raise (ValueError("random_p failed!"))
# def guess_avdm(sed_mod, sed_obs, Alambda):
# """ guess Av and DM with OLS method
# Parameters
# ----------
# sed_mod:
# (n_band, ) array
# sed_obs:
# (n_band, ) array
# """
#
# n_band = sed_obs.size
# # X = [[Alambda_i, 1], [], ...]
# X = np.matrix(np.ones((n_band, 2), float))
# X[:, 0] = Alambda[:, None]
# # Y = [[d_sed_i], [], ...]
# Y = np.matrix((sed_obs - sed_mod).reshape(-1, 1))
#
# # OLS solution
# av_ols, dm_ols = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y))
# #av_est, dm_est = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y))
#
# return np.array([av_ols, dm_ols])
def general_search(params, sed_mod, lnprior,
                   Alambda,
                   test_sed_obs, test_sed_obs_err=None,
                   test_vpi_obs=None, test_vpi_obs_err=None,
                   Lvpi=1.0, Lprior=1.0, sed_err_typical=0.1, cost_order=2,
                   av_llim=0., debug=False):
    """
    when p = [T, G, Av, DM],
    given a set of SED,
    find the best T, G and estimate the corresponding Av and DM

    Parameters
    ----------
    params : ndarray, shape (n_mod, n_param)
        Stellar-parameter grid, one row per model.
    sed_mod : ndarray, shape (n_mod, n_band)
        Absolute model SED for each grid point.
    lnprior : ndarray, shape (n_mod,)
        Log-prior evaluated on the grid.
    Alambda : ndarray, shape (n_band,)
        Extinction coefficients A_lambda/Av per band.
    test_sed_obs, test_sed_obs_err : ndarray
        Observed SED and its error; non-finite bands (and, when errors are
        given, non-positive errors) are dropped.
    test_vpi_obs, test_vpi_obs_err : float, optional
        Observed parallax [mas] and its error.
    Lvpi, Lprior : float
        Weights of the parallax and prior terms.
    sed_err_typical : float or None
        If set, overrides the per-band errors with one typical value.
    cost_order : int
        Order of the residual norm (2 = chi-square).
    av_llim : float
        Models whose WLS Av falls below this limit are rejected.
    debug : bool
        If True, return intermediate estimates instead of the summary dict.
    """
    # select good bands
    if test_sed_obs_err is None:
        # all bands will be used
        ind_good_band = np.isfinite(test_sed_obs)
    else:
        ind_good_band = np.isfinite(test_sed_obs) & (test_sed_obs_err > 0)
    n_good_band = np.sum(ind_good_band)
    if n_good_band < 5:
        return [np.ones((4,), ) * np.nan for i in range(3)]
    # lnprior
    # lnprior = r2.values[:, -1]
    # T & G grid
    # t_est, g_est = r2.flats.T
    # params
    # model SED
    # sed_mod = r2.values[:, :-1][:, ind_good_band]
    sed_mod = sed_mod[:, ind_good_band]
    # observed SED
    sed_obs = test_sed_obs[ind_good_band]
    # observed SED error
    if sed_err_typical is not None:
        sed_obs_err = np.ones_like(sed_obs, float) * sed_err_typical
    else:
        sed_obs_err = test_sed_obs_err[ind_good_band]
    # WLS to guess Av and DM (one analytic solution per grid model)
    av_est, dm_est = guess_avdm_wls(
        sed_mod, sed_obs, sed_obs_err, Alambda[ind_good_band])
    # cost(SED)
    res_sed = sed_mod + av_est.reshape(-1, 1) * Alambda[
        ind_good_band] + dm_est.reshape(-1, 1) - sed_obs
    if sed_err_typical is not None:
        cost_sed = np.nansum(np.abs(res_sed / sed_err_typical) ** cost_order,
                             axis=1)
    else:
        cost_sed = np.nansum(np.abs(res_sed / sed_obs_err) ** cost_order,
                             axis=1)
    lnprob = -0.5 * cost_sed
    # cost(VPI): DM -> parallax [mas]
    if test_vpi_obs is not None and test_vpi_obs_err is not None and Lvpi > 0:
        vpi_mod = 10 ** (2 - 0.2 * dm_est)
        cost_vpi = ((vpi_mod - test_vpi_obs) / test_vpi_obs_err) ** 2.
        if np.all(np.isfinite(cost_vpi)):
            lnprob -= 0.5 * cost_vpi
    # lnprob = cost(SED) + cost(VPI) + prior
    if Lprior > 0:
        lnprob += lnprior * Lprior
    # eliminate neg Av
    lnprob[av_est < av_llim] = -np.inf
    lnprob -= np.nanmax(lnprob)
    if debug:
        return params, av_est, dm_est, cost_sed, lnprob
    # normalization
    prob = np.exp(lnprob)
    prob /= np.sum(prob)
    # weighted mean
    ind_mle = np.argmax(lnprob)
    av_mle = av_est[ind_mle]
    dm_mle = dm_est[ind_mle]
    p_mle = params[ind_mle]
    av_mean = np.sum(av_est * prob)
    dm_mean = np.sum(dm_est * prob)
    p_mean = np.sum(params * prob.reshape(-1, 1), axis=0)
    # NOTE(review): the *_std values below are weighted second moments
    # (variances) -- no square root is taken; confirm downstream expectation
    av_std = np.sum((av_est - av_mean) ** 2 * prob)
    dm_std = np.sum((dm_est - dm_mean) ** 2 * prob)
    p_std = np.sum((params - p_mean) ** 2 * prob.reshape(-1, 1), axis=0)
    p_mle = np.hstack([p_mle, av_mle, dm_mle])
    p_mean = np.hstack([p_mean, av_mean, dm_mean])
    p_std = np.hstack([p_std, av_std, dm_std])
    rms_sed_mle = np.sqrt(np.nanmean(res_sed[ind_mle] ** 2.))
    rms_sed_min = np.min(np.sqrt(np.nanmean(res_sed ** 2., axis=1)))
    return dict(
        p_mle=p_mle,
        p_mean=p_mean,
        p_std=p_std,
        rmsmle=rms_sed_mle,
        rmsmin=rms_sed_min,
        ind_mle=ind_mle,
        n_good=np.sum(ind_good_band)
    )
def general_search_v2(params, sed_mod, lnprior, Alambda,
                      sed_obs, sed_obs_err=0.1,
                      vpi_obs=None, vpi_obs_err=None,
                      Lvpi=1.0, Lprior=1.0,
                      cost_order=2, av_llim=-0.001, debug=False):
    """Grid search over stellar parameters with analytic Av/DM estimation.

    For p = [teff, logg, [M/H], Av, DM] and theta = [teff, logg, [M/H]],
    given an observed SED (and optionally a parallax), evaluate the
    posterior on the model grid; Av and DM are solved per model by WLS.

    Parameters
    ----------
    params : ndarray, shape (n_mod, n_param)
        Grid of stellar parameters (theta), one row per model.
    sed_mod : ndarray, shape (n_mod, n_band)
        Absolute model SED for each grid point.
    lnprior : ndarray, shape (n_mod,)
        Log-prior evaluated on the grid.
    Alambda : ndarray, shape (n_band,)
        Extinction coefficients A_lambda/Av per band.
    sed_obs : ndarray, shape (n_band,)
        Observed SED (apparent magnitudes).
    sed_obs_err : float or ndarray
        Observational error; a scalar is broadcast to all bands.
    vpi_obs, vpi_obs_err : float, optional
        Observed parallax [mas] and its error.
    Lvpi, Lprior : float
        Weights of the parallax and prior terms.
    cost_order : int
        Order of the SED residual norm (2 = chi-square).
    av_llim : float
        Models whose WLS Av falls below this limit are rejected.
    debug : bool
        If True, return the intermediate quantities instead.

    Returns
    -------
    dict
        p_mle / p_mean / p_err (each [theta..., Av, DM]) plus residual RMS
        statistics, or a list of NaN arrays when fewer than 4 usable bands
        remain.
    """
    n_band = len(sed_obs)
    n_mod = sed_mod.shape[0]
    # cope with scalar sed_obs_err
    # BUGFIX: np.float was removed in NumPy 1.24; accept Python/NumPy scalars
    if isinstance(sed_obs_err, (int, float, np.floating)):
        sed_obs_err = np.ones_like(sed_obs, dtype=float) * sed_obs_err
    # select good bands
    ind_good_band = np.isfinite(sed_obs) & (sed_obs_err > 0)
    n_good_band = np.sum(ind_good_band)
    if n_good_band < 4:
        # n_good_band = 3 gives a unique (degenerate) solution,
        # so at least 4 bands are required
        return [np.ones((4,), ) * np.nan for i in range(3)]
    # use a subset of bands
    sed_mod_select = sed_mod[:, ind_good_band]
    # observed SED
    sed_obs_select = sed_obs[ind_good_band]
    sed_obs_err_select = sed_obs_err[ind_good_band]
    # extinction coefs
    Alambda_select = Alambda[ind_good_band]
    # WLS to guess Av and DM, one analytic solution per model
    av_est, dm_est = guess_avdm_wls(
        sed_mod_select, sed_obs_select, sed_obs_err_select, Alambda_select)
    # cost(SED)
    res_sed = sed_mod_select + av_est.reshape(-1, 1) * Alambda_select \
        + dm_est.reshape(-1, 1) - sed_obs_select
    lnprob_sed = -0.5 * np.nansum(
        np.abs(res_sed / sed_obs_err_select) ** cost_order, axis=1)
    # cost(VPI): DM -> parallax [mas]
    if vpi_obs is not None and vpi_obs_err is not None and Lvpi > 0:
        vpi_mod = 10 ** (2 - 0.2 * dm_est)
        lnprob_vpi = -0.5 * ((vpi_mod - vpi_obs) / vpi_obs_err) ** 2.
    else:
        lnprob_vpi = np.zeros((n_mod,), dtype=float)
    lnprob_vpi = np.where(np.isfinite(lnprob_vpi), lnprob_vpi, 0) * Lvpi
    # prior term
    # BUGFIX: the original left lnprob_prior undefined (NameError) whenever
    # Lprior <= 0; default it to zero in that case
    if Lprior > 0:
        lnprob_prior = lnprior * Lprior
    else:
        lnprob_prior = np.zeros((n_mod,), dtype=float)
    # posterior probability
    lnpost = lnprob_sed + lnprob_vpi + lnprob_prior
    # eliminate neg Av
    lnpost[av_est < av_llim] = -np.inf
    lnpost -= np.nanmax(lnpost)
    # for debugging the code
    if debug:
        return dict(params=params,
                    av_est=av_est,
                    dm_est=dm_est,
                    lnprob_sed=lnprob_sed,
                    lnprob_vpi=lnprob_vpi,
                    lnprior=lnprior)
    # normalization: L0/L1/L2 are the 0th/1st/2nd posterior moments
    post = np.exp(lnpost)
    L0 = np.sum(post)
    L1_av = np.sum(av_est * post)
    L1_dm = np.sum(dm_est * post)
    L1_p = np.sum(params * post.reshape(-1, 1), axis=0)
    L2_av = np.sum(av_est ** 2 * post)
    L2_dm = np.sum(dm_est ** 2 * post)
    L2_p = np.sum(params ** 2 * post.reshape(-1, 1), axis=0)
    sigma_av = np.sqrt(L2_av / L0 - L1_av ** 2 / L0 ** 2)
    sigma_dm = np.sqrt(L2_dm / L0 - L1_dm ** 2 / L0 ** 2)
    sigma_p = np.sqrt(L2_p / L0 - L1_p ** 2 / L0 ** 2)
    # MLE model (likelihood only, prior excluded)
    ind_mle = np.argmax(lnprob_sed + lnprob_vpi)
    p_mle = np.hstack([params[ind_mle], av_est[ind_mle], dm_est[ind_mle]])
    p_mean = np.hstack([L1_p / L0, L1_av / L0, L1_dm / L0])
    p_err = np.hstack([sigma_p, sigma_av, sigma_dm])
    rms_sed_mle = np.sqrt(np.nanmean(res_sed[ind_mle] ** 2.))
    rms_sed_min = np.min(np.sqrt(np.nanmean(res_sed ** 2., axis=1)))
    return dict(p_mle=p_mle,
                p_mean=p_mean,
                p_err=p_err,
                rmsmle=rms_sed_mle,
                rmsmin=rms_sed_min,
                ind_mle=ind_mle,
                n_good=np.sum(ind_good_band))
| import numpy as np
from emcee import EnsembleSampler
from .ls import wls_simple
class TGMMachine():
    """Kernel-smoothing interpolator over a stacked isochrone table.

    Predicts the quantities in ``pred_cols`` at an arbitrary
    (teff, logg, [M/H]) position by Gaussian-weighting every isochrone
    point in TGM space.
    """
    def __init__(self, isoc_stacked, tgm_cols=("teff", "logg", "_mhini"),
                 tgm_sigma=(100, 0.2, 0.1), w_sigma=(100, 0.2, 0.1),
                 pred_cols=("teff", "logg"), wcol="w"):
        """ the input should be a stacked table of isochrones

        Parameters
        ----------
        isoc_stacked : table
            Stacked isochrone table (astropy-Table-like; needs
            ``__getitem__`` with column tuples and ``.to_pandas()``).
        tgm_cols : tuple of str
            Column names of the (teff, logg, [M/H]) coordinates.
        tgm_sigma : tuple
            Gaussian kernel widths used for the prediction weights.
        w_sigma : tuple
            Wider kernel widths used for the smoothed weight estimate.
        pred_cols : tuple of str
            Columns to predict.
        wcol : str
            Name of the per-point weight column.
        """
        self.data = isoc_stacked  # stacked isochrones
        self.tgm = np.array(isoc_stacked[tgm_cols].to_pandas())  # TGM array
        self.tgm_sigma = np.array(tgm_sigma).reshape(1, -1)  # TGM sigma
        self.pred_array = np.array(
            self.data[pred_cols].to_pandas())  # Qs to predict
        self.w = isoc_stacked[wcol].data  # weight array
        self.w_sigma = np.array(w_sigma).reshape(1, -1)
    def predict(self, test_tgm):
        """ predict MLE of SED and weight at the given TGM position

        Returns the weight-averaged ``pred_cols`` values and the total
        (wider-kernel) weight at ``test_tgm``.
        """
        test_tgm = np.array(test_tgm).reshape(1, -1)
        # Gaussian kernel weights in TGM space, modulated by the point weights
        test_w = self.w * np.exp(-0.5 * np.sum(((self.tgm - test_tgm) / self.tgm_sigma) ** 2., axis=1))
        pred_result = np.sum(self.pred_array * test_w.reshape(-1, 1), axis=0) / np.sum(test_w)
        # smooth weight in a wider volume
        test_w = self.w * np.exp(-0.5 * np.sum(((self.tgm - test_tgm) / self.w_sigma) ** 2., axis=1))
        return pred_result, np.sum(test_w)
class SED2TG():
    """Estimate stellar parameters (Teff, logg) plus Av and DM from an SED.

    Wraps an interpolator ``r`` (maps (teff, logg) -> [absolute SED...,
    lnprior]) and offers an emcee MCMC sampler and a brute-force grid
    search.
    """
    def __init__(self, r, Alambda, p_bounds=None, phot_bands=None):
        """
        Parameters
        ----------
        r : callable
            Interpolator; ``r(theta)`` returns [model SED..., lnprior].
        Alambda : ndarray
            Extinction coefficients A_lambda/Av per band.
        p_bounds : array_like, optional
            Per-parameter (low, high) bounds used by the sampler.
        phot_bands : list, optional
            Names of the photometric bands (bookkeeping only here).
        """
        self.r = r
        self.Alambda = Alambda
        # BUGFIX: avoid the shared mutable default argument ([]) -- each
        # instance now gets its own fresh list
        self.phot_bands = [] if phot_bands is None else phot_bands
        self.p_bounds = p_bounds
    def predict(self, *args, **kwargs):
        """Run the MCMC sampler; return (median, std) of the flat chain."""
        sampler = self.runsample(*args, **kwargs)
        return np.median(sampler.flatchain, axis=0), np.std(sampler.flatchain, axis=0)
    def runsample(self, sed_obs, sed_obs_err, vpi_obs, vpi_obs_err,
                  Lvpi=1.0, Lprior=1.0, nsteps=(1000, 1000, 2000), p0try=None):
        """Sample p = [Teff, logg, Av, DM] with emcee.

        Runs ``len(nsteps)`` stages; after the first stage the walkers are
        re-seeded around the current MLE via ``random_p`` and the sampler
        is reset, so only the final stage's chain is kept.
        """
        ndim = 4  # 4 stands for [Teff, logg, Av, DM]
        nwalkers = len(p0try)  # number of chains
        for i in range(len(nsteps)):
            if i == 0:
                # initialize sampler
                sampler = EnsembleSampler(nwalkers, ndim, costfun,
                                          args=(self.r, self.p_bounds,
                                                self.Alambda, sed_obs,
                                                sed_obs_err, vpi_obs,
                                                vpi_obs_err, Lvpi, Lprior))
                # guess Av and DM for p0try
                p0try = np.array([initial_guess(_, self.r, self.Alambda, sed_obs, sed_obs_err) for _ in p0try])
                # run sampler
                pos, _, __ = sampler.run_mcmc(p0try, nsteps[i])
            else:
                # generate new p around the current MLE
                p_rand = random_p(sampler, nloopmax=1000, method="mle",
                                  costfun=costfun, args=(self.r, self.p_bounds,
                                                         self.Alambda, sed_obs,
                                                         sed_obs_err, vpi_obs,
                                                         vpi_obs_err,
                                                         Lvpi, Lprior))
                # reset sampler so the burn-in stages are discarded
                sampler.reset()
                # run at new p
                pos1, lnprob1, rstate1 = sampler.run_mcmc(p_rand, nsteps[i])
        return sampler
    def grid_search(self, test_sed_obs, test_sed_obs_err=None,
                    test_vpi_obs=None, test_vpi_obs_err=None,
                    Lvpi=1.0, Lprior=1.0, sed_err_typical=0.1,
                    cost_order=2, av_llim=0., return_est=False):
        """Brute-force search on the interpolation grid.

        Returns (p_mle, p_mean, p_std, sed_rmse) where sed_rmse is the RMS
        residual of the posterior-mean solution against the observed SED.
        """
        p_mle, p_mean, p_std = grid_search2(self.r, self.Alambda,
                                            test_sed_obs, test_sed_obs_err,
                                            test_vpi_obs, test_vpi_obs_err,
                                            Lvpi, Lprior, sed_err_typical,
                                            cost_order, av_llim, return_est)
        # reconstruct the mean-solution apparent SED and its RMS residual
        sed_mean = self.r(p_mean[:2])[:-1]+self.Alambda*p_mean[2]+p_mean[3]
        sed_rmse = np.sqrt(np.nanmean(np.square(sed_mean-test_sed_obs)))
        return p_mle, p_mean, p_std, sed_rmse
def grid_search2(r2, Alambda,
                 test_sed_obs, test_sed_obs_err=None,
                 test_vpi_obs=None, test_vpi_obs_err=None,
                 Lvpi=1.0, Lprior=1.0, sed_err_typical=0.1, cost_order=2,
                 av_llim=0., return_est=False):
    """
    when p = [T, G, Av, DM],
    given a set of SED,
    find the best T, G and estimate the corresponding Av and DM

    Parameters
    ----------
    r2 : object
        Interpolation grid (Regli-like, presumably): ``r2.values`` columns
        are [SED bands..., lnprior] and ``r2.flats`` holds the (T, G)
        nodes -- confirm against the Regli API.
    Alambda : ndarray
        Extinction coefficients A_lambda/Av per band.
    test_sed_obs, test_sed_obs_err : ndarray
        Observed SED and its error.
    test_vpi_obs, test_vpi_obs_err : float, optional
        Observed parallax [mas] and error.
    Lvpi, Lprior : float
        Weights of the parallax and prior terms.
    sed_err_typical : float or None
        If set, overrides per-band errors with one typical value.
    cost_order : int
        Order of the residual norm (2 = chi-square).
    av_llim : float
        Models whose WLS Av falls below this limit are rejected.
    return_est : bool
        If True, return the raw per-node estimates instead of summaries.
    """
    # select good bands
    if test_sed_obs_err is None:
        # all bands will be used
        ind_good_band = np.isfinite(test_sed_obs)
    else:
        ind_good_band = np.isfinite(test_sed_obs) & (test_sed_obs_err > 0)
    n_good_band = np.sum(ind_good_band)
    if n_good_band < 5:
        return [np.ones((4,),)*np.nan for i in range(3)]
    # lnprior
    lnprior = r2.values[:, -1]
    # T & G grid
    t_est, g_est = r2.flats.T
    # model SED
    sed_mod = r2.values[:, :-1][:, ind_good_band]
    # observed SED
    sed_obs = test_sed_obs[ind_good_band]
    # observed SED error
    if sed_err_typical is not None:
        sed_obs_err = np.ones_like(sed_obs, float)*sed_err_typical
    else:
        sed_obs_err = test_sed_obs_err[ind_good_band]
    # WLS to guess Av and DM (one analytic solution per grid node)
    av_est, dm_est = guess_avdm_wls(
        sed_mod, sed_obs, sed_obs_err, Alambda[ind_good_band])
    # cost(SED)
    res_sed = sed_mod + av_est.reshape(-1, 1) * Alambda[ind_good_band] + dm_est.reshape(-1, 1) - sed_obs
    if sed_err_typical is not None:
        cost_sed = np.nansum(np.abs(res_sed / sed_err_typical) ** cost_order, axis=1)
    else:
        cost_sed = np.nansum(np.abs(res_sed / sed_obs_err) ** cost_order, axis=1)
    lnprob = -0.5 * cost_sed
    # cost(VPI): DM -> parallax [mas]
    if test_vpi_obs is not None and test_vpi_obs_err is not None and Lvpi > 0:
        vpi_mod = 10 ** (2 - 0.2 * dm_est)
        cost_vpi = ((vpi_mod - test_vpi_obs) / test_vpi_obs_err) ** 2.
        if np.all(np.isfinite(cost_vpi)):
            lnprob -= 0.5*cost_vpi
    # lnprob = cost(SED) + cost(VPI) + prior
    if Lprior > 0:
        lnprob += lnprior * Lprior
    # eliminate neg Av
    lnprob[av_est < av_llim] = -np.inf
    lnprob -= np.nanmax(lnprob)
    if return_est:
        return t_est, g_est, av_est, dm_est, cost_sed, lnprob
    # normalization
    prob = np.exp(lnprob)
    prob /= np.sum(prob)
    # weighted mean
    av_mle = av_est[np.argmax(lnprob)]
    dm_mle = dm_est[np.argmax(lnprob)]
    t_mle = t_est[np.argmax(lnprob)]
    g_mle = g_est[np.argmax(lnprob)]
    av_mean = np.sum(av_est * prob)
    dm_mean = np.sum(dm_est * prob)
    t_mean = np.sum(t_est * prob)
    g_mean = np.sum(g_est * prob)
    # NOTE(review): the *_std values below are weighted second moments
    # (variances) -- no square root is taken; confirm downstream expectation
    av_std = np.sum((av_est - av_mean) ** 2 * prob)
    dm_std = np.sum((dm_est - dm_mean) ** 2 * prob)
    t_std = np.sum((t_est - t_mean) ** 2 * prob)
    g_std = np.sum((g_est - g_mean) ** 2 * prob)
    p_mle = np.array([t_mle, g_mle, av_mle, dm_mle])
    p_mean = np.array([t_mean, g_mean, av_mean, dm_mean])
    p_std = np.array([t_std, g_std, av_std, dm_std])
    return p_mle, p_mean, p_std
def model_sed_abs(x, r):
    """Absolute model SED at parameter position ``x``.

    Interpolates with ``r`` (Regli-like callable) at the (teff, logg)
    position ``x[:2]`` and drops the trailing lnprior element.
    """
    position = x[:2]
    prediction = r(position)
    # last element of the interpolated vector is lnprior, not a band
    return prediction[:-1]
def initial_guess(x, r, Alambda, sed_obs, sed_obs_err):
    """ initial guess of Av and DM with OLS method

    Given a starting position x = [teff, logg, ...], interpolate the
    absolute model SED with `r`, then solve Av and DM by ordinary least
    squares.  Returns [teff, logg, av_est, dm_est] as an MCMC start point.
    """
    # select good bands: finite observation and positive error only
    ind_good_band = np.isfinite(sed_obs) & (sed_obs_err > 0)
    sed_mod = model_sed_abs(x, r).reshape(1, -1)[:, ind_good_band]
    sed_obs = sed_obs.reshape(1, -1)[:, ind_good_band]
    # solve Av and DM
    av_est, dm_est = guess_avdm_ols(sed_mod, sed_obs, Alambda[ind_good_band])
    # neg Av --> 0.001
    # NOTE(review): av_est is a length-1 array here; this comparison relies
    # on NumPy size-1 truthiness -- confirm inputs stay single-sample
    if av_est <= 0:
        av_est = 0.001
    return np.array([x[0], x[1], av_est, dm_est])
def guess_avdm_ols(sed_mod, sed_obs, Alambda):
    """Ordinary-least-squares solution for extinction Av and distance modulus DM.

    Solves d_mag = Av * Alambda + DM in matrix form, where
    d_mag = sed_obs - sed_mod per band.

    Parameters
    ----------
    sed_mod : array_like, shape (1, n_band)
        Model (absolute) SED magnitudes.
    sed_obs : array_like, shape (1, n_band)
        Observed SED magnitudes.
    Alambda : array_like, shape (n_band,)
        Extinction coefficients A_lambda/Av per band.

    Returns
    -------
    av_est, dm_est : ndarray, shape (1,)
        OLS estimates of Av and DM.
    """
    sed_mod = np.asarray(sed_mod)
    sed_obs = np.asarray(sed_obs)
    assert sed_mod.ndim == 2
    assert sed_obs.ndim == 2
    # design matrix: one column for Av (Alambda) and one for DM (constant)
    X = np.array([Alambda, np.ones_like(Alambda)]).T
    y = (sed_obs - sed_mod).T
    # np.linalg.lstsq replaces the deprecated np.matrix normal-equation
    # solution inv(X'X) X' y and is numerically more stable
    coef, *_ = np.linalg.lstsq(X, y, rcond=None)
    av_est, dm_est = coef
    return av_est, dm_est
def guess_avdm_wls(sed_mod, sed_obs, sed_obs_err, Alambda):
    """Weighted-least-squares estimate of Av and DM for each model SED.

    Builds the [Alambda, 1] design matrix (columns for Av and DM) and one
    residual column per model, then delegates to ``wls_simple``.
    """
    sed_mod = np.array(sed_mod)
    sed_obs = np.array(sed_obs)
    sed_obs_err = np.array(sed_obs_err)
    assert sed_mod.ndim == 2
    # design matrix: Av coefficient column and constant (DM) column
    design = np.array([Alambda, np.ones_like(Alambda, float)]).T
    # magnitude residuals, one column per model
    residuals = (sed_obs.reshape(1, -1) - sed_mod).T
    # solve Av & DM with weighted least squares
    av_est, dm_est = wls_simple(design, residuals, sed_obs_err)
    return av_est, dm_est
def costfun(x, r, p_bounds, Alambda, sed_obs, sed_obs_err, vpi_obs, vpi_obs_err, Lvpi, Lprior):
    """Log-probability for the MCMC sampler.

    Returns
    -------
    -0.5*(chi2_sed + chi2_vpi) + lnprior*Lprior, with the parallax term
    included only when Lvpi > 0 and it evaluates finite; -inf outside the
    bounds or when the SED chi-square is not finite.
    """
    # reject proposals outside the allowed parameter box
    if p_bounds is not None and not check_bounds(x, p_bounds):
        return -np.inf
    # unpack p = [teff, logg, Av, DM]
    theta, av, dm = x[:2], x[2], x[3]
    prediction = r(theta)
    ln_prior = prediction[-1]
    # redden and shift the absolute model SED to apparent magnitudes
    sed_pred = prediction[:-1] + Alambda * av + dm
    good = np.isfinite(sed_obs) & (sed_obs_err > 0)
    resid = (sed_obs - sed_pred) / sed_obs_err
    chi2_sed = np.nansum((resid ** 2.)[good])
    if not np.isfinite(chi2_sed):
        return -np.inf
    total_chi2 = chi2_sed
    if Lvpi > 0:
        # DM -> model parallax in mas
        vpi_pred = 10 ** (2 - 0.2 * dm)
        chi2_vpi = ((vpi_obs - vpi_pred) / vpi_obs_err) ** 2.
        if np.isfinite(chi2_vpi):
            total_chi2 = chi2_sed + chi2_vpi
    return -0.5 * total_chi2 + ln_prior * Lprior
def mcostfun(*args):
    """Negated :func:`costfun`, suitable for scipy-style minimizers."""
    value = costfun(*args)
    return -value
def generate_p(p0, pstd, shrink=0.5):
    """Draw a Gaussian-perturbed parameter vector around ``p0``.

    The per-parameter scale is ``shrink * pstd``.
    """
    noise = np.random.randn(len(p0))
    return p0 + shrink * pstd * noise
def check_bounds(p, p_bounds=None):
    """Return True when every parameter lies strictly inside its bounds.

    ``p_bounds`` is an (n_dim, 2) array of (low, high) pairs; ``None``
    disables the check (always True).  Values equal to a bound fail.
    """
    if p_bounds is None:
        return True
    bounds = np.array(p_bounds)
    values = np.array(p)
    # same comparisons as touching-or-outside, then negated
    violates = np.any(values <= bounds[:, 0]) or np.any(values >= bounds[:, 1])
    return not violates
def random_p(sampler, nloopmax=1000, method="mle", costfun=None, args=()):
    """ given a sampler, generate new random p

    Re-seeds walker positions around the chain's MLE (or median) point,
    keeping only draws for which ``costfun`` is finite, until one position
    per walker has been collected.

    Raises
    ------
    ValueError
        If nloopmax draws are exhausted before n_walkers valid points are
        found.
    """
    n_walkers, _, n_dim = sampler.chain.shape
    # MLE p
    if method == "mle":
        p_mle = sampler.flatchain[np.nanargmax(sampler.flatlnprobability)]
    else:
        p_mle = np.median(sampler.flatchain, axis=0)
    # STD p
    p_std = np.std(sampler.flatchain, axis=0)
    # generate new p: rejection-sample until every walker has a valid start
    p_rand = []
    for i in range(nloopmax):
        p_new = generate_p(p_mle, p_std, shrink=0.6)
        if not np.isfinite(costfun(p_new, *args)):
            continue
        else:
            p_rand.append(p_new)
        # ran out of attempts before filling all walkers
        if i == nloopmax - 1:
            raise (ValueError("Unable to get good random ps..."))
        if len(p_rand) >= n_walkers:
            break
    if len(p_rand) == n_walkers:
        return np.array(p_rand)
    else:
        raise (ValueError("random_p failed!"))
# def guess_avdm(sed_mod, sed_obs, Alambda):
# """ guess Av and DM with OLS method
# Parameters
# ----------
# sed_mod:
# (n_band, ) array
# sed_obs:
# (n_band, ) array
# """
#
# n_band = sed_obs.size
# # X = [[Alambda_i, 1], [], ...]
# X = np.matrix(np.ones((n_band, 2), float))
# X[:, 0] = Alambda[:, None]
# # Y = [[d_sed_i], [], ...]
# Y = np.matrix((sed_obs - sed_mod).reshape(-1, 1))
#
# # OLS solution
# av_ols, dm_ols = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y))
# #av_est, dm_est = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y))
#
# return np.array([av_ols, dm_ols])
def general_search(params, sed_mod, lnprior,
                   Alambda,
                   test_sed_obs, test_sed_obs_err=None,
                   test_vpi_obs=None, test_vpi_obs_err=None,
                   Lvpi=1.0, Lprior=1.0, sed_err_typical=0.1, cost_order=2,
                   av_llim=0., debug=False):
    """
    when p = [T, G, Av, DM],
    given a set of SED,
    find the best T, G and estimate the corresponding Av and DM

    Parameters
    ----------
    params : ndarray, shape (n_mod, n_param)
        Stellar-parameter grid, one row per model.
    sed_mod : ndarray, shape (n_mod, n_band)
        Absolute model SED for each grid point.
    lnprior : ndarray, shape (n_mod,)
        Log-prior evaluated on the grid.
    Alambda : ndarray, shape (n_band,)
        Extinction coefficients A_lambda/Av per band.
    test_sed_obs, test_sed_obs_err : ndarray
        Observed SED and its error; non-finite bands (and, when errors are
        given, non-positive errors) are dropped.
    test_vpi_obs, test_vpi_obs_err : float, optional
        Observed parallax [mas] and its error.
    Lvpi, Lprior : float
        Weights of the parallax and prior terms.
    sed_err_typical : float or None
        If set, overrides the per-band errors with one typical value.
    cost_order : int
        Order of the residual norm (2 = chi-square).
    av_llim : float
        Models whose WLS Av falls below this limit are rejected.
    debug : bool
        If True, return intermediate estimates instead of the summary dict.
    """
    # select good bands
    if test_sed_obs_err is None:
        # all bands will be used
        ind_good_band = np.isfinite(test_sed_obs)
    else:
        ind_good_band = np.isfinite(test_sed_obs) & (test_sed_obs_err > 0)
    n_good_band = np.sum(ind_good_band)
    if n_good_band < 5:
        return [np.ones((4,), ) * np.nan for i in range(3)]
    # lnprior
    # lnprior = r2.values[:, -1]
    # T & G grid
    # t_est, g_est = r2.flats.T
    # params
    # model SED
    # sed_mod = r2.values[:, :-1][:, ind_good_band]
    sed_mod = sed_mod[:, ind_good_band]
    # observed SED
    sed_obs = test_sed_obs[ind_good_band]
    # observed SED error
    if sed_err_typical is not None:
        sed_obs_err = np.ones_like(sed_obs, float) * sed_err_typical
    else:
        sed_obs_err = test_sed_obs_err[ind_good_band]
    # WLS to guess Av and DM (one analytic solution per grid model)
    av_est, dm_est = guess_avdm_wls(
        sed_mod, sed_obs, sed_obs_err, Alambda[ind_good_band])
    # cost(SED)
    res_sed = sed_mod + av_est.reshape(-1, 1) * Alambda[
        ind_good_band] + dm_est.reshape(-1, 1) - sed_obs
    if sed_err_typical is not None:
        cost_sed = np.nansum(np.abs(res_sed / sed_err_typical) ** cost_order,
                             axis=1)
    else:
        cost_sed = np.nansum(np.abs(res_sed / sed_obs_err) ** cost_order,
                             axis=1)
    lnprob = -0.5 * cost_sed
    # cost(VPI): DM -> parallax [mas]
    if test_vpi_obs is not None and test_vpi_obs_err is not None and Lvpi > 0:
        vpi_mod = 10 ** (2 - 0.2 * dm_est)
        cost_vpi = ((vpi_mod - test_vpi_obs) / test_vpi_obs_err) ** 2.
        if np.all(np.isfinite(cost_vpi)):
            lnprob -= 0.5 * cost_vpi
    # lnprob = cost(SED) + cost(VPI) + prior
    if Lprior > 0:
        lnprob += lnprior * Lprior
    # eliminate neg Av
    lnprob[av_est < av_llim] = -np.inf
    lnprob -= np.nanmax(lnprob)
    if debug:
        return params, av_est, dm_est, cost_sed, lnprob
    # normalization
    prob = np.exp(lnprob)
    prob /= np.sum(prob)
    # weighted mean
    ind_mle = np.argmax(lnprob)
    av_mle = av_est[ind_mle]
    dm_mle = dm_est[ind_mle]
    p_mle = params[ind_mle]
    av_mean = np.sum(av_est * prob)
    dm_mean = np.sum(dm_est * prob)
    p_mean = np.sum(params * prob.reshape(-1, 1), axis=0)
    # NOTE(review): the *_std values below are weighted second moments
    # (variances) -- no square root is taken; confirm downstream expectation
    av_std = np.sum((av_est - av_mean) ** 2 * prob)
    dm_std = np.sum((dm_est - dm_mean) ** 2 * prob)
    p_std = np.sum((params - p_mean) ** 2 * prob.reshape(-1, 1), axis=0)
    p_mle = np.hstack([p_mle, av_mle, dm_mle])
    p_mean = np.hstack([p_mean, av_mean, dm_mean])
    p_std = np.hstack([p_std, av_std, dm_std])
    rms_sed_mle = np.sqrt(np.nanmean(res_sed[ind_mle] ** 2.))
    rms_sed_min = np.min(np.sqrt(np.nanmean(res_sed ** 2., axis=1)))
    return dict(
        p_mle=p_mle,
        p_mean=p_mean,
        p_std=p_std,
        rmsmle=rms_sed_mle,
        rmsmin=rms_sed_min,
        ind_mle=ind_mle,
        n_good=np.sum(ind_good_band)
    )
def general_search_v2(params, sed_mod, lnprior, Alambda,
                      sed_obs, sed_obs_err=0.1,
                      vpi_obs=None, vpi_obs_err=None,
                      Lvpi=1.0, Lprior=1.0,
                      cost_order=2, av_llim=-0.001, debug=False):
    """Grid search over stellar parameters with analytic Av/DM estimation.

    For p = [teff, logg, [M/H], Av, DM] and theta = [teff, logg, [M/H]],
    given an observed SED (and optionally a parallax), evaluate the
    posterior on the model grid; Av and DM are solved per model by WLS.

    Parameters
    ----------
    params : ndarray, shape (n_mod, n_param)
        Grid of stellar parameters (theta), one row per model.
    sed_mod : ndarray, shape (n_mod, n_band)
        Absolute model SED for each grid point.
    lnprior : ndarray, shape (n_mod,)
        Log-prior evaluated on the grid.
    Alambda : ndarray, shape (n_band,)
        Extinction coefficients A_lambda/Av per band.
    sed_obs : ndarray, shape (n_band,)
        Observed SED (apparent magnitudes).
    sed_obs_err : float or ndarray
        Observational error; a scalar is broadcast to all bands.
    vpi_obs, vpi_obs_err : float, optional
        Observed parallax [mas] and its error.
    Lvpi, Lprior : float
        Weights of the parallax and prior terms.
    cost_order : int
        Order of the SED residual norm (2 = chi-square).
    av_llim : float
        Models whose WLS Av falls below this limit are rejected.
    debug : bool
        If True, return the intermediate quantities instead.

    Returns
    -------
    dict
        p_mle / p_mean / p_err (each [theta..., Av, DM]) plus residual RMS
        statistics, or a list of NaN arrays when fewer than 4 usable bands
        remain.
    """
    n_band = len(sed_obs)
    n_mod = sed_mod.shape[0]
    # cope with scalar sed_obs_err
    # BUGFIX: np.float was removed in NumPy 1.24; accept Python/NumPy scalars
    if isinstance(sed_obs_err, (int, float, np.floating)):
        sed_obs_err = np.ones_like(sed_obs, dtype=float) * sed_obs_err
    # select good bands
    ind_good_band = np.isfinite(sed_obs) & (sed_obs_err > 0)
    n_good_band = np.sum(ind_good_band)
    if n_good_band < 4:
        # n_good_band = 3 gives a unique (degenerate) solution,
        # so at least 4 bands are required
        return [np.ones((4,), ) * np.nan for i in range(3)]
    # use a subset of bands
    sed_mod_select = sed_mod[:, ind_good_band]
    # observed SED
    sed_obs_select = sed_obs[ind_good_band]
    sed_obs_err_select = sed_obs_err[ind_good_band]
    # extinction coefs
    Alambda_select = Alambda[ind_good_band]
    # WLS to guess Av and DM, one analytic solution per model
    av_est, dm_est = guess_avdm_wls(
        sed_mod_select, sed_obs_select, sed_obs_err_select, Alambda_select)
    # cost(SED)
    res_sed = sed_mod_select + av_est.reshape(-1, 1) * Alambda_select \
        + dm_est.reshape(-1, 1) - sed_obs_select
    lnprob_sed = -0.5 * np.nansum(
        np.abs(res_sed / sed_obs_err_select) ** cost_order, axis=1)
    # cost(VPI): DM -> parallax [mas]
    if vpi_obs is not None and vpi_obs_err is not None and Lvpi > 0:
        vpi_mod = 10 ** (2 - 0.2 * dm_est)
        lnprob_vpi = -0.5 * ((vpi_mod - vpi_obs) / vpi_obs_err) ** 2.
    else:
        lnprob_vpi = np.zeros((n_mod,), dtype=float)
    lnprob_vpi = np.where(np.isfinite(lnprob_vpi), lnprob_vpi, 0) * Lvpi
    # prior term
    # BUGFIX: the original left lnprob_prior undefined (NameError) whenever
    # Lprior <= 0; default it to zero in that case
    if Lprior > 0:
        lnprob_prior = lnprior * Lprior
    else:
        lnprob_prior = np.zeros((n_mod,), dtype=float)
    # posterior probability
    lnpost = lnprob_sed + lnprob_vpi + lnprob_prior
    # eliminate neg Av
    lnpost[av_est < av_llim] = -np.inf
    lnpost -= np.nanmax(lnpost)
    # for debugging the code
    if debug:
        return dict(params=params,
                    av_est=av_est,
                    dm_est=dm_est,
                    lnprob_sed=lnprob_sed,
                    lnprob_vpi=lnprob_vpi,
                    lnprior=lnprior)
    # normalization: L0/L1/L2 are the 0th/1st/2nd posterior moments
    post = np.exp(lnpost)
    L0 = np.sum(post)
    L1_av = np.sum(av_est * post)
    L1_dm = np.sum(dm_est * post)
    L1_p = np.sum(params * post.reshape(-1, 1), axis=0)
    L2_av = np.sum(av_est ** 2 * post)
    L2_dm = np.sum(dm_est ** 2 * post)
    L2_p = np.sum(params ** 2 * post.reshape(-1, 1), axis=0)
    sigma_av = np.sqrt(L2_av / L0 - L1_av ** 2 / L0 ** 2)
    sigma_dm = np.sqrt(L2_dm / L0 - L1_dm ** 2 / L0 ** 2)
    sigma_p = np.sqrt(L2_p / L0 - L1_p ** 2 / L0 ** 2)
    # MLE model (likelihood only, prior excluded)
    ind_mle = np.argmax(lnprob_sed + lnprob_vpi)
    p_mle = np.hstack([params[ind_mle], av_est[ind_mle], dm_est[ind_mle]])
    p_mean = np.hstack([L1_p / L0, L1_av / L0, L1_dm / L0])
    p_err = np.hstack([sigma_p, sigma_av, sigma_dm])
    rms_sed_mle = np.sqrt(np.nanmean(res_sed[ind_mle] ** 2.))
    rms_sed_min = np.min(np.sqrt(np.nanmean(res_sed ** 2., axis=1)))
    return dict(p_mle=p_mle,
                p_mean=p_mean,
                p_err=p_err,
                rmsmle=rms_sed_mle,
                rmsmin=rms_sed_min,
                ind_mle=ind_mle,
                n_good=np.sum(ind_good_band))
| en | 0.595417 | the TGM Machine class the input should be a stacked table of isochrones # stacked isochrones # TGM array # TGM sigma # Qs to predict # weight array predict MLE of SED and weight at the given TGM position # smooth weight in a wider volume the TG Machine class # 4 stands for [Teff, logg, Av, DM] # number of chains # initialize sampler # guess Av and DM for p0try # run sampler # generate new p # reset sampler # run at new p when p = [T, G, Av, DM], given a set of SED, find the best T, G and estimate the corresponding Av and DM # select good bands # all bands will be used # lnprior # T & G grid # model SED # observed SED # observed SED error # WLS to guess Av and DM # cost(SED) # cost(VPI) # lnprob = cost(SED) + cost(VPI) + prior # eliminate neg Av # normalization # weighted mean interpolate with r(Regli) at position x initial guess of Av and DM with OLS method # select good bands # solve Av and DM # neg Av --> 0.001 matrix form OLS solution for Av and DM # color # av_ols, dm_ols = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T,X)),X.T),Y)) matrix form OLS solution for Av and DM # d_mag # solve Av & DM with WLS cost function of MCMC Returns ------- -0.5*(chi2_sed + chi2_vpi*Lvpi) + lnprior*Lprior # unpack parameters # check bounds # predict model # lnprior # predicted SED_obs # vpi_model # mas # chi2_sed # include vpi # eval chi2_vpi minus of costfun generate (normal) random p check bounds given a sampler, generate new random p # MLE p # STD p # generate new p # def guess_avdm(sed_mod, sed_obs, Alambda): # """ guess Av and DM with OLS method # Parameters # ---------- # sed_mod: # (n_band, ) array # sed_obs: # (n_band, ) array # """ # # n_band = sed_obs.size # # X = [[Alambda_i, 1], [], ...] # X = np.matrix(np.ones((n_band, 2), float)) # X[:, 0] = Alambda[:, None] # # Y = [[d_sed_i], [], ...] 
# Y = np.matrix((sed_obs - sed_mod).reshape(-1, 1)) # # # OLS solution # av_ols, dm_ols = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y)) # #av_est, dm_est = np.array(np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y)) # # return np.array([av_ols, dm_ols]) when p = [T, G, Av, DM], given a set of SED, find the best T, G and estimate the corresponding Av and DM # select good bands # all bands will be used # lnprior # lnprior = r2.values[:, -1] # T & G grid # t_est, g_est = r2.flats.T # params # model SED # sed_mod = r2.values[:, :-1][:, ind_good_band] # observed SED # observed SED error # WLS to guess Av and DM # cost(SED) # cost(VPI) # lnprob = cost(SED) + cost(VPI) + prior # eliminate neg Av # normalization # weighted mean when p = [teff, logg, [M/H], Av, DM], theta = [teff, logg, [M/H]], given a set of SED, find the best theta and estimate the corresponding Av and DM # cope with scalar sed_obs_err # select good bands # n_good_band = 3: unique solution # so n_good_band should be at least 4 # use a subset of bands # observed SED # extinction coefs # WLS to guess Av and DM # cost(SED) # cost(VPI) # lnprob = cost(SED) + cost(VPI) + prior # posterior probability # eliminate neg Av # for debugging the code # normalization # weighted mean # ind_mle = np.argmax(lnpost) # av_mle = av_est[ind_mle] # dm_mle = dm_est[ind_mle] # p_mle = params[ind_mle] # MLE model | 2.499905 | 2 |
sensordata_generator.py | jessicasena/WearableSensorDataGenerator | 2 | 6624138 | # ---------------------------------------------------------------------------------
# Keras DataGenerator for datasets from the benchmark "Human Activity Recognition
# Based on Wearable Sensor Data: A Standardization of the State-of-the-Art"
#
# The data used here is created by the npz_to_fold.py file.
#
# (C) 2020 <NAME>, Brazil
# Released under GNU Public License (GPL)
# email <EMAIL>
# ---------------------------------------------------------------------------------
import numpy as np
import keras
import sys
import os
class DataGenerator(keras.utils.Sequence):
"""Generates data for Keras"""
    def __init__(self, dataset_path, list_ids, labels, batch_size, shuffle, multimodal = False):
        """Initialization

        Parameters
        ----------
        dataset_path : str
            Root folder containing a ``samples/`` directory of .npy files.
        list_ids : list of str
            Sample identifiers (file stems) available to this generator.
        labels : dict
            Maps sample ID -> one-hot label vector.
        batch_size : int
            Number of samples per batch.
        shuffle : bool
            Whether to reshuffle sample order at the end of every epoch.
        multimodal : bool
            If True, each batch is a list of per-sensor inputs.
        """
        self.indexes = np.arange(len(list_ids))  # positions into list_ids
        self.batch_size = batch_size
        self.labels = labels
        # infer the number of classes from the length of any one-hot label
        self.n_classes = len(list(labels.values())[0])
        self.list_ids = list_ids
        self.dataset_path = dataset_path
        self.shuffle = shuffle
        # number of model inputs: 1 (stacked) or one per sensor stream
        self.input_number = 1 if not multimodal else self.n_inputs()
        self.multimodal = multimodal
        self.on_epoch_end()
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.floor(len(self.list_ids) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
list_ids_temp = [self.list_ids[k] for k in indexes]
x, y = self.__data_generation(list_ids_temp)
return x, y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, list_ids_temp):
"""Generates data containing batch_size samples"""
shape = self.get_shape()
y = np.empty((self.batch_size, self.n_classes))
if self.multimodal:
x = [np.empty((self.batch_size, shape[1], shape[2], 3)) for k in
range(self.input_number)]
for i, ID in enumerate(list_ids_temp):
sample = np.load(self.dataset_path + '/samples/' + ID + '.npy')
for j, data in enumerate(self.select_sensors(sample)):
x[j][i,] = data
y[i] = self.labels[ID]
else:
x = np.empty((self.batch_size, shape[1], shape[2], shape[3]))
for i, ID in enumerate(list_ids_temp):
sample[i,] = np.load(self.dataset_path + '/samples/' + ID + '.npy')
y[i] = self.labels[ID]
return x, y
def n_inputs(self):
dataset_name = self.dataset_path.split("/")[-1]
sample = np.load(self.dataset_path + '/samples/' + self.list_ids[0] + '.npy')
input_vec = self.select_sensors(sample)
return len(input_vec)
def select_sensors(self, sample):
dataset_name = os.path.normpath(self.dataset_path).split(os.path.sep)[-1]
data = []
if dataset_name == 'MHEALTH':
data.append(sample[:, :, 0:3]) # ACC chest-sensor
data.append(sample[:, :, 5:8]) # ACC left-ankle sensor
data.append(sample[:, :, 8:11]) # GYR left-ankle sensor
data.append(sample[:, :, 11:14]) # MAG left-ankle sensor
data.append(sample[:, :, 14:17]) # ACC right-lower-arm
data.append(sample[:, :, 17:20]) # GYR right-lower-arm
data.append(sample[:, :, 20:23]) # MAG right-lower-arm
elif dataset_name == 'PAMAP2P':
data.append(sample[:, :, 1:4]) # ACC1 over the wrist on the dominant arm
data.append(sample[:, :, 4:7]) # ACC2 over the wrist on the dominant arm
data.append(sample[:, :, 7:10]) # GYR over the wrist on the dominant arm
data.append(sample[:, :, 10:13]) # MAG over the wrist on the dominant arm
data.append(sample[:, :, 14:17]) # ACC1 chest-sensor
data.append(sample[:, :, 17:20]) # ACC2 chest-sensor
data.append(sample[:, :, 20:23]) # GYR chest-sensor
data.append(sample[:, :, 23:26]) # MAG chest-sensor
data.append(sample[:, :, 27:30]) # ACC1 on the dominant side's ankle
data.append(sample[:, :, 30:33]) # ACC2 on the dominant side's ankle
data.append(sample[:, :, 33:36]) # GYR on the dominant side's ankle
data.append(sample[:, :, 36:39]) # MAG on the dominant side's ankle
elif dataset_name == 'UTD-MHAD1_1s' or dataset_name == 'UTD-MHAD2_1s' or dataset_name == 'USCHAD':
# UTD-MHAD1_1s: ACC right-wrist
# UTD-MHAD2_1s: ACC right-thigh
# USCHAD: ACC subject’s front right hip inside a mobile phone pouch
data.append(sample[:, :, 0:3])
# UTD-MHAD1_1s: GYR right-wrist
# UTD-MHAD2_1s: GYR right-thigh
# USCHAD: GYR subject’s front right hip inside a mobile phone pouch
data.append(sample[:, :, 3:6])
elif dataset_name == 'WHARF' or dataset_name == 'WISDM':
# WHARF: ACC right-wrist
# WISDM: ACC 5 different body positions (apparently)
data.append(sample[:, :, 0:3])
else:
sys.exit("Dataset name ({}) is wrong.".format(dataset_name))
return data
def get_shape(self):
"""Get dataset shape"""
sample = np.load(self.dataset_path + '/samples/' + self.list_ids[0] + '.npy')
if self.multimodal:
shape = (len(self.list_ids), sample.shape[0], sample.shape[1], 3)
else:
shape = (len(self.list_ids), sample.shape[0], sample.shape[1], sample.shape[2])
return shape
def get_nclasses(self):
"""Get number of classes"""
return self.n_classes
def get_moda_names(self):
dataset_name = os.path.normpath(self.dataset_path).split(os.path.sep)[-1]
data = []
if dataset_name == 'MHEALTH':
names = ["a_chest", "a_left-ankle", "g_left-ankle", "m_left-ankle",
"a_right-wrist", "g_right-wrist", "m_right-wrist"]
elif dataset_name == 'PAMAP2P':
names = ["a1_dominant-wrist", "a2_dominant-wrist", "g_dominant-wrist", "m_dominant-wrist",
"a1_chest", "a2_chest", "g_chest", "m_chest",
"a1_dominant_ankle", "a2_dominant_ankle", "g_dominant_ankle", "m_dominant_ankle"]
elif dataset_name == 'UTD-MHAD1_1s':
names = ["a_right-wrist", "g_right-wrist"]
elif dataset_name == 'UTD-MHAD2_1s':
names = ["a_right-thigh", "g_right-thigh"]
elif dataset_name == 'USCHAD':
names = ["a_front-right-hip", "g_front-right-hip"]
elif dataset_name == 'WHARF':
names = ["a_right-wrist"]
elif dataset_name == 'WISDM':
names = ["acc"]
else:
sys.exit("Dataset name ({}) is wrong.".format(dataset_name))
return names
| # ---------------------------------------------------------------------------------
# Keras DataGenerator for datasets from the benchmark "Human Activity Recognition
# Based on Wearable Sensor Data: A Standardization of the State-of-the-Art"
#
# The data used here is created by the npz_to_fold.py file.
#
# (C) 2020 <NAME>, Brazil
# Released under GNU Public License (GPL)
# email <EMAIL>
# ---------------------------------------------------------------------------------
import numpy as np
import keras
import sys
import os
class DataGenerator(keras.utils.Sequence):
    """Keras ``Sequence`` that streams HAR sensor windows from disk.

    Each sample is stored as ``<dataset_path>/samples/<id>.npy``.  In
    multimodal mode every sample is split channel-wise into one
    3-channel array per sensor (see :meth:`select_sensors`) and a batch
    is a *list* of arrays, one per sensor input; otherwise a batch is a
    single 4-D array.
    """

    def __init__(self, dataset_path, list_ids, labels, batch_size,
                 shuffle, multimodal=False):
        """Initialization.

        Args:
            dataset_path: dataset root; samples are read from ``samples/``.
            list_ids: sample identifiers (``.npy`` file stems).
            labels: maps sample id to its label vector
                (length == number of classes; presumably one-hot — confirm).
            batch_size: samples per batch.
            shuffle: reshuffle sample order at the end of every epoch.
            multimodal: split each sample per sensor (one input per sensor).
        """
        self.indexes = np.arange(len(list_ids))
        self.batch_size = batch_size
        self.labels = labels
        # All label vectors share one length: the number of classes.
        self.n_classes = len(list(labels.values())[0])
        self.list_ids = list_ids
        self.dataset_path = dataset_path
        self.shuffle = shuffle
        # n_inputs() only needs dataset_path/list_ids, so it is safe to
        # call before self.multimodal is assigned.
        self.input_number = 1 if not multimodal else self.n_inputs()
        self.multimodal = multimodal
        self.on_epoch_end()

    def __len__(self):
        """Denotes the number of (full) batches per epoch; remainder samples are dropped."""
        return int(np.floor(len(self.list_ids) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data from the (possibly shuffled) ordering."""
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_ids_temp = [self.list_ids[k] for k in indexes]
        x, y = self.__data_generation(list_ids_temp)
        return x, y

    def on_epoch_end(self):
        """Updates (reshuffles) indexes after each epoch when requested."""
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_ids_temp):
        """Load the given ids from disk and assemble one batch ``(x, y)``."""
        shape = self.get_shape()
        y = np.empty((self.batch_size, self.n_classes))
        if self.multimodal:
            # One 3-channel input tensor per sensor.
            x = [np.empty((self.batch_size, shape[1], shape[2], 3))
                 for _ in range(self.input_number)]
            for i, ID in enumerate(list_ids_temp):
                sample = np.load(self.dataset_path + '/samples/' + ID + '.npy')
                for j, data in enumerate(self.select_sensors(sample)):
                    x[j][i, ] = data
                y[i] = self.labels[ID]
        else:
            x = np.empty((self.batch_size, shape[1], shape[2], shape[3]))
            for i, ID in enumerate(list_ids_temp):
                # BUG FIX: the original assigned into the undefined name
                # ``sample`` (NameError); the loaded array belongs in ``x``.
                x[i, ] = np.load(self.dataset_path + '/samples/' + ID + '.npy')
                y[i] = self.labels[ID]
        return x, y

    def n_inputs(self):
        """Number of per-sensor network inputs produced for this dataset."""
        sample = np.load(self.dataset_path + '/samples/' + self.list_ids[0] + '.npy')
        return len(self.select_sensors(sample))

    def select_sensors(self, sample):
        """Split ``sample`` channel-wise into one 3-channel array per sensor.

        The channel layout is dataset specific (keyed on the last path
        component of ``dataset_path``); unknown names abort the program.
        """
        dataset_name = os.path.normpath(self.dataset_path).split(os.path.sep)[-1]
        data = []
        if dataset_name == 'MHEALTH':
            data.append(sample[:, :, 0:3])  # ACC chest-sensor
            data.append(sample[:, :, 5:8])  # ACC left-ankle sensor
            data.append(sample[:, :, 8:11])  # GYR left-ankle sensor
            data.append(sample[:, :, 11:14])  # MAG left-ankle sensor
            data.append(sample[:, :, 14:17])  # ACC right-lower-arm
            data.append(sample[:, :, 17:20])  # GYR right-lower-arm
            data.append(sample[:, :, 20:23])  # MAG right-lower-arm
        elif dataset_name == 'PAMAP2P':
            data.append(sample[:, :, 1:4])  # ACC1 over the wrist on the dominant arm
            data.append(sample[:, :, 4:7])  # ACC2 over the wrist on the dominant arm
            data.append(sample[:, :, 7:10])  # GYR over the wrist on the dominant arm
            data.append(sample[:, :, 10:13])  # MAG over the wrist on the dominant arm
            data.append(sample[:, :, 14:17])  # ACC1 chest-sensor
            data.append(sample[:, :, 17:20])  # ACC2 chest-sensor
            data.append(sample[:, :, 20:23])  # GYR chest-sensor
            data.append(sample[:, :, 23:26])  # MAG chest-sensor
            data.append(sample[:, :, 27:30])  # ACC1 on the dominant side's ankle
            data.append(sample[:, :, 30:33])  # ACC2 on the dominant side's ankle
            data.append(sample[:, :, 33:36])  # GYR on the dominant side's ankle
            data.append(sample[:, :, 36:39])  # MAG on the dominant side's ankle
        elif dataset_name == 'UTD-MHAD1_1s' or dataset_name == 'UTD-MHAD2_1s' or dataset_name == 'USCHAD':
            # UTD-MHAD1_1s: ACC right-wrist
            # UTD-MHAD2_1s: ACC right-thigh
            # USCHAD: ACC subject’s front right hip inside a mobile phone pouch
            data.append(sample[:, :, 0:3])
            # UTD-MHAD1_1s: GYR right-wrist
            # UTD-MHAD2_1s: GYR right-thigh
            # USCHAD: GYR subject’s front right hip inside a mobile phone pouch
            data.append(sample[:, :, 3:6])
        elif dataset_name == 'WHARF' or dataset_name == 'WISDM':
            # WHARF: ACC right-wrist
            # WISDM: ACC 5 different body positions (apparently)
            data.append(sample[:, :, 0:3])
        else:
            sys.exit("Dataset name ({}) is wrong.".format(dataset_name))
        return data

    def get_shape(self):
        """Get dataset shape, probed from the first sample on disk."""
        sample = np.load(self.dataset_path + '/samples/' + self.list_ids[0] + '.npy')
        if self.multimodal:
            shape = (len(self.list_ids), sample.shape[0], sample.shape[1], 3)
        else:
            shape = (len(self.list_ids), sample.shape[0], sample.shape[1], sample.shape[2])
        return shape

    def get_nclasses(self):
        """Get number of classes."""
        return self.n_classes

    def get_moda_names(self):
        """Human-readable name per sensor input, in :meth:`select_sensors` order."""
        dataset_name = os.path.normpath(self.dataset_path).split(os.path.sep)[-1]
        if dataset_name == 'MHEALTH':
            names = ["a_chest", "a_left-ankle", "g_left-ankle", "m_left-ankle",
                     "a_right-wrist", "g_right-wrist", "m_right-wrist"]
        elif dataset_name == 'PAMAP2P':
            names = ["a1_dominant-wrist", "a2_dominant-wrist", "g_dominant-wrist", "m_dominant-wrist",
                     "a1_chest", "a2_chest", "g_chest", "m_chest",
                     "a1_dominant_ankle", "a2_dominant_ankle", "g_dominant_ankle", "m_dominant_ankle"]
        elif dataset_name == 'UTD-MHAD1_1s':
            names = ["a_right-wrist", "g_right-wrist"]
        elif dataset_name == 'UTD-MHAD2_1s':
            names = ["a_right-thigh", "g_right-thigh"]
        elif dataset_name == 'USCHAD':
            names = ["a_front-right-hip", "g_front-right-hip"]
        elif dataset_name == 'WHARF':
            names = ["a_right-wrist"]
        elif dataset_name == 'WISDM':
            names = ["acc"]
        else:
            sys.exit("Dataset name ({}) is wrong.".format(dataset_name))
        return names
| en | 0.672376 | # --------------------------------------------------------------------------------- # Keras DataGenerator for datasets from the benchmark "Human Activity Recognition # Based on Wearable Sensor Data: A Standardization of the State-of-the-Art" # # The data used here is created by the npz_to_fold.py file. # # (C) 2020 <NAME>, Brazil # Released under GNU Public License (GPL) # email <EMAIL> # --------------------------------------------------------------------------------- Generates data for Keras Initialization Denotes the number of batches per epoch Generate one batch of data Updates indexes after each epoch Generates data containing batch_size samples # ACC chest-sensor # ACC left-ankle sensor # GYR left-ankle sensor # MAG left-ankle sensor # ACC right-lower-arm # GYR right-lower-arm # MAG right-lower-arm # ACC1 over the wrist on the dominant arm # ACC2 over the wrist on the dominant arm # GYR over the wrist on the dominant arm # MAG over the wrist on the dominant arm # ACC1 chest-sensor # ACC2 chest-sensor # GYR chest-sensor # MAG chest-sensor # ACC1 on the dominant side's ankle # ACC2 on the dominant side's ankle # GYR on the dominant side's ankle # MAG on the dominant side's ankle # UTD-MHAD1_1s: ACC right-wrist # UTD-MHAD2_1s: ACC right-thigh # USCHAD: ACC subject’s front right hip inside a mobile phone pouch # UTD-MHAD1_1s: GYR right-wrist # UTD-MHAD2_1s: GYR right-thigh # USCHAD: GYR subject’s front right hip inside a mobile phone pouch # WHARF: ACC right-wrist # WISDM: ACC 5 different body positions (apparently) Get dataset shape Get number of classes | 3.13194 | 3 |
Expenses/urls.py | adithyanps/netprofit-django | 0 | 6624139 | <gh_stars>0
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from .views import ExpenseCategoryViewSet, ExpenseViewSet

# DRF router: auto-generates the standard list/detail routes for the
# two expense viewsets.  (Old chart viewset registrations — sales/
# expense year and partner charts — are currently disabled.)
router = DefaultRouter()
router.register('expense-category', ExpenseCategoryViewSet)
router.register('expenses', ExpenseViewSet)

app_name = 'expenses'

urlpatterns = [
    path('', include(router.urls)),
]
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from .views import ExpenseCategoryViewSet, ExpenseViewSet

# Route table for the expenses app: a DRF router provides the standard
# CRUD endpoints for both viewsets.  (Old chart viewset registrations
# are currently disabled.)
router = DefaultRouter()
router.register('expense-category', ExpenseCategoryViewSet)
router.register('expenses', ExpenseViewSet)

app_name = 'expenses'

urlpatterns = [
    path('', include(router.urls)),
]
tests/unit/test_seed.py | man-group/hiveminder | 5 | 6624140 | from hiveminder.seed import Seed
from hiveminder.headings import heading_to_delta, LEGAL_HEADINGS
from hiveminder._util import is_even
from mock import sentinel
import pytest
def test_can_not_hash_a_seed():
    """Seed is an unhashable value object: hashing must raise TypeError."""
    h = Seed(sentinel.x, sentinel.y, sentinel.h)
    with pytest.raises(TypeError) as err:
        hash(h)
    assert str(err.value) == "unhashable type: 'Seed'"


# Seeds are equal only when all three of (x, y, heading) match; each
# case below differs from the reference seed in exactly one field.
@pytest.mark.parametrize("x1, y1, heading1, x2, y2, heading2, are_equal",
                         [(sentinel.x, sentinel.y, sentinel.h,
                           sentinel.x, sentinel.y, sentinel.h, True),
                          (sentinel.x1, sentinel.y, sentinel.h,
                           sentinel.x, sentinel.y, sentinel.h, False),
                          (sentinel.x, sentinel.y1, sentinel.h,
                           sentinel.x, sentinel.y, sentinel.h, False),
                          (sentinel.x, sentinel.y, sentinel.h1,
                           sentinel.x, sentinel.y, sentinel.h, False),
                          ])
def test_seeds_equality(x1, y1, heading1, x2, y2, heading2, are_equal):
    """``==`` and ``!=`` must agree and compare all three fields."""
    h1 = Seed(x1, y1, heading1)
    h2 = Seed(x2, y2, heading2)
    assert (h1 == h2) == are_equal
    assert (h1 != h2) == (not are_equal)
def test_seed_not_equal_to_other_types():
    """Comparing with a non-Seed object is False, never a TypeError."""
    assert not (Seed(sentinel.x, sentinel.y, sentinel.h) == sentinel.seed)
    assert Seed(sentinel.x, sentinel.y, sentinel.h) != sentinel.seed


def test_can_convert_seed_to_json():
    """The JSON form is a tagged list: [type, x, y, heading]."""
    assert (Seed(sentinel.x, sentinel.y, sentinel.h).to_json()
            == ["Seed", sentinel.x, sentinel.y, sentinel.h])


def test_can_read_seed_from_json():
    # The leading type tag is ignored when reading back.
    assert Seed.from_json([sentinel.type, sentinel.x, sentinel.y, sentinel.h]) == Seed(sentinel.x, sentinel.y, sentinel.h)


def test_repr():
    assert repr(Seed(1337, 2000, 123456)) == "Seed(1337, 2000, 123456)"


def test_str():
    # str and repr are expected to produce the same text.
    assert str(Seed(1337, 2000, 123456)) == "Seed(1337, 2000, 123456)"


def test_can_get_position_and_heading_from_a_seed():
    """``xyh`` exposes position and heading as a single tuple."""
    assert Seed(sentinel.x, sentinel.y, sentinel.h).xyh == (sentinel.x, sentinel.y, sentinel.h)


def test_can_set_position():
    """``set_position`` returns a new Seed, keeping the heading."""
    seed = Seed(sentinel.x, sentinel.y, sentinel.h)
    assert seed.set_position(sentinel.newx, sentinel.newy) == Seed(sentinel.newx, sentinel.newy, sentinel.h)
    # Original instance is not actually changed
    assert seed == Seed(sentinel.x, sentinel.y, sentinel.h)


@pytest.mark.parametrize("heading", LEGAL_HEADINGS)
@pytest.mark.parametrize("column", [4, 5])
def test_can_advance_seed_along_a_heading(heading, column):
    # Rather than patch heading_to_delta in Seed.advance we just
    # assert that the effect of advancing the seed matches that
    # defined by heading_to_delta
    expected_dx, expected_dy = heading_to_delta(heading, is_even(column))
    seed = Seed(column, 5, heading)
    assert seed.advance() == Seed(column + expected_dx, 5 + expected_dy, heading)
    # Original instance is not actually changed
    assert seed == Seed(column, 5, heading)


def test_can_reverse_a_seed_along_a_heading():
    """``advance(reverse=True)`` steps back one cell and flips the heading."""
    seed = Seed(5, 5, 0)
    assert seed.advance(reverse=True) == Seed(5, 4, 180)
    # Original instance is not actually changed
    assert seed == Seed(5, 5, 0)
| from hiveminder.seed import Seed
from hiveminder.headings import heading_to_delta, LEGAL_HEADINGS
from hiveminder._util import is_even
from mock import sentinel
import pytest
def test_can_not_hash_a_seed():
    """Seed is an unhashable value object: hashing must raise TypeError."""
    h = Seed(sentinel.x, sentinel.y, sentinel.h)
    with pytest.raises(TypeError) as err:
        hash(h)
    assert str(err.value) == "unhashable type: 'Seed'"


# Seeds are equal only when all three of (x, y, heading) match; each
# case below differs from the reference seed in exactly one field.
@pytest.mark.parametrize("x1, y1, heading1, x2, y2, heading2, are_equal",
                         [(sentinel.x, sentinel.y, sentinel.h,
                           sentinel.x, sentinel.y, sentinel.h, True),
                          (sentinel.x1, sentinel.y, sentinel.h,
                           sentinel.x, sentinel.y, sentinel.h, False),
                          (sentinel.x, sentinel.y1, sentinel.h,
                           sentinel.x, sentinel.y, sentinel.h, False),
                          (sentinel.x, sentinel.y, sentinel.h1,
                           sentinel.x, sentinel.y, sentinel.h, False),
                          ])
def test_seeds_equality(x1, y1, heading1, x2, y2, heading2, are_equal):
    """``==`` and ``!=`` must agree and compare all three fields."""
    h1 = Seed(x1, y1, heading1)
    h2 = Seed(x2, y2, heading2)
    assert (h1 == h2) == are_equal
    assert (h1 != h2) == (not are_equal)


def test_seed_not_equal_to_other_types():
    """Comparing with a non-Seed object is False, never a TypeError."""
    assert not (Seed(sentinel.x, sentinel.y, sentinel.h) == sentinel.seed)
    assert Seed(sentinel.x, sentinel.y, sentinel.h) != sentinel.seed


def test_can_convert_seed_to_json():
    """The JSON form is a tagged list: [type, x, y, heading]."""
    assert (Seed(sentinel.x, sentinel.y, sentinel.h).to_json()
            == ["Seed", sentinel.x, sentinel.y, sentinel.h])


def test_can_read_seed_from_json():
    # The leading type tag is ignored when reading back.
    assert Seed.from_json([sentinel.type, sentinel.x, sentinel.y, sentinel.h]) == Seed(sentinel.x, sentinel.y, sentinel.h)


def test_repr():
    assert repr(Seed(1337, 2000, 123456)) == "Seed(1337, 2000, 123456)"


def test_str():
    # str and repr are expected to produce the same text.
    assert str(Seed(1337, 2000, 123456)) == "Seed(1337, 2000, 123456)"


def test_can_get_position_and_heading_from_a_seed():
    """``xyh`` exposes position and heading as a single tuple."""
    assert Seed(sentinel.x, sentinel.y, sentinel.h).xyh == (sentinel.x, sentinel.y, sentinel.h)


def test_can_set_position():
    """``set_position`` returns a new Seed, keeping the heading."""
    seed = Seed(sentinel.x, sentinel.y, sentinel.h)
    assert seed.set_position(sentinel.newx, sentinel.newy) == Seed(sentinel.newx, sentinel.newy, sentinel.h)
    # Original instance is not actually changed
    assert seed == Seed(sentinel.x, sentinel.y, sentinel.h)


@pytest.mark.parametrize("heading", LEGAL_HEADINGS)
@pytest.mark.parametrize("column", [4, 5])
def test_can_advance_seed_along_a_heading(heading, column):
    # Rather than patch heading_to_delta in Seed.advance we just
    # assert that the effect of advancing the seed matches that
    # defined by heading_to_delta
    expected_dx, expected_dy = heading_to_delta(heading, is_even(column))
    seed = Seed(column, 5, heading)
    assert seed.advance() == Seed(column + expected_dx, 5 + expected_dy, heading)
    # Original instance is not actually changed
    assert seed == Seed(column, 5, heading)


def test_can_reverse_a_seed_along_a_heading():
    """``advance(reverse=True)`` steps back one cell and flips the heading."""
    seed = Seed(5, 5, 0)
    assert seed.advance(reverse=True) == Seed(5, 4, 180)
    # Original instance is not actually changed
    assert seed == Seed(5, 5, 0)
| en | 0.925018 | # Original instance is not actually changed # Rather than patch heading_to_delta in Seed.advance we just # assert that the effect of advancing the seed matches that # defined by heading_to_delta # Original instance is not actually changed # Original instance is not actually changed | 2.475602 | 2 |
examples/chording_example.py | thomas-weigel/thuja | 1 | 6624141 | <filename>examples/chording_example.py
from thuja.itemstream import notetypes
from thuja.itemstream import Itemstream
from thuja.generator import Generator
from thuja.generator import keys
from thuja import csound_utils
# Constant quarter-note pulse at 120 BPM.
rhythms = Itemstream("q", tempo=120, notetype=notetypes.rhythm)

# Seven three-note chords: a size-3 window sliding up the C-major scale
# from c5 to d6 — identical to writing each triad out by hand.
_scale = "c5 d e f g a b c6 d".split()
pitches = Itemstream([_scale[i:i + 3] for i in range(len(_scale) - 2)],
                     notetype=notetypes.pitch)

# Fixed instrument/duration/amplitude; rhythm and pitch come from the
# streams above.  The note limit plays the chord cycle several times.
g = Generator(
    streams=[
        (keys.instrument, 1),
        (keys.rhythm, rhythms),
        (keys.duration, .1),
        (keys.amplitude, 1),
        (keys.frequency, pitches),
    ],
    note_limit=(len(pitches.values) * 4 * 3),
    gen_lines=[';sine', 'f 1 0 16384 10 1']
)

g.generate_notes()
score_string = g.generate_score_string()
print(score_string)
csound_utils.play_csound("sine.orc", g, silent=True)
| <filename>examples/chording_example.py
from thuja.itemstream import notetypes
from thuja.itemstream import Itemstream
from thuja.generator import Generator
from thuja.generator import keys
from thuja import csound_utils
# Constant quarter-note pulse at 120 BPM.
rhythms = Itemstream("q", tempo=120, notetype=notetypes.rhythm)

# Seven three-note chords: a size-3 window sliding up the C-major scale
# from c5 to d6 — identical to writing each triad out by hand.
_scale = "c5 d e f g a b c6 d".split()
pitches = Itemstream([_scale[i:i + 3] for i in range(len(_scale) - 2)],
                     notetype=notetypes.pitch)

# Fixed instrument/duration/amplitude; rhythm and pitch come from the
# streams above.  The note limit plays the chord cycle several times.
g = Generator(
    streams=[
        (keys.instrument, 1),
        (keys.rhythm, rhythms),
        (keys.duration, .1),
        (keys.amplitude, 1),
        (keys.frequency, pitches),
    ],
    note_limit=(len(pitches.values) * 4 * 3),
    gen_lines=[';sine', 'f 1 0 16384 10 1']
)

g.generate_notes()
score_string = g.generate_score_string()
print(score_string)
csound_utils.play_csound("sine.orc", g, silent=True)
| none | 1 | 2.419048 | 2 | |
examples/expe_conv_logreg/figure_logreg.py | idc9/andersoncd | 0 | 6624142 | import pandas
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from andersoncd.plot_utils import configure_plt, _plot_legend_apart
configure_plt()

# Colorblind-safe palette: full-gradient methods (PGD family) share one
# color and coordinate-descent methods (PCD family) another; Anderson
# variants are dashed and inertial variants dotted.
current_palette = sns.color_palette("colorblind")

dict_color = {
    "pgd": current_palette[0],
    "fista": current_palette[0],
    "cd": current_palette[1],
    "apcg": current_palette[1],
}

# Keyed by (use_acceleration, algorithm name).
dict_linestyle = {
    (False, "pgd"): "-",
    (False, "cd"): "-",
    (True, "pgd"): "--",
    (True, "cd"): "--",
    (False, "fista"): 'dotted',
    (False, "apcg"): 'dotted',
}

dict_algo_name = {
    (False, "pgd"): "PGD",
    (False, "cd"): "PCD",
    (True, "pgd"): "PGD - Anderson",
    (True, "cd"): "PCD - Anderson",
    (False, "fista"): "PGD - inertial",
    (False, "apcg"): "PCD - inertial",
}

dataset_title = {
    "leukemia": "leukemia",
    "gina_agnostic": "gina agnostic",
    "hiva_agnostic": "hiva agnostic",
    "upselling": "upselling",
    "rcv1.binary": "rcv1",
    "news20.binary": "news20",
    "kdda_train": "kdd",
    "finance": "finance",
}

# Manual x-axis limits (iterations), keyed by (dataset, lambda divisor).
dict_xlim = {
    ("gina_agnostic", 10): 2500,
    ("gina_agnostic", 100): 3000,
    ("gina_agnostic", 1000): 10_000,
    ("rcv1.binary", 10): 1_000,
    ("rcv1.binary", 100): 5_000,
    ("rcv1.binary", 1000): 200_000,
    ("news20.binary", 10): 3_000,
    ("news20.binary", 100): 10_000,
    ("news20.binary", 1000): 150_000,
}

dataset_names = ["gina_agnostic", 'rcv1.binary', "news20.binary"]
div_alphas = [10, 100, 1_000]

# One panel per (dataset row, lambda column); the duality-gap figure and
# the suboptimality figure share the same grid layout.
fig, axarr = plt.subplots(
    len(dataset_names), len(div_alphas), sharex=False, sharey=True,
    figsize=[14, 8], constrained_layout=True)
fig_E, axarr_E = plt.subplots(
    len(dataset_names), len(div_alphas), sharex=False, sharey=True,
    figsize=[14, 8], constrained_layout=True)
for idx1, dataset in enumerate(dataset_names):
    for idx2, div_alpha in enumerate(div_alphas):
        # One pickle of results per (dataset, lambda divisor); keep only
        # the rows matching this divisor.
        df_data_all = pandas.read_pickle(
            "results/%s_%i.pkl" % (dataset, div_alpha))
        df_data = df_data_all[df_data_all['div_alpha'] == div_alpha]
        gaps = df_data['gaps']
        f_gaps = df_data['f_gaps']
        use_accs = df_data['use_acc']
        algo_names = df_data['algo_name']
        # Duality-gap curves, normalised by the first recorded gap.
        # ``f_gap`` is the iteration stride between recorded values, so
        # the x axis is in solver iterations.  Panels are addressed via
        # ``flat`` with row-major index idx1 * n_cols + idx2.
        for gap, f_gap, use_acc, algo_name in zip(
                gaps, f_gaps, use_accs, algo_names):
            axarr.flat[idx1 * len(div_alphas) + idx2].semilogy(
                f_gap * np.arange(len(gap)), gap / gap[0],
                label=dict_algo_name[use_acc, algo_name],
                linestyle=dict_linestyle[use_acc, algo_name],
                color=dict_color[algo_name])
        try:
            # Not every (dataset, lambda) pair has a manual x limit.
            axarr.flat[idx1 * len(div_alphas) + idx2].set_xlim(
                0, dict_xlim[dataset, div_alpha])
        except Exception:
            print("no xlim")
        axarr.flat[idx1 * len(div_alphas) + idx2].set_ylim((1e-10, 1))
        if idx1 == len(dataset_names) - 1:
            # x labels only on the bottom row of both figures.
            axarr.flat[
                (len(dataset_names) - 1) * len(div_alphas) + idx2].set_xlabel(
                r"iteration $k$")
            axarr_E.flat[
                (len(dataset_names) - 1) * len(div_alphas) + idx2].set_xlabel(
                r"iteration $k$")
        if idx1 == 0:
            # Column titles show the regularisation strength.
            axarr.flat[idx2].set_title(
                r"$\lambda =\lambda_{\max} / %i $ " % div_alpha)
            axarr_E.flat[idx2].set_title(
                r"$\lambda =\lambda_{\max} / %i $ " % div_alpha)
        axarr_E.flat[idx1 * len(div_alphas) + idx2].set_ylim((1e-12, 1))
        Es = df_data['E']
        # Best objective value across all solvers is used as the
        # reference optimum for the suboptimality curves.
        pobj_star = min(E.min() for E in Es)
        for E, f_gap, use_acc, algo_name in zip(
                Es, f_gaps, use_accs, algo_names):
            axarr_E.flat[idx1 * len(div_alphas) + idx2].semilogy(
                f_gap * np.arange(len(E)), (E - pobj_star) / E[0],
                label=dict_algo_name[use_acc, algo_name],
                linestyle=dict_linestyle[use_acc, algo_name],
                color=dict_color[algo_name])
        try:
            axarr_E.flat[idx1 * len(div_alphas) + idx2].set_xlim(
                0, dict_xlim[dataset, div_alpha])
        except Exception:
            print("no xlim")
        axarr.flat[idx1 * len(div_alphas) + idx2].set_yticks(
            (1, 1e-4, 1e-8, 1e-12))
        axarr_E.flat[idx1 * len(div_alphas) + idx2].set_yticks(
            (1, 1e-4, 1e-8, 1e-12))
    # Row labels (left column only; the index depends only on idx1, so
    # this runs once per dataset row).
    axarr.flat[idx1 * len(div_alphas)].set_ylabel(
        "%s" % dataset_title[dataset])
    axarr_E.flat[idx1 * len(div_alphas)].set_ylabel(
        "%s" % dataset_title[dataset])
# Flip to True to export both figures (PDF + SVG) plus a standalone
# legend file next to the repository root; the figures are shown either way.
save_fig = False
if save_fig:
    fig_dir = "../"
    fig_dir_svg = "../"
    fig.savefig("%sgaps_real_logreg.pdf" % fig_dir, bbox_inches="tight")
    fig.savefig("%sgaps_real_logreg.svg" % fig_dir_svg, bbox_inches="tight")
    fig_E.savefig("%senergies_real_logreg.pdf" % fig_dir, bbox_inches="tight")
    fig_E.savefig("%senergies_real_logreg.svg" % fig_dir_svg, bbox_inches="tight")
    _plot_legend_apart(
        axarr[0][0], "%senergies_real_logreg_legend.pdf" % fig_dir, ncol=6)

fig.show()
fig_E.show()
| import pandas
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from andersoncd.plot_utils import configure_plt, _plot_legend_apart
configure_plt()

# Colorblind-safe palette: full-gradient methods (PGD family) share one
# color and coordinate-descent methods (PCD family) another; Anderson
# variants are dashed and inertial variants dotted.
current_palette = sns.color_palette("colorblind")

dict_color = {
    "pgd": current_palette[0],
    "fista": current_palette[0],
    "cd": current_palette[1],
    "apcg": current_palette[1],
}

# Keyed by (use_acceleration, algorithm name).
dict_linestyle = {
    (False, "pgd"): "-",
    (False, "cd"): "-",
    (True, "pgd"): "--",
    (True, "cd"): "--",
    (False, "fista"): 'dotted',
    (False, "apcg"): 'dotted',
}

dict_algo_name = {
    (False, "pgd"): "PGD",
    (False, "cd"): "PCD",
    (True, "pgd"): "PGD - Anderson",
    (True, "cd"): "PCD - Anderson",
    (False, "fista"): "PGD - inertial",
    (False, "apcg"): "PCD - inertial",
}

dataset_title = {
    "leukemia": "leukemia",
    "gina_agnostic": "gina agnostic",
    "hiva_agnostic": "hiva agnostic",
    "upselling": "upselling",
    "rcv1.binary": "rcv1",
    "news20.binary": "news20",
    "kdda_train": "kdd",
    "finance": "finance",
}

# Manual x-axis limits (iterations), keyed by (dataset, lambda divisor).
dict_xlim = {
    ("gina_agnostic", 10): 2500,
    ("gina_agnostic", 100): 3000,
    ("gina_agnostic", 1000): 10_000,
    ("rcv1.binary", 10): 1_000,
    ("rcv1.binary", 100): 5_000,
    ("rcv1.binary", 1000): 200_000,
    ("news20.binary", 10): 3_000,
    ("news20.binary", 100): 10_000,
    ("news20.binary", 1000): 150_000,
}

dataset_names = ["gina_agnostic", 'rcv1.binary', "news20.binary"]
div_alphas = [10, 100, 1_000]

# One panel per (dataset row, lambda column); the duality-gap figure and
# the suboptimality figure share the same grid layout.
fig, axarr = plt.subplots(
    len(dataset_names), len(div_alphas), sharex=False, sharey=True,
    figsize=[14, 8], constrained_layout=True)
fig_E, axarr_E = plt.subplots(
    len(dataset_names), len(div_alphas), sharex=False, sharey=True,
    figsize=[14, 8], constrained_layout=True)
for idx1, dataset in enumerate(dataset_names):
    for idx2, div_alpha in enumerate(div_alphas):
        # One pickle of results per (dataset, lambda divisor); keep only
        # the rows matching this divisor.
        df_data_all = pandas.read_pickle(
            "results/%s_%i.pkl" % (dataset, div_alpha))
        df_data = df_data_all[df_data_all['div_alpha'] == div_alpha]
        gaps = df_data['gaps']
        f_gaps = df_data['f_gaps']
        use_accs = df_data['use_acc']
        algo_names = df_data['algo_name']
        # Duality-gap curves, normalised by the first recorded gap.
        # ``f_gap`` is the iteration stride between recorded values, so
        # the x axis is in solver iterations.  Panels are addressed via
        # ``flat`` with row-major index idx1 * n_cols + idx2.
        for gap, f_gap, use_acc, algo_name in zip(
                gaps, f_gaps, use_accs, algo_names):
            axarr.flat[idx1 * len(div_alphas) + idx2].semilogy(
                f_gap * np.arange(len(gap)), gap / gap[0],
                label=dict_algo_name[use_acc, algo_name],
                linestyle=dict_linestyle[use_acc, algo_name],
                color=dict_color[algo_name])
        try:
            # Not every (dataset, lambda) pair has a manual x limit.
            axarr.flat[idx1 * len(div_alphas) + idx2].set_xlim(
                0, dict_xlim[dataset, div_alpha])
        except Exception:
            print("no xlim")
        axarr.flat[idx1 * len(div_alphas) + idx2].set_ylim((1e-10, 1))
        if idx1 == len(dataset_names) - 1:
            # x labels only on the bottom row of both figures.
            axarr.flat[
                (len(dataset_names) - 1) * len(div_alphas) + idx2].set_xlabel(
                r"iteration $k$")
            axarr_E.flat[
                (len(dataset_names) - 1) * len(div_alphas) + idx2].set_xlabel(
                r"iteration $k$")
        if idx1 == 0:
            # Column titles show the regularisation strength.
            axarr.flat[idx2].set_title(
                r"$\lambda =\lambda_{\max} / %i $ " % div_alpha)
            axarr_E.flat[idx2].set_title(
                r"$\lambda =\lambda_{\max} / %i $ " % div_alpha)
        axarr_E.flat[idx1 * len(div_alphas) + idx2].set_ylim((1e-12, 1))
        Es = df_data['E']
        # Best objective value across all solvers is used as the
        # reference optimum for the suboptimality curves.
        pobj_star = min(E.min() for E in Es)
        for E, f_gap, use_acc, algo_name in zip(
                Es, f_gaps, use_accs, algo_names):
            axarr_E.flat[idx1 * len(div_alphas) + idx2].semilogy(
                f_gap * np.arange(len(E)), (E - pobj_star) / E[0],
                label=dict_algo_name[use_acc, algo_name],
                linestyle=dict_linestyle[use_acc, algo_name],
                color=dict_color[algo_name])
        try:
            axarr_E.flat[idx1 * len(div_alphas) + idx2].set_xlim(
                0, dict_xlim[dataset, div_alpha])
        except Exception:
            print("no xlim")
        axarr.flat[idx1 * len(div_alphas) + idx2].set_yticks(
            (1, 1e-4, 1e-8, 1e-12))
        axarr_E.flat[idx1 * len(div_alphas) + idx2].set_yticks(
            (1, 1e-4, 1e-8, 1e-12))
    # Row labels (left column only; the index depends only on idx1, so
    # this runs once per dataset row).
    axarr.flat[idx1 * len(div_alphas)].set_ylabel(
        "%s" % dataset_title[dataset])
    axarr_E.flat[idx1 * len(div_alphas)].set_ylabel(
        "%s" % dataset_title[dataset])
# Flip to True to export both figures (PDF + SVG) plus a standalone
# legend file next to the repository root; the figures are shown either way.
save_fig = False
if save_fig:
    fig_dir = "../"
    fig_dir_svg = "../"
    fig.savefig("%sgaps_real_logreg.pdf" % fig_dir, bbox_inches="tight")
    fig.savefig("%sgaps_real_logreg.svg" % fig_dir_svg, bbox_inches="tight")
    fig_E.savefig("%senergies_real_logreg.pdf" % fig_dir, bbox_inches="tight")
    fig_E.savefig("%senergies_real_logreg.svg" % fig_dir_svg, bbox_inches="tight")
    _plot_legend_apart(
        axarr[0][0], "%senergies_real_logreg_legend.pdf" % fig_dir, ncol=6)

fig.show()
fig_E.show()
| none | 1 | 2.344579 | 2 | |
Curso Udemy 2022/Curso_Luiz_Otavio/rascunho.py | Matheusfarmaceutico/Exercicios-Python | 0 | 6624143 | from random import randint
from random import randint


def gerar_cpf():
    """Return one random, valid CPF as an 11-digit string.

    Starts from 9 random digits and appends the two standard check
    digits: weights 10..2 over the first 9 digits, then 11..2 over the
    first 10; each digit is ``11 - (total % 11)``, mapped to 0 when
    above 9.
    """
    novo_cpf = str(randint(100000000, 999999999))
    reverso = 10
    total = 0
    for index in range(19):
        if index > 8:
            index -= 9
        total += int(novo_cpf[index]) * reverso
        reverso -= 1
        if reverso < 2:
            reverso = 11
            # BUG FIX: the check digit must be computed and appended
            # *inside* this branch, once per pass.  The original had
            # these lines after the loop, so the first digit was never
            # appended and novo_cpf[9] raised IndexError on the second
            # pass.
            digito = 11 - (total % 11)
            if digito > 9:
                digito = 0
            total = 0
            novo_cpf += str(digito)
    return novo_cpf


def main():
    """Interactive loop: ask how many CPFs to generate and print them."""
    while True:
        try:
            quantos = int(input('Quantos CPFs gostaria de gerar? '))
        except ValueError:
            # Non-numeric input: warn and ask again.  (The original used
            # a bare ``except``, which also swallowed KeyboardInterrupt.)
            print('Caracter Inválido! Tente novamente.')
            continue
        print([gerar_cpf() for _ in range(quantos)])
        option = ' '
        # Explicit tuple membership: with the original ``not in 'SsNn'``
        # an empty answer was accepted (and treated as "no"), because
        # '' is a substring of every string.
        while option not in ('S', 's', 'N', 'n'):
            option = str(input("Quer continuar gerando Cpf's? [S/N]: "))
        if option in ('N', 'n'):
            break


if __name__ == "__main__":
    main()
from random import randint


def gerar_cpf():
    """Return one random, valid CPF as an 11-digit string.

    Starts from 9 random digits and appends the two standard check
    digits: weights 10..2 over the first 9 digits, then 11..2 over the
    first 10; each digit is ``11 - (total % 11)``, mapped to 0 when
    above 9.
    """
    novo_cpf = str(randint(100000000, 999999999))
    reverso = 10
    total = 0
    for index in range(19):
        if index > 8:
            index -= 9
        total += int(novo_cpf[index]) * reverso
        reverso -= 1
        if reverso < 2:
            reverso = 11
            # BUG FIX: the check digit must be computed and appended
            # *inside* this branch, once per pass.  The original had
            # these lines after the loop, so the first digit was never
            # appended and novo_cpf[9] raised IndexError on the second
            # pass.
            digito = 11 - (total % 11)
            if digito > 9:
                digito = 0
            total = 0
            novo_cpf += str(digito)
    return novo_cpf


def main():
    """Interactive loop: ask how many CPFs to generate and print them."""
    while True:
        try:
            quantos = int(input('Quantos CPFs gostaria de gerar? '))
        except ValueError:
            # Non-numeric input: warn and ask again.  (The original used
            # a bare ``except``, which also swallowed KeyboardInterrupt.)
            print('Caracter Inválido! Tente novamente.')
            continue
        print([gerar_cpf() for _ in range(quantos)])
        option = ' '
        # Explicit tuple membership: with the original ``not in 'SsNn'``
        # an empty answer was accepted (and treated as "no"), because
        # '' is a substring of every string.
        while option not in ('S', 's', 'N', 'n'):
            option = str(input("Quer continuar gerando Cpf's? [S/N]: "))
        if option in ('N', 'n'):
            break


if __name__ == "__main__":
    main()
tests/unit/test_debug.py | RBrearton/islatu | 0 | 6624144 | <reponame>RBrearton/islatu
"""
This module contains a couple of simple tests for Islatu's debugger.
"""
from islatu.debug import debug
def test_debug_default_log_lvl():
    """Check that the shared debug object defaults to logging level 1."""
    expected_default = 1
    assert debug.logging_level == expected_default
def test_debug_log_lvl_change():
    """Check that the logging level can be reassigned and read back.

    Sets the level to 2, verifies it, then restores the default of 1.
    """
    for level in (2, 1):
        debug.logging_level = level
        assert debug.logging_level == level
| """
This module contains a couple of simple tests for Islatu's debugger.
"""
from islatu.debug import debug
def test_debug_default_log_lvl():
    """
    Make sure that the debugger starts out with a logging_lvl of 1.
    """
    # The module-level ``debug`` singleton should default to level 1.
    assert debug.logging_level == 1
def test_debug_log_lvl_change():
    """
    Make sure that we can change the logging level, if required.
    """
    # Raise the level and confirm the assignment took effect...
    debug.logging_level = 2
    assert debug.logging_level == 2
    # ...then restore the default so later tests see a clean state.
    debug.logging_level = 1
    assert debug.logging_level == 1
URCALC.py | piyushgarg116/codechef-solution | 5 | 6624145 | def URCALC():
ans=0.00000000
a=(float)(input())
b=(float)(input())
operator=raw_input()
if operator=="+":
ans=a+b
elif operator=="-":
ans=a-b
elif operator=="*":
ans=a*b
else:
ans=a/b
print ans
URCALC();
# NOTE(review): this file is Python 2 (print statement, raw_input).
def URCALC():
    """Read two numbers and an operator from stdin and print a <op> b.

    NOTE(review): ``input()`` in Python 2 eval()s the typed text (the
    C-style ``(float)(...)`` parentheses are redundant), it is mixed
    inconsistently with ``raw_input()`` below, and any operator other
    than +, -, * silently falls through to division.
    """
    # Initial value is dead: every branch below reassigns ``ans``.
    ans=0.00000000
    a=(float)(input())
    b=(float)(input())
    operator=raw_input()
    if operator=="+":
        ans=a+b
    elif operator=="-":
        ans=a-b
    elif operator=="*":
        ans=a*b
    else:
        # Fall-through: divides for any other operator; no zero guard.
        ans=a/b
    print ans
URCALC();
| none | 1 | 3.830186 | 4 | |
books_database_oop/personal_bookstore.py | rachida-sghr/misc-python | 1 | 6624146 | <filename>books_database_oop/personal_bookstore.py
from tkinter import *
from backend import Database
database = Database("books.db")
class Window:
    """Tkinter front end for the books database.

    Builds labelled entry widgets for every book field, a listbox for
    query results, and buttons wired to the CRUD methods below.  All
    persistence goes through the module-level ``database`` object.

    Bug fixes over the original: three NameErrors (missing ``self.`` on
    ``listbox.get``, ``selected_tuple`` and ``view_command``) and an
    IndexError when a <<ListboxSelect>> event fires with no selection.
    """
    def __init__(self, window):
        self.window = window
        self.window.wm_title("My Books")
        # title
        label_title = Label(window, text="Title")
        label_title.grid(row=0, column=0)
        self.title = StringVar()
        self.e_title = Entry(window, textvariable=self.title)
        self.e_title.grid(row=0, column=1)
        # author
        label_author = Label(window, text="Author")
        label_author.grid(row=0, column=2)
        self.author = StringVar()
        self.e_author = Entry(window, textvariable=self.author)
        self.e_author.grid(row=0, column=3)
        # year
        label_year = Label(window, text="Year")
        label_year.grid(row=1, column=0)
        self.year = StringVar()
        self.e_year = Entry(window, textvariable=self.year)
        self.e_year.grid(row=1, column=1)
        # country
        label_country = Label(window, text="Country")
        label_country.grid(row=1, column=2)
        self.country = StringVar()
        self.e_country = Entry(window, textvariable=self.country)
        self.e_country.grid(row=1, column=3)
        # borrowed
        label_borrowed = Label(window, text="Borrowed")
        label_borrowed.grid(row=2, column=0)
        self.borrowed = StringVar()
        self.e_borrowed = Entry(window, textvariable=self.borrowed)
        self.e_borrowed.grid(row=2, column=1)
        # list box showing query results; clicking a row selects it
        self.listbox = Listbox(window, height=10, width=40)
        self.listbox.grid(row=3, column=0, rowspan=6, columnspan=3)
        self.listbox.bind("<<ListboxSelect>>", self.get_selected_row)
        # buttons
        b_view = Button(window, text="View all", width=12, command=self.view_command)
        b_view.grid(row=3, column=3)
        b_search = Button(window, text="Search", width=12, command=self.search_command)
        b_search.grid(row=4, column=3)
        b_add = Button(window, text="Add", width=12, command=self.add_command)
        b_add.grid(row=5, column=3)
        b_update = Button(window, text="Update", width=12, command=self.update_command)
        b_update.grid(row=6, column=3)
        b_delete = Button(window, text="Delete", width=12, command=self.delete_command)
        b_delete.grid(row=7, column=3)

    def _field_values(self):
        """Return the five entry values in database column order."""
        return (self.title.get(), self.author.get(), self.year.get(),
                self.country.get(), self.borrowed.get())

    def view_command(self):
        """List every stored book in the listbox."""
        self.listbox.delete(0, END)
        for row in database.view():
            self.listbox.insert(END, row)

    def search_command(self):
        """Show only the books matching the current entry values."""
        self.listbox.delete(0, END)
        for row in database.search(*self._field_values()):
            self.listbox.insert(END, row)

    def add_command(self):
        """Insert the entry values as a new book and display it."""
        database.insert(*self._field_values())
        self.listbox.delete(0, END)
        self.listbox.insert(END, self._field_values())

    def get_selected_row(self, event):
        """Remember the clicked row and copy its fields into the entries."""
        selection = self.listbox.curselection()
        if not selection:
            # A <<ListboxSelect>> event can fire with nothing selected
            # (e.g. after the list is cleared); ignore it instead of
            # raising IndexError.
            return
        # curselection() returns a tuple of indices, hence the [0].
        index = selection[0]
        # Bug fix: was ``listbox.get(index)`` -- NameError, missing self.
        self.selected_tuple = self.listbox.get(index)
        # Fill the entries with the selected row; position 0 is the db id.
        entries = (self.e_title, self.e_author, self.e_year,
                   self.e_country, self.e_borrowed)
        for entry, value in zip(entries, self.selected_tuple[1:]):
            entry.delete(0, END)
            entry.insert(END, value)

    def delete_command(self):
        """Delete the selected book and clear the form."""
        # Bug fix: was ``database.delete(selected_tuple[0])`` -- NameError,
        # missing self.
        database.delete(self.selected_tuple[0])
        self.listbox.delete(ANCHOR)
        for entry in (self.e_title, self.e_author, self.e_year,
                      self.e_country, self.e_borrowed):
            entry.delete(0, END)

    def update_command(self):
        """Write the entry values back to the selected book and refresh."""
        database.update(self.selected_tuple[0], *self._field_values())
        # Bug fix: was ``view_command()`` -- NameError, missing self.
        self.view_command()
# Build the root window, attach the GUI, and enter the Tk event loop.
window = Tk()
Window(window)
window.mainloop()
from tkinter import *
from backend import Database
database = Database("books.db")
class Window:
    """Tkinter front end for the books database: entry widgets for each
    book field, a listbox of query results, and CRUD buttons backed by
    the module-level ``database`` object."""
    def __init__(self, window):
        self.window=window
        self.window.wm_title("My Books")
        # title
        label_title=Label(window, text="Title")
        label_title.grid(row=0, column=0)
        self.title=StringVar()
        self.e_title=Entry(window, textvariable=self.title)
        self.e_title.grid(row=0,column=1)
        # author
        label_author=Label(window, text="Author")
        label_author.grid(row=0, column=2)
        self.author=StringVar()
        self.e_author=Entry(window, textvariable=self.author)
        self.e_author.grid(row=0,column=3)
        # year
        label_year=Label(window, text="Year")
        label_year.grid(row=1, column=0)
        self.year=StringVar()
        self.e_year=Entry(window, textvariable=self.year)
        self.e_year.grid(row=1,column=1)
        # country
        label_country=Label(window, text="Country")
        label_country.grid(row=1, column=2)
        self.country=StringVar()
        self.e_country=Entry(window, textvariable=self.country)
        self.e_country.grid(row=1,column=3)
        # borrowed
        label_borrowed=Label(window, text="Borrowed")
        label_borrowed.grid(row=2, column=0)
        self.borrowed=StringVar()
        self.e_borrowed=Entry(window, textvariable=self.borrowed)
        self.e_borrowed.grid(row=2,column=1)
        # list box and attached scroll bar
        self.listbox=Listbox(window, height=10, width=40)
        self.listbox.grid(row=3, column=0, rowspan=6, columnspan=3)
        # get_selected_row function is triggered when user select an item in the listbox
        self.listbox.bind("<<ListboxSelect>>", self.get_selected_row)
        # scrollbar=Scrollbar(window)
        # scrollbar.grid(row=2,column=2,rowspan=6)
        # listbox.configure(yscrollcommand=scrollbar.set)
        # scrollbar.configure(command=listbox.yview)
        # buttons
        b_view=Button(window, text="View all", width=12, command=self.view_command)
        b_view.grid(row=3, column=3)
        b_search=Button(window, text="Search", width=12, command=self.search_command)
        b_search.grid(row=4, column=3)
        b_add=Button(window, text="Add", width=12, command=self.add_command)
        b_add.grid(row=5, column=3)
        b_update=Button(window, text="Update", width=12, command=self.update_command)
        b_update.grid(row=6, column=3)
        b_delete=Button(window, text="Delete", width=12, command=self.delete_command)
        b_delete.grid(row=7, column=3)
    def view_command(self):
        """List every stored book in the listbox."""
        self.listbox.delete(0,END)
        for row in database.view():
            self.listbox.insert(END, row)
    def search_command(self):
        """Show only the books matching the current entry values."""
        self.listbox.delete(0,END)
        for row in database.search(self.title.get(), self.author.get(), self.year.get(), self.country.get(), self.borrowed.get()):
            self.listbox.insert(END, row)
    def add_command(self):
        """Insert the entry values as a new book and display it."""
        database.insert(self.title.get(), self.author.get(), self.year.get(), self.country.get(), self.borrowed.get())
        self.listbox.delete(0,END)
        self.listbox.insert(END, (self.title.get(), self.author.get(), self.year.get(), self.country.get(), self.borrowed.get()))
    def get_selected_row(self, event):
        """Remember the clicked row and copy its fields into the entries."""
        # NOTE(review): curselection() can return an empty tuple (event
        # with no selection), which makes the [0] below raise IndexError.
        index=self.listbox.curselection()[0] #return index of the row in form of a tuple (index,) hence the [0]
        # NOTE(review): bug -- ``listbox`` is undefined here; should be
        # ``self.listbox.get(index)`` (NameError at runtime).
        self.selected_tuple=listbox.get(index)
        #fill entries with selected row
        self.e_title.delete(0, END)
        self.e_title.insert(END, self.selected_tuple[1])
        self.e_author.delete(0, END)
        self.e_author.insert(END, self.selected_tuple[2])
        self.e_year.delete(0,END)
        self.e_year.insert(END, self.selected_tuple[3])
        self.e_country.delete(0, END)
        self.e_country.insert(END, self.selected_tuple[4])
        self.e_borrowed.delete(0, END)
        self.e_borrowed.insert(END, self.selected_tuple[5])
    def delete_command(self):
        """Delete the selected book and clear the form."""
        # NOTE(review): bug -- ``selected_tuple`` is undefined here; should
        # be ``self.selected_tuple[0]`` (NameError at runtime).
        database.delete(selected_tuple[0])
        self.listbox.delete(ANCHOR)
        self.e_title.delete(0,END)
        self.e_author.delete(0,END)
        self.e_year.delete(0,END)
        self.e_country.delete(0,END)
        self.e_borrowed.delete(0,END)
    def update_command(self):
        """Write the entry values back to the selected book and refresh."""
        database.update(self.selected_tuple[0], self.title.get(), self.author.get(), self.year.get(), self.country.get(), self.borrowed.get())
        # NOTE(review): bug -- should be ``self.view_command()``; the bare
        # name is undefined (NameError at runtime).
        view_command()
# Build the root window, attach the GUI, and enter the Tk event loop.
window = Tk()
Window(window)
window.mainloop()
satchless/cart/handler.py | cajun-code/satchless | 1 | 6624147 | <reponame>cajun-code/satchless
from django.shortcuts import redirect
from ..product.forms import NonConfigurableVariantForm
from ..product.models import ProductAbstract, Variant
from ..product.signals import product_view, variant_formclass_for_product
from . import forms
from . import models
class AddToCartHandler(object):
    """
    Parametrized handler for `product_view`: builds an *add to cart* form
    for every product/variant shown, validates it on POST and, when the
    form is valid, adds the item to the cart.
    """
    def __init__(self, typ='satchless_cart',
                 addtocart_formclass=forms.AddToCartForm,
                 form_attribute='cart_form'):
        """
        Set up a parametrized handler for the product view.

        Accepts:
            * `typ`: the type of the cart to add to
            * `addtocart_formclass`: form class responsible for adding to cart
            * `form_attribute`: name of the instance attribute to store the form under
        """
        self.typ = typ
        self.addtocart_formclass = addtocart_formclass
        self.form_attribute = form_attribute

    def build_formclass(self, variant_formclass):
        # Combine add-to-cart behaviour with the variant form through
        # multiple inheritance; the add-to-cart class takes precedence.
        class AddVariantToCartForm(self.addtocart_formclass, variant_formclass):
            pass
        return AddVariantToCartForm

    def __call__(self, instances=None, request=None, extra_context=None,
                 **kwargs):
        """
        Accepts a list of Product or Variant instances and attaches an
        add-to-cart form to each.  On POST, validates the submitted form;
        if it is valid the item (a single variant, with the requested
        quantity) is added to the cart and a redirect to the cart page is
        returned.  Otherwise `extra_context` is returned for the template.

        Accepts parameters:
            * `instances`: products and/or variants being viewed
            * `request`: the HTTP request instance
            * `extra_context`: extra context passed through to the template
        """
        for obj in instances:
            if isinstance(obj, ProductAbstract):
                product, variant = obj, None
            elif isinstance(obj, Variant):
                product, variant = obj.product, obj
            else:
                raise ValueError("Received unknown type: %s" %
                                 type(obj).__name__)
            # Signal receivers append candidate variant form classes to
            # this mutable list.
            formclass = []
            variant_formclass_for_product.send(sender=type(product),
                                               instance=product,
                                               formclass=formclass)
            if len(formclass) > 1:
                raise ValueError("Multiple form classes returned for %s : %s." %
                                 (product._meta.object_name, formclass))
            if not formclass:
                formclass = [NonConfigurableVariantForm]
            FormCls = self.build_formclass(formclass[0])
            if request.method == 'POST':
                cart = models.Cart.objects.get_or_create_from_request(
                    request, self.typ)
                form = FormCls(data=request.POST, cart=cart, product=product,
                               variant=variant, typ=self.typ)
                if form.is_valid():
                    form.save()
                    return redirect('satchless-cart-view', typ=self.typ)
            else:
                form = FormCls(data=None, product=product, variant=variant,
                               typ=self.typ)
            # Attach the (bound or unbound) form to the viewed instance.
            setattr(obj, self.form_attribute, form)
        return extra_context
| from django.shortcuts import redirect
from ..product.forms import NonConfigurableVariantForm
from ..product.models import ProductAbstract, Variant
from ..product.signals import product_view, variant_formclass_for_product
from . import forms
from . import models
class AddToCartHandler(object):
    """
    Parametrized handler for `product_view`, which produces *add to cart* forms,
    validates them and performs all the logic of adding an item to a cart.
    """
    def __init__(self, typ='satchless_cart', addtocart_formclass=forms.AddToCartForm,
                 form_attribute='cart_form'):
        """
        Sets up a parametrized handler for product view.

        Accepts:
            * `typ`: the type of the cart to add to
            * `addtocart_formclass`: form class responsible for adding to cart.
            * `form_attribute`: name of instance's attribute to save the form under.
        """
        self.typ = typ
        self.form_attribute = form_attribute
        self.addtocart_formclass = addtocart_formclass

    def build_formclass(self, variant_formclass):
        # Combine the configured add-to-cart form with the product's variant
        # form via multiple inheritance (add-to-cart class takes precedence).
        class AddVariantToCartForm(self.addtocart_formclass, variant_formclass):
            pass
        return AddVariantToCartForm

    def __call__(self, instances=None, request=None, extra_context=None, **kwargs):
        """
        Accepts a list of Product or Variant instances. For every of them finds
        add-to-cart form. For a POST request, performs validation and if it
        succeeds, adds item to cart and returns redirect to the cart page.
        It handles adding only a single variant to the cart, but with the quantity
        specified in request.

        Accepts parameters:
            * `instances`: products and/or variants being viewed
            * `request`: the HTTP request instance
            * `extra_context`: extra context that will be passed to template
        """
        for instance in instances:
            # Signal receivers append candidate form classes to this list.
            formclass = []
            if isinstance(instance, ProductAbstract):
                product = instance
                variant = None
            elif isinstance(instance, Variant):
                product = instance.product
                variant = instance
            else:
                raise ValueError("Received unknown type: %s" %
                                 type(instance).__name__)
            variant_formclass_for_product.send(sender=type(product),
                                               instance=product,
                                               formclass=formclass)
            # Exactly one form class may be supplied; none means the product
            # has no configurable variant options.
            if len(formclass) > 1:
                raise ValueError("Multiple form classes returned for %s : %s." %
                                 (product._meta.object_name, formclass))
            elif not len(formclass):
                formclass = [NonConfigurableVariantForm]
            Form = self.build_formclass(formclass[0])
            if request.method == 'POST':
                cart = models.Cart.objects.get_or_create_from_request(request,
                                                                      self.typ)
                form = Form(data=request.POST, cart=cart, product=product,
                            variant=variant, typ=self.typ)
                if form.is_valid():
                    form.save()
                    return redirect('satchless-cart-view', typ=self.typ)
            else:
                # GET: build an unbound form for display.
                form = Form(data=None, product=product, variant=variant,
                            typ=self.typ)
            # Attach the form to instance
            setattr(instance, self.form_attribute, form)
        return extra_context
1-100/17/17.py | Thomaw/Project-Euler | 0 | 6624148 | <reponame>Thomaw/Project-Euler<gh_stars>0
# Project Euler 17: count the letters used when writing out the numbers
# 1..1000 in British English ("and" included, no spaces or hyphens).
#
# Fixed here: stray "/" line-continuation artifacts that made the dict
# literal a SyntaxError (no continuation is needed inside {...}), and the
# Python-2-only "/" integer division, now "//" (valid in Python 2 and 3).
s = {0: "", 1: "one", 2: "two", 3: "three", 4: "four", 5: "five",
     6: "six", 7: "seven", 8: "eight", 9: "nine", 10: "ten",
     11: "eleven", 12: "twelve", 13: "thirteen", 14: "fourteen",
     15: "fifteen", 16: "sixteen", 17: "seventeen", 18: "eighteen",
     19: "nineteen", 20: "twenty", 30: "thirty", 40: "forty",
     50: "fifty", 60: "sixty", 70: "seventy", 80: "eighty",
     90: "ninety"}
for i in range(1, 1000):
    if i not in s:
        if i < 100:
            # 21..99: tens word + units word (e.g. "twenty" + "one").
            s[i] = s[i // 10 * 10] + s[i % 10]
        else:
            # 100..999: units word + "hundred" (+ "and" + remainder).
            s[i] = s[i // 100] + "hundred"
            if i % 100:
                s[i] += "and" + s[i % 100]
s[1000] = "onethousand"
# Sum the word lengths over every stored number (s[0] is "" and adds 0).
total = 0
for i in s.values():
    total += len(i)
# Project Euler 17: letter count of the numbers 1..1000 written out in
# words ("and" included, no spaces or hyphens).
# NOTE(review): the trailing "/" on the next six lines looks like a
# corrupted "\" line continuation; as written the dict literal is a
# SyntaxError ('"six" / ,7:' etc.).  Inside {...} no continuation is
# needed at all -- the "/" characters should simply be removed.
s={0:"",1:"one",2:"two",3:"three",4:"four",5:"five",6:"six"/
,7:"seven",8:"eight",9:"nine",10:"ten",11:"eleven"/
,12:"twelve",13:"thirteen",14:"fourteen",15:"fifteen"/
,16:"sixteen",17:"seventeen",18:"eighteen",19:"nineteen"/
,20:"twenty",30:"thirty",40:"forty",50:"fifty"/
,60:"sixty",70:"seventy",80:"eighty",90:"ninety"}
# NOTE(review): "/" below is Python 2 integer division; under Python 3 it
# would need to be "//".
for i in range(1,1000):
    if(not i in s.keys()):
        if(i<100):
            # 21..99: tens word + units word.
            s[i]=s[i/10*10]+s[i%10]
        else:
            # 100..999: units word + "hundred" (+ "and" + remainder).
            s[i]=s[i/100]+"hundred"
            if(i%100):
                s[i]+="and"+s[i%100]
s[1000]="onethousand"
# Sum the word lengths over every stored number (the answer is ``total``).
total=0;
for i in s.values():
    total+=len(i)
control/script/attack.py | bianzhenkun/IntelligentShip | 1 | 6624149 | #!/usr/bin/env python3
"""
Stanley Control
Author: SheffieldWang
"""
#import basic
import math
import numpy as np
#import ROS
import rospy
from nav_msgs.msg import Path
from std_msgs.msg import Int64,Bool
from geometry_msgs.msg import PoseStamped
from control.msg import Command
class AttackNode():
    """ROS node that turns brainwave signals into "attack" control commands.

    The voice topic arms the node: once a Voice_flag message with value 2
    arrives, every BrainWave_flag message is translated into a control
    Command whose acceleration field is 3 (attack) or 0.
    """
    def __init__(self):
        # Outgoing command message and its field values.
        self.command_ = Command()
        self.dl_ = 0.0          # steering
        self.ai_ = 0            # acceleration; 3 means attack
        self.flag_ = 1          # 0: no run, 1: run
        self.attack_flag = False
        self.voice_flag = 0
        # ROS wiring: two subscribers, one publisher.
        self.brain_sub_ = rospy.Subscriber("BrainWave_flag", Bool,
                                           self.callbackFromBrain)
        self.voice_sub = rospy.Subscriber("Voice_flag", Int64,
                                          self.callbackFromVoice)
        self.command_pub_ = rospy.Publisher("control_command", Command,
                                            queue_size=1)

    def run(self):
        """Block until the node is shut down."""
        rospy.spin()

    def callbackFromBrain(self, msg):
        """Translate a brainwave flag into a command (only when armed)."""
        if self.voice_flag != 2:
            return
        self.attack_flag = msg.data
        self.ai_ = 3 if self.attack_flag == True else 0
        self.dl_ = 0
        self.flag_ = 0
        self.publishCommand()

    def callbackFromVoice(self, msg):
        """Record the voice flag; value 2 arms the attack."""
        self.voice_flag = msg.data
        if self.voice_flag == 2:
            print("Attack")

    def publishCommand(self):
        """Stamp the command with the current field values and publish it."""
        self.command_.header.frame_id = "map"
        self.command_.header.stamp = rospy.Time.now()
        self.command_.steer = self.dl_
        self.command_.a = self.ai_
        self.command_.flag = self.flag_
        if self.command_.a == 3:
            print("attack!!!")
        self.command_pub_.publish(self.command_)
# Entry point: register the ROS node, build the handler, and spin forever.
if __name__ == '__main__':
    print("attack node start!")
    rospy.init_node('attack_node', anonymous=True)
    atcn=AttackNode()
    atcn.run()
| #!/usr/bin/env python3
"""
Stanley Control
Author: SheffieldWang
"""
#import basic
import math
import numpy as np
#import ROS
import rospy
from nav_msgs.msg import Path
from std_msgs.msg import Int64,Bool
from geometry_msgs.msg import PoseStamped
from control.msg import Command
class AttackNode():
    """ROS node that turns brainwave signals into "attack" control
    commands.  A Voice_flag of 2 arms the node; armed, each BrainWave_flag
    message is published as a Command with acceleration 3 (attack) or 0."""
    def __init__(self):
        #command
        self.command_ = Command()
        self.dl_ = 0.0
        # 3 attack
        self.ai_ = 0
        # 0:no run 1:run
        self.flag_ = 1
        # attack_flag
        self.attack_flag=False
        #voice flag
        self.voice_flag = 0
        #ros
        self.brain_sub_ = rospy.Subscriber("BrainWave_flag",Bool,self.callbackFromBrain)
        # voice sub
        self.voice_sub = rospy.Subscriber("Voice_flag",Int64,self.callbackFromVoice)
        self.command_pub_ = rospy.Publisher("control_command",Command,queue_size=1)
    def run(self):
        """Block until the node is shut down."""
        rospy.spin()
    def callbackFromBrain(self,msg):
        """Translate a brainwave flag into a command, but only once the
        voice flag has armed the node (voice_flag == 2)."""
        if(self.voice_flag==2):
            self.attack_flag = msg.data
            if(self.attack_flag == True):
                self.ai_ = 3
            else:
                self.ai_=0
            #print(self.ai_)
            self.dl_=0
            self.flag_=0
            self.publishCommand()
    def callbackFromVoice(self,msg):
        """Record the voice flag; value 2 arms the attack."""
        self.voice_flag = msg.data
        if(self.voice_flag == 2):
            print("Attack")
    def publishCommand(self):
        """Stamp the command with the current field values and publish it."""
        self.command_.header.frame_id = "map"
        self.command_.header.stamp = rospy.Time.now()
        self.command_.steer = self.dl_
        self.command_.a = self.ai_
        self.command_.flag = self.flag_
        if(self.command_.a == 3):
            print("attack!!!")
        self.command_pub_.publish(self.command_)
        #rospy.sleep(0.1)
# Entry point: register the ROS node, build the handler, and spin forever.
if __name__ == '__main__':
    print("attack node start!")
    rospy.init_node('attack_node', anonymous=True)
    atcn=AttackNode()
    atcn.run()
| en | 0.277076 | #!/usr/bin/env python3 Stanley Control Author: SheffieldWang #import basic #import ROS #command # 3 attack # 0:no run 1:run # attack_flag #voice flag #ros # voice sub #print(self.ai_) #rospy.sleep(0.1) | 2.505961 | 3 |
python-pandas/Python_Pandas/dataframe/CreateDfFromList.py | theumang100/tutorials-1 | 9 | 6624150 | import pandas as pd
# Demo 1: a DataFrame from a plain list -- one unnamed (0) column holding
# the values 1..5.  The original built the list with a pointless identity
# comprehension; list(range(...)) says the same thing directly.
df = pd.DataFrame(data=list(range(1, 6)))
print("DataFrame using list : ")
print(df)

# Demo 2: a DataFrame from a list of rows, naming the columns explicitly.
df = pd.DataFrame(
    data=[['foo', '<EMAIL>'], ['bar', '<EMAIL>'], ['buz', '<EMAIL>']],
    columns=['Username', 'Email'],
)
print("DataFrame using list with columns attribute : ")
print(df)
df = pd.DataFrame(data=[_ for _ in range(1,6)])
print("DataFrame using list : ")
print(df)
df = pd.DataFrame(data=[['foo','<EMAIL>',],['bar','<EMAIL>'],['buz','<EMAIL>']],columns=['Username','Email'])
print("DataFrame using list with columns attribute : ")
print(df) | none | 1 | 3.975735 | 4 |