# Dataset-export residue (parquet/table header) — not Python code; commented out so the file parses.
# content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1
# |---|---|---|---|---|---|---|
import random
from collections import defaultdict, deque
import logging
import operator as op
import time
from enum import unique, Flag
from functools import reduce
from BaseClasses import RegionType, Door, DoorType, Direction, Sector, CrystalBarrier
from Regions import key_only_locations
from Dungeons import hyrule_castle_regions, eastern_regions, desert_regions, hera_regions, tower_regions, pod_regions
from Dungeons import dungeon_regions, region_starts, split_region_starts, flexible_starts
from Dungeons import drop_entrances, dungeon_bigs, dungeon_keys, dungeon_hints
from Items import ItemFactory
from RoomData import DoorKind, PairedDoor
from DungeonGenerator import ExplorationState, convert_regions, generate_dungeon, validate_tr
from DungeonGenerator import create_dungeon_builders, split_dungeon_builder, simple_dungeon_builder
from KeyDoorShuffle import analyze_dungeon, validate_vanilla_key_logic, build_key_layout, validate_key_layout
def link_doors(world, player):
    """Wire up every intra-dungeon door connection for *player* according to the
    door-shuffle mode in world.doorShuffle (vanilla / basic / crossed)."""
    # Drop-down connections & push blocks: always wired identically.
    for door_exit, target_region in logical_connections:
        connect_simple_door(world, door_exit, target_region, player)
    # Interior doors are currently never shuffled.
    for edge_a, edge_b in interior_doors:
        connect_interior_doors(edge_a, edge_b, world, player)
    # These connections cannot be shuffled yet: staircases, pits, warps, ladders.
    for way_in, way_out in straight_staircases:
        connect_two_way(world, way_in, way_out, player)
    for door_exit, target_region in falldown_pits:
        connect_simple_door(world, door_exit, target_region, player)
    for door_exit, target_region in dungeon_warps:
        connect_simple_door(world, door_exit, target_region, player)
    for way_in, way_out in ladders:
        connect_two_way(world, way_in, way_out, player)
    mode = world.doorShuffle[player]
    if mode == 'vanilla':
        for way_in, way_out in open_edges:
            connect_two_way(world, way_in, way_out, player)
        for door_exit, target_region in vanilla_logical_connections:
            connect_simple_door(world, door_exit, target_region, player)
        for way_in, way_out in spiral_staircases:
            connect_two_way(world, way_in, way_out, player)
        for way_in, way_out in default_door_connections:
            connect_two_way(world, way_in, way_out, player)
        for way_in, way_out in default_one_way_connections:
            connect_one_way(world, way_in, way_out, player)
        vanilla_key_logic(world, player)
    elif mode == 'basic':
        # if not world.experimental[player]:
        for way_in, way_out in open_edges:
            connect_two_way(world, way_in, way_out, player)
        within_dungeon(world, player)
    elif mode == 'crossed':
        for way_in, way_out in open_edges:
            connect_two_way(world, way_in, way_out, player)
        cross_dungeon(world, player)
    else:
        logging.getLogger('').error('Invalid door shuffle setting: %s' % mode)
        raise Exception('Invalid door shuffle setting: %s' % mode)
    if mode != 'vanilla':
        create_door_spoiler(world, player)
# todo: I think this function is not necessary
def mark_regions(world, player):
    """Flood-fill each of the player's dungeons so every reachable dungeon region
    is listed in dungeon.regions and has its region.dungeon backlink set."""
    # traverse dungeons and make sure dungeon property is assigned
    player_dungeons = [dungeon for dungeon in world.dungeons if dungeon.player == player]
    for dungeon in player_dungeons:
        # NOTE(review): the queue starts with region *names* but the loop below
        # appends Region *objects* — presumably world.get_region accepts both;
        # confirm before relying on this.
        queue = deque(dungeon.regions)
        while len(queue) > 0:
            region = world.get_region(queue.popleft(), player)
            if region.name not in dungeon.regions:
                dungeon.regions.append(region.name)
                region.dungeon = dungeon
            for ext in region.exits:
                d = world.check_for_door(ext.name, player)
                connected = ext.connected_region
                if d is not None and connected is not None:
                    # door present: only traverse it when it actually leads somewhere
                    if d.dest is not None and connected.name not in dungeon.regions and connected.type == RegionType.Dungeon and connected.name not in queue:
                        queue.append(connected)  # needs to be added
                elif connected is not None and connected.name not in dungeon.regions and connected.type == RegionType.Dungeon and connected.name not in queue:
                    queue.append(connected)  # needs to be added
def create_door_spoiler(world, player):
    """Walk each generated dungeon layout and record Normal/SpiralStairs door
    pairings in the spoiler log (one-ways are logged as 'entrance' only)."""
    logger = logging.getLogger('')
    for builder in list(world.dungeon_layouts[player].values()):
        recorded = set()
        start_regions = set(convert_regions(builder.layout_starts, world, player))  # todo: set all_entrances for basic
        stack = deque(start_regions)
        seen = set(start_regions)
        while stack:
            region = stack.pop()
            for ext in region.exits:
                door_a = ext.door
                neighbor = ext.connected_region
                if door_a and door_a.type in [DoorType.Normal, DoorType.SpiralStairs] and door_a not in recorded:
                    recorded.add(door_a)
                    door_b = door_a.dest
                    if door_b:
                        recorded.add(door_b)
                        if not door_a.blocked and not door_b.blocked:
                            world.spoiler.set_door(door_a.name, door_b.name, 'both', player, builder.name)
                        elif door_a.blocked:
                            world.spoiler.set_door(door_b.name, door_a.name, 'entrance', player, builder.name)
                        elif door_b.blocked:
                            world.spoiler.set_door(door_a.name, door_b.name, 'entrance', player, builder.name)
                        else:
                            logger.warning('This is a bug during door spoiler')
                    else:
                        logger.warning('Door not connected: %s', door_a.name)
                if neighbor and neighbor.type == RegionType.Dungeon and neighbor not in seen:
                    seen.add(neighbor)
                    stack.append(neighbor)
def vanilla_key_logic(world, player):
    """Build one single-sector builder per vanilla dungeon, then analyze and
    record small-key logic for each.

    Builders whose entrances are not yet enabled are requeued; if a builder
    cycles without progress (or after 1000 retries) an Exception is raised.
    """
    builders = []
    world.dungeon_layouts[player] = {}
    for dungeon in [dungeon for dungeon in world.dungeons if dungeon.player == player]:
        sector = Sector()
        sector.name = dungeon.name
        sector.regions.extend(convert_regions(dungeon.regions, world, player))
        builder = simple_dungeon_builder(sector.name, [sector])
        builder.master_sector = sector
        builders.append(builder)
        world.dungeon_layouts[player][builder.name] = builder
    overworld_prep(world, player)
    entrances_map, potentials, connections = determine_entrance_list(world, player)
    enabled_entrances = {}
    sector_queue = deque(builders)
    last_key, loops = None, 0
    while len(sector_queue) > 0:
        builder = sector_queue.popleft()
        origin_list = list(entrances_map[builder.name])
        find_enabled_origins(builder.sectors, enabled_entrances, origin_list, entrances_map, builder.name)
        origin_list_sans_drops = remove_drop_origins(origin_list)
        if len(origin_list_sans_drops) <= 0:
            if last_key == builder.name or loops > 1000:
                origin_name = world.get_region(origin_list[0], player).entrances[0].parent_region.name
                # bugfix: both values must be formatted as a tuple; previously the
                # '%' was applied to builder.name alone (TypeError at raise time)
                raise Exception('Infinite loop detected for "%s" located at %s' % (builder.name, origin_name))
            sector_queue.append(builder)
            last_key = builder.name
            loops += 1
        else:
            find_new_entrances(builder.master_sector, entrances_map, connections, potentials, enabled_entrances, world, player)
            start_regions = convert_regions(origin_list, world, player)
            doors = convert_key_doors(default_small_key_doors[builder.name], world, player)
            key_layout = build_key_layout(builder, start_regions, doors, world, player)
            valid = validate_key_layout(key_layout, world, player)
            if not valid:
                logging.getLogger('').warning('Vanilla key layout not valid %s', builder.name)
            if player not in world.key_logic.keys():
                world.key_logic[player] = {}
            analyze_dungeon(key_layout, world, player)
            world.key_logic[player][builder.name] = key_layout.key_logic
            log_key_logic(builder.name, key_layout.key_logic)
            last_key = None
    # Vanilla item-accessibility worlds can have their key logic fully validated.
    if world.shuffle[player] == 'vanilla' and world.accessibility[player] == 'items' and not world.retro[player]:
        validate_vanilla_key_logic(world, player)
# some useful functions
# Each Direction maps to its opposite (both orientations listed explicitly).
oppositemap = {
    Direction.North: Direction.South,
    Direction.South: Direction.North,
    Direction.East: Direction.West,
    Direction.West: Direction.East,
    Direction.Down: Direction.Up,
    Direction.Up: Direction.Down,
}


def switch_dir(direction):
    """Return the Direction opposite to *direction* (KeyError if unknown)."""
    return oppositemap[direction]
def convert_key_doors(key_doors, world, player):
    """Resolve door names — or (name, name) pair tuples — into Door objects."""
    converted = []
    for entry in key_doors:
        if type(entry) is tuple:
            converted.append((world.get_door(entry[0], player), world.get_door(entry[1], player)))
        else:
            converted.append(world.get_door(entry, player))
    return converted
def connect_simple_door(world, exit_name, region_name, player):
    """One-way link: attach the named exit to the named region, and point the
    exit's door (when one exists) at that region."""
    target = world.get_region(region_name, player)
    world.get_entrance(exit_name, player).connect(target)
    door = world.check_for_door(exit_name, player)
    if door is not None:
        door.dest = target
def connect_door_only(world, exit_name, region, player):
    """Point the door behind *exit_name* (if any) at *region*, without touching
    entrance/region wiring."""
    door = world.check_for_door(exit_name, player)
    if door is not None:
        door.dest = region
def connect_interior_doors(a, b, world, player):
    """Connect an interior door pair; a blocked side becomes a one-way destination."""
    door_a = world.get_door(a, player)
    door_b = world.get_door(b, player)
    if door_a.blocked:
        connect_one_way(world, b, a, player)
    elif door_b.blocked:
        connect_one_way(world, a, b, player)
    else:
        connect_two_way(world, a, b, player)
def connect_two_way(world, entrancename, exitname, player):
    """Bidirectionally link two entrances: fix stale back-references, propagate
    dungeon ownership, and cross-link the underlying doors' dest pointers."""
    front = world.get_entrance(entrancename, player)
    back = world.get_entrance(exitname, player)
    # if these were already connected somewhere, remove the backreference
    for side in (front, back):
        if side.connected_region is not None:
            side.connected_region.entrances.remove(side)
    front.connect(back.parent_region)
    back.connect(front.parent_region)
    if front.parent_region.dungeon:
        back.parent_region.dungeon = front.parent_region.dungeon
    door_front = world.check_for_door(entrancename, player)
    door_back = world.check_for_door(exitname, player)
    if door_front is not None:
        door_front.dest = door_back
    if door_back is not None:
        door_back.dest = door_front
def connect_one_way(world, entrancename, exitname, player):
    """Link the first entrance to the second's region (one direction only),
    while still cross-linking the doors and propagating dungeon ownership."""
    front = world.get_entrance(entrancename, player)
    back = world.get_entrance(exitname, player)
    # if these were already connected somewhere, remove the backreference
    for side in (front, back):
        if side.connected_region is not None:
            side.connected_region.entrances.remove(side)
    front.connect(back.parent_region)
    if front.parent_region.dungeon:
        back.parent_region.dungeon = front.parent_region.dungeon
    door_front = world.check_for_door(entrancename, player)
    door_back = world.check_for_door(exitname, player)
    if door_front is not None:
        door_front.dest = door_back
    if door_back is not None:
        door_back.dest = door_front
def fix_big_key_doors_with_ugly_smalls(world, player):
    """Normalize the known 'ugly' small-key doors and unpair the big-key doors
    that sit behind them, so shuffled layouts never show an unsupported door."""
    remove_ugly_small_key_doors(world, player)
    unpair_big_key_doors(world, player)
def remove_ugly_small_key_doors(world, player):
    """Convert the hard-coded 'ugly' small key doors back to plain doors."""
    ugly_door_names = ['Eastern Hint Tile Blocked Path SE', 'Eastern Darkness S', 'Thieves Hallway SE',
                       'Mire Left Bridge S', 'TR Lava Escape SE', 'GT Hidden Spikes SE']
    for name in ugly_door_names:
        door = world.get_door(name, player)
        world.get_room(door.roomIndex, player).change(door.doorListPos, DoorKind.Normal)
        door.smallKey = False
        door.ugly = False
def unpair_big_key_doors(world, player):
    """Unpair big key doors that cannot safely remain paired after ugly-door removal."""
    problematic = ('Eastern Courtyard N', 'Eastern Big Key NE', 'Thieves BK Corner NE',
                   'Mire BK Door Room N', 'TR Dodgers NE', 'GT Dash Hall NE')
    for pd in world.paired_doors[player]:
        if pd.door_a in problematic or pd.door_b in problematic:
            pd.pair = False
def pair_existing_key_doors(world, player, door_a, door_b):
    """Pair door_a with door_b, first dissolving any other pairing that
    involves exactly one of the two doors. No-op if already paired together."""
    names = [door_a.name, door_b.name]
    # Already paired with each other? Nothing to do.
    if any(pd.door_a in names and pd.door_b in names for pd in world.paired_doors[player]):
        return
    # Break every pairing that touches either door.
    for pd in world.paired_doors[player]:
        if pd.door_a in names or pd.door_b in names:
            pd.pair = False
    world.paired_doors[player].append(PairedDoor(door_a, door_b))
# def unpair_all_doors(world, player):
# for paired_door in world.paired_doors[player]:
# paired_door.pair = False
def within_dungeon(world, player):
    """'basic' door shuffle: regenerate each dungeon from its own sectors,
    verify required paths, then shuffle key doors."""
    fix_big_key_doors_with_ugly_smalls(world, player)
    overworld_prep(world, player)
    connections_tuple = determine_entrance_list(world, player)
    entrances_map = connections_tuple[0]
    dungeon_builders = {}
    for name, regions in dungeon_regions.items():
        builder = simple_dungeon_builder(name, convert_to_sectors(regions, world, player))
        builder.entrance_list = list(entrances_map[name])
        dungeon_builders[name] = builder
    recombinant_builders = {}
    handle_split_dungeons(dungeon_builders, recombinant_builders, entrances_map)
    main_dungeon_generation(dungeon_builders, recombinant_builders, connections_tuple, world, player)
    check_required_paths(determine_required_paths(world, player), world, player)
    # shuffle_key_doors for dungeons
    start = time.process_time()
    for builder in world.dungeon_layouts[player].values():
        shuffle_key_doors(builder, world, player)
    logging.getLogger('').info('Key door shuffle time: %s', time.process_time() - start)
    smooth_door_pairs(world, player)
def handle_split_dungeons(dungeon_builders, recombinant_builders, entrances_map):
    """Replace multi-part dungeons with per-entrance sub-builders, remembering
    the parent builder so the layouts can be recombined later."""
    for name, split_list in split_region_starts.items():
        parent = dungeon_builders.pop(name)
        recombinant_builders[name] = parent
        dungeon_builders.update(split_dungeon_builder(parent, split_list))
        for sub_name, split_entrances in split_list.items():
            sub_builder = dungeon_builders[name + ' ' + sub_name]
            sub_builder.split_flag = True
            entrance_list = list(split_entrances)
            if name in flexible_starts.keys():
                add_shuffled_entrances(sub_builder.sectors, flexible_starts[name], entrance_list)
            # Keep only entrances the parent dungeon actually exposes.
            sub_builder.entrance_list = [x for x in entrance_list if x in entrances_map[name]]
def main_dungeon_generation(dungeon_builders, recombinant_builders, connections_tuple, world, player):
    """Generate a layout for every builder, requeueing builders whose entrances
    are not yet enabled; recombine split dungeons and publish the layouts.

    Raises Exception when a builder cycles without gaining an entrance
    (deadlock guard, also tripped after 1000 retries).
    """
    entrances_map, potentials, connections = connections_tuple
    enabled_entrances = {}
    sector_queue = deque(dungeon_builders.values())
    last_key, loops = None, 0
    while len(sector_queue) > 0:
        builder = sector_queue.popleft()
        split_dungeon = builder.name.startswith('Desert Palace') or builder.name.startswith('Skull Woods')
        name = builder.name
        if split_dungeon:
            # Sub-builders are named '<Dungeon> <Part>'; strip the part suffix.
            name = ' '.join(builder.name.split(' ')[:-1])
        origin_list = list(builder.entrance_list)
        find_enabled_origins(builder.sectors, enabled_entrances, origin_list, entrances_map, name)
        origin_list_sans_drops = remove_drop_origins(origin_list)
        if len(origin_list_sans_drops) <= 0 or name == "Turtle Rock" and not validate_tr(builder, origin_list_sans_drops, world, player):
            if last_key == builder.name or loops > 1000:
                origin_name = world.get_region(origin_list[0], player).entrances[0].parent_region.name
                # bugfix: both values must be formatted as a tuple; previously the
                # '%' was applied to builder.name alone (TypeError at raise time)
                raise Exception('Infinite loop detected for "%s" located at %s' % (builder.name, origin_name))
            sector_queue.append(builder)
            last_key = builder.name
            loops += 1
        else:
            logging.getLogger('').info('Generating dungeon: %s', builder.name)
            ds = generate_dungeon(builder, origin_list_sans_drops, split_dungeon, world, player)
            find_new_entrances(ds, entrances_map, connections, potentials, enabled_entrances, world, player)
            ds.name = name
            builder.master_sector = ds
            builder.layout_starts = origin_list if len(builder.entrance_list) <= 0 else builder.entrance_list
            last_key = None
    combine_layouts(recombinant_builders, dungeon_builders, entrances_map)
    world.dungeon_layouts[player] = {}
    for builder in dungeon_builders.values():
        find_enabled_origins([builder.master_sector], enabled_entrances, builder.layout_starts, entrances_map, builder.name)
        builder.path_entrances = entrances_map[builder.name]
    world.dungeon_layouts[player] = dungeon_builders
def determine_entrance_list(world, player):
    """Classify each dungeon start region as immediately usable or only potential.

    Returns (entrance_map, potential_entrances, connections):
      entrance_map: dungeon name -> start region names reachable right now
      potential_entrances: outside parent region -> start region names gated by it
      connections: start region name -> its outside parent region
    """
    entrance_map = {}
    potential_entrances = {}
    connections = {}
    for key, r_names in region_starts.items():
        entrance_map[key] = []
        for region_name in r_names:
            region = world.get_region(region_name, player)
            for ent in region.entrances:
                parent = ent.parent_region
                # Only non-dungeon parents (or the special Sewer Drop) count as entrances.
                if parent.type != RegionType.Dungeon or parent.name == 'Sewer Drop':
                    if parent.name not in world.inaccessible_regions[player]:
                        entrance_map[key].append(region_name)
                    else:
                        # Currently unreachable; may be enabled later when the parent opens.
                        # (fix: the membership test previously spelled the key as
                        # ent.parent_region while indexing with parent — same object,
                        # now written consistently via setdefault)
                        potential_entrances.setdefault(parent, []).append(region_name)
                        connections[region_name] = parent
    return entrance_map, potential_entrances, connections
# todo: kill drop exceptions
def drop_exception(name):
    """True for drop regions that are special-cased because they have unique regions."""
    return name in ('Skull Pot Circle', 'Skull Back Drop')
def add_shuffled_entrances(sectors, region_list, entrance_list):
    """Append to *entrance_list* the name of every sector region that appears in *region_list*."""
    for sector in sectors:
        entrance_list.extend(region.name for region in sector.regions
                             if region.name in region_list)
def find_enabled_origins(sectors, enabled, entrance_list, entrance_map, key):
    """Add each sector region that has been enabled (or is a drop exception) to
    entrance_list, and record cross-origin entrances in entrance_map.

    NOTE(review): *key* is rebound to its parent dungeon name ('<Dungeon> <Part>'
    minus the last word) when missing from entrance_map, and the rebound value
    persists for subsequent loop iterations — appears intentional; confirm.
    """
    for sector in sectors:
        for region in sector.regions:
            if region.name in enabled.keys() and region.name not in entrance_list:
                entrance_list.append(region.name)
                origin_reg, origin_dungeon = enabled[region.name]
                # Record only entrances enabled from a different region AND dungeon.
                if origin_reg != region.name and origin_dungeon != region.dungeon:
                    if key not in entrance_map.keys():
                        key = ' '.join(key.split(' ')[:-1])
                    entrance_map[key].append(region.name)
            if drop_exception(region.name):  # only because they have unique regions
                entrance_list.append(region.name)
def remove_drop_origins(entrance_list):
    """Filter out drop entrances — they cannot serve as traversal starting points."""
    return [name for name in entrance_list if name not in drop_entrances]
def find_new_entrances(sector, entrances_map, connections, potentials, enabled, world, player):
    """Enable the potential entrances that this freshly generated sector now reaches."""
    for region in sector.regions:
        outside = connections.get(region.name)
        if outside is not None and (outside in potentials or outside.name in world.inaccessible_regions[player]):
            enable_new_entrances(region, connections, potentials, enabled, world, player)
    inverted_aga_check(entrances_map, connections, potentials, enabled, world, player)
def enable_new_entrances(region, connections, potentials, enabled, world, player):
new_region = connections[region.name]
if new_region in potentials.keys():
for potential in potentials.pop(new_region):
enabled[potential] = (region.name, region.dungeon)
# see if this unexplored region connects elsewhere
queue = deque(new_region.exits)
visited = set()
while len(queue) > 0:
ext = queue.popleft()
visited.add(ext)
region_name = ext.connected_region.name
if region_name in connections.keys() and connections[region_name] in potentials.keys():
for potential in potentials.pop(connections[region_name]):
enabled[potential] = (region.name, region.dungeon)
if ext.connected_region.name in world.inaccessible_regions[player]:
for new_exit in ext.connected_region.exits:
if new_exit not in visited:
queue.append(new_exit)
def inverted_aga_check(entrances_map, connections, potentials, enabled, world, player):
    """In inverted mode, enabling Agahnims Tower also opens the Hyrule Castle
    Ledge potential entrances."""
    if world.mode[player] != 'inverted':
        return
    if 'Agahnims Tower' in entrances_map.keys() or aga_tower_enabled(enabled):
        for region in list(potentials.keys()):
            if region.name == 'Hyrule Castle Ledge':
                for r_name in potentials[region]:
                    ledge_region = world.get_region(r_name, player)
                    enable_new_entrances(ledge_region, connections, potentials, enabled, world, player)
def aga_tower_enabled(enabled):
    """True once any enabled entrance belongs to Agahnims Tower."""
    return any(dungeon.name == 'Agahnims Tower' for _, dungeon in enabled.values())
def within_dungeon_legacy(world, player):
    """Legacy per-dungeon shuffler, kept for reference; shuffles each vanilla
    dungeon's regions in place and records a simple layout tuple."""
    # TODO: The "starts" regions need access logic
    # Aerinon's note: I think this is handled already by ER Rules - may need to check correct requirements
    dungeon_region_lists = [
        (['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby', 'Sewers Secret Room'],
         hyrule_castle_regions),
        (['Eastern Lobby'], eastern_regions),
        (['Desert Back Lobby', 'Desert Main Lobby', 'Desert West Lobby', 'Desert East Lobby'], desert_regions),
        (['Hera Lobby'], hera_regions),
        (['Tower Lobby'], tower_regions),
        (['PoD Lobby'], pod_regions),
    ]
    for start_list, region_list in dungeon_region_lists:
        shuffle_dungeon(world, player, start_list, region_list)
    world.dungeon_layouts[player] = {}
    for key in dungeon_regions.keys():
        world.dungeon_layouts[player][key] = (key, region_starts[key])
def shuffle_dungeon(world, player, start_region_names, dungeon_region_names):
    """Legacy layout shuffler: randomly link all doors of one dungeon, restarting
    itself recursively whenever the attempt paints itself into a corner.

    NOTE(review): the outer loop variable `door` is reused by the inner
    `for door in get_doors(...)` loop and is therefore shadowed afterwards —
    it appears intentional here, but confirm before refactoring.
    """
    logger = logging.getLogger('')
    # Part one - generate a random layout
    available_regions = []
    for name in [r for r in dungeon_region_names if r not in start_region_names]:
        available_regions.append(world.get_region(name, player))
    random.shuffle(available_regions)
    # "Ugly" doors are doors that we don't want to see from the front, because of some
    # sort of unsupported key door. To handle them, make a map of "ugly regions" and
    # never link across them.
    ugly_regions = {}
    next_ugly_region = 1
    # Add all start regions to the open set.
    available_doors = []
    for name in start_region_names:
        logger.info("Starting in %s", name)
        for door in get_doors(world, world.get_region(name, player), player):
            ugly_regions[door.name] = 0
            available_doors.append(door)
    # Loop until all available doors are used
    while len(available_doors) > 0:
        # Pick a random available door to connect, prioritizing ones that aren't blocked.
        # This makes them either get picked up through another door (so they head deeper
        # into the dungeon), or puts them late in the dungeon (so they probably are part
        # of a loop). Panic if neither of these happens.
        random.shuffle(available_doors)
        available_doors.sort(key=lambda door: 1 if door.blocked else 0 if door.ugly else 2)
        door = available_doors.pop()
        logger.info('Linking %s', door.name)
        # Find an available region that has a compatible door
        connect_region, connect_door = find_compatible_door_in_regions(world, door, available_regions, player)
        # Also ignore compatible doors if they're blocked; these should only be used to
        # create loops.
        if connect_region is not None and not door.blocked:
            logger.info(' Found new region %s via %s', connect_region.name, connect_door.name)
            # Apply connection and add the new region's doors to the available list
            maybe_connect_two_way(world, door, connect_door, player)
            # Figure out the new room's ugliness region
            new_room_ugly_region = ugly_regions[door.name]
            if connect_door.ugly:
                next_ugly_region += 1
                new_room_ugly_region = next_ugly_region
            is_new_region = connect_region in available_regions
            # Add the doors
            for door in get_doors(world, connect_region, player):
                ugly_regions[door.name] = new_room_ugly_region
                if is_new_region:
                    available_doors.append(door)
                # If an ugly door is anything but the connect door, panic and die
                if door != connect_door and door.ugly:
                    logger.info('Failed because of ugly door, trying again.')
                    shuffle_dungeon(world, player, start_region_names, dungeon_region_names)
                    return
            # We've used this region and door, so don't use them again
            if is_new_region:
                available_regions.remove(connect_region)
            if connect_door in available_doors:
                available_doors.remove(connect_door)
        else:
            # If there's no available region with a door, use an internal connection
            connect_door = find_compatible_door_in_list(ugly_regions, world, door, available_doors, player)
            if connect_door is not None:
                logger.info(' Adding loop via %s', connect_door.name)
                maybe_connect_two_way(world, door, connect_door, player)
                if connect_door in available_doors:
                    available_doors.remove(connect_door)
    # Check that we used everything, and retry if we failed
    if len(available_regions) > 0 or len(available_doors) > 0:
        logger.info('Failed to add all regions to dungeon, trying again.')
        shuffle_dungeon(world, player, start_region_names, dungeon_region_names)
        return
# Connects a and b. Or don't if they're an unsupported connection type.
# TODO: This is gross, don't do it this way
def maybe_connect_two_way(world, a, b, player):
    """Connect doors a and b when a's type supports shuffling; fixed types are a no-op."""
    # Return on unsupported types.
    unsupported = (DoorType.Open, DoorType.StraightStairs, DoorType.Hole, DoorType.Warp,
                   DoorType.Ladder, DoorType.Interior, DoorType.Logical)
    if a.type in unsupported:
        return
    # Connect supported types
    if a.type in (DoorType.Normal, DoorType.SpiralStairs):
        if a.blocked:
            connect_one_way(world, b.name, a.name, player)
        elif b.blocked:
            connect_one_way(world, a.name, b.name, player)
        else:
            connect_two_way(world, a.name, b.name, player)
        return
    # If we failed to account for a type, panic
    raise RuntimeError('Unknown door type ' + a.type.name)
def find_compatible_door_in_regions(world, door, regions, player):
    """Search *regions* for a door that can pair with *door*.

    Returns (region, door) on success, (None, None) otherwise; one-way
    teleport types already know their destination and pair with themselves.
    """
    if door.type in (DoorType.Hole, DoorType.Warp, DoorType.Logical):
        return door.dest, door
    for candidate_region in regions:
        for candidate in get_doors(world, candidate_region, player):
            if doors_compatible(door, candidate):
                return candidate_region, candidate
    return None, None
def find_compatible_door_in_list(ugly_regions, world, door, doors, player):
    """Find a door in *doors* compatible with *door* within the same ugly
    region; returns None when nothing matches."""
    if door.type in (DoorType.Hole, DoorType.Warp, DoorType.Logical):
        return door
    for candidate in doors:
        # Never link across ugly-region boundaries.
        if ugly_regions[door.name] == ugly_regions[candidate.name] and doors_compatible(door, candidate):
            return candidate
    return None
def get_doors(world, region, player):
    """Return the Door objects behind each of *region*'s exits.

    Exits that have no associated door are skipped.
    """
    doors = []
    for ext in region.exits:  # renamed from 'exit' to avoid shadowing the builtin
        door = world.check_for_door(ext.name, player)
        if door is not None:
            doors.append(door)
    return doors
def get_entrance_doors(world, region, player):
    """Return the Door objects behind each of *region*'s entrances.

    Entrances that have no associated door are skipped.  (The loop variable
    was renamed from 'exit': it shadowed the builtin and was misleading for
    an entrance list anyway.)
    """
    doors = []
    for ent in region.entrances:
        door = world.check_for_door(ent.name, player)
        if door is not None:
            doors.append(door)
    return doors
def doors_compatible(a, b):
    """Decide whether doors a and b may be linked under the legacy shuffler's rules."""
    if a.type != b.type:
        return False
    # Fixed-pair types may only connect to their vanilla partner.
    mandatory_pairs = {
        DoorType.Open: open_edges,
        DoorType.StraightStairs: straight_staircases,
        DoorType.Interior: interior_doors,
        DoorType.Ladder: ladders,
    }
    if a.type in mandatory_pairs:
        return doors_fit_mandatory_pair(mandatory_pairs[a.type], a, b)
    if a.type == DoorType.Normal and (a.smallKey or b.smallKey or a.bigKey or b.bigKey):
        return doors_fit_mandatory_pair(key_doors, a, b)
    if a.type in (DoorType.Hole, DoorType.Warp, DoorType.Logical):
        return False  # these aren't compatible with anything
    return a.direction == switch_dir(b.direction)
def doors_fit_mandatory_pair(pair_list, a, b):
    """True when the (a, b) door names appear, in either order, in *pair_list*."""
    names = (a.name, b.name)
    return any(names in ((pair_a, pair_b), (pair_b, pair_a)) for pair_a, pair_b in pair_list)
# goals:
# 1. have enough chests to be interesting (2 more than dungeon items)
# 2. have a balanced amount of regions added (check)
# 3. prevent soft locks due to key usage (algorithm written)
# 4. rules in place to affect item placement (lamp, keys, etc. -- in rules)
# 5. to be complete -- all doors linked (check, somewhat)
# 6. avoid deadlocks/dead end dungeon (check)
# 7. certain paths through dungeon must be possible - be able to reach goals (check)
def cross_dungeon(world, player):
    """'crossed' door shuffle: pool every dungeon sector, deal them into new
    dungeons, generate the layouts, then rebalance keys, maps/compasses and
    finally reassign the GT bosses to whichever dungeon owns their room."""
    fix_big_key_doors_with_ugly_smalls(world, player)
    overworld_prep(world, player)
    entrances_map, potentials, connections = determine_entrance_list(world, player)
    connections_tuple = (entrances_map, potentials, connections)
    all_sectors = []
    for key in dungeon_regions.keys():
        all_sectors.extend(convert_to_sectors(dungeon_regions[key], world, player))
    dungeon_builders = create_dungeon_builders(all_sectors, world, player)
    for builder in dungeon_builders.values():
        builder.entrance_list = list(entrances_map[builder.name])
        dungeon_obj = world.get_dungeon(builder.name, player)
        for sector in builder.sectors:
            for region in sector.regions:
                region.dungeon = dungeon_obj
                for loc in region.locations:
                    if loc.name in key_only_locations:
                        # Key-only drops must hold the key (or big key) of their NEW dungeon.
                        key_name = dungeon_keys[builder.name] if loc.name != 'Hyrule Castle - Big Key Drop' else dungeon_bigs[builder.name]
                        loc.forced_item = loc.item = ItemFactory(key_name, player)
    recombinant_builders = {}
    handle_split_dungeons(dungeon_builders, recombinant_builders, entrances_map)
    main_dungeon_generation(dungeon_builders, recombinant_builders, connections_tuple, world, player)
    paths = determine_required_paths(world, player)
    check_required_paths(paths, world, player)
    # HC and GT trade their maps for extra compasses (restored below if room remains).
    hc = world.get_dungeon('Hyrule Castle', player)
    del hc.dungeon_items[0]  # removes map
    hc.dungeon_items.append(ItemFactory('Compass (Escape)', player))
    at = world.get_dungeon('Agahnims Tower', player)
    at.dungeon_items.append(ItemFactory('Compass (Agahnims Tower)', player))
    gt = world.get_dungeon('Ganons Tower', player)
    del gt.dungeon_items[0]  # removes map
    assign_cross_keys(dungeon_builders, world, player)
    # Keep the total dungeon item count at the target (34 retro / 63 normal).
    all_dungeon_items = [y for x in world.dungeons if x.player == player for y in x.all_items]
    target_items = 34 if world.retro[player] else 63
    d_items = target_items - len(all_dungeon_items)
    if d_items > 0:
        if d_items >= 1:  # restore HC map
            world.get_dungeon('Hyrule Castle', player).dungeon_items.append(ItemFactory('Map (Escape)', player))
        if d_items >= 2:  # restore GT map
            world.get_dungeon('Ganons Tower', player).dungeon_items.append(ItemFactory('Map (Ganons Tower)', player))
        if d_items > 2:
            world.pool_adjustment[player] = d_items - 2
    elif d_items < 0:
        world.pool_adjustment[player] = d_items
    smooth_door_pairs(world, player)
    # Re-assign dungeon bosses
    gt = world.get_dungeon('Ganons Tower', player)
    for name, builder in dungeon_builders.items():
        reassign_boss('GT Ice Armos', 'bottom', builder, gt, world, player)
        reassign_boss('GT Lanmolas 2', 'middle', builder, gt, world, player)
        reassign_boss('GT Moldorm', 'top', builder, gt, world, player)
    if world.hints[player]:
        refine_hints(dungeon_builders)
def assign_cross_keys(dungeon_builders, world, player):
    """Distribute the global pool of 29 small keys across the newly dealt dungeons.

    Works in four steps — candidate discovery, proportional initial assignment,
    validation (which may hand keys back), then a greedy top-up of dungeons that
    still have flexibility — and finally rewrites each dungeon's small-key pool.
    """
    start = time.process_time()
    total_keys = remaining = 29
    total_candidates = 0
    start_regions_map = {}
    # Step 1: Find Small Key Door Candidates
    for name, builder in dungeon_builders.items():
        dungeon = world.get_dungeon(name, player)
        # Big key is only placed when the layout requires one and doesn't supply it.
        if not builder.bk_required or builder.bk_provided:
            dungeon.big_key = None
        elif builder.bk_required and not builder.bk_provided:
            dungeon.big_key = ItemFactory(dungeon_bigs[name], player)
        start_regions = convert_regions(builder.path_entrances, world, player)
        find_small_key_door_candidates(builder, start_regions, world, player)
        builder.key_doors_num = max(0, len(builder.candidates) - builder.key_drop_cnt)
        total_candidates += builder.key_doors_num
        start_regions_map[name] = start_regions
    # Step 2: Initial Key Number Assignment & Calculate Flexibility
    for name, builder in dungeon_builders.items():
        # Assign keys proportionally to candidate count, capped by chest space.
        calculated = int(round(builder.key_doors_num*total_keys/total_candidates))
        max_keys = builder.location_cnt - calc_used_dungeon_items(builder)
        cand_len = max(0, len(builder.candidates) - builder.key_drop_cnt)
        limit = min(max_keys, cand_len)
        suggested = min(calculated, limit)
        # Shrink the assignment until the combination search space is tractable.
        combo_size = ncr(len(builder.candidates), suggested + builder.key_drop_cnt)
        while combo_size > 500000 and suggested > 0:
            suggested -= 1
            combo_size = ncr(len(builder.candidates), suggested + builder.key_drop_cnt)
        builder.key_doors_num = suggested + builder.key_drop_cnt
        remaining -= suggested
        builder.combo_size = combo_size
        if suggested < limit:
            builder.flex = limit - suggested
    # Step 3: Initial valid combination find - reduce flex if needed
    for name, builder in dungeon_builders.items():
        suggested = builder.key_doors_num - builder.key_drop_cnt
        find_valid_combination(builder, start_regions_map[name], world, player)
        actual_chest_keys = builder.key_doors_num - builder.key_drop_cnt
        if actual_chest_keys < suggested:
            # The validator dropped keys; return them to the pool, freeze this dungeon.
            remaining += suggested - actual_chest_keys
            builder.flex = 0
    # Step 4: Try to assign remaining keys
    builder_order = [x for x in dungeon_builders.values() if x.flex > 0]
    builder_order.sort(key=lambda b: b.combo_size)
    queue = deque(builder_order)
    logger = logging.getLogger('')
    while len(queue) > 0 and remaining > 0:
        builder = queue.popleft()
        name = builder.name
        logger.info('Cross Dungeon: Increasing key count by 1 for %s', name)
        builder.key_doors_num += 1
        result = find_valid_combination(builder, start_regions_map[name], world, player, drop_keys=False)
        if result:
            remaining -= 1
            builder.flex -= 1
            if builder.flex > 0:
                # Still flexible: requeue, keeping the cheapest searches first.
                builder.combo_size = ncr(len(builder.candidates), builder.key_doors_num)
                queue.append(builder)
                queue = deque(sorted(queue, key=lambda b: b.combo_size))
        else:
            logger.info('Cross Dungeon: Increase failed for %s', name)
            builder.key_doors_num -= 1
            builder.flex = 0
    logger.info('Cross Dungeon: Keys unable to assign in pool %s', remaining)
    # Last Step: Adjust Small Key Dungeon Pool
    if not world.retro[player]:
        for name, builder in dungeon_builders.items():
            reassign_key_doors(builder, world, player)
            log_key_logic(builder.name, world.key_logic[player][builder.name])
            actual_chest_keys = max(builder.key_doors_num - builder.key_drop_cnt, 0)
            dungeon = world.get_dungeon(name, player)
            if actual_chest_keys == 0:
                dungeon.small_keys = []
            else:
                dungeon.small_keys = [ItemFactory(dungeon_keys[name], player)] * actual_chest_keys
    logging.getLogger('').info('Cross Dungeon: Key door shuffle time: %s', time.process_time()-start)
def reassign_boss(boss_region, boss_key, builder, gt, world, player):
    """If builder's sector now owns boss_region, move that boss out of GT's table."""
    if boss_region not in builder.master_sector.region_set():
        return
    target_dungeon = world.get_dungeon(builder.name, player)
    if target_dungeon == gt:
        return
    target_dungeon.bosses[boss_key] = gt.bosses.pop(boss_key)
def refine_hints(dungeon_builders):
    """Stamp each ordinary dungeon location with its dungeon's hint text.

    Boss, prize, event and Sanctuary locations are left untouched.
    """
    for name, builder in dungeon_builders.items():
        hint = dungeon_hints[name]
        for region in builder.master_sector.regions:
            for location in region.locations:
                excluded = (location.event
                            or '- Boss' in location.name
                            or '- Prize' in location.name
                            or location.name == 'Sanctuary')
                if not excluded:
                    location.hint_text = hint
def convert_to_sectors(region_names, world, player):
    """Partition the named regions into connected Sector objects.

    Pops regions one at a time and floods outward along exits (following a
    door's controller entrance where one exists) to build a connected chunk.
    Chunks that touch sectors built earlier are merged into a single sector.
    Doors with neither a controller nor a destination are recorded as the
    sector's outstanding (unconnected) doors.
    """
    region_list = convert_regions(region_names, world, player)
    sectors = []
    while len(region_list) > 0:
        region = region_list.pop()
        new_sector = True
        region_chunk = [region]
        exits = []
        exits.extend(region.exits)
        outstanding_doors = []
        matching_sectors = []
        while len(exits) > 0:
            ext = exits.pop()
            door = ext.door
            if ext.connected_region is not None or door is not None and door.controller is not None:
                # controller doors route through the controlling entrance's region
                if door is not None and door.controller is not None:
                    connect_region = world.get_entrance(door.controller.name, player).parent_region
                else:
                    connect_region = ext.connected_region
                if connect_region not in region_chunk and connect_region in region_list:
                    region_list.remove(connect_region)
                    region_chunk.append(connect_region)
                    exits.extend(connect_region.exits)
                if connect_region not in region_chunk:
                    # region already belongs to a previously-built sector: mark it for merging
                    for existing in sectors:
                        if connect_region in existing.regions:
                            new_sector = False
                            if existing not in matching_sectors:
                                matching_sectors.append(existing)
            else:
                # unconnected door with no controller: leave it outstanding
                if door is not None and door.controller is None and door.dest is None:
                    outstanding_doors.append(door)
        sector = Sector()
        if not new_sector:
            # fold every touched sector into the new one and drop the originals
            for match in matching_sectors:
                sector.regions.extend(match.regions)
                sector.outstanding_doors.extend(match.outstanding_doors)
                sectors.remove(match)
        sector.regions.extend(region_chunk)
        sector.outstanding_doors.extend(outstanding_doors)
        sectors.append(sector)
    return sectors
# those with split region starts like Desert/Skull combine for key layouts
def combine_layouts(recombinant_builders, dungeon_builders, entrances_map):
    """Merge split dungeon builders back into one builder per dungeon.

    Builders whose name starts with a recombinant dungeon's name (e.g. the
    split Desert/Skull pieces) are removed from dungeon_builders and their
    sectors folded into the recombined builder, which is then re-registered
    under the plain dungeon name.
    """
    for recombine in recombinant_builders.values():
        queue = deque(dungeon_builders.values())
        while len(queue) > 0:
            builder = queue.pop()
            if builder.name.startswith(recombine.name):
                del dungeon_builders[builder.name]
                if recombine.master_sector is None:
                    # first matching piece: adopt its sector wholesale
                    recombine.master_sector = builder.master_sector
                    recombine.master_sector.name = recombine.name
                    recombine.pre_open_stonewall = builder.pre_open_stonewall
                else:
                    recombine.master_sector.regions.extend(builder.master_sector.regions)
                    if builder.pre_open_stonewall:
                        recombine.pre_open_stonewall = builder.pre_open_stonewall
        recombine.layout_starts = list(entrances_map[recombine.name])
        dungeon_builders[recombine.name] = recombine
def valid_region_to_explore(region, world, player):
    """A region is explorable if it is a dungeon region or a known-inaccessible overworld region."""
    if region.type == RegionType.Dungeon:
        return True
    return region.name in world.inaccessible_regions[player]
def shuffle_key_doors(builder, world, player):
    """Re-shuffle the small key doors within a single dungeon builder.

    Counts the current key doors (skipping the far side of interior doors and
    the paired partner of normal doors so each logical key door counts once),
    gathers candidate doors, searches for a valid combination, then rewires
    the rooms and logs the resulting key logic.
    """
    start_regions = convert_regions(builder.path_entrances, world, player)
    # count number of key doors - this could be a table?
    num_key_doors = 0
    skips = []
    for region in builder.master_sector.regions:
        for ext in region.exits:
            d = world.check_for_door(ext.name, player)
            if d is not None and d.smallKey:
                if d not in skips:
                    if d.type == DoorType.Interior:
                        # both halves of an interior door share one key: skip the other half
                        skips.append(d.dest)
                    if d.type == DoorType.Normal:
                        # skip the paired partner of a normal key door
                        for dp in world.paired_doors[player]:
                            if d.name == dp.door_a:
                                skips.append(world.get_door(dp.door_b, player))
                                break
                            elif d.name == dp.door_b:
                                skips.append(world.get_door(dp.door_a, player))
                                break
                    num_key_doors += 1
    builder.key_doors_num = num_key_doors
    find_small_key_door_candidates(builder, start_regions, world, player)
    find_valid_combination(builder, start_regions, world, player)
    reassign_key_doors(builder, world, player)
    log_key_logic(builder.name, world.key_logic[player][builder.name])
def find_current_key_doors(builder):
    """Collect every door in the builder's sector currently flagged as a small key door."""
    return [ext.door
            for region in builder.master_sector.regions
            for ext in region.exits
            if ext.door and ext.door.smallKey]
def find_small_key_door_candidates(builder, start_regions, world, player):
    """Walk the dungeon from its entrances and record pair-aware key-door candidates."""
    candidates, checked_doors = [], set()
    for start in start_regions:
        possible, checked = find_key_door_candidates(start, checked_doors, world, player)
        candidates.extend(possible)
        checked_doors.update(checked)
    # A Normal door is rejected when its partner was examined but is not itself a candidate
    flat_candidates = [c for c in candidates
                       if c.type != DoorType.Normal
                       or c.dest not in checked_doors
                       or c.dest in candidates]
    builder.candidates = build_pair_list(flat_candidates)
def calc_used_dungeon_items(builder):
    """Count the dungeon-item slots this dungeon consumes (base of 4, adjusted).

    Adds one when a big key is required but not already provided; Hyrule
    Castle and Agahnims Tower lack some compass/map items.
    """
    used = 4
    if builder.bk_required and not builder.bk_provided:
        used += 1
    if builder.name == 'Hyrule Castle':
        used -= 1  # missing compass/map
    if builder.name == 'Agahnims Tower':
        used -= 2  # missing both compass and map
    # gt can lose map once compasses work
    return used
def find_valid_combination(builder, start_regions, world, player, drop_keys=True):
    """Search for a valid key-door assignment of size builder.key_doors_num.

    Randomly samples combinations of builder.candidates and validates each
    layout. When the search exhausts the space or runs too long, the key count
    is lowered (only if drop_keys is True) and the search restarts. Returns
    True on success, storing the proposal and derived key logic on the builder
    and world; returns False only when drop_keys is False and the requested
    count has no valid layout.
    """
    logger = logging.getLogger('')
    logger.info('Shuffling Key doors for %s', builder.name)
    # find valid combination of candidates
    if len(builder.candidates) < builder.key_doors_num:
        if not drop_keys:
            logger.info('No valid layouts for %s with %s doors', builder.name, builder.key_doors_num)
            return False
        builder.key_doors_num = len(builder.candidates)  # reduce number of key doors
        logger.info('Lowering key door count because not enough candidates: %s', builder.name)
    combinations = ncr(len(builder.candidates), builder.key_doors_num)
    itr = 0
    start = time.process_time()
    # shuffled indices give a random, non-repeating walk over all combinations
    sample_list = list(range(0, int(combinations)))
    random.shuffle(sample_list)
    proposal = kth_combination(sample_list[itr], builder.candidates, builder.key_doors_num)
    key_layout = build_key_layout(builder, start_regions, proposal, world, player)
    while not validate_key_layout(key_layout, world, player):
        itr += 1
        stop_early = False
        if itr % 1000 == 0:
            mark = time.process_time()-start
            # give up early when the search is slow and a large share is already exhausted
            if (mark > 10 and itr*100/combinations > 50) or (mark > 20 and itr*100/combinations > 25) or mark > 30:
                stop_early = True
        if itr >= combinations or stop_early:
            if not drop_keys:
                logger.info('No valid layouts for %s with %s doors', builder.name, builder.key_doors_num)
                return False
            logger.info('Lowering key door count because no valid layouts: %s', builder.name)
            builder.key_doors_num -= 1
            if builder.key_doors_num < 0:
                raise Exception('Bad dungeon %s - 0 key doors not valid' % builder.name)
            combinations = ncr(len(builder.candidates), builder.key_doors_num)
            sample_list = list(range(0, int(combinations)))
            random.shuffle(sample_list)
            itr = 0
            start = time.process_time()  # reset time since itr reset
        proposal = kth_combination(sample_list[itr], builder.candidates, builder.key_doors_num)
        key_layout.reset(proposal, builder, world, player)
        if (itr+1) % 1000 == 0:
            mark = time.process_time()-start
            logger.info('%s time elapsed. %s iterations/s', mark, itr/mark)
    # make changes
    if player not in world.key_logic.keys():
        world.key_logic[player] = {}
    analyze_dungeon(key_layout, world, player)
    builder.key_door_proposal = proposal
    world.key_logic[player][builder.name] = key_layout.key_logic
    world.key_layout[player][builder.name] = key_layout
    return True
def log_key_logic(d_name, key_logic):
    """Debug-log a dungeon's derived key logic: big/small key restrictions,
    per-door key rules, and placement rules. No-op unless DEBUG is enabled."""
    logger = logging.getLogger('')
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('Key Logic for %s', d_name)
        if len(key_logic.bk_restricted) > 0:
            logger.debug('-BK Restrictions')
            for restriction in key_logic.bk_restricted:
                logger.debug(restriction)
        if len(key_logic.sm_restricted) > 0:
            logger.debug('-Small Restrictions')
            for restriction in key_logic.sm_restricted:
                logger.debug(restriction)
        for key in key_logic.door_rules.keys():
            rule = key_logic.door_rules[key]
            logger.debug('--Rule for %s: Nrm:%s Allow:%s Loc:%s Alt:%s', key, rule.small_key_num, rule.allow_small, rule.small_location, rule.alternate_small_key)
            if rule.alternate_small_key is not None:
                for loc in rule.alternate_big_key_loc:
                    logger.debug('---BK Loc %s', loc.name)
        logger.debug('Placement rules for %s', d_name)
        for rule in key_logic.placement_rules:
            logger.debug('*Rule for %s:', rule.door_reference)
            if rule.bk_conditional_set:
                logger.debug('**BK Checks %s', ','.join([x.name for x in rule.bk_conditional_set]))
                logger.debug('**BK Blocked By Door (%s) : %s', rule.needed_keys_wo_bk, ','.join([x.name for x in rule.check_locations_wo_bk]))
            logger.debug('**BK Elsewhere (%s) : %s', rule.needed_keys_w_bk, ','.join([x.name for x in rule.check_locations_w_bk]))
def build_pair_list(flat_list):
    """Condense doors into (door, partner) tuples when both halves are present.

    Spiral stairs are never paired; doors whose partner is absent stay single.
    """
    paired = []
    remaining = deque(flat_list)
    while remaining:
        door = remaining.pop()
        partner = door.dest
        if partner in remaining and door.type != DoorType.SpiralStairs:
            remaining.remove(partner)
            paired.append((door, partner))
        else:
            paired.append(door)
    return paired
def flatten_pair_list(paired_list):
    """Expand (door, partner) tuples back into individual doors, preserving order."""
    flat = []
    for entry in paired_list:
        if isinstance(entry, tuple):
            flat.extend(entry)
        else:
            flat.append(entry)
    return flat
def find_key_door_candidates(region, checked, world, player):
    """Traverse the dungeon from region; return (candidates, checked_doors).

    A door in a usable doorList slot (0-3) is a candidate when its current
    kind can be converted to a key door for its door type; Normal doors also
    require a convertible partner, which is recorded as a candidate at the
    same time. Doors outside those slots/types default to valid.
    """
    dungeon = region.dungeon
    candidates = []
    checked_doors = list(checked)
    queue = deque([(region, None, None)])
    while len(queue) > 0:
        current, last_door, last_region = queue.pop()
        for ext in current.exits:
            d = ext.door
            if d and d.controller:
                d = d.controller  # evaluate the controlling door instead
            if d is not None and not d.blocked and d.dest is not last_door and d.dest is not last_region and d not in checked_doors:
                valid = False
                if 0 <= d.doorListPos < 4 and d.type in [DoorType.Interior, DoorType.Normal, DoorType.SpiralStairs]:
                    room = world.get_room(d.roomIndex, player)
                    position, kind = room.doorList[d.doorListPos]
                    if d.type == DoorType.Interior:
                        valid = kind in [DoorKind.Normal, DoorKind.SmallKey, DoorKind.Bombable, DoorKind.Dashable]
                    elif d.type == DoorType.SpiralStairs:
                        valid = kind in [DoorKind.StairKey, DoorKind.StairKey2, DoorKind.StairKeyLow]
                    elif d.type == DoorType.Normal:
                        # both sides of a normal door must hold a convertible kind
                        d2 = d.dest
                        if d2 not in candidates:
                            room_b = world.get_room(d2.roomIndex, player)
                            pos_b, kind_b = room_b.doorList[d2.doorListPos]
                            okay_normals = [DoorKind.Normal, DoorKind.SmallKey, DoorKind.Bombable,
                                            DoorKind.Dashable, DoorKind.DungeonChanger]
                            valid = kind in okay_normals and kind_b in okay_normals
                            if valid and 0 <= d2.doorListPos < 4:
                                candidates.append(d2)
                else:
                    valid = True
                if valid and d not in candidates:
                    candidates.append(d)
                # keep exploring within this dungeon (or out into non-dungeon regions)
                if ext.connected_region.type != RegionType.Dungeon or ext.connected_region.dungeon == dungeon:
                    queue.append((ext.connected_region, d, current))
            if d is not None:
                checked_doors.append(d)
    return candidates, checked_doors
def kth_combination(k, l, r):
    """Return the k-th (0-indexed, lexicographic) r-combination of list l.

    Rewritten iteratively with an exact integer binomial helper. The previous
    recursive version delegated the counts to ncr(), whose true division
    produces floats and can lose precision once counts exceed 2**53, which
    could select the wrong combination for large candidate pools.
    """
    def _comb(n, c):
        # exact integer binomial coefficient C(n, c)
        c = min(c, n - c)
        value = 1
        for i in range(1, c + 1):
            value = value * (n - c + i) // i  # exact at every step
        return value

    result = []
    items = list(l)
    while r > 0:
        if len(items) == r:
            # only one combination remains: take everything left
            result.extend(items)
            break
        count = _comb(len(items) - 1, r - 1)  # combinations that include items[0]
        if k < count:
            result.append(items[0])
            r -= 1
        else:
            k -= count
        items = items[1:]
    return result
def ncr(n, r):
    """Binomial coefficient C(n, r) as an exact integer.

    Bug fix: the previous version used true division (/), returning a float
    and silently losing precision once intermediate values exceed 2**53.
    Callers build ranges and run equality/threshold comparisons on the result,
    so an exact int is required; the division below is always exact because
    the numerator is divisible by the denominator.
    """
    if r == 0:
        return 1
    r = min(r, n-r)  # use symmetry to shrink the products
    numerator = reduce(op.mul, range(n, n-r, -1), 1)
    denominator = reduce(op.mul, range(1, r+1), 1)
    return numerator // denominator
def reassign_key_doors(builder, world, player):
    """Apply builder.key_door_proposal to the world's rooms.

    First pass: existing key doors not in the proposal are reverted (normal
    doors and interiors back to DoorKind.Normal, stair-key entries deleted or
    mirrored) and their pairings cleared. Second pass: proposed doors/pairs
    are converted to small key doors and recorded in the spoiler.
    """
    logger = logging.getLogger('')
    logger.debug('Key doors for %s', builder.name)
    proposal = builder.key_door_proposal
    flat_proposal = flatten_pair_list(proposal)
    queue = deque(find_current_key_doors(builder))
    while len(queue) > 0:
        d = queue.pop()
        if d.type is DoorType.SpiralStairs and d not in proposal:
            room = world.get_room(d.roomIndex, player)
            if room.doorList[d.doorListPos][1] == DoorKind.StairKeyLow:
                room.delete(d.doorListPos)
            else:
                if len(room.doorList) > 1:
                    room.mirror(d.doorListPos)  # I think this works for crossed now
                else:
                    room.delete(d.doorListPos)
            d.smallKey = False
        elif d.type is DoorType.Interior and d not in flat_proposal and d.dest not in flat_proposal:
            world.get_room(d.roomIndex, player).change(d.doorListPos, DoorKind.Normal)
            d.smallKey = False
            d.dest.smallKey = False
            # the partner is handled with this door; don't process it again
            queue.remove(d.dest)
        elif d.type is DoorType.Normal and d not in flat_proposal:
            world.get_room(d.roomIndex, player).change(d.doorListPos, DoorKind.Normal)
            d.smallKey = False
            for dp in world.paired_doors[player]:
                if dp.door_a == d.name or dp.door_b == d.name:
                    dp.pair = False
    for obj in proposal:
        if type(obj) is tuple:
            d1 = obj[0]
            d2 = obj[1]
            if d1.type is DoorType.Interior:
                change_door_to_small_key(d1, world, player)
                d2.smallKey = True  # ensure flag is set
            else:
                # normal door pair: reuse an existing record or append a new one,
                # deactivating records that pair either door with someone else
                names = [d1.name, d2.name]
                found = False
                for dp in world.paired_doors[player]:
                    if dp.door_a in names and dp.door_b in names:
                        dp.pair = True
                        found = True
                    elif dp.door_a in names:
                        dp.pair = False
                    elif dp.door_b in names:
                        dp.pair = False
                if not found:
                    world.paired_doors[player].append(PairedDoor(d1.name, d2.name))
                change_door_to_small_key(d1, world, player)
                change_door_to_small_key(d2, world, player)
            world.spoiler.set_door_type(d1.name+' <-> '+d2.name, 'Key Door', player)
            logger.debug('Key Door: %s', d1.name+' <-> '+d2.name)
        else:
            d = obj
            if d.type is DoorType.Interior:
                change_door_to_small_key(d, world, player)
                d.dest.smallKey = True  # ensure flag is set
            elif d.type is DoorType.SpiralStairs:
                pass  # we don't have spiral stairs candidates yet that aren't already key doors
            elif d.type is DoorType.Normal:
                change_door_to_small_key(d, world, player)
            world.spoiler.set_door_type(d.name, 'Key Door', player)
            logger.debug('Key Door: %s', d.name)
def change_door_to_small_key(d, world, player):
    """Flag the door as a small key door and update its room's door table if needed."""
    d.smallKey = True
    room = world.get_room(d.roomIndex, player)
    _, current_kind = room.doorList[d.doorListPos]
    if current_kind != DoorKind.SmallKey:
        room.change(d.doorListPos, DoorKind.SmallKey)
def smooth_door_pairs(world, player):
    """Normalize both sides of each normal/interior door pair and sync pairing records.

    For normal doors: if either side is a small key door and the pair is
    convertible, both sides become small key doors and are paired; otherwise
    stale small-key pairings are removed. Bombable/dashable sides are likewise
    made symmetric (dashable wins a mixed pair). With the experimental flag,
    eligible plain pairs may be randomized. Finally, unused non-original pair
    records are pruned.

    Bug fix: when the pair was invalid and only the partner side (type_b) was
    a small key door, the code removed the pairing via the near door instead
    of the partner, leaving the partner's stale record active.
    """
    all_doors = [x for x in world.doors if x.player == player]
    skip = set()
    for door in all_doors:
        if door.type in [DoorType.Normal, DoorType.Interior] and door not in skip:
            partner = door.dest
            skip.add(partner)  # each pair is processed once, from the first side seen
            room_a = world.get_room(door.roomIndex, player)
            room_b = world.get_room(partner.roomIndex, player)
            type_a = room_a.kind(door)
            type_b = room_b.kind(partner)
            valid_pair = stateful_door(door, type_a) and stateful_door(partner, type_b)
            if door.type == DoorType.Normal:
                if type_a == DoorKind.SmallKey or type_b == DoorKind.SmallKey:
                    if valid_pair:
                        # make both sides small key doors and record the pairing
                        if type_a != DoorKind.SmallKey:
                            room_a.change(door.doorListPos, DoorKind.SmallKey)
                        if type_b != DoorKind.SmallKey:
                            room_b.change(partner.doorListPos, DoorKind.SmallKey)
                        add_pair(door, partner, world, player)
                    else:
                        if type_a == DoorKind.SmallKey:
                            remove_pair(door, world, player)
                        if type_b == DoorKind.SmallKey:
                            remove_pair(partner, world, player)  # was remove_pair(door, ...)
                elif type_a in [DoorKind.Bombable, DoorKind.Dashable] or type_b in [DoorKind.Bombable, DoorKind.Dashable]:
                    if valid_pair:
                        if type_a == type_b:
                            add_pair(door, partner, world, player)
                            spoiler_type = 'Bomb Door' if type_a == DoorKind.Bombable else 'Dash Door'
                            world.spoiler.set_door_type(door.name + ' <-> ' + partner.name, spoiler_type, player)
                        else:
                            # mixed pair: dashable takes precedence over bombable
                            new_type = DoorKind.Dashable if type_a == DoorKind.Dashable or type_b == DoorKind.Dashable else DoorKind.Bombable
                            if type_a != new_type:
                                room_a.change(door.doorListPos, new_type)
                            if type_b != new_type:
                                room_b.change(partner.doorListPos, new_type)
                            add_pair(door, partner, world, player)
                            spoiler_type = 'Bomb Door' if new_type == DoorKind.Bombable else 'Dash Door'
                            world.spoiler.set_door_type(door.name + ' <-> ' + partner.name, spoiler_type, player)
                    else:
                        # invalid pair: revert whichever side is special
                        if type_a in [DoorKind.Bombable, DoorKind.Dashable]:
                            room_a.change(door.doorListPos, DoorKind.Normal)
                            remove_pair(door, world, player)
                        elif type_b in [DoorKind.Bombable, DoorKind.Dashable]:
                            room_b.change(partner.doorListPos, DoorKind.Normal)
                            remove_pair(partner, world, player)
            elif world.experimental[player] and valid_pair and type_a != DoorKind.SmallKey and type_b != DoorKind.SmallKey:
                random_door_type(door, partner, world, player, type_a, type_b, room_a, room_b)
    # drop pair records that are neither active nor original
    world.paired_doors[player] = [x for x in world.paired_doors[player] if x.pair or x.original]
def add_pair(door_a, door_b, world, player):
    """Activate the pairing between door_a and door_b, creating a record if absent.

    Existing records that pair either door with a different partner are
    deactivated first.

    Bug fix: new PairedDoor records are now built from the door *names*;
    everywhere else (including this function's own comparisons) records are
    matched by name, so a record holding Door objects could never match and
    the pairing was silently lost.
    """
    pair_a, pair_b = None, None
    for paired_door in world.paired_doors[player]:
        # record already pairs these two doors (either orientation): activate it
        if paired_door.door_a == door_a.name and paired_door.door_b == door_b.name:
            paired_door.pair = True
            return
        if paired_door.door_a == door_b.name and paired_door.door_b == door_a.name:
            paired_door.pair = True
            return
        if paired_door.door_a == door_a.name or paired_door.door_b == door_a.name:
            pair_a = paired_door
        if paired_door.door_a == door_b.name or paired_door.door_b == door_b.name:
            pair_b = paired_door
    # deactivate stale pairings with other partners before adding the new record
    if pair_a:
        pair_a.pair = False
    if pair_b:
        pair_b.pair = False
    world.paired_doors[player].append(PairedDoor(door_a.name, door_b.name))
def remove_pair(door, world, player):
    """Deactivate the first paired-door record that references this door's name."""
    for record in world.paired_doors[player]:
        if door.name in (record.door_a, record.door_b):
            record.pair = False
            break
def stateful_door(door, kind):
    """True when the door sits in a usable doorList slot (0-3) and its kind is convertible."""
    convertible = (DoorKind.Normal, DoorKind.SmallKey, DoorKind.Bombable, DoorKind.Dashable)  # DoorKind.BigKey excluded for now
    return 0 <= door.doorListPos < 4 and kind in convertible
def random_door_type(door, partner, world, player, type_a, type_b, room_a, room_b):
    """Experimental: randomly convert an eligible door pair to bombable/dashable.

    Weighted 5/2/3 toward Normal/Bombable/Dashable; Normal means no change.
    """
    choice = random.choices([DoorKind.Normal, DoorKind.Bombable, DoorKind.Dashable], [5, 2, 3], k=1)[0]
    if choice == DoorKind.Normal:
        return
    if door.type == DoorType.Normal:
        add_pair(door, partner, world, player)
    if type_a != choice:
        room_a.change(door.doorListPos, choice)
    if type_b != choice:
        room_b.change(partner.doorListPos, choice)
    label = 'Bomb Door' if choice == DoorKind.Bombable else 'Dash Door'
    world.spoiler.set_door_type(door.name + ' <-> ' + partner.name, label, player)
def determine_required_paths(world, player):
    """Return the regions each dungeon must be able to reach after shuffling.

    Plain entries are destinations reached from the dungeon's entrances; tuple
    entries are (start_region, destination) pairs checked from that specific
    start. Standard mode and basic door shuffle add extra requirements.
    """
    paths = {
        'Hyrule Castle': ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby'],
        'Eastern Palace': ['Eastern Boss'],
        'Desert Palace': ['Desert Main Lobby', 'Desert East Lobby', 'Desert West Lobby', 'Desert Boss'],
        'Tower of Hera': ['Hera Boss'],
        'Agahnims Tower': ['Tower Agahnim 1'],
        'Palace of Darkness': ['PoD Boss'],
        'Swamp Palace': ['Swamp Boss'],
        'Skull Woods': ['Skull 1 Lobby', 'Skull 2 East Lobby', 'Skull 2 West Lobby', 'Skull Boss'],
        'Thieves Town': ['Thieves Boss', ('Thieves Blind\'s Cell', 'Thieves Boss')],
        'Ice Palace': ['Ice Boss'],
        'Misery Mire': ['Mire Boss'],
        'Turtle Rock': ['TR Main Lobby', 'TR Lazy Eyes', 'TR Big Chest Entrance', 'TR Eye Bridge', 'TR Boss'],
        'Ganons Tower': ['GT Agahnim 2']
        }
    if world.mode[player] == 'standard':
        paths['Hyrule Castle'].append('Hyrule Dungeon Cellblock')
        # noinspection PyTypeChecker
        paths['Hyrule Castle'].append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
    if world.doorShuffle[player] in ['basic']:
        paths['Thieves Town'].append('Thieves Attic Window')
    return paths
def overworld_prep(world, player):
    """Find inaccessible overworld regions, then add logical doors for their exits.

    Order matters: add_inaccessible_doors reads world.inaccessible_regions,
    which find_inaccessible_regions populates.
    """
    find_inaccessible_regions(world, player)
    add_inaccessible_doors(world, player)
def find_inaccessible_regions(world, player):
    """Populate world.inaccessible_regions[player] with unreachable non-dungeon regions.

    Breadth-first search over non-dungeon regions from the mode-appropriate
    spawn points; anything never visited (and passing valid_inaccessible_region)
    is recorded. Standard mode force-adds two castle regions.
    """
    world.inaccessible_regions[player] = []
    if world.mode[player] != 'inverted':
        start_regions = ['Links House', 'Sanctuary']
    else:
        start_regions = ['Inverted Links House', 'Inverted Dark Sanctuary']
    regs = convert_regions(start_regions, world, player)
    all_regions = set([r for r in world.regions if r.player == player and r.type is not RegionType.Dungeon])
    visited_regions = set()
    queue = deque(regs)
    while len(queue) > 0:
        next_region = queue.popleft()
        visited_regions.add(next_region)
        if next_region.name == 'Inverted Dark Sanctuary':  # special spawn point in cave
            # walk backwards through entrances too, since the spawn is inside the cave
            for ent in next_region.entrances:
                parent = ent.parent_region
                if parent and parent.type is not RegionType.Dungeon and parent not in queue and parent not in visited_regions:
                    queue.append(parent)
        for ext in next_region.exits:
            connect = ext.connected_region
            if connect and connect.type is not RegionType.Dungeon and connect not in queue and connect not in visited_regions:
                queue.append(connect)
    world.inaccessible_regions[player].extend([r.name for r in all_regions.difference(visited_regions) if valid_inaccessible_region(r)])
    if world.mode[player] == 'standard':
        world.inaccessible_regions[player].append('Hyrule Castle Ledge')
        world.inaccessible_regions[player].append('Sewer Drop')
    logger = logging.getLogger('')
    logger.debug('Inaccessible Regions:')
    for r in world.inaccessible_regions[player]:
        logger.debug('%s', r)
def valid_inaccessible_region(r):
    """Non-cave regions always count; caves only if they have exits and aren't start rooms."""
    if r.type is not RegionType.Cave:
        return True
    return len(r.exits) > 0 and r.name not in ['Links House', 'Chris Houlihan Room']
def add_inaccessible_doors(world, player):
    """Create logical doors for every exit of each inaccessible overworld region."""
    # todo: ignore standard mode hyrule castle ledge?
    for region_name in world.inaccessible_regions[player]:
        region = world.get_region(region_name, player)
        for ext in region.exits:
            create_door(world, player, ext.name, region.name)
def create_door(world, player, entName, region_name):
    """Create a Logical door for entName, plus one for any reverse exit back into region_name."""
    entrance = world.get_entrance(entName, player)
    connect = entrance.connected_region
    for ext in connect.exits:
        target = ext.connected_region
        if target is not None and target.name == region_name:
            world.doors += (Door(player, ext.name, DoorType.Logical, ext),)
            connect_door_only(world, ext.name, target, player)
    world.doors += (Door(player, entName, DoorType.Logical, entrance),)
    connect_door_only(world, entName, connect, player)
def check_required_paths(paths, world, player):
    """Verify every dungeon can reach its required regions; raise if one cannot.

    The exploration from the full entrance set is computed once per dungeon
    and cached; tuple path entries were grouped under their specific start
    region by determine_required_paths and are explored separately. A failed
    check gets one retry after attempting the Skull pinball trap fix.
    """
    for dungeon_name in paths.keys():
        builder = world.dungeon_layouts[player][dungeon_name]
        if len(paths[dungeon_name]) > 0:
            states_to_explore = defaultdict(list)
            # group destinations by their start-region tuple
            for path in paths[dungeon_name]:
                if type(path) is tuple:
                    states_to_explore[tuple([path[0]])].append(path[1])
                else:
                    states_to_explore[tuple(builder.path_entrances)].append(path)
            cached_initial_state = None
            for start_regs, dest_regs in states_to_explore.items():
                check_paths = convert_regions(dest_regs, world, player)
                start_regions = convert_regions(start_regs, world, player)
                initial = start_regs == tuple(builder.path_entrances)
                if not initial or cached_initial_state is None:
                    init = determine_init_crystal(initial, cached_initial_state, start_regions)
                    state = ExplorationState(init, dungeon_name)
                    for region in start_regions:
                        state.visit_region(region)
                        state.add_all_doors_check_unattached(region, world, player)
                    explore_state(state, world, player)
                    if initial and cached_initial_state is None:
                        cached_initial_state = state
                else:
                    state = cached_initial_state
                valid, bad_region = check_if_regions_visited(state, check_paths)
                if not valid:
                    # the Skull pinball trap door may be the blocker; open it and retry
                    if check_for_pinball_fix(state, bad_region, world, player):
                        explore_state(state, world, player)
                        valid, bad_region = check_if_regions_visited(state, check_paths)
                if not valid:
                    raise Exception('%s cannot reach %s' % (dungeon_name, bad_region.name))
def determine_init_crystal(initial, state, start_regions):
if initial:
return CrystalBarrier.Orange
if state is None:
raise Exception('Please start path checking from the entrances')
if len(start_regions) > 1:
raise NotImplementedError('Path checking for multiple start regions (not the entrances) not implemented, use more paths instead')
start_region = start_regions[0]
if start_region in state.visited_blue and start_region in state.visited_orange:
return CrystalBarrier.Either
elif start_region in state.visited_blue:
return CrystalBarrier.Blue
elif start_region in state.visited_orange:
return CrystalBarrier.Orange
else:
raise Exception('Can\'t get to %s from initial state', start_region.name)
def explore_state(state, world, player):
    """Exhaust the state's available doors, visiting every traversable region."""
    while len(state.avail_doors) > 0:
        door = state.next_avail_door().door
        region = world.get_entrance(door.name, player).connected_region
        reachable = state.can_traverse(door) and not state.visited(region)
        if reachable and valid_region_to_explore(region, world, player):
            state.visit_region(region)
            state.add_all_doors_check_unattached(region, world, player)
def check_if_regions_visited(state, check_paths):
    """Return (all_visited, first_unvisited_region_or_None) for the target regions."""
    for target in check_paths:
        if not state.visited_at_all(target):
            return False, target
    return True, None
def check_for_pinball_fix(state, bad_region, world, player):
    """Open the Skull Pinball trap door when it is what blocks Skull 2 West Lobby.

    Returns True when the trap was converted to a normal two-way door (the
    caller should then re-explore); False when the fix does not apply.
    """
    pinball_region = world.get_region('Skull Pinball', player)
    if bad_region.name == 'Skull 2 West Lobby' and state.visited_at_all(pinball_region):  # revisit this for entrance shuffle
        door = world.get_door('Skull Pinball WS', player)
        room = world.get_room(door.roomIndex, player)
        if room.doorList[door.doorListPos][1] == DoorKind.Trap:
            room.change(door.doorListPos, DoorKind.Normal)
            door.trapFlag = 0x0
            door.blocked = False
            connect_two_way(world, door.name, door.dest.name, player)
            state.add_all_doors_check_unattached(pinball_region, world, player)
            return True
    return False
@unique
class DROptions(Flag):
    """Bit flags for door randomizer options written into the ROM."""
    NoOptions = 0x00
    Eternal_Mini_Bosses = 0x01  # If on, GT minibosses marked as defeated when they try to spawn a heart
    Town_Portal = 0x02  # If on, Players will start with mirror scroll
    Open_Desert_Wall = 0x80  # If on, pre opens the desert wall, no fire required
# ---- Static door/region connection data (consumed by link_doors and related helpers) ----
# (exit name, target region) pairs for intra-room logical traversals —
# drop-downs, push blocks, hook/barrier paths. Connected as simple one-way
# doors in link_doors via connect_simple_door.
logical_connections = [
    ('Hyrule Dungeon North Abyss Catwalk Dropdown', 'Hyrule Dungeon North Abyss'),
    ('Sewers Secret Room Push Block', 'Sewers Secret Room Blocked Path'),
    ('Eastern Hint Tile Push Block', 'Eastern Hint Tile'),
    ('Eastern Map Balcony Hook Path', 'Eastern Map Room'),
    ('Eastern Map Room Drop Down', 'Eastern Map Balcony'),
    ('Desert Main Lobby Left Path', 'Desert Left Alcove'),
    ('Desert Main Lobby Right Path', 'Desert Right Alcove'),
    ('Desert Left Alcove Path', 'Desert Main Lobby'),
    ('Desert Right Alcove Path', 'Desert Main Lobby'),
    ('Hera Big Chest Landing Exit', 'Hera 4F'),
    ('PoD Pit Room Block Path N', 'PoD Pit Room Blocked'),
    ('PoD Pit Room Block Path S', 'PoD Pit Room'),
    ('PoD Arena Bonk Path', 'PoD Arena Bridge'),
    ('PoD Arena Main Crystal Path', 'PoD Arena Crystal'),
    ('PoD Arena Crystal Path', 'PoD Arena Main'),
    ('PoD Arena Main Orange Barrier', 'PoD Arena North'),
    ('PoD Arena North Drop Down', 'PoD Arena Main'),
    ('PoD Arena Bridge Drop Down', 'PoD Arena Main'),
    ('PoD Map Balcony Drop Down', 'PoD Sexy Statue'),
    ('PoD Basement Ledge Drop Down', 'PoD Stalfos Basement'),
    ('PoD Falling Bridge Path N', 'PoD Falling Bridge Ledge'),
    ('PoD Falling Bridge Path S', 'PoD Falling Bridge'),
    ('Swamp Lobby Moat', 'Swamp Entrance'),
    ('Swamp Entrance Moat', 'Swamp Lobby'),
    ('Swamp Trench 1 Approach Dry', 'Swamp Trench 1 Nexus'),
    ('Swamp Trench 1 Approach Key', 'Swamp Trench 1 Key Ledge'),
    ('Swamp Trench 1 Approach Swim Depart', 'Swamp Trench 1 Departure'),
    ('Swamp Trench 1 Nexus Approach', 'Swamp Trench 1 Approach'),
    ('Swamp Trench 1 Nexus Key', 'Swamp Trench 1 Key Ledge'),
    ('Swamp Trench 1 Key Ledge Dry', 'Swamp Trench 1 Nexus'),
    ('Swamp Trench 1 Key Approach', 'Swamp Trench 1 Approach'),
    ('Swamp Trench 1 Key Ledge Depart', 'Swamp Trench 1 Departure'),
    ('Swamp Trench 1 Departure Dry', 'Swamp Trench 1 Nexus'),
    ('Swamp Trench 1 Departure Approach', 'Swamp Trench 1 Approach'),
    ('Swamp Trench 1 Departure Key', 'Swamp Trench 1 Key Ledge'),
    ('Swamp Hub Hook Path', 'Swamp Hub North Ledge'),
    ('Swamp Hub North Ledge Drop Down', 'Swamp Hub'),
    ('Swamp Compass Donut Push Block', 'Swamp Donut Top'),
    ('Swamp Shortcut Blue Barrier', 'Swamp Trench 2 Pots'),
    ('Swamp Trench 2 Pots Blue Barrier', 'Swamp Shortcut'),
    ('Swamp Trench 2 Pots Dry', 'Swamp Trench 2 Blocks'),
    ('Swamp Trench 2 Pots Wet', 'Swamp Trench 2 Departure'),
    ('Swamp Trench 2 Blocks Pots', 'Swamp Trench 2 Pots'),
    ('Swamp Trench 2 Departure Wet', 'Swamp Trench 2 Pots'),
    ('Swamp West Shallows Push Blocks', 'Swamp West Block Path'),
    ('Swamp West Block Path Drop Down', 'Swamp West Shallows'),
    ('Swamp West Ledge Drop Down', 'Swamp West Shallows'),
    ('Swamp West Ledge Hook Path', 'Swamp Barrier Ledge'),
    ('Swamp Barrier Ledge Drop Down', 'Swamp West Shallows'),
    ('Swamp Barrier Ledge - Orange', 'Swamp Barrier'),
    ('Swamp Barrier - Orange', 'Swamp Barrier Ledge'),
    ('Swamp Barrier Ledge Hook Path', 'Swamp West Ledge'),
    ('Swamp Drain Right Switch', 'Swamp Drain Left'),
    ('Swamp Flooded Spot Ladder', 'Swamp Flooded Room'),
    ('Swamp Flooded Room Ladder', 'Swamp Flooded Spot'),
    ('Skull Pot Circle Star Path', 'Skull Map Room'),
    ('Skull Big Chest Hookpath', 'Skull 1 Lobby'),
    ('Skull Back Drop Star Path', 'Skull Small Hall'),
    ('Thieves Rail Ledge Drop Down', 'Thieves BK Corner'),
    ('Thieves Hellway Orange Barrier', 'Thieves Hellway S Crystal'),
    ('Thieves Hellway Crystal Orange Barrier', 'Thieves Hellway'),
    ('Thieves Hellway Blue Barrier', 'Thieves Hellway N Crystal'),
    ('Thieves Hellway Crystal Blue Barrier', 'Thieves Hellway'),
    ('Thieves Basement Block Path', 'Thieves Blocked Entry'),
    ('Thieves Blocked Entry Path', 'Thieves Basement Block'),
    ('Thieves Conveyor Bridge Block Path', 'Thieves Conveyor Block'),
    ('Thieves Conveyor Block Path', 'Thieves Conveyor Bridge'),
    ('Ice Cross Bottom Push Block Left', 'Ice Floor Switch'),
    ('Ice Cross Right Push Block Top', 'Ice Bomb Drop'),
    ('Ice Big Key Push Block', 'Ice Dead End'),
    ('Ice Bomb Jump Ledge Orange Barrier', 'Ice Bomb Jump Catwalk'),
    ('Ice Bomb Jump Catwalk Orange Barrier', 'Ice Bomb Jump Ledge'),
    ('Ice Hookshot Ledge Path', 'Ice Hookshot Balcony'),
    ('Ice Hookshot Balcony Path', 'Ice Hookshot Ledge'),
    ('Ice Crystal Right Orange Barrier', 'Ice Crystal Left'),
    ('Ice Crystal Left Orange Barrier', 'Ice Crystal Right'),
    ('Ice Crystal Left Blue Barrier', 'Ice Crystal Block'),
    ('Ice Crystal Block Exit', 'Ice Crystal Left'),
    ('Ice Big Chest Landing Push Blocks', 'Ice Big Chest View'),
    ('Mire Lobby Gap', 'Mire Post-Gap'),
    ('Mire Post-Gap Gap', 'Mire Lobby'),
    ('Mire Hub Upper Blue Barrier', 'Mire Hub Top'),
    ('Mire Hub Lower Blue Barrier', 'Mire Hub Right'),
    ('Mire Hub Right Blue Barrier', 'Mire Hub'),
    ('Mire Hub Top Blue Barrier', 'Mire Hub'),
    ('Mire Map Spike Side Drop Down', 'Mire Lone Shooter'),
    ('Mire Map Spike Side Blue Barrier', 'Mire Crystal Dead End'),
    ('Mire Map Spot Blue Barrier', 'Mire Crystal Dead End'),
    ('Mire Crystal Dead End Left Barrier', 'Mire Map Spot'),
    ('Mire Crystal Dead End Right Barrier', 'Mire Map Spike Side'),
    ('Mire Hidden Shooters Block Path S', 'Mire Hidden Shooters'),
    ('Mire Hidden Shooters Block Path N', 'Mire Hidden Shooters Blocked'),
    ('Mire Left Bridge Hook Path', 'Mire Right Bridge'),
    ('Mire Crystal Right Orange Barrier', 'Mire Crystal Mid'),
    ('Mire Crystal Mid Orange Barrier', 'Mire Crystal Right'),
    ('Mire Crystal Mid Blue Barrier', 'Mire Crystal Left'),
    ('Mire Crystal Left Blue Barrier', 'Mire Crystal Mid'),
    ('Mire Firesnake Skip Orange Barrier', 'Mire Antechamber'),
    ('Mire Antechamber Orange Barrier', 'Mire Firesnake Skip'),
    ('Mire Compass Blue Barrier', 'Mire Compass Chest'),
    ('Mire Compass Chest Exit', 'Mire Compass Room'),
    ('Mire South Fish Blue Barrier', 'Mire Fishbone'),
    ('Mire Fishbone Blue Barrier', 'Mire South Fish'),
    ('TR Main Lobby Gap', 'TR Lobby Ledge'),
    ('TR Lobby Ledge Gap', 'TR Main Lobby'),
    ('TR Pipe Ledge Drop Down', 'TR Pipe Pit'),
    ('TR Big Chest Gap', 'TR Big Chest Entrance'),
    ('TR Big Chest Entrance Gap', 'TR Big Chest'),
    ('TR Crystal Maze Forwards Path', 'TR Crystal Maze End'),
    ('TR Crystal Maze Blue Path', 'TR Crystal Maze'),
    ('TR Crystal Maze Cane Path', 'TR Crystal Maze'),
    ('GT Blocked Stairs Block Path', 'GT Big Chest'),
    ('GT Speed Torch South Path', 'GT Speed Torch'),
    ('GT Speed Torch North Path', 'GT Speed Torch Upper'),
    ('GT Hookshot East-North Path', 'GT Hookshot North Platform'),
    ('GT Hookshot East-South Path', 'GT Hookshot South Platform'),
    ('GT Hookshot North-East Path', 'GT Hookshot East Platform'),
    ('GT Hookshot North-South Path', 'GT Hookshot South Platform'),
    ('GT Hookshot South-East Path', 'GT Hookshot East Platform'),
    ('GT Hookshot South-North Path', 'GT Hookshot North Platform'),
    ('GT Hookshot Platform Blue Barrier', 'GT Hookshot South Entry'),
    ('GT Hookshot Entry Blue Barrier', 'GT Hookshot South Platform'),
    ('GT Double Switch Orange Barrier', 'GT Double Switch Switches'),
    ('GT Double Switch Orange Barrier 2', 'GT Double Switch Key Spot'),
    ('GT Double Switch Transition Blue', 'GT Double Switch Exit'),
    ('GT Double Switch Blue Path', 'GT Double Switch Transition'),
    ('GT Double Switch Orange Path', 'GT Double Switch Entry'),
    ('GT Double Switch Key Blue Path', 'GT Double Switch Exit'),
    ('GT Double Switch Key Orange Path', 'GT Double Switch Entry'),
    ('GT Double Switch Blue Barrier', 'GT Double Switch Key Spot'),
    ('GT Warp Maze - Pit Section Warp Spot', 'GT Warp Maze - Pit Exit Warp Spot'),
    ('GT Warp Maze Exit Section Warp Spot', 'GT Warp Maze - Pit Exit Warp Spot'),
    ('GT Firesnake Room Hook Path', 'GT Firesnake Room Ledge'),
    ('GT Left Moldorm Ledge Drop Down', 'GT Moldorm'),
    ('GT Right Moldorm Ledge Drop Down', 'GT Moldorm'),
    ('GT Moldorm Gap', 'GT Validation'),
    ('GT Validation Block Path', 'GT Validation Door')
]
# Push-block connections presumably only valid in the vanilla door layout
# (name suggests vanilla-shuffle use; exact consumer not visible here).
vanilla_logical_connections = [
    ('Ice Cross Left Push Block', 'Ice Compass Room'),
    ('Ice Cross Right Push Block Bottom', 'Ice Compass Room'),
    ('Ice Cross Bottom Push Block Right', 'Ice Pengator Switch'),
    ('Ice Cross Top Push Block Right', 'Ice Pengator Switch'),
]
# Paired spiral-staircase door names (one end per tuple element).
spiral_staircases = [
    ('Hyrule Castle Back Hall Down Stairs', 'Hyrule Dungeon Map Room Up Stairs'),
    ('Hyrule Dungeon Armory Down Stairs', 'Hyrule Dungeon Staircase Up Stairs'),
    ('Hyrule Dungeon Staircase Down Stairs', 'Hyrule Dungeon Cellblock Up Stairs'),
    ('Sewers Behind Tapestry Down Stairs', 'Sewers Rope Room Up Stairs'),
    ('Sewers Secret Room Up Stairs', 'Sewers Pull Switch Down Stairs'),
    ('Eastern Darkness Up Stairs', 'Eastern Attic Start Down Stairs'),
    ('Desert Tiles 1 Up Stairs', 'Desert Bridge Down Stairs'),
    ('Hera Lobby Down Stairs', 'Hera Basement Cage Up Stairs'),
    ('Hera Lobby Key Stairs', 'Hera Tile Room Up Stairs'),
    ('Hera Lobby Up Stairs', 'Hera Beetles Down Stairs'),
    ('Hera Startile Wide Up Stairs', 'Hera 4F Down Stairs'),
    ('Hera 4F Up Stairs', 'Hera 5F Down Stairs'),
    ('Hera 5F Up Stairs', 'Hera Boss Down Stairs'),
    ('Tower Room 03 Up Stairs', 'Tower Lone Statue Down Stairs'),
    ('Tower Dark Chargers Up Stairs', 'Tower Dual Statues Down Stairs'),
    ('Tower Dark Archers Up Stairs', 'Tower Red Spears Down Stairs'),
    ('Tower Pacifist Run Up Stairs', 'Tower Push Statue Down Stairs'),
    ('PoD Left Cage Down Stairs', 'PoD Shooter Room Up Stairs'),
    ('PoD Middle Cage Down Stairs', 'PoD Warp Room Up Stairs'),
    ('PoD Basement Ledge Up Stairs', 'PoD Big Key Landing Down Stairs'),
    ('PoD Compass Room W Down Stairs', 'PoD Dark Basement W Up Stairs'),
    ('PoD Compass Room E Down Stairs', 'PoD Dark Basement E Up Stairs'),
    ('Swamp Entrance Down Stairs', 'Swamp Pot Row Up Stairs'),
    ('Swamp West Block Path Up Stairs', 'Swamp Attic Down Stairs'),
    ('Swamp Push Statue Down Stairs', 'Swamp Flooded Room Up Stairs'),
    ('Swamp Left Elbow Down Stairs', 'Swamp Drain Left Up Stairs'),
    ('Swamp Right Elbow Down Stairs', 'Swamp Drain Right Up Stairs'),
    ('Swamp Behind Waterfall Up Stairs', 'Swamp C Down Stairs'),
    ('Thieves Spike Switch Up Stairs', 'Thieves Attic Down Stairs'),
    ('Thieves Conveyor Maze Down Stairs', 'Thieves Basement Block Up Stairs'),
    ('Ice Jelly Key Down Stairs', 'Ice Floor Switch Up Stairs'),
    ('Ice Narrow Corridor Down Stairs', 'Ice Pengator Trap Up Stairs'),
    ('Ice Spike Room Up Stairs', 'Ice Hammer Block Down Stairs'),
    ('Ice Spike Room Down Stairs', 'Ice Spikeball Up Stairs'),
    ('Ice Lonely Freezor Down Stairs', 'Iced T Up Stairs'),
    ('Ice Backwards Room Down Stairs', 'Ice Anti-Fairy Up Stairs'),
    ('Mire Post-Gap Down Stairs', 'Mire 2 Up Stairs'),
    ('Mire Left Bridge Down Stairs', 'Mire Dark Shooters Up Stairs'),
    ('Mire Conveyor Barrier Up Stairs', 'Mire Torches Top Down Stairs'),
    ('Mire Falling Foes Up Stairs', 'Mire Firesnake Skip Down Stairs'),
    ('TR Chain Chomps Down Stairs', 'TR Pipe Pit Up Stairs'),
    ('TR Crystaroller Down Stairs', 'TR Dark Ride Up Stairs'),
    ('GT Lobby Left Down Stairs', 'GT Torch Up Stairs'),
    ('GT Lobby Up Stairs', 'GT Crystal Paths Down Stairs'),
    ('GT Lobby Right Down Stairs', 'GT Hope Room Up Stairs'),
    ('GT Blocked Stairs Down Stairs', 'GT Four Torches Up Stairs'),
    ('GT Cannonball Bridge Up Stairs', 'GT Gauntlet 1 Down Stairs'),
    ('GT Quad Pot Up Stairs', 'GT Wizzrobes 1 Down Stairs'),
    ('GT Moldorm Pit Up Stairs', 'GT Right Moldorm Ledge Down Stairs'),
    ('GT Frozen Over Up Stairs', 'GT Brightly Lit Hall Down Stairs')
]
# Straight staircases: connected two-way in link_doors because they are
# currently unable to be shuffled.
straight_staircases = [
    ('Hyrule Castle Lobby North Stairs', 'Hyrule Castle Throne Room South Stairs'),
    ('Sewers Rope Room North Stairs', 'Sewers Dark Cross South Stairs'),
    ('Tower Catwalk North Stairs', 'Tower Antechamber South Stairs'),
    ('PoD Conveyor North Stairs', 'PoD Map Balcony South Stairs'),
    ('TR Crystal Maze North Stairs', 'TR Final Abyss South Stairs')
]
# Open room edges (no door between the two regions); consumed in link_doors
# when door shuffle is vanilla — NOTE(review): full usage lies outside this view.
open_edges = [
    ('Hyrule Dungeon North Abyss South Edge', 'Hyrule Dungeon South Abyss North Edge'),
    ('Hyrule Dungeon North Abyss Catwalk Edge', 'Hyrule Dungeon South Abyss Catwalk North Edge'),
    ('Hyrule Dungeon South Abyss West Edge', 'Hyrule Dungeon Guardroom Abyss Edge'),
    ('Hyrule Dungeon South Abyss Catwalk West Edge', 'Hyrule Dungeon Guardroom Catwalk Edge'),
    ('Desert Main Lobby NW Edge', 'Desert North Hall SW Edge'),
    ('Desert Main Lobby N Edge', 'Desert Dead End Edge'),
    ('Desert Main Lobby NE Edge', 'Desert North Hall SE Edge'),
    ('Desert Main Lobby E Edge', 'Desert East Wing W Edge'),
    ('Desert East Wing N Edge', 'Desert Arrow Pot Corner S Edge'),
    ('Desert Arrow Pot Corner W Edge', 'Desert North Hall E Edge'),
    ('Desert North Hall W Edge', 'Desert Sandworm Corner S Edge'),
    ('Desert Sandworm Corner E Edge', 'Desert West Wing N Edge'),
    ('Thieves Lobby N Edge', 'Thieves Ambush S Edge'),
    ('Thieves Lobby NE Edge', 'Thieves Ambush SE Edge'),
    ('Thieves Ambush ES Edge', 'Thieves BK Corner WS Edge'),
    ('Thieves Ambush EN Edge', 'Thieves BK Corner WN Edge'),
    ('Thieves BK Corner S Edge', 'Thieves Compass Room N Edge'),
    ('Thieves BK Corner SW Edge', 'Thieves Compass Room NW Edge'),
    ('Thieves Compass Room WS Edge', 'Thieves Big Chest Nook ES Edge'),
    ('Thieves Cricket Hall Left Edge', 'Thieves Cricket Hall Right Edge')
]
# (pit exit, landing region) pairs: one-way falls, connected via
# connect_simple_door in link_doors.
falldown_pits = [
    ('Eastern Courtyard Potholes', 'Eastern Fairies'),
    ('Hera Beetles Holes', 'Hera Lobby'),
    ('Hera Startile Corner Holes', 'Hera Lobby'),
    ('Hera Startile Wide Holes', 'Hera Lobby'),
    ('Hera 4F Holes', 'Hera Lobby'), # failed bomb jump
    ('Hera Big Chest Landing Holes', 'Hera Startile Wide'), # the other holes near big chest
    ('Hera 5F Star Hole', 'Hera Big Chest Landing'),
    ('Hera 5F Pothole Chain', 'Hera Fairies'),
    ('Hera 5F Normal Holes', 'Hera 4F'),
    ('Hera Boss Outer Hole', 'Hera 5F'),
    ('Hera Boss Inner Hole', 'Hera 4F'),
    ('PoD Pit Room Freefall', 'PoD Stalfos Basement'),
    ('PoD Pit Room Bomb Hole', 'PoD Basement Ledge'),
    ('PoD Big Key Landing Hole', 'PoD Stalfos Basement'),
    ('Swamp Attic Right Pit', 'Swamp Barrier Ledge'),
    ('Swamp Attic Left Pit', 'Swamp West Ledge'),
    ('Skull Final Drop Hole', 'Skull Boss'),
    ('Ice Bomb Drop Hole', 'Ice Stalfos Hint'),
    ('Ice Falling Square Hole', 'Ice Tall Hint'),
    ('Ice Freezors Hole', 'Ice Big Chest View'),
    ('Ice Freezors Ledge Hole', 'Ice Big Chest View'),
    ('Ice Freezors Bomb Hole', 'Ice Big Chest Landing'),
    ('Ice Crystal Block Hole', 'Ice Switch Room'),
    ('Ice Crystal Right Blue Hole', 'Ice Switch Room'),
    ('Ice Backwards Room Hole', 'Ice Fairy'),
    ('Ice Antechamber Hole', 'Ice Boss'),
    ('Mire Attic Hint Hole', 'Mire BK Chest Ledge'),
    ('Mire Torches Top Holes', 'Mire Conveyor Barrier'),
    ('Mire Torches Bottom Holes', 'Mire Warping Pool'),
    ('GT Bob\'s Room Hole', 'GT Ice Armos'),
    ('GT Falling Torches Hole', 'GT Staredown'),
    ('GT Moldorm Hole', 'GT Moldorm Pit')
]
# (warp tile, destination region) pairs: one-way teleports, connected via
# connect_simple_door in link_doors.
dungeon_warps = [
    ('Eastern Fairies\' Warp', 'Eastern Courtyard'),
    ('Hera Fairies\' Warp', 'Hera 5F'),
    ('PoD Warp Hint Warp', 'PoD Warp Room'),
    ('PoD Warp Room Warp', 'PoD Warp Hint'),
    ('PoD Stalfos Basement Warp', 'PoD Warp Room'),
    ('PoD Callback Warp', 'PoD Dark Alley'),
    ('Ice Fairy Warp', 'Ice Anti-Fairy'),
    ('Mire Lone Warp Warp', 'Mire BK Door Room'),
    ('Mire Warping Pool Warp', 'Mire Square Rail'),
    ('GT Compass Room Warp', 'GT Conveyor Star Pits'),
    ('GT Spike Crystals Warp', 'GT Firesnake Room'),
    ('GT Warp Maze - Left Section Warp', 'GT Warp Maze - Rando Rail'),
    ('GT Warp Maze - Mid Section Left Warp', 'GT Warp Maze - Main Rails'),
    ('GT Warp Maze - Mid Section Right Warp', 'GT Warp Maze - Main Rails'),
    ('GT Warp Maze - Right Section Warp', 'GT Warp Maze - Main Rails'),
    ('GT Warp Maze - Pit Exit Warp', 'GT Warp Maze - Pot Rail'),
    ('GT Warp Maze - Rail Choice Left Warp', 'GT Warp Maze - Left Section'),
    ('GT Warp Maze - Rail Choice Right Warp', 'GT Warp Maze - Mid Section'),
    ('GT Warp Maze - Rando Rail Warp', 'GT Warp Maze - Mid Section'),
    ('GT Warp Maze - Main Rails Best Warp', 'GT Warp Maze - Pit Section'),
    ('GT Warp Maze - Main Rails Mid Left Warp', 'GT Warp Maze - Mid Section'),
    ('GT Warp Maze - Main Rails Mid Right Warp', 'GT Warp Maze - Mid Section'),
    ('GT Warp Maze - Main Rails Right Top Warp', 'GT Warp Maze - Right Section'),
    ('GT Warp Maze - Main Rails Right Mid Warp', 'GT Warp Maze - Right Section'),
    ('GT Warp Maze - Pot Rail Warp', 'GT Warp Maze Exit Section'),
    ('GT Hidden Star Warp', 'GT Invisible Bridges')
]
# Ladder pairs, connected two-way in link_doors via connect_two_way.
ladders = [
    ('PoD Bow Statue Down Ladder', 'PoD Dark Pegs Up Ladder'),
    ('Ice Big Key Down Ladder', 'Ice Tongue Pull Up Ladder'),
    ('Ice Firebar Down Ladder', 'Ice Freezors Up Ladder'),
    ('GT Staredown Up Ladder', 'GT Falling Torches Down Ladder')
]
# Doors interior to a supertile; connected as normal two-way connections in
# link_doors via connect_interior_doors (not shuffled for now, per link_doors).
interior_doors = [
    ('Hyrule Dungeon Armory Interior Key Door S', 'Hyrule Dungeon Armory Interior Key Door N'),
    ('Hyrule Dungeon Armory ES', 'Hyrule Dungeon Armory Boomerang WS'),
    ('Hyrule Dungeon Map Room Key Door S', 'Hyrule Dungeon North Abyss Key Door N'),
    ('Sewers Rat Path WS', 'Sewers Secret Room ES'),
    ('Sewers Rat Path WN', 'Sewers Secret Room EN'),
    ('Sewers Yet More Rats S', 'Sewers Pull Switch N'),
    ('Eastern Lobby N', 'Eastern Lobby Bridge S'),
    ('Eastern Lobby NW', 'Eastern Lobby Left Ledge SW'),
    ('Eastern Lobby NE', 'Eastern Lobby Right Ledge SE'),
    ('Eastern East Wing EN', 'Eastern Pot Switch WN'),
    ('Eastern East Wing ES', 'Eastern Map Balcony WS'),
    ('Eastern Pot Switch SE', 'Eastern Map Room NE'),
    ('Eastern West Wing WS', 'Eastern Stalfos Spawn ES'),
    ('Eastern Stalfos Spawn NW', 'Eastern Compass Room SW'),
    ('Eastern Compass Room EN', 'Eastern Hint Tile WN'),
    ('Eastern Dark Square EN', 'Eastern Dark Pots WN'),
    ('Eastern Darkness NE', 'Eastern Rupees SE'),
    ('Eastern False Switches WS', 'Eastern Cannonball Hell ES'),
    ('Eastern Single Eyegore NE', 'Eastern Duo Eyegores SE'),
    ('Desert East Lobby WS', 'Desert East Wing ES'),
    ('Desert East Wing Key Door EN', 'Desert Compass Key Door WN'),
    ('Desert North Hall NW', 'Desert Map SW'),
    ('Desert North Hall NE', 'Desert Map SE'),
    ('Desert Arrow Pot Corner NW', 'Desert Trap Room SW'),
    ('Desert Sandworm Corner NE', 'Desert Bonk Torch SE'),
    ('Desert Sandworm Corner WS', 'Desert Circle of Pots ES'),
    ('Desert Circle of Pots NW', 'Desert Big Chest SW'),
    ('Desert West Wing WS', 'Desert West Lobby ES',),
    ('Desert Fairy Fountain SW', 'Desert West Lobby NW'),
    ('Desert Back Lobby NW', 'Desert Tiles 1 SW'),
    ('Desert Bridge SW', 'Desert Four Statues NW'),
    ('Desert Four Statues ES', 'Desert Beamos Hall WS',),
    ('Desert Tiles 2 NE', 'Desert Wall Slide SE'),
    ('Hera Tile Room EN', 'Hera Tridorm WN'),
    ('Hera Tridorm SE', 'Hera Torches NE'),
    ('Hera Beetles WS', 'Hera Startile Corner ES'),
    ('Hera Startile Corner NW', 'Hera Startile Wide SW'),
    ('Tower Lobby NW', 'Tower Gold Knights SW'),
    ('Tower Gold Knights EN', 'Tower Room 03 WN'),
    ('Tower Lone Statue WN', 'Tower Dark Maze EN'),
    ('Tower Dark Maze ES', 'Tower Dark Chargers WS'),
    ('Tower Dual Statues WS', 'Tower Dark Pits ES'),
    ('Tower Dark Pits EN', 'Tower Dark Archers WN'),
    ('Tower Red Spears WN', 'Tower Red Guards EN'),
    ('Tower Red Guards SW', 'Tower Circle of Pots NW'),
    ('Tower Circle of Pots ES', 'Tower Pacifist Run WS'),
    ('Tower Push Statue WS', 'Tower Catwalk ES'),
    ('Tower Antechamber NW', 'Tower Altar SW'),
    ('PoD Lobby N', 'PoD Middle Cage S'),
    ('PoD Lobby NW', 'PoD Left Cage SW'),
    ('PoD Lobby NE', 'PoD Middle Cage SE'),
    ('PoD Warp Hint SE', 'PoD Jelly Hall NE'),
    ('PoD Jelly Hall NW', 'PoD Mimics 1 SW'),
    ('PoD Falling Bridge EN', 'PoD Compass Room WN'),
    ('PoD Compass Room SE', 'PoD Harmless Hellway NE'),
    ('PoD Mimics 2 NW', 'PoD Bow Statue SW'),
    ('PoD Dark Pegs WN', 'PoD Lonely Turtle EN'),
    ('PoD Lonely Turtle SW', 'PoD Turtle Party NW'),
    ('PoD Turtle Party ES', 'PoD Callback WS'),
    ('Swamp Trench 1 Nexus N', 'Swamp Trench 1 Alcove S'),
    ('Swamp Trench 1 Key Ledge NW', 'Swamp Hammer Switch SW'),
    ('Swamp Donut Top SE', 'Swamp Donut Bottom NE'),
    ('Swamp Donut Bottom NW', 'Swamp Compass Donut SW'),
    ('Swamp Crystal Switch SE', 'Swamp Shortcut NE'),
    ('Swamp Trench 2 Blocks N', 'Swamp Trench 2 Alcove S'),
    ('Swamp Push Statue NW', 'Swamp Shooters SW'),
    ('Swamp Push Statue NE', 'Swamp Right Elbow SE'),
    ('Swamp Shooters EN', 'Swamp Left Elbow WN'),
    ('Swamp Drain WN', 'Swamp Basement Shallows EN'),
    ('Swamp Flooded Room WS', 'Swamp Basement Shallows ES'),
    ('Swamp Waterfall Room NW', 'Swamp Refill SW'),
    ('Swamp Waterfall Room NE', 'Swamp Behind Waterfall SE'),
    ('Swamp C SE', 'Swamp Waterway NE'),
    ('Swamp Waterway N', 'Swamp I S'),
    ('Swamp Waterway NW', 'Swamp T SW'),
    ('Skull 1 Lobby ES', 'Skull Map Room WS'),
    ('Skull Pot Circle WN', 'Skull Pull Switch EN'),
    ('Skull Pull Switch S', 'Skull Big Chest N'),
    ('Skull Left Drop ES', 'Skull Compass Room WS'),
    ('Skull 2 East Lobby NW', 'Skull Big Key SW'),
    ('Skull Big Key WN', 'Skull Lone Pot EN'),
    ('Skull Small Hall WS', 'Skull 2 West Lobby ES'),
    ('Skull 2 West Lobby NW', 'Skull X Room SW'),
    ('Skull 3 Lobby EN', 'Skull East Bridge WN'),
    ('Skull East Bridge WS', 'Skull West Bridge Nook ES'),
    ('Skull Star Pits ES', 'Skull Torch Room WS'),
    ('Skull Torch Room WN', 'Skull Vines EN'),
    ('Skull Spike Corner ES', 'Skull Final Drop WS'),
    ('Thieves Hallway WS', 'Thieves Pot Alcove Mid ES'),
    ('Thieves Conveyor Maze SW', 'Thieves Pot Alcove Top NW'),
    ('Thieves Conveyor Maze EN', 'Thieves Hallway WN'),
    ('Thieves Spike Track NE', 'Thieves Triple Bypass SE'),
    ('Thieves Spike Track WS', 'Thieves Hellway Crystal ES'),
    ('Thieves Hellway Crystal EN', 'Thieves Triple Bypass WN'),
    ('Thieves Attic ES', 'Thieves Cricket Hall Left WS'),
    ('Thieves Cricket Hall Right ES', 'Thieves Attic Window WS'),
    ('Thieves Blocked Entry SW', 'Thieves Lonely Zazak NW'),
    ('Thieves Lonely Zazak ES', 'Thieves Blind\'s Cell WS'),
    ('Thieves Conveyor Bridge WS', 'Thieves Big Chest Room ES'),
    ('Thieves Conveyor Block WN', 'Thieves Trap EN'),
    ('Ice Lobby WS', 'Ice Jelly Key ES'),
    ('Ice Floor Switch ES', 'Ice Cross Left WS'),
    ('Ice Cross Top NE', 'Ice Bomb Drop SE'),
    ('Ice Pengator Switch ES', 'Ice Dead End WS'),
    ('Ice Stalfos Hint SE', 'Ice Conveyor NE'),
    ('Ice Bomb Jump EN', 'Ice Narrow Corridor WN'),
    ('Ice Spike Cross WS', 'Ice Firebar ES'),
    ('Ice Spike Cross NE', 'Ice Falling Square SE'),
    ('Ice Hammer Block ES', 'Ice Tongue Pull WS'),
    ('Ice Freezors Ledge ES', 'Ice Tall Hint WS'),
    ('Ice Hookshot Balcony SW', 'Ice Spikeball NW'),
    ('Ice Crystal Right NE', 'Ice Backwards Room SE'),
    ('Ice Crystal Left WS', 'Ice Big Chest View ES'),
    ('Ice Anti-Fairy SE', 'Ice Switch Room NE'),
    ('Mire Lone Shooter ES', 'Mire Falling Bridge WS'), # technically one-way
    ('Mire Falling Bridge W', 'Mire Failure Bridge E'), # technically one-way
    ('Mire Falling Bridge WN', 'Mire Map Spike Side EN'), # technically one-way
    ('Mire Hidden Shooters WS', 'Mire Cross ES'), # technically one-way
    ('Mire Hidden Shooters NE', 'Mire Minibridge SE'),
    ('Mire Spikes NW', 'Mire Ledgehop SW'),
    ('Mire Spike Barrier ES', 'Mire Square Rail WS'),
    ('Mire Square Rail NW', 'Mire Lone Warp SW'),
    ('Mire Wizzrobe Bypass WN', 'Mire Compass Room EN'), # technically one-way
    ('Mire Conveyor Crystal WS', 'Mire Tile Room ES'),
    ('Mire Tile Room NW', 'Mire Compass Room SW'),
    ('Mire Neglected Room SE', 'Mire Chest View NE'),
    ('Mire BK Chest Ledge WS', 'Mire Warping Pool ES'), # technically one-way
    ('Mire Torches Top SW', 'Mire Torches Bottom NW'),
    ('Mire Torches Bottom WS', 'Mire Attic Hint ES'),
    ('Mire Dark Shooters SE', 'Mire Key Rupees NE'),
    ('Mire Dark Shooters SW', 'Mire Block X NW'),
    ('Mire Tall Dark and Roomy WS', 'Mire Crystal Right ES'),
    ('Mire Tall Dark and Roomy WN', 'Mire Shooter Rupees EN'),
    ('Mire Crystal Mid NW', 'Mire Crystal Top SW'),
    ('TR Tile Room NE', 'TR Refill SE'),
    ('TR Pokey 1 NW', 'TR Chain Chomps SW'),
    ('TR Twin Pokeys EN', 'TR Dodgers WN'),
    ('TR Twin Pokeys SW', 'TR Hallway NW'),
    ('TR Hallway ES', 'TR Big View WS'),
    ('TR Big Chest NE', 'TR Dodgers SE'),
    ('TR Dash Room ES', 'TR Tongue Pull WS'),
    ('TR Dash Room NW', 'TR Crystaroller SW'),
    ('TR Tongue Pull NE', 'TR Rupees SE'),
    ('GT Torch EN', 'GT Hope Room WN'),
    ('GT Torch SW', 'GT Big Chest NW'),
    ('GT Tile Room EN', 'GT Speed Torch WN'),
    ('GT Speed Torch WS', 'GT Pots n Blocks ES'),
    ('GT Crystal Conveyor WN', 'GT Compass Room EN'),
    ('GT Conveyor Cross WN', 'GT Hookshot EN'),
    ('GT Hookshot ES', 'GT Map Room WS'),
    ('GT Double Switch EN', 'GT Spike Crystals WN'),
    ('GT Firesnake Room SW', 'GT Warp Maze (Rails) NW'),
    ('GT Ice Armos NE', 'GT Big Key Room SE'),
    ('GT Ice Armos WS', 'GT Four Torches ES'),
    ('GT Four Torches NW', 'GT Fairy Abyss SW'),
    ('GT Crystal Paths SW', 'GT Mimics 1 NW'),
    ('GT Mimics 1 ES', 'GT Mimics 2 WS'),
    ('GT Mimics 2 NE', 'GT Dash Hall SE'),
    ('GT Cannonball Bridge SE', 'GT Refill NE'),
    ('GT Gauntlet 1 WN', 'GT Gauntlet 2 EN'),
    ('GT Gauntlet 2 SW', 'GT Gauntlet 3 NW'),
    ('GT Gauntlet 4 SW', 'GT Gauntlet 5 NW'),
    ('GT Beam Dash WS', 'GT Lanmolas 2 ES'),
    ('GT Lanmolas 2 NW', 'GT Quad Pot SW'),
    ('GT Wizzrobes 1 SW', 'GT Dashing Bridge NW'),
    ('GT Dashing Bridge NE', 'GT Wizzrobes 2 SE'),
    ('GT Torch Cross ES', 'GT Staredown WS'),
    ('GT Falling Torches NE', 'GT Mini Helmasaur Room SE'),
    ('GT Mini Helmasaur Room WN', 'GT Bomb Conveyor EN'),
    ('GT Bomb Conveyor SW', 'GT Crystal Circles NW')
]
# Door pairs that are small-key doors. NOTE(review): several entries duplicate
# staircase pairs listed above (e.g. 'Eastern Darkness Up Stairs') —
# presumably the stair itself is the key door; consumer not visible here.
key_doors = [
    ('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
    ('Sewers Dark Cross Key Door N', 'Sewers Water S'),
    ('Eastern Dark Square Key Door WN', 'Eastern Cannonball Ledge Key Door EN'),
    ('Eastern Darkness Up Stairs', 'Eastern Attic Start Down Stairs'),
    ('Eastern Big Key NE', 'Eastern Hint Tile Blocked Path SE'),
    ('Eastern Darkness S', 'Eastern Courtyard N'),
    ('Desert East Wing Key Door EN', 'Desert Compass Key Door WN'),
    ('Desert Tiles 1 Up Stairs', 'Desert Bridge Down Stairs'),
    ('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
    ('Desert Tiles 2 NE', 'Desert Wall Slide SE'),
    ('Desert Wall Slide NW', 'Desert Boss SW'),
    ('Hera Lobby Key Stairs', 'Hera Tile Room Up Stairs'),
    ('Hera Startile Corner NW', 'Hera Startile Wide SW'),
    ('PoD Middle Cage N', 'PoD Pit Room S'),
    ('PoD Arena Main NW', 'PoD Falling Bridge SW'),
    ('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
]
# Vanilla small-key door placement per dungeon. Entries are either a
# (door, door) pair or a single door name — presumably single names denote
# stair key doors with one logical side; confirm against the consumer.
default_small_key_doors = {
    'Hyrule Castle': [
        ('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
        ('Sewers Dark Cross Key Door N', 'Sewers Water S'),
        ('Hyrule Dungeon Map Room Key Door S', 'Hyrule Dungeon North Abyss Key Door N'),
        ('Hyrule Dungeon Armory Interior Key Door N', 'Hyrule Dungeon Armory Interior Key Door S')
    ],
    'Eastern Palace': [
        ('Eastern Dark Square Key Door WN', 'Eastern Cannonball Ledge Key Door EN'),
        'Eastern Darkness Up Stairs',
    ],
    'Desert Palace': [
        ('Desert East Wing Key Door EN', 'Desert Compass Key Door WN'),
        'Desert Tiles 1 Up Stairs',
        ('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
        ('Desert Tiles 2 NE', 'Desert Wall Slide SE'),
    ],
    'Tower of Hera': [
        'Hera Lobby Key Stairs'
    ],
    'Agahnims Tower': [
        'Tower Room 03 Up Stairs',
        ('Tower Dark Maze ES', 'Tower Dark Chargers WS'),
        'Tower Dark Archers Up Stairs',
        ('Tower Circle of Pots ES', 'Tower Pacifist Run WS'),
    ],
    'Palace of Darkness': [
        ('PoD Middle Cage N', 'PoD Pit Room S'),
        ('PoD Arena Main NW', 'PoD Falling Bridge SW'),
        ('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
        'PoD Basement Ledge Up Stairs',
        ('PoD Compass Room SE', 'PoD Harmless Hellway NE'),
        ('PoD Dark Pegs WN', 'PoD Lonely Turtle EN')
    ],
    'Swamp Palace': [
        'Swamp Entrance Down Stairs',
        ('Swamp Pot Row WS', 'Swamp Trench 1 Approach ES'),
        ('Swamp Trench 1 Key Ledge NW', 'Swamp Hammer Switch SW'),
        ('Swamp Hub WN', 'Swamp Crystal Switch EN'),
        ('Swamp Hub North Ledge N', 'Swamp Push Statue S'),
        ('Swamp Waterway NW', 'Swamp T SW')
    ],
    'Skull Woods': [
        ('Skull 1 Lobby WS', 'Skull Pot Prison ES'),
        ('Skull Map Room SE', 'Skull Pinball NE'),
        ('Skull 2 West Lobby NW', 'Skull X Room SW'),
        ('Skull 3 Lobby NW', 'Skull Star Pits SW'),
        ('Skull Spike Corner ES', 'Skull Final Drop WS')
    ],
    'Thieves Town': [
        ('Thieves Hallway WS', 'Thieves Pot Alcove Mid ES'),
        'Thieves Spike Switch Up Stairs',
        ('Thieves Conveyor Bridge WS', 'Thieves Big Chest Room ES')
    ],
    'Ice Palace': [
        'Ice Jelly Key Down Stairs',
        ('Ice Conveyor SW', 'Ice Bomb Jump NW'),
        ('Ice Spike Cross ES', 'Ice Spike Room WS'),
        ('Ice Tall Hint SE', 'Ice Lonely Freezor NE'),
        'Ice Backwards Room Down Stairs',
        ('Ice Switch Room ES', 'Ice Refill WS')
    ],
    'Misery Mire': [
        ('Mire Hub WS', 'Mire Conveyor Crystal ES'),
        ('Mire Hub Right EN', 'Mire Map Spot WN'),
        ('Mire Spikes NW', 'Mire Ledgehop SW'),
        ('Mire Fishbone SE', 'Mire Spike Barrier NE'),
        ('Mire Conveyor Crystal WS', 'Mire Tile Room ES'),
        ('Mire Dark Shooters SE', 'Mire Key Rupees NE')
    ],
    'Turtle Rock': [
        ('TR Hub NW', 'TR Pokey 1 SW'),
        ('TR Pokey 1 NW', 'TR Chain Chomps SW'),
        'TR Chain Chomps Down Stairs',
        ('TR Pokey 2 ES', 'TR Lava Island WS'),
        'TR Crystaroller Down Stairs',
        ('TR Dash Bridge WS', 'TR Crystal Maze ES')
    ],
    'Ganons Tower': [
        ('GT Torch EN', 'GT Hope Room WN'),
        ('GT Tile Room EN', 'GT Speed Torch WN'),
        ('GT Hookshot ES', 'GT Map Room WS'),
        ('GT Double Switch EN', 'GT Spike Crystals WN'),
        ('GT Firesnake Room SW', 'GT Warp Maze (Rails) NW'),
        ('GT Conveyor Star Pits EN', 'GT Falling Bridge WN'),
        ('GT Mini Helmasaur Room WN', 'GT Bomb Conveyor EN'),
        ('GT Crystal Circles SW', 'GT Left Moldorm Ledge NW')
    ]
}
default_door_connections = [
('Hyrule Castle Lobby W', 'Hyrule Castle West Lobby E'),
('Hyrule Castle Lobby E', 'Hyrule Castle East Lobby W'),
('Hyrule Castle Lobby WN', 'Hyrule Castle West Lobby EN'),
('Hyrule Castle West Lobby N', 'Hyrule Castle West Hall S'),
('Hyrule Castle East Lobby N', 'Hyrule Castle East Hall S'),
('Hyrule Castle East Lobby NW', 'Hyrule Castle East Hall SW'),
('Hyrule Castle East Hall W', 'Hyrule Castle Back Hall E'),
('Hyrule Castle West Hall E', 'Hyrule Castle Back Hall W'),
('Hyrule Castle Throne Room N', 'Sewers Behind Tapestry S'),
('Hyrule Dungeon Guardroom N', 'Hyrule Dungeon Armory S'),
('Sewers Dark Cross Key Door N', 'Sewers Water S'),
('Sewers Water W', 'Sewers Key Rat E'),
('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
('Eastern Lobby Bridge N', 'Eastern Cannonball S'),
('Eastern Cannonball N', 'Eastern Courtyard Ledge S'),
('Eastern Cannonball Ledge WN', 'Eastern Big Key EN'),
('Eastern Cannonball Ledge Key Door EN', 'Eastern Dark Square Key Door WN'),
('Eastern Courtyard Ledge W', 'Eastern West Wing E'),
('Eastern Courtyard Ledge E', 'Eastern East Wing W'),
('Eastern Hint Tile EN', 'Eastern Courtyard WN'),
('Eastern Big Key NE', 'Eastern Hint Tile Blocked Path SE'),
('Eastern Courtyard EN', 'Eastern Map Valley WN'),
('Eastern Courtyard N', 'Eastern Darkness S'),
('Eastern Map Valley SW', 'Eastern Dark Square NW'),
('Eastern Attic Start WS', 'Eastern False Switches ES'),
('Eastern Cannonball Hell WS', 'Eastern Single Eyegore ES'),
('Desert Compass NW', 'Desert Cannonball S'),
('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
('PoD Middle Cage N', 'PoD Pit Room S'),
('PoD Pit Room NW', 'PoD Arena Main SW'),
('PoD Pit Room NE', 'PoD Arena Bridge SE'),
('PoD Arena Main NW', 'PoD Falling Bridge SW'),
('PoD Arena Crystals E', 'PoD Sexy Statue W'),
('PoD Mimics 1 NW', 'PoD Conveyor SW'),
('PoD Map Balcony WS', 'PoD Arena Ledge ES'),
('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
('PoD Dark Maze E', 'PoD Big Chest Balcony W'),
('PoD Sexy Statue NW', 'PoD Mimics 2 SW'),
('Swamp Pot Row WN', 'Swamp Map Ledge EN'),
('Swamp Pot Row WS', 'Swamp Trench 1 Approach ES'),
('Swamp Trench 1 Departure WS', 'Swamp Hub ES'),
('Swamp Hammer Switch WN', 'Swamp Hub Dead Ledge EN'),
('Swamp Hub S', 'Swamp Donut Top N'),
('Swamp Hub WS', 'Swamp Trench 2 Pots ES'),
('Swamp Hub WN', 'Swamp Crystal Switch EN'),
('Swamp Hub North Ledge N', 'Swamp Push Statue S'),
('Swamp Trench 2 Departure WS', 'Swamp West Shallows ES'),
('Swamp Big Key Ledge WN', 'Swamp Barrier EN'),
('Swamp Basement Shallows NW', 'Swamp Waterfall Room SW'),
('Skull 1 Lobby WS', 'Skull Pot Prison ES'),
('Skull Map Room SE', 'Skull Pinball NE'),
('Skull Pinball WS', 'Skull Compass Room ES'),
('Skull Compass Room NE', 'Skull Pot Prison SE'),
('Skull 2 East Lobby WS', 'Skull Small Hall ES'),
('Skull 3 Lobby NW', 'Skull Star Pits SW'),
('Skull Vines NW', 'Skull Spike Corner SW'),
('Thieves Lobby E', 'Thieves Compass Room W'),
('Thieves Ambush E', 'Thieves Rail Ledge W'),
('Thieves Rail Ledge NW', 'Thieves Pot Alcove Bottom SW'),
('Thieves BK Corner NE', 'Thieves Hallway SE'),
('Thieves Pot Alcove Mid WS', 'Thieves Spike Track ES'),
('Thieves Hellway NW', 'Thieves Spike Switch SW'),
('Thieves Triple Bypass EN', 'Thieves Conveyor Maze WN'),
('Thieves Basement Block WN', 'Thieves Conveyor Bridge EN'),
('Thieves Lonely Zazak WS', 'Thieves Conveyor Bridge ES'),
('Ice Cross Bottom SE', 'Ice Compass Room NE'),
('Ice Cross Right ES', 'Ice Pengator Switch WS'),
('Ice Conveyor SW', 'Ice Bomb Jump NW'),
('Ice Pengator Trap NE', 'Ice Spike Cross SE'),
('Ice Spike Cross ES', 'Ice Spike Room WS'),
('Ice Tall Hint SE', 'Ice Lonely Freezor NE'),
('Ice Tall Hint EN', 'Ice Hookshot Ledge WN'),
('Iced T EN', 'Ice Catwalk WN'),
('Ice Catwalk NW', 'Ice Many Pots SW'),
('Ice Many Pots WS', 'Ice Crystal Right ES'),
('Ice Switch Room ES', 'Ice Refill WS'),
('Ice Switch Room SE', 'Ice Antechamber NE'),
('Mire 2 NE', 'Mire Hub SE'),
('Mire Hub ES', 'Mire Lone Shooter WS'),
('Mire Hub E', 'Mire Failure Bridge W'),
('Mire Hub NE', 'Mire Hidden Shooters SE'),
('Mire Hub WN', 'Mire Wizzrobe Bypass EN'),
('Mire Hub WS', 'Mire Conveyor Crystal ES'),
('Mire Hub Right EN', 'Mire Map Spot WN'),
('Mire Hub Top NW', 'Mire Cross SW'),
('Mire Hidden Shooters ES', 'Mire Spikes WS'),
('Mire Minibridge NE', 'Mire Right Bridge SE'),
('Mire BK Door Room EN', 'Mire Ledgehop WN'),
('Mire BK Door Room N', 'Mire Left Bridge S'),
('Mire Spikes SW', 'Mire Crystal Dead End NW'),
('Mire Ledgehop NW', 'Mire Bent Bridge SW'),
('Mire Bent Bridge W', 'Mire Over Bridge E'),
('Mire Over Bridge W', 'Mire Fishbone E'),
('Mire Fishbone SE', 'Mire Spike Barrier NE'),
('Mire Spike Barrier SE', 'Mire Wizzrobe Bypass NE'),
('Mire Conveyor Crystal SE', 'Mire Neglected Room NE'),
('Mire Tile Room SW', 'Mire Conveyor Barrier NW'),
('Mire Block X WS', 'Mire Tall Dark and Roomy ES'),
('Mire Crystal Left WS', 'Mire Falling Foes ES'),
('TR Lobby Ledge NE', 'TR Hub SE'),
('TR Compass Room NW', 'TR Hub SW'),
('TR Hub ES', 'TR Torches Ledge WS'),
('TR Hub EN', 'TR Torches WN'),
('TR Hub NW', 'TR Pokey 1 SW'),
('TR Hub NE', 'TR Tile Room SE'),
('TR Torches NW', 'TR Roller Room SW'),
('TR Pipe Pit WN', 'TR Lava Dual Pipes EN'),
('TR Lava Island ES', 'TR Pipe Ledge WS'),
('TR Lava Dual Pipes WN', 'TR Pokey 2 EN'),
('TR Lava Dual Pipes SW', 'TR Twin Pokeys NW'),
('TR Pokey 2 ES', 'TR Lava Island WS'),
('TR Dodgers NE', 'TR Lava Escape SE'),
('TR Lava Escape NW', 'TR Dash Room SW'),
('TR Hallway WS', 'TR Lazy Eyes ES'),
('TR Dark Ride SW', 'TR Dash Bridge NW'),
('TR Dash Bridge SW', 'TR Eye Bridge NW'),
('TR Dash Bridge WS', 'TR Crystal Maze ES'),
('GT Torch WN', 'GT Conveyor Cross EN'),
('GT Hope Room EN', 'GT Tile Room WN'),
('GT Big Chest SW', 'GT Invisible Catwalk NW'),
('GT Bob\'s Room SE', 'GT Invisible Catwalk NE'),
('GT Speed Torch NE', 'GT Petting Zoo SE'),
('GT Speed Torch SE', 'GT Crystal Conveyor NE'),
('GT Warp Maze (Pits) ES', 'GT Invisible Catwalk WS'),
('GT Hookshot NW', 'GT DMs Room SW'),
('GT Hookshot SW', 'GT Double Switch NW'),
('GT Warp Maze (Rails) WS', 'GT Randomizer Room ES'),
('GT Conveyor Star Pits EN', 'GT Falling Bridge WN'),
('GT Falling Bridge WS', 'GT Hidden Star ES'),
('GT Dash Hall NE', 'GT Hidden Spikes SE'),
('GT Hidden Spikes EN', 'GT Cannonball Bridge WN'),
('GT Gauntlet 3 SW', 'GT Gauntlet 4 NW'),
('GT Gauntlet 5 WS', 'GT Beam Dash ES'),
('GT Wizzrobes 2 NE', 'GT Conveyor Bridge SE'),
('GT Conveyor Bridge EN', 'GT Torch Cross WN'),
('GT Crystal Circles SW', 'GT Left Moldorm Ledge NW')
]
# Door pairs that are traversable in one direction only (source door first);
# per the names these are mostly boss entrances plus a few one-way ledges/catwalks.
default_one_way_connections = [
    ('Sewers Pull Switch S', 'Sanctuary N'),
    ('Eastern Duo Eyegores NE', 'Eastern Boss SE'),
    ('Desert Wall Slide NW', 'Desert Boss SW'),
    ('Tower Altar NW', 'Tower Agahnim 1 SW'),
    ('PoD Harmless Hellway SE', 'PoD Arena Main NE'),
    ('PoD Dark Alley NE', 'PoD Boss SE'),
    ('Swamp T NW', 'Swamp Boss SW'),
    ('Thieves Hallway NE', 'Thieves Boss SE'),
    ('Mire Antechamber NW', 'Mire Boss SW'),
    ('TR Final Abyss NW', 'TR Boss SW'),
    ('GT Invisible Bridges WS', 'GT Invisible Catwalk ES'),
    ('GT Validation WS', 'GT Frozen Over ES'),
    ('GT Brightly Lit Hall NW', 'GT Agahnim 2 SW')
]
# For crossed
# offset from 0x122e17, sram storage, write offset from compass_w_addr, 0 = jmp or # of nops, dungeon_id
compass_data = {
    # One 5-tuple per dungeon; fields as described in the comment above:
    # (rom offset, sram byte, compass write offset, jmp/nop count, dungeon id).
    # Note Ganons Tower is the only entry with a non-zero 4th field (2 nops).
    'Hyrule Castle': (0x1, 0xc0, 0x16, 0, 0x02),
    'Eastern Palace': (0x1C, 0xc1, 0x28, 0, 0x04),
    'Desert Palace': (0x35, 0xc2, 0x4a, 0, 0x06),
    'Agahnims Tower': (0x51, 0xc3, 0x5c, 0, 0x08),
    'Swamp Palace': (0x6A, 0xc4, 0x7e, 0, 0x0a),
    'Palace of Darkness': (0x83, 0xc5, 0xa4, 0, 0x0c),
    'Misery Mire': (0x9C, 0xc6, 0xca, 0, 0x0e),
    'Skull Woods': (0xB5, 0xc7, 0xf0, 0, 0x10),
    'Ice Palace': (0xD0, 0xc8, 0x102, 0, 0x12),
    'Tower of Hera': (0xEB, 0xc9, 0x114, 0, 0x14),
    'Thieves Town': (0x106, 0xca, 0x138, 0, 0x16),
    'Turtle Rock': (0x11F, 0xcb, 0x15e, 0, 0x18),
    'Ganons Tower': (0x13A, 0xcc, 0x170, 2, 0x1a)
}
| DoorShuffle.py | 108,835 | Drop-down connections & push blocks These should all be connected for now as normal connections These connections are here because they are currently unable to be shuffled if not world.experimental[player]: todo: I think this function is not necessary traverse dungeons and make sure dungeon property is assigned needs to be added needs to be added todo: set all_entrances for basic some useful functions if these were already connected somewhere, remove the backreference if these were already connected somewhere, remove the backreference def unpair_all_doors(world, player): for paired_door in world.paired_doors[player]: paired_door.pair = False shuffle_key_doors for dungeons todo: kill drop exceptions only because they have unique regions see if this unexplored region connects elsewhere TODO: The "starts" regions need access logic Aerinon's note: I think this is handled already by ER Rules - may need to check correct requirements Part one - generate a random layout "Ugly" doors are doors that we don't want to see from the front, because of some sort of unsupported key door. To handle them, make a map of "ugly regions" and never link across them. Add all start regions to the open set. Loop until all available doors are used Pick a random available door to connect, prioritizing ones that aren't blocked. This makes them either get picked up through another door (so they head deeper into the dungeon), or puts them late in the dungeon (so they probably are part of a loop). Panic if neither of these happens. Find an available region that has a compatible door Also ignore compatible doors if they're blocked; these should only be used to create loops. 
Apply connection and add the new region's doors to the available list Figure out the new room's ugliness region Add the doors If an ugly door is anything but the connect door, panic and die We've used this region and door, so don't use them again If there's no available region with a door, use an internal connection Check that we used everything, and retry if we failed Connects a and b. Or don't if they're an unsupported connection type. TODO: This is gross, don't do it this way Return on unsupported types. Connect supported types If we failed to account for a type, panic Finds a compatible door in regions, returns the region and door these aren't compatible with anything goals: 1. have enough chests to be interesting (2 more than dungeon items) 2. have a balanced amount of regions added (check) 3. prevent soft locks due to key usage (algorithm written) 4. rules in place to affect item placement (lamp, keys, etc. -- in rules) 5. to be complete -- all doors linked (check, somewhat) 6. avoid deadlocks/dead end dungeon (check) 7. certain paths through dungeon must be possible - be able to reach goals (check) removes map removes map restore HC map restore GT map Re-assign dungeon bosses Step 1: Find Small Key Door Candidates Step 2: Initial Key Number Assignment & Calculate Flexibility Step 3: Initial valid combination find - reduce flex if needed Step 4: Try to assign remaining keys Last Step: Adjust Small Key Dungeon Pool those with split region starts like Desert/Skull combine for key layouts count number of key doors - this could be a table? 
traverse dungeon and find candidates not valid if: Normal and Pair in is Checked and Pair is not in Candidates Missing compass/map Missing both compass/map gt can lose map once compasses work find valid combination of candidates reduce number of key doors reset time since itr reset make changes I think this works for crossed now ensure flag is set ensure flag is set we don't have spiral stairs candidates yet that aren't already key doors, DoorKind.BigKey] noinspection PyTypeChecker special spawn point in cave todo: ignore standard mode hyrule castle ledge?revisit this for entrance shuffle If on, GT minibosses marked as defeated when they try to spawn a heart If on, Players will start with mirror scroll If on, pre opens the desert wall, no fire required DATA GOES DOWN HERE failed bomb jump the other holes near big chest technically one-way technically one-way technically one-way technically one-way technically one-way technically one-way For crossed offset from 0x122e17, sram storage, write offset from compass_w_addr, 0 = jmp or of nops, dungeon_id | 4,313 | en | 0.88455 |
""" info API method."""
from ibsng.handler.handler import Handler
class getAllGatewayNames(Handler):
    """Handler for the getAllGatewayNames API method."""

    def setup(self, **kwargs):
        """Store every keyword argument as an attribute on this handler.

        :param dict kwargs: input args
        :return: void
        :rtype: void
        """
        for name, arg in kwargs.items():
            setattr(self, name, arg)
| ibsng/handler/online_payment/get_all_gateway_names.py | 379 | info method class.
Setup required parameters.
:param dict kwargs: input args
:return: void
:rtype: void
info API method. | 122 | en | 0.242569 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operation_group_two_operations import build_test_five_request, build_test_four_request
T = TypeVar('T')  # result type produced by the optional `cls` response callback
# Signature of the `cls` kwarg: a callable given (pipeline response, deserialized
# body, response headers) that produces the value returned to the caller.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: this class is AutoRest-generated (see file header); comments below are
# review annotations only and will be lost on regeneration.
class OperationGroupTwoOperations:
    """OperationGroupTwoOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~multiapi.v3.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def test_four(
        self,
        input: Optional[Union[IO, "_models.SourcePath"]] = None,
        **kwargs: Any
    ) -> None:
        """TestFour should be in OperationGroupTwoOperations.

        :param input: Input parameter.
        :type input: IO or ~multiapi.v3.models.SourcePath
        :keyword str content_type: Media type of the body sent to the API. Default value is
         "application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png",
         "image/tiff", "application/json."
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Callers may extend/override the status-code -> exception mapping.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[Union[str, "_models.ContentType"]]

        # Body handling: serialize to JSON for 'application/json', pass the raw
        # stream through for the binary media types, reject anything else.
        # split(";") drops any charset/boundary parameters before comparing.
        _json = None
        _content = None
        if content_type.split(";")[0] in ['application/json']:
            if input is not None:
                _json = self._serialize.body(input, 'SourcePath')
        elif content_type.split(";")[0] in ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff']:
            _content = input
        else:
            raise ValueError(
                "The content_type '{}' is not one of the allowed values: "
                "['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(content_type)
            )

        request = build_test_four_request(
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.test_four.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything but 200 becomes a typed exception, with a deserialized
        # Error model attached when the body parses (failsafe_deserialize).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    test_four.metadata = {'url': '/multiapi/two/testFourEndpoint'}  # type: ignore

    @distributed_trace_async
    async def test_five(
        self,
        **kwargs: Any
    ) -> None:
        """TestFive should be in OperationGroupTwoOperations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_test_five_request(
            template_url=self.test_five.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    test_five.metadata = {'url': '/multiapi/two/testFiveEndpoint'}  # type: ignore
| test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v3/aio/operations/_operation_group_two_operations.py | 6,321 | OperationGroupTwoOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~multiapi.v3.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- type: ClsType[None] type: Optional[Union[str, "_models.ContentType"]] type: ignore type: ClsType[None] type: ignore | 1,064 | en | 0.626557 |
import pytest
from app.db import model, session_ctx
from app.util import exceptions
from app.server.routes import routes
from app.server.requestutils import *
import flask
import flask.testing
def test_pubsubify_excs(fake_import: model.Import, client_with_modifiable_routes: flask.testing.FlaskClient):
    """pubsubify_excs turns an ISvcException into a 202 response and errors the import."""
    client = client_with_modifiable_routes

    # Seed the database with the import that the view below will error out.
    with session_ctx() as sess:
        errored_import = fake_import
        sess.add(errored_import)
        sess.commit()

    @pubsubify_excs
    def ise_exc() -> flask.Response:
        raise exceptions.ISvcException("a bad happened", imports=[errored_import])

    client.application.add_url_rule('/test_pubsubify_excs', view_func=ise_exc, methods=["GET"])

    assert client.get('/test_pubsubify_excs').status_code == 202

    # The decorator should have flipped the import to Error with the message.
    with session_ctx() as sess:
        reloaded: Import = Import.get(errored_import.id, sess)
        assert reloaded.status == model.ImportStatus.Error
        assert reloaded.error_message == "a bad happened"
| app/tests/test_requestutils.py | 1,066 | pre-populate an import that will get error'd | 44 | en | 0.819548 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bwscoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transaction in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
import os
import time
from test_framework.test_framework import BWScoinTestFramework
from test_framework.util import *
class MempoolPersistTest(BWScoinTestFramework):
    def set_test_params(self):
        """Three nodes; node1 runs with -persistmempool=0, the others default."""
        self.num_nodes = 3
        self.extra_args = [[], ["-persistmempool=0"], []]

    def run_test(self):
        """Exercise mempool persistence across restarts (see module docstring for the plan)."""
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        # Send to node2's own address so the txs never enter node0/node1 wallets.
        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        self.sync_all()

        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
        self.stop_nodes()
        self.start_node(0)
        self.start_node(1)
        # Give bwscoind a second to reload the mempool
        time.sleep(1)
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        # Give bwscoind a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        # -persistmempool=0 must not have overwritten the on-disk mempool.dat.
        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        self.stop_nodes()
        self.start_node(0)
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

        mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat')
        mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat')
        self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)

        self.log.debug("Prevent bwscoind from writing mempool.dat to disk. Verify that `savemempool` fails")
        # to test the exception we are setting bad permissions on a tmp file called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'):
            pass
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
        os.remove(mempooldotnew1)
if __name__ == '__main__':
    # Standard test-framework entry point: run this test directly as a script.
    MempoolPersistTest().main()
| test/functional/mempool_persist.py | 4,946 | Test mempool persistence.
By default, bwscoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transaction in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
!/usr/bin/env python3 Copyright (c) 2014-2017 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Give bwscoind a second to reload the mempool Give bwscoind a second to reload the mempool to test the exception we are setting bad permissions on a tmp file called mempool.dat.new which is an implementation detail that could change and break this test | 1,946 | en | 0.846379 |
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import logging
from functools import partial
class EventLoggerPlugin:
    """Plugin that logs every plugin event fired by the broker/client."""

    def __init__(self, context):
        self.context = context

    async def log_event(self, *args, **kwargs):
        # NOTE(review): replace('old', '') appears to strip a legacy fragment
        # from event names — confirm against the event naming scheme.
        event = kwargs['event_name'].replace('old', '')
        self.context.logger.info("### '%s' EVENT FIRED ###", event)

    def __getattr__(self, name):
        # Any missing "on_*" attribute resolves to the generic event logger.
        # NOTE(review): other missing attributes implicitly resolve to None
        # instead of raising AttributeError — likely relied upon by the caller.
        if name.startswith("on_"):
            return partial(self.log_event, event_name=name)
class PacketLoggerPlugin:
    """Plugin that DEBUG-logs every MQTT packet received from or sent to a peer."""

    def __init__(self, context):
        self.context = context

    def _log_packet(self, arrow, packet, session):
        """Write one DEBUG line for *packet*; *arrow* marks the direction.

        The session's client id is prepended when a session is known. The
        level check avoids %r-formatting the packet when DEBUG is off.
        """
        logger = self.context.logger
        if logger.isEnabledFor(logging.DEBUG):
            if session:
                logger.debug("%s " + arrow + " %r", session.client_id, packet)
            else:
                logger.debug(arrow + " %r", packet)

    async def on_mqtt_packet_received(self, *args, **kwargs):
        """Log an inbound packet (fired by the plugin manager)."""
        self._log_packet("<-in--", kwargs.get('packet'), kwargs.get('session', None))

    async def on_mqtt_packet_sent(self, *args, **kwargs):
        """Log an outbound packet (fired by the plugin manager)."""
        self._log_packet("-out->", kwargs.get('packet'), kwargs.get('session', None))
| distmqtt/plugins/logging.py | 1,390 | Copyright (c) 2015 Nicolas JOUANIN See the file license.txt for copying permission. | 83 | en | 0.681318 |
"""
WSGI config for share_all project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already set them.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "share_all.settings")

# Module-level WSGI callable that WSGI servers import (see module docstring).
application = get_wsgi_application()
| share_all/wsgi.py | 396 | WSGI config for share_all project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ | 216 | en | 0.781114 |
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import attr
from contextlib import ExitStack
from distutils.util import strtobool as str_to_bool # pylint: disable=unused-import
from functools import partial, wraps
from itertools import islice
from typing import Iterable, Tuple
NOTSET = object()
def find(iterable, pred=lambda x: True, default=None):
    """Return the first element of *iterable* satisfying *pred*, else *default*."""
    for item in iterable:
        if pred(item):
            return item
    return default
def cast(value, type_conv, default=None):
    """Convert *value* with *type_conv*; return *default* when value is None or conversion fails."""
    if value is not None:
        try:
            return type_conv(value)
        except Exception:
            pass
    return default
def to_snake_case(s):
    """Convert CamelCase *s* to snake_case, keeping uppercase runs intact."""
    if not s:
        return ''
    pieces = [s[0].lower()]
    for prev, cur in zip(s, s[1:]):
        if cur.isalpha() and cur.isupper():
            # Underscore only at the start of an uppercase run,
            # so "HTML" stays "html" rather than "h_t_m_l".
            if not (prev.isalpha() and prev.isupper()):
                pieces.append('_')
            pieces.append(cur.lower())
        else:
            pieces.append(cur)
    return ''.join(pieces)
def pairs(iterable):
    """Yield consecutive non-overlapping pairs: [a, b, c, d] -> (a, b), (c, d).

    A trailing odd element is dropped (zip stops at the shorter side).
    """
    it = iter(iterable)
    return zip(it, it)
def take_by(iterable, count):
    """
    Yield items from *iterable* in lists of at most *count* elements.

    ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']
    """
    it = iter(iterable)
    batch = list(islice(it, count))
    while batch:
        yield batch
        batch = list(islice(it, count))
def filter_dict(d, exclude_keys):
    """Return a copy of *d* without the entries whose key is in *exclude_keys*."""
    result = {}
    for key, value in d.items():
        if key not in exclude_keys:
            result[key] = value
    return result
def parse_str_enum_value(value, enum_class, default=NOTSET,
        unknown_member_error=None):
    """Coerce *value* (a member name, a member, or None) into *enum_class*.

    None maps to *default* when one was supplied; an unknown name raises
    ValueError (message customizable via *unknown_member_error*); any other
    type raises TypeError.
    """
    if value is None and default is not NOTSET:
        return default
    if isinstance(value, enum_class):
        return value
    if isinstance(value, str):
        try:
            return enum_class[value]
        except KeyError:
            raise ValueError((unknown_member_error or
                "Unknown element of {cls} '{value}'. "
                "The only known are: {available}") \
                .format(
                    cls=enum_class.__name__,
                    value=value,
                    available=', '.join(e.name for e in enum_class)
                )
            )
    raise TypeError("Expected value type string or %s, but got %s" % \
        (enum_class.__name__, type(value).__name__))
def escape(s: str, escapes: Iterable[Tuple[str, str]]) -> str:
    """Apply each (pattern, substitute) pair of *escapes* to *s*, in order."""
    result = s
    for pattern, sub in escapes:
        result = result.replace(pattern, sub)
    return result
def unescape(s: str, escapes: Iterable[Tuple[str, str]]) -> str:
"""
'escapes' is an iterable of (pattern, substitute) pairs
"""
for pattern, sub in escapes:
s = s.replace(sub, pattern)
return s
def optional_arg_decorator(fn):
    """Let decorator *fn* be used both bare (@fn) and parameterized (@fn(...))."""
    @wraps(fn)
    def wrapped_decorator(*args, **kwargs):
        # Bare usage: the decoratee itself arrives as the single positional arg.
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return fn(args[0], **kwargs)
        # Parameterized usage: return the real decorator closing over the args.
        def real_decorator(decoratee):
            return fn(decoratee, *args, **kwargs)
        return real_decorator
    return wrapped_decorator
class Rollback:
    """Collects undo callbacks and runs them in LIFO order (via ExitStack)
    when the managed block exits with an exception and rollback is enabled."""

    @attr.attrs
    class Handler:
        # The registered callback plus its per-handler enable/ignore flags.
        callback = attr.attrib()
        enabled = attr.attrib(default=True)
        ignore_errors = attr.attrib(default=False)

        def __call__(self):
            """Invoke the callback if enabled; optionally swallow its errors."""
            if self.enabled:
                try:
                    self.callback()
                except: # pylint: disable=bare-except
                    if not self.ignore_errors:
                        raise

    def __init__(self):
        self._handlers = {}
        self._stack = ExitStack()
        self.enabled = True

    def add(self, callback, *args,
            name=None, enabled=True, ignore_errors=False,
            fwd_kwargs=None, **kwargs):
        """Register *callback* (with optional bound args) as an undo action.

        Returns the handler name, which can later be passed to enable()/disable().
        """
        if args or kwargs or fwd_kwargs:
            if fwd_kwargs:
                kwargs.update(fwd_kwargs)
            # Bind arguments now so the stack can call the handler with no args.
            callback = partial(callback, *args, **kwargs)
        name = name or hash(callback)
        assert name not in self._handlers
        handler = self.Handler(callback,
            enabled=enabled, ignore_errors=ignore_errors)
        self._handlers[name] = handler
        self._stack.callback(handler)
        return name
    do = add # readability alias

    def enable(self, name=None):
        """Enable one handler by name, or the whole rollback when name is None."""
        if name:
            self._handlers[name].enabled = True
        else:
            self.enabled = True

    def disable(self, name=None):
        """Disable one handler by name, or the whole rollback when name is None."""
        if name:
            self._handlers[name].enabled = False
        else:
            self.enabled = False

    def clean(self):
        """Run no handlers; __exit__ with a None exc type returns immediately."""
        self.__exit__(None, None, None)

    def __enter__(self):
        return self

    # pylint: disable=redefined-builtin
    def __exit__(self, type=None, value=None, traceback=None):
        # Handlers fire only when an exception is propagating AND rollback
        # is enabled; a clean exit leaves all undo actions unrun.
        if type is None:
            return
        if not self.enabled:
            return
        self._stack.__exit__(type, value, traceback)
    # pylint: enable=redefined-builtin
@optional_arg_decorator
def error_rollback(func, arg_name='on_error', implicit=False):
    """Run *func* inside a Rollback() so its registered undo actions fire on error.

    With implicit=False the manager is passed as the *arg_name* keyword
    argument; with implicit=True it is temporarily injected into the
    function's module globals under *arg_name* instead.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        with Rollback() as manager:
            if implicit:
                # NOTE(review): mutating func.__globals__ is process-wide and
                # not thread-safe — confirm callers never nest/parallelize this.
                fglobals = func.__globals__
                has_arg = arg_name in fglobals
                old_val = fglobals.get(arg_name)
                fglobals[arg_name] = manager
                try:
                    func(*args, **kwargs)
                finally:
                    # Restore (or remove) the injected global even on error.
                    if has_arg:
                        func.__globals__[arg_name] = old_val
                    else:
                        func.__globals__.pop(arg_name)
            else:
                kwargs[arg_name] = manager
                func(*args, **kwargs)
    return wrapped_func
| datumaro/util/__init__.py | 5,888 | 'escapes' is an iterable of (pattern, substitute) pairs
Returns elements from the input iterable by batches of N items.
('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']
'escapes' is an iterable of (pattern, substitute) pairs
Copyright (C) 2019-2020 Intel Corporation SPDX-License-Identifier: MIT pylint: disable=unused-import avoid "HTML" -> "h_t_m_l" pylint: disable=bare-except readability alias pylint: disable=redefined-builtin pylint: enable=redefined-builtin | 475 | en | 0.536523 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/saver.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database in which the generated descriptors below register.
_sym_db = _symbol_database.Default()

# File descriptor for saver.proto (package 'tensorflow', proto3 syntax).
# The serialized_pb bytes are the compiled .proto definition — generated
# code, do not hand-edit.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/core/protobuf/saver.proto',
  package='tensorflow',
  syntax='proto3',
  serialized_options=b'\n\023org.tensorflow.utilB\013SaverProtosP\001ZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_proto\370\001\001',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n$tensorflow/core/protobuf/saver.proto\x12\ntensorflow\"\x9e\x02\n\x08SaverDef\x12\x1c\n\x14\x66ilename_tensor_name\x18\x01 \x01(\t\x12\x18\n\x10save_tensor_name\x18\x02 \x01(\t\x12\x17\n\x0frestore_op_name\x18\x03 \x01(\t\x12\x13\n\x0bmax_to_keep\x18\x04 \x01(\x05\x12\x0f\n\x07sharded\x18\x05 \x01(\x08\x12%\n\x1dkeep_checkpoint_every_n_hours\x18\x06 \x01(\x02\x12=\n\x07version\x18\x07 \x01(\x0e\x32,.tensorflow.SaverDef.CheckpointFormatVersion\"5\n\x17\x43heckpointFormatVersion\x12\n\n\x06LEGACY\x10\x00\x12\x06\n\x02V1\x10\x01\x12\x06\n\x02V2\x10\x02\x42q\n\x13org.tensorflow.utilB\x0bSaverProtosP\x01ZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_proto\xf8\x01\x01\x62\x06proto3'
)
# Enum descriptor for SaverDef.CheckpointFormatVersion (LEGACY=0, V1=1, V2=2).
# Generated code — do not hand-edit.
_SAVERDEF_CHECKPOINTFORMATVERSION = _descriptor.EnumDescriptor(
  name='CheckpointFormatVersion',
  full_name='tensorflow.SaverDef.CheckpointFormatVersion',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LEGACY', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='V1', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='V2', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=286,
  serialized_end=339,
)
_sym_db.RegisterEnumDescriptor(_SAVERDEF_CHECKPOINTFORMATVERSION)
# Message descriptor for tensorflow.SaverDef (7 scalar/enum fields).
# Generated code — do not hand-edit.
_SAVERDEF = _descriptor.Descriptor(
  name='SaverDef',
  full_name='tensorflow.SaverDef',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='filename_tensor_name', full_name='tensorflow.SaverDef.filename_tensor_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='save_tensor_name', full_name='tensorflow.SaverDef.save_tensor_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='restore_op_name', full_name='tensorflow.SaverDef.restore_op_name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='max_to_keep', full_name='tensorflow.SaverDef.max_to_keep', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sharded', full_name='tensorflow.SaverDef.sharded', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='keep_checkpoint_every_n_hours', full_name='tensorflow.SaverDef.keep_checkpoint_every_n_hours', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='version', full_name='tensorflow.SaverDef.version', index=6,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SAVERDEF_CHECKPOINTFORMATVERSION,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=53,
  serialized_end=339,
)
# Wire the enum field to its descriptor and register the message type.
_SAVERDEF.fields_by_name['version'].enum_type = _SAVERDEF_CHECKPOINTFORMATVERSION
_SAVERDEF_CHECKPOINTFORMATVERSION.containing_type = _SAVERDEF
DESCRIPTOR.message_types_by_name['SaverDef'] = _SAVERDEF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete Python message class for tensorflow.SaverDef, built from the
# descriptor above.  Generated code — do not hand-edit.
SaverDef = _reflection.GeneratedProtocolMessageType('SaverDef', (_message.Message,), {
  'DESCRIPTOR' : _SAVERDEF,
  '__module__' : 'tensorflow.core.protobuf.saver_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.SaverDef)
  })
_sym_db.RegisterMessage(SaverDef)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| tensorflow/core/protobuf/saver_pb2.py | 6,539 | (extraction metadata for the generated protobuf module above) | 288 | en | 0.505174 |
import urllib.request, urllib.parse, urllib.error
import json

# Fetch a JSON document from a user-supplied URL and print the sum of the
# 'count' values found under its 'comments' list.
url = input('Web page: ')
print('Retrieving', url)
uh = urllib.request.urlopen(url)
data = uh.read().decode()
info = json.loads(data)
# info is a dict shaped like:
# {'note': '...', 'comments': [{'name': 'Romina', 'count': 97}, ...]}
# The top-level split is between 'note' and 'comments'.
# Fix: the original built the same list twice (an append loop into `total`
# and a comprehension into `total2`) and only used the second one; keep the
# single comprehension.
counts = [int(item['count']) for item in info['comments']]
print(sum(counts))
#Embedded file name: ACEStream\Player\BaseApp.pyo
import os
import sys
import time
import shutil
import urllib
import hashlib
import binascii
import random
import subprocess
import struct
import pickle
import cookielib
from operator import itemgetter
from base64 import b64encode, encodestring
from types import DictType, StringType
if sys.platform == 'win32':
import win32file
import win32api
from ACEStream.Core.Utilities.win32regchecker import Win32RegChecker, HKLM, HKCU
from threading import enumerate, currentThread, Lock, Timer
from traceback import print_stack, print_exc
from ACEStream.Video.utils import svcextdefaults
from ACEStream.Core.Utilities.odict import odict
from ACEStream.Core.Utilities.timeouturlopen import urlOpenTimeout
if sys.platform == 'darwin':
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
from ACEStream.__init__ import DEFAULT_SESSION_LISTENPORT
from ACEStream.version import VERSION, VERSION_REV
from ACEStream.env import TS_ENV_PLATFORM
from ACEStream.GlobalConfig import globalConfig
from ACEStream.Core.API import *
from ACEStream.Policies.RateManager import UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager
from ACEStream.Utilities.Instance2Instance import *
from ACEStream.Utilities.TimedTaskQueue import TimedTaskQueue
from ACEStream.Core.BitTornado.__init__ import createPeerID
from ACEStream.Video.utils import videoextdefaults
from ACEStream.Core.Utilities.logger import log, log_exc
from ACEStream.Core.Utilities.unicode import unicode2str_safe
from ACEStream.Core.Ads.Manager import AdManager
from ACEStream.Core.TS.Service import TSService
from ACEStream.Core.Utilities.mp4metadata import clear_mp4_metadata_tags_from_file
from ACEStream.Core.Statistics.GoogleAnalytics import GoogleAnalytics
from ACEStream.Core.Statistics.TNS import TNS, TNSNotAllowedException
from ACEStream.Core.Statistics.Settings import RemoteStatisticsSettings
from ACEStream.Core.Statistics.TrafficStatistics import TrafficStatistics
from ACEStream.Core.APIImplementation.FakeDownload import FakeDownload
from ACEStream.Utilities.HardwareIdentity import get_hardware_key
from ACEStream.Utilities.LSO import LSO
if sys.platform == 'win32':
TNS_ENABLED = True
else:
TNS_ENABLED = False
DEVELOPER_MODE = False
DEBUG = False
DEBUG_AD_STORAGE = False
DEBUG_HIDDEN_DOWNLOADS = False
DEBUG_SERVICE_REQUESTS = False
DEBUG_STATS_TO_FILE = False
DEBUG_PREMIUM = False
RATELIMITADSL = False
DOWNLOADSPEED = 300
DEFAULT_DISKSPACE_LIMIT = 10737418240L
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
downloadlimitvalue_file = os.path.join(os.path.split(current_file_path)[0],"values","downloadlimit.txt")
f = open(downloadlimitvalue_file, "r")
string = f.read()
if int(string) == 0:
DEFAULT_DOWNLOAD_RATE_LIMIT = 100000000000.0
else:
DEFAULT_DOWNLOAD_RATE_LIMIT = int(string)
DOWNLOAD_STATES_DISPLAY_INTERVAL = 600
SHOW_HIDDEN_DOWNLOADS_INFO = False
MIN_PROGRESS_KEEP = 0.001
DOWNLOAD_STATS_INTERVAL = 1800
PREMIUM_PREVIEW_TIMEOUT = 15
CHECK_AUTH_INTERVAL_REGULAR = 3600
CHECK_AUTH_INTERVAL_ERROR = 600
CHECK_AUTH_INTERVAL_PREMIUM = 60
CHECK_AUTH_MAX_ERRORS = 5
CHECK_PRELOAD_ADS_INTERVAL = 86400
CLEANUP_HIDDEN_DOWNLOADS_INTERVAL = 86400
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
vodbuffervalue_file = os.path.join(os.path.split(current_file_path)[0],"values","vodbuffer.txt")
f = open(vodbuffervalue_file, "r")
string = f.read()
DEFAULT_PLAYER_BUFFER_TIME = int(string)
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
livebuffervalue_file = os.path.join(os.path.split(current_file_path)[0],"values","livebuffer.txt")
f = open(livebuffervalue_file, "r")
string = f.read()
DEFAULT_LIVE_BUFFER_TIME = int(string)
CACHE_DIR_NAME = '_acestream_cache_'
DEFAULT_AD_STORAGE_LIMIT = 536870912L
AD_STORAGE_LIMIT_SMALL = 536870912L
AD_STORAGE_LIMIT_BIG = 1073741824L
AD_STORAGE_MIN_FREE_SPACE = 52428800L
AD_STORAGE_MAX_AGE = 2592000
class BaseApp(InstanceConnectionHandler):
    def __init__(self, wrapper, redirectstderrout, appname, appversion, params, installdir, i2i_port, session_port):
        """Bootstrap the background application.

        Parses command-line ``params``, configures global flags, reads the
        rate limits from the values/ config files, creates the task queue and
        the instance-to-instance (i2i) control server, and finally runs
        OnInitBase() via that chain.  ``redirectstderrout`` is accepted but
        not used here — presumably handled by the caller; confirm.
        """
        self.apptype = globalConfig.get_value('apptype')
        self.ext_version = self.check_integrity()
        if DEVELOPER_MODE:
            self.ext_version = True
        # Defaults for the optional command-line switches parsed below.
        debug_level = 0
        skip_metadata = False
        skip_mediainfo = False
        use_libavi = False
        if self.apptype == 'torrentstream':
            encrypted_storage = True
            self.registry_key = 'TorrentStream'
        else:
            encrypted_storage = True
            self.registry_key = 'ACEStream'
        ip = None
        vod_live_max_pop_time = None
        piece_picker_buffering_delay = None
        # Best-effort parse of the command-line parameters; a malformed
        # parameter only logs a traceback and leaves the defaults in place.
        try:
            for param in params:
                if param.startswith('--debug='):
                    _, level = param.split('=')
                    debug_level = int(level)
                elif param == '--skip-metadata':
                    skip_metadata = True
                elif param == '--skip-mediainfo':
                    skip_mediainfo = True
                elif param == '--use-libavi':
                    use_libavi = True
                elif param.startswith('--vod-live-max-pop-time='):
                    _, vod_live_max_pop_time = param.split('=')
                elif param.startswith('--buffering-delay='):
                    _, piece_picker_buffering_delay = param.split('=')
        except:
            print_exc()
        self.debug_systray = False
        self.debug_level = 0
        self.ip = ip
        globalConfig.set_value('encrypted_storage', encrypted_storage)
        globalConfig.set_value('use_libavi', use_libavi)
        # --vod-live-max-pop-time: clamp to >= 1 second; ignore bad values.
        if vod_live_max_pop_time is not None:
            try:
                vod_live_max_pop_time = int(vod_live_max_pop_time)
                if vod_live_max_pop_time < 1:
                    vod_live_max_pop_time = 1
                globalConfig.set_value('vod_live_max_pop_time', vod_live_max_pop_time)
            except:
                pass
        # --buffering-delay=min,max[,offset] -> (min, max, offset) tuple.
        if piece_picker_buffering_delay is not None:
            try:
                a = piece_picker_buffering_delay.split(',')
                if len(a) >= 2:
                    _min = int(a[0])
                    _max = int(a[1])
                    if len(a) >= 3:
                        _offset = int(a[2])
                    else:
                        _offset = 0
                    piece_picker_buffering_delay = (_min, _max, _offset)
                    if DEBUG:
                        log('baseapp::__init__: piece_picker_buffering_delay', piece_picker_buffering_delay)
                    globalConfig.set_value('piece_picker_buffering_delay', piece_picker_buffering_delay)
            except:
                pass
        self.set_debug_level(debug_level)
        ACEStream.Core.Video.VideoOnDemand.DEBUG_SKIP_METADATA = skip_metadata
        ACEStream.Core.Video.VideoStatus.DEBUG_SKIP_METADATA = skip_metadata
        ACEStream.Core.Video.VideoOnDemand.DO_MEDIAINFO_ANALYSIS = not skip_mediainfo
        if DEBUG_STATS_TO_FILE:
            self.debug_counter = 0
        self.appname = appname
        self.appversion = appversion
        self.params = params
        self.installdir = installdir
        self.i2i_port = i2i_port
        self.session_port = session_port
        self.error = None
        self.s = None
        self.wrapper = wrapper
        self.auth_data = {'last_success': None,
         'errors': 0}
        self.playerconfig = {}
        self.download_states_display_counter = 0
        self.user_profile = None
        self.downloads_in_vodmode = {}
        self.downloads_in_admode = {}
        self.dlinfo_lock = Lock()
        self.cleanup_hidden_downloads_lock = Lock()
        self.check_preload_ads_lock = Lock()
        self.timers = {}
        self.playing_premium_content = False
        self.download_stats = {}
        self.last_download_stats = 0
        # Read the download rate limit from ../values/downloadlimit.txt.
        # NOTE(review): file handle is never closed here.
        import os
        current_file_path = os.path.dirname(os.path.realpath(__file__))
        downloadlimitvalue_file = os.path.join(os.path.split(current_file_path)[0],"values","downloadlimit.txt")
        f = open(downloadlimitvalue_file, "r")
        string = f.read()
        self.max_download_rate = int(string)
        # Read the upload rate limit from ../values/uploadlimit.txt.
        import os
        current_file_path = os.path.dirname(os.path.realpath(__file__))
        uploadlimitvalue_file = os.path.join(os.path.split(current_file_path)[0],"values","uploadlimit.txt")
        f = open(uploadlimitvalue_file, "r")
        string = f.read()
        self.max_upload_rate = int(string)
        self.avg_download_rate = 0
        self.avg_download_rate_sum = 0
        self.avg_download_rate_count = 0
        self.avg_upload_rate = 0
        self.avg_upload_rate_sum = 0
        self.avg_upload_rate_count = 0
        self.ratelimiter = None
        self.ratelimit_update_count = 0
        self.playermode = DLSTATUS_DOWNLOADING
        self.getpeerlistcount = 2
        self.shuttingdown = False
        self.tqueue = TimedTaskQueue(nameprefix='BGTaskQueue')
        # Heavy session/startup work lives in OnInitBase().
        self.OnInitBase()
        # i2i port 0 means "pick any free port and publish it in a file".
        if self.i2i_port == 0:
            port_file = os.path.join(self.installdir, 'acestream.port')
        else:
            port_file = None
        self.i2i_listen_server = Instance2InstanceServer(self.i2i_port, self, timeout=86400.0, port_file=port_file)
        self.i2i_listen_server.start()
        InstanceConnectionHandler.__init__(self, self.i2ithread_readlinecallback)
        self.check_license()
def check_license(self):
try:
path = os.path.join(self.installdir, '..', 'LICENSE.txt')
if not os.path.isfile(path):
return
size = os.path.getsize(path)
if size < 1024:
return
import locale
lang_code, encoding = locale.getdefaultlocale()
lang_code = lang_code.lower()
if lang_code.startswith('en'):
lang_code = 'en'
elif lang_code.startswith('ru'):
lang_code = 'ru'
else:
lang_code = 'en'
if lang_code == 'ru':
txt = '\xd0\x9b\xd0\xb8\xd1\x86\xd0\xb5\xd0\xbd\xd0\xb7\xd0\xb8\xd0\xbe\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb5 \xd1\x81\xd0\xbe\xd0\xb3\xd0\xbb\xd0\xb0\xd1\x88\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xbf\xd1\x80\xd0\xb5\xd0\xb4\xd1\x81\xd1\x82\xd0\xb0\xd0\xb2\xd0\xbb\xd0\xb5\xd0\xbd\xd0\xbe \xd0\xbd\xd0\xb0 \xd1\x81\xd0\xb0\xd0\xb9\xd1\x82\xd0\xb5: http://www.acestream.org/license'
else:
txt = 'License agreement presented on the site: http://www.acestream.org/license'
f = open(path, 'w')
f.write(txt)
f.close()
except:
pass
    def test_ads(self):
        # Developer helper: request ads for a dummy content item with fixed
        # parameters and dump the result to stderr.  Not part of the normal
        # startup path.
        affiliate_id = 0
        zone_id = 0
        developer_id = 0
        include_interruptable_ads = False
        provider_key = None
        provider_content_id = None
        content_ext = 'mp4'
        content_duration = 3600
        user_login = self.s.get_ts_login()
        # 40-character dummy infohash (hex-digit length of a SHA-1 hash).
        content_id = '1234567890123456789012345678901234567890'
        ads = self.ad_manager.get_ads(device_id=self.device_id, user_login=user_login, user_level=2, content_type=DLTYPE_TORRENT, content_id=content_id, content_ext=content_ext, content_duration=content_duration, affiliate_id=affiliate_id, zone_id=zone_id, developer_id=developer_id, include_interruptable_ads=include_interruptable_ads, is_live=False, user_profile=self.user_profile, provider_key=provider_key, provider_content_id=provider_content_id)
        print >> sys.stderr, '>>>test_ads:', ads
    def set_debug_level(self, debug_level):
        """Toggle per-module DEBUG flags across the ACEStream packages.

        ``debug_level`` is a bitmask; -1 enables everything.  Observed groups:
        1 core/app, 2 download machinery, 4 piece picker, 8 BT networking,
        16 live streaming, 32 VOD/video server, 64 NAT traversal,
        128 stats-to-file, 256 web UI, 512 raw/HTTP servers, 8192 storage
        wrapper, 16384 storage, 32768 encrypted storage, 65536 sqlite cache,
        131072 statistics/LSO.  No-op outside DEVELOPER_MODE or when the
        level is unchanged.
        """
        if not DEVELOPER_MODE:
            return
        if debug_level == self.debug_level:
            return
        self.debug_level = debug_level
        log('set_debug_level:', debug_level)
        # Bit 1: core application / session / ads / TS service.
        ACEStream.Plugin.BackgroundProcess.DEBUG2 = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Player.BaseApp.DEBUG = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Core.Session.DEBUG = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Player.BaseApp.DEBUG_HIDDEN_DOWNLOADS = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Player.BaseApp.DEBUG_AD_STORAGE = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Player.BaseApp.DEBUG_SERVICE_REQUESTS = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Player.BaseApp.DEBUG_PREMIUM = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Player.BaseApp.SHOW_HIDDEN_DOWNLOADS_INFO = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Core.Ads.Manager.DEBUG = debug_level == -1 or debug_level & 1 != 0
        ACEStream.Core.TS.Service.DEBUG = debug_level == -1 or debug_level & 1 != 0
        # Bit 2: download implementation and direct-download plumbing.
        ACEStream.Core.APIImplementation.DirectDownload.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.APIImplementation.DownloadImpl.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.APIImplementation.LaunchManyCore.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.APIImplementation.SingleDownload.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.BitTornado.download_bt1.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.BitTornado.BT1.GetRightHTTPDownloader.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.DirectDownload.Downloader.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.DirectDownload.Storage.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.DirectDownload.VODTransporter.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Core.Statistics.GoogleAnalytics.DEBUG = debug_level == -1 or debug_level & 2 != 0
        # These two are always forced off regardless of the mask.
        ACEStream.Core.TorrentDef.DEBUG = False
        ACEStream.Core.TS.domutils.DEBUG = False
        ACEStream.Core.Utilities.mp4metadata.DEBUG = debug_level == -1 or debug_level & 2 != 0
        ACEStream.Video.VideoServer.DEBUGLOCK = debug_level == -1 or debug_level & 2 != 0
        # Bit 4: piece picker.
        ACEStream.Core.Video.PiecePickerStreaming.DEBUG = debug_level == -1 or debug_level & 4 != 0
        ACEStream.Core.Video.PiecePickerStreaming.DEBUGPP = debug_level == -1 or debug_level & 4 != 0
        # Bit 8: BitTorrent networking.
        ACEStream.Core.BitTornado.SocketHandler.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Choker.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Connecter.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Connecter.DEBUG_UT_PEX = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Downloader.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Uploader.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Encrypter.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Encrypter.DEBUG_CLOSE = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Rerequester.DEBUG = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Rerequester.DEBUG_DHT = debug_level == -1 or debug_level & 8 != 0
        ACEStream.Core.BitTornado.BT1.Rerequester.DEBUG_CHECK_NETWORK_CONNECTION = debug_level == -1 or debug_level & 8 != 0
        # Bit 16: live streaming.
        ACEStream.Core.Video.VideoOnDemand.DEBUG_HOOKIN = debug_level == -1 or debug_level & 16 != 0
        ACEStream.Core.Video.LiveSourceAuth.DEBUG = debug_level == -1 or debug_level & 16 != 0
        ACEStream.Core.Video.PiecePickerStreaming.DEBUG_LIVE = debug_level == -1 or debug_level & 16 != 0
        ACEStream.Core.BitTornado.BT1.StorageWrapper.DEBUG_LIVE = debug_level == -1 or debug_level & 16 != 0
        # VideoSource may be absent in some builds; ignore failures.
        try:
            ACEStream.Core.Video.VideoSource.DEBUG = debug_level == -1 or debug_level & 16 != 0
        except:
            pass
        # Bit 32: VOD and video server.
        ACEStream.Core.Video.VideoOnDemand.DEBUG = debug_level == -1 or debug_level & 32 != 0
        ACEStream.Core.Video.VideoStatus.DEBUG = debug_level == -1 or debug_level & 32 != 0
        ACEStream.Video.VideoServer.DEBUG = debug_level == -1 or debug_level & 32 != 0
        ACEStream.Video.VideoServer.DEBUGCONTENT = debug_level == -1 or debug_level & 32 != 0
        # Bit 64: NAT traversal / connectivity checks.
        ACEStream.Core.NATFirewall.NatCheck.DEBUG = debug_level == -1 or debug_level & 64 != 0
        ACEStream.Core.NATFirewall.UPnPThread.DEBUG = debug_level == -1 or debug_level & 64 != 0
        ACEStream.Core.NATFirewall.UDPPuncture.DEBUG = debug_level == -1 or debug_level & 64 != 0
        ACEStream.Core.NATFirewall.upnp.DEBUG = debug_level == -1 or debug_level & 64 != 0
        ACEStream.Core.NATFirewall.ConnectionCheck.DEBUG = debug_level == -1 or debug_level & 64 != 0
        ACEStream.Core.BitTornado.natpunch.DEBUG = debug_level == -1 or debug_level & 64 != 0
        # Bit 128: periodic stats snapshots to disk.
        ACEStream.Player.BaseApp.DEBUG_STATS_TO_FILE = debug_level == -1 or debug_level & 128 != 0
        # Bit 256: web UI.
        ACEStream.WebUI.WebUI.DEBUG = debug_level == -1 or debug_level & 256 != 0
        # Bit 512: raw server / HTTP handler / socket layer.
        ACEStream.Core.BitTornado.RawServer.DEBUG = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.RawServer.DEBUG2 = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.ServerPortHandler.DEBUG = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.ServerPortHandler.DEBUG2 = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.HTTPHandler.DEBUG = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.HTTPHandler.DEBUG2 = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.SocketHandler.DEBUG = debug_level == -1 or debug_level & 512 != 0
        ACEStream.Core.BitTornado.SocketHandler.DEBUG2 = debug_level == -1 or debug_level & 512 != 0
        # Bit 8192: storage wrapper / hash checking.
        ACEStream.Core.BitTornado.BT1.StorageWrapper.DEBUG = debug_level == -1 or debug_level & 8192 != 0
        ACEStream.Core.BitTornado.BT1.StorageWrapper.DEBUG_WRITE = False
        ACEStream.Core.BitTornado.BT1.StorageWrapper.DEBUG_HASHCHECK = debug_level == -1 or debug_level & 8192 != 0
        ACEStream.Core.BitTornado.BT1.StorageWrapper.DEBUG_REQUESTS = debug_level == -1 or debug_level & 8192 != 0
        ACEStream.Core.BitTornado.BT1.FileSelector.DEBUG = debug_level == -1 or debug_level & 8192 != 0
        ACEStream.Core.BitTornado.download_bt1.DEBUG_ENCRYPTION = debug_level == -1 or debug_level & 8192 != 0
        # Bit 16384: storage backend.
        ACEStream.Core.BitTornado.BT1.Storage.DEBUG = debug_level == -1 or debug_level & 16384 != 0
        ACEStream.Core.BitTornado.BT1.Storage.DEBUG_RESTORE = debug_level == -1 or debug_level & 16384 != 0
        # Bit 32768: encrypted storage (note: overwrites the bit-8192 value
        # of download_bt1.DEBUG_ENCRYPTION set just above).
        ACEStream.Core.Utilities.EncryptedStorage.DEBUG = debug_level == -1 or debug_level & 32768 != 0
        ACEStream.Core.BitTornado.BT1.StorageWrapper.DEBUG_ENCRYPTED_STORAGE = debug_level == -1 or debug_level & 32768 != 0
        ACEStream.Core.BitTornado.download_bt1.DEBUG_ENCRYPTION = debug_level == -1 or debug_level & 32768 != 0
        # Bit 65536: sqlite cache DB.
        ACEStream.Core.CacheDB.SqliteCacheDBHandler.DEBUG = debug_level == -1 or debug_level & 65536 != 0
        # Bit 131072: statistics and LSO.
        ACEStream.Utilities.LSO.DEBUG = debug_level == -1 or debug_level & 131072 != 0
        ACEStream.Core.Statistics.Settings.DEBUG = debug_level == -1 or debug_level & 131072 != 0
        ACEStream.Core.Statistics.TNS.DEBUG = debug_level == -1 or debug_level & 131072 != 0
        ACEStream.Core.Statistics.TrafficStatistics.DEBUG = debug_level == -1 or debug_level & 131072 != 0
    def OnInitBase(self):
        """Second-stage startup: build the Session, restore state, schedule
        background tasks, and initialize ads/statistics subsystems.

        Called from __init__ before the i2i server starts.
        """
        state_dir = Session.get_default_state_dir()
        self.state_dir = state_dir
        if DEBUG:
            log('baseapp::init: state_dir', state_dir)
        # GUI-capable modes get a utility stub; console mode does not.
        if globalConfig.get_mode() != 'client_console':
            from ACEStream.Player.UtilityStub import UtilityStub
            self.utility = UtilityStub(self.installdir, state_dir)
            self.utility.app = self
        log('build', VERSION_REV)
        log('version', VERSION)
        self.iconpath = os.path.join(self.installdir, 'data', 'images', 'engine.ico')
        self.logopath = os.path.join(self.installdir, 'data', 'images', 'logo.png')
        self.load_playerconfig(state_dir)
        self.statFrame = None
        self.live_frame = None
        self.init_hardware_key()
        cfgfilename = Session.get_default_config_filename(state_dir)
        if DEBUG:
            log('baseapp::init: session config', cfgfilename)
        # Load the saved session config; fall back to defaults on any error.
        try:
            self.sconfig = SessionStartupConfig.load(cfgfilename)
            if self.session_port != DEFAULT_SESSION_LISTENPORT:
                if DEBUG:
                    log('baseapp::init: non-default port specified, overwrite saved session port:', self.session_port)
                self.sconfig.set_listen_port(self.session_port)
            elif DEBUG:
                log('baseapp::init: use session saved port', self.sconfig.get_listen_port())
        except:
            if DEBUG:
                log('baseapp::init: cannot load config file', cfgfilename, 'Use default config')
            self.sconfig = SessionStartupConfig()
            self.sconfig.set_state_dir(state_dir)
            self.sconfig.set_listen_port(self.session_port)
        self.configure_session()
        self.s = Session(self.sconfig, on_error=self.on_error)
        self.s.set_download_states_callback(self.sesscb_states_callback)
        # Node identity: base64 permid, optionally salted with the hardware
        # key, hashed to a stable hex node_id.
        self.device_id = b64encode(self.s.get_permid())
        node_id = self.device_id
        if self.hardware_key is not None:
            node_id += ':' + self.hardware_key
        self.node_id = hashlib.sha1(node_id).hexdigest()
        self.traffic_stats = TrafficStatistics(TrafficStatistics.NODE_CLIENT, self.node_id)
        if RATELIMITADSL:
            self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager()
            self.ratelimiter.set_global_max_speed(DOWNLOAD, DOWNLOADSPEED)
            self.ratelimiter.set_global_max_speed(UPLOAD, 90)
        # Restore previous downloads in a stopped state.
        try:
            self.s.load_checkpoint(initialdlstatus=DLSTATUS_STOPPED)
        except:
            log_exc()
        ga = lambda : GoogleAnalytics.send_event('client', 'startup', VERSION)
        self.run_delayed(ga)
        self.tsservice = TSService(self)
        self.run_delayed(self.check_auth_level, 0.1)
        self.cookie_file = os.path.join(self.state_dir, 'cookies.pickle')
        self.cookie_jar = cookielib.CookieJar()
        self.load_cookies()
        self.stat_settings = RemoteStatisticsSettings()
        self.run_delayed(self.check_statistics_settings, 1)
        # TNS audience measurement (Windows builds only, see TNS_ENABLED).
        if TNS_ENABLED:
            try:
                lso = LSO('source.mmi.bemobile.ua', 'mmi')
                self.tns_uid = lso.get_uid()
            except:
                if DEBUG:
                    print_exc()
        self.check_user_profile()
        self.ad_manager = AdManager(self, self.cookie_jar)
        # Ad preloading defaults off on Dune set-top boxes.
        if TS_ENV_PLATFORM == 'dune':
            default_enabled = False
        else:
            default_enabled = True
        preload_ads_enabled = self.get_preload_ads_enabled(default_enabled)
        if DEBUG:
            log('baseapp::init: preload_ads_enabled', preload_ads_enabled)
        # Periodic maintenance tasks.
        self.run_delayed(self.cleanup_hidden_downloads_task, 1.0)
        self.run_delayed(self.remove_unknown_downloads, 20.0)
        self.run_delayed(self.check_preload_ads, 1.0, 'check_preload_ads')
        if sys.platform == 'win32':
            self.run_delayed(self.start_updater, 60.0)
        # First run: derive the disk cache limit from disk size (50%).
        disk_cache_limit = self.get_playerconfig('disk_cache_limit')
        if disk_cache_limit is None:
            content_dir = self.get_default_destdir()
            total, avail, used = self.get_disk_info(content_dir)
            if total is not None:
                disk_cache_limit = long(total * 0.5)
            else:
                disk_cache_limit = DEFAULT_DISKSPACE_LIMIT
            self.set_playerconfig('disk_cache_limit', disk_cache_limit)
            if DEBUG:
                log('baseapp::init: set disk_cache_limit:', disk_cache_limit)
        elif DEBUG:
            log('baseapp::init: got disk_cache_limit:', disk_cache_limit)
        # First run: choose the ad storage limit from available disk space.
        ad_storage_limit = self.get_playerconfig('ad_storage_limit')
        if ad_storage_limit is None:
            ads_dir = self.s.get_ads_dir()
            total, avail, used = self.get_disk_info(ads_dir)
            if total is not None:
                if avail < 10485760:
                    ad_storage_limit = AD_STORAGE_LIMIT_SMALL
                else:
                    ad_storage_limit = AD_STORAGE_LIMIT_BIG
            else:
                ad_storage_limit = DEFAULT_AD_STORAGE_LIMIT
            self.set_playerconfig('ad_storage_limit', ad_storage_limit)
            if DEBUG:
                log('baseapp::init: set ad_storage_limit:', ad_storage_limit)
        elif DEBUG:
            log('baseapp::init: got ad_storage_limit:', ad_storage_limit)
        self.set_playerconfig('enable_http_support', True)
        # Debug mode: clear stale stats snapshots from previous runs.
        if DEBUG_STATS_TO_FILE:
            try:
                for f in os.listdir(self.installdir):
                    if f.startswith('stat_snapshot_'):
                        os.remove(os.path.join(self.installdir, f))
            except:
                pass
def on_error(self, exception):
try:
errmsg = str(exception)
except:
errmsg = 'Unexpected error'
try:
self.wrapper.on_error(errmsg, exit=True)
except:
print_exc()
def run_delayed(self, func, delay = 0.0, task_id = None, daemon = True, args = []):
if task_id is not None:
if self.timers.has_key(task_id):
self.timers[task_id].cancel()
t = Timer(delay, func, args)
if task_id is not None:
self.timers[task_id] = t
t.daemon = daemon
t.name = 'Timer-' + t.name
t.start()
return t
def start_updater(self):
if sys.platform != 'win32':
return
if self.apptype == 'torrentstream':
exename = 'tsupdate.exe'
else:
exename = 'ace_update.exe'
updater_path = os.path.join(self.installdir, '..', 'updater', exename)
if DEBUG:
log('baseapp::start_updater: updater_path', updater_path)
if os.path.exists(updater_path):
try:
subprocess.Popen(updater_path, close_fds=True)
except:
if DEBUG:
print_exc()
def remove_unknown_downloads(self):
try:
known_files = []
downloads = self.s.get_all_downloads()
for d in downloads:
if not d.is_hidden():
continue
destfiles = d.get_dest_files(get_all=True)
if destfiles:
for filename, savepath in destfiles:
known_files.append(savepath)
path = self.s.get_ads_dir()
filelist = os.listdir(path)
if DEBUG_AD_STORAGE:
log('baseapp::remove_unknown_downloads: known_files', known_files, 'filelist', filelist)
for basename in filelist:
filename = os.path.join(path, basename)
if filename not in known_files:
if DEBUG_AD_STORAGE:
log('baseapp::remove_unknown_downloads: remove: filename', filename)
os.remove(filename)
except:
if DEBUG:
print_exc()
def get_ad_storage_limit(self):
ad_storage_limit = self.get_playerconfig('ad_storage_limit', DEFAULT_AD_STORAGE_LIMIT)
ads_dir = self.s.get_ads_dir()
total, avail, used = self.get_disk_info(ads_dir)
if avail is None:
avail = ad_storage_limit + AD_STORAGE_MIN_FREE_SPACE
if DEBUG_AD_STORAGE:
log('baseapp::get_ad_storage_limit: failed to get disk info, set fake avail: avail', avail)
if avail < ad_storage_limit + AD_STORAGE_MIN_FREE_SPACE:
storage_limit = avail - AD_STORAGE_MIN_FREE_SPACE
else:
storage_limit = ad_storage_limit
if DEBUG_AD_STORAGE:
log('baseapp::get_ad_storage_limit: storage_limit', storage_limit, 'total', total, 'avail', avail, 'used', used)
return storage_limit
def cleanup_unused_ad_downloads(self, keep_hash_list):
if DEBUG_AD_STORAGE:
log('baseapp::cleanup_unused_ad_downloads: keep_hash_list', keep_hash_list)
downloads = self.s.get_all_downloads()
for d in downloads:
if not d.is_hidden():
continue
if d.get_hash() not in keep_hash_list:
if DEBUG_AD_STORAGE:
log('baseapp::cleanup_unused_ad_downloads: remove unused download: hash', binascii.hexlify(d.get_hash()))
self.s.remove_download(d, removecontent=True)
def cleanup_hidden_downloads_task(self):
self.cleanup_hidden_downloads()
self.run_delayed(self.cleanup_hidden_downloads_task, CLEANUP_HIDDEN_DOWNLOADS_INTERVAL)
    def cleanup_hidden_downloads(self, needed = 0, priority = -1):
        """Free ad-storage space by removing hidden (ad) downloads.

        Removes downloads older than AD_STORAGE_MAX_AGE unconditionally, and
        removes further downloads (lowest priority, then least recently
        accessed first) until `needed` extra bytes fit under the limit from
        get_ad_storage_limit().  Never removes: downloads with priority >=
        `priority` (when `priority` != -1), downloads currently playing in
        VOD mode, or downloads serving as ads for an active main download.

        Returns True when enough space is free, False otherwise; on an
        unexpected (logged) exception the return value is None.
        """
        self.cleanup_hidden_downloads_lock.acquire()
        try:
            total_size = 0
            dllist = []
            downloads = self.s.get_all_downloads()
            for d in downloads:
                if not d.is_hidden():
                    continue
                destfiles = d.get_dest_files(get_all=True)
                download_priority = d.get_extra('priority', 0)
                download_size = d.get_content_length()
                download_last_access = 0
                # The newest ctime among the download's files approximates
                # last access; an explicit "last seen" record overrides it.
                for filename, savepath in destfiles:
                    if os.path.exists(savepath):
                        stat = os.stat(savepath)
                        if stat.st_ctime > download_last_access:
                            download_last_access = stat.st_ctime
                last_seen = self.get_ad_last_seen(d.get_hash())
                if last_seen is not None:
                    download_last_access = last_seen
                if download_size > 0:
                    total_size += download_size
                dlinfo = (download_last_access,
                 download_priority,
                 download_size,
                 d)
                dllist.append(dlinfo)
            # Sort by priority then last access so the least valuable, oldest
            # downloads are considered for removal first.
            dllist.sort(key=itemgetter(1, 0))
            storage_limit = self.get_ad_storage_limit()
            # free_up > 0 means we are over budget by that many bytes.
            free_up = total_size + needed - storage_limit
            if DEBUG_AD_STORAGE:
                log('baseapp::cleanup_hidden_downloads: storage_limit', storage_limit, 'total_size', total_size, 'needed', needed, 'free_up', free_up, 'dllist', dllist)
            for last_access, dlpriority, size, d in dllist:
                remove = False
                if priority != -1 and dlpriority >= priority:
                    if DEBUG_AD_STORAGE:
                        log('baseapp::cleanup_hidden_downloads: do not remove download with higher priority: hash', binascii.hexlify(d.get_hash()), 'dlpriority', dlpriority, 'priority', priority)
                    continue
                if d in self.downloads_in_vodmode:
                    if DEBUG_AD_STORAGE:
                        log('baseapp::cleanup_hidden_downloads: do not remove playing download: hash', binascii.hexlify(d.get_hash()))
                    continue
                is_ad = False
                for maind_d, ads in self.downloads_in_admode.iteritems():
                    if d in ads:
                        is_ad = True
                        break
                if is_ad:
                    if DEBUG_AD_STORAGE:
                        log('baseapp::cleanup_hidden_downloads: do not remove download in admode: hash', binascii.hexlify(d.get_hash()))
                    continue
                # Age-based eviction is independent of the space budget.
                now = long(time.time())
                if last_access < now - AD_STORAGE_MAX_AGE:
                    if DEBUG_AD_STORAGE:
                        log('baseapp::cleanup_hidden_downloads: remove outdated download: hash', binascii.hexlify(d.get_hash()), 'last_access', last_access, 'now', now, 'max_age', AD_STORAGE_MAX_AGE)
                    remove = True
                if not remove and free_up > 0:
                    remove = True
                    free_up -= size
                    if DEBUG_AD_STORAGE:
                        log('baseapp::cleanup_hidden_downloads: remove download to free space: hash', binascii.hexlify(d.get_hash()), 'size', size, 'free_up', free_up)
                if remove:
                    self.s.remove_download(d, removecontent=True)
            if DEBUG_AD_STORAGE:
                log('baseapp::cleanup_hidden_downloads: done: free_up', free_up)
            return free_up <= 0
        except:
            log_exc()
        finally:
            self.cleanup_hidden_downloads_lock.release()
def check_preload_ads(self):
self.check_preload_ads_lock.acquire()
try:
preload_ads = self.ad_manager.get_preload_ads(self.device_id, self.s.get_ts_login(), self.get_playerconfig('enable_interruptable_ads', True), user_profile=self.user_profile)
if preload_ads == False:
return
self.add_preload_ads(preload_ads, True)
except:
log_exc()
finally:
self.check_preload_ads_lock.release()
self.run_delayed(self.check_preload_ads, CHECK_PRELOAD_ADS_INTERVAL, 'check_preload_ads')
    def add_preload_ads(self, preload_ads, remove_unused):
        """Register preload ads as hidden, initially-stopped downloads.

        preload_ads: list of ad dicts carrying 'dltype' plus 'tdef' (torrent)
        or 'url' (direct); each dict is annotated in place with 'dlhash' and
        'size'.  When remove_unused is True, hidden downloads not present in
        the new ad list are deleted first.  Raises ValueError on an unknown
        'dltype'.
        """
        dl_hash_list = []
        for ad in preload_ads:
            if ad['dltype'] == DLTYPE_TORRENT:
                ad['dlhash'] = ad['tdef'].get_infohash()
                ad['size'] = ad['tdef'].get_length()
            elif ad['dltype'] == DLTYPE_DIRECT:
                # Try to promote a direct URL to a torrent download; fall
                # back to a URL-hash identity with unknown (0) size.
                tdef = self.get_torrent_from_url(ad['url'])
                if tdef is None:
                    ad['dlhash'] = hashlib.sha1(ad['url']).digest()
                    ad['size'] = 0
                else:
                    ad['tdef'] = tdef
                    ad['dlhash'] = tdef.get_infohash()
                    ad['size'] = tdef.get_length()
                    ad['dltype'] = DLTYPE_TORRENT
                    if DEBUG:
                        log('baseapp::add_preload_ads: got torrent from url: url', ad['url'], 'infohash', binascii.hexlify(ad['dlhash']))
            else:
                raise ValueError('Unknown download type ' + str(ad['dltype']))
            dl_hash_list.append(ad['dlhash'])
        if remove_unused:
            self.cleanup_unused_ad_downloads(dl_hash_list)
        # Start highest-priority ads first.
        preload_ads.sort(key=lambda ad: ad['priority'], reverse=True)
        for ad in preload_ads:
            d = self.s.get_download(ad['dltype'], ad['dlhash'])
            if d is not None:
                # Already registered: keep the existing download untouched.
                pass
            else:
                if DEBUG:
                    log('baseapp::add_preload_ads: start new preload download: type', ad['dltype'], 'hash', binascii.hexlify(ad['dlhash']), 'priority', ad['priority'], 'size', ad['size'])
                # Make room first; skip this ad if space cannot be freed.
                if not self.cleanup_hidden_downloads(needed=ad['size'], priority=ad['priority']):
                    if DEBUG:
                        log('baseapp::add_preload_ads: not enough space: hash', binascii.hexlify(ad['dlhash']), 'size', ad['size'])
                    continue
                dcfg = DownloadStartupConfig()
                dcfg.set_dest_dir(self.s.get_ads_dir())
                dcfg.set_max_conns(10)
                dcfg.set_max_conns_to_initiate(10)
                dcfg.set_hidden(True)
                dcfg.set_extra('priority', ad['priority'])
                # Preload downloads start stopped; they are activated later
                # when actually needed for playback.
                if ad['dltype'] == DLTYPE_TORRENT:
                    self.s.start_download(ad['tdef'], dcfg, initialdlstatus=DLSTATUS_STOPPED)
                elif ad['dltype'] == DLTYPE_DIRECT:
                    self.s.start_direct_download(ad['url'], dcfg, initialdlstatus=DLSTATUS_STOPPED)
    def check_auth_level(self, forceconnect = False):
        """Query the TS service for the user's auth level and apply it.

        With empty credentials the level is reset to 0 and the method
        returns early (the reschedule in `finally` still runs).  After
        CHECK_AUTH_MAX_ERRORS consecutive failures the level is forced to 0.
        Always reschedules itself; the interval depends on error state and
        on whether premium content is currently playing.
        """
        got_error = False
        try:
            ts_login = unicode2str_safe(self.s.get_ts_login())
            ts_password = unicode2str_safe(self.s.get_ts_password())
            if len(ts_login) == 0 or len(ts_password) == 0:
                # No credentials: anonymous level.
                self.s.set_authlevel(0)
                return
            # 'cn' = full (re)connect, 'chk' = lightweight re-check.
            if self.auth_data['last_success'] is None or forceconnect:
                action = 'cn'
            else:
                action = 'chk'
            new_authlevel = self.tsservice.get_user_level(ts_login, ts_password, action, self.device_id, self.hardware_key)
            if new_authlevel is not None:
                self.auth_data['last_success'] = time.time()
                self.auth_data['errors'] = 0
                if DEBUG:
                    log('baseapp::check_auth_level: got user level:', new_authlevel)
            else:
                got_error = True
                self.auth_data['errors'] += 1
                log('baseapp::check_auth_level: failed, error count', self.auth_data['errors'])
                if self.auth_data['errors'] >= CHECK_AUTH_MAX_ERRORS:
                    log('baseapp::check_auth_level: max errors reached, reset user level')
                    new_authlevel = 0
            if new_authlevel is not None:
                current_authlevel = self.s.get_authlevel()
                if new_authlevel != current_authlevel:
                    if DEBUG:
                        log('baseapp::check_auth_level: set new user level: current', current_authlevel, 'new', new_authlevel)
                    self.s.set_authlevel(new_authlevel)
                    # Push the new level to every connected internal client.
                    for socket, ic in self.singsock2ic.iteritems():
                        ic.auth(new_authlevel)
        except:
            if DEBUG:
                log_exc()
        finally:
            if got_error:
                interval = CHECK_AUTH_INTERVAL_ERROR
                if DEBUG:
                    log('baseapp::check_auth_level: got error, next try in', interval)
            elif self.playing_premium_content:
                interval = CHECK_AUTH_INTERVAL_PREMIUM
                if DEBUG:
                    log('baseapp::check_auth_level: got premium, next try in', interval)
            else:
                interval = CHECK_AUTH_INTERVAL_REGULAR
                if DEBUG:
                    log('baseapp::check_auth_level: regular next try in', interval)
            self.run_delayed(self.check_auth_level, interval, task_id='check_auth_level')
def configure_session(self):
self.sconfig.set_install_dir(self.installdir)
if self.ip is not None:
if DEBUG:
log('baseapp::configure_session: set ip', self.ip)
self.sconfig.set_ip_for_tracker(self.ip)
self.sconfig.set_megacache(True)
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
maxconnections_file = os.path.join(os.path.split(current_file_path)[0],"values","maxconnections.txt")
f = open(maxconnections_file, "r")
self.sconfig.set_max_socket_connections(self.get_playerconfig('total_max_connects', int(string)))
self.sconfig.set_overlay(False)
self.sconfig.set_torrent_checking(False)
self.sconfig.set_buddycast(False)
self.sconfig.set_download_help(False)
self.sconfig.set_torrent_collecting(False)
self.sconfig.set_dialback(False)
self.sconfig.set_social_networking(False)
self.sconfig.set_remote_query(False)
self.sconfig.set_bartercast(False)
self.sconfig.set_crawler(False)
self.sconfig.set_multicast_local_peer_discovery(False)
self.sconfig.set_subtitles_collecting(False)
    def _get_poa(self, tdef):
        """Load (or interactively obtain) a proof-of-access for a closed swarm.

        Tries a POA saved in the session state dir first; on any failure it
        falls back to the payment-integration dialog, then saves whatever it
        got (possibly None).  Raises if the POA's torrent_id does not match
        the torrent's infohash.  Returns the POA or None.
        """
        from ACEStream.Core.ClosedSwarm import ClosedSwarm, PaymentIntegration
        print >> sys.stderr, 'Swarm_id:', encodestring(tdef.infohash).replace('\n', '')
        try:
            poa = ClosedSwarm.trivial_get_poa(self.s.get_state_dir(), self.s.get_permid(), tdef.infohash)
            poa.verify()
            if not poa.torrent_id == tdef.infohash:
                raise Exception('Bad POA - wrong infohash')
            print >> sys.stderr, 'Loaded poa from ', self.s.get_state_dir()
        except:
            # No valid local POA: ask the payment-integration UI for one.
            swarm_id = encodestring(tdef.infohash).replace('\n', '')
            my_id = encodestring(self.s.get_permid()).replace('\n', '')
            try:
                poa = PaymentIntegration.wx_get_poa(None, swarm_id, my_id, swarm_title=tdef.get_name())
            except Exception as e:
                print >> sys.stderr, 'Failed to get POA:', e
                poa = None
            try:
                # Cache the result (even None) for the next attempt.
                ClosedSwarm.trivial_save_poa(self.s.get_state_dir(), self.s.get_permid(), tdef.infohash, poa)
            except Exception as e:
                print >> sys.stderr, 'Failed to save POA', e
        if poa:
            if not poa.torrent_id == tdef.infohash:
                raise Exception('Bad POA - wrong infohash')
        return poa
def start_download(self, tdef, dlfile, extra_files_indexes = [], developer_id = 0, affiliate_id = 0, zone_id = 0, poa = None, supportedvodevents = None):
if poa:
from ACEStream.Core.ClosedSwarm import ClosedSwarm
if not poa.__class__ == ClosedSwarm.POA:
raise InvalidPOAException('Not a POA')
destdir = self.get_default_destdir()
try:
enough_space = True
length = tdef.get_length([dlfile])
if tdef.get_live():
length = long(length / 8 * 1.2)
if not self.free_up_diskspace_by_downloads(tdef.get_infohash(), length):
log('BaseApp::start_download: Not enough free diskspace')
enough_space = False
except:
log_exc()
if not enough_space:
raise Exception('Not enough disk space')
dcfg = DownloadStartupConfig()
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
maxconnectionsstream_file = os.path.join(os.path.split(current_file_path)[0],"values","maxconnectionsstream.txt")
f = open(maxconnectionsstream_file, "r")
string = f.read()
dcfg.set_max_conns(self.get_playerconfig('download_max_connects', int(string)))
if poa:
dcfg.set_poa(poa)
print >> sys.stderr, 'POA:', dcfg.get_poa()
else:
dcfg.set_poa(None)
if supportedvodevents is None:
supportedvodevents = self.get_supported_vod_events()
if DEBUG:
log('BaseApp::start_download: supportedvodevents', supportedvodevents)
dcfg.set_video_events(supportedvodevents)
prefix, ext = os.path.splitext(dlfile)
if ext != '' and ext[0] == '.':
content_ext = ext[1:]
else:
content_ext = ''
content_duration = None
if tdef.is_multifile_torrent():
svcdlfiles = self.is_svc(dlfile, tdef)
if svcdlfiles is not None:
dcfg.set_video_event_callback(self.sesscb_vod_event_callback, dlmode=DLMODE_SVC)
dcfg.set_selected_files(svcdlfiles)
else:
dcfg.set_video_event_callback(self.sesscb_vod_event_callback)
dcfg.set_selected_files([dlfile])
dcfg.set_extra_files(extra_files_indexes)
try:
p = [-1] * len(tdef.get_files())
total_duration = 0
content_length = 0
videofiles = tdef.get_files(exts=videoextdefaults)
for videofile in videofiles:
idx = tdef.get_index_of_file_in_files(videofile)
if videofile == dlfile or idx in extra_files_indexes:
p[idx] = 1
content_length += tdef.get_length(videofile)
duration = tdef.get_ts_duration(idx)
if duration is not None:
total_duration += duration
if total_duration > 0:
content_duration = total_duration
idx = tdef.get_index_of_file_in_files(dlfile)
if DEBUG:
log('BaseApp::start_download: bitrate', tdef.get_ts_bitrate(idx))
dcfg.set_files_priority(p)
if DEBUG:
log('BaseApp::start_download: got multi: dlfile', dlfile, 'priority', dcfg.get_files_priority, 'bitrate', tdef.get_ts_bitrate(idx), 'size', content_length, 'duration', content_duration, 'ext', content_ext)
except:
log_exc()
else:
dcfg.set_video_event_callback(self.sesscb_vod_event_callback)
content_duration = tdef.get_ts_duration()
content_length = tdef.get_length()
if DEBUG:
log('BaseApp::start_download: got single: bitrate', tdef.get_ts_bitrate(), 'size', content_length, 'duration', content_duration, 'ext', content_ext)
if content_duration is None:
content_duration = self.guess_duration_from_size(content_length)
if DEBUG:
log('baseapp::start_download: guess duration: size', content_length, 'duration', content_duration)
if tdef.get_live():
include_interruptable_ads = False
else:
include_interruptable_ads = self.get_playerconfig('enable_interruptable_ads', True)
newd_params = {}
provider_key = tdef.get_provider()
provider_content_id = tdef.get_content_id()
premium = tdef.get_premium()
if premium != 0 and provider_key is not None:
if DEBUG_PREMIUM:
log('baseapp::start_download: check premium status: provider_key', provider_key, 'content_id', provider_content_id)
if self.check_premium_status(provider_key, provider_content_id, tdef.get_infohash()):
newd_params['premium'] = True
newd_params['report_interval'] = 60
newd_params['user_check_interval'] = 60
auth_level = self.s.get_authlevel()
if DEBUG_PREMIUM:
log('baseapp::start_download: got premium content: provider_key', provider_key, 'content_id', provider_content_id, 'auth_level', auth_level)
if auth_level < 2:
newd_params['user_check_interval'] = 15
ads = self.ad_manager.get_ads(device_id=self.device_id, user_login=self.s.get_ts_login(), user_level=self.s.get_authlevel(), content_type=DLTYPE_TORRENT, content_id=binascii.hexlify(tdef.get_infohash()), content_ext=content_ext, content_duration=content_duration, affiliate_id=affiliate_id, zone_id=zone_id, developer_id=developer_id, include_interruptable_ads=include_interruptable_ads, is_live=tdef.get_live(), user_profile=self.user_profile, provider_key=provider_key, provider_content_id=provider_content_id)
if ads == False:
if DEBUG:
log('baseapp::start_download: failed to get ads, exit')
raise Exception, 'Cannot start playback'
dcfg.set_dest_dir(destdir)
rate = self.get_playerconfig('total_max_download_rate', DEFAULT_DOWNLOAD_RATE_LIMIT)
if DEBUG:
log('BaseApp::start_download: set download limit to', rate, 'Kb/s')
dcfg.set_max_speed(DOWNLOAD, rate, self.get_playerconfig('auto_download_limit', False))
dcfg.set_wait_sufficient_speed(self.get_playerconfig('wait_sufficient_speed', False))
dcfg.set_http_support(self.get_playerconfig('enable_http_support', True))
dcfg.set_player_buffer_time(self.get_playerconfig('player_buffer_time', DEFAULT_PLAYER_BUFFER_TIME))
dcfg.set_live_buffer_time(self.get_playerconfig('live_buffer_time', DEFAULT_LIVE_BUFFER_TIME))
infohash = tdef.get_infohash()
newd = None
for d in self.s.get_downloads():
if d.get_def().get_infohash() == infohash:
log('BaseApp::start_download: Reusing old duplicate download', infohash)
newd = d
if poa:
d.set_poa(poa)
self.s.lm.h4xor_reset_init_conn_counter()
initialdlstatus = None
got_uninterruptable_ad = False
if len(ads):
for ad in ads:
if not ad['interruptable']:
got_uninterruptable_ad = True
break
if got_uninterruptable_ad:
initialdlstatus = DLSTATUS_STOPPED
if newd is None:
log('BaseApp::start_download: starting new download: infohash', infohash, 'initialdlstatus', initialdlstatus)
newd = self.s.start_download(tdef, dcfg, initialdlstatus)
else:
newd.set_video_events(self.get_supported_vod_events())
newd.set_wait_sufficient_speed(dcfg.get_wait_sufficient_speed())
newd.set_http_support(dcfg.get_http_support())
newd.set_max_speed(UPLOAD, dcfg.get_max_speed(UPLOAD))
newd.set_max_speed(DOWNLOAD, dcfg.get_max_speed(DOWNLOAD), dcfg.get_auto_download_limit())
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
maxconnectionsstream_file = os.path.join(os.path.split(current_file_path)[0],"values","maxconnectionsstream.txt")
f = open(maxconnectionsstream_file, "r")
string = f.read()
newd.set_max_conns(self.get_playerconfig('download_max_connects', int(string)))
svcdlfiles = self.is_svc(dlfile, tdef)
if svcdlfiles is not None:
newd.set_video_event_callback(self.sesscb_vod_event_callback, dlmode=DLMODE_SVC)
newd.set_selected_files(svcdlfiles)
else:
newd.set_video_event_callback(self.sesscb_vod_event_callback)
newd.set_player_buffer_time(self.get_playerconfig('player_buffer_time', DEFAULT_PLAYER_BUFFER_TIME))
newd.set_live_buffer_time(self.get_playerconfig('live_buffer_time', DEFAULT_LIVE_BUFFER_TIME))
if tdef.is_multifile_torrent():
newd.set_selected_files([dlfile])
newd.set_extra_files(extra_files_indexes)
newd.set_files_priority(dcfg.get_files_priority())
if initialdlstatus is None:
if DEBUG:
log('BaseApp::start_download: restarting existing download: infohash', binascii.hexlify(infohash))
newd.restart(new_tdef=tdef)
else:
ds = newd.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
if ds.get_status != DLSTATUS_STOPPED:
if DEBUG:
log('BaseApp::start_download: existing download is active, stop it and wait for ads: infohash', binascii.hexlify(infohash))
newd.stop()
elif DEBUG:
log('BaseApp::start_download: skip restarting existing download, wait for ads: infohash', binascii.hexlify(infohash))
if DEBUG:
log('BaseApp::start_download: saving content to', newd.get_dest_files())
self.dlinfo_lock.acquire()
try:
if newd in self.downloads_in_vodmode:
self.downloads_in_vodmode[newd].update(newd_params)
else:
newd_params['start'] = time.time()
newd_params['download_id'] = hashlib.sha1(b64encode(self.s.get_ts_login()) + b64encode(tdef.get_infohash()) + str(time.time()) + str(random.randint(1, sys.maxint))).hexdigest()
if TNS_ENABLED:
if self.stat_settings.check_content('tns', tdef):
try:
newd_params['tns'] = TNS(self.stat_settings.get_url_list('tns'), self.stat_settings.get_options('tns'), self.tns_uid, self.cookie_jar, tdef)
newd_params['tns'].start()
except TNSNotAllowedException:
pass
except:
if DEBUG:
print_exc()
elif DEBUG:
log('baseapp::start_download: tns disabled: infohash', binascii.hexlify(tdef.get_infohash()))
self.downloads_in_vodmode[newd] = newd_params
if newd in self.downloads_in_admode:
if DEBUG:
log('baseapp::start_ad_downloads: remove old ad downloads on start')
del self.downloads_in_admode[newd]
if len(ads):
if got_uninterruptable_ad:
if DEBUG:
log('baseapp::start_download: got uninterruptable ad, start ads immediatelly')
start_ad_downloads_lambda = lambda : self.start_ad_downloads(newd, ads)
self.run_delayed(start_ad_downloads_lambda, 0.5)
else:
if DEBUG:
log('baseapp::start_download: no uninterruptable ad, start ads when main started')
start_ad_downloads_when_main_started_lambda = lambda : self.start_ad_downloads_when_main_started(newd, ads)
self.run_delayed(start_ad_downloads_when_main_started_lambda, 0.5)
finally:
self.dlinfo_lock.release()
func = lambda : GoogleAnalytics.send_event('client', 'play', VERSION)
self.run_delayed(func)
return newd
    def start_ad_downloads_when_main_started(self, maind, ads):
        """Start ad downloads once the main download is running but not yet playable.

        Polls the main download's state and either: gives up (main left VOD
        mode, errored, or finished seeding), starts the ads (main is
        downloading in VOD mode but not yet playable), or reschedules itself
        in one second.
        """
        self.dlinfo_lock.acquire()
        try:
            if maind not in self.downloads_in_vodmode:
                if DEBUG:
                    log('baseapp::start_ad_downloads_when_main_started: download is not in vod mode, stop trying: hash', binascii.hexlify(maind.get_hash()))
                return
            ds = maind.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
            dlstatus = ds.get_status()
            is_vod = ds.is_vod()
            playable = ds.get_vod_playable()
            if DEBUG:
                log('baseapp::start_ad_downloads_when_main_started: hash', binascii.hexlify(maind.get_hash()), 'status', dlstatus_strings[dlstatus], 'is_vod', is_vod, 'playable', playable)
            if dlstatus == DLSTATUS_STOPPED_ON_ERROR:
                if DEBUG:
                    log('baseapp::start_ad_downloads_when_main_started: download cannot start, stop trying: hash', binascii.hexlify(maind.get_hash()), 'error', ds.get_error())
                return
            start_ads = False
            if dlstatus == DLSTATUS_DOWNLOADING and is_vod:
                if playable:
                    # Too late to inject ads: the main content can play now.
                    if DEBUG:
                        log('baseapp::start_ad_downloads_when_main_started: download is playable, stop trying: hash', binascii.hexlify(maind.get_hash()))
                    return
                if DEBUG:
                    log('baseapp::start_ad_downloads_when_main_started: download is not playable, start ads: hash', binascii.hexlify(maind.get_hash()))
                start_ads = True
            else:
                if dlstatus == DLSTATUS_SEEDING:
                    if DEBUG:
                        log('baseapp::start_ad_downloads_when_main_started: download is finished, stop trying: hash', binascii.hexlify(maind.get_hash()))
                    return
                if DEBUG:
                    log('baseapp::start_ad_downloads_when_main_started: keep trying: hash', binascii.hexlify(maind.get_hash()))
            if start_ads:
                self.start_ad_downloads(maind, ads)
            else:
                # Not in a decisive state yet: poll again in a second.
                start_ad_downloads_when_main_started_lambda = lambda : self.start_ad_downloads_when_main_started(maind, ads)
                self.run_delayed(start_ad_downloads_when_main_started_lambda, 1.0)
        finally:
            self.dlinfo_lock.release()
    def start_ad_downloads(self, newd, ads):
        """Start the ad downloads attached to main download `newd`.

        Non-interruptable ads are started first, then interruptable ones.
        Each started ad is recorded in self.downloads_in_admode[newd].  If
        nothing could be started and the main download is stopped (it was
        held back waiting for ads), the main download is restarted.
        Interruptable ads are finally re-registered as preload downloads.
        """
        ds = newd.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
        main_dlstatus = ds.get_status()
        main_progress = ds.get_progress()
        if DEBUG:
            log('baseapp::start_ad_downloads: start ads: main', binascii.hexlify(newd.get_hash()), 'status', dlstatus_strings[main_dlstatus], 'progress', main_progress)
        noninterruptable_ads = []
        noninterruptable_ads_hash_list = []
        interruptable_ads = []
        for ad in ads:
            if ad['dltype'] == DLTYPE_TORRENT:
                ad['dlhash'] = ad['tdef'].get_infohash()
                ad['size'] = ad['tdef'].get_length()
            elif ad['dltype'] == DLTYPE_DIRECT:
                # NOTE(review): tdef is hard-coded to None here, so the
                # torrent-conversion branch below is unreachable — direct ads
                # always fall back to the URL-hash path.  Compare
                # add_preload_ads(), which calls get_torrent_from_url();
                # presumably disabled deliberately — confirm before changing.
                tdef = None
                if tdef is None:
                    ad['dlhash'] = hashlib.sha1(ad['url']).digest()
                    ad['size'] = 0
                else:
                    ad['tdef'] = tdef
                    ad['dlhash'] = tdef.get_infohash()
                    ad['size'] = tdef.get_length()
                    ad['dltype'] = DLTYPE_TORRENT
                    if DEBUG:
                        log('baseapp::start_ad_downloads: got torrent from url: url', ad['url'], 'infohash', binascii.hexlify(ad['dlhash']))
            else:
                raise ValueError('Unknown download type ' + str(ad['dltype']))
            if not ad.has_key('priority'):
                # Non-interruptable ads default to the highest (-1) priority.
                if ad['interruptable']:
                    ad['priority'] = 0
                else:
                    ad['priority'] = -1
            if ad['interruptable']:
                interruptable_ads.append(ad)
            else:
                noninterruptable_ads.append(ad)
                noninterruptable_ads_hash_list.append(ad['dlhash'])

        def start_ad(ad):
            # Start (or restart) a single ad download; returns True on start.
            d = self.s.get_download(ad['dltype'], ad['dlhash'])
            if d is None:
                if ad['interruptable'] or ad['wait_preload']:
                    # These kinds must already be fully preloaded to be used.
                    if DEBUG:
                        log('baseapp::start_ad_downloads: interruptable or preload ad download is not in downloads, skip: dlhash', binascii.hexlify(ad['dlhash']))
                    return False
                if DEBUG:
                    log('baseapp::start_ad_downloads: start new ad download: main', binascii.hexlify(newd.get_hash()), 'ad', binascii.hexlify(ad['dlhash']))
                if not self.cleanup_hidden_downloads(needed=ad['size'], priority=ad['priority']):
                    if DEBUG:
                        log('baseapp::start_ad_downloads: not enough space: hash', binascii.hexlify(ad['dlhash']), 'size', ad['size'])
                    return False
                dcfg = DownloadStartupConfig()
                dcfg.set_video_event_callback(lambda d, event, params: self.sesscb_vod_event_callback(d, event, params, newd))
                dcfg.set_dest_dir(self.s.get_ads_dir())
                dcfg.set_player_buffer_time(1)
                dcfg.set_max_conns(50)
                dcfg.set_max_conns_to_initiate(60)
                dcfg.set_hidden(True)
                if ad['dltype'] == DLTYPE_TORRENT:
                    d = self.s.start_download(ad['tdef'], dcfg)
                elif ad['dltype'] == DLTYPE_DIRECT:
                    dcfg.set_download_failed_callback(self.download_failed_callback)
                    if ad['predownload']:
                        dcfg.set_predownload(True)
                    d = self.s.start_direct_download(ad['url'], dcfg)
            else:
                if ad['interruptable']:
                    if main_progress == 1.0:
                        # Fully downloaded main content: no point interrupting.
                        if DEBUG:
                            log('baseapp::start_ad_downloads: main content is completed, skip interruptable ad: dlhash', binascii.hexlify(ad['dlhash']))
                        return False
                if ad['interruptable'] or ad['wait_preload']:
                    ds = d.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
                    progress = ds.get_progress()
                    if progress != 1.0:
                        if DEBUG:
                            log('baseapp::start_ad_downloads: interruptable or preload ad download is not completed, skip: dlhash', binascii.hexlify(ad['dlhash']), 'progress', progress)
                        return False
                # An ad shared with another active playback must wait until
                # it finishes (seeds) there before being restarted here.
                used_by_another_vod = False
                for main_d, ads in self.downloads_in_admode.iteritems():
                    if d in ads:
                        used_by_another_vod = True
                        if DEBUG:
                            log('baseapp::start_ad_downloads: ad download is used by another vod: main', binascii.hexlify(newd.get_hash()), 'other', binascii.hexlify(main_d.get_hash()), 'ad', binascii.hexlify(d.get_hash()))
                        break
                if used_by_another_vod:
                    start_ad_download_when_seeding_lambda = lambda d = d, newd = newd: self.start_ad_download_when_seeding(d, newd)
                    self.run_delayed(start_ad_download_when_seeding_lambda, 0.1)
                else:
                    if DEBUG:
                        log('baseapp::start_ad_downloads: restart existing ad download: main', binascii.hexlify(newd.get_hash()), 'ad', binascii.hexlify(ad['dlhash']))
                    d.set_video_event_callback(lambda d, event, params: self.sesscb_vod_event_callback(d, event, params, newd))
                    d.set_player_buffer_time(1)
                    d.set_max_conns(10)
                    d.set_max_conns_to_initiate(10)
                    d.set_hidden(True)
                    if d.get_type() == DLTYPE_DIRECT:
                        d.set_download_failed_callback(self.download_failed_callback)
                    d.restart()
            self.downloads_in_admode.setdefault(newd, odict())[d] = {'ad': ad,
             'start_params': None,
             'completed': False,
             'started': None,
             'finished': None,
             'failed': False}
            return True

        started_noninterruptable_ads = 0
        started_interruptable_ads = 0
        for ad in noninterruptable_ads:
            if start_ad(ad):
                started_noninterruptable_ads += 1
        for ad in interruptable_ads:
            if start_ad(ad):
                started_interruptable_ads += 1
        if DEBUG:
            log('baseapp::start_ad_downloads: started_noninterruptable_ads', started_noninterruptable_ads, 'started_interruptable_ads', started_interruptable_ads)
        if started_noninterruptable_ads == 0 and started_interruptable_ads == 0 and main_dlstatus == DLSTATUS_STOPPED:
            # The main download was held back for ads that never started.
            if DEBUG:
                log('baseapp::start_ad_downloads: no ads started, start main download: main', binascii.hexlify(newd.get_hash()))
            newd.restart()
        self.add_preload_ads(interruptable_ads, False)
def guess_duration_from_size(self, content_length):
if content_length >= 734003200:
content_duration = 5400
elif content_length >= 314572800:
content_duration = 2700
elif content_length >= 104857600:
content_duration = 900
else:
content_duration = 300
return content_duration
def start_ad_download_when_seeding(self, d, main_d):
if DEBUG:
log('baseapp::start_ad_download_when_seeding: main', binascii.hexlify(main_d.get_hash()), 'ad', binascii.hexlify(d.get_hash()))
if main_d not in self.downloads_in_admode:
if DEBUG:
log('baseapp::start_ad_download_when_seeding: main download is not in admode, exit')
return
ds = d.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
dlstatus = ds.get_status()
if dlstatus != DLSTATUS_SEEDING:
if DEBUG:
log('baseapp::start_ad_download_when_seeding: not seeding, reschedule: dlstatus', dlstatus, 'main', binascii.hexlify(main_d.get_hash()), 'ad', binascii.hexlify(d.get_hash()))
start_ad_download_when_seeding_lambda = lambda : self.start_ad_download_when_seeding(d, main_d)
self.run_delayed(start_ad_download_when_seeding_lambda, 1.0)
return
if DEBUG:
log('baseapp::start_ad_download_when_seeding: ad download is seeding, restart')
d.set_video_event_callback(lambda d, event, params: self.sesscb_vod_event_callback(d, event, params, main_d))
d.set_player_buffer_time(1)
d.set_max_conns(10)
d.set_max_conns_to_initiate(10)
d.set_hidden(True)
d.restart()
def start_direct_download(self, main_url, download_url, developer_id, affiliate_id, zone_id):
destdir = self.get_default_destdir()
urlhash = hashlib.sha1(main_url).digest()
if DEBUG:
log('baseapp::start_direct_download: urlhash', binascii.hexlify(urlhash), 'main_url', main_url)
newd = self.s.get_download(DLTYPE_DIRECT, urlhash)
content_duration = 0
if newd is not None:
content_size = newd.get_content_length()
if content_size is not None:
content_duration = self.guess_duration_from_size(content_size)
ads = self.ad_manager.get_ads(device_id=self.device_id, user_login=self.s.get_ts_login(), user_level=self.s.get_authlevel(), content_type=DLTYPE_DIRECT, content_id=binascii.hexlify(urlhash), content_ext='', content_duration=content_duration, affiliate_id=affiliate_id, zone_id=zone_id, developer_id=developer_id, include_interruptable_ads=self.get_playerconfig('enable_interruptable_ads', True), user_profile=self.user_profile)
if ads == False:
if DEBUG:
log('baseapp::start_direct_download: failed to get ads, exit')
raise Exception, 'Cannot start playback'
initialdlstatus = None
got_uninterruptable_ad = False
if len(ads):
for ad in ads:
if not ad['interruptable']:
got_uninterruptable_ad = True
break
if got_uninterruptable_ad:
initialdlstatus = DLSTATUS_STOPPED
if newd is None:
dcfg = DownloadStartupConfig()
dcfg.set_dest_dir(destdir)
dcfg.set_wait_sufficient_speed(self.get_playerconfig('wait_sufficient_speed', False))
dcfg.set_player_buffer_time(self.get_playerconfig('player_buffer_time', DEFAULT_PLAYER_BUFFER_TIME))
dcfg.set_live_buffer_time(self.get_playerconfig('live_buffer_time', DEFAULT_LIVE_BUFFER_TIME))
dcfg.set_video_event_callback(self.sesscb_vod_event_callback)
dcfg.set_direct_download_url(download_url)
dcfg.set_download_finished_callback(lambda url, download_url, urlhash, fileinfo, developer_id = developer_id, affiliate_id = affiliate_id, zone_id = zone_id: self.direct_download_finished(url, download_url, urlhash, fileinfo, developer_id, affiliate_id, zone_id))
newd = self.s.start_direct_download(main_url, dcfg, initialdlstatus)
else:
newd.set_wait_sufficient_speed(self.get_playerconfig('wait_sufficient_speed', False))
newd.set_player_buffer_time(self.get_playerconfig('player_buffer_time', DEFAULT_PLAYER_BUFFER_TIME))
newd.set_live_buffer_time(self.get_playerconfig('live_buffer_time', DEFAULT_LIVE_BUFFER_TIME))
newd.set_video_event_callback(self.sesscb_vod_event_callback)
newd.set_direct_download_url(download_url)
newd.set_download_finished_callback(lambda url, download_url, urlhash, fileinfo, developer_id = developer_id, affiliate_id = affiliate_id, zone_id = zone_id: self.direct_download_finished(url, download_url, urlhash, fileinfo, developer_id, affiliate_id, zone_id))
if initialdlstatus is None:
if DEBUG:
log('BaseApp::start_direct_download: restarting existing download: urlhash', binascii.hexlify(urlhash))
newd.restart()
else:
ds = newd.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
if ds.get_status != DLSTATUS_STOPPED:
if DEBUG:
log('BaseApp::start_direct_download: existing download is active, stop it and wait for ads: urlhash', binascii.hexlify(urlhash))
newd.stop()
elif DEBUG:
log('BaseApp::start_direct_download: skip restarting existing download, wait for ads: urlhash', binascii.hexlify(urlhash))
self.dlinfo_lock.acquire()
try:
self.downloads_in_vodmode[newd] = {}
if newd in self.downloads_in_admode:
if DEBUG:
log('baseapp::start_ad_downloads: remove old ad downloads on start')
del self.downloads_in_admode[newd]
if len(ads):
if got_uninterruptable_ad:
if DEBUG:
log('baseapp::start_download: got uninterruptable ad, start ads immediatelly')
self.start_ad_downloads(newd, ads)
else:
if DEBUG:
log('baseapp::start_download: no uninterruptable ad, start ads when main started')
start_ad_downloads_when_main_started_lambda = lambda : self.start_ad_downloads_when_main_started(newd, ads)
self.run_delayed(start_ad_downloads_when_main_started_lambda, 0.5)
finally:
self.dlinfo_lock.release()
func = lambda : GoogleAnalytics.send_event('client', 'play', VERSION)
self.run_delayed(func)
return newd
    def get_encrypted_file_metainfo(self, path):
        """Read the pickled metadata header of an encrypted media file.

        On-disk layout: a 4-byte integer length prefix followed by a pickled
        metadata dict.  Returns (meta, offset_fix) where offset_fix is the
        correction to translate offsets recorded in the metadata into real
        file offsets.
        """
        f = None
        try:
            f = open(path, 'rb')
            meta_len = f.read(4)
            # NOTE(review): 'l' uses *native* size/alignment; on LP64
            # platforms struct.calcsize('l') is 8, which would not match the
            # 4 bytes read above.  Presumably the writer uses the same native
            # layout — confirm before changing to a fixed-size format.
            meta_len, = struct.unpack('l', meta_len)
            if DEBUG:
                log('baseapp::get_encrypted_file_metainfo: meta_len', meta_len)
            meta = f.read(meta_len)
            # NOTE(review): unpickling file content — only safe for trusted,
            # locally produced files.
            meta = pickle.loads(meta)
            if DEBUG:
                log('baseapp::get_encrypted_file_metainfo: meta', meta)
            # Header size (4 + meta_len) minus the offset stored in the meta.
            offset_fix = 4 + meta_len - meta['offset']
            return (meta, offset_fix)
        finally:
            if f is not None:
                f.close()
    def play_encrypted_file(self, path, affiliate_id = 0, zone_id = 0, developer_id = 0):
        """Play a local encrypted media file through a FakeDownload wrapper.

        Fetches ads for the file (never interruptable ones) and either
        starts the ads first or restarts the fake download immediately.
        Raises when the ad server cannot be reached.
        """
        if DEBUG:
            log('baseapp::play_encrypted_file: path', path)
        # NOTE(review): this missing-file check only logs and then falls
        # through to read the file anyway (it would fail with IOError in
        # get_encrypted_file_metainfo).  Presumably it should return or
        # raise here — confirm intended behavior.
        if not os.path.isfile(path):
            if DEBUG:
                log('baseapp::play_encrypted_file: play_encrypted_file')
        meta, offset_fix = self.get_encrypted_file_metainfo(path)
        content_duration = meta['duration']
        if content_duration == 0:
            content_duration = self.guess_duration_from_size(meta['file_length'])
        ads = self.ad_manager.get_ads(device_id=self.device_id, user_login=self.s.get_ts_login(), user_level=self.s.get_authlevel(), content_type=DLTYPE_ENCRYPTED_FILE, content_id=binascii.hexlify(meta['hash']), content_ext='', content_duration=content_duration, affiliate_id=affiliate_id, zone_id=zone_id, developer_id=developer_id, include_interruptable_ads=False, provider_key=meta['provider'], user_profile=self.user_profile)
        if ads == False:
            if DEBUG:
                log('baseapp::play_encrypted_file: failed to get ads, exit')
            raise Exception, 'Cannot start playback'
        got_uninterruptable_ad = False
        if len(ads):
            for ad in ads:
                if not ad['interruptable']:
                    got_uninterruptable_ad = True
                    break
        newd = FakeDownload(DLTYPE_ENCRYPTED_FILE, path, meta, offset_fix, self.sesscb_vod_event_callback)
        self.dlinfo_lock.acquire()
        try:
            self.downloads_in_vodmode[newd] = {}
            if newd in self.downloads_in_admode:
                if DEBUG:
                    log('baseapp::play_encrypted_file: remove old ad downloads on start')
                del self.downloads_in_admode[newd]
            if len(ads) and got_uninterruptable_ad:
                # Mandatory ads play first; the fake download stays stopped.
                if DEBUG:
                    log('baseapp::play_encrypted_file: got uninterruptable ad, start ads immediatelly')
                self.start_ad_downloads(newd, ads)
            else:
                newd.restart()
        finally:
            self.dlinfo_lock.release()
        return newd
    def direct_download_finished(self, url, download_url, urlhash, fileinfo, developer_id, affiliate_id, zone_id):
        """Turn a finished direct (HTTP) download into a locally seeded torrent.

        Creates and finalizes a TorrentDef for the downloaded file, starts
        seeding it if not already known to the session, registers it with the
        TS service and records the url->infohash mapping. Exceptions are
        swallowed (printed under DEBUG).
        """
        try:
            if DEBUG:
                log('baseapp::direct_download_finished: url', url, 'download_url', download_url, 'fileinfo', fileinfo, 'd', developer_id, 'a', affiliate_id, 'z', zone_id)
            path = os.path.join(fileinfo['destdir'], fileinfo['filename'])
            piecelen = 524288
            tracker = 'http://tracker.publicbt.com:80/announce'
            trackers = [['http://t1.torrentstream.net:2710/announce'],
             ['http://t2.torrentstream.net:2710/announce'],
             ['http://tracker.publicbt.com:80/announce'],
             ['http://tracker.openbittorrent.com:80/announce']]
            if DEBUG:
                log('baseapp::direct_download_finished: create torrent: path', path, 'piecelen', piecelen, 'trackers', trackers)
            if fileinfo['mimetype'] == 'video/mp4':
                # strip provider-specific mp4 metadata atoms before hashing;
                # the cleared tag list is recorded in the torrent metadata
                cleared_mp4_metatags = clear_mp4_metadata_tags_from_file(path, ['gssd', 'gshh'])
                if DEBUG:
                    log('baseapp::direct_download_finished: cleared_mp4_metatags', cleared_mp4_metatags)
            else:
                cleared_mp4_metatags = []
            tdef = TorrentDef()
            tdef.add_content(path)
            tdef.set_piece_length(piecelen)
            tdef.set_tracker(tracker)
            tdef.set_tracker_hierarchy(trackers)
            if download_url is None:
                # no separate download url: expose the original url as a
                # url-list (HTTP seed)
                tdef.set_urllist([url])
            if fileinfo.has_key('duration') and fileinfo['duration'] is not None:
                tdef.set_ts_duration(0, fileinfo['duration'])
            if len(cleared_mp4_metatags):
                tdef.set_ts_replace_mp4_metatags(0, ','.join(cleared_mp4_metatags))
            tdef.finalize()
            infohash = tdef.get_infohash()
            if not self.s.download_exists(DLTYPE_TORRENT, infohash):
                if DEBUG:
                    log('baseapp::direct_download_finished: add new torrent to downloads')
                dcfg = DownloadStartupConfig()
                dcfg.set_dest_dir(fileinfo['destdir'])
                d = self.s.start_download(tdef, dcfg)
            elif DEBUG:
                log('baseapp::direct_download_finished: torrent already exists in downloads: infohash', binascii.hexlify(infohash))
            # NOTE(review): send_torrent_to_server() returns None on failure,
            # which would raise TypeError here and be swallowed by the outer
            # except -- the url2torrent mapping is then never saved. Confirm.
            player_id, torrent_checksum = self.send_torrent_to_server(tdef, developer_id, affiliate_id, zone_id)
            if player_id is not None:
                self.save_player_data_to_db(player_id, torrent_checksum, infohash, developer_id, affiliate_id, zone_id)
            self.save_url2torrent(url, infohash)
            self.s.save_ts_metadata_db(infohash, tdef.get_ts_metadata())
        except:
            if DEBUG:
                print_exc()
def got_ts_metadata(self, tdef, metadata):
if len(metadata) == 0:
return
if DEBUG:
log('baseapp::got_ts_metadata: infohash', binascii.hexlify(tdef.get_infohash()), 'metadata', metadata)
if metadata.has_key('duration'):
tdef.set_ts_duration(metadata['index'], metadata['duration'])
if metadata.has_key('prebuf_pieces'):
tdef.set_ts_prebuf_pieces(metadata['index'], metadata['prebuf_pieces'])
self.s.save_ts_metadata_db(tdef.get_infohash(), tdef.get_ts_metadata())
self.save_ts_metadata_server(tdef.get_infohash(), tdef.get_ts_metadata())
def save_ts_metadata_server(self, infohash, metadata):
if metadata is None:
return
if DEBUG:
log('baseapp::save_ts_metadata_server: infohash', binascii.hexlify(infohash), 'metadata', metadata)
lambda_save_ts_metadata_server = lambda : self._save_ts_metadata_server(infohash, metadata)
self.run_delayed(lambda_save_ts_metadata_server)
def _save_ts_metadata_server(self, infohash, metadata):
if DEBUG:
log('baseapp::_save_ts_metadata_server: infohash', binascii.hexlify(infohash), 'metadata', metadata)
try:
self.tsservice.send_metadata(infohash, metadata)
except:
if DEBUG:
log_exc()
def send_torrent_to_server(self, tdef, developer_id = 0, affiliate_id = 0, zone_id = 0):
if DEBUG_SERVICE_REQUESTS:
log('baseapp::send_torrent_to_server: infohash', binascii.hexlify(tdef.get_infohash()), 'd', developer_id, 'a', affiliate_id, 'z', zone_id)
torrent_data = tdef.save()
torrent_checksum = hashlib.sha1(torrent_data).digest()
protected = tdef.get_protected()
if protected:
infohash = tdef.get_infohash()
else:
infohash = None
player_id = self.tsservice.send_torrent(torrent_data, developer_id, affiliate_id, zone_id, protected, infohash)
if player_id is None:
return
if DEBUG_SERVICE_REQUESTS:
log('baseapp::send_torrent_to_server: torrent saved: infohash', binascii.hexlify(tdef.get_infohash()), 'checksum', binascii.hexlify(torrent_checksum), 'd', developer_id, 'a', affiliate_id, 'z', zone_id, 'player_id', player_id)
self.save_player_data_to_db(player_id, torrent_checksum, tdef.get_infohash(), developer_id, affiliate_id, zone_id)
return (player_id, torrent_checksum)
def update_torrent(self, tdef, developer_id = 0, affiliate_id = 0, zone_id = 0):
lambda_update_torrent = lambda : self._update_torrent(tdef, developer_id, affiliate_id, zone_id)
self.run_delayed(lambda_update_torrent)
    def _update_torrent(self, tdef, developer_id, affiliate_id, zone_id):
        """Delayed worker for update_torrent().

        Verifies the torrent with the TS service; uploads it when the server
        does not know it, otherwise refreshes the local player-id record.
        Fresh metadata and HTTP seeds returned by the server are saved and
        pushed into any matching live download. All failures are swallowed
        (logged under DEBUG).
        """
        try:
            torrent_data = tdef.save()
            torrent_checksum = hashlib.sha1(torrent_data).digest()
            ret = self.tsservice.check_torrent(torrent_checksum=torrent_checksum, infohash=tdef.get_infohash(), developer_id=developer_id, affiliate_id=affiliate_id, zone_id=zone_id)
            if ret is None:
                if DEBUG_SERVICE_REQUESTS:
                    log('baseapp::_update_torrent: check_torrent failed')
                return
            player_id, metadata, http_seeds = ret
            if DEBUG_SERVICE_REQUESTS:
                log('baseapp::_update_torrent: torrent saved: infohash', binascii.hexlify(tdef.get_infohash()), 'checksum', binascii.hexlify(torrent_checksum), 'player_id', player_id, 'metadata', metadata, 'http_seeds', http_seeds)
            if player_id is None:
                # server does not know this torrent yet -- upload it.
                # NOTE(review): send_torrent_to_server() returns None on
                # failure, which would raise TypeError here (swallowed by the
                # outer except) -- confirm this is acceptable.
                player_id, torrent_checksum = self.send_torrent_to_server(tdef, developer_id, affiliate_id, zone_id)
            else:
                self.save_player_data_to_db(player_id, torrent_checksum, tdef.get_infohash(), developer_id, affiliate_id, zone_id)
            if metadata is not None:
                self.s.save_ts_metadata_db(tdef.get_infohash(), metadata)
                # best effort: push fresh metadata into a matching live download
                try:
                    for d in self.s.get_downloads():
                        if d.get_hash() == tdef.get_infohash():
                            if DEBUG_SERVICE_REQUESTS:
                                log('baseapp::_update_torrent: send metadata to download: hash', binascii.hexlify(d.get_hash()), 'metadata', metadata)
                            d.got_metadata(metadata)
                except:
                    pass
            if http_seeds is not None:
                self.s.set_ts_http_seeds(tdef.get_infohash(), http_seeds)
                # best effort: push HTTP seeds into a matching live download
                try:
                    for d in self.s.get_downloads():
                        if d.get_hash() == tdef.get_infohash():
                            if DEBUG_SERVICE_REQUESTS:
                                log('baseapp::_update_torrent: send http seeds to download: hash', binascii.hexlify(d.get_hash()), 'http_seeds', http_seeds)
                            d.got_http_seeds(http_seeds)
                except:
                    pass
        except:
            if DEBUG:
                log_exc()
def get_torrent_from_server(self, infohash = None, player_id = None):
if infohash is None and player_id is None:
raise ValueError, 'infohash or player id must be specified'
if infohash is not None and player_id is not None:
raise ValueError, 'Both infohash and player id cannot be specified at the same time'
if DEBUG_SERVICE_REQUESTS:
if infohash is not None:
log('baseapp::get_torrent_from_server: infohash', binascii.hexlify(infohash))
elif player_id is not None:
log('baseapp::get_torrent_from_server: player_id', player_id)
player_data = self.tsservice.get_torrent(infohash=infohash, player_id=player_id)
if player_data is None:
return
tdef = player_data['tdef']
self.s.save_torrent_local(tdef, player_data['checksum'])
self.s.save_ts_metadata_db(tdef.get_infohash(), tdef.get_ts_metadata())
return player_data
def get_torrent_from_adid(self, adid):
infohash = self.get_infohash_from_adid(adid)
if infohash is None:
return
ret = self.get_torrent_by_infohash(infohash)
if ret is None:
return
return ret['tdef']
def get_infohash_from_adid(self, adid):
infohash = None
infohash = self.get_infohash_from_adid_db(adid)
if infohash is not None:
return infohash
infohash = self.get_infohash_from_adid_server(adid)
if infohash is not None:
self.save_adid2infohash_db(adid, infohash)
return infohash
def get_infohash_from_adid_db(self, adid):
if DEBUG_SERVICE_REQUESTS:
t = time.time()
db = self.s.open_dbhandler(NTFY_ADID2INFOHASH)
if db is None:
return
infohash = db.get(adid)
self.s.close_dbhandler(db)
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_infohash_from_adid_db: adid', adid, 'infohash', infohash, 'time', time.time() - t)
return infohash
def get_ad_last_seen(self, infohash):
db = self.s.open_dbhandler(NTFY_ADID2INFOHASH)
if db is None:
return
last_seen = db.get_last_seen(infohash)
self.s.close_dbhandler(db)
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_ad_last_seen: infohash', binascii.hexlify(infohash), 'last_seen', last_seen)
return last_seen
def get_infohash_from_adid_server(self, adid):
return self.tsservice.get_infohash_from_adid(adid)
def save_adid2infohash_db(self, adid, infohash):
if DEBUG_SERVICE_REQUESTS:
log('baseapp::save_adid2infohash_db: adid', adid, 'infohash', binascii.hexlify(infohash))
db = self.s.open_dbhandler(NTFY_ADID2INFOHASH)
if db is None:
if DEBUG_SERVICE_REQUESTS:
log('baseapp::save_adid2infohash_db: no db')
return
db.put(adid, infohash)
self.s.close_dbhandler(db)
def get_torrent_from_url(self, url):
infohash = self.get_infohash_from_url(url)
if infohash is None:
return
ret = self.get_torrent_by_infohash(infohash)
if ret is None:
return
return ret['tdef']
def get_infohash_from_url(self, url):
infohash = None
infohash = self.get_infohash_from_url_db(url)
if infohash is not None:
return infohash
infohash = self.get_infohash_from_url_server(url)
if infohash is not None:
self.save_url2torrent_db(url, infohash)
return infohash
def get_infohash_from_url_db(self, url):
db = self.s.open_dbhandler(NTFY_URL2TORRENT)
if db is None:
return
infohash = db.get(url)
self.s.close_dbhandler(db)
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_infohash_from_url: url', url, 'infohash', infohash)
return infohash
def get_infohash_from_url_server(self, url):
return self.tsservice.get_infohash_from_url(url)
def save_url2torrent(self, url, infohash):
try:
self.save_url2torrent_db(url, infohash)
except:
log_exc()
try:
self.save_url2torrent_server(url, infohash)
except:
log_exc()
def save_url2torrent_db(self, url, infohash):
if DEBUG_SERVICE_REQUESTS:
log('baseapp::save_url2torrent: url', url, 'infohash', binascii.hexlify(infohash))
db = self.s.open_dbhandler(NTFY_URL2TORRENT)
if db is None:
if DEBUG_SERVICE_REQUESTS:
log('baseapp::save_url2torrent: no db')
return
db.put(url, infohash)
self.s.close_dbhandler(db)
def save_url2torrent_server(self, url, infohash):
self.tsservice.save_url2infohash(url, infohash)
    def get_torrent_from_db(self, checksum = None, infohash = None):
        """Load a locally stored torrent by checksum or infohash.

        Returns {'tdef', 'infohash', 'checksum'} or None when the torrent is
        unknown, the db is unavailable, or loading fails.
        """
        if checksum is None and infohash is None:
            return
        torrent_db = None
        tdef = None
        try:
            if DEBUG_SERVICE_REQUESTS:
                t = time.time()
            torrent_db = self.s.open_dbhandler(NTFY_TORRENTS)
            if torrent_db is None:
                return
            if checksum is not None:
                torrent = torrent_db.getTorrent(checksum=checksum, keys=['torrent_file_name'])
            else:
                torrent = torrent_db.getTorrent(infohash=infohash, keys=['torrent_file_name'])
            if DEBUG_SERVICE_REQUESTS:
                log('baseapp::get_torrent_from_db: infohash', infohash, 'checksum', checksum, 'torrent', torrent, 'time', time.time() - t)
            if torrent is None:
                return
            torrent_dir = torrent_db.getTorrentDir()
            path = os.path.join(torrent_dir, torrent['torrent_file_name'])
            if os.path.exists(path):
                if DEBUG_SERVICE_REQUESTS:
                    t = time.time()
                tdef = TorrentDef.load(path)
                if DEBUG_SERVICE_REQUESTS:
                    log('baseapp::get_torrent_from_db: load torrent from file: path', path, 'time', time.time() - t)
            else:
                # torrent file vanished from disk; drop the stale db row.
                # NOTE(review): when looked up by checksum, `infohash` is None
                # here -- confirm deleteTorrent(None) is intended.
                if DEBUG_SERVICE_REQUESTS:
                    log('baseapp::get_torrent_from_db: torrent file removed, update db: path', path)
                torrent_db.deleteTorrent(infohash)
            # NOTE(review): when the file was missing, this still returns a
            # record with tdef=None rather than None -- callers must cope.
            return {'tdef': tdef,
             'infohash': torrent['infohash'],
             'checksum': torrent['checksum']}
        except:
            log_exc()
            return
        finally:
            if torrent_db is not None:
                self.s.close_dbhandler(torrent_db)
def get_torrent_by_infohash(self, infohash):
if DEBUG_SERVICE_REQUESTS:
t = time.time()
ret = self.get_torrent_from_db(infohash=infohash)
if ret is not None:
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_torrent_by_infohash: got from db: infohash', binascii.hexlify(infohash), 'time', time.time() - t)
return {'tdef': ret['tdef'],
'checksum': ret['checksum']}
if DEBUG_SERVICE_REQUESTS:
t = time.time()
player_data = self.get_torrent_from_server(infohash=infohash)
if player_data is not None:
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_torrent_by_infohash: got from server: infohash', binascii.hexlify(infohash), 'time', time.time() - t)
return {'tdef': player_data['tdef'],
'checksum': player_data['checksum']}
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_torrent_by_infohash: not found: infohash', binascii.hexlify(infohash))
def get_player_data_from_db(self, player_id):
try:
db = self.s.open_dbhandler(NTFY_TS_PLAYERS)
if db is None:
return
player_data = db.get(player_id)
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_player_data_from_db: player_id', player_id, 'player_data', player_data)
return player_data
except:
log_exc()
return
finally:
if db is not None:
self.s.close_dbhandler(db)
def get_player_id_from_db(self, checksum, infohash, developer_id, affiliate_id, zone_id):
try:
db = self.s.open_dbhandler(NTFY_TS_PLAYERS)
if db is None:
return
player_id = db.getPlayerId(checksum, infohash, developer_id, affiliate_id, zone_id)
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_player_id_from_db: player_id', player_id, 'checksum', checksum, 'infohash', binascii.hexlify(infohash), 'developer_id', developer_id, 'affiliate_id', affiliate_id, 'zone_id', zone_id)
return player_id
except:
log_exc()
return
finally:
if db is not None:
self.s.close_dbhandler(db)
def save_player_data_to_db(self, player_id, checksum, infohash, developer_id, affiliate_id, zone_id):
if DEBUG_SERVICE_REQUESTS:
log('baseapp::save_player_data_to_db: player_id', player_id, 'checksum', binascii.hexlify(checksum), 'infohash', binascii.hexlify(infohash), 'developer_id', developer_id, 'affiliate_id', affiliate_id, 'zone_id', zone_id)
try:
db = self.s.open_dbhandler(NTFY_TS_PLAYERS)
if db is None:
return
db.put(player_id, checksum, infohash, developer_id, affiliate_id, zone_id)
except:
log_exc()
finally:
if db is not None:
self.s.close_dbhandler(db)
def get_player_data(self, player_id):
player_data = self.get_player_data_from_db(player_id)
if player_data is not None:
ret = self.get_torrent_from_db(checksum=player_data['checksum'])
if ret is not None:
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_player_data: got from db: player_id', player_id, 'checksum', binascii.hexlify(player_data['checksum']), 'player_data', player_data)
player_data['tdef'] = ret['tdef']
return player_data
player_data = self.get_torrent_from_server(player_id=player_id)
if player_data is not None:
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_player_data: got from server: player_id', player_id, 'checksum', binascii.hexlify(player_data['checksum']), 'player_data', player_data)
self.save_player_data_to_db(player_id, player_data['checksum'], player_data['tdef'].get_infohash(), player_data['developer_id'], player_data['affiliate_id'], player_data['zone_id'])
return player_data
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_player_data: not found: player_id', player_id)
def check_user_profile(self):
if self.user_profile is None:
self.user_profile = self.get_user_profile()
return self.user_profile is not None
def get_user_profile(self):
db = None
try:
db = self.s.open_dbhandler(NTFY_USER_PROFILE)
if db is None:
return
profile = db.get_active_profile()
if DEBUG_SERVICE_REQUESTS:
log('baseapp::get_user_profile: profile', str(profile))
return profile
except:
log_exc()
return
finally:
if db is not None:
self.s.close_dbhandler(db)
def sesscb_vod_event_callback(self, d, event, params, main_download = None):
pass
def get_supported_vod_events(self):
pass
def get_drive_list(self):
try:
drives = win32api.GetLogicalDriveStrings()
drives = [ drivestr for drivestr in drives.split('\x00') if drivestr ]
return drives
except:
return []
def format_drive_name(self, drive):
if drive is None:
return ''
if len(drive) < 2:
return ''
drive = drive[:2].lower()
if not drive.endswith(':'):
return ''
return drive
def get_disk_info(self, path):
try:
folder = path
if sys.platform == 'win32':
free_bytes, total_bytes, _ = win32file.GetDiskFreeSpaceEx(folder)
used_bytes = total_bytes - free_bytes
else:
st = os.statvfs(folder)
free_bytes = st.f_bavail * st.f_frsize
total_bytes = st.f_blocks * st.f_frsize
used_bytes = (st.f_blocks - st.f_bfree) * st.f_frsize
return (total_bytes, free_bytes, used_bytes)
except:
if DEBUG:
log('baseapp::get_disk_info: cannot get disk info: path', path)
return (None, None, None)
    def free_up_diskspace_by_downloads(self, infohash = None, needed = 0):
        """Try to make `needed` bytes available in the content dir by removing
        old downloads (and, for the acestream apptype, stray unknown files).

        The download identified by `infohash` (the one about to play) is
        never removed. Returns True when enough space is (or already was)
        available, False when even evicting all candidates was not enough.
        """
        disk_cache_limit = self.get_playerconfig('disk_cache_limit', DEFAULT_DISKSPACE_LIMIT)
        content_dir = self.get_default_destdir()
        total, avail, used = self.get_disk_info(content_dir)
        if avail is None:
            # disk info unavailable: with an unlimited cache there is nothing
            # sensible to do, otherwise treat the cache limit as "available"
            if disk_cache_limit == 0:
                if DEBUG:
                    log('baseapp::free_up_diskspace_by_downloads: cannot get disk info and disk cache is unlimited')
                return True
            avail = disk_cache_limit
        if DEBUG:
            log('BaseApp::free_up_diskspace_by_downloads: needed', needed, 'avail', avail, 'disk_cache_limit', disk_cache_limit)
        if disk_cache_limit < needed < avail:
            # chained comparison: the request exceeds the cache limit but
            # still fits on disk -- cleanup would not help, just allow it
            if DEBUG:
                log('BaseApp::free_up_diskspace_by_downloads: no cleanup for bigguns')
            return True
        inuse = 0L
        timelist = []
        if self.apptype == 'acestream':
            known_files = []
        # tally the space used by evictable downloads and remember the newest
        # file ctime of each for oldest-first eviction below
        for d in self.s.get_downloads():
            destfiles = d.get_dest_files(exts=videoextdefaults, get_all=True)
            if self.apptype == 'acestream':
                for filename, savepath in destfiles:
                    if os.path.exists(savepath):
                        known_files.append(savepath)
            if infohash is not None and infohash == d.get_hash():
                continue
            if d in self.downloads_in_vodmode:
                continue
            if d.is_hidden():
                continue
            if DEBUG:
                log('BaseApp::free_up_diskspace_by_downloads: downloaded content', destfiles)
            dinuse = 0L
            max_ctime = 0
            for filename, savepath in destfiles:
                dirname = os.path.dirname(savepath)
                if dirname != content_dir:
                    # only files directly inside the content dir count
                    if DEBUG:
                        log('baseapp::free_up_diskspace_by_downloads: skip dir:', dirname)
                    continue
                if os.path.exists(savepath):
                    stat = os.stat(savepath)
                    dinuse += stat.st_size
                    if stat.st_ctime > max_ctime:
                        max_ctime = stat.st_ctime
            if dinuse > 0:
                inuse += dinuse
                timerec = (max_ctime, dinuse, d)
                timelist.append(timerec)
        if self.apptype == 'acestream':
            # delete files in the content dir that belong to no known download
            try:
                filelist = os.listdir(content_dir)
            except:
                if DEBUG:
                    print_exc()
                filelist = []
            if DEBUG:
                log('baseapp::free_up_diskspace_by_downloads: known_files', known_files, 'filelist', filelist)
            for basename in filelist:
                if basename == '.lock':
                    continue
                if infohash is not None and basename == binascii.hexlify(infohash):
                    if DEBUG:
                        log('baseapp::free_up_diskspace_by_downloads: keep file: basename', basename, 'infohash', binascii.hexlify(infohash))
                    continue
                filename = os.path.join(content_dir, basename)
                if filename not in known_files:
                    if DEBUG:
                        log('baseapp::free_up_diskspace_by_downloads: remove unknown file: filename', filename)
                    try:
                        os.remove(filename)
                    except:
                        if DEBUG:
                            print_exc()
        # effective budget: free disk space, capped by the cache limit
        if disk_cache_limit == 0:
            limit = avail
        else:
            limit = min(avail, disk_cache_limit)
        if inuse + needed < limit:
            if DEBUG:
                log('BaseApp::free_up_diskspace_by_downloads: enough avail: inuse', inuse, 'needed', needed, 'limit', limit, 'avail', avail)
            return True
        # evict oldest downloads first until the deficit is covered
        timelist.sort()
        if DEBUG:
            log('baseapp::free_up_diskspace_by_downloads: timelist', timelist)
        to_free = inuse + needed - limit
        if DEBUG:
            log('baseapp::free_up_diskspace_by_downloads: to_free', to_free, 'limit', limit, 'inuse', inuse, 'needed', needed)
        for ctime, dinuse, d in timelist:
            if DEBUG:
                log('baseapp::free_up_diskspace_by_downloads: remove download: hash', binascii.hexlify(d.get_hash()), 'dinuse', dinuse, 'ctime', ctime)
            self.s.remove_download(d, removecontent=True)
            to_free -= dinuse
            if DEBUG:
                log('baseapp::free_up_diskspace_by_downloads: remove done: to_free', to_free, 'limit', limit, 'inuse', inuse, 'needed', needed)
            if to_free <= 0:
                return True
        return False
def sesscb_states_callback(self, dslist):
if self.debug_systray:
getpeerlist = True
haspeerlist = True
else:
getpeerlist = False
haspeerlist = False
gui_states_callback_wrapper_lambda = lambda : self.gui_states_callback_wrapper(dslist, haspeerlist)
self.run_delayed(gui_states_callback_wrapper_lambda)
return (1.0, getpeerlist)
def gui_states_callback_wrapper(self, dslist, haspeerlist):
try:
self.gui_states_callback(dslist, haspeerlist)
except:
log_exc()
def gui_states_callback(self, dslist, haspeerlist):
if self.shuttingdown:
return ({},
[],
0,
0)
playermode = self.playermode
totalspeed = {UPLOAD: 0.0,
DOWNLOAD: 0.0}
totalhelping = 0
display_stats = self.download_states_display_counter % DOWNLOAD_STATES_DISPLAY_INTERVAL == 0
self.download_states_display_counter += 1
all_dslist = {}
playing_dslist = {}
hidden_dslist = {}
all_playing_are_seeding = True
playing_premium_content = False
self.dlinfo_lock.acquire()
try:
for ds in dslist:
d = ds.get_download()
all_dslist[d] = ds
is_vod_download = False
vod_download_params = None
if d.is_hidden():
hidden_dslist[d] = ds
if d in self.downloads_in_vodmode:
is_vod_download = True
vod_download_params = self.downloads_in_vodmode[d]
playing_dslist[d] = ds
if all_playing_are_seeding and ds.get_status() != DLSTATUS_SEEDING:
all_playing_are_seeding = False
if is_vod_download and vod_download_params.get('premium', False):
playing_premium_content = True
provider_key = d.get_def().get_provider()
provider_content_id = d.get_def().get_content_id()
if not self.report_premium_download(provider_key, provider_content_id, vod_download_params):
if time.time() > vod_download_params['start'] + PREMIUM_PREVIEW_TIMEOUT and not vod_download_params.has_key('stopped_preview'):
if DEBUG_PREMIUM:
log('baseapp::gui_states_callback: user auth failed for premium content, stop')
vod_download_params['stopped_preview'] = True
self.stop_download(d, 'http://acestream.net/embed/premium', 'This content is available for premium users only')
if DEBUG and display_stats:
log('baseapp::gui_states_callback: dlinfo: vod=%i type=%d hash=%s hidden=%i priority=%d status=%s paused=%i progress=%.1f%% error=%s' % (is_vod_download,
d.get_type(),
binascii.hexlify(d.get_hash()),
d.is_hidden(),
d.get_extra('priority', 0),
dlstatus_strings[ds.get_status()],
ds.get_paused(),
100.0 * ds.get_progress(),
ds.get_error()))
self.update_download_stats(ds)
if not d.is_hidden() or SHOW_HIDDEN_DOWNLOADS_INFO:
for dir in [UPLOAD, DOWNLOAD]:
totalspeed[dir] += ds.get_current_speed(dir)
totalhelping += ds.get_num_peers()
for main_download, ad_downloads in self.downloads_in_admode.iteritems():
if not playing_dslist.has_key(main_download):
if DEBUG:
log('baseapp::gui_states_callback: main download in ad mode is not in vod downloads: infohash', binascii.hexlify(main_download.get_hash()))
else:
main_ds = playing_dslist[main_download]
if main_ds.get_status() == DLSTATUS_STOPPED:
all_ads_completed = True
for d in ad_downloads.keys():
if not all_dslist.has_key(d):
if DEBUG:
log('baseapp::gui_states_callback: ad download not found in downloads: infohash', binascii.hexlify(d.get_hash()))
else:
ds = all_dslist[d]
if DEBUG:
log('baseapp::gui_states_callback: check ad download: main', binascii.hexlify(main_download.get_hash()), 'ad', binascii.hexlify(d.get_hash()), 'status', ds.get_status(), 'progress', ds.get_progress())
status = ds.get_status()
if status == DLSTATUS_STOPPED_ON_ERROR:
ad_downloads[d]['failed'] = True
elif status == DLSTATUS_STOPPED:
if DEBUG:
log('!!!! baseapp::gui_states_callback: ad download is stopped, mark as failed !!!!')
ad_downloads[d]['failed'] = True
elif status == DLSTATUS_SEEDING:
ad_downloads[d]['completed'] = True
else:
all_ads_completed = False
if all_ads_completed:
if DEBUG:
log('baseapp::gui_states_callback: all ads are completed, restart download: infohash', binascii.hexlify(main_download.get_hash()))
main_download.restart()
finally:
self.dlinfo_lock.release()
if haspeerlist:
try:
for ds in playing_dslist.values():
peerlist = ds.get_peerlist()
vodstats = ds.get_vod_stats()
stats = ds.get_stats()
if peerlist and self.statFrame:
self.statFrame.updateStats(spew=peerlist, statistics=stats, vod_stats=vodstats)
if DEBUG_STATS_TO_FILE:
self.save_state_to_file(spew=peerlist, statistics=stats, vod_stats=vodstats)
break
except:
log_exc()
if self.live_frame is not None:
try:
for ds in playing_dslist.values():
peerlist = ds.get_peerlist()
vodstats = ds.get_vod_stats()
stats = ds.get_stats()
self.live_frame.update(spew=peerlist, statistics=stats, vod_stats=vodstats)
break
except:
print_exc()
txt = self.appname + '\n\n'
txt += 'DL: %.1f\n' % totalspeed[DOWNLOAD]
txt += 'UL: %.1f\n' % totalspeed[UPLOAD]
txt += 'Helping: %d\n' % totalhelping
self.OnSetSysTrayTooltip(txt)
if totalspeed[DOWNLOAD] > self.max_download_rate:
self.max_download_rate = totalspeed[DOWNLOAD]
if totalspeed[UPLOAD] > self.max_upload_rate:
self.max_upload_rate = totalspeed[UPLOAD]
self.avg_download_rate_sum += totalspeed[DOWNLOAD]
self.avg_download_rate_count += 1
self.avg_download_rate = self.avg_download_rate_sum / float(self.avg_download_rate_count)
self.avg_upload_rate_sum += totalspeed[UPLOAD]
self.avg_upload_rate_count += 1
self.avg_upload_rate = self.avg_upload_rate_sum / float(self.avg_upload_rate_count)
if self.playing_premium_content != playing_premium_content:
if DEBUG_PREMIUM:
log('baseapp::gui_states_callback: playing_premium_content changed to', playing_premium_content)
self.playing_premium_content = playing_premium_content
if playing_premium_content:
self.run_delayed(self.check_auth_level, 1.0, task_id='check_auth_level')
if all_playing_are_seeding:
if self.get_playerconfig('enable_interruptable_ads', True):
max_progress = -1
max_priority = -1
download_to_restart = None
for d, ds in hidden_dslist.iteritems():
status = ds.get_status()
if status == DLSTATUS_STOPPED or status == DLSTATUS_STOPPED_ON_ERROR:
priority = d.get_extra('priority', 0)
if ds.get_progress() == 1.0:
if DEBUG_HIDDEN_DOWNLOADS:
log('baseapp::gui_states_callback: restart completed hidden download: hash', binascii.hexlify(d.get_hash()), 'status', dlstatus_strings[status], 'progress', ds.get_progress())
d.restart()
elif priority > max_priority:
download_to_restart = d
max_progress = ds.get_progress()
max_priority = priority
elif ds.get_progress() > max_progress:
download_to_restart = d
max_progress = ds.get_progress()
max_priority = priority
elif status == DLSTATUS_HASHCHECKING or ds.get_progress() != 1.0:
if DEBUG_HIDDEN_DOWNLOADS:
log('baseapp::gui_states_callback: got running hidden download: hash', binascii.hexlify(d.get_hash()), 'status', dlstatus_strings[status], 'progress', ds.get_progress())
download_to_restart = None
break
if download_to_restart is not None:
import os
current_file_path = os.path.dirname(os.path.realpath(__file__))
downloadlimitvalue_file = os.path.join(os.path.split(current_file_path)[0],"values","downloadlimit.txt")
f = open(downloadlimitvalue_file, "r")
string = f.read()
max_speed = self.get_playerconfig('max_download_rate', int(string))
if max_speed == 0:
max_speed = self.max_download_rate
limit_speed = max_speed / 3
download_to_restart.set_max_speed(DOWNLOAD, limit_speed)
if DEBUG_HIDDEN_DOWNLOADS:
ds = hidden_dslist[download_to_restart]
log('baseapp::gui_states_callback: restart hidden download: hash', binascii.hexlify(download_to_restart.get_hash()), 'status', dlstatus_strings[ds.get_status()], 'progress', ds.get_progress(), 'max_speed', max_speed, 'limit_speed', limit_speed)
download_to_restart.restart()
if playermode == DLSTATUS_DOWNLOADING:
if DEBUG:
log('BaseApp::gui_states_callback: all playing download are seeding, restart others')
t = time.time()
self.restart_other_downloads()
if DEBUG:
log('BaseApp::gui_states_callback: restart others: time', time.time() - t)
elif playermode == DLSTATUS_SEEDING:
if DEBUG:
log('BaseApp::gui_states_callback: not all playing download are seeding, stop others')
t = time.time()
self.stop_other_downloads()
if DEBUG:
log('BaseApp::gui_states_callback: stop others: time', time.time() - t)
if len(playing_dslist) == 0:
return ({},
[],
0,
0)
return (all_dslist,
playing_dslist.values(),
totalhelping,
totalspeed)
def update_download_stats(self, ds, force = False):
try:
if not force and time.time() - self.last_download_stats < DOWNLOAD_STATS_INTERVAL:
return
self.last_download_stats = time.time()
d = ds.get_download()
download_id = d.get_download_id()
if download_id is None:
return
if d.get_type() != DLTYPE_TORRENT:
return
tdef = d.get_def()
if not self.stat_settings.check_content('ts', tdef):
return
downloaded = ds.get_total_transferred(DOWNLOAD)
uploaded = ds.get_total_transferred(UPLOAD)
if not self.download_stats.has_key(download_id):
self.download_stats[download_id] = {'downloaded': 0,
'uploaded': 0}
if self.download_stats[download_id]['downloaded'] != downloaded or self.download_stats[download_id]['uploaded'] != uploaded:
self.download_stats[download_id]['downloaded'] = downloaded
self.download_stats[download_id]['uploaded'] = uploaded
infohash = binascii.hexlify(tdef.get_infohash())
provider_key = tdef.get_provider()
provider_content_id = tdef.get_content_id()
self.traffic_stats.send_event(download_id, 'keepalive', downloaded, uploaded, infohash, provider_key, provider_content_id)
except:
if DEBUG:
print_exc()
def save_state_to_file(self, spew, statistics = None, vod_stats = None):
info = ''
if spew is not None:
tot_uprate = 0.0
tot_downrate = 0.0
tot_downloaded = 0
for x in range(len(spew)):
peerdata = [''] * 17
if spew[x]['optimistic'] == 1:
a = '*'
else:
a = ' '
peerdata[0] = a
peerdata[2] = spew[x]['ip'].ljust(15)
peerdata[3] = spew[x]['direction']
peerdata[4] = ('%.0f kB/s' % (float(spew[x]['uprate']) / 1000)).ljust(10)
tot_uprate += spew[x]['uprate']
if spew[x]['uinterested'] == 1:
a = '*'
else:
a = ' '
peerdata[5] = a
if spew[x]['uchoked'] == 1:
a = '*'
else:
a = ' '
peerdata[6] = a
bitrate = None
if vod_stats['videostatus'] is not None:
bitrate = vod_stats['videostatus'].bitrate
str_downrate = '%.0f' % (spew[x]['downrate'] / 1024.0)
if 'short_downrate' in spew[x]:
if bitrate is None:
str_downrate += ' (%.0f)' % (spew[x]['short_downrate'] / 1024 / 0.0)
else:
str_downrate += ' (%.0f, %.1f)' % (spew[x]['short_downrate'] / 1024.0, spew[x]['short_downrate'] / float(bitrate))
peerdata[7] = str_downrate.ljust(15)
tot_downrate += spew[x]['downrate']
if spew[x]['dinterested'] == 1:
a = '*'
else:
a = ' '
peerdata[8] = a
if spew[x]['dchoked'] == 1:
a = '*'
else:
a = ' '
peerdata[9] = a
if spew[x]['snubbed'] == 1:
a = '*'
else:
a = ' '
peerdata[10] = a
tot_downloaded += spew[x]['dtotal']
peerdata[11] = ('%.2f MiB' % (float(spew[x]['dtotal']) / 1048576)).ljust(10)
if spew[x]['utotal'] is not None:
a = '%.2f MiB' % (float(spew[x]['utotal']) / 1048576)
else:
a = ''
peerdata[12] = a.ljust(10)
peerdata[13] = ('%.1f%%' % (float(int(spew[x]['completed'] * 1000)) / 10)).ljust(5)
if spew[x]['speed'] is not None:
a = '%.0f' % (float(spew[x]['speed']) / 1024)
if 'speed_proxy' in spew[x]:
a += ' | p:%.0f' % (float(spew[x]['speed_proxy']) / 1024)
if 'speed_non_proxy' in spew[x]:
a += ' | r:%.0f' % (float(spew[x]['speed_non_proxy']) / 1024)
else:
a = ''
peerdata[14] = a.ljust(15)
peerdata[15] = str(spew[x]['last_requested_piece']).ljust(4)
peerdata[16] = str(spew[x]['last_received_piece']).ljust(4)
info += '\t'.join(peerdata) + '\n'
info += '\n\nTOTALS: up=' + '%.0f kB/s' % (float(tot_uprate) / 1024) + ' down=' + '%.0f kB/s' % (float(tot_downrate) / 1024) + ' downloaded=' + '%.2f MiB' % (float(tot_downloaded) / 1048576) + '\n\n'
if vod_stats is not None:
for pos, data in vod_stats['proxybuf'].iteritems():
length = len(data)
info += str(pos) + ' '
for i in xrange(length / 131072):
info += '-'
info += str(pos + length - 1) + '\n'
info += 'buf: ' + str(vod_stats['outbuf']) + '\n'
if vod_stats['videostatus'] is not None:
vs = vod_stats['videostatus']
info += ' >> idx: ' + str(vs.fileindex)
info += ', br: ' + str(vs.bitrate / 1024)
info += ', len: ' + str(vs.piecelen / 1024)
info += ', first: ' + str(vs.first_piece)
info += ', last: ' + str(vs.last_piece)
info += ', have: ' + str(vs.numhave)
info += ', comp: %.2f' % vs.completed
info += ', prebuf: ' + str(vs.prebuffering)
info += ', pos: ' + str(vs.playback_pos)
info += ', hp: ' + str(vs.prebuf_high_priority_pieces)
info += ', pp: ' + str(vs.prebuf_missing_pieces)
have = vs.have[:]
have.sort()
info += ', pieces: ' + str(have)
for vs in vod_stats['extra_videostatus']:
info += '\n index: ' + str(vs.fileindex)
info += ', first piece: ' + str(vs.first_piece)
info += ', last piece: ' + str(vs.last_piece)
info += ', numhave: ' + str(vs.numhave)
info += ', completed: %.2f' % vs.completed
info += ', prebuf: ' + str(vs.prebuffering)
info += ', hp: ' + str(vs.prebuf_high_priority_pieces)
info += ', pp: ' + str(vs.prebuf_missing_pieces)
have = vs.have[:]
have.sort()
info += ', pieces: ' + str(have)
if statistics is not None:
for piece in xrange(len(statistics.storage_inactive_list)):
inactive = statistics.storage_inactive_list[piece]
if inactive is None:
inactive = 'all'
elif inactive == 1:
inactive = 'none'
else:
inactive = str(len(inactive))
info += '\n' + str(piece) + ': inactive=' + inactive + ' active=' + str(statistics.storage_active_list[piece]) + ' dirty=' + str(statistics.storage_dirty_list[piece])
if len(info):
self.debug_counter += 1
try:
filename = 'stat_snapshot_' + str(self.debug_counter).rjust(4, '0') + '_' + str(int(time.time())) + '.txt'
f = open(os.path.join(self.installdir, filename), 'w')
f.write(info)
f.close()
except:
raise
def OnSetSysTrayTooltip(self, txt):
    # Forward the tooltip text to the platform tray-icon wrapper.
    # Errors are deliberately swallowed: tooltip updates are cosmetic and
    # the wrapper may be absent on some platforms/builds.
    try:
        self.wrapper.set_icon_tooltip(txt)
    except:
        pass
def restart_other_downloads(self):
    """Switch back to seeding mode and resume downloads that were paused
    while video-on-demand playback was active."""
    if self.shuttingdown:
        return
    if DEBUG:
        log('baseapp::restart_other_downloads: ---')
    # Seeding mode: rate manager that divides available bandwidth equally.
    self.playermode = DLSTATUS_SEEDING
    self.ratelimiter = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager()
    self.set_ratelimits()
    dlist = self.s.get_downloads()
    for d in dlist:
        if d.is_hidden():
            # Hidden downloads (e.g. preloaded ads) are only unpaused,
            # never restarted/seeded.
            ds = d.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
            if ds.get_status() != DLSTATUS_STOPPED:
                if DEBUG_HIDDEN_DOWNLOADS:
                    log('baseapp::restart_other_downloads: unpause hidden download: hash', binascii.hexlify(d.get_hash()))
                d.pause(False)
            continue
        if d not in self.downloads_in_vodmode:
            ds = d.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
            if ds.get_status() == DLSTATUS_STOPPED and ds.get_progress() == 1.0:
                # Fully downloaded but stopped: restart in normal mode to seed.
                if DEBUG:
                    log('baseapp::restart_other_downloads: start seeding: infohash', binascii.hexlify(d.get_hash()))
                d.set_mode(DLMODE_NORMAL)
                d.restart()
            else:
                d.pause(False)
def stop_other_downloads(self):
    """Pause or stop every download that is not part of the current VOD
    playback and is not one of its ad downloads."""
    if self.shuttingdown:
        return
    if DEBUG:
        log('baseapp::stop_other_downloads: ---')
    self.playermode = DLSTATUS_DOWNLOADING
    dlist = self.s.get_downloads()
    for d in dlist:
        if d in self.downloads_in_vodmode:
            continue
        # Skip downloads that serve as ads for a currently playing download.
        is_ad = False
        for maind_d, ads in self.downloads_in_admode.iteritems():
            if d in ads:
                is_ad = True
                break
        if is_ad:
            continue
        ds = d.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
        if ds.get_status() == DLSTATUS_STOPPED:
            continue
        if DEBUG:
            log('baseapp::stop_other_downloads: stop: infohash', binascii.hexlify(d.get_hash()), 'status', dlstatus_strings[ds.get_status()], 'progress', ds.get_progress())
        # Seeding downloads are paused (kept resumable); everything else
        # is stopped outright.
        if ds.get_status() == DLSTATUS_SEEDING:
            d.pause(True, close_connections=True)
        else:
            d.stop()
def stop_hidden_downloads(self):
    """Stop all hidden (ad-preload) downloads; remove those that never
    made any progress, keeping the rest resumable."""
    if self.shuttingdown:
        return
    if DEBUG_HIDDEN_DOWNLOADS:
        log('baseapp::stop_hidden_downloads: ---')
    dlist = self.s.get_downloads()
    for d in dlist:
        if not d.is_hidden():
            continue
        if d in self.downloads_in_vodmode:
            continue
        # Hidden downloads that act as ads for a playing download stay alive.
        is_ad = False
        for maind_d, ads in self.downloads_in_admode.iteritems():
            if d in ads:
                is_ad = True
                break
        if is_ad:
            continue
        ds = d.network_get_state(usercallback=None, getpeerlist=False, sessioncalling=True)
        if ds.get_status() == DLSTATUS_STOPPED:
            if ds.get_progress() == 0.0:
                # Stopped with zero progress: nothing worth keeping on disk.
                if DEBUG_HIDDEN_DOWNLOADS:
                    log('baseapp::stop_hidden_downloads: remove: infohash', binascii.hexlify(d.get_hash()), 'status', dlstatus_strings[ds.get_status()], 'progress', ds.get_progress())
                self.s.remove_download(d, removecontent=True)
            continue
        if DEBUG_HIDDEN_DOWNLOADS:
            log('baseapp::stop_hidden_downloads: stop: infohash', binascii.hexlify(d.get_hash()))
        d.stop()
def remove_downloads_in_vodmode_if_not_complete(self):
    """Schedule a state check on every playing download; the callback
    (sesscb_remove_playing_callback) decides keep-vs-remove per download."""
    if DEBUG:
        log('BaseApp::remove_downloads_in_vodmode_if_not_complete: Removing playing download if not complete')
    for d in self.downloads_in_vodmode:
        d.set_state_callback(self.sesscb_remove_playing_callback)
def sesscb_remove_playing_callback(self, ds):
    """Download-state callback: decide whether a playing download that is
    going away should keep its data (seeding / nearly finished) or be
    removed together with its partial content.

    Returns (-1.0, False) so the session does not reschedule the callback.
    """
    if DEBUG:
        d = ds.get_download()
        dlhash = binascii.hexlify(d.get_hash())
        log('BaseApp::sesscb_remove_playing_callback: type', d.get_type(), 'hash', dlhash, 'status', dlstatus_strings[ds.get_status()], 'progress', ds.get_progress())
    self.update_download_stats(ds, True)
    d = ds.get_download()
    if d.get_type() == DLTYPE_TORRENT:
        live = d.get_def().get_live()
    else:
        live = False
    # Live streams have no replay value; otherwise keep the data when it is
    # nearly complete, seeding, or still being hash-checked.
    if live:
        remove_content = True
    elif ds.get_status() == DLSTATUS_DOWNLOADING and ds.get_progress() >= MIN_PROGRESS_KEEP:
        remove_content = False
    elif ds.get_status() == DLSTATUS_SEEDING:
        remove_content = False
    elif ds.get_status() == DLSTATUS_HASHCHECKING:
        remove_content = False
    else:
        remove_content = True
    if not remove_content:
        if ds.get_status() == DLSTATUS_SEEDING:
            # Keep seeding; possibly drop only the bookkeeping info.
            can_remove = self.can_remove_playing_download(d)
            if can_remove:
                self.remove_download_info(d)
            if DEBUG:
                log('baseapp::sesscb_remove_playing_callback: download is seeding, do not stop: dlhash', dlhash, 'remove_dlinfo', can_remove)
        else:
            if DEBUG:
                log('BaseApp::sesscb_remove_playing_callback: keeping: dlhash', dlhash)
            remove_playing_download_lambda = lambda : self.remove_playing_download(d, removecontent=False, stop=True)
            self.run_delayed(remove_playing_download_lambda, 0.1)
    else:
        if DEBUG:
            log('BaseApp::sesscb_remove_playing_callback: voting for removing: dlhash', dlhash)
        if self.shuttingdown:
            # During shutdown the delayed-task queue may never run again,
            # so remove synchronously.
            if DEBUG:
                log('BaseApp::sesscb_remove_playing_callback: shuttingdown, call remove_playing_download immediately')
            self.remove_playing_download(d, removecontent=True)
        else:
            if DEBUG:
                log('BaseApp::sesscb_remove_playing_callback: schedule remove_playing_download')
            remove_playing_download_lambda = lambda : self.remove_playing_download(d, removecontent=True)
            self.run_delayed(remove_playing_download_lambda, 0.1)
    return (-1.0, False)
def remove_playing_download(self, d, removecontent):
    """Remove download *d* from the session (optionally deleting its data)
    and drop the local VOD/ad bookkeeping for it."""
    if self.s is not None:
        if DEBUG:
            log('BaseApp::remove_playing_download: dlhash', binascii.hexlify(d.get_hash()), 'removecontent', removecontent)
        try:
            self.s.remove_download(d, removecontent)
            self.remove_download_info(d)
        except:
            log_exc()
    elif DEBUG:
        log('BaseApp::remove_playing_download: s is None')
def stop_playing_download(self, d):
    """Stop download *d* (keeping its data) and drop the local bookkeeping."""
    if DEBUG:
        log('BaseApp::stop_playing_download: dlhash', binascii.hexlify(d.get_hash()))
    try:
        d.stop()
        self.remove_download_info(d)
    except:
        log_exc()
def remove_download_info(self, d):
    """Forget download *d*: stop its TNS helper (if any) and drop it from
    the VOD-mode and ad-mode registries."""
    if DEBUG:
        log('baseapp::remove_download_info: remove download: hash', binascii.hexlify(d.get_hash()))
    if d in self.downloads_in_vodmode:
        params = self.downloads_in_vodmode[d]
        # 'tns' holds a per-download notification/stats helper that must be
        # stopped before the entry is discarded.
        if params.has_key('tns'):
            if DEBUG:
                log('baseapp::remove_download_info: stop tns: hash', binascii.hexlify(d.get_hash()))
            params['tns'].stop()
        del self.downloads_in_vodmode[d]
    if d in self.downloads_in_admode:
        del self.downloads_in_admode[d]
def set_ratelimits(self):
    """Apply the configured maximum upload rate to the rate limiter.

    Reads the bundled values/uploadlimit.txt as the fallback default when
    the 'total_max_upload_rate' player-config key is unset.

    Bug fixed: the original opened the limit file but never read it, and
    used int(string) -- with `string` being the stdlib module -- which
    raised on every call.
    """
    import os
    current_file_path = os.path.dirname(os.path.realpath(__file__))
    uploadlimitvalue_file = os.path.join(os.path.split(current_file_path)[0], "values", "uploadlimit.txt")
    try:
        f = open(uploadlimitvalue_file, "r")
        try:
            default_rate = int(f.read().strip())
        finally:
            f.close()
    except (IOError, OSError, ValueError):
        # Missing or malformed limit file: fall back to 0 (unlimited).
        default_rate = 0
    uploadrate = float(self.get_playerconfig('total_max_upload_rate', default_rate))
    if DEBUG:
        log('BaseApp::set_ratelimits: Setting max upload rate to', uploadrate)
    if self.ratelimiter is not None:
        self.ratelimiter.set_global_max_speed(UPLOAD, uploadrate)
        self.ratelimiter.set_global_max_seedupload_speed(uploadrate)
def ratelimit_callback(self, dslist):
    """Periodic callback feeding download states to the rate limiter.

    Speeds are only re-balanced on every 4th invocation to keep overhead low.
    """
    limiter = self.ratelimiter
    if limiter is None:
        return
    do_adjust = (self.ratelimit_update_count % 4 == 0)
    self.ratelimit_update_count += 1
    if do_adjust:
        limiter.add_downloadstatelist(dslist)
        limiter.adjust_speeds()
def load_playerconfig(self, state_dir):
    """Load the persisted player configuration from *state_dir*.

    Falls back to an empty configuration when the file is absent or
    cannot be unpickled.
    """
    cfg_path = os.path.join(state_dir, 'playerconf.pickle')
    self.playercfgfilename = cfg_path
    self.playerconfig = {}
    if not os.path.isfile(cfg_path):
        return
    try:
        cfg_file = open(cfg_path, 'rb')
        self.playerconfig = pickle.load(cfg_file)
        cfg_file.close()
    except:
        print_exc()
        self.playerconfig = {}
def save_playerconfig(self):
    """Persist the current player configuration; failures are only logged."""
    try:
        out = open(self.playercfgfilename, 'wb')
        pickle.dump(self.playerconfig, out)
        out.close()
    except:
        log_exc()
def set_playerconfig(self, key, value):
    """Set *key* to *value* in the player config and return the previous
    value (None when the key was unset).

    Changing 'total_max_upload_rate' re-applies the rate limits immediately.
    """
    # dict.has_key() was removed in Python 3; `key in dict` is equivalent
    # and works identically under Python 2.
    if key in self.playerconfig:
        old_value = self.playerconfig[key]
    else:
        old_value = None
    self.playerconfig[key] = value
    if key == 'total_max_upload_rate':
        try:
            self.set_ratelimits()
        except:
            log_exc()
    return old_value
def update_playerconfig(self, changed_config_params):
    """React to player-config changes: toggle ad preloading, re-apply the
    disk-cache limit, and push streaming settings to playing downloads."""
    if 'enable_interruptable_ads' in changed_config_params:
        value = self.get_playerconfig('enable_interruptable_ads')
        if DEBUG:
            log('baseapp::update_playerconfig: enable_interruptable_ads changed: value', value)
        if value:
            self.run_delayed(self.check_preload_ads, 3.0, 'check_preload_ads')
        else:
            self.run_delayed(self.stop_hidden_downloads, 3.0)
    if 'disk_cache_limit' in changed_config_params:
        if DEBUG:
            log('baseapp::update_playerconfig: disk cache limit changed:', self.get_playerconfig('disk_cache_limit'))
        self.free_up_diskspace_by_downloads()
    # Propagate the (possibly changed) per-download settings to every
    # download currently playing in VOD mode.
    for d in self.downloads_in_vodmode:
        d.set_wait_sufficient_speed(self.get_playerconfig('wait_sufficient_speed'))
        d.set_http_support(self.get_playerconfig('enable_http_support'))
        d.set_player_buffer_time(self.get_playerconfig('player_buffer_time'))
        d.set_live_buffer_time(self.get_playerconfig('live_buffer_time'))
        d.set_max_speed(UPLOAD, self.get_playerconfig('total_max_upload_rate'))
        d.set_max_speed(DOWNLOAD, self.get_playerconfig('total_max_download_rate'), self.get_playerconfig('auto_download_limit'))
        d.set_max_conns(self.get_playerconfig('download_max_connects'))
def get_playerconfig(self, key, default = None):
    """Return the configured value for *key*, or *default* when unset."""
    return self.playerconfig.get(key, default)
def OnExit(self):
    """Shut the application down: persist player config, stop the playing
    downloads, checkpoint and stop the session, and save cookies."""
    log('BaseApp::OnExit:', currentThread().getName())
    self.shuttingdown = True
    self.remove_downloads_in_vodmode_if_not_complete()
    if self.max_download_rate > 0:
        if DEBUG:
            log('baseapp::onexit: save max down rate:', self.max_download_rate)
        self.set_playerconfig('max_download_rate', self.max_download_rate)
    self.save_playerconfig()
    self.i2i_listen_server.shutdown()
    if globalConfig.get_mode() != 'client_console':
        # Give UI-mode cleanup some time before tearing down the session.
        time.sleep(2)
    if self.s is not None:
        try:
            # Persist the current auth level into the session startup config
            # so the next run starts with the same credentials state.
            state_dir = self.s.get_state_dir()
            cfgfilename = Session.get_default_config_filename(state_dir)
            if DEBUG:
                log('baseapp::onexit: save SessionStartupConfig to', cfgfilename)
            scfg = SessionStartupConfig.load(cfgfilename)
            scfg.set_authlevel(self.s.get_authlevel())
            scfg.save(cfgfilename)
        except:
            pass
        self.s.shutdown(hacksessconfcheckpoint=False)
    self.save_cookies()
    if DEBUG:
        self.debug_threads()
def debug_threads_task(self):
    # Periodic self-rescheduling task: dump live threads every 10 minutes.
    try:
        self.debug_threads()
    finally:
        self.run_delayed(self.debug_threads_task, 600)
def debug_threads(self):
    # Dump every live thread with its daemon flag.
    # NOTE(review): `enumerate()` here must be threading.enumerate imported
    # at module level (the builtin enumerate requires an iterable) --
    # confirm against this file's import section.
    log('baseapp::debug_threads: ---')
    count = 0
    for t in enumerate():
        log('baseapp::debug_threads: thread is running', t.name, 'daemon', t.daemon)
        count += 1
    log('baseapp::debug_threads: count', count)
def clear_session_state(self):
    """Remove all non-hidden downloads (torrent and direct) with their
    content; for the acestream app type also wipe the cache directory."""
    try:
        if self.s is not None:
            dlist = self.s.get_downloads(DLTYPE_TORRENT)
            for d in dlist:
                if not d.is_hidden():
                    self.s.remove_download(d, removecontent=True)
            dlist = self.s.get_downloads(DLTYPE_DIRECT)
            for d in dlist:
                if not d.is_hidden():
                    self.s.remove_download(d, removecontent=True)
            if self.apptype == 'acestream':
                # Let the asynchronous removals settle before deleting the
                # directory tree underneath them.
                time.sleep(3)
                path = self.get_default_destdir()
                shutil.rmtree(path, True)
                if DEBUG:
                    log('baseapp::clear_session_state: delete cache dir:', path)
    except:
        log_exc()
    time.sleep(1)
def show_error(self, msg):
    # Minimal error reporting: just log it (no UI dialog in this base class;
    # subclasses may override).
    log('baseapp::show_error:', msg)
def get_default_destdir(self):
    """Determine (and persist) the download/cache destination directory.

    Order: player config, then (win32) the registry DataDir, then
    app-type-specific fallbacks. Raises when no writable directory can
    be found. Note: Python 2 syntax (`print >>`, `raise Exception, ...`).
    """
    dest_dir = self.get_playerconfig('download_dir')
    if dest_dir is not None:
        if DEBUG:
            print >> sys.stderr, 'get_default_destdir: get from config:', dest_dir, type(dest_dir)
    elif sys.platform == 'win32':
        # No configured dir: try the per-user then per-machine registry key.
        registry = Win32RegChecker()
        dest_dir = registry.readKey(HKCU, 'Software\\' + self.registry_key, 'DataDir', ignore_errors=True)
        if dest_dir is None:
            dest_dir = registry.readKey(HKLM, 'Software\\' + self.registry_key, 'DataDir', ignore_errors=True)
        if DEBUG:
            print >> sys.stderr, 'get_default_destdir: get from registry:', dest_dir, type(dest_dir)
    if self.apptype == 'acestream':
        if sys.platform == 'win32' and dest_dir is not None:
            # Normalize to "<drive>:\<CACHE_DIR_NAME>"; reject malformed paths.
            if len(dest_dir) < 2:
                dest_dir = None
            else:
                drive = dest_dir[:2]
                if not drive.endswith(':'):
                    dest_dir = None
                else:
                    dest_dir = os.path.join(drive + '\\', CACHE_DIR_NAME)
        if not self.check_dest_dir(dest_dir, make_hidden=True):
            dest_dir = self.select_dest_dir()
            if DEBUG:
                log('baseapp::get_default_destdir: check_dest_dir() failed, selected:', dest_dir)
    else:
        if dest_dir is not None:
            if not self.check_dest_dir(dest_dir, make_hidden=False):
                dest_dir = None
        if dest_dir is None:
            state_dir = Session.get_default_state_dir()
            dest_dir = os.path.join(state_dir, 'downloads')
            if not self.check_dest_dir(dest_dir, make_hidden=False):
                dest_dir = None
        if dest_dir is None and sys.platform != 'win32':
            # Last-resort location on POSIX systems.
            dest_dir = os.path.join('/tmp', '.ACEStream', 'downloads')
            if not self.check_dest_dir(dest_dir, make_hidden=False):
                dest_dir = None
        if dest_dir is None:
            raise Exception, 'Cannot select dest dir'
    self.set_playerconfig('download_dir', dest_dir)
    return dest_dir
def check_dest_dir(self, dest_dir, make_hidden):
    """Return True when *dest_dir* exists (or could be created) and is
    writable; on win32, *make_hidden* additionally marks it hidden."""
    if dest_dir is None:
        return False
    if not os.path.isdir(dest_dir):
        if DEBUG:
            log('baseapp::check_dest_dir: dest dir is not a directory:', dest_dir)
        try:
            os.makedirs(dest_dir)
        except:
            if DEBUG:
                log('baseapp::check_dest_dir: failed to create dest dir:', dest_dir)
            return False
    if make_hidden and sys.platform == 'win32':
        try:
            proc = os.popen('attrib +h ' + dest_dir)
            proc.close()
        except:
            if DEBUG:
                print_exc()
    # Probe writability by creating a lock file inside the directory.
    lock_path = os.path.join(dest_dir, '.lock')
    try:
        open(lock_path, 'w').close()
    except:
        if DEBUG:
            log('baseapp::check_dest_dir: cannot write to dest dir:', dest_dir)
        return False
    return True
def select_dest_dir(self):
    """Pick a cache directory automatically.

    win32: the usable drive with the most free space; otherwise the session
    state dir, falling back to /tmp. Returns None when nothing is writable.
    """
    dest_dir = None
    if sys.platform == 'win32':
        candidates = []
        drive_list = self.get_drive_list()
        if DEBUG:
            log('>>>drive_list', drive_list)
        for drive in drive_list:
            if DEBUG:
                log('>>>drive1', drive)
            drive = self.format_drive_name(drive) + '\\'
            if DEBUG:
                log('>>>drive2', drive)
            total, free, used = self.get_disk_info(drive)
            if free is not None:
                path = os.path.join(drive, CACHE_DIR_NAME)
                candidates.append((free, path))
        # Most free space first; take the first candidate that is writable.
        candidates.sort(reverse=True)
        if DEBUG:
            log('baseapp::select_dest_dir: candidates', candidates)
        for free, path in candidates:
            if self.check_dest_dir(path, True):
                dest_dir = path
                break
    else:
        state_dir = Session.get_default_state_dir()
        path = os.path.join(state_dir, 'cache')
        if self.check_dest_dir(path, True):
            dest_dir = path
        if dest_dir is None:
            path = os.path.join('/tmp', '.ACEStream', 'cache')
            if self.check_dest_dir(path, make_hidden=True):
                dest_dir = path
    if DEBUG:
        log('baseapp::select_dest_dir: dest dir selected:', dest_dir)
    return dest_dir
def get_preload_ads_enabled(self, default_value = True):
    """Return whether interruptable-ad preloading is enabled.

    Resolution order: player config, then (win32) the registry
    'EnablePreload' value, then *default_value*. The resolved value is
    written back to the player config the first time it is computed.
    """
    enabled = self.get_playerconfig('enable_interruptable_ads')
    if enabled is None:
        if sys.platform == 'win32':
            registry = Win32RegChecker()
            enabled = registry.readKey(HKCU, 'Software\\' + self.registry_key, 'EnablePreload', ignore_errors=True)
            if DEBUG:
                log('baseapp::get_preload_ads_enabled: get from registry HKCU:', enabled)
            if enabled is None:
                enabled = registry.readKey(HKLM, 'Software\\' + self.registry_key, 'EnablePreload', ignore_errors=True)
                if DEBUG:
                    log('baseapp::get_preload_ads_enabled: get from registry HKLM:', enabled)
            if enabled is None:
                enabled = default_value
            else:
                # Registry values arrive as strings/DWORDs; coerce to bool,
                # falling back to the default on malformed data.
                try:
                    enabled = int(enabled)
                    enabled = enabled != 0
                except:
                    enabled = default_value
        else:
            enabled = default_value
        self.set_playerconfig('enable_interruptable_ads', enabled)
    elif DEBUG:
        log('baseapp::get_preload_ads_enabled: get from config:', enabled)
    return enabled
def is_svc(self, dlfile, tdef):
    """Return the list of SVC layer files ([base, enhancement...]) for
    *dlfile*, or None when the torrent is not an SVC torrent."""
    if not tdef.is_multifile_torrent():
        return None
    layers = tdef.get_files(exts=svcextdefaults)
    if not layers:
        return None
    layers.sort()
    # The smallest-sorted enhancement file must match the base file's
    # length for this to count as an SVC set.
    if tdef.get_length(layers[0]) != tdef.get_length(dlfile):
        return None
    return [dlfile] + layers
def i2ithread_readlinecallback(self, ic, cmd):
    # Instance2Instance control-connection callback; intentionally a no-op
    # in this base class (subclasses override to handle commands).
    pass
def make_provider_stream_cache_key(self, provider_key, infohash, device_id, user_login, user_password, user_key):
    """Build the composite cache key identifying a provider stream auth.

    Only the SHA1 digest of the password is embedded, never the clear text.
    """
    parts = [provider_key,
             binascii.hexlify(infohash),
             device_id,
             user_login,
             hashlib.sha1(user_password).hexdigest(),
             user_key]
    return '-'.join(parts)
def update_provider_stream_cache(self, provider_key, infohash, device_id, user_login, user_password, user_key):
    """Record the current time as the last successful auth for this stream."""
    key = self.make_provider_stream_cache_key(provider_key, infohash, device_id, user_login, user_password, user_key)
    if DEBUG:
        log('baseapp::update_provider_stream_cache: save data to provider stream cache: key', key)
    entry = self.provider_stream_cache.setdefault(key, {'last_success': 0})
    entry['last_success'] = time.time()
def check_provider_stream_cache(self, provider_key, infohash, device_id, user_login, user_password, user_key):
    """Return True when a non-expired successful auth is cached for this
    stream; expired entries are evicted on the way."""
    key = self.make_provider_stream_cache_key(provider_key, infohash, device_id, user_login, user_password, user_key)
    if key not in self.provider_stream_cache:
        return False
    else:
        last_success = self.provider_stream_cache[key]['last_success']
        if DEBUG:
            log('baseapp::check_provider_stream_cache: got data from provider stream cache: key', key, 'last_success', last_success)
        if time.time() - last_success > STREAM_CACHE_TTL:
            # Entry is older than the TTL: evict and report a miss.
            if DEBUG:
                log('baseapp::check_provider_stream_cache: data from provider stream cache expired: key', key, 'last_success', last_success)
            del self.provider_stream_cache[key]
            return False
        if DEBUG:
            log('baseapp::check_provider_stream_cache: got valid data from provider stream cache: key', key, 'last_success', last_success)
        return True
def load_cookies(self):
    """Load pickled cookies from self.cookie_file into the cookie jar.

    Returns True on success, False when the file is missing or corrupt.
    """
    try:
        # NOTE(review): opened in text mode ('r'); acceptable for protocol-0
        # pickles under Python 2, which this file targets.
        f = open(self.cookie_file, 'r')
        data = pickle.load(f)
        f.close()
        for c in data:
            if DEBUG:
                log('baseapp::load_cookies: add cookie:', c)
            self.cookie_jar.set_cookie(c)
        return True
    except:
        if DEBUG:
            log('baseapp::load_cookies: cannot load cookies file:', self.cookie_file)
        return False
def save_cookies(self):
    """Pickle all cookies from the cookie jar into self.cookie_file.

    Returns True on success, False when the file cannot be written.
    """
    try:
        # The jar is not picklable directly; snapshot it into a plain list.
        cookies = []
        for c in self.cookie_jar:
            cookies.append(c)
        if DEBUG:
            log('baseapp::save_cookies: file', self.cookie_file, 'cookies', cookies)
        f = open(self.cookie_file, 'w')
        pickle.dump(cookies, f)
        f.close()
        return True
    except:
        if DEBUG:
            log('baseapp::save_cookies: cannot save to file', self.cookie_file)
        return False
def check_premium_status(self, provider_key, content_id, infohash):
    """Ask the TS service whether this content is premium.

    Returns False for a missing content id; a failed request is treated
    as premium (fail-closed). Otherwise True iff the service returns 1.
    """
    if content_id is None:
        if DEBUG_PREMIUM:
            log('baseapp::check_premium_status: empty content id')
        return False
    status = self.tsservice.check_premium_status(provider_key, content_id, infohash)
    if DEBUG_PREMIUM:
        log('baseapp::check_premium_status: provider_key', provider_key, 'content_id', content_id, 'status', status)
    if status is None:
        if DEBUG_PREMIUM:
            log('baseapp::check_premium_status: request failed, consider premium: provider_key', provider_key, 'content_id', content_id)
        return True
    return status == 1
def report_premium_download(self, provider_key, content_id, params):
    """Periodically report a premium download and re-verify the user.

    *params* carries the per-download timers ('last_report',
    'report_interval', 'last_user_check', 'user_check_interval'), which
    are updated in place. Returns False when the user's auth level is no
    longer premium (!= 2), True otherwise.
    """
    report = False
    check_user = False
    user_ok = True
    # Report immediately on the first call, then every 'report_interval'.
    if not params.has_key('last_report'):
        if DEBUG_PREMIUM:
            log('baseapp::report_premium_download: not yet reported')
        report = True
    elif params['last_report'] < time.time() - params['report_interval']:
        if DEBUG_PREMIUM:
            log('baseapp::report_premium_download: time to report: last', params['last_report'], 'now', time.time(), 'interval', params['report_interval'])
        report = True
    # Same pattern for the user-level re-check.
    if not params.has_key('last_user_check'):
        if DEBUG_PREMIUM:
            log('baseapp::report_premium_download: user not checked')
        check_user = True
    elif params['last_user_check'] < time.time() - params['user_check_interval']:
        if DEBUG_PREMIUM:
            log('baseapp::report_premium_download: time to check user: last', params['last_user_check'], 'now', time.time(), 'interval', params['user_check_interval'])
        check_user = True
    if report:
        params['last_report'] = time.time()
        user_login = self.s.get_ts_login()
        self.tsservice.report_premium_download(params['download_id'], provider_key, content_id, user_login)
    if check_user:
        params['last_user_check'] = time.time()
        user_level = self.s.get_authlevel()
        if user_level != 2:
            if DEBUG_PREMIUM:
                log('baseapp::report_premium_download: user auth failed: level', user_level)
            user_ok = False
    return user_ok
def check_statistics_settings(self):
    """Refresh statistics-reporting settings and reschedule this check.

    On any failure the next attempt is scheduled in one hour.
    """
    if DEBUG:
        log('baseapp::check_statistics_settings: ---')
    try:
        timeout = self.stat_settings.check_settings()
        self.traffic_stats.set_url_list(self.stat_settings.get_url_list('ts'))
    except:
        if DEBUG:
            print_exc()
        timeout = 3600
    finally:
        if DEBUG:
            log('baseapp::check_statistics_settings: next run in', timeout)
        self.run_delayed(self.check_statistics_settings, timeout)
def tns_send_event(self, d, event, event_data = None, delay = 0):
    # Forward an event to the TNS helper attached to download *d*, if the
    # download is currently playing and has one; never let errors leak.
    try:
        if d in self.downloads_in_vodmode:
            dparams = self.downloads_in_vodmode[d]
            if dparams.has_key('tns'):
                dparams['tns'].send_event(event, event_data, delay)
    except:
        print_exc()
def init_hardware_key(self):
    # Obtain the machine-specific hardware key; failure leaves it as None
    # so callers can detect that hardware identification is unavailable.
    try:
        self.hardware_key = get_hardware_key()
        if DEBUG:
            log('baseapp::init_hardware_key: got key:', self.hardware_key)
    except:
        if DEBUG:
            print_exc()
        self.hardware_key = None
def check_integrity(self):
    """Anti-tamper self-check (win32 only).

    Verifies the executable name and the SHA1 checksums of bundled
    binaries against obfuscated constants. Each string is stored both in
    clear text and in an obfuscated numeric form decoded by get_string();
    check_string() compares the two so that patching either copy of the
    table is detected. Returns True on non-win32 platforms.
    """
    if sys.platform != 'win32':
        return True
    if not self.check_string('.Torrent Stream', '64048011141141110101' + '1611230380611411101790901'):
        if DEVELOPER_MODE:
            log('string failed')
        return False
    selfpath = sys.argv[0]
    exename = os.path.basename(selfpath)
    # Expected engine executable names differ per app flavour.
    if self.apptype == 'torrentstream':
        check_exe1 = 'tsengine.exe'
        check_exe2 = 'tsengine'
        check_exe3 = '61151110101130150' + '1011101640101021101'
        check_exe4 = '61151110' + '1011301501011101'
    else:
        check_exe1 = 'ace_engine.exe'
        check_exe2 = 'ace_engine'
        check_exe3 = '79099010159' + '0101011301501011101640101021101'
        check_exe4 = '790990101590101011' + '301501011101'
    if exename != check_exe1 and exename != check_exe2:
        if DEVELOPER_MODE:
            log('exename failed:', exename)
        return False
    if not (self.check_string(exename, check_exe3) or self.check_string(exename, check_exe4)):
        if DEVELOPER_MODE:
            log('exename failed 2')
        return False
    base = os.path.abspath(os.path.dirname(selfpath))
    if DEVELOPER_MODE:
        log('selfpath', selfpath, 'exename', exename, 'base', base)
    # Table of bundled files: 'path2' is the obfuscated path; when present,
    # 'check' is the expected SHA1 and 'check2' its obfuscated duplicate.
    files = []
    files.append({'path': 'lib\\pycompat27.pyd',
                  'path2': '801501890290211121990111901211790611050550640211121001'})
    files.append({'path': '..\\updater\\tsupdate.exe',
                  'path2': '640640290711211001790611101411290611511711211001790611101640101021101'})
    files.append({'path': '..\\player\\npts_plugin.dll',
                  'path2': '640640290211801790121101411290011211611511590211801711301501011640001801801'})
    files.append({'path': '..\\player\\tsplayer.exe',
                  'path2': '640640290211801790121101411290611511211801790121101411640101021101'})
    files.append({'path': 'python27.dll',
                  'path2': '211121611401111011050550640001801801',
                  'check': '4cad50ea762261d7f1361f7095cc6c740c2aa1b6',
                  'check2': '250990790001350840101790550450050050450940001550201940150450940201550840750350990990450990550250840990050790790940890450'})
    files.append({'path': 'lib\\_ctypes.pyd',
                  'path2': '801501890290590990611121211101511640211121001',
                  'check': '616293e45730b2d4b49002d65cac9fb319c44aa2',
                  'check2': '450940450050750150101250350550150840890050001250890250750840840050001450350990790990750201890150940750990250250790790050'})
    files.append({'path': 'lib\\_hashlib.pyd',
                  'path2': '801501890290590401790511401801501890640211121001',
                  'check': '3e5e42e2ff2bfdfa36fad0a14d18a5508717ee47',
                  'check2': '150101350101250050101050201201050890201001201790150450201790001840790940250001940650790350350840650550940550101101250550'})
    files.append({'path': 'lib\\_socket.pyd',
                  'path2': '801501890290590511111990701101611640211121001',
                  'check': '95deea9dbbf5c19d8042439bd676c2c3e6b47328',
                  'check2': '750350001101101790750001890890201350990940750001650840250050250150750890001450550450990050990150101450890250550150050650'})
    files.append({'path': 'lib\\_sqlite3.pyd',
                  'path2': '801501890290590511311801501611101150640211121001',
                  'check': 'dc0dadc7e0a73ca83c7f6fa21e807b5eb8ff67e1',
                  'check2': '001990840001790001990550101840790550150990790650150990550201450201790050940101650840550890350101890650201201450550101940'})
    files.append({'path': 'lib\\_ssl.pyd',
                  'path2': '801501890290590511511801640211121001',
                  'check': '7d656f10b4d9d7f6d55caaa626e5975422637466',
                  'check2': '550001450350450201940840890250001750001550201450001350350990790790790450050450101350750550350250050050450150550250450450'})
    files.append({'path': 'lib\\LIBEAY32.dll',
                  'path2': '801501890290670370660960560980150050640001801801',
                  'check': '3fc80784b3f0714a1859521f990965b949a71536',
                  'check2': '150201990650840550650250890150201840550940250790940650350750350050940201750750840750450350890750250750790550940350150450'})
    files.append({'path': 'lib\\M2Crypto.__m2crypto.pyd',
                  'path2': '801501890290770050760411121211611111640590590901050990411121211611111640211121001',
                  'check': '01a2dbcfe59602b45fa9c389cb604570ca71dbf1',
                  'check2': '840940790050001890990201101350750450840050890250350201790750990150650750990890450840250350550840990790550940001890201940'})
    files.append({'path': 'lib\\pycompat.pyd',
                  'path2': '801501890290211121990111901211790611640211121001',
                  'check': 'e282471605acb12f842fe1047ca445e819297762',
                  'check2': '101050650050250550940450840350790990890940050201650250050201101940840250550990790250250350101650940750050750550550450050'})
    files.append({'path': 'lib\\SSLEAY32.dll',
                  'path2': '801501890290380380670960560980150050640001801801',
                  'check': '42323e4435bc986c45c9a2b841e7da7b6a98b228',
                  'check2': '250050150050150101250250150350890990750650450990250350990750790050890650250940101550001790550890450790750650890050050650'})
    files.append({'path': 'lib\\wxbase28uh_vc.dll',
                  'path2': '801501890290911021890790511101050650711401590811990640001801801',
                  'check': '22a7683af988f5d0bef8abe4934dba03a093f21d',
                  'check2': '050050790550450650150790201750650650201350001840890101201650790890101250750150250001890790840150790840750150201050940001'})
    files.append({'path': 'lib\\wxmsw28uh_adv_vc.dll',
                  'path2': '801501890290911021901511911050650711401590790001811590811990640001801801',
                  'check': 'd0aac3f14afe9c0bedc9a906b4dd6981597a8685',
                  'check2': '001840790790990150201940250790201101750990840890101001990750790750840450890250001001450750650940350750550790650450650350'})
    files.append({'path': 'tsengine.exe',
                  'path2': '611511101011301501011101640101021101',
                  'check': '1a77f3cf03b882514683af1d6d2f9f0480a4bf2e',
                  'check2': '940790550550201150990201840150890650650050350940250450650150790201940001450001050201750201840250650840790250890201050101'})
    files.append({'path': 'tsengine_stream.exe',
                  'path2': '611511101011301501011101590511611411101790901640101021101',
                  'check': '0c28965c60bae004e0c8a0a79f070dce266f6e33',
                  'check2': '840990050650750450350990450840890790101840840250101840990650790840790550750201840550840001990101050450450201450101150150'})
    return self.check_files(base, files)
def check_files(self, base, files):
    """Verify the integrity-check table *files* against disk.

    Each entry must have a consistent obfuscated path (and, when present,
    checksum), exist under *base*, and match its expected SHA1. Returns
    True only when every entry passes.

    Compatibility fix: dict.has_key() (Python-2-only) replaced with the
    equivalent `in` test.
    """
    for f in files:
        do_check = 'check' in f
        if not self.check_string(f['path'], f['path2']):
            if DEVELOPER_MODE:
                log('path failed:', f['path'])
            return False
        if do_check and not self.check_string(f['check'], f['check2']):
            if DEVELOPER_MODE:
                log('check failed:', f['check'])
            return False
        path = os.path.join(base, f['path'])
        if not self.file_exists(path):
            if DEVELOPER_MODE:
                log('not found:', path)
            return False
        if do_check:
            check = self.file_checksum(path)
            if check != f['check']:
                if DEVELOPER_MODE:
                    log('checksum failed:', path, f['check'], check)
                return False
    return True
def check_string(self, s, check):
    """Return True iff decoding the obfuscated *check* yields exactly *s*."""
    decoded = self.get_string(check)
    if decoded == s:
        return True
    if DEVELOPER_MODE:
        log('check string failed:', s, decoded)
    return False
def get_string(self, s, padding = 3):
    """Decode an obfuscated string.

    Every *padding*-character group of *s* is a reversed, zero-padded
    decimal character code (e.g. '790' -> '097' -> chr(97) == 'a').

    Compatibility fix: Python-2-only `xrange` replaced with `range`
    (identical behaviour for this usage, also valid under Python 3).
    """
    return ''.join([ chr(int(s[i:i + padding][::-1])) for i in range(0, len(s), padding) ])
def file_exists(self, path):
    """Return True when *path* is a regular file that opens for reading."""
    if not os.path.isfile(path):
        return False
    try:
        handle = open(path, 'rb')
        handle.close()
    except:
        return False
    return True
def file_checksum(self, path):
    """Return the SHA1 hex digest of *path*, or '' on error or empty file."""
    f = None
    try:
        f = open(path, 'rb')
        digest = hashlib.sha1()
        got_data = False
        # Stream in 4 KiB chunks; iter() stops on the empty-bytes sentinel.
        for chunk in iter(lambda : f.read(4096), b''):
            got_data = True
            digest.update(chunk)
        if not got_data:
            return ''
        return digest.hexdigest()
    except:
        if DEBUG:
            print_exc()
        return ''
    finally:
        if f is not None:
            f.close()
| .kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/ACEStream/Player/BaseApp.py | 153,607 | Embedded file name: ACEStream\Player\BaseApp.pyo | 48 | en | 0.612645 |
"""
Support for Homekit number ranges.
These are mostly used where a HomeKit accessory exposes additional non-standard
characteristics that don't map to a Home Assistant feature.
"""
from aiohomekit.model.characteristics import Characteristic, CharacteristicsTypes
from homeassistant.components.number import NumberEntity
from homeassistant.core import callback
from . import KNOWN_DEVICES, CharacteristicEntity
# Maps a vendor-specific HomeKit characteristic type to the entity kwargs
# (display name, icon) used when creating the corresponding number entity.
NUMBER_ENTITIES = {
    CharacteristicsTypes.Vendor.VOCOLINC_HUMIDIFIER_SPRAY_LEVEL: {
        "name": "Spray Quantity",
        "icon": "mdi:water",
    },
    CharacteristicsTypes.Vendor.EVE_DEGREE_ELEVATION: {
        "name": "Elevation",
        "icon": "mdi:elevation-rise",
    },
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Homekit numbers."""
    hkid = config_entry.data["AccessoryPairingID"]
    conn = hass.data[KNOWN_DEVICES][hkid]

    @callback
    def async_add_characteristic(char: Characteristic):
        # Only characteristics listed in NUMBER_ENTITIES become number
        # entities; returning False tells the connection the characteristic
        # was not consumed by this platform.
        kwargs = NUMBER_ENTITIES.get(char.type)
        if not kwargs:
            return False
        info = {"aid": char.service.accessory.aid, "iid": char.service.iid}
        async_add_entities([HomeKitNumber(conn, info, char, **kwargs)], True)
        return True

    conn.add_char_factory(async_add_characteristic)
class HomeKitNumber(CharacteristicEntity, NumberEntity):
    """Representation of a Number control on a homekit accessory."""

    def __init__(
        self,
        conn,
        info,
        char,
        device_class=None,
        icon=None,
        name=None,
        **kwargs,
    ):
        """Initialise a HomeKit number control.

        Extra **kwargs from the NUMBER_ENTITIES table are accepted and
        ignored, so new table keys cannot break construction.
        """
        self._device_class = device_class
        self._icon = icon
        self._name = name
        super().__init__(conn, info, char)

    def get_characteristic_types(self):
        """Define the homekit characteristics the entity is tracking."""
        return [self._char.type]

    @property
    def device_class(self):
        """Return type of sensor."""
        return self._device_class

    @property
    def icon(self):
        """Return the sensor icon."""
        return self._icon

    # min/max/step come straight from the HomeKit characteristic metadata.
    @property
    def min_value(self) -> float:
        """Return the minimum value."""
        return self._char.minValue

    @property
    def max_value(self) -> float:
        """Return the maximum value."""
        return self._char.maxValue

    @property
    def step(self) -> float:
        """Return the increment/decrement step."""
        return self._char.minStep

    @property
    def value(self) -> float:
        """Return the current characteristic value."""
        return self._char.value

    async def async_set_value(self, value: float):
        """Set the characteristic to this value."""
        await self.async_put_characteristics(
            {
                self._char.type: value,
            }
        )
| homeassistant/components/homekit_controller/number.py | 2,876 | Representation of a Number control on a homekit accessory.
Initialise a HomeKit number control.
Return type of sensor.
Define the homekit characteristics the entity is tracking.
Return the sensor icon.
Return the maximum value.
Return the minimum value.
Return the increment/decrement step.
Return the current characteristic value.
Support for Homekit number ranges.
These are mostly used where a HomeKit accessory exposes additional non-standard
characteristics that don't map to a Home Assistant feature. | 507 | en | 0.74247 |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
app_name = 'services_communicator'

# Raw strings prevent "\d" being treated as an (invalid) string escape --
# a DeprecationWarning since Python 3.6 and a SyntaxError in the future.
urlpatterns = [
    url(
        regex=r"^ServiceList/~create/$",
        view=views.ServiceListCreateView.as_view(),
        name='ServiceList_create',
    ),
    url(
        regex=r"^ServiceList/(?P<pk>\d+)/~delete/$",
        view=views.ServiceListDeleteView.as_view(),
        name='ServiceList_delete',
    ),
    url(
        regex=r"^ServiceList/(?P<pk>\d+)/$",
        view=views.ServiceListDetailView.as_view(),
        name='ServiceList_detail',
    ),
    url(
        regex=r"^ServiceList/(?P<pk>\d+)/~update/$",
        view=views.ServiceListUpdateView.as_view(),
        name='ServiceList_update',
    ),
    url(
        regex=r"^ServiceList/$",
        view=views.ServiceListListView.as_view(),
        name='ServiceList_list',
    ),
]
| services_communicator/urls.py | 911 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
#
# Copyright 2018 EveryUP Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User, AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.utils import timezone
from authosm.exceptions import OSMAuthException
from lib.osm.osmclient.clientv2 import Client
import utils
class OsmUserManager(BaseUserManager):
    """Custom manager for OsmUser."""

    def _create_user(self, username, password, is_staff, is_superuser, **extra_fields):
        """Create and save a CustomUser with the given username and password. """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        # "is_active" may be overridden through extra_fields; defaults True.
        is_active = extra_fields.pop("is_active", True)
        user = self.model(username=username, is_staff=is_staff, is_active=is_active,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, password, **extra_fields):
        """Create and save an OsmUser with the given username and password."""
        return self._create_user(username, password, True, True, is_admin=True,
                                 **extra_fields)
class AbstractOsmUser(AbstractBaseUser, PermissionsMixin):
    """Abstract User with the same behaviour as Django's default User.

    Inherits from both the AbstractBaseUser and PermissionMixin.
    The following attributes are inherited from the superclasses:
        * password
        * last_login
        * is_superuser
    """
    username = models.CharField(_('username'), primary_key=True, max_length=255, unique=True, db_index=True)
    is_admin = models.BooleanField(_('admin status'), default=False)
    is_basic_user = models.BooleanField(_('basic_user status'), default=False)
    current_project = models.CharField(_('project_id'), max_length=255)
    # SECURITY NOTE(review): `psw` stores the OSM password in clear text so
    # that switch_project() can replay it to the OSM client -- confirm this
    # is acceptable for the deployment.
    psw = models.CharField(_('psw'), max_length=36)
    token = models.CharField(_('token'), max_length=36)
    project_id = models.CharField(_('project_id'), max_length=36)
    # NOTE: max_length is meaningless for FloatField; kept for compatibility.
    token_expires = models.FloatField(_('token_expires'), max_length=36)

    objects = OsmUserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []

    @property
    def is_authenticated(self):
        """Checks for a valid authentication."""
        return bool(self.token is not None and
                    utils.is_token_valid({'expires': self.token_expires}))

    def get_token(self):
        """Return the current OSM token dict, or None when not authenticated."""
        if self.is_authenticated:
            return {'id': self.token, 'expires': self.token_expires, 'project_id': self.project_id}
        return None

    def get_projects(self):
        """Return the user's OSM projects ([] when the client reports an error)."""
        client = Client()
        result = client.get_user_info(self.get_token(), self.username)
        if 'error' in result and result['error'] is True:
            return []
        return result['data']['projects']

    def switch_project(self, project_id):
        """Switch the active OSM project and refresh the stored token.

        Returns True on success.

        Raises:
            OSMAuthException: if the OSM client reports an error.
        """
        client = Client()
        result = client.switch_project({'project_id': project_id, 'username': self.username, 'password': self.psw})
        if 'error' in result and result['error'] is True:
            raise OSMAuthException(result['data'])
        self.token = result['data']['id']
        self.project_id = result['data']['project_id']
        self.token_expires = result['data']['expires']
        self.save()
        return True
        # Fix: a trailing unreachable `return False` was removed -- both
        # branches above either raise or return.

    class Meta:
        verbose_name = _('custom user')
        verbose_name_plural = _('custom users')
        abstract = True
class OsmUser(AbstractOsmUser):
    """
    Concrete class of AbstractCustomUser.
    Use this if you don't need to extend CustomUser.
    """
    class Meta(AbstractOsmUser.Meta):
        # Lets projects replace this model via settings.AUTH_USER_MODEL.
        swappable = 'AUTH_USER_MODEL'
| LW-UI/authosm/models.py | 4,568 | Abstract User with the same behaviour as Django's default User.
Inherits from both the AbstractBaseUser and PermissionMixin.
The following attributes are inherited from the superclasses:
* password
* last_login
* is_superuser
Concrete class of AbstractCustomUser.
Use this if you don't need to extend CustomUser.
Custom manager for OsmUser.
Create and save a CustomUser with the given username and password.
Checks for a valid authentication.
Copyright 2018 EveryUP Srl Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1,023 | en | 0.833735 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
feedforward neural network
"""
import mindspore.nn as nn
from mindelec.architecture import get_activation, LinearBlock
class FFNN(nn.Cell):
    """
    Full-connect networks.

    Five linear blocks; the activation is applied after each of the four
    hidden layers, while the final layer stays linear.

    Args:
        input_dim (int): the input dimensions.
        output_dim (int): the output dimensions.
        hidden_layer (int): number of hidden layers.
        activation (str or Cell): activation functions.
    """

    def __init__(self, input_dim, output_dim, hidden_layer=64, activation="sin"):
        super(FFNN, self).__init__()
        self.activation = get_activation(activation)
        self.fc1 = LinearBlock(input_dim, hidden_layer)
        self.fc2 = LinearBlock(hidden_layer, hidden_layer)
        self.fc3 = LinearBlock(hidden_layer, hidden_layer)
        self.fc4 = LinearBlock(hidden_layer, hidden_layer)
        self.fc5 = LinearBlock(hidden_layer, output_dim)

    def construct(self, *inputs):
        """fc network: four activated hidden layers, then a linear head."""
        hidden = self.activation(self.fc1(inputs[0]))
        hidden = self.activation(self.fc2(hidden))
        hidden = self.activation(self.fc3(hidden))
        hidden = self.activation(self.fc4(hidden))
        return self.fc5(hidden)
| MindElec/examples/physics_driven/frequency_domain_maxwell/src/model.py | 1,901 | Full-connect networks.
Args:
input_dim (int): the input dimensions.
output_dim (int): the output dimensions.
hidden_layer (int): number of hidden layers.
activation (str or Cell): activation functions.
fc network
feedforward neural network
Copyright 2021 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 883 | en | 0.761555 |
# !/usr/bin/python3
# coding: utf_8
""" Config your app """
import os
from hal.files.parsers import JSONParser
from hal.files.save_as import write_dicts_to_json
from .config import APP_FOLDER, API_FOLDER, DATA_FOLDER
from .data.coins import CryptoCoin, CRYPTO_COINS
class ConfigManager:
    """Reads, caches and persists the app's local JSON config file."""

    def __init__(self, config_file):
        create_workplace()

        self.config_file = config_file
        self.raw = None
        self.data = {}

        self._check()
        self._read_config()

    def _read_config(self):
        """
        :return: {}
            Config data merged into ``self.data``
        """
        self.raw = JSONParser(self.config_file).get_content()
        self.data.update(self.raw)

    def _check(self):
        """Raise if the config file does not exist on disk."""
        if not os.path.exists(self.config_file):
            raise ValueError(
                "Empty config file! Please write your settings "
                "and store at " + self.config_file
            )

    def create_config(self):
        """
        :return: void
            Creates config file
        """
        if os.path.exists(self.config_file):
            raise ValueError("Creating new config will erase previous data!")

        write_dicts_to_json({}, self.config_file)  # empty data

    def get(self, key):
        """
        :param key: str
            What you want
        :return: {}
            Item you want
        """
        return self.data[key]

    def save(self):
        """
        :return: void
            Saves app data to local config file
        """
        write_dicts_to_json(self.data, self.config_file)
def create_workplace():
    """
    :return: void
        Creates the app, api and data folders if missing
    """
    for directory in [APP_FOLDER, API_FOLDER, DATA_FOLDER]:
        # exist_ok avoids the check-then-create race of the previous
        # `if not exists: makedirs` pattern when two processes start at once.
        os.makedirs(directory, exist_ok=True)
def get_coin(symbol):
    """
    :param symbol: str
        Symbol of coin
    :return: CryptoCoin
        Coin if a crypto-coin exists with that symbol, else None
    """
    for coin in CRYPTO_COINS:
        # Fix: the original compared the string `coin.symbol` against a whole
        # CryptoCoin instance (`candidate`), so a match depended on an unseen
        # reflected __eq__; compare the symbols directly instead.
        if coin.symbol == symbol:
            return coin
| pyhodl/app.py | 2,172 | Manages config files for app
:return: {}
Config data
:return: void
Creates config file
:return: void
Creates folder
:param key: str
What you want
:return: {}
Item you want
:param symbol: str
Symbol of coin
:return: CryptoCoin
Coin if a crypto-coin exists with that name
:return: void
Saves app data to local config file
Config your app
!/usr/bin/python3 coding: utf_8 empty data | 414 | en | 0.561655 |
"""is_sqllab_view
Revision ID: 130915240929
Revises: f231d82b9b26
Create Date: 2018-04-03 08:19:34.098789
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
from rabbitai import db
# revision identifiers, used by Alembic.
revision = "130915240929"
down_revision = "f231d82b9b26"

# Declarative base for the lightweight ORM model used only in this migration.
Base = declarative_base()
class Table(Base):
    """Declarative class to do query in upgrade"""

    __tablename__ = "tables"
    # Only the columns this migration touches are declared.
    id = sa.Column(sa.Integer, primary_key=True)
    sql = sa.Column(sa.Text)
    is_sqllab_view = sa.Column(sa.Boolean())
def upgrade():
    """Add ``tables.is_sqllab_view`` and backfill it for existing rows."""
    bind = op.get_bind()
    # New column defaults to false both in the ORM and at the server level.
    op.add_column(
        "tables",
        sa.Column(
            "is_sqllab_view",
            sa.Boolean(),
            nullable=True,
            default=False,
            server_default=sa.false(),
        ),
    )

    session = db.Session(bind=bind)

    # Use Slice class defined here instead of models.Slice
    # Any existing table that carries stored SQL is flagged as a SQL Lab view.
    for tbl in session.query(Table).all():
        if tbl.sql:
            tbl.is_sqllab_view = True
    session.commit()
    # NOTE(review): commits the local `session` but closes the global
    # `db.session` -- presumably intentional cleanup; confirm.
    db.session.close()
def downgrade():
    """Drop the ``is_sqllab_view`` column added by this migration."""
    op.drop_column("tables", "is_sqllab_view")
| rabbitai/migrations/versions/130915240929_is_sqllab_viz_flow.py | 1,164 | Declarative class to do query in upgrade
is_sqllab_view
Revision ID: 130915240929
Revises: f231d82b9b26
Create Date: 2018-04-03 08:19:34.098789
revision identifiers, used by Alembic. Use Slice class defined here instead of models.Slice | 238 | en | 0.610533 |
from collections import OrderedDict
from . import util
from ..errors import ModelInfoLookupError
class ModelInfo:

    def __init__(self, pairs=[], default_fields=None):
        """
        Constructs a mapping of information about a model.
        :class:`~revscoring.scoring.ModelInfo` objects are usually nested
        within each other to provide a convenient tree structure for
        :func:`~revscoring.scoring.ModelInfo.lookup` and
        :func:`~revscoring.scoring.ModelInfo.format`.
        """
        # NOTE: the mutable default `pairs=[]` is only ever read, never
        # mutated, so the shared-default pitfall does not apply here.
        self._data = OrderedDict(pairs)
        # `None` means "no restriction": every key is a default field.
        self._default_fields = set(default_fields) \
            if default_fields is not None else None

    def __len__(self):
        return len(self.keys())

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __contains__(self, key):
        """Membership test mirroring the key coercions of `try_key`:
        the literal key, 'true'/'false' mapped to booleans, or int(key)."""
        try:
            if key in self._data:
                return True
            if key in ('true', 'false'):
                # Fix: the original chained comparison
                # (`key == 'true' in self._data`) tested membership of the
                # *string* 'true', not of the boolean key that `try_key`
                # actually reads -- so 'false' could never be found.
                return (key == 'true') in self._data
            return int(key) in self._data
        except ValueError:
            return False

    def keys(self):
        return self._data.keys()

    def get(self, key, default=None):
        return self._data.get(key, default)

    def values(self):
        return self._data.values()

    def items(self):
        return self._data.items()

    def __iter__(self):
        return iter(self._data)

    def move_to_end(self, key, last=True):
        return self._data.move_to_end(key, last=last)

    def lookup(self, path=None):
        """
        Looks up a specific information value based on either a string pattern
        or a path.

        For example, the pattern "stats.roc_auc.labels.true" is the same as
        the path ``['stats', 'roc_auc', 'labels', True]``.

        :Parameters:
            path : `str` | `list`
                The location of the information to lookup.
        """
        if isinstance(path, str):
            path = util.parse_pattern(path)
        elif path is None:
            path = []

        d = self
        # Fix: work on a copy of the path.  The original popped keys off the
        # caller's list (mutating the input despite its own comment) and then
        # delegated with the *unconsumed* full copy, re-applying keys that
        # had already been traversed.
        remaining_path = list(path)
        while len(remaining_path) > 0:
            key = remaining_path.pop(0)
            d = try_key(key, d)
            if hasattr(d, "lookup"):
                # Delegate the rest of the path to the nested info object.
                return d.lookup(remaining_path)
        return d

    def format(self, paths=None, formatting="str", **kwargs):
        """
        Format a representation of the model information in a useful way.

        :Parameters:
            paths : `iterable` ( `str` | [`str`] )
                A set of paths to use when selecting which information should
                formatted.  Everything beneath a provided path in the tree
                will be formatted.  E.g. `statistics.roc_auc` and `statistics`
                will format redundantly because `roc_auc` is already within
                `statistics`.  Alternatively `statistics.roc_auc` and
                `statistics.pr_auc` will format only those two specific
                bits of information.
            formatting : "json" or "str"
                Which output formatting do you want?  "str" returns something
                nice to show on the command-line.  "json" returns something
                that will pass through :func:`json.dump` without error.
        """
        paths = paths or []
        _paths = [
            util.parse_pattern(path) if isinstance(path, str) else path
            for path in paths]
        path_tree = util.treeify(_paths)

        if formatting == "str":
            return self.format_str(path_tree, **kwargs)
        elif formatting == "json":
            return self.format_json(path_tree, **kwargs)
        else:
            raise ValueError("Formatting {0} is not available for {1}."
                             .format(formatting, self.__class__.__name__))

    def format_str(self, path_tree, **kwargs):
        """Render the selected fields as an indented, human-readable string."""
        formatted = "Model Information:\n"
        for key in self.normalize_fields(path_tree):
            key_val = try_key(key, self)
            if hasattr(key_val, "format_str"):
                # Nested info objects render themselves, indented one level.
                sub_tree = path_tree.get(key, {})
                formatted += util.tab_it_in(
                    key_val.format_str(sub_tree, **kwargs))
            else:
                formatted += util.tab_it_in(" - {0}: {1}"
                                            .format(key, key_val))
        return formatted

    def format_json(self, path_tree, **kwargs):
        """Render the selected fields as a JSON-serializable OrderedDict."""
        d = OrderedDict()
        for key in self.normalize_fields(path_tree):
            key_val = try_key(key, self)
            if hasattr(key_val, "format_json"):
                sub_tree = path_tree.get(key, {})
                d[key] = key_val.format_json(sub_tree, **kwargs)
            else:
                d[key] = key_val
        return d

    def normalize_fields(self, path_tree):
        """Yield the field names selected by `path_tree` (or the defaults)."""
        if len(path_tree) > 0:
            yield from path_tree.keys()
        else:
            for field in self.keys():
                if self._default_fields is None or \
                        field in self._default_fields:
                    yield field
def try_key(key, d):
    """Index ``d`` by ``key``, retrying with 'true'/'false' coerced to
    booleans and numeric strings coerced to ints before giving up."""
    try:
        return d[key]
    except KeyError:
        pass
    try:
        if key in ("true", "false"):
            return d[key == 'true']
        return d[int(key)]
    except ValueError:
        # int(key) failed: key is neither present nor coercible.
        raise ModelInfoLookupError(key)
    except KeyError:
        raise ModelInfoLookupError(key)
| revscoring/scoring/model_info.py | 5,567 | Constructs a mapping of information about a model.
:class:`~revscoring.scoring.ModelInfo` objects are usually nested
within each other to provide a convenient tree structure for
:func:`~revscoring.scoring.ModelInfo.lookup` and
:func:`~revscoring.scoring.ModelInfo.format`.
Format a representation of the model information in a useful way.
:Parameters:
paths : `iterable` ( `str` | [`str`] )
A set of paths to use when selecting which information should
formatted. Everything beneath a provided path in the tree
will be formatted. E.g. `statistics.roc_auc` and `statistics`
will format redundantly because `roc_auc` is already within
`statistics`. Alternatively `statistics.roc_auc` and
`statistics.pr_auc` will format only those two specific
bits of information.
formatting : "json" or "str"
Which output formatting do you want? "str" returns something
nice to show on the command-line. "json" returns something
that will pass through :func:`json.dump` without error.
Looks up a specific information value based on either a string pattern
or a path.
For example, the pattern "stats.roc_auc.labels.true" is the same as
the path ``['stats', 'roc_auc', 'labels', True]``.
:Parameters:
path : `str` | `list`
The location of the information to lookup.
Make sure we don't overwrite the input | 1,395 | en | 0.679014 |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# Define a class to receive the characteristics of each line detection
class Lane():
    """Tracks one lane line (left or right) across video frames.

    Operates on bird's-eye-view binary images: detects lane pixels, fits a
    second-order polynomial x = f(y), and smooths the fit over the last
    ``n`` frames.
    """

    def __init__(self):
        # Current bird's-eye binary image and its shape.
        self.current_warped_binary = None
        self.current_warped_binary_shape = []
        # x / y values for detected line pixels.
        self.allx = None
        self.ally = None
        # y positions (one per image row) the polynomial is evaluated on.
        self.ploty = None
        # Was the line detected in the last iteration?
        self.detected = False
        # Number of frames kept for smoothing.
        self.n = 5
        # x values of the last n fitted curves, their average, and the
        # current frame's curve.
        self.recent_fitted_xs = []
        self.average_fitted_x = []
        self.current_fitted_x = []
        # Polynomial coefficients: last n fits, their average, current fit.
        self.recent_fits = []
        self.average_fit = []
        self.current_fit = [np.array([False])]
        # Difference in fit coefficients between last and new fits.
        self.diffs = np.array([0, 0, 0], dtype='float')
        # Radius of curvature of the line (meters).
        self.radius_of_curvature = []
        # Distance in meters of vehicle center from the line.
        self.line_base_pos = None

    def find_lane_pixels(self, binary_warped, location):
        """Detect lane pixels from scratch with a sliding-window search.

        :param binary_warped: bird's-eye-view binary image.
        :param location: "left" or "right" -- which image half holds the base.
        :return: (x, y) coordinates of the detected lane-line pixels.
        """
        self.current_warped_binary = binary_warped
        self.current_warped_binary_shape = binary_warped.shape
        self.ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])

        # Histogram of the bottom half of the image; its peak in the chosen
        # half marks the lane-line base.
        histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
        # Fix: plain int() replaces np.int, which was removed in NumPy 1.24.
        midpoint = int(histogram.shape[0] // 2)
        if location == "left":
            base = np.argmax(histogram[:midpoint])
        elif location == "right":
            base = np.argmax(histogram[midpoint:]) + midpoint

        # HYPERPARAMETERS
        nwindows = 9  # number of sliding windows
        margin = 80   # window half-width (+/- around the center)
        minpix = 50   # min pixels found to recenter the next window

        window_height = int(binary_warped.shape[0] // nwindows)
        # x and y positions of all nonzero pixels in the image.
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Window center, updated as we climb the image.
        current = base
        lane_inds = []

        # Step through the windows one by one, bottom to top.
        for window in range(nwindows):
            win_y_low = binary_warped.shape[0] - (window + 1) * window_height
            win_y_high = binary_warped.shape[0] - window * window_height
            win_x_low = current - margin
            win_x_high = current + margin

            # Indices of the nonzero pixels inside this window.
            good_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                         (nonzerox >= win_x_low) & (nonzerox < win_x_high)).nonzero()[0]
            lane_inds.append(good_inds)

            # If we found > minpix pixels, recenter the next window on their
            # mean x position.
            if len(good_inds) > minpix:
                current = int(np.mean(nonzerox[good_inds]))

        # Concatenate the arrays of indices (previously a list of arrays).
        try:
            lane_inds = np.concatenate(lane_inds)
        except ValueError:
            # Avoids an error if the above is not implemented fully
            pass

        x = nonzerox[lane_inds]
        y = nonzeroy[lane_inds]
        self.allx = x
        self.ally = y
        return x, y

    def search_pixel_around_poly(self, binary_warped):
        """Detect lane pixels by searching around the previous frame's fit."""
        self.current_warped_binary = binary_warped
        self.current_warped_binary_shape = binary_warped.shape
        self.ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])

        # Half-width of the search band around the previous polynomial.
        margin = 80

        # Grab activated pixels.
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        fit = self.recent_fits[-1]
        # Boolean mask of pixels within +/- margin of the previous curve.
        lane_inds = ((nonzerox > (fit[0] * (nonzeroy ** 2) + fit[1] * nonzeroy + fit[2] - margin)) & (
                nonzerox < (fit[0] * (nonzeroy ** 2) + fit[1] * nonzeroy + fit[2] + margin)))

        x = nonzerox[lane_inds]
        y = nonzeroy[lane_inds]
        self.allx = x
        self.ally = y
        return x, y

    def fit_polynomial(self):
        """Fit a 2nd-order polynomial to the detected pixels and smooth it
        over the last ``n`` frames.

        :return: the n-frame averaged fitted x values.
        """
        ploty = self.ploty
        fit = np.polyfit(self.ally, self.allx, 2)
        self.current_fit = fit

        # Coefficient change w.r.t. the previous fit.
        if len(self.recent_fits) == 0:
            self.diffs = [0, 0, 0]
        else:
            new = np.array(self.current_fit)
            old = np.array(self.recent_fits[-1])
            self.diffs = new - old

        # Keep only the last n coefficient sets.
        if len(self.recent_fits) < self.n:
            self.recent_fits.append(self.current_fit)
        elif len(self.recent_fits) == self.n:
            self.recent_fits.pop(0)
            self.recent_fits.append(self.current_fit)
        else:
            self.recent_fits.append(self.current_fit)
            self.recent_fits = self.recent_fits[-self.n:]

        self.average_fit = np.array(self.recent_fits).mean(axis=0)

        try:
            x_fitted = self.average_fit[0] * ploty ** 2 + self.average_fit[1] * ploty + self.average_fit[2]
        except TypeError:
            # Avoids an error if the fit is still none or incorrect.
            print('The function failed to fit a line!')
            x_fitted = 1 * ploty ** 2 + 1 * ploty
            self.detected = False
        else:
            self.detected = True
        self.current_fitted_x = x_fitted

        # Keep only the last n fitted curves.
        if len(self.recent_fitted_xs) < self.n:
            self.recent_fitted_xs.append(self.current_fitted_x)
        elif len(self.recent_fitted_xs) == self.n:
            self.recent_fitted_xs.pop(0)
            self.recent_fitted_xs.append(self.current_fitted_x)
        else:
            self.recent_fitted_xs.append(self.current_fitted_x)
            self.recent_fitted_xs = self.recent_fitted_xs[-self.n:]

        self.average_fitted_x = np.array(self.recent_fitted_xs).mean(axis=0)
        return self.average_fitted_x

    def fit(self, binary_warped, location, sequence=True):
        """Detect pixels and (re)fit the lane polynomial for one frame.

        :param sequence: when True, search around the previous fit if the
            line was detected last frame; when False always run the full
            sliding-window search.
        """
        if sequence:
            if not self.detected:
                # Lost the line: restart with a sliding-window search.
                self.find_lane_pixels(binary_warped, location)
            else:
                # Search around the previous polynomial.
                self.search_pixel_around_poly(binary_warped)
            # TODO: reject the new fit when it disagrees strongly with the
            # previous one (self.diffs is computed but currently unused).
            self.fit_polynomial()
        else:
            self.find_lane_pixels(binary_warped, location)
            self.fit_polynomial()

    def measure_curvature_real(self, ploty, x, y):
        '''
        Calculates the curvature of polynomial functions in meters.
        '''
        # Conversions in x and y from pixel space to meters.
        ym_per_pix = 30 / 720  # meters per pixel in y dimension
        xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
        fit_cr = np.polyfit(y * ym_per_pix, x * xm_per_pix, 2)
        # Evaluate at the maximum y value, i.e. the bottom of the image.
        y_eval = np.max(ploty)
        # R_curve = (1 + (2*A*y + B)^2)^1.5 / |2*A|
        curverad = ((1 + (2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])
        self.radius_of_curvature = curverad
        return curverad
if __name__ == "__main__":
    # Demo: run the full pipeline on a single undistorted test image.
    from lane.perspective import perspective,src,dst
    from lane.gaussian_blur import gaussian_blur
    from lane.combined_threshold import combined_threshold
    from lane.measure_vehicle_pos import measure_vehicle_pos
    from lane.draw_lane import draw_lane

    # Blur -> threshold -> blur -> perspective warp to bird's-eye view.
    image = mpimg.imread('../output_images/undistorted/straight_lines1-undistorted.jpg')
    image = gaussian_blur(image, 3)
    combined = combined_threshold(image, ksize=3,
                                  th=[[20, 100], [25, 254], [100, 250], [0.6, 1.2], [180, 254], [250, 0]])
    combined = gaussian_blur(combined, 3)
    perspectived_img = perspective(combined,src,dst)
    # plt.imshow(perspectived_img,cmap="gray")
    # plt.show()
    left_lane = Lane()
    left_lane.fit(perspectived_img,"left")
    right_lane = Lane()
    right_lane.fit(perspectived_img, "right")
    # NOTE(review): Lane defines no `visual` method in this file -- the two
    # calls below will raise AttributeError unless it exists elsewhere.
    result = left_lane.visual(perspectived_img,"left")
    plt.imshow(result)
    result = right_lane.visual(perspectived_img, "right")
    plt.imshow(result)
    plt.show()
    # # 计算曲率
    # left_r = left_lane.measure_curvature_real(left_lane.ploty, left_lane.average_fitted_x, left_lane.ploty)
    # right_r = left_lane.measure_curvature_real(right_lane.ploty, right_lane.average_fitted_x, right_lane.ploty)
    #
    # # 计算偏移值
    # v = measure_vehicle_pos(left_lane.average_fitted_x, right_lane.average_fitted_x,left_lane.current_warped_binary_shape[1])
    #
    # # 绘制车道线
    # img = draw_lane(image, combined, dst, src,left_lane.current_fitted_x, right_lane.current_fitted_x, right_lane.ploty)
    # plt.imshow(img)
    # # 打印文字
    # plt.text(0,60,"Radius of Curvature = %d(m)"%int(r),fontdict={'size': 20, 'color': 'w'})
    # plt.text(0,120, "Vehicle is %.2f(m) left of center" % v, fontdict={'size': 20, 'color': 'w'})
    # plt.show()
| lane/Lane.py | 13,189 | Calculates the curvature of polynomial functions in meters.
-*- coding: utf-8 -*- Define a class to receive the characteristics of each line detection 当前的图像 当前图片的尺寸 检测到的车道线像素的横坐标 x values for detected line pixels 检测到的车道线像素的纵坐标 y values for detected line pixels 以纵坐标为自变量,取值空间 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 是否检测到车道线 was the line detected in the last iteration? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 保存的数据量 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 最近n个帧的拟合曲线 x values of the last n fits of the line 最近n个帧的平均拟合曲线 average x values of the fitted line over the last n iterations 当前帧的拟合曲线 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 最近n个帧的拟合函数 最近n个帧的拟合函数 polynomial coefficients averaged over the last n iterations 当前帧的拟合函数 polynomial coefficients for the most recent fit 拟合函数的误差 difference in fit coefficients between last and new fits +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 半径 radius of curvature of the line in some units 车辆在车道线之间距离 distance in meters of vehicle center from the line 对全新的帧进行车道线像素检测 Take a histogram of the bottom half of the image Create an output image to draw on and visualize the result out_img = np.dstack((binary_warped, binary_warped, binary_warped)) Find the peak of the left and right halves of the histogram These will be the starting point for the left and right lines HYPERPARAMETERS Choose the number of sliding windows Set the width of the windows +/- margin Set minimum number of pixels found to recenter window Set height of windows - based on nwindows above and image shape Identify the x and y positions of all nonzero pixels in the image 扁平化后非零值点的列表 Current positions to be updated later for each window in nwindows Create empty lists to receive left and right lane pixel indices right_lane_inds = [] Step through the windows one by one Identify window boundaries in x and y (and 
right and left) Draw the windows on the visualization image cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2) cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2) 形成对每个像素的bool值 Identify the nonzero pixels in x and y within the window Append these indices to the lists If you found > minpix pixels, recenter next window on their mean position Concatenate the arrays of indices (previously was a list of lists of pixels) Avoids an error if the above is not implemented fully Extract left and right line pixel positions 在之前的plot基础上找车道线 HYPERPARAMETER Choose the width of the margin around the previous polynomial to search The quiz grader expects 100 here, but feel free to tune on your own! Grab activated pixels TO-DO: Set the area of search based on activated x-values within the +/- margin of our polynomial function Hint: consider the window areas for the similarly named variables in the previous quiz, but change the windows to our new search area Again, extract left and right line pixel positions Fit a second order polynomial to each using `np.polyfit` 存储当前结果 计算误差 存储为历史结果 后面n个 计算当前平均 Avoids an error if `left` and `right_fit` are still none or incorrect 存储为历史结果 后面n个 没有检测到,重新开始检测 从上一次周围开始检测 TODO 如果两次检测的误差较大怎么办? 
TODO 是否存在 if np.abs(self.diffs).sum() > 20: self.current_fit = np.array(self.recent_fits[:-1]).mean(axis=0) self.recent_fits[-1] = self.current_fit self.average_fit = np.array(self.recent_fits).mean(axis=0) self.current_fitted_x = np.array(self.recent_fitted_xs[:-1]).mean(axis=0) self.recent_fitted_xs[-1] = self.current_fitted_x self.average_fitted_x = np.array(self.recent_fitted_xs).mean(axis=0) Define conversions in x and y from pixels space to meters meters per pixel in y dimension meters per pixel in x dimension Define y-value where we want radius of curvature We'll choose the maximum y-value, corresponding to the bottom of the image Calculation of R_curve (radius of curvature) plt.imshow(perspectived_img,cmap="gray") plt.show() 计算曲率 left_r = left_lane.measure_curvature_real(left_lane.ploty, left_lane.average_fitted_x, left_lane.ploty) right_r = left_lane.measure_curvature_real(right_lane.ploty, right_lane.average_fitted_x, right_lane.ploty) 计算偏移值 v = measure_vehicle_pos(left_lane.average_fitted_x, right_lane.average_fitted_x,left_lane.current_warped_binary_shape[1]) 绘制车道线 img = draw_lane(image, combined, dst, src,left_lane.current_fitted_x, right_lane.current_fitted_x, right_lane.ploty) plt.imshow(img) 打印文字 plt.text(0,60,"Radius of Curvature = %d(m)"%int(r),fontdict={'size': 20, 'color': 'w'}) plt.text(0,120, "Vehicle is %.2f(m) left of center" % v, fontdict={'size': 20, 'color': 'w'}) plt.show() | 4,859 | en | 0.606499 |
import ctypes as C
import numpy as np
from math import log,e
import hankelmatrixcreator
import sys
import time
import iohelpers
import math
import modelconversion
from scipy.sparse.linalg import lsqr
from scipy import sparse
import copy
# Module-level switches and constants.
DEBUG = False
VERBOSE = True
# NOTE(review): presumably a sentinel for a failed learning run; its use is
# outside this excerpt -- confirm.
FAILURE_CONST = -100000.0
class TensorWFA:
def __init__(self, n_symbols):
self.n_symbols = n_symbols
self.hankels_learnt = False
    def _estimate_hankels(self, data, prefixdict, suffixdict):
        # Estimate all empirical Hankel blocks from the corpus in one call
        # and cache them on the instance for reuse across learn calls.
        # NOTE(review): the trailing literal 100 is an undocumented argument
        # of construct_tensor_hankels (presumably a size cap) -- confirm.
        self.h_pands,self.symbol_hankels,self.hp_pandsigma,self.hbar_pands,self.hbar_pandsigma,self.hbar_sigmaands = hankelmatrixcreator.construct_tensor_hankels(data, prefixdict, suffixdict, self.n_symbols, 100)
        if VERBOSE:
            print "Finished Hankel estimation"
def compute_XY(self, hbar_pands, hbar_pandsigma, hbar_sigmaands, symbol_hankels, num_symbols, num_components):
if VERBOSE:
print "Constructing Q matrices.."
self.qp,_,self.qs = sparse.linalg.svds(hbar_pands,num_components)
self.qs_constructed = True
qp = sparse.csr_matrix(np.mat((self.qp[:,0:num_components]).T))
qs = sparse.csr_matrix(np.mat((self.qs[0:num_components, :])))
qp.eliminate_zeros()
qp.prune()
qs.eliminate_zeros()
qs.prune()
if VERBOSE:
print "Computing N.."
hsquiggleps = qp*hbar_pands*qs.T
hsquiggleps = sparse.csr_matrix(np.linalg.inv(hsquiggleps.A))
N = qs.T*(hsquiggleps)*qp
N.eliminate_zeros()
N.prune()
if VERBOSE:
print "Computing X.."
Xs = hbar_sigmaands*N*hbar_pandsigma
X = np.empty((num_symbols, num_symbols), order='F', dtype=np.float64)
X[:,:] = Xs.A
self.rank = np.linalg.matrix_rank(X)
if self.rank < num_components:
print "Rank Deficient!!"
return [],[], False
if VERBOSE:
print "Computing Y..."
leftside = hbar_sigmaands*N
leftside.eliminate_zeros()
leftside.prune()
rightside = N*hbar_pandsigma
rightside.eliminate_zeros()
rightside.prune()
Y = np.empty((num_symbols, num_symbols, num_symbols))
for sym in range(num_symbols):
Y[:,sym,:] = (leftside*(symbol_hankels[sym])*rightside).A
return X,Y, True
def learn_tensor(self, data, prefixdict, suffixdict, num_components):
starttime = time.clock()
if not self.hankels_learnt:
begintime = time.clock()
self._estimate_hankels(data, prefixdict, suffixdict)
self.inittime = time.clock()-begintime
self.hankels_learnt = True
#adding aliases with "self" prefix for readability.
h_pands = self.h_pands
symbol_hankels = self.symbol_hankels
hp_pandsigma = self.hp_pandsigma
hbar_pands = self.hbar_pands
hbar_pandsigma = self.hbar_pandsigma
hbar_sigmaands = self.hbar_sigmaands
num_symbols = self.n_symbols
X,Y, success = self.compute_XY(hbar_pands, hbar_pandsigma, hbar_sigmaands, symbol_hankels, num_symbols, num_components)
if not success:
return success
if VERBOSE:
print "Performing tensor decomposition.."
lib = C.CDLL('./cpp/libTensor.so')
f_learn_tensor = lib.learn_tensor
f_learn_tensor.argtypes = [np.ctypeslib.ndpointer(dtype = np.float64), np.ctypeslib.ndpointer(dtype = np.float64), C.c_int, np.ctypeslib.ndpointer(dtype = np.float64), np.ctypeslib.ndpointer(dtype = np.float64), C.c_int, C.c_int]
f_learn_tensor.restype = C.c_double
Otilde = np.empty((num_symbols, num_components), order='F', dtype=np.float64)
gamma = np.empty((num_components), order='F', dtype=np.float64)
res = f_learn_tensor(X,Y,num_components,Otilde,gamma,num_symbols, 1)
if res == FAILURE_CONST:
return False
if VERBOSE:
print "Building unnormalized model.."
Otilde = np.mat(Otilde)
# hbar_pandsigma = np.mat(self.hbar_pandsigma.toarray())
# hbar_sigmaands = np.mat(self.hbar_sigmaands.toarray())
# hbar_pands = np.mat(self.hbar_pands.toarray())
Otildepinv = np.linalg.pinv(Otilde)
spOtildepinv = sparse.csr_matrix(Otildepinv)
spOtildepinv.eliminate_zeros()
spOtildepinv.prune()
Otildep = hbar_pandsigma*Otildepinv.T
Otildes = Otildepinv*hbar_sigmaands
alphatilde = np.mat(Otildep[0,:]).T
stopvec = np.mat(Otildes[:,0])
spOtildeppinv = sparse.csr_matrix(np.linalg.pinv(Otildep))
spOtildeppinv.eliminate_zeros()
spOtildeppinv.prune()
Ttilde = self._compute_T(spOtildeppinv, num_components)
Dgamma = np.mat(np.diag(gamma))
Dgamma = Dgamma*Dgamma
Ds = spOtildeppinv*hp_pandsigma*spOtildepinv.T
Dspinv = np.mat(Ds.A)
Ds = np.linalg.pinv(np.mat(Ds.A))
Dsigma = np.eye(alphatilde.shape[0])-np.diag(stopvec)
Dsigmapinv = np.linalg.pinv(Dsigma)
Beta = Ds*(np.linalg.pinv(Ttilde))*Dgamma*stopvec
for i in range(stopvec.shape[0]):
stopvec[i] = Beta[i]/(1+Beta[i])
alpha = np.empty((num_components), order='F', dtype=np.float64)
alpha[:] = (alphatilde.T*Dsigmapinv*Dspinv).A
O = np.empty((num_components, num_symbols), order='F', dtype=np.float64)
O[:,:] = (Otilde*Dsigma).A.T
T = np.empty((num_components, num_components), order='F', dtype=np.float64)
T[:] = (Ds*Ttilde*Dspinv*Dsigmapinv).A
if DEBUG:
print "O before PNFA projection: ", O
print "T before PNFA projection: ", T
print "ainf before simplex projection: ", stopvec
print "a before simplex projection: ", alpha
O,T,alpha, stopvec = self._project_to_probabilities(O,T,alpha, stopvec.T.A[0], num_components, num_symbols)
stopvec = np.mat(stopvec).T
alpha = np.mat(alpha).T
O = O.T
if DEBUG:
print "O after PNFA projection: ", O
print "T after PNFA projection: ", T
print "ainf after simplex projection: ", stopvec
print "a after simplex projection: ", alpha
self.initvec, self.ainf, self.wordstopvec, self.As = self.convert_hmm_to_wfa(O, T, alpha, stopvec)
self.a = self.initvec.copy()
self.buildtime = time.clock() - starttime
return True
def _compute_sparse_pinv(self, sparsemat, rank):
u,s,v = sparse.linalg.svds(sparsemat, rank)
u = np.mat(u)
s = np.mat(np.diag(s))
v = np.mat(v)
pinv = v.T*np.linalg.inv(s)*u.T
return np.mat(pinv)
def _compute_T(self, Otildepinv, num_components):
A = (Otildepinv*self.h_pands).T
A.eliminate_zeros()
A.prune()
B = (Otildepinv*self.hbar_pands).T
B.eliminate_zeros()
B.prune()
T = np.empty((num_components, num_components))
for i in range(num_components):
T[i,:] = lsqr(A,B[:,i].A)[0]
return T
def _project_to_probabilities(self, O, T, alpha, ainf, num_components, num_symbols):
lib = C.CDLL('../cpp/libTensor.so')
f_simplex_proj = lib.do_simplex_projection
f_simplex_proj.argtypes = [np.ctypeslib.ndpointer(dtype = np.float64), C.c_double, C.c_int]
f_simplex_proj.restype = C.c_int
f_probmat_proj = lib.do_probmat_projection
f_probmat_proj.argtypes = [np.ctypeslib.ndpointer(dtype = np.float64), np.ctypeslib.ndpointer(dtype = np.float64), C.c_int, C.c_int]
f_probmat_proj.restype = C.c_int
f_simplex_proj(alpha, 1.0, num_components)
zeros = np.zeros((num_components), order='F', dtype=np.float64)
probO = np.zeros((num_components, num_symbols+1), order='F', dtype=np.float64)
probO[:,0:num_symbols] = O
probO[:,num_symbols:num_symbols+1] = np.mat(ainf).T
f_probmat_proj(probO, zeros, num_components, num_symbols)
O = probO[:,0:num_symbols]
ainf = probO[:,num_symbols:num_symbols+1]
ainf = ainf.T
f_probmat_proj(T, zeros, num_components, num_components)
return O, T, alpha, ainf
def get_symbol_prediction(self):
predictedsymbol = -1
maxscore = np.finfo(float).eps
for symbol in range(self.n_symbols):
symbolscore = self.get_obs_prob(symbol)
if symbolscore > maxscore:
predictedsymbol = symbol
maxscore = symbolscore
stopscore = float(self.a.T*self.wordstopvec)
if stopscore > maxscore:
predictedsymbol = self.n_symbols
return predictedsymbol
def get_WER(self, testdata):
errors = 0
numpredictions = 0
for seq in testdata:
for obs in seq:
numpredictions += 1
predsymbol = self.get_symbol_prediction()
self.update(obs)
if predsymbol != obs:
errors += 1
predsymbol = self.get_symbol_prediction()
numpredictions += 1
if predsymbol != self.n_symbols:
errors += 1
self.reset()
return float(errors)/float(numpredictions)
#provides average log-likelihood score
def score(self, data):
loglike = 0
for seq in data:
seqloglike = 0
self.reset()
for obs in seq:
seqloglike = seqloglike + log(self.get_obs_prob(obs))
self.update(obs)
loglike += seqloglike
return loglike/(float(len(data)))
#updates a/start/state vector after seeing symbol
def update(self, obs):
bomat = self.As[obs]
numerator = self.a.T*bomat
denom = numerator*self.ainf
self.a = (numerator/denom).T
#resets state vector
def reset(self):
self.a = self.initvec.copy()
#returns the probablilty of a particular observation given current state
def get_obs_prob(self, obs):
prob = (self.a.T)*(self.As[obs])*self.ainf
prob = min(prob,1)
prob = max(prob,np.finfo(float).eps)
return prob
#returns the probability of an entire sequence, or "word"
def get_word_prob(self,seq):
seqprob = 0
for obs in seq:
prob = self.get_obs_prob(obs)
if prob <= np.finfo(float).eps:
return np.finfo(float).eps
seqprob += log(prob)
self.update(obs)
endprob = float(self.a.T*self.wordstopvec)
if endprob <= np.finfo(float).eps:
return np.finfo(float).eps
seqprob += log(endprob)
self.reset()
if not math.isnan(seqprob):
return e**seqprob
else:
return np.finfo(float).eps
def scorepautomac(self, testdata, truprobs):
modelprobs = np.zeros((len(truprobs)))
probsum = 0
i = 0
for seq in testdata:
prob = self.get_word_prob(seq)
modelprobs[i] = prob
probsum += prob
i += 1
modelprobs /= float(probsum)
i = 0
scoresum = 0
for truprob in truprobs:
if modelprobs[i] < np.finfo(float).eps:
modelprobs[i] = np.finfo(float).eps
scoresum += truprob*log(modelprobs[i],2)
i += 1
return 2.0**(-1.0*float(scoresum))
def get_perplexity(self, testdata):
modelprobs = np.zeros((len(testdata)))
probsum = 0
i = 0
for seq in testdata:
prob = self.get_word_prob(seq)
modelprobs[i] = prob
probsum += prob
i += 1
scoresum = 0
for i in range(len(modelprobs)):
if modelprobs[i] < np.finfo(float).eps:
modelprobs[i] = np.finfo(float).eps
scoresum += log(modelprobs[i],2)
scoresum /= float(len(testdata))
return 2.0**(-1.0*float(scoresum))
def convert_hmm_to_wfa(self, O, T, alpha, stopvec):
As = {}
for symbol in range(O.shape[0]):
As[symbol] = np.mat(np.diag(O[symbol,:]))*T
return alpha, np.mat(np.ones(T.shape[0])).T, stopvec, As
if __name__ == '__main__':
PAUTOMACPATH = "/home/williamleif/Dropbox/icml2014-experiments/datasets/PAutomaC-competition_sets/"
RESULTS_DIR = "/home/williamleif/Dropbox/icml2014-experiments/results/tensor/"
MODEL_DIR = "/home/williamleif/Dropbox/icml2014-experiments/models/"
metric = sys.argv[1]
problem = sys.argv[2]
n_symbols = sys.argv[3]
n_symbols = int(n_symbols)
if problem != "tree" and problem != "timeseries":
traindata = iohelpers.parse_file(PAUTOMACPATH+problem+".pautomac.train")
testdata = iohelpers.parse_file(PAUTOMACPATH+problem+".pautomac.test")
if metric == "KL":
groundtruth = iohelpers.parse_groundtruth_file(PAUTOMACPATH+problem+".pautomac_solution.txt")
else:
validdata = traindata[15000:20000]
traindata = traindata[0:15000]
maxbasissize = int(sys.argv[4])
prefixdict, suffixdict = hankelmatrixcreator.top_k_string_bases(traindata,maxbasissize,n_symbols)
basislength = len(prefixdict)
bestsize = 0
avruntime = 0
nummodelsmade = 0
bestscore = 0
wfa = TensorWFA(n_symbols)
begintime = time.clock()
for i in range(3, 6):
success = wfa.learn_tensor(traindata, prefixdict, suffixdict, i)
if i == 3:
inittime = wfa.inittime
if not success:
break
if metric == "WER":
score = wfa.get_WER(validdata)
else:
score = wfa.scorepautomac(testdata,groundtruth)
if bestsize == 0:
bestscore = score
bestsize = i
bestwfa = copy.deepcopy(wfa)
elif score < bestscore and abs(score-1000) > 0.1:
bestscore = score
bestsize = i
bestwfa = copy.deepcopy(wfa)
print "Model size: ", i, " Score: ", score
avruntime += wfa.buildtime
nummodelsmade += 1
# if metric == "WER":
# bestscore = bestwfa.get_WER(testdata)
# iohelpers.write_results(RESULTS_DIR+"tensor-pautomac="+problem+"-"+metric+".txt", problem,"size= "+str(bestsize)+", basis size="+str(basislength), metric, bestscore, avruntime/float(nummodelsmade))
# iohelpers.write_pnfa_model(MODEL_DIR+"tensor-"+str(bestsize)+"-pautomac="+problem+"-"+metric+".fsm", bestwfa)
runtime = time.clock()-begintime
fp = open("/home/williamleif/Dropbox/icml2014-experiments/results/runtimes/tensor", "w")
fp.write("Init time: "+str(inittime)+" Runtime: "+str(runtime))
else:
RESULTS_DIR = "/home/williamleif/Dropbox/icml2014-experiments/results/real/"
if problem == "tree":
traindata = iohelpers.parse_file("/home/williamleif/Dropbox/icml2014-experiments/datasets/treebankdata.obs")
validdata = traindata[0:5000]
testdata = traindata[5000:10000]
traindata = traindata[10000:len(traindata)]
maxbasissize = int(sys.argv[4])
prefixdict, suffixdict = hankelmatrixcreator.top_k_string_bases(traindata,maxbasissize,n_symbols)
basislength = len(prefixdict)
bestsize = 0
avruntime = 0
nummodelsmade = 0
bestscore = 0
wfa = TensorWFA(n_symbols)
for i in range(3, n_symbols+1):
success = wfa.learn_tensor(traindata, prefixdict, suffixdict, i)
if not success:
break
if metric == "WER":
score = wfa.get_WER(validdata)
else:
score = wfa.get_perplexity(validdata)
if bestsize == 0:
bestscore = score
bestsize = i
bestwfa = copy.deepcopy(wfa)
elif score < bestscore and abs(score-1000) > 0.1:
bestscore = score
bestsize = i
bestwfa = copy.deepcopy(wfa)
print "Model size: ", i, " Score: ", score
avruntime += wfa.buildtime
nummodelsmade += 1
if metric == "WER":
bestscore = bestwfa.get_WER(testdata)
else:
bestscore = bestwfa.get_perplexity(testdata)
iohelpers.write_results(RESULTS_DIR+"tensor-"+metric+".txt", problem,"size= "+str(bestsize)+", basis size="+str(basislength), metric, bestscore, 0)
iohelpers.write_pnfa_model(MODEL_DIR+"tensor-"+str(bestsize)+"-pautomac="+problem+"-"+metric+".fsm", bestwfa)
| code/tensor/wfatensorlearn.py | 16,907 | (extracted comment text, 808 chars, en, score 0.321544 — duplicates of the inline comments above: "adding aliases with 'self' prefix for readability", the commented-out hbar_* toarray conversions, the method notes, and the commented-out WER/write_results block) |
"""\
Perl code generator
@copyright: 2002-2004 D.H. aka crazyinsomniac on sourceforge.net
@copyright: 2012-2016 Carsten Grohmann
@copyright: 2017-2020 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import os, os.path, re
from codegen import BaseLangCodeWriter, BaseSourceFileContent
import wcodegen, compat
import logging
class SourceFileContent(BaseSourceFileContent):
rec_block_start = re.compile(
r'^(?P<spaces>\s*)' # leading spaces
r'#\s*' # comment sign
r'begin\s+wxGlade:\s*' # "begin wxGlade:" statement and tailing spaces
r'(?P<classname>[a-zA-Z_]+[\w:]*?)??' # class or function name (non-greedy)
r'(?::{2}|\s*)' # separator between class and function / block (non-greedy)
r'(?P<block>\w+)' # function / block name
r'\s*$' # tailing spaces
)
rec_block_end = re.compile(
r'^\s*' # leading spaces
r'#\s*' # comment sign
r'end\s+wxGlade' # "end exGlade" statement
r'\s*$' # tailing spaces
)
# Less precise regex, but working :-P
# Should match: package Foo; or package Foo::bar::baz ;
rec_class_decl = re.compile(
r'^\s*' # leading spaces
r'package\s+([a-zA-Z_][\w:]*)\s*;' # "package <name>" statement
r'.*$' # any character till eol
)
rec_event_handler = re.compile(
r'^\s*' # leading spaces
r'#\s*wxGlade:\s*(?P<class>[\w:]+)::(?P<handler>\w+) <event_handler>' # wxGlade event handler
# statement with class and
# event handler name
r'\s*$' # tailing spaces
)
# Regexp to match Perl's Plain Old Documentation format; see: manpage perlpod
rec_pod = re.compile(
r'^\s*' # leading spaces
r'=[A-Za-z_]+\w*' # match POD statement
r'.*$' # any character till eol
)
def build_untouched_content(self):
"""\
Builds a string with the contents of the file that must be left as is,
and replaces the wxGlade blocks with tags that in turn will be replaced
by the new wxGlade blocks
WARNING: NOT YET COMPLETE -- crazyinsomniac
alb - almost done :)
WARNING: There is *NO* support for here documents: if you put wxGlade
blocks inside a here document, you're likely going into troubles...
"""
BaseSourceFileContent.build_untouched_content(self)
inside_block = False
inside_pod = False
tmp_in = self._load_file(self.name)
out_lines = []
check_old_methods = [] # list of indices with set_properties or do_layout
for line in tmp_in:
result = self.rec_pod.match(line)
if result:
inside_pod = True
if inside_pod:
out_lines.append(line)
if line.startswith('=cut'):
inside_pod = False
continue
result = self.rec_class_decl.match(line)
if result:
if not self.class_name:
# this is the first class declared in the file: insert the new ones before this
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
self.new_classes_inserted = True
self.class_name = result.group(1)
self.class_name = self.format_classname(self.class_name)
self.classes.add( self.class_name ) # add the found class to the list of classes of this module
out_lines.append(line)
elif not inside_block:
result = self.rec_block_start.match(line)
if result:
# replace the lines inside a wxGlade block with a tag that will be used later by add_class
spaces = result.group('spaces')
which_class = result.group('classname')
which_block = result.group('block')
if not which_class:
which_class = self.class_name
else:
which_class = self.format_classname(which_class)
self.spaces[which_class] = spaces
inside_block = True
if not self.class_name:
out_lines.append( '<%swxGlade replace %s>' % (self.nonce, which_block) )
else:
if which_block in ("__do_layout","__set_properties"):
# probably to be removed
check_old_methods.append( len(out_lines) )
out_lines.append( '<%swxGlade replace %s %s>' % (self.nonce, which_class, which_block) )
else:
result = self.rec_event_handler.match(line)
if result:
which_handler = result.group('handler')
which_class = self.format_classname(result.group('class'))
self.event_handlers.setdefault( which_class, set() ).add( which_handler )
if self.class_name and self.is_end_of_class(line):
# add extra event handlers here...
out_lines.append( '<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
out_lines.append(line)
else:
# ignore all the lines inside a wxGlade block
if self.rec_block_end.match(line):
inside_block = False
if not self.new_classes_inserted:
# if we are here, the previous ``version'' of the file did not contain any class, so we must add the
# new_classes tag at the end of the file
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
# when moving from 0.9 to 1.0: remove empty methods "do_layout" and "set_properties"
while check_old_methods:
i = check_old_methods.pop(-1)
if out_lines[i+1].strip()=='}': # just end of block -> remove incl. trailing empty lines
self._remove_method(out_lines, i-2, i+1)
# set the ``persistent'' content of the file
self.content = out_lines
class PerlCodeWriter(BaseLangCodeWriter, wcodegen.PerlMixin):
"Code writer class for writing Perl code out of the designed GUI elements; see: BaseLangCodeWriter"
_code_statements = {
'backgroundcolour': "%(objname)s->SetBackgroundColour(%(value)s);\n",
'disabled': "%(objname)s->Enable(0);\n",
'extraproperties': "%(objname)s->Set%(propname_cap)s(%(value)s);\n",
'focused': "%(objname)s->SetFocus();\n",
'foregroundcolour': "%(objname)s->SetForegroundColour(%(value)s);\n",
'hidden': "%(objname)s->Show(0);\n",
'setfont': "%(objname)s->SetFont(Wx::Font->new(%(size)s, %(family)s, "
"%(style)s, %(weight)s, %(underlined)s, %(face)s));\n",
'tooltip': "%(objname)s->SetToolTipString(%(tooltip)s);\n",
'tooltip_3': "%(objname)s->SetToolTip(%(tooltip)s);\n",
'wxcolour': "Wx::Colour->new(%(value)s)",
'wxnullcolour': "Wx::NullColour",
'wxsystemcolour': "Wx::SystemSettings::GetColour(%(value)s)",
}
class_separator = '::'
classattr_always = ['wxBoxSizer', 'wxStaticBoxSizer', 'wxGridSizer', 'wxFlexGridSizer']
indent_amount = 1
indent_symbol = '\t'
indent_level_func_body = 1
language_note = '# To get wxPerl visit http://www.wxperl.it\n' \
'#\n'
name_ctor = 'new'
new_defaults = [] # Default class members, will be initialised during new_project()
shebang = '#!/usr/bin/perl -w -- \n#\n'
SourceFileContent = SourceFileContent
tmpl_cfunc_end = '%(tab)sreturn $self;\n' \
'\n' \
'}\n' \
'\n'
tmpl_class_end = '\n%(comment)s end of class %(klass)s\n\n1;\n\n'
tmpl_class_end_nomarker = '\n\n1;\n\n'
tmpl_func_event_stub = """\
sub %(handler)s {
%(tab)smy ($self, $event) = @_;
%(tab)s# wxGlade: %(klass)s::%(handler)s <event_handler>
%(tab)swarn "Event handler (%(handler)s) not implemented";
%(tab)s$event->Skip;
%(tab)s# end wxGlade
}
"""
tmpl_func_empty = '%(tab)sreturn;\n'
tmpl_sizeritem = '%s->Add(%s, %s, %s, %s);\n'
tmpl_gridbagsizeritem = '%s->Add(%s, %s, %s, %s, %s);\n'
tmpl_gridbagsizerspacer = '%s->Add(%s, %s, %s, %s, %s, %s);\n'
tmpl_spacersize = '%s, %s'
tmpl_style = \
'%(tab)s$style = %(style)s\n' \
'%(tab)s%(tab)sunless defined $style;\n' \
'\n'
tmpl_toplevel_style = tmpl_style
tmpl_appfile = """%(overwrite)s%(header_lines)s"""
def _get_app_template(self, app, top_win):
'build template string for application'
if not self.app_name: return None
# XXX use Show() for frames/panels and ShowModal()/Destroy for dialogs
klass = app.klass
if self._use_gettext:
gettext1 = ['%(tab)smy $local = Wx::Locale->new("English", "en", "en"); # replace with ??',
'%(tab)s$local->AddCatalog("%(textdomain)s"); # replace with the appropriate catalog name\n']
else:
gettext1 = []
if klass:
ret = [ 'package %(klass)s;',
'',
'use base qw(Wx::App);',
'use strict;',
'%(pl_import)s',
'sub OnInit {',
'%(tab)smy( $self ) = shift;',
'',
'%(tab)sWx::InitAllImageHandlers();',
'',
'%(tab)smy $%(top_win)s = %(top_win_class)s->new();',
'',
'%(tab)s$self->SetTopWindow($%(top_win)s);',
'%(tab)s$%(top_win)s->Show(1);',
'',
'%(tab)sreturn 1;',
'}']
if self._mark_blocks:
ret.append('# end of class %(klass)s')
ret += ['',
'package main;',
'',
'unless(caller){'] + gettext1 + [
'%(tab)smy $%(name)s = %(klass)s->new();',
'%(tab)s$%(name)s->MainLoop();',
'}', '']
else:
ret = ['1;',
'',
'package main;',
'%(pl_import)s',
'unless(caller){'] + gettext1 + [
'%(tab)slocal *Wx::App::OnInit = sub{1};',
'%(tab)smy $%(name)s = Wx::App->new();',
'%(tab)sWx::InitAllImageHandlers();',
'',
'%(tab)smy $%(top_win)s = %(top_win_class)s->new();',
'',
'%(tab)s$%(name)s->SetTopWindow($%(top_win)s);',
'%(tab)s$%(top_win)s->Show(1);',
'%(tab)s$%(name)s->MainLoop();',
'}', '']
return '\n'.join(ret)
def init_lang(self, app_attrs):
# initial new defaults late to use the proper indent characters
tab = self.tabs(1)
self.new_defaults = {
'$parent' : '%s$parent = undef unless defined $parent;\n' % tab,
'$id' : '%s$id = -1 unless defined $id;\n' % tab,
'$title' : '%s$title = "" unless defined $title;\n' % tab,
'$pos' : '%s$pos = wxDefaultPosition unless defined $pos;\n' % tab,
'$size' : '%s$size = wxDefaultSize unless defined $size;\n' % tab,
'$name' : '%s$name = "" unless defined $name;\n\n' % tab,
#'$style' is a special case
}
self.header_lines = [
'use Wx qw[:allclasses];\n',
'use strict;\n'
]
def add_app(self, app_attrs, top_win):
# add language specific mappings
if self.multiple_files:
self.lang_mapping['pl_import'] = "\nuse %s;\n" % top_win.klass
else:
self.lang_mapping['pl_import'] = ''
BaseLangCodeWriter.add_app(self, app_attrs, top_win)
def generate_code_ctor(self, code_obj, is_new, tab):
code_lines = []
write = code_lines.append
builder = self.obj_builders[code_obj.WX_CLASS]
mycn = getattr(builder, 'cn', self.cn)
mycn_f = getattr(builder, 'cn_f', self.cn_f)
# custom base classes support
custom_base = code_obj.check_prop_nodefault('custom_base') and code_obj.custom_base.strip() or None
new_signature = getattr(builder, 'new_signature', [])
# generate constructor code
if is_new:
write('package %s;\n\n' % code_obj.klass)
write('use Wx qw[:everything];\nuse base qw(%s);\nuse strict;\n\n' % code_obj.WX_CLASS.replace('wx', 'Wx::', 1))
if self._use_gettext:
if self.multiple_files:
self.classes[code_obj].dependencies.add( "use Wx::Locale gettext => '_T';\n" )
else:
write("use Wx::Locale gettext => '_T';\n")
# The dependencies have to add to the package block too because global imports are not visible inside the
# package block
# TODO: Don't add dependencies twice with Perl
# write the module dependencies for this class (package)
dep_list = sorted( self.classes[code_obj].dependencies )
if dep_list:
code = self._tagcontent('dependencies', dep_list, True)
write(code)
write('sub new {\n')
write(tab + "my( $self, %s ) = @_;\n" % ", ".join(new_signature))
if new_signature:
for k in new_signature:
if k in self.new_defaults:
write(self.new_defaults[k])
else:
new_signature = ['@_[1 .. $#_]'] # shift(@_)->SUPER::new(@_);
logging.info( "%s did not declare self.new_defaults ", code_obj.klass )
elif custom_base:
# custom base classes set, but "overwrite existing sources" not set. Issue a warning about this
self.warning( '%s has custom base classes, but you are not overwriting existing sources: '
'please check that the resulting code is correct!' % code_obj.name )
if self._mark_blocks:
# __init__ begin tag
write(self.tmpl_block_begin % {'class_separator':self.class_separator, 'comment_sign':self.comment_sign,
'function':self.name_ctor, 'klass':self.cn_class(code_obj.klass),
'tab':tab} )
# the optional initial code from the code properties
if not self.preview and code_obj.check_prop("extracode_pre"):
for l in code_obj.properties["extracode_pre"].get_lines():
write(tab + l)
style_p = code_obj.properties.get("style")
if style_p and style_p.value_set != style_p.default_value:
style = style_p.get_string_value()
m_style = mycn_f( style )
if m_style:
stmt_style = self._format_style(style, code_obj)
write( stmt_style % {'style':m_style, 'tab':tab} )
# class parent constructor
write(tab + '$self = $self->SUPER::new( %s );\n' % ", ".join(new_signature))
# set size here to avoid problems with splitter windows
if code_obj.check_prop('size'):
write( tab + self.generate_code_size(code_obj) )
for l in builder.get_properties_code(code_obj):
write(tab + l)
if code_obj.check_prop_truth('extraproperties'):
for l in builder.generate_code_extraproperties(code_obj):
write(tab + l)
# the initial and final code for the contained elements
for l in self.classes[code_obj].init:
write(tab + l)
if self.classes[code_obj].final:
write(tab + "\n")
for l in self.classes[code_obj].final:
write(tab + l)
# now check if there is initial and final code for the element itself
for l in builder.get_init_code(code_obj):
write(tab+l)
for l in builder.get_layout_code(code_obj):
write(tab + l)
# the optional final code from the code properties
if not self.preview and code_obj.check_prop("extracode_post"):
for l in code_obj.properties["extracode_post"].get_lines():
write(tab + l)
return code_lines
def generate_code_event_bind(self, code_obj, tab, event_handlers):
code_lines = []
for obj, event, handler, unused in event_handlers:
if obj.name:
obj_id = '%s->GetId'%self.format_generic_access(obj) # e.g. '$self->{button_1}->GetId' or '$self->GetId'
else:
obj_id = self.generate_code_id(None, obj.id)[1] or '-1' # but this is wrong anyway...
if 'EVT_NAVIGATION_KEY' in event:
tmpl = '''%(tab)s%(event)s($self, $self->can('%(handler)s'));\n'''
else:
tmpl = '''%(tab)s%(event)s($self, %(obj_id)s, $self->can('%(handler)s'));\n'''
code_lines.append( tmpl % {'tab': tab, 'event': self.cn(event), 'handler': handler, 'obj_id': obj_id} )
if event_handlers:
code_lines.append('\n')
return code_lines
def generate_code_id(self, obj, id=None):
if id is None:
id = obj.window_id
if not id:
if obj is not None and obj.check_prop_truth("stockitem"):
return '', self.cn("wxID_" + obj.stockitem)
return '', self.cn('wxID_ANY')
id = str(id)
tokens = id.split('=', 1)
if len(tokens) != 2:
return '', self.cn(tokens[0]) # we assume name is declared elsewhere
name, val = tokens
if not name:
return '', self.cn(val)
name = name.strip()
val = val.strip()
if val == '?':
val = self.cn('wxNewId()')
else:
val = self.cn(val)
# check to see if we have to make the var global or not...
return 'use constant %s => %s;\n' % (name, val), name
def generate_code_size(self, obj):
objname = self.format_generic_access(obj)
size = obj.properties["size"].get_string_value()
use_dialog_units = (size[-1] == 'd')
method = 'SetMinSize' if obj.parent_window else 'SetSize'
if use_dialog_units:
return '%s->%s(%s->ConvertDialogSizeToPixels(Wx::Size->new(%s)));\n' % (objname, method, objname, size[:-1])
return '%s->%s(Wx::Size->new(%s));\n' % (objname, method, size)
def _quote_str(self, s):
"""Escape all unicode characters to there unicode code points in form of \\uxxxx.
The returned string is a pure ascii string.
Normal ascii characters like \\n or \\t won't be escaped.
note: wxGlade don't handles file encoding well currently. Thereby
we escape all unicode characters.
note: The string 's' is encoded with self.app_encoding already.
see: BaseLangCodeWriter._quote_str for additional details
see: _recode_x80_xff()"""
s = s.replace('$', r'\$')
s = s.replace('@', r'\@')
# convert all strings to unicode first
if not isinstance(s, compat.unicode):
s = s.decode(self.app_encoding)
# check if it's pure ascii
try:
dummy = s.encode('ascii')
if self._use_gettext:
return '_T("%s")' % s
else:
return '"%s"' % s
except UnicodeError:
pass
# convert unicode strings to pure ascii
# use "raw-unicode-escape" just escaped unicode characters and not default escape sequences
s = s.encode('raw-unicode-escape')
s = self._recode_x80_xff(s)
if compat.PYTHON3:
# convert back to str (unicode)
s = s.decode("ASCII")
# convert Python style to Perl style
s = re.sub(r'\\u([0-9a-f]{4})', r'\\N{U+\1}', s)
if self._use_gettext:
return '_T("%s")' % s
else:
return '"%s"' % s
def add_object_format_name(self, name):
return '#$self->%s' % name
def _format_classattr(self, obj):
res = BaseLangCodeWriter._format_classattr(self, obj)
if not res:
return res
elif obj.name.startswith('$self->'):
return obj.name
elif obj.name.startswith('$'):
return obj.name
# spacer.name is "<width>, <height>" already
elif obj.WX_CLASS == 'spacer':
return obj.name
# Perl stores sizers always in class attributes
elif self.store_as_attr(obj) or obj.IS_SIZER:
return '$self->{%s}' % obj.name
return '$%s' % obj.name
def _format_import(self, klass):
return 'use %s;\n' % klass
def _get_class_filename(self, klass):
"Returns the name for a Perl module (.pm) to store a single class in multi file projects"
return os.path.join( self.out_dir, klass.replace('::', os.sep) + '.pm' )
def format_generic_access(self, obj):
if obj.IS_CLASS:
return '$self'
return self._format_classattr(obj)
writer = PerlCodeWriter() # the code writer instance
language = writer.language # Language generated by this code generator
| codegen/perl_codegen.py | 22,443 | (extracted comment/docstring text, 0.606 en — duplicates of the documentation already present in the file above, cleaned up:
  - "Code writer class for writing Perl code out of the designed GUI elements; see: BaseLangCodeWriter"
  - "build template string for application"
  - "Returns the name for a Perl module (.pm) to store a single class in multi file projects"
  - the _quote_str docstring (escape unicode characters to \uxxxx; pure-ascii result; encoding caveats; see BaseLangCodeWriter._quote_str and _recode_x80_xff)
  - the build_untouched_content docstring (keeps untouched content, replaces wxGlade blocks with tags; no here-document support)
  - the module header: "Perl code generator; copyright 2002-2004 D.H. aka crazyinsomniac, 2012-2016 Carsten Grohmann, 2017-2020 Dietmar Schwertberger; license MIT - THIS PROGRAM COMES WITH NO WARRANTY"
  - assorted inline regex/flow comments from SourceFileContent and PerlCodeWriter) |
Issue a warning about this __init__ begin tag the optional initial code from the code properties class parent constructor set size here to avoid problems with splitter windows the initial and final code for the contained elements now check if there is initial and final code for the element itself the optional final code from the code properties e.g. '$self->{button_1}->GetId' or '$self->GetId' but this is wrong anyway... we assume name is declared elsewhere check to see if we have to make the var global or not... convert all strings to unicode first check if it's pure ascii convert unicode strings to pure ascii use "raw-unicode-escape" just escaped unicode characters and not default escape sequences convert back to str (unicode) convert Python style to Perl style spacer.name is "<width>, <height>" already Perl stores sizers always in class attributes the code writer instance Language generated by this code generator | 4,170 | en | 0.747556 |
# Auto generated from meta.yaml by namespacegen.py version: 0.4.0
# Generation date: 2020-08-25 16:45
# Schema: metamodel
#
# id: https://w3id.org/biolink/biolinkml/meta
# description: A metamodel for defining biolink related schemas
# license: https://creativecommons.org/publicdomain/zero/1.0/
from collections import defaultdict
from typing import Iterable, Dict, Tuple
from biolinkml.utils.curienamespace import CurieNamespace
GENE = 'gene'
DISEASE = 'disease'
CHEMICAL_SUBSTANCE = 'chemical substance'
SYMBOL = 'Approved_Symbol'
class IdentifierResolverException(RuntimeError):
    """Raised when an identifier cannot be resolved to a registered namespace."""
class BiolinkNameSpace:
    """
    Map of BioLink Model registered URI Namespaces

    Class-level helpers resolve CURIEs and URIs against the namespaces
    registered in ``_namespaces`` below.
    """
    _namespaces = [
        CurieNamespace('OIO', 'http://www.geneontology.org/formats/oboInOwl#'),
        CurieNamespace('bibo', 'http://purl.org/ontology/bibo/'),
        CurieNamespace('biolinkml', 'https://w3id.org/biolink/biolinkml/'),
        CurieNamespace('dcterms', 'http://purl.org/dc/terms/'),
        CurieNamespace('meta', 'https://w3id.org/biolink/biolinkml/meta/'),
        CurieNamespace('oslc', 'http://open-services.net/ns/core#'),
        CurieNamespace('owl', 'http://www.w3.org/2002/07/owl#'),
        CurieNamespace('pav', 'http://purl.org/pav/'),
        CurieNamespace('rdf', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'),
        CurieNamespace('rdfs', 'http://www.w3.org/2000/01/rdf-schema#'),
        CurieNamespace('schema', 'http://schema.org/'),
        CurieNamespace('skos', 'http://www.w3.org/2004/02/skos/core#'),
        CurieNamespace('xsd', 'http://www.w3.org/2001/XMLSchema#'),
    ]
    # class-level prefix -> namespace index, built lazily by _get_prefix_map()
    _prefix_map: Dict[str, CurieNamespace] = {}
    @classmethod
    def _get_prefix_map(cls):
        """Build (once) and return the upper-cased prefix lookup table."""
        if not cls._prefix_map:
            for ns in cls._namespaces:
                # index by upper case for uniformity of search
                cls._prefix_map[ns.prefix.upper()] = ns
        return cls._prefix_map
    @classmethod
    def parse_curie(cls, curie: str) -> Tuple[CurieNamespace, str]:
        """
        Parse a candidate CURIE

        :param curie: candidate curie string
        :return: CURIE namespace and object_id
        """
        # default value if not a CURIE or unknown XMLNS prefix
        found = CurieNamespace("", ""), curie
        if ':' in curie:
            # Split on the *first* colon only, so local identifiers that
            # themselves contain colons (e.g. "MGI:MGI:96677") keep their
            # full object id. The original split on every colon and
            # silently dropped everything after the second one.
            prefix, local_id = curie.split(":", 1)
            # Normalize retrieval with upper case of prefix for lookup
            prefix = prefix.upper()
            if prefix in cls._get_prefix_map():
                found = cls._prefix_map[prefix], local_id
        return found
    @classmethod
    def parse_uri(cls, uri: str) -> Tuple[CurieNamespace, str]:
        """
        Parse a candidate URI

        :param uri: candidate URI string
        :return: namespace and object_id
        """
        found = CurieNamespace("", ""), uri  # default value returned if unknown URI namespace
        # TODO: is there a more efficient lookup scheme here than a linear search of namespaces?
        for ns in cls._namespaces:
            base_uri = str(ns)
            if uri.startswith(base_uri):
                # strip the leading base_uri; slicing (unlike the original
                # str.replace) cannot clobber a repeated occurrence of the
                # base further inside the URI
                object_id = uri[len(base_uri):]
                found = ns, object_id
                break
        return found
    @classmethod
    def parse_identifier(cls, identifier: str) -> Tuple[CurieNamespace, str]:
        """Dispatch *identifier* to the URI parser or the CURIE parser."""
        # trivial case of a null identifier?
        if not identifier:
            return CurieNamespace("", ""), ""
        # check if this is a candidate URI...
        if identifier.lower().startswith("http"):
            # guess that perhaps it is, so try to parse it
            return cls.parse_uri(identifier)
        else:  # attempt to parse as a CURIE
            return cls.parse_curie(identifier)
def object_id(identifier, keep_version=False) -> str:
    """
    Returns the core object_id of a CURIE, with or without the version suffix.

    Note: not designed to be used with a URI (will give an invalid outcome)

    :param identifier: candidate CURIE identifier for processing
    :param keep_version: True if the version string suffix is to be retained in the identifier
    :return: the (possibly version-stripped) local object id
    """
    # trivial case: null/empty input passes straight through
    if not identifier:
        return identifier
    core = identifier.split(":")[1] if ':' in identifier else identifier
    if '.' in core and not keep_version:
        core = core.split(".")[0]
    return core
def fix_curies(identifiers, prefix=''):
    """
    Applies the specified XMLNS prefix to (an) identifier(s) known
    to be "raw" IDs as keys in a dictionary or elements in a list (or a simple string)

    :param identifiers: dict (keys get prefixed), str, or iterable of str
    :param prefix: XMLNS prefix to apply; an empty prefix returns the input unchanged
    :return: same shape as the input, with prefixed identifiers
    :raises RuntimeError: if *identifiers* is of an unsupported type
    """
    if not prefix:
        # return identifiers without modification
        # Caller may already consider them in curie format
        return identifiers
    if isinstance(identifiers, dict):
        curie_dict = defaultdict(dict)
        for key in identifiers.keys():
            curie_dict[prefix + ':' + object_id(key, keep_version=True)] = identifiers[key]
        return curie_dict
    # str must be tested before the generic Iterable case: strings are iterable too
    elif isinstance(identifiers, str):
        # single string to convert
        return prefix + ':' + object_id(identifiers, keep_version=True)
    elif isinstance(identifiers, Iterable):
        return [prefix + ':' + object_id(x, keep_version=True) for x in identifiers]
    else:
        # single well-formed message; the original passed a stray second
        # argument to RuntimeError, producing a garbled error report
        raise RuntimeError(
            f"fix_curies() is not sure how to fix an instance of data type '{type(identifiers)}'"
        )
def curie(identifier) -> str:
    """Render *identifier* (URI or CURIE) as a CURIE; empty input yields ""."""
    # ignore empty strings
    if not identifier:
        return ""
    namespace, local_id = BiolinkNameSpace.parse_identifier(identifier)
    return namespace.curie(local_id)
| tests/test_scripts/output/gennamespace/meta_namespaces.py | 5,957 | Map of BioLink Model registered URI Namespaces
Applies the specified XMLNS prefix to (an) identifier(s) known
to be "raw" IDs as keys in a dictionary or elements in a list (or a simple string)
:param identifiers:
:param prefix:
:return:
Returns the core object_id of a CURIE, with or without the version suffix.
Note: not designed to be used with a URI (will give an invalid outcome)
:param identifier: candidate CURIE identifier for processing
:param keep_version: True if the version string suffix is to be retained in the identifier
:return:
Parse a candidate CURIE
:param curie: candidate curie string
:return: CURIE namespace and object_id
Parse a candidate URI
:param uri: candidate URI string
:return: namespace and object_id
Auto generated from meta.yaml by namespacegen.py version: 0.4.0 Generation date: 2020-08-25 16:45 Schema: metamodel id: https://w3id.org/biolink/biolinkml/meta description: A metamodel for defining biolink related schemas license: https://creativecommons.org/publicdomain/zero/1.0/ class level dictionaries index by upper case for uniformity of search default value if not a CURIE or unknown XMLNS prefix Normalize retrieval with upper case of prefix for lookup default value returned if unknown URI namespace TODO: is there a more efficient lookup scheme here than a linear search of namespaces? simple minded deletion of base_uri to give the object_id trivial case of a null identifier? check if this is a candidate URI... guess that perhaps it is, so try to parse it attempt to parse as a CURIE trivial case: null input value? return identifiers without modification Caller may already consider them in curie format identifiers assumed to be just a single object identifier single string to convert Ignore enpty strings | 1,758 | en | 0.623757 |
from typing import Sequence, Dict, List, Optional
from abc import ABC, abstractmethod
# from nltk.corpus import wordnet
from pymagnitude import Magnitude
from .utils import (
UPPERCASE_RE,
LOWERCASE_RE,
DIGIT_RE,
PUNC_REPEAT_RE,
)
class FeatureExtractor(ABC):
    """Base class for token-level feature extractors.

    Implementations write features for *token* — located at window offset
    *relative_idx* from position *current_idx* in *tokens* — into the
    mutable *features* mapping.
    """

    @abstractmethod
    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        raise NotImplementedError
class BiasFeature(FeatureExtractor):
    """Emits a constant bias feature for every token."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        features["bias"] = 1.0
class TokenFeature(FeatureExtractor):
    """Emits the token identity, keyed by its offset in the window."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        features[f"tok[{relative_idx}]={token}"] = 1.0
class UppercaseFeature(FeatureExtractor):
    """Fires when the token is entirely upper-case."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        if token.isupper():
            features[f"uppercase[{relative_idx}]"] = 1.0
class TitlecaseFeature(FeatureExtractor):
    """Fires when the token is title-cased (per ``str.istitle``)."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        if token.istitle():
            features[f"titlecase[{relative_idx}]"] = 1.0
class InitialTitlecaseFeature(FeatureExtractor):
    """Fires when the token is title-cased AND sits at sentence position 0
    (i.e. current_idx + relative_idx == 0)."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        if current_idx + relative_idx == 0 and token.istitle():
            features[f"initialtitlecase[{relative_idx}]"] = 1.0
class PunctuationFeature(FeatureExtractor):
    """Fires when the whole token is a run of punctuation
    (full match of PUNC_REPEAT_RE)."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        if PUNC_REPEAT_RE.fullmatch(token):
            features[f"punc[{relative_idx}]"] = 1.0
class DigitFeature(FeatureExtractor):
    """Fires when the token contains at least one digit (DIGIT_RE search)."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        if DIGIT_RE.search(token):
            features[f"digit[{relative_idx}]"] = 1.0
class WordShapeFeature(FeatureExtractor):
    """Emits the token's shape: upper-case letters become 'X', lower-case
    letters 'x', digits '0'; all other characters are kept as-is."""

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        # apply the substitutions in the same order as defined:
        # uppercase -> X, lowercase -> x, digits -> 0
        shape = DIGIT_RE.sub("0", LOWERCASE_RE.sub("x", UPPERCASE_RE.sub("X", token)))
        features[f"shape[{relative_idx}]={shape}"] = 1.0
"""
class LikelyAdjectiveFeature(FeatureExtractor):
def extract(
self,
token: str,
current_idx: int,
relative_idx: int,
tokens: Sequence[str],
features: Dict[str, float],
) -> None:
for synset in wordnet.synsets(token):
if synset.name().split('.')[0] == token:
if synset.pos() == 's':
features["adj[%d]" % (relative_idx)] = 1.0
class AfterVerbFeature(FeatureExtractor):
def extract(
self,
token: str,
current_idx: int,
relative_idx: int,
tokens: Sequence[str],
features: Dict[str, float],
) -> None:
if relative_idx != -1:
return
for synset in wordnet.synsets(token):
if synset.name().split('.')[0] == token:
if synset.pos() == 'v':
features["afterverb[%d]" % (relative_idx)] = 1.0
class PosFeature(FeatureExtractor):
def extract(
self,
token: str,
current_idx: int,
relative_idx: int,
tokens: Sequence[str],
features: Dict[str, float],
) -> None:
for synset in wordnet.synsets(token):
if synset.name().split('.')[0] == token:
features[f"adj[{relative_idx}]={synset.pos()}"] = 1.0
"""
class WordVectorFeature(FeatureExtractor):
    """Adds the token's (scaled) word-embedding components as features
    ``v0`` .. ``v{dim-1}``, queried from a pymagnitude vector store."""

    def __init__(self, vectors_path: str, scaling: float = 1.0) -> None:
        self.vectors = Magnitude(vectors_path, normalized=False)
        self.scaling = scaling

    def extract(self, token: str, current_idx: int, relative_idx: int,
                tokens: Sequence[str], features: Dict[str, float]) -> None:
        scaled = self.vectors.query(token) * self.scaling
        # one feature per embedding dimension: v0, v1, ...
        features.update(("v" + str(i), value) for i, value in enumerate(scaled))
class BrownClusterFeature(FeatureExtractor):
    """Adds Brown-cluster path features for the token at the window centre.

    The cluster file is tab-separated ``cluster<TAB>word<TAB>frequency``
    lines. Either the full bit-string path (``cpath=...``) or a set of
    path prefixes (``cprefix<len>=...``) is emitted.
    """

    def __init__(
        self,
        clusters_path: str,
        *,
        use_full_paths: bool = False,
        use_prefixes: bool = False,
        prefixes: Optional[Sequence[int]] = None,
    ):
        if not use_full_paths and not use_prefixes:
            # the original raised a bare ValueError, giving the caller no
            # hint about what was wrong
            raise ValueError(
                "BrownClusterFeature requires use_full_paths or use_prefixes to be True"
            )
        self.prefix_dict: Dict[str, List[str]] = {}
        self.full_path_dict: Dict[str, str] = {}
        self.use_full_paths = use_full_paths
        with open(clusters_path, "r", encoding='utf-8') as cluster_file:
            for line in cluster_file:
                cluster, word, _frequency = line.split("\t")
                if use_full_paths:
                    self.full_path_dict[word] = cluster
                elif use_prefixes:
                    if prefixes is not None:
                        # keep only the requested prefix lengths that fit
                        self.prefix_dict[word] = [
                            cluster[:prefix]
                            for prefix in prefixes
                            if prefix <= len(cluster)
                        ]
                    else:
                        # all prefixes, from length 1 up to the full path
                        self.prefix_dict[word] = [
                            cluster[:prefix] for prefix in range(1, len(cluster) + 1)
                        ]

    def extract(
        self,
        token: str,
        current_idx: int,
        relative_idx: int,
        tokens: Sequence[str],
        features: Dict[str, float],
    ) -> None:
        # cluster features describe only the token at the window centre
        if relative_idx != 0:
            return
        if self.use_full_paths and token in self.full_path_dict:
            features[f"cpath={self.full_path_dict[token]}"] = 1.0
        elif token in self.prefix_dict:
            for prefix in self.prefix_dict[token]:
                features[f"cprefix{len(prefix)}={prefix}"] = 1.0
class WindowedTokenFeatureExtractor:
    """Applies a set of feature extractors over a +/- window_size window
    around each token, producing one feature dict per token."""

    def __init__(self, feature_extractors: Sequence["FeatureExtractor"], window_size: int):
        self.feature_extractors = feature_extractors
        self.window_size = window_size

    def extract(self, tokens: Sequence[str]) -> List[Dict[str, float]]:
        """Return one feature dict per position in *tokens*."""
        all_features: List[Dict[str, float]] = []
        for center in range(len(tokens)):
            feats: Dict[str, float] = {}
            lo = max(0, center - self.window_size)
            hi = min(len(tokens), center + self.window_size + 1)
            for pos in range(lo, hi):
                offset = pos - center
                for extractor in self.feature_extractors:
                    extractor.extract(tokens[pos], center, offset, tokens, feats)
            all_features.append(feats)
        return all_features
| CRF/feature_extractors.py | 7,685 | from nltk.corpus import wordnet | 31 | en | 0.294486 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from functools import partial
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from general.utilTF1.utils import session
from general.kneeOsteoarthritisDataset.KneeOsteoarthritsDataset import KneeOsteoarthritsDataset
_N_CPU = multiprocessing.cpu_count()
def batch_dataset(dataset, batch_size, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
                  map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
    """Wrap a tf.data.Dataset with the usual filter/map/shuffle/batch/repeat/prefetch chain.

    Note: the ``filter`` parameter shadows the ``filter`` builtin; the name is
    kept for API compatibility. ``repeat=-1`` repeats the dataset indefinitely.
    """
    if filter:
        dataset = dataset.filter(filter)
    if map_func:
        dataset = dataset.map(map_func, num_parallel_calls=num_threads)
    if shuffle:
        dataset = dataset.shuffle(buffer_size)
    if drop_remainder:
        # TF1-era API: batch_and_drop_remainder discards the final short batch
        dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
    else:
        dataset = dataset.batch(batch_size)
    dataset = dataset.repeat(repeat).prefetch(prefetch_batch)
    return dataset
class Dataset(object):
    """Iterable wrapper around a tf.data.Dataset that works in both graph
    and eager mode.

    Subclasses build the underlying dataset and pass it to ``_bulid``
    (sic — the misspelled name is kept because subclasses call it).
    """

    def __init__(self):
        self._dataset = None
        self._iterator = None
        self._batch_op = None
        self._sess = None
        self._is_eager = tf.executing_eagerly()
        self._eager_iterator = None

    def __del__(self):
        # close the owned session when the wrapper is garbage collected
        if self._sess:
            self._sess.close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            b = self.get_next()
        except Exception:
            # The original used a bare ``except:``, which also swallows
            # KeyboardInterrupt/SystemExit. Catching Exception still covers
            # the end-of-data error raised by get_next().
            raise StopIteration
        else:
            return b

    # Python 2 style iterator alias
    next = __next__

    def get_next(self):
        """Return the next batch (eager tensors, or session-evaluated values)."""
        if self._is_eager:
            return self._eager_iterator.get_next()
        else:
            return self._sess.run(self._batch_op)

    def reset(self, feed_dict={}):
        """Re-initialize the iterator.

        Note: the mutable default argument is kept for API compatibility;
        it is only read here, never mutated.
        """
        if self._is_eager:
            self._eager_iterator = tfe.Iterator(self._dataset)
        else:
            self._sess.run(self._iterator.initializer, feed_dict=feed_dict)

    def _bulid(self, dataset, sess=None):
        """Attach *dataset*, create the iterator and (in graph mode) a session."""
        self._dataset = dataset
        if self._is_eager:
            self._eager_iterator = tfe.Iterator(dataset)
        else:
            self._iterator = dataset.make_initializable_iterator()
            self._batch_op = self._iterator.get_next()
            if sess:
                self._sess = sess
            else:
                self._sess = session()
        try:
            self.reset()
        except Exception:
            # best effort: some iterators need a feed_dict to initialize and
            # will be reset explicitly by the caller instead
            pass

    @property
    def dataset(self):
        return self._dataset

    @property
    def iterator(self):
        return self._iterator

    @property
    def batch_op(self):
        return self._batch_op
def get_dataset(data_set_path,shuffle=True):
    """Build a partially-applied DiskImageData factory for the knee X-ray set.

    Returns ``(Dataset, img_shape, get_imgs)`` where ``Dataset`` only needs
    ``batch_size`` (and any remaining keyword overrides) to produce an
    iterable dataset.
    """
    # target shape: 256x256, single channel
    img_shape = [256,256, 1]
    # per-example preprocessing: bicubic resize + scale pixel values into [0, 1]
    def _map_func(img,label):
        img = tf.image.resize_images(img, [img_shape[0], img_shape[1]], method=tf.image.ResizeMethod.BICUBIC)
        img = tf.clip_by_value(tf.cast(img, tf.float32), 0, 255) / 255 # / 127.5 - 1
        return img,label
    # collect image paths and class labels from the dataset index
    kneeosteo_train = KneeOsteoarthritsDataset(data_path=data_set_path)
    labels = list(kneeosteo_train.dict_url_class.values())
    paths = list(kneeosteo_train.dict_url_class.keys())
    assert (len(paths) == len(labels))
    # NOTE(review): %f renders the element count as a float; %d was probably intended
    print('The dataset %s has %f elements. ' % (data_set_path, len(labels)))
    Dataset = partial(DiskImageData, img_paths=paths,labels=labels, repeat=1, map_func=_map_func,shuffle=shuffle)
    # index func: returns the batch unchanged
    def get_imgs(batch):
        return batch
    return Dataset, img_shape, get_imgs
def disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
                             map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
    """Disk image batch dataset.

    This function is suitable for jpg and png files

    Arguments:
        img_paths : String list or 1-D tensor, each of which is an image path
        labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
    """
    # build the source dataset: paths alone, paths + several label tensors,
    # or paths + a single label tensor
    if labels is None:
        dataset = tf.data.Dataset.from_tensor_slices(img_paths)
    elif isinstance(labels, tuple):
        dataset = tf.data.Dataset.from_tensor_slices((img_paths,) + tuple(labels))
    else:
        dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels))
    def parse_func(path, *label):
        # read the file and decode it as a single-channel (grayscale) PNG
        img = tf.read_file(path)
        img = tf.image.decode_png(img, 1)
        return (img,) + label
    if map_func:
        # compose: decode first, then the user-supplied map function
        def map_func_(*args):
            return map_func(*parse_func(*args))
    else:
        map_func_ = parse_func
    # dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower
    dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter,
                            map_func_, num_threads, shuffle, buffer_size, repeat)
    return dataset
class DiskImageData(Dataset):
    """DiskImageData.

    This class is suitable for jpg and png files

    Arguments:
        img_paths : String list or 1-D tensor, each of which is an image path
        labels : Label list or tensor, each of which is a corresponding label
    """
    def __init__(self, img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
                 map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):
        super(DiskImageData, self).__init__()
        # build the tf.data pipeline, then attach it via the base-class helper
        dataset = disk_image_batch_dataset(img_paths, batch_size, labels, prefetch_batch, drop_remainder, filter,
                                           map_func, num_threads, shuffle, buffer_size, repeat)
        self._bulid(dataset, sess)
        # number of individual images (not number of batches)
        self._n_data = len(img_paths)
    def __len__(self):
        return self._n_data
| general/utilTF1/dataset.py | 5,854 | DiskImageData.
This class is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an iamge path
labels : Label list or tensor, each of which is a corresponding label
Disk image batch dataset.
This function is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an iamge path
labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
shape dataset / 127.5 - 1 get image pathes index func dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower | 623 | en | 0.682548 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from flask import Flask
from flask import request
from flask import jsonify
from line_sdk import Linebot
LINE_ACCESS_TOKEN = ""
LINE_CHANNEL_SECRET = ""
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def webhook():
    """Line messaging webhook: GET health-check, POST event dispatch."""
    # GET: simple health check / verification response
    if request.method == "GET":
        return "Line Webhook Success."
    # POST: parse and answer incoming Line events
    elif request.method == "POST":
        body = request.get_data(as_text=True)
        signature = request.headers["X-Line-Signature"]
        # Line SDK client, authenticated with the channel credentials
        linebot = Linebot(LINE_ACCESS_TOKEN, LINE_CHANNEL_SECRET)
        # dataLIST = [{status, type, message, userID, replyToken, timestamp}]
        # replyToken = token needed to reply; message = the user's input text
        dataLIST = linebot.parse(body, signature)
        for dataDICT in dataLIST:
            if dataDICT["status"]:
                if dataDICT["type"] == "message":
                    try:
                        # build the customized reply here
                        # dataDICT["message"] => the text the user sent
                        msgSTR = "[MSG] {}".format(dataDICT["message"])
                    except Exception as e:
                        msgSTR = "[ERROR] {}".format(str(e))
                    # respText(reply token, reply content)
                    linebot.respText(dataDICT["replyToken"], msgSTR)
        return jsonify({"status": True, "msg": "Line Webhook Success."})
    # OTHER: any other HTTP verb is rejected
    else:
        return "HTTP_405_METHOD_NOT_ALLOWED"
if __name__ == "__main__":
app.run() | Heroku/Heroku-Linebot-Github/line_app.py | 1,569 | !/usr/bin/env python3 -*- coding:utf-8 -*- GET POST Line dataLIST = [{status, type, message, userID, replyToken, timestamp}] replyToken = 回覆需要的ID , message = 使用者輸入的內容 這裡輸入客製化內容 dataDICT["message"] => 使用者輸入的內容 respText(聊天室ID, 要回覆的內容) OTHER | 238 | zh | 0.527236 |
# All paths are relative to train_val.py file
config = {
'images_path': 'train_val_data/Flicker8k_Dataset/', #Make sure you put that last slash(/)
'train_data_path': 'train_val_data/Flickr8k_text/Flickr_8k.trainImages.txt',
'val_data_path': 'train_val_data/Flickr8k_text/Flickr_8k.devImages.txt',
'captions_path': 'train_val_data/Flickr8k_text/Flickr8k.token.txt',
'tokenizer_path': 'model_data/tokenizer.pkl',
'model_data_path': 'model_data/', #Make sure you put that last slash(/)
'model_load_path': 'model_data/model_vgg16_epoch-17_train_loss-2.2955_val_loss-3.1448.hdf5',
'num_of_epochs': 18,
'max_length': 40, #This is set manually after training of model and required for test.py
'batch_size': 64,
'beam_search_k':3,
'test_data_tuple': [
"https://images.unsplash.com/photo-1502673530728-f79b4cab31b1?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1050&q=80", # Photo by John Price (https://unsplash.com/@johnprice?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash (https://unsplash.com/s/photos/dog?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
"https://images.unsplash.com/photo-1576941089067-2de3c901e126?ixlib=rb-1.2.1&auto=format&fit=crop&w=1443&q=80", # Photo by Jacques Bopp (https://unsplash.com/@jacquesbopp?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash (https://unsplash.com/s/photos/house?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
"https://images.unsplash.com/photo-1542103749-8ef59b94f47e?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1050&q=80" # Photo by Dan (https://unsplash.com/@dann?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash (https://unsplash.com/s/photos/person?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
], #sample images
'test_data_path': 'test_data/', #Make sure you put that last slash(/)
'model_type': 'vgg16', # inceptionv3 or vgg16
'random_seed': 1035
}
rnnConfig = {
'embedding_size': 300,
'LSTM_units': 256,
'dense_units': 256,
'dropout': 0.3
}
| ml/config.py | 2,144 | All paths are relative to train_val.py fileMake sure you put that last slash(/)Make sure you put that last slash(/)This is set manually after training of model and required for test.py Photo by John Price (https://unsplash.com/@johnprice?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash (https://unsplash.com/s/photos/dog?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) Photo by Jacques Bopp (https://unsplash.com/@jacquesbopp?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash (https://unsplash.com/s/photos/house?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) Photo by Dan (https://unsplash.com/@dann?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash (https://unsplash.com/s/photos/person?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)sample imagesMake sure you put that last slash(/) inceptionv3 or vgg16 | 959 | en | 0.752702 |
"""
---
title: Train Feedback Transformer
summary: This is training code with notes for a feedback transformer.
---
# Train Feedback Transformer
This trains a [feedback transformer](index.html) model for auto-regression.
You can pick the original feedback transformer or the new version
where the keys and values are precalculated.
Here's a Colab notebook for training a feedback transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/transformers/feedback/experiment.ipynb)
[](https://web.lab-ml.com/run?uuid=d8eb9416530a11eb8fb50242ac1c0002)
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_helpers.module import Module
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import Encoder, Generator, TransformerConfigs
from labml_nn.transformers.utils import subsequent_mask
class AutoregressiveModel(Module):
    """
    ## Auto regressive model

    Embeds tokens, runs them through the wrapped transformer and projects
    the result back to vocabulary logits.
    """

    def __init__(self, n_vocab: int, d_model: int, transformer: Module):
        super().__init__()
        # Token embedding module
        self.src_embed = nn.Embedding(n_vocab, d_model)
        self.transformer = transformer
        # Projection from model dimension back to vocabulary logits
        self.generator = nn.Linear(d_model, n_vocab)

    def __call__(self, x: torch.Tensor):
        # embed -> transform -> project to logits
        embedded = self.src_embed(x)
        transformed = self.transformer(embedded)
        # second element is `None`: this model returns no state/cache
        return self.generator(transformed), None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be over-ridden when we start the experiment
    """

    model: AutoregressiveModel
    # transformer embedding/model dimension
    d_model: int = 512
    # number of attention heads
    heads: int = 8
    # dropout probability (disabled by default)
    dropout: float = 0.0
    # position-wise feed-forward hidden size
    d_ff: int = 2048
    # number of transformer layers
    n_layers: int = 6
@option(Configs.model)
def feedback_transformer(c: Configs):
    """
    Create [original feedback transformer](index.html).

    Registered as an option for ``Configs.model``; selected with
    ``'model': 'feedback_transformer'``.
    """
    # local import: only needed when this option is selected
    from labml_nn.transformers.feedback import FeedbackTransformer, FeedbackTransformerLayer, \
        FeedbackAttention, FeedForward
    # build the model and move it to the configured device
    return AutoregressiveModel(
        c.n_tokens, c.d_model,
        FeedbackTransformer(
            FeedbackTransformerLayer(d_model=c.d_model,
                                     attn=FeedbackAttention(c.heads, c.d_model, c.dropout),
                                     feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
                                     dropout_prob=c.dropout),
            c.n_layers)).to(c.device)
@option(Configs.model)
def feedback_transformer_kv(c: Configs):
    """
    Create [updated feedback transformer](index.html#kv_shared), with precalculated keys and values.

    Registered as an option for ``Configs.model``; selected with
    ``'model': 'feedback_transformer_kv'``.
    """
    # local import: only needed when this option is selected
    from labml_nn.transformers.feedback import FeedbackTransformerKV, FeedbackTransformerLayer, \
        FeedbackAttention, FeedForward
    # build the model and move it to the configured device
    return AutoregressiveModel(
        c.n_tokens, c.d_model,
        FeedbackTransformerKV(
            FeedbackTransformerLayer(d_model=c.d_model,
                                     attn=FeedbackAttention(c.heads, c.d_model, c.dropout,
                                                            is_kv_precomputed=True),
                                     feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
                                     dropout_prob=c.dropout),
            c.n_layers, c.d_model, c.heads)).to(c.device)
def main():
    """Create, configure and run the feedback transformer experiment."""
    # Create experiment
    experiment.create(name="feedback_transformer")
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'text': 'tiny_shakespeare',
                        'optimizer.learning_rate': 1.0,
                        'optimizer.optimizer': 'Noam',
                        'prompt': 'It is',
                        'prompt_separator': '',
                        # Use `feedback_transformer` for original feedback transformer
                        'model': 'feedback_transformer_kv',
                        'train_loader': 'shuffled_train_loader',
                        'valid_loader': 'shuffled_valid_loader',
                        'seq_len': 128,
                        'epochs': 128,
                        'batch_size': 64,
                        'inner_iterations': 25})
    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))
    # Start the experiment
    with experiment.start():
        # Run the training loop
        conf.run()
if __name__ == '__main__':
main()
| labml_nn/transformers/feedback/experiment.py | 4,893 | ## Auto regressive model
## Configurations
The default configs can and will be over-ridden when we start the experiment
Create [original feedback transformer](index.html).
Create [updated feedback transformer](index.html#kv_shared), with precalculated keys and values.
---
title: Train Feedback Transformer
summary: This is training code with notes for a feedback transformer.
---
# Train Feedback Transformer
This trains a [feedback transformer](index.html) model for auto-regression.
You can pick the original feedback transformer or the new version
where the keys and values are precalculated.
Here's a Colab notebook for training a feedback transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/transformers/feedback/experiment.ipynb)
[](https://web.lab-ml.com/run?uuid=d8eb9416530a11eb8fb50242ac1c0002)
Token embedding module Embed the tokens Run it through the the transformer Generate logits of the next token Create experiment Create configs Load configurations A dictionary of configurations to override Use `feedback_transformer` for original feedback transformer Set models for saving and loading Start the experiment Run the training loop | 1,363 | en | 0.641244 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pathlib import Path
from typing import cast
import toml
sys.path.insert(0, os.path.abspath("../../"))
# -- Project information -----------------------------------------------------
project = "dataprep"
copyright = "2020, SFU Database System Lab"
author = "SFU Database System Lab"
# The full version, including alpha/beta/rc tags
def get_version() -> str:
    """Return the dataprep release version declared in pyproject.toml.

    Reads ``pyproject.toml`` two directories above this file and extracts
    ``tool.poetry.version`` so the rendered docs always match the package.

    Returns
    -------
    str
        The version string, e.g. ``"0.2.14"``.
    """
    path = Path(__file__).resolve().parents[2] / "pyproject.toml"
    # read_text() closes the file; the previous bare open() leaked the handle.
    pyproject = toml.loads(path.read_text(encoding="utf-8"))
    return cast(str, pyproject["tool"]["poetry"]["version"])
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "nbsphinx",
    "sphinx_autodoc_typehints",
]
# Render type hints in the description body rather than in the signature.
autodoc_typehints = "description"
# Napoleon settings
# Docstrings in this project are NumPy style, so Google-style parsing is off.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
# autodoc_default_options = {
# "members": True,
# "member-order": "bysource",
# "special-members": "__init__",
# }
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "nature"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| docs/source/conf.py | 3,065 | Get the library version from pyproject.toml
Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Napoleon settings autodoc_default_options = { "members": True, "member-order": "bysource", "special-members": "__init__", } Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". | 1,713 | en | 0.678744 |
import os
from os import path
import stat
import mmap
import directio
from setting import (
LONGHORN_SOCKET_DIR, LONGHORN_DEV_DIR, PAGE_SIZE,
)
def readat_direct(dev, offset, length):
    """Read ``length`` bytes at ``offset`` from block device ``dev`` with O_DIRECT.

    O_DIRECT requires page-aligned transfers, so the request is widened to
    whole pages and the slice of interest is cut out of the returned buffer.
    Either the read starts mid-page and must stay within that single page, or
    it starts page-aligned and must cover whole pages.
    """
    # BUG FIX: use floor division. Under Python 3, `/` yields a float, which
    # breaks os.lseek and the final slice; `//` matches the Python 2 behavior.
    pg = offset // PAGE_SIZE
    in_page_offset = offset % PAGE_SIZE
    # either read less than a page, or whole pages
    if in_page_offset != 0:
        # unaligned start: the read must not cross a page boundary
        assert pg == (offset + length) // PAGE_SIZE
        to_read = PAGE_SIZE
    else:
        assert length % PAGE_SIZE == 0
        to_read = length
    pg_offset = pg * PAGE_SIZE
    f = os.open(dev, os.O_DIRECT | os.O_RDONLY)
    try:
        os.lseek(f, pg_offset, os.SEEK_SET)
        ret = directio.read(f, to_read)
    finally:
        os.close(f)
    return ret[in_page_offset: in_page_offset + length]
def writeat_direct(dev, offset, data):
    """Write ``data`` at ``offset`` on block device ``dev`` with O_DIRECT.

    Performs a read-modify-write of the containing page: the current page is
    read back, patched in an mmap buffer (page-aligned memory, as required by
    O_DIRECT), and written out. The write must not cross a page boundary.

    Returns the number of bytes written by ``directio.write``.
    """
    # BUG FIX: floor division — `/` is true division on Python 3 and would
    # produce float page numbers; `//` preserves the Python 2 semantics.
    pg = offset // PAGE_SIZE
    # don't support across page write
    assert pg == (offset + len(data)) // PAGE_SIZE
    pg_offset = pg * PAGE_SIZE
    f = os.open(dev, os.O_DIRECT | os.O_RDWR)
    # anonymous mmap gives a page-aligned buffer, which O_DIRECT requires
    m = mmap.mmap(-1, PAGE_SIZE)
    try:
        os.lseek(f, pg_offset, os.SEEK_SET)
        pg_data = readat_direct(dev, pg_offset, PAGE_SIZE)
        m.write(pg_data)
        m.seek(offset % PAGE_SIZE)
        m.write(data)
        ret = directio.write(f, m)
    finally:
        m.close()
        os.close(f)
    return ret
def get_socket_path(volume):
    """Return the control-socket path for the given longhorn volume."""
    sock_name = "longhorn-%s.sock" % volume
    return path.join(LONGHORN_SOCKET_DIR, sock_name)
class blockdev:
    """Thin wrapper around a longhorn block-device node."""

    def __init__(self, volume):
        # the device node for a volume lives under the longhorn device dir
        self.dev = path.join(LONGHORN_DEV_DIR, volume)

    def readat(self, offset, length):
        """Read ``length`` bytes starting at ``offset`` via buffered I/O."""
        with open(self.dev, 'r') as handle:
            handle.seek(offset)
            return handle.read(length)
        # return readat_direct(self.dev, offset, length)

    def writeat(self, offset, data):
        """Write ``data`` at ``offset``, bypassing the page cache (O_DIRECT)."""
        return writeat_direct(self.dev, offset, data)

    def ready(self):
        """Return True once the device node exists and is a block device."""
        return os.path.exists(self.dev) and stat.S_ISBLK(os.stat(self.dev).st_mode)
| integration/data/frontend.py | 2,042 | either read less than a page, or whole pages don't support across page write return readat_direct(self.dev, offset, length) | 123 | en | 0.731409 |
import datetime
import string
from collections import namedtuple
from distutils.version import LooseVersion
from random import choices
from typing import Optional, Type
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.tests.extension.base import (
BaseArithmeticOpsTests,
BaseBooleanReduceTests,
BaseCastingTests,
BaseComparisonOpsTests,
BaseConstructorsTests,
BaseDtypeTests,
BaseGetitemTests,
BaseGroupbyTests,
BaseInterfaceTests,
BaseMethodsTests,
BaseMissingTests,
BaseNoReduceTests,
BaseNumericReduceTests,
BaseParsingTests,
BasePrintingTests,
BaseReshapingTests,
BaseSetitemTests,
)
from fletcher import FletcherBaseDtype
if LooseVersion(pd.__version__) >= "0.25.0":
# imports of pytest fixtures needed for derived unittest classes
from pandas.tests.extension.conftest import ( # noqa: F401
as_array, # noqa: F401
use_numpy, # noqa: F401
groupby_apply_op, # noqa: F401
as_frame, # noqa: F401
as_series, # noqa: F401
)
# Feature gate: several pandas extension-test hooks changed signature in 1.1.0.
PANDAS_GE_1_1_0 = LooseVersion(pd.__version__) >= "1.1.0"
# Bundle of per-dtype example data used to parametrize the extension tests.
FletcherTestType = namedtuple(
    "FletcherTestType",
    "dtype data data_missing data_for_grouping "
    "data_for_sorting data_missing_for_sorting data_repeated",
)
def is_arithmetic_type(arrow_dtype: pa.DataType) -> bool:
    """Return True if ``arrow_dtype`` supports arithmetic (integer, floating or decimal)."""
    checks = (pa.types.is_integer, pa.types.is_floating, pa.types.is_decimal)
    return any(check(arrow_dtype) for check in checks)
# Custom pytest markers consumed by the autouse fixtures below: each carries a
# list of predicates over the pyarrow dtype; matching tests are skipped or
# xfailed for that dtype.
# NOTE(review): "artithmetic" and "constuctor" are misspelled but the names
# are referenced throughout this module, so renaming must be a separate change.
skip_non_artithmetic_type = pytest.mark.skip_by_type_filter(
    [lambda x: not is_arithmetic_type(x)]
)
# List arrays cannot yet be built from scalars.
xfail_list_scalar_constuctor_not_implemented = pytest.mark.xfail_by_type_filter(
    [pa.types.is_list], "constructor from scalars is not implemented for lists"
)
# Equality is undefined for list arrays.
xfail_list_equals_not_implemented = pytest.mark.xfail_by_type_filter(
    [pa.types.is_list], "== is not implemented for lists"
)
# In-place assignment is unsupported for list arrays.
xfail_list_setitem_not_implemented = pytest.mark.xfail_by_type_filter(
    [pa.types.is_list], "__setitem__ is not implemented for lists"
)
# Arrow cannot dictionary-encode list arrays, which factorize/unique rely on.
xfail_missing_list_dict_encode = pytest.mark.xfail_by_type_filter(
    [pa.types.is_list],
    "ArrowNotImplementedError: dictionary-encode not implemented for list<item: string>",
)
# Booleans only have two distinct values, below what some base tests require.
xfail_bool_too_few_uniques = pytest.mark.xfail_by_type_filter(
    [pa.types.is_boolean], "Test requires at least 3 unique values"
)
# One FletcherTestType per supported arrow dtype; the fixtures below expose
# these fields (data, data_missing, grouping/sorting variants, generator) to
# the pandas extension test-suite classes.
test_types = [
    # utf8 strings (includes non-ASCII and emoji code points)
    FletcherTestType(
        pa.string(),
        ["🙈", "Ö", "Č", "a", "B"] * 20,
        [None, "A"],
        ["B", "B", None, None, "A", "A", "B", "C"],
        ["B", "C", "A"],
        ["B", None, "A"],
        lambda: choices(list(string.ascii_letters), k=10),
    ),
    # booleans
    FletcherTestType(
        pa.bool_(),
        [True, False, True, True, False] * 20,
        [None, False],
        [True, True, None, None, False, False, True, False],
        [True, False, False],
        [True, None, False],
        lambda: choices([True, False], k=10),
    ),
    # 8-bit integers
    FletcherTestType(
        pa.int8(),
        # Use small values here so that np.prod stays in int32
        [2, 1, 1, 2, 1] * 20,
        [None, 1],
        [2, 2, None, None, -100, -100, 2, 100],
        [2, 100, -10],
        [2, None, -10],
        lambda: choices(list(range(100)), k=10),
    ),
    # 16-bit integers
    FletcherTestType(
        pa.int16(),
        # Use small values here so that np.prod stays in int32
        [2, 1, 3, 2, 1] * 20,
        [None, 1],
        [2, 2, None, None, -100, -100, 2, 100],
        [2, 100, -10],
        [2, None, -10],
        lambda: choices(list(range(100)), k=10),
    ),
    # 32-bit integers
    FletcherTestType(
        pa.int32(),
        # Use small values here so that np.prod stays in int32
        [2, 1, 3, 2, 1] * 20,
        [None, 1],
        [2, 2, None, None, -100, -100, 2, 100],
        [2, 100, -10],
        [2, None, -10],
        lambda: choices(list(range(100)), k=10),
    ),
    # 64-bit integers
    FletcherTestType(
        pa.int64(),
        # Use small values here so that np.prod stays in int64
        [2, 1, 3, 2, 1] * 20,
        [None, 1],
        [2, 2, None, None, -100, -100, 2, 100],
        [2, 100, -10],
        [2, None, -10],
        lambda: choices(list(range(100)), k=10),
    ),
    # double-precision floats
    FletcherTestType(
        pa.float64(),
        [2, 1.0, 1.0, 5.5, 6.6] * 20,
        [None, 1.1],
        [2.5, 2.5, None, None, -100.1, -100.1, 2.5, 100.1],
        [2.5, 100.99, -10.1],
        [2.5, None, -10.1],
        lambda: choices([2.5, 1.0, -1.0, 0, 66.6], k=10),
    ),
    # Most of the tests fail as assert_extension_array_equal casts to numpy object
    # arrays and on them equality is not defined.
    pytest.param(
        FletcherTestType(
            pa.list_(pa.string()),
            [["B", "C"], ["A"], [None], ["A", "A"], []] * 20,
            [None, ["A"]],
            [["B"], ["B"], None, None, ["A"], ["A"], ["B"], ["C"]],
            [["B"], ["C"], ["A"]],
            [["B"], None, ["A"]],
            lambda: choices([["B", "C"], ["A"], [None], ["A", "A"]], k=10),
        )
    ),
    # dates with millisecond resolution
    FletcherTestType(
        pa.date64(),
        [
            datetime.date(2015, 1, 1),
            datetime.date(2010, 12, 31),
            datetime.date(1970, 1, 1),
            datetime.date(1900, 3, 31),
            datetime.date(1999, 12, 31),
        ]
        * 20,
        [None, datetime.date(2015, 1, 1)],
        [
            datetime.date(2015, 2, 2),
            datetime.date(2015, 2, 2),
            None,
            None,
            datetime.date(2015, 1, 1),
            datetime.date(2015, 1, 1),
            datetime.date(2015, 2, 2),
            datetime.date(2015, 3, 3),
        ],
        [
            datetime.date(2015, 2, 2),
            datetime.date(2015, 3, 3),
            datetime.date(2015, 1, 1),
        ],
        [datetime.date(2015, 2, 2), None, datetime.date(2015, 1, 1)],
        lambda: choices(list(pd.date_range("2010-1-1", "2011-1-1").date), k=10),
    ),
]
@pytest.fixture(params=[True, False])
def box_in_series(request):
    """Whether to box the data in a Series (parametrized over both cases)."""
    return request.param
@pytest.fixture(params=test_types)
def fletcher_type(request):
    """Parametrize the suite over every FletcherTestType in ``test_types``."""
    return request.param
@pytest.fixture(autouse=True)
def skip_by_type_filter(request, fletcher_type):
    """Skip the test if any ``skip_by_type_filter`` marker predicate matches its dtype."""
    if request.node.get_closest_marker("skip_by_type_filter"):
        for marker in request.node.iter_markers("skip_by_type_filter"):
            for func in marker.args[0]:
                if func(fletcher_type.dtype):
                    pytest.skip(f"skipped for type: {fletcher_type}")
@pytest.fixture(autouse=True)
def xfail_by_type_filter(request, fletcher_type):
    """Xfail the test if any ``xfail_by_type_filter`` marker predicate matches its dtype."""
    if request.node.get_closest_marker("xfail_by_type_filter"):
        for marker in request.node.iter_markers("xfail_by_type_filter"):
            for func in marker.args[0]:
                if func(fletcher_type.dtype):
                    pytest.xfail(f"XFAIL for type: {fletcher_type}")
@pytest.fixture
def dtype(fletcher_type, fletcher_dtype):
    """Fletcher extension dtype wrapping the current arrow dtype."""
    return fletcher_dtype(fletcher_type.dtype)
@pytest.fixture
def data(fletcher_type, fletcher_array):
    """Length-100 fletcher array of example data for the current dtype."""
    return fletcher_array(fletcher_type.data, dtype=fletcher_type.dtype)
@pytest.fixture
def data_for_twos(dtype, fletcher_type, fletcher_array):
    """Array of all twos for numeric dtypes; None otherwise (divmod tests)."""
    if dtype._is_numeric:
        return fletcher_array([2] * 100, dtype=fletcher_type.dtype)
    else:
        return None
@pytest.fixture
def data_missing(fletcher_type, fletcher_array):
    """Two-element array [NA, valid] for the current dtype."""
    return fletcher_array(fletcher_type.data_missing, dtype=fletcher_type.dtype)
@pytest.fixture
def data_repeated(fletcher_type, fletcher_array):
    """Return a generator factory producing fresh example arrays.

    The yielded callable ``gen(count)`` generates ``count`` independent
    fletcher arrays of randomly chosen example data.
    """
    # The stray `pass  # noqa` statement that used to sit here was dead code
    # and has been removed; the fixture yields the generator factory below.

    def gen(count):
        for _ in range(count):
            yield fletcher_array(
                fletcher_type.data_repeated(), dtype=fletcher_type.dtype
            )

    yield gen
@pytest.fixture
def data_for_grouping(fletcher_type, fletcher_array):
    """Fixture with data for factorization, grouping, and unique tests.

    Expected to be like [B, B, NA, NA, A, A, B, C]
    Where A < B < C and NA is missing
    """
    return fletcher_array(fletcher_type.data_for_grouping, dtype=fletcher_type.dtype)
@pytest.fixture
def data_for_sorting(fletcher_type, fletcher_array):
    """Length-3 array with a known sort order.

    This should be three items [B, C, A] with
    A < B < C
    """
    return fletcher_array(fletcher_type.data_for_sorting, dtype=fletcher_type.dtype)
@pytest.fixture
def data_missing_for_sorting(fletcher_type, fletcher_array):
    """Length-3 array with a known sort order.

    This should be three items [B, NA, A] with
    A < B and NA missing.
    """
    return fletcher_array(
        fletcher_type.data_missing_for_sorting, dtype=fletcher_type.dtype
    )
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
    """
    Return a simple fixture for testing keys in sorting methods.

    Tests None (no key) and the identity key.
    """
    return request.param
class TestBaseCasting(BaseCastingTests):
    """Run pandas' casting test suite unchanged against fletcher arrays."""

    pass
class TestBaseConstructors(BaseConstructorsTests):
    """Constructor tests with fletcher-specific expected failures."""

    def test_from_dtype(self, data):
        # Known upstream limitation for string dtype; xfail before delegating.
        if pa.types.is_string(data.dtype.arrow_dtype):
            pytest.xfail(
                "String construction is failing as Pandas wants to pass the FletcherChunkedDtype to NumPy"
            )
        BaseConstructorsTests.test_from_dtype(self, data)

    @xfail_list_scalar_constuctor_not_implemented
    def test_series_constructor_scalar_with_index(self, data, dtype):
        # Hook only exists in pandas >= 1.1.0.
        if PANDAS_GE_1_1_0:
            BaseConstructorsTests.test_series_constructor_scalar_with_index(
                self, data, dtype
            )
class TestBaseDtype(BaseDtypeTests):
    """Run pandas' dtype test suite unchanged against fletcher dtypes."""

    pass
class TestBaseGetitemTests(BaseGetitemTests):
    """Indexing tests; the string dtype is expected to fail upstream."""

    def test_loc_iloc_frame_single_dtype(self, data):
        if pa.types.is_string(data.dtype.arrow_dtype):
            # BUG FIX: the previous `pytest.mark.xfail(...)` bare statement
            # only constructed a marker object and discarded it, silently
            # skipping the check; pytest.xfail() records the expected failure.
            pytest.xfail(reason="https://github.com/pandas-dev/pandas/issues/27673")
        else:
            BaseGetitemTests.test_loc_iloc_frame_single_dtype(self, data)
class TestBaseGroupbyTests(BaseGroupbyTests):
    """Groupby tests; list dtypes cannot be dictionary-encoded and are xfailed."""

    @xfail_bool_too_few_uniques
    @xfail_missing_list_dict_encode
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        BaseGroupbyTests.test_groupby_extension_agg(self, as_index, data_for_grouping)

    @xfail_bool_too_few_uniques
    @xfail_missing_list_dict_encode
    def test_groupby_extension_no_sort(self, data_for_grouping):
        BaseGroupbyTests.test_groupby_extension_no_sort(self, data_for_grouping)

    @xfail_missing_list_dict_encode
    def test_groupby_extension_transform(self, data_for_grouping):
        # Booleans collapse to two groups, so the base expectation is wrong;
        # assert against the fletcher-specific expectation instead.
        if pa.types.is_boolean(data_for_grouping.dtype.arrow_dtype):
            valid = data_for_grouping[~data_for_grouping.isna()]
            df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
            result = df.groupby("B").A.transform(len)
            # Expected grouping is different as we only have two non-null values
            expected = pd.Series([3, 3, 3, 3, 3, 3], name="A")
            self.assert_series_equal(result, expected)
        else:
            BaseGroupbyTests.test_groupby_extension_transform(self, data_for_grouping)

    @xfail_missing_list_dict_encode
    def test_groupby_extension_apply(
        self, data_for_grouping, groupby_apply_op  # noqa: F811
    ):
        BaseGroupbyTests.test_groupby_extension_apply(
            self, data_for_grouping, groupby_apply_op
        )
class TestBaseInterfaceTests(BaseInterfaceTests):
    """Array-interface contract tests with fletcher-specific expected failures."""

    @pytest.mark.xfail(
        reason="view or self[:] returns a shallow copy in-place edits are not backpropagated"
    )
    def test_view(self, data):
        BaseInterfaceTests.test_view(self, data)

    def test_array_interface(self, data):
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("Not sure whether this test really holds for list")
        else:
            BaseInterfaceTests.test_array_interface(self, data)

    @xfail_list_setitem_not_implemented
    def test_copy(self, data):
        # BUG FIX: this previously delegated to test_array_interface, so the
        # base class's copy semantics were never actually exercised.
        BaseInterfaceTests.test_copy(self, data)
class TestBaseMethodsTests(BaseMethodsTests):
    """Method tests; overrides gate on pandas version and known dtype limits."""

    # https://github.com/pandas-dev/pandas/issues/22843
    @pytest.mark.skip(reason="Incorrect expected")
    @pytest.mark.parametrize("dropna", [True, False])
    def test_value_counts(self, all_data, dropna, dtype):
        pass

    @xfail_list_equals_not_implemented
    @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
    def test_equals(self, data, na_value, as_series, box):  # noqa: F811
        # Base hook only exists in pandas >= 1.1.0.
        if PANDAS_GE_1_1_0:
            BaseMethodsTests.test_equals(self, data, na_value, as_series, box)

    @xfail_missing_list_dict_encode
    def test_value_counts_with_normalize(self, data):
        if PANDAS_GE_1_1_0:
            BaseMethodsTests.test_value_counts_with_normalize(self, data)

    def test_combine_le(self, data_repeated):
        # GH 20825
        # Test that combine works when doing a <= (le) comparison
        # Fletcher returns 'fletcher_chunked[bool]' instead of np.bool as dtype
        orig_data1, orig_data2 = data_repeated(2)
        if pa.types.is_list(orig_data1.dtype.arrow_dtype):
            return pytest.skip("__le__ not implemented for list scalars with None")
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 <= x2)
        expected = pd.Series(
            orig_data1._from_sequence(
                [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))]
            )
        )
        self.assert_series_equal(result, expected)
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 <= x2)
        expected = pd.Series(
            orig_data1._from_sequence([a <= val for a in list(orig_data1)])
        )
        self.assert_series_equal(result, expected)

    def test_combine_add(self, data_repeated, dtype):
        # dates cannot be added to each other, so skip for date64 dtypes
        if dtype.name in [
            "fletcher_chunked[date64[ms]]",
            "fletcher_continuous[date64[ms]]",
        ]:
            pytest.skip(
                "unsupported operand type(s) for +: 'datetime.date' and 'datetime.date"
            )
        else:
            BaseMethodsTests.test_combine_add(self, data_repeated)

    @xfail_bool_too_few_uniques
    def test_argsort(self, data_for_sorting):
        BaseMethodsTests.test_argsort(self, data_for_sorting)

    @xfail_bool_too_few_uniques
    def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
        if PANDAS_GE_1_1_0:
            BaseMethodsTests.test_argmin_argmax(
                self, data_for_sorting, data_missing_for_sorting, na_value
            )
        else:
            pass

    @pytest.mark.parametrize("ascending", [True, False])
    @xfail_bool_too_few_uniques
    @xfail_missing_list_dict_encode
    def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
        # pandas 1.1.0 added the sort key argument to the base test.
        if PANDAS_GE_1_1_0:
            BaseMethodsTests.test_sort_values(
                self, data_for_sorting, ascending, sort_by_key
            )
        else:
            BaseMethodsTests.test_sort_values(self, data_for_sorting, ascending)

    @pytest.mark.parametrize("na_sentinel", [-1, -2])
    @xfail_bool_too_few_uniques
    @xfail_missing_list_dict_encode
    def test_factorize(self, data_for_grouping, na_sentinel):
        BaseMethodsTests.test_factorize(self, data_for_grouping, na_sentinel)

    @pytest.mark.parametrize("na_sentinel", [-1, -2])
    @xfail_bool_too_few_uniques
    @xfail_missing_list_dict_encode
    def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
        BaseMethodsTests.test_factorize_equivalence(
            self, data_for_grouping, na_sentinel
        )

    @pytest.mark.parametrize("ascending", [True, False])
    @xfail_missing_list_dict_encode
    def test_sort_values_frame(self, data_for_sorting, ascending):
        BaseMethodsTests.test_sort_values_frame(self, data_for_sorting, ascending)

    @xfail_bool_too_few_uniques
    def test_searchsorted(self, data_for_sorting, as_series):  # noqa: F811
        BaseMethodsTests.test_searchsorted(self, data_for_sorting, as_series)

    @pytest.mark.parametrize("box", [pd.Series, lambda x: x])
    @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
    @xfail_missing_list_dict_encode
    def test_unique(self, data, box, method):
        BaseMethodsTests.test_unique(self, data, box, method)

    @xfail_missing_list_dict_encode
    def test_factorize_empty(self, data):
        BaseMethodsTests.test_factorize_empty(self, data)

    def test_fillna_copy_frame(self, data_missing):
        if pa.types.is_list(data_missing.dtype.arrow_dtype):
            pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
        else:
            BaseMethodsTests.test_fillna_copy_frame(self, data_missing)

    def test_fillna_copy_series(self, data_missing):
        if pa.types.is_list(data_missing.dtype.arrow_dtype):
            pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
        else:
            BaseMethodsTests.test_fillna_copy_series(self, data_missing)

    @xfail_list_setitem_not_implemented
    def test_combine_first(self, data):
        BaseMethodsTests.test_combine_first(self, data)

    @xfail_list_setitem_not_implemented
    def test_shift_0_periods(self, data):
        if PANDAS_GE_1_1_0:
            BaseMethodsTests.test_shift_0_periods(self, data)

    def test_shift_fill_value(self, data):
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("pandas' isna cannot cope with lists")
        else:
            BaseMethodsTests.test_shift_fill_value(self, data)

    def test_hash_pandas_object_works(self, data, as_frame):  # noqa: F811
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("Fails on hashing ndarrays")
        else:
            BaseMethodsTests.test_hash_pandas_object_works(self, data, as_frame)

    @xfail_list_setitem_not_implemented
    def test_where_series(self, data, na_value, as_frame):  # noqa: F811
        BaseMethodsTests.test_where_series(self, data, na_value, as_frame)
class TestBaseMissingTests(BaseMissingTests):
    """Missing-data tests; fillna with a list scalar is a known pandas limit."""

    @pytest.mark.parametrize("method", ["ffill", "bfill"])
    def test_fillna_series_method(self, data_missing, method):
        BaseMissingTests.test_fillna_series_method(self, data_missing, method)

    def test_fillna_frame(self, data_missing):
        if pa.types.is_list(data_missing.dtype.arrow_dtype):
            pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
        else:
            BaseMissingTests.test_fillna_frame(self, data_missing)

    def test_fillna_scalar(self, data_missing):
        if pa.types.is_list(data_missing.dtype.arrow_dtype):
            pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
        else:
            BaseMissingTests.test_fillna_scalar(self, data_missing)

    def test_fillna_series(self, data_missing):
        if pa.types.is_list(data_missing.dtype.arrow_dtype):
            pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
        else:
            BaseMissingTests.test_fillna_series(self, data_missing)
class TestBaseReshapingTests(BaseReshapingTests):
    """Concat/merge/reshape tests with dtype-specific skips and xfails."""

    def test_concat_mixed_dtypes(self, data, dtype):
        arrow_dtype = data.dtype.arrow_dtype
        if (
            pa.types.is_integer(arrow_dtype)
            or pa.types.is_floating(arrow_dtype)
            or pa.types.is_boolean(arrow_dtype)
        ):
            # https://github.com/pandas-dev/pandas/issues/21792
            pytest.skip("pd.concat(int64, fletcher_chunked[int64] yields int64")
        elif pa.types.is_temporal(arrow_dtype):
            # https://github.com/pandas-dev/pandas/issues/33331
            pytest.xfail("pd.concat(temporal, categorical) mangles dates")
        else:
            BaseReshapingTests.test_concat_mixed_dtypes(self, data)

    def test_merge_on_extension_array(self, data):
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("pandas tries to hash scalar lists")
        else:
            BaseReshapingTests.test_merge_on_extension_array(self, data)

    def test_merge_on_extension_array_duplicates(self, data):
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("pandas tries to hash scalar lists")
        else:
            BaseReshapingTests.test_merge_on_extension_array_duplicates(self, data)

    @xfail_list_setitem_not_implemented
    def test_ravel(self, data):
        BaseReshapingTests.test_ravel(self, data)

    @xfail_list_setitem_not_implemented
    @pytest.mark.xfail(reason="Views don't update their parent #96")
    def test_transpose(self, data):
        # test_transpose only exists in some pandas versions.
        if hasattr(BaseReshapingTests, "test_transpose"):
            BaseReshapingTests.test_transpose(self, data)
class TestBaseSetitemTests(BaseSetitemTests):
    """__setitem__ tests; unsupported for list dtypes, version-gated elsewhere."""

    @xfail_list_setitem_not_implemented
    def test_setitem_scalar_series(self, data, box_in_series):
        BaseSetitemTests.test_setitem_scalar_series(self, data, box_in_series)

    @xfail_list_setitem_not_implemented
    def test_setitem_sequence(self, data, box_in_series):
        BaseSetitemTests.test_setitem_sequence(self, data, box_in_series)

    # NOTE(review): "indxer" mirrors the overridden base method's name —
    # renaming it here would break the override; confirm against the pinned
    # pandas version before changing.
    @xfail_list_setitem_not_implemented
    def test_setitem_empty_indxer(self, data, box_in_series):
        BaseSetitemTests.test_setitem_empty_indxer(self, data, box_in_series)

    @xfail_list_setitem_not_implemented
    def test_setitem_sequence_broadcasts(self, data, box_in_series):
        BaseSetitemTests.test_setitem_sequence_broadcasts(self, data, box_in_series)

    @pytest.mark.parametrize("setter", ["loc", "iloc"])
    @xfail_list_setitem_not_implemented
    def test_setitem_scalar(self, data, setter):
        BaseSetitemTests.test_setitem_scalar(self, data, setter)

    @xfail_list_setitem_not_implemented
    def test_setitem_loc_scalar_mixed(self, data):
        BaseSetitemTests.test_setitem_loc_scalar_mixed(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_loc_scalar_single(self, data):
        BaseSetitemTests.test_setitem_loc_scalar_single(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_loc_scalar_multiple_homogoneous(self, data):
        BaseSetitemTests.test_setitem_loc_scalar_multiple_homogoneous(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_iloc_scalar_mixed(self, data):
        BaseSetitemTests.test_setitem_iloc_scalar_mixed(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_iloc_scalar_single(self, data):
        BaseSetitemTests.test_setitem_iloc_scalar_single(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
        BaseSetitemTests.test_setitem_iloc_scalar_multiple_homogoneous(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_nullable_mask(self, data):
        # Hook was removed upstream in pandas 1.1.0.
        if not PANDAS_GE_1_1_0:
            BaseSetitemTests.test_setitem_nullable_mask(self, data)

    @pytest.mark.parametrize("as_callable", [True, False])
    @pytest.mark.parametrize("setter", ["loc", None])
    @xfail_list_setitem_not_implemented
    def test_setitem_mask_aligned(self, data, as_callable, setter):
        BaseSetitemTests.test_setitem_mask_aligned(self, data, as_callable, setter)

    @pytest.mark.parametrize("setter", ["loc", None])
    @xfail_list_setitem_not_implemented
    def test_setitem_mask_broadcast(self, data, setter):
        BaseSetitemTests.test_setitem_mask_broadcast(self, data, setter)

    @xfail_list_setitem_not_implemented
    def test_setitem_slice(self, data, box_in_series):
        if PANDAS_GE_1_1_0:
            BaseSetitemTests.test_setitem_slice(self, data, box_in_series)

    @xfail_list_setitem_not_implemented
    def test_setitem_loc_iloc_slice(self, data):
        if PANDAS_GE_1_1_0:
            BaseSetitemTests.test_setitem_loc_iloc_slice(self, data)

    @xfail_list_setitem_not_implemented
    def test_setitem_slice_array(self, data):
        BaseSetitemTests.test_setitem_slice_array(self, data)

    @xfail_list_setitem_not_implemented
    @pytest.mark.parametrize(
        "mask",
        [
            np.array([True, True, True, False, False]),
            pd.array([True, True, True, False, False], dtype="boolean"),
            pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
        ],
        ids=["numpy-array", "boolean-array", "boolean-array-na"],
    )
    def test_setitem_mask(self, data, mask, box_in_series):
        if PANDAS_GE_1_1_0:
            BaseSetitemTests.test_setitem_mask(self, data, mask, box_in_series)

    @pytest.mark.xfail(reason="Views don't update their parent #96")
    def test_setitem_preserves_views(self, data):
        pass

    @xfail_list_setitem_not_implemented
    def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
        if PANDAS_GE_1_1_0:
            BaseSetitemTests.test_setitem_mask_boolean_array_with_na(
                self, data, box_in_series
            )

    @pytest.mark.parametrize(
        "idx",
        [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
        ids=["list", "integer-array", "numpy-array"],
    )
    @pytest.mark.xfail(reason="https://github.com/xhochy/fletcher/issues/110")
    def test_setitem_integer_array(self, data, idx, box_in_series):
        if PANDAS_GE_1_1_0:
            BaseSetitemTests.test_setitem_integer_array(self, data, idx, box_in_series)
class TestBaseParsingTests(BaseParsingTests):
    """CSV round-trip parsing tests for the extension dtype."""

    @pytest.mark.parametrize("engine", ["c", "python"])
    def test_EA_types(self, engine, data):
        # BUG FIX: the previous `pytest.mark.xfail(...)` bare statement was a
        # no-op (it only built a marker object); pytest.xfail() is the call
        # that actually records the expected failure.
        pytest.xfail(
            "pandas doesn't yet support registering ExtentionDtypes via a pattern"
        )
class TestBasePrintingTests(BasePrintingTests):
    """Run pandas' repr/printing test suite unchanged against fletcher arrays."""

    pass
class TestBaseBooleanReduceTests(BaseBooleanReduceTests):
    """any()/all() reductions, only meaningful for the boolean dtype."""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series(self, data, all_boolean_reductions, skipna):
        if pa.types.is_boolean(data.dtype.arrow_dtype):
            BaseBooleanReduceTests.test_reduce_series(
                self, data, all_boolean_reductions, skipna
            )
        else:
            pytest.skip("Boolean reductions are only tested with boolean types")
class TestBaseNoReduceTests(BaseNoReduceTests):
    """Assert reductions raise — but only for dtypes that don't implement them."""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
        arrow_dtype = data.dtype.arrow_dtype
        if (
            pa.types.is_integer(arrow_dtype)
            or pa.types.is_floating(arrow_dtype)
            or pa.types.is_decimal(arrow_dtype)
        ):
            pytest.skip("Numeric arrays implement reductions, so don't raise")
        else:
            BaseNoReduceTests.test_reduce_series_numeric(
                self, data, all_numeric_reductions, skipna
            )

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        if pa.types.is_boolean(data.dtype.arrow_dtype):
            pytest.skip("BooleanArray does define boolean reductions, so don't raise")
        else:
            BaseNoReduceTests.test_reduce_series_boolean(
                self, data, all_boolean_reductions, skipna
            )
class TestBaseNumericReduceTests(BaseNumericReduceTests):
    """Numeric reductions (sum/prod/min/...), run only for arithmetic dtypes."""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series(self, data, all_numeric_reductions, skipna):
        if all_numeric_reductions == "prod":
            # Shorten in the case of prod to avoid overflows
            data = data[:2]
        arrow_dtype = data.dtype.arrow_dtype
        if (
            pa.types.is_integer(arrow_dtype)
            or pa.types.is_floating(arrow_dtype)
            or pa.types.is_decimal(arrow_dtype)
        ):
            BaseNumericReduceTests.test_reduce_series(
                self, data, all_numeric_reductions, skipna
            )
        else:
            pytest.skip("Reduce not implemented on non-numeric types")
class TestBaseComparisonOpsTests(BaseComparisonOpsTests):
    """Comparison-operator tests; fletcher returns nullable boolean arrays."""

    def check_opname(self, s, op_name, other, exc=None):
        # NOTE(review): the `exc` parameter is accepted but deliberately
        # overridden with None so the op is always expected to succeed —
        # confirm this is intentional before changing.
        super().check_opname(s, op_name, other, exc=None)

    def _compare_other(self, s, data, op_name, other):
        self.check_opname(s, op_name, other)

    def test_compare_scalar(self, data, all_compare_operators):
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("pandas cannot cope with lists as scalar")
        else:
            # FIXME: Upstream always compares against 0
            op_name = all_compare_operators
            s = pd.Series(data)
            self._compare_other(s, data, op_name, data[0])

    def test_compare_array(self, data, all_compare_operators):
        if pa.types.is_list(data.dtype.arrow_dtype):
            pytest.xfail("Comparision of list not implemented yet")
        else:
            BaseComparisonOpsTests.test_compare_array(self, data, all_compare_operators)

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            result = op(s, other)
            # We return fletcher booleans to support nulls
            expected = s.combine(other, op)
            if not isinstance(expected.dtype, FletcherBaseDtype):
                expected = pd.Series(type(s.values)(expected.values))
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)
class TestBaseArithmeticOpsTests(BaseArithmeticOpsTests):
    """Arithmetic-operator tests, restricted to arithmetic-capable dtypes."""

    # TODO: Instead of skipping other types, we should set the correct exceptions here
    series_scalar_exc: Optional[Type[TypeError]] = None
    frame_scalar_exc: Optional[Type[TypeError]] = None
    series_array_exc: Optional[Type[TypeError]] = None
    divmod_exc: Optional[Type[TypeError]] = None

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            result = op(s, other)
            expected = s.combine(other, op)
            # Combine always returns an int64 for integral arrays but for
            # operations on smaller integer types, we expect also smaller int types
            # in the result of the non-combine operations.
            if hasattr(expected.dtype, "arrow_dtype"):
                arrow_dtype = expected.dtype.arrow_dtype
                if pa.types.is_integer(arrow_dtype):
                    # In the case of an operand with a higher bytesize, we also expect the
                    # output to be int64.
                    other_is_np_int64 = (
                        isinstance(other, pd.Series)
                        and isinstance(other.values, np.ndarray)
                        and other.dtype.char in ("q", "l")
                    )
                    if (
                        pa.types.is_integer(arrow_dtype)
                        and pa.types.is_integer(s.dtype.arrow_dtype)
                        and not other_is_np_int64
                    ):
                        expected = expected.astype(s.dtype)
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)

    @skip_non_artithmetic_type
    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        BaseArithmeticOpsTests.test_arith_series_with_scalar(
            self, data, all_arithmetic_operators
        )

    @skip_non_artithmetic_type
    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        BaseArithmeticOpsTests.test_arith_series_with_array(
            self, data, all_arithmetic_operators
        )

    @skip_non_artithmetic_type
    def test_divmod(self, data):
        BaseArithmeticOpsTests.test_divmod(self, data)

    @skip_non_artithmetic_type
    def test_divmod_series_array(self, data, data_for_twos):
        # NOTE(review): this delegates to test_divmod and ignores
        # `data_for_twos` — looks like a copy-paste; confirm whether the base
        # test_divmod_series_array should be called here instead.
        BaseArithmeticOpsTests.test_divmod(self, data)

    @skip_non_artithmetic_type
    def test_add_series_with_extension_array(self, data):
        BaseArithmeticOpsTests.test_add_series_with_extension_array(self, data)

    def test_error(self, data, all_arithmetic_operators):
        arrow_dtype = data.dtype.arrow_dtype
        if (
            pa.types.is_integer(arrow_dtype)
            or pa.types.is_floating(arrow_dtype)
            or pa.types.is_decimal(arrow_dtype)
        ):
            pytest.skip("Numeric does not error on ops")
        else:
            pytest.xfail("Should error here")

    def _check_divmod_op(self, s, op, other, exc=None):
        # Force exc=None: divmod is expected to succeed for tested dtypes.
        super()._check_divmod_op(s, op, other, None)
| tests/test_pandas_extension.py | 32,872 | Whether to box the data in a Series.
Fixture with data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
Return different versions of data for count times.
Check whether this is a type that support arithmetics.
Return a simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
imports of pytest fixtures needed for derived unittest classes noqa: F401 noqa: F401 noqa: F401 noqa: F401 noqa: F401 noqa: F401 Use small values here so that np.prod stays in int32 Use small values here so that np.prod stays in int32 Use small values here so that np.prod stays in int32 Use small values here so that np.prod stays in int64 Most of the tests fail as assert_extension_array_equal casts to numpy object arrays and on them equality is not defined. noqa Expected grouping is different as we only have two non-null values noqa: F811 https://github.com/pandas-dev/pandas/issues/22843 noqa: F811 GH 20825 Test that combine works when doing a <= (le) comparison Fletcher returns 'fletcher_chunked[bool]' instead of np.bool as dtype noqa: F811 noqa: F811 noqa: F811 https://github.com/pandas-dev/pandas/issues/21792 https://github.com/pandas-dev/pandas/issues/33331 Shorten in the case of prod to avoid overflows FIXME: Upstream always compares againt 0 We return fletcher booleans to support nulls TODO: Instead of skipping other types, we should set the correct exceptions here Combine always returns an int64 for integral arrays but for operations on smaller integer types, we expect also smaller int types in the result of the non-combine operations. In the case of an operand with a higher bytesize, we also expect the output to be int64. | 1,947 | en | 0.823899 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-09 13:02
from __future__ import unicode_literals
from django.db import migrations
def load_continents(apps, schema_editor):
    """Seed the Continent table with the seven continent codes and names."""
    continent_model = apps.get_model("country_name_tool", "Continent")
    seed_rows = (
        ('NA', 'North America'),
        ('AS', 'Asia'),
        ('AF', 'Africa'),
        ('EU', 'Europe'),
        ('SA', 'South America'),
        ('OC', 'Oceania'),
        ('AN', 'Antarctica'),
    )
    for code, name in seed_rows:
        continent_model(continent_code=code, continent_name=name).save()
class Migration(migrations.Migration):
    """Data migration: populate the Continent table with the seven continents."""

    # Must run after the migration that creates/alters the Continent model.
    dependencies = [
        ('country_name_tool', '0002_auto_20170523_1825'),
    ]

    operations = [
        # Forward-only data load; no reverse function is provided.
        migrations.RunPython(load_continents)
    ]
| country_name_tool/migrations/0003_auto_20170609_1302.py | 749 | -*- coding: utf-8 -*- Generated by Django 1.11 on 2017-06-09 13:02 | 66 | en | 0.708555 |
import os,sys,json,cv2
from nima.inference.inference_model import InferenceModel
import opt4 as opt
from PIL import Image
# write to result file
def write_json(args, score):
    """Dump the score dict as JSON to args[3], or a default path when missing.

    args: argv-style list; args[3] is the output file location.
    score: JSON-serializable dict of results.
    """
    try:
        outfile = open(args[3], 'w')
    except IndexError:
        # No output location supplied on the command line; fall back.
        print('output_location not found')
        print('saving test json at "./test_data/data.json"')
        outfile = open('./test_data/data.json', 'w+')
    # Original code called `outfile.close` without parentheses, so the file was
    # never closed (and json.dump ran in `finally` even if open() had failed
    # with a non-IndexError). A context manager guarantees the close.
    with outfile:
        json.dump(score, outfile)
#get score by testing
def test(model, image):
    """Score every image with the NIMA model.

    model: inference model exposing predict_from_pil_image.
    image: dict mapping file name -> file path.
    Returns {'results': [...]} sorted by descending mean score.
    """
    entries = []
    for name, path in image.items():
        # Read with OpenCV (BGR) and convert to an RGB PIL image for the model.
        frame = cv2.imread(path)
        pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        # Alternative: prediction = model.predict_from_file(path)
        prediction = model.predict_from_pil_image(pil_frame)
        entries.append({'name': name, 'score': prediction['mean_score'], 'b': 0, 'c': 0})
    entries.sort(key=lambda entry: entry['score'], reverse=True)
    return {'results': entries}
#switch mode and execute test
def start(mode,model,image):
    """Dispatch on mode: 'test' scores images; 'adjust' also optimizes one image.

    mode: 'test' or 'adjust' (anything else exits the process).
    model: NIMA inference model passed through to test().
    image: dict of file name -> path; 'adjust' requires exactly one entry.
    Returns the score dict, extended with the adjusted-image entry in 'adjust'.
    """
    if mode == 'test':
        score = test(model,image)
        '''
        print(str(len(image))+' pictures found!')
        print('=======================')
        print('test_json')
        print(score)
        print('=======================')
        '''
    elif mode == 'adjust':
        #print(image)
        # Adjustment only makes sense for a single image.
        if len(image) > 1:
            print('error adjust more than 1 image')
            sys.exit(0)
        score = test(model,image)
        '''
        print('=======================')
        print('test_json')
        print(score)
        print('=======================')
        '''
        for i in image:
            # opt.starts returns a 3-tuple: presumably ((b, c) parameters,
            # adjusted score, adjusted PIL image) — confirm against opt4.starts.
            results = opt.starts(image.get(i))
            #print(i,results)
            filepath, ext = os.path.splitext(image.get(i))
            # Save the adjusted image next to the original with a '_' suffix.
            img_dir = filepath+'_'+ext
            results[2].save(img_dir)
            score['results'].append({'name':i.split('.')[-2]+'_.'+i.split('.')[-1], 'score':results[1], 'b':results[0][0], 'c':results[0][1], 'img_dir':img_dir})
        '''
        print('adjsut_json')
        print(score)
        print('=======================')
        '''
    else:
        print('error select mode')
        sys.exit(0)
    return score
#get image from a folder
def handle_folder(pth):
    """Collect jpeg images in directory pth, mapping file name -> full path."""
    return {
        fname: pth + fname
        for fname in os.listdir(pth)
        if fname.endswith(('.jpeg', '.jpg'))
    }
# detect test or adjust
def detect_mode(args):
    """Return args[1] if it is a valid mode ('test' or 'adjust'); exit otherwise."""
    requested = args[1]
    if requested in ('test', 'adjust'):
        return requested
    print('error mode in args[1]')
    sys.exit(0)
# detect folder or a picture
def detect_pth(args):
    """Classify args[2] as a single picture or a folder of pictures.

    Returns ('pic', path) for a readable .jpg/.jpeg file, or ('folder', path)
    with a trailing slash for a directory. Exits the process on invalid paths.
    """
    target = args[2]
    if target.endswith(('.jpeg', '.jpg')):
        # Original used `_=open(target)` (leaked file handle) guarded by a bare
        # `except:`; probe readability explicitly and close the handle.
        try:
            with open(target):
                pass
        except OSError:
            print('error pic')
            sys.exit(0)
        return 'pic', target
    # Bare `except:` narrowed to OSError — a bare except would also swallow
    # KeyboardInterrupt/SystemExit.
    try:
        os.stat(target)
    except OSError:
        print('error folder')
        sys.exit(0)
    if not target.endswith('/'):
        target += '/'
    try:
        os.stat(target)
    except OSError:
        print('error detecting the path')
        sys.exit(0)
    return 'folder', target
# main
def mynima(args):
    """Entry point: score (and optionally adjust) images, writing JSON results.

    args: argv-style list — args[1] is the mode ('test'/'adjust'), args[2] an
    image or folder path, args[3] the output JSON location.
    """
    # Pre-trained NIMA weights; alternative checkpoints kept for reference.
    model_pth = './tmp3/emd_loss_epoch_49_train_0.05391903784470127_0.12613263790013726.pth'
    #model_pth = './tmp/emd_loss_epoch_49_train_0.03547421253612805_0.08993149643023331.pth'
    #model_pth = './tmp0710/emd_loss_epoch_49_train_0.06696929275146844_0.1384279955681362.pth'
    model = InferenceModel(path_to_model=model_pth)
    mode = detect_mode(args)
    method,pth = detect_pth(args)
    #print(method)
    if method == 'pic':
        name = os.path.basename(pth)
        #print('name '+name) #get pic name
        image = {name:pth} #make image dict.
    elif method == 'folder':
        image = handle_folder(pth) #get image dict.
    score = start(mode,model,image)
    write_json(args,score)
    return
#arg[1]: (adjust/test) arg[2]: (folder_path/img_pth) arg[3]: (output_file)
# Guard the entry point so importing this module does not trigger a full run
# (model load + inference + file writes happened on import before).
if __name__ == '__main__':
    mynima(sys.argv)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch import grad, nnc_jit, make_fx, make_nnc
import torch
import time
def f(x):
    """Scalar objective: the sum of the elementwise sine of x."""
    return x.sin().sum()
# Build the gradient of f in several forms for benchmarking below.
inp = torch.randn(100)
grad_pt = grad(f)  # eager functorch gradient
grad_fx = make_fx(grad_pt)(inp)  # FX graph traced on inp
grad_nnc = nnc_jit(grad_pt, skip_specialization=True)  # NNC-compiled gradient
loopnest = make_nnc(grad_pt)(inp)
print(loopnest)
def bench(name, f, iters=10000, warmup=3):
    """Print the total time for `iters` calls of `f` after `warmup` untimed calls.

    name: label prefix for the printed line.
    f: zero-argument callable to benchmark.
    """
    for _ in range(warmup):
        f()
    # perf_counter is monotonic and high-resolution; time.time is wall-clock
    # and can jump with clock adjustments, skewing benchmark numbers.
    begin = time.perf_counter()
    for _ in range(iters):
        f()
    print(f"{name}: ", time.perf_counter() - begin)
# Compare eager functorch, traced FX, and NNC-compiled gradient implementations.
bench("Pytorch: ", lambda: grad_pt(inp))
bench("FX: ", lambda: grad_fx(inp))
bench("NNC: ", lambda: grad_nnc(inp))
| examples/compilation/simple_function.py | 817 | Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. | 195 | en | 0.937181 |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2021 Greg Davill <greg.davill@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
# Build/Use:
# ./gsd_butterstick.py --uart-name=crossover --with-etherbone --csr-csv=csr.csv --build --load
# litex_server --udp
# litex_term bridge
import os
import sys
import argparse
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import butterstick
from litex.build.lattice.trellis import trellis_args, trellis_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MT41K64M16,MT41K128M16,MT41K256M16,MT41K512M16
from litedram.phy import ECP5DDRPHY
from liteeth.phy.ecp5rgmii import LiteEthPHYRGMII
# CRG ---------------------------------------------------------------------------------------------
class _CRG(Module):
    """Clock/reset generator for the ButterStick SoC.

    Derives the sys (sys_clk_freq), sys2x (2x, for the DDR PHY) and 25 MHz
    init clock domains from the 30 MHz board clock, with a power-on-reset
    counter and PLL-lock-based reset synchronizers.
    """
    def __init__(self, platform, sys_clk_freq):
        self.rst = Signal()
        self.clock_domains.cd_init = ClockDomain()
        self.clock_domains.cd_por = ClockDomain(reset_less=True)
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_sys2x = ClockDomain()
        self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)

        # # #

        # Driven externally (by the DDR PHY init) to stop/reset the 2x clock.
        self.stop = Signal()
        self.reset = Signal()

        # Clk / Rst
        clk30 = platform.request("clk30")
        rst_n = platform.request("user_btn", 0)

        # Power on reset: hold until a 2^16-cycle countdown of clk30 expires.
        por_count = Signal(16, reset=2**16-1)
        por_done = Signal()
        self.comb += self.cd_por.clk.eq(clk30)
        self.comb += por_done.eq(por_count == 0)
        self.sync.por += If(~por_done, por_count.eq(por_count - 1))

        # PLL: held in reset while POR pending, button held, or self.rst high.
        self.submodules.pll = pll = ECP5PLL()
        self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst)
        pll.register_clkin(clk30, 30e6)
        pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq)
        pll.create_clkout(self.cd_init, 25e6)
        self.specials += [
            # ECLKSYNCB gates the 2x edge clock (stoppable for DDR init);
            # CLKDIVF divides it by two to produce the phase-aligned sys clock.
            Instance("ECLKSYNCB",
                i_ECLKI = self.cd_sys2x_i.clk,
                i_STOP  = self.stop,
                o_ECLKO = self.cd_sys2x.clk),
            Instance("CLKDIVF",
                p_DIV     = "2.0",
                i_ALIGNWD = 0,
                i_CLKI    = self.cd_sys2x.clk,
                i_RST     = self.reset,
                o_CDIVX   = self.cd_sys.clk),
            AsyncResetSynchronizer(self.cd_sys,   ~pll.locked | self.reset),
            AsyncResetSynchronizer(self.cd_sys2x, ~pll.locked | self.reset),
        ]
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
    """LiteX SoC for the ButterStick board.

    Wires up clocking, DDR3 SDRAM, optional Ethernet/Etherbone, optional
    memory-mapped SPI flash, and an RGB LED chaser.
    """
    def __init__(self, revision="1.0", device="85F", sdram_device="MT41K64M16", sys_clk_freq=int(60e6),
        toolchain="trellis", with_ethernet=False, with_etherbone=False, eth_ip="192.168.1.50",
        eth_dynamic_ip=False,
        with_spi_flash=False,
        with_led_chaser=True,
        **kwargs) :
        platform = butterstick.Platform(revision=revision, device=device ,toolchain=toolchain)

        # SoCCore ----------------------------------------------------------------------------------
        # No dedicated UART pins are used: redirect "serial" to the crossover UART.
        if kwargs["uart_name"] == "serial":
            kwargs["uart_name"] = "crossover"
        SoCCore.__init__(self, platform, sys_clk_freq,
            ident          = "LiteX SoC on ButterStick",
            ident_version  = True,
            **kwargs)

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)

        # DDR3 SDRAM -------------------------------------------------------------------------------
        if not self.integrated_main_ram_size:
            available_sdram_modules = {
                "MT41K64M16":  MT41K64M16,
                "MT41K128M16": MT41K128M16,
                "MT41K256M16": MT41K256M16,
                "MT41K512M16": MT41K512M16,
            }
            sdram_module = available_sdram_modules.get(sdram_device)
            self.submodules.ddrphy = ECP5DDRPHY(
                platform.request("ddram"),
                sys_clk_freq=sys_clk_freq)
            # Let the PHY init sequence drive the CRG clock stop/reset lines.
            self.comb += self.crg.stop.eq(self.ddrphy.init.stop)
            self.comb += self.crg.reset.eq(self.ddrphy.init.reset)
            self.add_sdram("sdram",
                phy           = self.ddrphy,
                module        = sdram_module(sys_clk_freq, "1:2"),
                l2_cache_size = kwargs.get("l2_size", 8192)
            )

        # Ethernet / Etherbone ---------------------------------------------------------------------
        if with_ethernet or with_etherbone:
            self.submodules.ethphy = LiteEthPHYRGMII(
                clock_pads = self.platform.request("eth_clocks"),
                pads       = self.platform.request("eth"))
            if with_ethernet:
                self.add_ethernet(phy=self.ethphy, dynamic_ip=eth_dynamic_ip)
            if with_etherbone:
                self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)

        # SPI Flash --------------------------------------------------------------------------------
        if with_spi_flash:
            from litespi.modules import W25Q128JV
            from litespi.opcodes import SpiNorFlashOpCodes as Codes
            self.add_spi_flash(mode="4x", module=W25Q128JV(Codes.READ_1_1_4), with_master=False)

        # Leds -------------------------------------------------------------------------------------
        if with_led_chaser:
            self.comb += platform.request("user_led_color").eq(0b010) # Blue.
            self.submodules.leds = LedChaser(
                pads         = platform.request_all("user_led"),
                sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
    """Parse command-line options, build the SoC/bitstream, optionally load it."""
    parser = argparse.ArgumentParser(description="LiteX SoC on ButterStick")
    parser.add_argument("--build", action="store_true", help="Build bitstream.")
    parser.add_argument("--load", action="store_true", help="Load bitstream.")
    parser.add_argument("--toolchain", default="trellis", help="FPGA toolchain (trellis or diamond).")
    parser.add_argument("--sys-clk-freq", default=75e6, help="System clock frequency.")
    parser.add_argument("--revision", default="1.0", help="Board Revision (1.0).")
    parser.add_argument("--device", default="85F", help="ECP5 device (25F, 45F, 85F).")
    parser.add_argument("--sdram-device", default="MT41K64M16", help="SDRAM device (MT41K64M16, MT41K128M16, MT41K256M16 or MT41K512M16).")
    # Ethernet and Etherbone are mutually exclusive.
    ethopts = parser.add_mutually_exclusive_group()
    ethopts.add_argument("--with-ethernet", action="store_true", help="Add Ethernet.")
    ethopts.add_argument("--with-etherbone", action="store_true", help="Add EtherBone.")
    parser.add_argument("--eth-ip", default="192.168.1.50", help="Ethernet/Etherbone IP address.")
    parser.add_argument("--eth-dynamic-ip", action="store_true", help="Enable dynamic Ethernet IP addresses setting.")
    parser.add_argument("--with-spi-flash", action="store_true", help="Enable SPI Flash (MMAPed).")
    # SPI-mode and native SDCard support are mutually exclusive.
    sdopts = parser.add_mutually_exclusive_group()
    sdopts.add_argument("--with-spi-sdcard", action="store_true", help="Enable SPI-mode SDCard support.")
    sdopts.add_argument("--with-sdcard", action="store_true", help="Enable SDCard support.")
    # Mix in the standard LiteX builder/SoC/toolchain argument groups.
    builder_args(parser)
    soc_core_args(parser)
    trellis_args(parser)
    args = parser.parse_args()

    # Etherbone needs a fixed IP address.
    assert not (args.with_etherbone and args.eth_dynamic_ip)

    soc = BaseSoC(
        toolchain      = args.toolchain,
        revision       = args.revision,
        device         = args.device,
        sdram_device   = args.sdram_device,
        sys_clk_freq   = int(float(args.sys_clk_freq)),
        with_ethernet  = args.with_ethernet,
        with_etherbone = args.with_etherbone,
        eth_ip         = args.eth_ip,
        eth_dynamic_ip = args.eth_dynamic_ip,
        with_spi_flash = args.with_spi_flash,
        **soc_core_argdict(args))
    if args.with_spi_sdcard:
        soc.add_spi_sdcard()
    if args.with_sdcard:
        soc.add_sdcard()
    builder = Builder(soc, **builder_argdict(args))
    # Toolchain-specific build arguments only apply to trellis.
    builder_kargs = trellis_argdict(args) if args.toolchain == "trellis" else {}
    builder.build(**builder_kargs, run=args.build)

    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
# Run the build/load flow when executed as a script.
if __name__ == "__main__":
    main()
| litex_boards/targets/gsd_butterstick.py | 8,915 | !/usr/bin/env python3 This file is part of LiteX-Boards. Copyright (c) 2021 Florent Kermarrec <florent@enjoy-digital.fr> Copyright (c) 2021 Greg Davill <greg.davill@gmail.com> SPDX-License-Identifier: BSD-2-Clause Build/Use: ./gsd_butterstick.py --uart-name=crossover --with-etherbone --csr-csv=csr.csv --build --load litex_server --udp litex_term bridge CRG --------------------------------------------------------------------------------------------- Clk / Rst Power on reset PLL BaseSoC ------------------------------------------------------------------------------------------ SoCCore ---------------------------------------------------------------------------------- CRG -------------------------------------------------------------------------------------- DDR3 SDRAM ------------------------------------------------------------------------------- Ethernet / Etherbone --------------------------------------------------------------------- SPI Flash -------------------------------------------------------------------------------- Leds ------------------------------------------------------------------------------------- Blue. Build -------------------------------------------------------------------------------------------- | 1,233 | en | 0.207246 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides standard metric evaluations for dialog.
Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics between
processes.
"""
import re
from abc import ABC, abstractmethod
from collections import Counter
import queue
import functools
import datetime
from typing import Union, List, Optional, Tuple, Set, Any, Dict
import torch
from parlai.core.message import Message
from parlai.utils.misc import warn_once
from parlai.utils.typing import TScalar, TVector
try:
import torch.multiprocessing as multiprocessing
except ImportError:
import multiprocessing # type: ignore
DEFAULT_METRICS = {'bleu-4', 'accuracy', 'f1'}
ROUGE_METRICS = {'rouge-1', 'rouge-2', 'rouge-L'}
BLEU_METRICS = {'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4'}
ALL_METRICS = DEFAULT_METRICS | ROUGE_METRICS | BLEU_METRICS
try:
from nltk.translate import bleu_score as nltkbleu
except ImportError:
# User doesn't have nltk installed, so we can't use it for bleu
# We'll just turn off things, but we might want to warn the user
nltkbleu = None
try:
from fairseq import bleu as fairseqbleu
except ImportError:
fairseqbleu = None
try:
import rouge
except ImportError:
# User doesn't have py-rouge installed, so we can't use it.
# We'll just turn off rouge computations
rouge = None
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
@functools.total_ordering
class Metric(ABC):
    """
    Base class for storing metrics.

    Subclasses should define .value(). Examples are provided for each subclass.
    """

    @property
    def is_global(self) -> bool:
        """
        Indicates whether this metric should be reported globally or per-task.
        """
        return False

    @property
    def macro_average(self) -> bool:
        """
        Indicates whether this metric should be macro-averaged when globally reported.
        """
        return False

    @abstractmethod
    def value(self) -> float:
        """
        Return the value of the metric as a float.
        """
        pass

    @abstractmethod
    def __add__(self, other: Any) -> 'Metric':
        """Combine this metric with another (or with None); subclass-defined."""
        raise NotImplementedError

    def __iadd__(self, other):
        # In-place add funnels through __radd__ so the None-handling is shared.
        return self.__radd__(other)

    def __radd__(self, other: Any):
        # None + metric == metric, which lets dict.get(key) seed accumulation.
        if other is None:
            return self
        return self.__add__(other)

    def __str__(self) -> str:
        return f'{self.value():.4g}'

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.value():.4g})'

    def __float__(self) -> float:
        return float(self.value())

    def __int__(self) -> int:
        return int(self.value())

    def __eq__(self, other: Any) -> bool:
        # Compare by value against either another Metric or a plain number.
        if isinstance(other, Metric):
            return self.value() == other.value()
        else:
            return self.value() == other

    def __lt__(self, other: Any) -> bool:
        # functools.total_ordering derives <=, >, >= from __eq__ and __lt__.
        if isinstance(other, Metric):
            return self.value() < other.value()
        else:
            return self.value() < other

    def __sub__(self, other: Any) -> float:
        """
        Used heavily for assertAlmostEqual.
        """
        if not isinstance(other, float):
            raise TypeError('Metrics.__sub__ is intentionally limited to floats.')
        return self.value() - other

    def __rsub__(self, other: Any) -> float:
        """
        Used heavily for assertAlmostEqual.

        NOTE: This is not necessary in python 3.7+.
        """
        if not isinstance(other, float):
            raise TypeError('Metrics.__rsub__ is intentionally limited to floats.')
        return other - self.value()

    @classmethod
    def as_number(cls, obj: TScalar) -> Union[int, float]:
        """Coerce a scalar tensor or plain number to a Python int/float."""
        if isinstance(obj, torch.Tensor):
            obj_as_number: Union[int, float] = obj.item()
        else:
            obj_as_number = obj  # type: ignore
        assert isinstance(obj_as_number, int) or isinstance(obj_as_number, float)
        return obj_as_number

    @classmethod
    def as_float(cls, obj: TScalar) -> float:
        return float(cls.as_number(obj))

    @classmethod
    def as_int(cls, obj: TScalar) -> int:
        return int(cls.as_number(obj))

    @classmethod
    def many(cls, *objs: List[TVector]) -> List['Metric']:
        """
        Construct many of a Metric from the base parts.

        Useful if you separately compute numerators and denomenators, etc.
        """
        lengths = [len(o) for o in objs]
        # All component vectors must be the same length to zip into metrics.
        if len(set(lengths)) != 1:
            raise IndexError(f'Uneven {cls.__name__} constructions: {lengths}')
        return [cls(*items) for items in zip(*objs)]
class FixedMetric(Metric):
    """
    A metric whose value must be identical wherever it is combined.

    Used for quantities like total_train_updates that must agree across
    multitasks and workers; combining unequal values raises.
    """

    __slots__ = ('_value',)

    def __init__(self, value: TScalar):
        self._value = self.as_number(value)

    def __add__(self, other: Optional['FixedMetric']) -> 'FixedMetric':
        # Adding None is a no-op; adding an unequal metric is an error.
        if other is not None and self != other:
            raise ValueError(f"FixedMetrics not the same: {self} and {other}")
        return self

    def value(self) -> float:
        return self._value
class SumMetric(Metric):
    """
    Running total of a quantity.

    Examples include 'exs', the number of examples seen since the last
    report, which depends exactly on a teacher.
    """

    __slots__ = ('_sum',)

    def __init__(self, sum_: TScalar = 0):
        # Accept scalar tensors as well as plain numbers.
        if isinstance(sum_, torch.Tensor):
            self._sum = sum_.item()
        else:
            assert isinstance(sum_, (int, float))
            self._sum = sum_

    def __add__(self, other: Optional['SumMetric']) -> 'SumMetric':
        # NOTE: hinting can be cleaned up with "from __future__ import annotations" when
        # we drop Python 3.6
        if other is None:
            return self
        # Construct via type(self) so subclasses survive combination.
        return type(self)(sum_=self._sum + other._sum)

    def value(self) -> float:
        return self._sum
class AverageMetric(Metric):
    """
    Running ratio numerator/denominator.

    Examples include hits@1, F1 and accuracy: per-example values that can be
    mapped directly back to a teacher.
    """

    __slots__ = ('_numer', '_denom')

    @property
    def macro_average(self) -> bool:
        """Average metrics are macro-averaged when reported globally."""
        return True

    def __init__(self, numer: TScalar, denom: TScalar = 1):
        self._numer = self.as_number(numer)
        self._denom = self.as_number(denom)

    def __add__(self, other: Optional['AverageMetric']) -> 'AverageMetric':
        # NOTE: hinting can be cleaned up with "from __future__ import annotations" when
        # we drop Python 3.6
        if other is None:
            return self
        # Sum numerators and denominators; type(self) preserves subclasses.
        return type(self)(
            numer=self._numer + other._numer,
            denom=self._denom + other._denom,
        )

    def value(self) -> float:
        # An untouched metric reports 0 rather than NaN.
        if self._numer == 0 and self._denom == 0:
            return 0.0
        if self._denom == 0:
            return float('nan')
        return self._numer / self._denom
class MacroAverageMetric(Metric):
    """
    Macro average over several per-task metrics.

    Used for aggregating task level metrics; only applied to values that are
    already AverageMetrics.
    """

    __slots__ = '_values'

    def __init__(self, metrics: Dict[str, Metric]) -> None:
        self._values = metrics

    def __add__(self, other: Optional['MacroAverageMetric']) -> 'MacroAverageMetric':
        if other is None:
            return self
        merged = dict(**self._values)
        for key, metric in other._values.items():
            # merged.get(key, None) + metric relies on Metric handling None + x.
            merged[key] = merged.get(key, None) + metric
        return MacroAverageMetric(merged)

    def value(self) -> float:
        per_task = [v.value() for v in self._values.values()]
        return sum(per_task) / len(per_task)
class TimerMetric(Metric):
    """
    Rate metric: an accumulated value divided by the wall-clock span observed.

    Keeps track of the first/last times it was used; combining two timers
    merges their totals and widens the observed time window.
    """

    __slots__ = ('_value', '_start', '_end')

    @classmethod
    def _now(cls) -> float:
        # POSIX timestamp in float seconds (annotation was `int` but
        # timestamp() returns float).
        return datetime.datetime.utcnow().timestamp()

    def __init__(
        self,
        value: TScalar,
        start_time: Optional[float] = None,
        end_time: Optional[float] = None,
    ):
        self._value = self.as_number(value)
        if start_time is None:
            start_time = self._now()
        if end_time is None:
            end_time = self._now()
        self._start = start_time
        self._end = end_time

    def __add__(self, other: Optional['TimerMetric']) -> 'TimerMetric':
        # NOTE: hinting can be cleaned up with "from __future__ import annotations" when
        # we drop Python 3.6
        if other is None:
            return self
        total: TScalar = self._value + other._value
        start = min(self._start, other._start)
        # BUG FIX: was max(self._start, other._end), which dropped self's end
        # time and could understate the elapsed span (inflating the rate).
        end = max(self._end, other._end)
        return type(self)(total, start, end)

    def value(self) -> float:
        # Guard against division by zero when nothing was timed.
        if self._value == 0 or self._end == self._start:
            return 0
        return self._value / (self._end - self._start)
class GlobalMetric:
    """
    A global metric is one that should not be aggregated across different tasks.

    Examples of global metric include things like learning rate and updates.
    These need to be accumulated or averaged over multiple parleys, but cannot
    be correlated with a single task.

    Key to it is the notion that any one worker or any one task already has a global
    view of the value, and so no combinations should be done. Note this is different
    then a FixedMetric, in that a GlobalMetric can be still averaged across multiple
    parleys(), but a FixedMetric is always fixed.
    """

    @property
    def is_global(self) -> bool:
        # Mixin flag checked during aggregation to skip per-task accounting.
        return True
# Concrete combinations of the GlobalMetric mixin with each metric kind.
class GlobalFixedMetric(GlobalMetric, FixedMetric):
    """
    Global fixed metric.

    Used for things like total_train_updates.
    """

    pass


class GlobalSumMetric(GlobalMetric, SumMetric):
    """
    Global sum metric.

    Used for 'exs' and 'updates'.
    """

    pass


class GlobalAverageMetric(GlobalMetric, AverageMetric):
    """
    Global Average metric.

    Used for things like learning rate, and many agent-specific metrics.
    """

    pass


class LegacyMetric(GlobalAverageMetric):
    """
    Legacy Metrics are reported by agent as float.
    """

    pass


class GlobalTimerMetric(GlobalMetric, TimerMetric):
    # Global rate metric (e.g. examples-per-second style values).
    pass
class F1Metric(AverageMetric):
    """
    Helper class which computes token-level F1.
    """

    @staticmethod
    def _prec_recall_f1_score(pred_items, gold_items):
        """
        Compute precision, recall and f1 given a set of gold and prediction items.

        :param pred_items: iterable of predicted values
        :param gold_items: iterable of gold values

        :return: tuple (p, r, f1) for precision, recall, f1
        """
        # Multiset intersection counts shared tokens with multiplicity.
        common = Counter(gold_items) & Counter(pred_items)
        num_same = sum(common.values())
        if num_same == 0:
            return 0, 0, 0
        precision = 1.0 * num_same / len(pred_items)
        recall = 1.0 * num_same / len(gold_items)
        f1 = (2 * precision * recall) / (precision + recall)
        return precision, recall, f1

    @staticmethod
    def compute(guess: str, answers: List[str]) -> 'F1Metric':
        """Return the best token-level F1 of guess against any reference answer."""
        if guess is None or answers is None:
            # FIX: previously returned AverageMetric(0, 0), contradicting the
            # declared return type and breaking type(self)-preserving addition.
            return F1Metric(0, 0)
        g_tokens = normalize_answer(guess).split()
        scores = [
            F1Metric._prec_recall_f1_score(g_tokens, normalize_answer(a).split())
            for a in answers
        ]
        return F1Metric(max(f1 for p, r, f1 in scores), 1)
class ExactMatchMetric(AverageMetric):
    """1 if the normalized guess exactly matches any reference answer, else 0."""

    @staticmethod
    def compute(guess: str, answers: List[str]) -> 'ExactMatchMetric':
        if guess is None or answers is None:
            return None
        normalized = normalize_answer(guess)
        matched = any(normalized == normalize_answer(a) for a in answers)
        return ExactMatchMetric(1 if matched else 0)
class BleuMetric(AverageMetric):
    """Sentence-level BLEU computed with NLTK over normalized text."""

    @staticmethod
    def compute(guess: str, answers: List[str], k: int = 4) -> Optional['BleuMetric']:
        """
        Compute approximate BLEU score between guess and a set of answers.

        Returns None when nltk is unavailable. k is the maximum n-gram order,
        weighted uniformly.
        """
        if nltkbleu is None:
            # bleu library not installed, just return a default value
            return None
        # Warning: BLEU calculation *should* include proper tokenization and
        # punctuation etc. We're using the normalize_answer for everything though,
        # so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
        # going to be slower than fairseq's (which is written in C), but fairseq's
        # requires that everything be in arrays of ints (i.e. as tensors). NLTK's
        # works with strings, which is better suited for this module.
        weights = [1 / k for _ in range(k)]
        score = nltkbleu.sentence_bleu(
            [normalize_answer(a).split(" ") for a in answers],
            normalize_answer(guess).split(" "),
            smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
            weights=weights,
        )
        return BleuMetric(score)
class FairseqBleuMetric(AverageMetric):
    """Token-id based BLEU computed with fairseq's C scorer."""

    @staticmethod
    def compute_many(
        guess: torch.Tensor, answers: torch.Tensor, pad_idx, end_idx, unk_idx
    ):
        """
        Return BLEU-1..4 using fairseq and tokens.

        Returns None when fairseq is unavailable. guess/answers are token-id
        tensors; pad/end/unk ids configure the scorer.
        """
        if fairseqbleu is None:
            return None
        scorer = fairseqbleu.Scorer(pad_idx, end_idx, unk_idx)
        # The fairseq scorer expects int tensors on CPU.
        answers = answers.cpu().int()
        guess = guess.cpu().int()
        scorer.add(answers, guess)
        # scorer.score(i) returns a percentage; scale to [0, 1].
        return [FairseqBleuMetric(scorer.score(i) / 100.0) for i in range(1, 5)]
class RougeMetric(AverageMetric):
    """ROUGE recall scores computed with the py-rouge package."""

    # Shared, lazily-constructed rouge evaluator.
    _evaluator = None

    @staticmethod
    def compute_many(
        guess: str, answers: List[str]
    ) -> Tuple[
        Optional['RougeMetric'], Optional['RougeMetric'], Optional['RougeMetric']
    ]:
        """
        Compute ROUGE score between guess and *any* answer.

        Done with compute_many due to increased efficiency.

        :return: (rouge-1, rouge-2, rouge-L)
        """
        # possible global initialization
        global rouge
        if rouge is None:
            # py-rouge not installed; all three scores are unavailable.
            return None, None, None
        if RougeMetric._evaluator is None:
            RougeMetric._evaluator = rouge.Rouge(
                metrics=['rouge-n', 'rouge-l'], max_n=2
            )
        try:
            scores = [
                RougeMetric._evaluator.get_scores(
                    normalize_answer(guess), normalize_answer(a)
                )
                for a in answers
            ]
        except LookupError:
            warn_once(
                'ROUGE requires nltk punkt tokenizer. Please run '
                '`python -c "import nltk; nltk.download(\'punkt\')`'
            )
            return None, None, None

        # Take the best recall over all reference answers, per variant.
        scores_rouge1 = max(score['rouge-1']['r'] for score in scores)
        scores_rouge2 = max(score['rouge-2']['r'] for score in scores)
        scores_rougeL = max(score['rouge-l']['r'] for score in scores)
        return (
            RougeMetric(scores_rouge1),
            RougeMetric(scores_rouge2),
            RougeMetric(scores_rougeL),
        )
def normalize_answer(s):
    """
    Lower text and remove punctuation, articles and extra whitespace.
    """
    lowered = s.lower()
    without_punc = re_punc.sub(' ', lowered)
    without_articles = re_art.sub(' ', without_punc)
    # TODO: this could almost certainly be faster with a regex \s+ -> ' '
    return ' '.join(without_articles.split())
def aggregate_named_reports(
    named_reports: Dict[str, Dict[str, Metric]], micro_average: bool = False
) -> Dict[str, Metric]:
    """
    Aggregate metrics from multiple reports.

    :param reports:
        Dict of tasks -> metrics.
    :param micro_average:
        If true, top level metrics will be the micro average. By default, we
        use macro average.
    :return:
        The aggregated report
    """
    if len(named_reports) == 0:
        raise ValueError("Cannot aggregate empty reports.")
    if len(named_reports) == 1:
        # no real aggregation to be done
        return next(iter(named_reports.values()))

    # reporters is a list of teachers or worlds
    m: Dict[str, Metric] = {}
    macro_averages: Dict[str, Dict[str, Metric]] = {}
    for task_id, task_report in named_reports.items():
        for each_metric, value in task_report.items():
            if value.is_global:
                # just take the first one we saw
                if each_metric not in m:
                    m[each_metric] = value
            else:
                # Always keep the per-task breakdown under 'task/metric'.
                task_metric = f'{task_id}/{each_metric}'
                m[task_metric] = m.get(task_metric) + value
                if micro_average or not value.macro_average:
                    # none + a => a from implementation of Metric.__add__
                    m[each_metric] = m.get(each_metric) + value
                else:
                    # macro average: collect per-task values, combined below.
                    if each_metric not in macro_averages:
                        macro_averages[each_metric] = {}
                    macro_averages[each_metric][task_id] = value
    for key, values in macro_averages.items():
        m[key] = MacroAverageMetric(values)
    return m
def aggregate_unnamed_reports(reports: List[Dict[str, Metric]]) -> Dict[str, Metric]:
    """
    Combine metrics without regard for tracking provenance.
    """
    combined: Dict[str, Metric] = {}
    for report in reports:
        for metric_name, value in report.items():
            # None + value yields value via Metric's addition implementation.
            combined[metric_name] = combined.get(metric_name) + value
    return combined
class Metrics(object):
    """
    Threadsafe metrics container focused on aggregation.

    In threadsafe mode, the original process owns ``_data`` and a shared
    queue; worker clones accumulate into a local ``_buffer`` and push it onto
    the queue, which the owner drains at report time.
    """

    def __init__(self, threadsafe=False, shared=None):
        self._threadsafe = threadsafe
        if self._threadsafe and shared is None:
            # Threadsafe metrics tracking works by keeping a queue that workers can
            # push updates to. the main worker works through the queue at report
            # time. We could add some buffering to improve performance, but we
            # are deprioritizing hogwild performance at this time.
            self._buffer = None
            self._queue = multiprocessing.SimpleQueue()
            self._worker = False
            self._data = {}
        elif shared and 'queue' in shared:
            # This is a clone, in threadsafe mode
            self._buffer = {}
            self._queue = shared['queue']
            self._worker = True
            self._data = None
        elif shared and 'data' in shared:
            # This is a clone, in non-threadsafe mode
            self._buffer = None
            self._queue = None
            self._worker = False
            self._data = shared['data']
        else:
            # The original in non-threadsafe mode
            self._buffer = None
            self._queue = None
            self._worker = False
            self._data = {}

    def __str__(self):
        return str(self._data)

    def __repr__(self):
        return f'Metrics({repr(self._data)})'

    def add(self, key: str, value: Optional[Metric]) -> None:
        """
        Record an accumulation to a metric.
        """
        if self._threadsafe and self._worker:
            # workers accumulate locally; pushed to the queue by flush()
            self._buffer[key] = self._buffer.get(key) + value
        else:
            # .get() gives None for new keys; Metric addition treats None
            # as the identity (see aggregate_named_reports).
            self._data[key] = self._data.get(key) + value

    def flush(self):
        """
        Clear the local buffer and push it on.
        """
        if self._threadsafe and self._buffer:
            self._queue.put(self._buffer)
            self._buffer.clear()

    def report(self):
        """
        Report the metrics over all data seen so far.
        """
        self.sync()
        # return a shallow copy so callers cannot mutate internal state
        return {k: v for k, v in self._data.items()}

    def sync(self):
        """
        Process all items on the queue to ensure it is up to date.
        """
        if self._worker:
            self.flush()
        elif self._threadsafe and not self._worker:
            # owner: merge every buffered update pushed by workers
            for buffer_ in self._drain_queue():
                for key, value in buffer_.items():
                    self._data[key] = self._data.get(key) + value

    def _drain_queue(self):
        """
        Drain the queue, yielding all items in it.
        """
        while not self._queue.empty():
            try:
                yield self._queue.get()
            except queue.Empty:
                break

    def clear(self):
        """
        Clear all the metrics.
        """
        if self._worker:
            self._buffer.clear()
        elif self._threadsafe and not self._worker:
            # discard anything still queued from workers
            for _ in self._drain_queue():
                pass
        if self._data:
            self._data.clear()

    def share(self):
        # Clones get the queue (threadsafe) or the shared data dict.
        if self._threadsafe:
            return {'queue': self._queue}
        else:
            return {'data': self._data}
class TeacherMetrics(Metrics):
    """
    Helper container which encapsulates standard metrics (F1, BLEU, ...).
    """

    def __init__(
        self,
        threadsafe: bool = False,
        metrics_list: str = "default",
        shared: Dict[str, Any] = None,
    ) -> None:
        super().__init__(threadsafe=threadsafe, shared=shared)
        self._metrics_list = self._infer_metrics(metrics_list)
        # ranks at which hits@k is computed
        self.eval_pr = [1, 5, 10, 100]

    @staticmethod
    def _infer_metrics(cli_arg: str) -> Set[str]:
        """
        Parse the CLI metric into a list of metrics we wish to compute.
        """
        col: Set[str] = set()
        names = cli_arg.split(",")
        for n in names:
            if n == 'default':
                col |= DEFAULT_METRICS
            elif n == 'rouge':
                col |= ROUGE_METRICS
            elif n == 'bleu':
                col |= BLEU_METRICS
            elif n == 'all':
                col |= ALL_METRICS
            else:
                col.add(n)
        return col

    def _update_ranking_metrics(self, observation, labels):
        """Accumulate hits@k metrics from the observation's ranked candidates."""
        text_cands = observation.get('text_candidates', None)
        if text_cands is None:
            return
        # Now loop through text candidates, assuming they are sorted.
        # If any of them is a label then score a point.
        # maintain hits@1, 5, 10, 50, 100, etc.
        label_set = set(normalize_answer(l) for l in labels)
        cnts = {k: 0 for k in self.eval_pr}
        cnt = 0
        for c in text_cands:
            cnt += 1
            if normalize_answer(c) in label_set:
                for k in self.eval_pr:
                    if cnt <= k:
                        cnts[k] += 1
        # hits metric is 1 if cnts[k] > 0.
        # (other metrics such as p@k and r@k take
        # the value of cnt into account.)
        for k in self.eval_pr:
            self.add(f'hits@{k}', AverageMetric(cnts[k] > 0))

    def evaluate_response(self, observation: Message, labels: List[str]) -> None:
        """
        Compute all required text-based metrics based on an observation and labels.
        """
        prediction = observation.get('text', None)
        self.add('exs', SumMetric(1))
        if prediction is not None:
            self.add('accuracy', ExactMatchMetric.compute(prediction, labels))
            self.add('f1', F1Metric.compute(prediction, labels))
            for k in range(1, 5):  # 1..4
                if f'bleu-{k}' in self._metrics_list:
                    self.add(f'bleu-{k}', BleuMetric.compute(prediction, labels, k))
            # if any of the rouges are in the list
            if self._metrics_list & ROUGE_METRICS:
                r1, r2, rL = RougeMetric.compute_many(prediction, labels)
                if 'rouge-1' in self._metrics_list:
                    self.add('rouge_1', r1)
                if 'rouge-2' in self._metrics_list:
                    self.add('rouge_2', r2)
                if 'rouge-L' in self._metrics_list:
                    self.add('rouge_L', rL)
        # Ranking metrics.
        self._update_ranking_metrics(observation, labels)
        # User-reported metrics
        if 'metrics' in observation:
            for uk, v in observation['metrics'].items():
                if uk in ALL_METRICS:
                    # don't let the user override our metrics
                    uk = f'USER_{uk}'
                # Fixed: the assertion message previously read `type(k)`,
                # which referenced the leftover bleu loop index rather than
                # the key actually being validated.
                assert isinstance(uk, str), type(uk)
                if not isinstance(v, Metric):
                    warn_once(f'Metric {uk} is assumed to be averaged per example.')
                    v = AverageMetric(v)
                assert isinstance(v, Metric)
                self.add(uk, v)
        # always flush at the end of processing this response
        self.flush()
| parlai/core/metrics.py | 25,487 | Class that keeps a running average of some metric.
Examples of AverageMetrics include hits@1, F1, accuracy, etc. These metrics all have
per-example values that can be directly mapped back to a teacher.
Helper class which computes token-level F1.
Fixed metrics are verified to be the same when combined, or throw an error.
FixedMetric is used for things like total_train_updates, which should not be
combined across different multitasks or different workers.
Global Average metric.
Used for things like learning rate, and many agent-specific metrics.
Global fixed metric.
Used for things like total_train_updates.
A global metric is one that should not be aggregated across different tasks.
Examples of global metric include things like learning rate and updates.
These need to be accumulated or averaged over multiple parleys, but cannot
be correlated with a single task.
Key to it is the notion that any one worker or any one task already has a global
view of the value, and so no combinations should be done. Note this is different
then a FixedMetric, in that a GlobalMetric can be still averaged across multiple
parleys(), but a FixedMetric is always fixed.
Global sum metric.
Used for 'exs' and 'updates'.
Legacy Metrics are reported by agent as float.
Class that represents the macro average of several numbers.
Used for aggregating task level metrics. It is only used for things that are
AverageMetrics already.
Base class for storing metrics.
Subclasses should define .value(). Examples are provided for each subclass.
Threadsafe metrics container focused on aggregation.
Class that keeps a running sum of some metric.
Examples of SumMetric include things like "exs", the number of examples seen since
the last report, which depends exactly on a teacher.
Helper container which encapsulates standard metrics (F1, BLEU, ...).
A timer metric keep tracks of the first/last times it was used.
Used heavily for assertAlmostEqual.
NOTE: This is not necessary in python 3.7+.
Used heavily for assertAlmostEqual.
Drain the queue, yielding all items in it.
Parse the CLI metric into a list of metrics we wish to compute.
Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
Record an accumulation to a metric.
Aggregate metrics from multiple reports.
:param reports:
Dict of tasks -> metrics.
:param micro_average:
If true, top level metrics will be the micro average. By default, we
use macro average.
:return:
The aggregated report
Combines metrics without regard for tracking provenence.
Clear all the metrics.
Compute approximate BLEU score between guess and a set of answers.
Return BLEU-1..4 using fairseq and tokens.
Compute ROUGE score between guess and *any* answer.
Done with compute_many due to increased efficiency.
:return: (rouge-1, rouge-2, rouge-L)
Compute all required text-based metrics based on an observation and labels.
Clear the local buffer and push it on.
Indicates whether this metric should be reported globally or per-task.
Indicates whether this metric should be macro-averaged when globally reported.
Indicates whether this metric should be macro-averaged when globally reported.
Construct many of a Metric from the base parts.
Useful if you separately compute numerators and denomenators, etc.
Lower text and remove punctuation, articles and extra whitespace.
Report the metrics over all data seen so far.
Process all items on the queue to ensure it is up to date.
Return the value of the metric as a float.
Provides standard metric evaluations for dialog.
Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics between
processes.
!/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. type: ignore User doesn't have nltk installed, so we can't use it for bleu We'll just turn off things, but we might want to warn the user User doesn't have py-rouge installed, so we can't use it. We'll just turn off rouge computations type: ignore NOTE: hinting can be cleaned up with "from __future__ import annotations" when we drop Python 3.6 always keep the same return type NOTE: hinting can be cleaned up with "from __future__ import annotations" when we drop Python 3.6 always keep the same return type don't nan out if we haven't counted anything NOTE: hinting can be cleaned up with "from __future__ import annotations" when we drop Python 3.6 bleu library not installed, just return a default value Warning: BLEU calculation *should* include proper tokenization and punctuation etc. We're using the normalize_answer for everything though, so we're over-estimating our BLEU scores. Also note that NLTK's bleu is going to be slower than fairseq's (which is written in C), but fairseq's requires that everything be in arrays of ints (i.e. as tensors). NLTK's works with strings, which is better suited for this module. possible global initialization TODO: this could almost certainly be faster with a regex \s+ -> ' ' no real aggregation to be done reporters is a list of teachers or worlds just take the first one we saw none + a => a from implementation of Metric.__add__ macro average Threadsafe metrics tracking works by keeping a queue that workers can push updates to. the main worker works through the queue at report time. We could add some buffering to improve performance, but we are deprioritizing hogwild performance at this time. 
This is a clone, in threadsafe mode This is a clone, in non-threadsafe mode The original in non-threadsafe mode Now loop through text candidates, assuming they are sorted. If any of them is a label then score a point. maintain hits@1, 5, 10, 50, 100, etc. hits metric is 1 if cnts[k] > 0. (other metrics such as p@k and r@k take the value of cnt into account.) 1..4 if any of the rouges are in the list Ranking metrics. User-reported metrics don't let the user override our metrics always flush at the end of processing this response | 6,161 | en | 0.882948 |
from abcunits import ConversionUnit
# Offset between Kelvin and Celsius.  This is exact: the Celsius scale is
# defined as K - 273.15.
KFACTOR = 273.15


class TempUnit(ConversionUnit):
    """ Temperature units. All conversions go through Kelvin. """
    # http://www.metric-conversions.org/temperature/fahrenheit-to-kelvin.htm
class Kelvin(TempUnit):
    """Kelvin: the canonical unit through which all conversions pass."""
    short = 'K'
    full = 'Kelvin'
    # Fixed: the raw-string prefix was inside the quotes ('r$...'), which put
    # a literal "r" at the front of the rendered symbol.
    # NOTE(review): kelvin is conventionally written without a degree sign,
    # and mathtext usually spells the degree as \circ -- confirm the desired
    # rendering before changing further.
    symbol = r'$^{\deg}K$'
    _canonical = True

    @staticmethod
    def to_canonical(x):
        """Identity: Kelvin is already the canonical unit."""
        return x

    @staticmethod
    def from_canonical(x):
        """Identity: Kelvin is already the canonical unit."""
        return x
class Celsius(TempUnit):
    """Degrees Celsius; offset from Kelvin by KFACTOR."""
    short = 'C'
    full = 'Celsius'
    # Fixed: raw-string prefix was inside the quotes ('r$...'), injecting a
    # literal "r" into the symbol.
    symbol = r'$^{\deg}C$'

    @staticmethod
    def to_canonical(x):
        """C -> K."""
        return x + KFACTOR

    @staticmethod
    def from_canonical(x):
        """K -> C."""
        return x - KFACTOR
class Farenheiht(TempUnit):
    """Degrees Fahrenheit.

    The class name keeps the original misspelling because external code may
    reference it; only the display name is corrected.
    """
    short = 'F'
    # Fixed display-name misspelling ("Farenheiht"); lookups use 'short'.
    full = 'Fahrenheit'
    # Fixed: raw-string prefix was inside the quotes ('r$...').
    symbol = r'$^{\deg}F$'

    @staticmethod
    def to_canonical(x):
        """F -> K: subtract 32, divide by 1.8, then add the Kelvin offset."""
        # Use KFACTOR for consistency (was a hard-coded 273.15 literal).
        return ((x - 32.00) / 1.80) + KFACTOR

    @staticmethod
    def from_canonical(x):
        """K -> F: remove the Kelvin offset, multiply by 1.8, add 32."""
        return ((x - KFACTOR) * 1.80) + 32.00
# Registry of known temperature units, keyed by their short name.
_tempunits = (Kelvin(),
              Celsius(),
              Farenheiht(),
              ConversionUnit()  # For null case
              )

# Fixed: this line previously had extraction residue appended after the
# expression, which made the module unparseable.
TEMPUNITS = dict((obj.short, obj) for obj in _tempunits)
Difference Kelvin, C (how precise is this known?)http://www.metric-conversions.org/temperature/fahrenheit-to-kelvin.htmProper names, keep this way?Isn't degree Kelvin technially wrong?For null case | 254 | en | 0.801498 |
##
##
# File auto-generated against equivalent DynamicSerialize Java class
class DeleteRequest(object):
    """
    Request describing datasets/groups to delete from a file.

    Auto-generated against the equivalent DynamicSerialize Java class, which
    is why it keeps Java-style accessors instead of bare attributes.
    """

    def __init__(self):
        # Every field starts unset; callers populate them via the setters.
        for field in ('datasets', 'groups', 'filename'):
            setattr(self, field, None)

    def getDatasets(self):
        """Return the 'datasets' field."""
        return self.datasets

    def setDatasets(self, datasets):
        """Set the 'datasets' field."""
        self.datasets = datasets

    def getGroups(self):
        """Return the 'groups' field."""
        return self.groups

    def setGroups(self, groups):
        """Set the 'groups' field."""
        self.groups = groups

    def getFilename(self):
        """Return the 'filename' field."""
        return self.filename

    def setFilename(self, filename):
        """Set the 'filename' field."""
        self.filename = filename
| dynamicserialize/dstypes/com/raytheon/uf/common/pypies/request/DeleteRequest.py | 589 | File auto-generated against equivalent DynamicSerialize Java class | 66 | en | 0.815673 |
# -*- coding: utf-8 -*-
{
    "name": """Preview Media Files""",
    "summary": """Open attached images in popup""",
    "category": "Web",
    "images": ["images/screenshot-1.png"],
    # Fixed key: was misspelled "vesion", which Odoo silently ignores,
    # leaving the module without a declared version.
    "version": "10.0.1.0.0",
    "application": False,
    "author": "IT-Projects LLC, Dinar Gabbasov",
    "support": "apps@itpp.dev",
    "website": "https://twitter.com/gabbasov_dinar",
    "license": "OPL-1",
    "price": 19.00,
    "currency": "EUR",
    "depends": ["ir_attachment_url"],
    "external_dependencies": {"python": [], "bin": []},
    "data": ["views/web_preview_template.xml"],
    "qweb": ["static/src/xml/media_tree_view_widget.xml"],
    "demo": [],
    "post_load": None,
    "pre_init_hook": None,
    "post_init_hook": None,
    "auto_install": False,
    "installable": True,
}
| web_preview/__manifest__.py | 787 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# convert / import osm xml .osm file into a Shapefile
import subprocess
import os
import shutil
# specify output format
# specify output format
output_format = "ESRI Shapefile"

# complete path to input OSM xml file .osm
input_osm = '../geodata/OSM_san_francisco_westbluff.osm'

# Fixed: ogr2ogr/ogr_info were only defined in the commented-out Windows
# lines below, so the calls further down raised NameError on every other
# platform.  Default to the executables found on the PATH.
ogr2ogr = "ogr2ogr"
ogr_info = "ogrinfo"

# Windows users can override with explicit paths if needed
# ogr2ogr = r"c:/OSGeo4W/bin/ogr2ogr.exe"
# ogr_info = r"c:/OSGeo4W/bin/ogrinfo.exe"

# view what geometry types are available in our OSM file
subprocess.call([ogr_info, input_osm])

destination_dir = os.path.realpath('../geodata/temp')

if os.path.isdir(destination_dir):
    # remove output folder if it exists
    shutil.rmtree(destination_dir)
    print("removing existing directory : " + destination_dir)

# create new output folder
os.mkdir(destination_dir)
print("creating new directory : " + destination_dir)

# list of geometry types to convert to Shapefile
geom_types = ["lines", "points", "multilinestrings", "multipolygons"]

# create a new Shapefile for each geometry type
for g_type in geom_types:
    subprocess.call([ogr2ogr,
                     "-skipfailures", "-f", output_format,
                     destination_dir, input_osm,
                     "layer", g_type,
                     "--config", "OSM_USE_CUSTOM_INDEXING", "NO"])
    print("done creating " + g_type)

# if you like to export to SPATIALITE from .osm
# subprocess.call([ogr2ogr, "-skipfailures", "-f",
#                  "SQLITE", "-dsco", "SPATIALITE=YES",
#                  "my2.sqlite", input_osm])
| ch03/code/ch03-04_osm2shp.py | 1,553 | !/usr/bin/env python -*- coding: utf-8 -*- convert / import osm xml .osm file into a Shapefile specify output format complete path to input OSM xml file .osm Windows users can uncomment these two lines if needed ogr2ogr = r"c:/OSGeo4W/bin/ogr2ogr.exe" ogr_info = r"c:/OSGeo4W/bin/ogrinfo.exe" view what geometry types are available in our OSM file remove output folder if it exists create new output folder list of geometry types to convert to Shapefile create a new Shapefile for each geometry type if you like to export to SPATIALITE from .osm subprocess.call([ogr2ogr, "-skipfailures", "-f", "SQLITE", "-dsco", "SPATIALITE=YES", "my2.sqlite", input_osm]) | 673 | en | 0.564318 |
from flask_wtf import FlaskForm
from flask_wtf.file import FileRequired
from wtforms import (
StringField,
SubmitField,
PasswordField,
FileField,
SelectField,
TextAreaField,
BooleanField,
)
from wtforms.validators import DataRequired, Length, ValidationError
from models import RoomBGMTypes
"""
Reference:
movie_id = form.movie_id.data
place = form.place.data
imageid = form.imageid.data
title = form.title.data
"""
class RoomMovieForm(FlaskForm):
    """Form assigning a movie (by ID, title, image) to a place in a room."""
    movie_id = StringField("Movie ID")
    place = StringField("Place in Room")
    imageid = StringField("Image ID")
    title = StringField("Movie Title")
    submit = SubmitField("Done!")
class LoginForm(FlaskForm):
    """Admin login form (username + password)."""
    username = StringField("Username")
    password = PasswordField("Password")
    submit = SubmitField("Enter the underground")
class NewsForm(FlaskForm):
    """Form to create a news entry."""
    news = TextAreaField("News Contents", validators=[DataRequired()])
    create = SubmitField("Create!")
class MiiUploadForm(FlaskForm):
    """Form to upload a Mii with its name and hex clothing colors."""
    mii = FileField("Mii Selection", validators=[FileRequired()])
    name = StringField("Mii Name", validators=[DataRequired()])
    color1 = StringField("Shirt Color (Hex)", validators=[DataRequired()])
    color2 = StringField("Pants Color (Hex)", validators=[DataRequired()])
    upload = SubmitField("Add Mii")
class NewUserForm(FlaskForm):
    """Form to create a new user account with a confirmed password."""
    username = StringField("Username", validators=[DataRequired()])
    password1 = PasswordField("Password", validators=[DataRequired()])
    password2 = PasswordField("Confirm Password", validators=[DataRequired()])
    upload = SubmitField("Complete")

    def validate_password1(self, _):
        """Ensure both password fields match.

        Fixed: WTForms only honours validation errors that are *raised*;
        the original returned the ValidationError, so mismatched passwords
        silently passed validation.
        """
        if self.password1.data != self.password2.data:
            raise ValidationError("New passwords must be the same")
class ChangePasswordForm(FlaskForm):
    """Form to change the current user's password, with confirmation."""
    current_password = PasswordField("Password", validators=[DataRequired()])
    new_password = PasswordField("New Password", validators=[DataRequired()])
    new_password_confirmation = PasswordField(
        "Confirm New Password", validators=[DataRequired()]
    )
    complete = SubmitField("Complete")

    def validate_current_password(self, _):
        """Reject reusing the current password as the new one.

        Fixed: the error was previously *returned*, not raised, so WTForms
        ignored it and validation silently passed.
        """
        if self.current_password.data == self.new_password.data:
            raise ValidationError("New password cannot be the same as current!")

    def validate_new_password(self, _):
        """Ensure the new password and its confirmation match (fixed: raise)."""
        if self.new_password.data != self.new_password_confirmation.data:
            raise ValidationError("New passwords must be the same")
class MovieUploadForm(FlaskForm):
    """Form to upload a movie with title, thumbnail and category."""
    movie = FileField("Movie", validators=[FileRequired()])
    title = StringField("Movie title", validators=[DataRequired(), Length(max=48)])
    thumbnail = FileField("Movie thumbnail", validators=[FileRequired()])
    # Choices for the select field are only evaluated once, so we must set it when necessary.
    category = SelectField("Movie category", validators=[DataRequired()])
    upload = SubmitField("Add Movie")
class CategoryAddForm(FlaskForm):
    """Form to add a movie category; thumbnail is required on creation."""
    category_name = StringField("Category Name", validators=[DataRequired()])
    thumbnail = FileField("Movie thumbnail", validators=[FileRequired()])
    submit = SubmitField("Add")
class CategoryEditForm(FlaskForm):
    """Form to edit a category; thumbnail is optional (keep existing if blank)."""
    category_name = StringField("Category Name", validators=[DataRequired()])
    # Unlike CategoryAddForm, no FileRequired: an empty upload keeps the old file.
    thumbnail = FileField("Movie thumbnail")
    submit = SubmitField("Edit")
class ParadeForm(FlaskForm):
    """Form to create a parade entry with news text, company and banner."""
    news = StringField("News", validators=[DataRequired()])
    company = StringField("Company", validators=[DataRequired()])
    image = FileField("Parade Banner", validators=[FileRequired()])
    submit = SubmitField("Create")
class RoomForm(FlaskForm):
    """Form to create a room: BGM, logo, toggles, and greeting messages."""
    bgm = SelectField(
        "Background Music",
        # Choices come from the RoomBGMTypes enum defined in models.
        choices=RoomBGMTypes.choices(),
        coerce=RoomBGMTypes.coerce,
    )
    room_logo = FileField("Room Logo", validators=[FileRequired()])
    has_mascot = BooleanField("Mascot Enabled")
    has_contact = BooleanField("Show Contact Information")
    intro_msg = StringField("Intro Message", validators=[DataRequired()])
    mii_msg = StringField("Mii Message", validators=[DataRequired()])
    submit = SubmitField("Create")
class KillMii(FlaskForm):
    """Form to delete an item by its ID."""
    given_id = StringField("Item ID", validators=[DataRequired()])
    submit = SubmitField("Delete!")
class ConciergeForm(FlaskForm):
    """Form to configure a concierge: profession, seven messages, movie ID."""
    prof = StringField("Profession", validators=[DataRequired()])
    message1 = StringField("Message 1", validators=[DataRequired()])
    message2 = StringField("Message 2", validators=[DataRequired()])
    message3 = StringField("Message 3", validators=[DataRequired()])
    message4 = StringField("Message 4", validators=[DataRequired()])
    message5 = StringField("Message 5", validators=[DataRequired()])
    message6 = StringField("Message 6", validators=[DataRequired()])
    message7 = StringField("Message 7", validators=[DataRequired()])
    movieid = StringField("Movie ID", validators=[DataRequired()])
    submit = SubmitField("Create!")
class PosterForm(FlaskForm):
    """Form to create a poster from an image, title and message."""
    file = FileField("Poster Image", validators=[FileRequired()])
    title = StringField("Title", validators=[DataRequired()])
    msg = StringField("Message", validators=[DataRequired()])
    upload = SubmitField("Create Poster!")
| theunderground/forms.py | 5,251 | Choices for the select field are only evaluated once, so we must set it when necessary. | 87 | en | 0.898393 |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2020 The UFO Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""UFO test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
UFO/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import math
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, assert_equal
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
MAX_BLOOM_FILTER_SIZE = 36000
MAX_BLOOM_HASH_FUNCS = 50

COIN = 100000000  # 1 btc in satoshis
MAX_MONEY = 21000000 * COIN

BIP125_SEQUENCE_NUMBER = 0xfffffffd  # Sequence number that is BIP 125 opt-in and BIP 68-opt-out

MAX_PROTOCOL_MESSAGE_LENGTH = 4000000  # Maximum length of incoming protocol messages
MAX_HEADERS_RESULTS = 2000  # Number of headers sent in one getheaders result
MAX_INV_SIZE = 50000  # Maximum number of entries in an 'inv' protocol message

# Service flag bits advertised in version/addr messages.
NODE_NETWORK = (1 << 0)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_COMPACT_FILTERS = (1 << 6)
NODE_NETWORK_LIMITED = (1 << 10)

# Inventory object types used in inv/getdata messages; the witness flag is
# OR'd onto a base type to request witness serialization.
MSG_TX = 1
MSG_BLOCK = 2
MSG_FILTERED_BLOCK = 3
MSG_CMPCT_BLOCK = 4
MSG_WTX = 5
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
MSG_WITNESS_TX = MSG_TX | MSG_WITNESS_FLAG

FILTER_TYPE_BASIC = 0

WITNESS_SCALE_FACTOR = 4
# Serialization/deserialization tools
def sha256(s):
    """Return the SHA-256 digest of *s* as bytes."""
    return hashlib.sha256(s).digest()
def hash256(s):
    """Return SHA-256 applied twice to *s* (the protocol's double hash)."""
    inner = sha256(s)
    return sha256(inner)
def ser_compact_size(l):
    """Serialize integer *l* in CompactSize (varint) encoding."""
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a CompactSize-encoded integer from file-like object *f*."""
    marker = struct.unpack("<B", f.read(1))[0]
    if marker == 253:
        return struct.unpack("<H", f.read(2))[0]
    if marker == 254:
        return struct.unpack("<I", f.read(4))[0]
    if marker == 255:
        return struct.unpack("<Q", f.read(8))[0]
    # Values below 253 are stored directly in the first byte.
    return marker
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from *f*."""
    length = deser_compact_size(f)
    return f.read(length)
def ser_string(s):
    """Serialize *s* as a CompactSize length followed by the raw bytes."""
    prefix = ser_compact_size(len(s))
    return prefix + s
def deser_uint256(f):
    """Read a 256-bit little-endian integer (8 x 32-bit words) from *f*."""
    value = 0
    for word_index in range(8):
        word = struct.unpack("<I", f.read(4))[0]
        value |= word << (word_index * 32)
    return value
def ser_uint256(u):
    """Serialize integer *u* as 32 little-endian bytes (8 x 32-bit words)."""
    words = []
    for _ in range(8):
        words.append(struct.pack("<I", u & 0xFFFFFFFF))
        u >>= 32
    return b"".join(words)
def uint256_from_str(s):
    """Convert the first 32 bytes of *s* (little-endian) to an integer."""
    words = struct.unpack("<IIIIIIII", s[:32])
    total = 0
    for i, word in enumerate(words):
        total += word << (i * 32)
    return total
def uint256_from_compact(c):
    """Expand a compact-encoded ("nBits") target into a full integer.

    The top byte is a size exponent; the low 3 bytes are the mantissa.
    """
    exponent = (c >> 24) & 0xFF
    mantissa = c & 0xFFFFFF
    return mantissa << (8 * (exponent - 3))
# deser_function_name: Allow for an alternate deserialization function on the
# entries in the vector.
def deser_vector(f, c, deser_function_name=None):
    """Read a CompactSize-prefixed vector of *c* instances from *f*."""
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        if deser_function_name:
            getattr(obj, deser_function_name)(f)
        else:
            obj.deserialize(f)
        items.append(obj)
    return items
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
    """Serialize list *l* as a CompactSize count followed by each element."""
    out = ser_compact_size(len(l))
    for item in l:
        if ser_function_name:
            out += getattr(item, ser_function_name)()
        else:
            out += item.serialize()
    return out
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed vector of uint256 values from *f*."""
    count = deser_compact_size(f)
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a list of uint256 values with a CompactSize count prefix."""
    parts = [ser_compact_size(len(l))]
    for value in l:
        parts.append(ser_uint256(value))
    return b"".join(parts)
def deser_string_vector(f):
    """Read a CompactSize-prefixed vector of byte strings from *f*."""
    count = deser_compact_size(f)
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a list of byte strings with a CompactSize count prefix."""
    parts = [ser_compact_size(len(l))]
    for sv in l:
        parts.append(ser_string(sv))
    return b"".join(parts)
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
    """Fill *obj* by deserializing *hex_string*; returns *obj* for chaining."""
    stream = BytesIO(hex_str_to_bytes(hex_string))
    obj.deserialize(stream)
    return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    """Return the hex encoding of ``obj.serialize()``."""
    serialized = obj.serialize()
    return serialized.hex()
# Objects that map to UFOd objects, which can be serialized/deserialized
class CAddress:
    """Network address entry as exchanged in addr/addrv2 and version messages."""
    __slots__ = ("net", "ip", "nServices", "port", "time")

    # see https://github.com/UFO/bips/blob/master/bip-0155.mediawiki
    NET_IPV4 = 1

    # Human-readable names for the BIP155 network IDs we support.
    ADDRV2_NET_NAME = {
        NET_IPV4: "IPv4"
    }

    # Expected address payload length (bytes) per BIP155 network ID.
    ADDRV2_ADDRESS_LENGTH = {
        NET_IPV4: 4
    }

    def __init__(self):
        self.time = 0
        self.nServices = 1
        self.net = self.NET_IPV4
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f, *, with_time=True):
        """Deserialize from addrv1 format (pre-BIP155)"""
        if with_time:
            # VERSION messages serialize CAddress objects without time
            self.time = struct.unpack("<I", f.read(4))[0]
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        # We only support IPv4 which means skip 12 bytes and read the next 4 as IPv4 address.
        f.read(12)
        self.net = self.NET_IPV4
        self.ip = socket.inet_ntoa(f.read(4))
        # port is serialized big-endian on the wire
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize(self, *, with_time=True):
        """Serialize in addrv1 format (pre-BIP155)"""
        assert self.net == self.NET_IPV4
        r = b""
        if with_time:
            # VERSION messages serialize CAddress objects without time
            r += struct.pack("<I", self.time)
        r += struct.pack("<Q", self.nServices)
        # 12-byte IPv4-mapped IPv6 prefix preceding the 4 IPv4 address bytes
        r += b"\x00" * 10 + b"\xff" * 2
        r += socket.inet_aton(self.ip)
        r += struct.pack(">H", self.port)
        return r

    def deserialize_v2(self, f):
        """Deserialize from addrv2 format (BIP155)"""
        self.time = struct.unpack("<I", f.read(4))[0]
        # nServices is CompactSize-encoded in addrv2
        self.nServices = deser_compact_size(f)
        self.net = struct.unpack("B", f.read(1))[0]
        assert self.net == self.NET_IPV4
        address_length = deser_compact_size(f)
        assert address_length == self.ADDRV2_ADDRESS_LENGTH[self.net]
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize_v2(self):
        """Serialize in addrv2 format (BIP155)"""
        assert self.net == self.NET_IPV4
        r = b""
        r += struct.pack("<I", self.time)
        r += ser_compact_size(self.nServices)
        r += struct.pack("B", self.net)
        r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net])
        r += socket.inet_aton(self.ip)
        r += struct.pack(">H", self.port)
        return r

    def __repr__(self):
        return ("CAddress(nServices=%i net=%s addr=%s port=%i)"
                % (self.nServices, self.ADDRV2_NET_NAME[self.net], self.ip, self.port))
class CInv:
    """Inventory vector entry: a (type, hash) pair for inv/getdata messages."""
    __slots__ = ("hash", "type")

    # Maps inventory type codes to display names for __repr__.
    typemap = {
        0: "Error",
        MSG_TX: "TX",
        MSG_BLOCK: "Block",
        MSG_TX | MSG_WITNESS_FLAG: "WitnessTx",
        MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock",
        MSG_FILTERED_BLOCK: "filtered Block",
        MSG_CMPCT_BLOCK: "CompactBlock",
        MSG_WTX: "WTX",
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        self.type = struct.unpack("<I", f.read(4))[0]
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<I", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" % (self.typemap[self.type], self.hash)

    def __eq__(self, other):
        return isinstance(other, CInv) and self.hash == other.hash and self.type == other.type
class CBlockLocator:
    """Block locator: a list of block hashes used by getheaders/getblocks."""
    __slots__ = ("nVersion", "vHave")

    def __init__(self):
        self.vHave = []

    def deserialize(self, f):
        struct.unpack("<i", f.read(4))[0]  # Ignore version field.
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        # UFO Core ignores the version field, so always serialize it as 0.
        return struct.pack("<i", 0) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(vHave=%s)" % (repr(self.vHave))
class COutPoint:
    """Reference to one output of a transaction: (txid hash, output index)."""
    __slots__ = ("hash", "n")

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
    """Transaction input: previous outpoint, scriptSig, and sequence number."""
    __slots__ = ("nSequence", "prevout", "scriptSig")

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        # Default to a fresh null outpoint when none is supplied.
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return (self.prevout.serialize()
                + ser_string(self.scriptSig)
                + struct.pack("<I", self.nSequence))

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" % (
            repr(self.prevout), self.scriptSig.hex(), self.nSequence)
class CTxOut:
    """Transaction output: value in satoshis plus a scriptPubKey."""
    __slots__ = ("nValue", "scriptPubKey")

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        # Render the value as whole coins with 8 decimal places.
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" % (
            self.nValue // COIN, self.nValue % COIN, self.scriptPubKey.hex())
class CScriptWitness:
    """Witness data for a single input: a stack of byte strings."""
    __slots__ = ("stack",)

    def __init__(self):
        # stack is a vector of strings
        self.stack = []

    def __repr__(self):
        return "CScriptWitness(%s)" % (",".join([x.hex() for x in self.stack]))

    def is_null(self):
        """A witness is null when its stack is empty."""
        return not self.stack
class CTxInWitness:
    """Pairs one transaction input with its script witness."""
    __slots__ = ("scriptWitness",)

    def __init__(self):
        self.scriptWitness = CScriptWitness()

    def deserialize(self, f):
        self.scriptWitness.stack = deser_string_vector(f)

    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)

    def __repr__(self):
        return repr(self.scriptWitness)

    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness:
    """Witnesses for all inputs of a transaction."""
    __slots__ = ("vtxinwit",)

    def __init__(self):
        self.vtxinwit = []

    def deserialize(self, f):
        # The wire format carries no count; the caller must pre-populate
        # vtxinwit to match the transaction's vin length.
        for witness in self.vtxinwit:
            witness.deserialize(f)

    def serialize(self):
        # Unlike the usual vector serialization the length is omitted --
        # it is required to equal the length of the transaction's vin vector.
        return b"".join(witness.serialize() for witness in self.vtxinwit)

    def __repr__(self):
        return "CTxWitness(%s)" % ";".join(repr(witness) for witness in self.vtxinwit)

    def is_null(self):
        return all(witness.is_null() for witness in self.vtxinwit)
class CTransaction:
    """A transaction, mirroring CTransaction in UFO/primitives.

    sha256 and hash cache the witness-stripped txid; deserialize()
    invalidates them and calc_sha256()/rehash() recompute them.
    """
    __slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
                 "wit")
    def __init__(self, tx=None):
        # Copy-construct (deep copy of vin/vout/wit) when given an existing tx.
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            self.sha256 = None
            self.hash = None
        else:
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)
    def deserialize(self, f):
        # Auto-detects the BIP 144 witness encoding: an apparently-empty vin
        # is really the segwit marker byte, followed by a flags byte.
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            # matches the implementation in UFOd
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            # Witness data follows; one CTxInWitness per input.
            self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))]
            self.wit.deserialize(f)
        else:
            self.wit = CTxWitness()
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate cached hashes; recomputed lazily by calc_sha256().
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        # Legacy (pre-BIP 144) serialization; this is what the txid commits to.
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    # Only serialize with witness when explicitly called for
    def serialize_with_witness(self):
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # BIP 144 marker: empty vin vector followed by the flags byte.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            if (len(self.wit.vtxinwit) != len(self.vin)):
                # vtxinwit must have the same length as vin
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for _ in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r
    # Regular serialization is with witness -- must explicitly
    # call serialize_without_witness to exclude witness data.
    def serialize(self):
        return self.serialize_with_witness()
    def getwtxid(self):
        # wtxid: double-SHA256 of the witness serialization, hex, byte-reversed.
        return hash256(self.serialize())[::-1].hex()
    # Recalculate the txid (transaction hash without witness)
    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
        return self.hash
    # We will only cache the serialization without witness in
    # self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if with_witness:
            # Don't cache the result, just return it
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = hash256(self.serialize_without_witness())[::-1].hex()
    def is_valid(self):
        # Checks output value ranges only; scripts are not evaluated here.
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    # Calculate the virtual transaction size using witness and non-witness
    # serialization size (does NOT use sigops).
    def get_vsize(self):
        with_witness_size = len(self.serialize_with_witness())
        without_witness_size = len(self.serialize_without_witness())
        return math.ceil(((WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size) / WITNESS_SCALE_FACTOR)
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader:
    """An 80-byte block header, mirroring CBlockHeader in UFO/primitives.

    sha256 (int) and hash (hex string) cache the header hash; call rehash()
    after mutating any field.
    """
    __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nufos", "nNonce",
                 "nTime", "nVersion", "sha256")

    def __init__(self, header=None):
        # Copy-construct from another header when given, else a null header.
        if header is None:
            self.set_null()
        else:
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nufos = header.nufos
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()

    def set_null(self):
        """Reset all fields to their default/null values."""
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nufos = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nufos = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        # Invalidate cached hashes; recomputed on demand by calc_sha256().
        self.sha256 = None
        self.hash = None

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nufos)
        r += struct.pack("<I", self.nNonce)
        return r

    def calc_sha256(self):
        """Compute and cache sha256 (int) and hash (hex string) if stale."""
        if self.sha256 is None:
            # Reuse serialize() instead of duplicating the field packing,
            # and use bytes.hex() as elsewhere in this file.
            r = self.serialize()
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = hash256(r)[::-1].hex()

    def rehash(self):
        """Invalidate and recompute the cached hash; return the int form."""
        self.sha256 = None
        self.calc_sha256()
        return self.sha256

    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nufos=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nufos, self.nNonce)
# Sanity check: a serialized block header must be exactly 80 bytes, matching
# the on-disk/wire format used by UFO Core.
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
    """Full block: header fields plus the transaction list."""
    __slots__ = ("vtx",)

    def __init__(self, header=None):
        super().__init__(header)
        self.vtx = []

    def deserialize(self, f):
        super().deserialize(f)
        self.vtx = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        serializer = "serialize_with_witness" if with_witness else "serialize_without_witness"
        return super().serialize() + ser_vector(self.vtx, serializer)

    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        while len(hashes) > 1:
            # Pair adjacent hashes, duplicating the last one on odd counts.
            hashes = [hash256(hashes[i] + hashes[min(i + 1, len(hashes) - 1)])
                      for i in range(0, len(hashes), 2)]
        return uint256_from_str(hashes[0])

    def calc_merkle_root(self):
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)

    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        hashes.extend(ser_uint256(tx.calc_sha256(True)) for tx in self.vtx[1:])
        return self.get_merkle_root(hashes)

    def is_valid(self):
        self.calc_sha256()
        target = uint256_from_compact(self.nufos)
        if self.sha256 > target:
            return False
        if not all(tx.is_valid() for tx in self.vtx):
            return False
        return self.calc_merkle_root() == self.hashMerkleRoot

    def solve(self):
        """Grind the nonce until the header hash meets the compact target."""
        self.rehash()
        target = uint256_from_compact(self.nufos)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()

    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nufos=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nufos, self.nNonce, repr(self.vtx))
class PrefilledTransaction:
    """A transaction sent explicitly inside a compact block, with its index."""
    __slots__ = ("index", "tx")

    def __init__(self, index=0, tx=None):
        self.index = index
        self.tx = tx

    def deserialize(self, f):
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)

    def serialize(self, with_witness=True):
        if with_witness:
            body = self.tx.serialize_with_witness()
        else:
            body = self.tx.serialize_without_witness()
        return ser_compact_size(self.index) + body

    def serialize_without_witness(self):
        return self.serialize(with_witness=False)

    def serialize_with_witness(self):
        return self.serialize(with_witness=True)

    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
    """Wire format of a cmpctblock payload (BIP 152).

    Carries the header, a nonce (for the SipHash short-id keys), 6-byte
    shortids and explicitly prefilled transactions.
    """
    __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
                 "shortids", "shortids_length")
    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []
    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for _ in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)
    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn, "serialize_without_witness")
        return r
    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Version-2 compact block wire format: always witness serialization."""
    __slots__ = ()

    def serialize(self):
        return super().serialize(with_witness=True)
def calculate_shortid(k0, k1, tx_hash):
    """Return the BIP 152 compact-block shortid for a transaction hash.

    The shortid is the low 48 bits of SipHash-2-4 keyed with (k0, k1).
    """
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
    """In-memory form of a compact block (BIP 152).

    Unlike P2PHeaderAndShortIDs, prefilled transaction indexes are absolute
    rather than differentially encoded, so they can be used for lookup.
    """
    __slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False
        if p2pheaders_and_shortids is not None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            last_index = -1
            # Undo the differential index encoding of the wire format.
            for x in p2pheaders_and_shortids.prefilled_txn:
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index
    def to_p2p(self):
        # Convert back to the wire format, re-applying differential encoding.
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret
    def get_siphash_keys(self):
        # The two SipHash keys are the first 16 bytes of sha256(header || nonce).
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]
    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
        if prefill_list is None:
            prefill_list = [0]
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))
    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
    """getblocktxn payload: block hash plus differentially-encoded indexes."""
    __slots__ = ("blockhash", "indexes")

    def __init__(self, blockhash=0, indexes=None):
        self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        count = deser_compact_size(f)
        for _ in range(count):
            self.indexes.append(deser_compact_size(f))

    def serialize(self):
        r = ser_uint256(self.blockhash)
        r += ser_compact_size(len(self.indexes))
        for idx in self.indexes:
            r += ser_compact_size(idx)
        return r

    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        prev = -1
        for idx in absolute_indexes:
            self.indexes.append(idx - prev - 1)
            prev = idx

    def to_absolute(self):
        """Decode the stored differential indexes back to absolute positions."""
        absolute = []
        prev = -1
        for delta in self.indexes:
            prev = delta + prev + 1
            absolute.append(prev)
        return absolute

    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
    """blocktxn payload: block hash plus the requested transactions."""
    __slots__ = ("blockhash", "transactions")

    def __init__(self, blockhash=0, transactions=None):
        self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        serializer = "serialize_with_witness" if with_witness else "serialize_without_witness"
        return ser_uint256(self.blockhash) + ser_vector(self.transactions, serializer)

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
    """Partial merkle tree as carried in merkleblock messages."""
    __slots__ = ("nTransactions", "vufos", "vHash")

    def __init__(self):
        self.nTransactions = 0
        self.vHash = []
        self.vufos = []

    def deserialize(self, f):
        (self.nTransactions,) = struct.unpack("<i", f.read(4))
        self.vHash = deser_uint256_vector(f)
        flag_bytes = deser_string(f)
        # Unpack flag bits, least-significant bit of each byte first.
        self.vufos = [flag_bytes[i // 8] & (1 << (i % 8)) != 0
                      for i in range(len(flag_bytes) * 8)]

    def serialize(self):
        r = struct.pack("<i", self.nTransactions)
        r += ser_uint256_vector(self.vHash)
        # Pack the flag bits back into bytes, LSB-first.
        packed = bytearray((len(self.vufos) + 7) // 8)
        for i, bit in enumerate(self.vufos):
            packed[i // 8] |= bit << (i % 8)
        r += ser_string(bytes(packed))
        return r

    def __repr__(self):
        return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vufos=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vufos))
class CMerkleBlock:
    """merkleblock payload: block header plus a partial merkle tree."""
    __slots__ = ("header", "txn")

    def __init__(self):
        self.header = CBlockHeader()
        self.txn = CPartialMerkleTree()

    def deserialize(self, f):
        self.header.deserialize(f)
        self.txn.deserialize(f)

    def serialize(self):
        return self.header.serialize() + self.txn.serialize()

    def __repr__(self):
        return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
    """version message: first message exchanged on a new connection."""
    __slots__ = ("addrFrom", "addrTo", "nNonce", "relay", "nServices",
                 "nStartingHeight", "nTime", "nVersion", "strSubVer")
    msgtype = b"version"

    def __init__(self):
        self.nVersion = 0
        self.nServices = 0
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        # BUG FIX: `random.getrandufos` does not exist (damaged by the global
        # "bits" -> "ufos" rename); the stdlib function is random.getrandbits.
        self.nNonce = random.getrandbits(64)
        self.strSubVer = ''
        self.nStartingHeight = -1
        self.relay = 0

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f, with_time=False)
        self.addrFrom = CAddress()
        self.addrFrom.deserialize(f, with_time=False)
        self.nNonce = struct.unpack("<Q", f.read(8))[0]
        self.strSubVer = deser_string(f).decode('utf-8')
        self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.relay = struct.unpack("<b", f.read(1))[0]
            except struct.error:  # narrowed from a bare except: only the unpack can fail
                self.relay = 0
        else:
            self.relay = 0

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize(with_time=False)
        r += self.addrFrom.serialize(with_time=False)
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer.encode('utf-8'))
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.relay)
        return r

    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i relay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.relay)
class msg_verack:
    """verack message: empty acknowledgement of a version message."""
    __slots__ = ()
    msgtype = b"verack"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"
class msg_addr:
    """addr message: a vector of CAddress entries."""
    __slots__ = ("addrs",)
    msgtype = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_addrv2:
    """addrv2 message (BIP 155): addresses in the v2 wire encoding."""
    __slots__ = ("addrs",)
    msgtype = b"addrv2"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress, "deserialize_v2")

    def serialize(self):
        return ser_vector(self.addrs, "serialize_v2")

    def __repr__(self):
        return "msg_addrv2(addrs=%s)" % (repr(self.addrs))
class msg_sendaddrv2:
    """sendaddrv2 signal message (BIP 155): no payload."""
    __slots__ = ()
    msgtype = b"sendaddrv2"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendaddrv2()"
class msg_inv:
    """inv message: a vector of CInv inventory entries."""
    __slots__ = ("inv",)
    msgtype = b"inv"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
    """getdata message: request the objects named by a vector of CInv."""
    __slots__ = ("inv",)
    msgtype = b"getdata"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
    """getblocks message: block locator plus a stopping hash."""
    __slots__ = ("locator", "hashstop")
    msgtype = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx:
    """tx message wrapping one transaction (witness serialization)."""
    __slots__ = ("tx",)
    msgtype = b"tx"

    def __init__(self, tx=None):
        # BUG FIX: the previous default `tx=CTransaction()` was a mutable
        # default argument -- every default-constructed msg_tx shared the
        # SAME CTransaction instance, leaking state between messages.
        self.tx = CTransaction() if tx is None else tx

    def deserialize(self, f):
        self.tx.deserialize(f)

    def serialize(self):
        return self.tx.serialize_with_witness()

    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_wtxidrelay:
    """wtxidrelay signal message (BIP 339): no payload."""
    __slots__ = ()
    msgtype = b"wtxidrelay"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_wtxidrelay()"
class msg_no_witness_tx(msg_tx):
    """tx message variant that strips witness data when serializing."""
    __slots__ = ()

    def serialize(self):
        return self.tx.serialize_without_witness()
class msg_block:
    """block message wrapping one full block."""
    __slots__ = ("block",)
    msgtype = b"block"

    def __init__(self, block=None):
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the msgtype, and the data
class msg_generic:
    """Raw message with caller-supplied msgtype and payload bytes.

    Used when a test needs tight control over exactly what goes on the wire.
    """
    # BUG FIX: __slots__ was ("data") -- a plain string declaring only the
    # "data" slot -- so `self.msgtype = msgtype` in __init__ raised
    # AttributeError and the class could never be instantiated.
    __slots__ = ("msgtype", "data")

    def __init__(self, msgtype, data=None):
        self.msgtype = msgtype
        self.data = data

    def serialize(self):
        return self.data

    def __repr__(self):
        return "msg_generic()"
class msg_no_witness_block(msg_block):
    """block message variant that strips witness data when serializing."""
    __slots__ = ()

    def serialize(self):
        return self.block.serialize(with_witness=False)
class msg_getaddr:
    """getaddr message requesting known peer addresses: no payload."""
    __slots__ = ()
    msgtype = b"getaddr"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"
class msg_ping:
    """ping message carrying a 64-bit nonce."""
    __slots__ = ("nonce",)
    msgtype = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
    """pong message echoing a ping's 64-bit nonce."""
    __slots__ = ("nonce",)
    msgtype = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
    """mempool message requesting mempool contents: no payload."""
    __slots__ = ()
    msgtype = b"mempool"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"
class msg_notfound:
    """notfound message: inventory entries the peer could not provide."""
    __slots__ = ("vec", )
    msgtype = b"notfound"

    def __init__(self, vec=None):
        # NOTE: preserves the original truthiness semantics (`vec or []`),
        # so any falsy argument yields an empty list.
        self.vec = [] if not vec else vec

    def deserialize(self, f):
        self.vec = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.vec)

    def __repr__(self):
        return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
    """sendheaders signal message (BIP 130): no payload."""
    __slots__ = ()
    msgtype = b"sendheaders"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
    """getheaders message: block locator plus a stopping hash."""
    __slots__ = ("hashstop", "locator",)
    msgtype = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
    """headers message: a list of CBlockHeader objects."""
    __slots__ = ("headers",)
    msgtype = b"headers"

    def __init__(self, headers=None):
        self.headers = headers if headers is not None else []

    def deserialize(self, f):
        # comment in UFOd indicates these should be deserialized as blocks
        for block in deser_vector(f, CBlock):
            self.headers.append(CBlockHeader(block))

    def serialize(self):
        return ser_vector([CBlock(hdr) for hdr in self.headers])

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_merkleblock:
    """merkleblock message wrapping a CMerkleBlock."""
    __slots__ = ("merkleblock",)
    msgtype = b"merkleblock"

    def __init__(self, merkleblock=None):
        self.merkleblock = CMerkleBlock() if merkleblock is None else merkleblock

    def deserialize(self, f):
        self.merkleblock.deserialize(f)

    def serialize(self):
        return self.merkleblock.serialize()

    def __repr__(self):
        return "msg_merkleblock(merkleblock=%s)" % (repr(self.merkleblock))
class msg_filterload:
    """filterload message (BIP 37): install a bloom filter on the peer."""
    __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags")
    msgtype = b"filterload"

    def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0):
        self.data = data
        self.nHashFuncs = nHashFuncs
        self.nTweak = nTweak
        self.nFlags = nFlags

    def deserialize(self, f):
        self.data = deser_string(f)
        (self.nHashFuncs,) = struct.unpack("<I", f.read(4))
        (self.nTweak,) = struct.unpack("<I", f.read(4))
        (self.nFlags,) = struct.unpack("<B", f.read(1))

    def serialize(self):
        return (ser_string(self.data)
                + struct.pack("<I", self.nHashFuncs)
                + struct.pack("<I", self.nTweak)
                + struct.pack("<B", self.nFlags))

    def __repr__(self):
        return "msg_filterload(data={}, nHashFuncs={}, nTweak={}, nFlags={})".format(
            self.data, self.nHashFuncs, self.nTweak, self.nFlags)
class msg_filteradd:
    """filteradd message (BIP 37): add one element to the peer's filter."""
    __slots__ = ("data",)
    msgtype = b"filteradd"

    def __init__(self, data):
        self.data = data

    def deserialize(self, f):
        self.data = deser_string(f)

    def serialize(self):
        return ser_string(self.data)

    def __repr__(self):
        return "msg_filteradd(data={})".format(self.data)
class msg_filterclear:
    """filterclear message (BIP 37): remove the peer's filter; no payload."""
    __slots__ = ()
    msgtype = b"filterclear"

    def __init__(self):
        """No payload."""

    def deserialize(self, f):
        """Nothing to read."""

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_filterclear()"
class msg_feefilter:
    """feefilter message (BIP 133): minimum feerate for relayed txs."""
    __slots__ = ("feerate",)
    msgtype = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        (self.feerate,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
    """sendcmpct message (BIP 152): announce compact-block support."""
    __slots__ = ("announce", "version")
    msgtype = b"sendcmpct"

    def __init__(self, announce=False, version=1):
        self.announce = announce
        self.version = version

    def deserialize(self, f):
        (self.announce,) = struct.unpack("<?", f.read(1))
        (self.version,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
    """cmpctblock message (BIP 152) wrapping a P2PHeaderAndShortIDs."""
    __slots__ = ("header_and_shortids",)
    msgtype = b"cmpctblock"

    def __init__(self, header_and_shortids=None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        return self.header_and_shortids.serialize()

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
    """getblocktxn message (BIP 152) wrapping a BlockTransactionsRequest."""
    __slots__ = ("block_txn_request",)
    msgtype = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)

    def serialize(self):
        return self.block_txn_request.serialize()

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
    """blocktxn message (BIP 152) wrapping a BlockTransactions."""
    __slots__ = ("block_transactions",)
    msgtype = b"blocktxn"

    def __init__(self):
        self.block_transactions = BlockTransactions()

    def deserialize(self, f):
        self.block_transactions.deserialize(f)

    def serialize(self):
        return self.block_transactions.serialize()

    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_no_witness_blocktxn(msg_blocktxn):
    """blocktxn message variant that strips witness data when serializing."""
    __slots__ = ()

    def serialize(self):
        return self.block_transactions.serialize(with_witness=False)
class msg_getcfilters:
    """getcfilters message (BIP 157): request a range of compact filters."""
    __slots__ = ("filter_type", "start_height", "stop_hash")
    msgtype = b"getcfilters"

    def __init__(self, filter_type, start_height, stop_hash):
        self.filter_type = filter_type
        self.start_height = start_height
        self.stop_hash = stop_hash

    def deserialize(self, f):
        (self.filter_type,) = struct.unpack("<B", f.read(1))
        (self.start_height,) = struct.unpack("<I", f.read(4))
        self.stop_hash = deser_uint256(f)

    def serialize(self):
        return (struct.pack("<B", self.filter_type)
                + struct.pack("<I", self.start_height)
                + ser_uint256(self.stop_hash))

    def __repr__(self):
        return "msg_getcfilters(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
            self.filter_type, self.start_height, self.stop_hash)
class msg_cfilter:
    """cfilter message (BIP 157): one compact block filter."""
    __slots__ = ("filter_type", "block_hash", "filter_data")
    msgtype = b"cfilter"

    def __init__(self, filter_type=None, block_hash=None, filter_data=None):
        self.filter_type = filter_type
        self.block_hash = block_hash
        self.filter_data = filter_data

    def deserialize(self, f):
        (self.filter_type,) = struct.unpack("<B", f.read(1))
        self.block_hash = deser_uint256(f)
        self.filter_data = deser_string(f)

    def serialize(self):
        return (struct.pack("<B", self.filter_type)
                + ser_uint256(self.block_hash)
                + ser_string(self.filter_data))

    def __repr__(self):
        return "msg_cfilter(filter_type={:#x}, block_hash={:x})".format(
            self.filter_type, self.block_hash)
class msg_getcfheaders:
    """getcfheaders message (BIP 157): request compact filter headers."""
    __slots__ = ("filter_type", "start_height", "stop_hash")
    msgtype = b"getcfheaders"

    def __init__(self, filter_type, start_height, stop_hash):
        self.filter_type = filter_type
        self.start_height = start_height
        self.stop_hash = stop_hash

    def deserialize(self, f):
        (self.filter_type,) = struct.unpack("<B", f.read(1))
        (self.start_height,) = struct.unpack("<I", f.read(4))
        self.stop_hash = deser_uint256(f)

    def serialize(self):
        return (struct.pack("<B", self.filter_type)
                + struct.pack("<I", self.start_height)
                + ser_uint256(self.stop_hash))

    def __repr__(self):
        return "msg_getcfheaders(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
            self.filter_type, self.start_height, self.stop_hash)
class msg_cfheaders:
    """cfheaders message (BIP 157): filter hashes plus the previous header."""
    __slots__ = ("filter_type", "stop_hash", "prev_header", "hashes")
    msgtype = b"cfheaders"

    def __init__(self, filter_type=None, stop_hash=None, prev_header=None, hashes=None):
        self.filter_type = filter_type
        self.stop_hash = stop_hash
        self.prev_header = prev_header
        self.hashes = hashes

    def deserialize(self, f):
        (self.filter_type,) = struct.unpack("<B", f.read(1))
        self.stop_hash = deser_uint256(f)
        self.prev_header = deser_uint256(f)
        self.hashes = deser_uint256_vector(f)

    def serialize(self):
        return (struct.pack("<B", self.filter_type)
                + ser_uint256(self.stop_hash)
                + ser_uint256(self.prev_header)
                + ser_uint256_vector(self.hashes))

    def __repr__(self):
        return "msg_cfheaders(filter_type={:#x}, stop_hash={:x})".format(
            self.filter_type, self.stop_hash)
class msg_getcfcheckpt:
    """getcfcheckpt message (BIP 157): request filter-header checkpoints."""
    __slots__ = ("filter_type", "stop_hash")
    msgtype = b"getcfcheckpt"

    def __init__(self, filter_type, stop_hash):
        self.filter_type = filter_type
        self.stop_hash = stop_hash

    def deserialize(self, f):
        (self.filter_type,) = struct.unpack("<B", f.read(1))
        self.stop_hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<B", self.filter_type) + ser_uint256(self.stop_hash)

    def __repr__(self):
        return "msg_getcfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
            self.filter_type, self.stop_hash)
class msg_cfcheckpt:
    """cfcheckpt message (BIP 157): filter-header checkpoints."""
    __slots__ = ("filter_type", "stop_hash", "headers")
    msgtype = b"cfcheckpt"

    def __init__(self, filter_type=None, stop_hash=None, headers=None):
        self.filter_type = filter_type
        self.stop_hash = stop_hash
        self.headers = headers

    def deserialize(self, f):
        (self.filter_type,) = struct.unpack("<B", f.read(1))
        self.stop_hash = deser_uint256(f)
        self.headers = deser_uint256_vector(f)

    def serialize(self):
        return (struct.pack("<B", self.filter_type)
                + ser_uint256(self.stop_hash)
                + ser_uint256_vector(self.headers))

    def __repr__(self):
        return "msg_cfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
            self.filter_type, self.stop_hash)
| test/functional/test_framework/messages.py | 51,355 | Deserialize from addrv1 format (pre-BIP155)
Deserialize from addrv2 format (BIP155)
Serialize in addrv1 format (pre-BIP155)
Serialize in addrv2 format (BIP155)
UFO test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
UFO/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
!/usr/bin/env python3 Copyright (c) 2010 ArtForz -- public domain half-a-node Copyright (c) 2012 Jeff Garzik Copyright (c) 2010-2020 The UFO Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. 1 btc in satoshis Sequence number that is BIP 125 opt-in and BIP 68-opt-out Maximum length of incoming protocol messages Number of headers sent in one getheaders result Maximum number of entries in an 'inv' protocol message Serialization/deserialization tools deser_function_name: Allow for an alternate deserialization function on the entries in the vector. ser_function_name: Allow for an alternate serialization function on the entries in the vector (we use this for serializing the vector of transactions for a witness block). Deserialize from a hex string representation (eg from RPC) Convert a binary-serializable object to hex (eg for submission via RPC) Objects that map to UFOd objects, which can be serialized/deserialized see https://github.com/UFO/bips/blob/master/bip-0155.mediawiki VERSION messages serialize CAddress objects without time We only support IPv4 which means skip 12 bytes and read the next 4 as IPv4 address. VERSION messages serialize CAddress objects without time Ignore version field. UFO Core ignores version field. Set it to 0. stack is a vector of strings This is different than the usual vector serialization -- we omit the length of the vector, which is required to be the same length as the transaction's vin vector. Not sure why flags can't be zero, but this matches the implementation in UFOd Only serialize with witness when explicitly called for vtxinwit must have the same length as vin Regular serialization is with witness -- must explicitly call serialize_without_witness to exclude witness data. 
Recalculate the txid (transaction hash without witness) We will only cache the serialization without witness in self.sha256 and self.hash -- those are expected to be the txid. Don't cache the result, just return it Calculate the virtual transaction size using witness and non-witness serialization size (does NOT use sigops). Calculate the merkle root given a vector of transaction hashes For witness root purposes, the hash of the coinbase, with witness, is defined to be 0...0 Calculate the hashes with witness data This is what we send on the wire, in a cmpctblock message. shortids are defined to be 6 bytes in the spec, so append two zero bytes and read it in as an 8-byte number When using version 2 compact blocks, we must serialize with_witness. We only want the first 6 bytes P2P version of the above that will use witness serialization (for compact block version 2) Calculate the BIP 152-compact blocks shortid for a given transaction hash This version gets rid of the array lengths, and reinterprets the differential encoding into indices that can be used for lookup. Version 2 compact blocks use wtxid in shortids (rather than txid) helper to set the differentially encoded indexes from absolute ones Objects that correspond to messages on the wire Relay field is optional for version 70001 onwards for cases where a user needs tighter control over what is sent over the wire note that the user must supply the name of the msgtype, and the data getheaders message has number of entries vector of hashes hash_stop (hash of last desired block header, 0 to get as many as possible) headers message has <count> <vector of block headers> comment in UFOd indicates these should be deserialized as blocks | 4,189 | en | 0.814852 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Messages'
copyright = u'2018, Tim Phillips'
author = u'Tim Phillips, Tasha Chin'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Messagesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Messages.tex', u'Messages Documentation',
u'Tasha Chin', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'messages', u'Messages Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Messages', u'Messages Documentation',
author, 'Messages', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html'] | docs/conf.py | 5,157 | -*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. 
See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Options for Epub output ------------------------------------------------- Bibliographic Dublin Core info. The unique identifier of the text. This can be a ISBN number or the project homepage. 
epub_identifier = '' A unique identification for the text. epub_uid = '' A list of files that should not be packed into the epub file. | 4,028 | en | 0.608195 |
from unittest import TestCase, mock
from bulksms.sms import send_single, send_bulk
class BulkSMSTestCase(TestCase):
    """Unit tests for the bulksms.sms helpers, exercised via autospec mocks."""

    def test_send_single_sms(self):
        # Autospec keeps the stub's signature in sync with send_single.
        single_stub = mock.create_autospec(send_single, return_value='results')
        single_stub('0831234567', 'Message.')
        single_stub.assert_called_once_with('0831234567', 'Message.')
        self.assertEqual(single_stub.call_count, 1)

    def test_send_bulk_sms(self):
        # Autospec keeps the stub's signature in sync with send_bulk.
        bulk_stub = mock.create_autospec(send_bulk, return_value='results')
        bulk_stub('test.txt')
        bulk_stub.assert_called_once_with('test.txt')
        self.assertEqual(bulk_stub.call_count, 1)
| tests/unit/test_bulksms.py | 768 | Mock send single sms function. Mock send bulk sms function. | 59 | en | 0.7476 |
import os
from options.train_options import TrainOptions
from models import create_model
from util.visualizer import save_images
from util import html
from PIL import Image
import string
import torch
import torchvision
import torchvision.transforms as transforms
import coremltools as ct
from util import util
import numpy as np
# Build the option object and pin it to a fixed evaluation configuration
# for the caffemodel checkpoint export below.
opt = TrainOptions().gather_options()
opt.isTrain = True
opt.name = "siggraph_caffemodel"
opt.mask_cent = 0
# opt.name = "siggraph_retrained"
opt.gpu_ids = []  # CPU only; tracing/conversion below does not need a GPU
opt.load_model = True
opt.num_threads = 1  # test code only supports num_threads = 1
opt.batch_size = 1  # test code only supports batch_size = 1
opt.display_id = -1  # no visdom display
opt.phase = 'val'
opt.dataroot = './dataset/ilsvrc2012/%s/' % opt.phase
opt.serial_batches = True
opt.aspect_ratio = 1.
# process opt.suffix
if opt.suffix:
    suffix = ('_' + opt.suffix.format(**vars(opt))
              ) if opt.suffix != '' else ''
    opt.name = opt.name + suffix
# Number of quantization bins derived from the ab range/step — presumably
# the classification output size; confirm against the options definition.
opt.A = 2 * opt.ab_max / opt.ab_quant + 1
opt.B = opt.A
class Colorization(torch.nn.Module):
    """nn.Module wrapper around the project model so it can be traced.

    torch.jit.trace requires plain tensor arguments, but the underlying
    model consumes a dict via set_input(); forward() repacks the tensors
    into that dict layout.
    """
    def __init__(self):
        super(Colorization, self).__init__()
        # Build/load the model from the module-level `opt` configured above.
        model = create_model(opt)
        model.setup(opt)
        model.eval()
        self.model = model
    def forward(self, image, hint):
        # Channel split: image ch 0 -> "A", ch 1-2 -> "B"; hint ch 0-1 ->
        # "hint_B", ch 2 -> "mask_B".  Presumably Lab color (L + ab) with a
        # user-hint mask — TODO confirm against get_colorization_data().
        data = {
            "A": image[:, 0:1, :, :],
            "B": image[:, 1:3, :, :],
            "hint_B": hint[:, 0:2, :, :],
            "mask_B": hint[:, 2:3, :, :]
        }
        # set_input() must run before forward(); the outputs are read back
        # off the model's attributes rather than returned by forward().
        self.model.set_input(data)
        self.model.forward()
        fake_reg = torch.cat((self.model.real_A, self.model.fake_B_reg), dim=1)
        return fake_reg
# --- Load one image, smoke-test the wrapped model, then export to Core ML ---
image_path = "./large.JPG"
image = Image.open(image_path)
image = transforms.Compose([
    transforms.Resize(512),
    transforms.ToTensor(),
])(image)
image = image.view(1, *image.shape)
# Crop so H and W are multiples of 8 (presumably a network-stride
# requirement — confirm), capped at 4032x4032.
image = util.crop_mult(image, mult=8, HWmax=[4032, 4032])
transforms.ToPILImage()(image[0]).show(command='fim')
data = util.get_colorization_data(
    [image], opt, ab_thresh=0., p=0.125)
img = torch.cat((data["A"], data["B"]), dim=1)
hint = torch.cat((data["hint_B"], data["mask_B"]), dim=1)
with torch.no_grad():
    model = Colorization()
    model.eval()
    # Freeze all parameters before tracing.
    for param in model.parameters():
        param.requires_grad = False
    model.model.set_requires_grad(model.model.netG)
# Sanity check: run the wrapped model once and display the RGB result.
output = model(img, hint)
output = util.lab2rgb(output, opt=opt)
transforms.ToPILImage()(output[0]).show(command='fim')
# Trace with a concrete example; check_trace=False because the model's
# forward works through set_input() side effects.
traced_model = torch.jit.trace(
    model, (img, hint), check_trace=False)
mlmodel = ct.convert(model=traced_model, inputs=[
    ct.TensorType(name="image", shape=ct.Shape(
        shape=(1, 3, ct.RangeDim(1, 4096), ct.RangeDim(1, 4096)))),
    ct.TensorType(name="hint", shape=ct.Shape(
        shape=(1, 3, ct.RangeDim(1, 4096), ct.RangeDim(1, 4096)))),
])
# BUG FIX: Python file APIs do not expand "~", so the original call wrote
# to a directory literally named "~" under the CWD; expand it explicitly.
mlmodel.save(os.path.expanduser("~/color.mlmodel"))
| imtest.py | 3,419 | opt.name = "siggraph_retrained" test code only supports num_threads = 1 test code only supports batch_size = 1 no visdom display process opt.suffix with torch.no_grad(): print(data["mask_B"], data["hint_B"]) data["hint_B"] = torch.zeros_like(data["hint_B"]) data["mask_B"] = torch.zeros_like(data["mask_B"]) model = Colorization() model(data) transforms.ToPILImage()(image[0]).show(command='fim') to_visualize = ['gray', 'hint', 'hint_ab', 'fake_entr', 'real', 'fake_reg', 'real_ab', 'fake_ab_reg', ] visuals = util.get_subset_dict( model.model.get_current_visuals(), to_visualize) for key, value in visuals.items(): print(key) transforms.ToPILImage()(value[0]).show(command='fim') | 709 | en | 0.194728 |
from __future__ import division
from itertools import combinations_with_replacement
import numpy as np
import math
import sys
def shuffle_data(X, y, seed=None):
    """Return random permutations of X and y with matching row order.

    Parameters
    ----------
    X : ndarray, shape (n_samples, ...)
    y : ndarray, shape (n_samples,)
    seed : int, optional
        Seed for numpy's global RNG; pass one for a reproducible shuffle.
    """
    # BUG FIX: the original tested `if seed:`, which silently ignored
    # seed=0; compare against None instead.
    if seed is not None:
        np.random.seed(seed)
    n_samples = X.shape[0]
    idx = np.arange(n_samples)
    np.random.shuffle(idx)
    return X[idx], y[idx]
# Divide dataset based on if sample value on feature index is larger than
# the given threshold
def divide_on_feature(X, feature_i, threshold):
    """Split the samples of X on column *feature_i*.

    Numeric thresholds select samples with value >= threshold; any other
    threshold type selects on equality. Returns np.array([match, rest]).
    """
    if isinstance(threshold, (int, float)):
        matches = lambda row: row[feature_i] >= threshold
    else:
        matches = lambda row: row[feature_i] == threshold
    selected, rejected = [], []
    for row in X:
        (selected if matches(row) else rejected).append(row)
    return np.array([np.array(selected), np.array(rejected)])
def polynomial_features(X, degree):
    """Expand X with all polynomial feature combinations up to *degree*.

    Includes the bias column (degree 0). Column order follows
    itertools.combinations_with_replacement over feature indices, from
    degree 0 up to *degree*.
    """
    n_samples, n_features = np.shape(X)
    index_tuples = [
        combo
        for d in range(degree + 1)
        for combo in combinations_with_replacement(range(n_features), d)
    ]
    out = np.empty((n_samples, len(index_tuples)))
    for col, combo in enumerate(index_tuples):
        out[:, col] = np.prod(X[:, combo], axis=1)
    return out
# Return random subsets (with replacements) of the data
def get_random_subsets(X, y, n_subsets, replacements=True):
    """Draw *n_subsets* random subsets of (X, y).

    With replacements=True each subset has n_samples rows sampled with
    replacement (bootstrap); otherwise half the rows without replacement.
    Returns a list of [X_subset, y_subset] pairs.
    """
    n_samples = np.shape(X)[0]
    # Glue y on as a final column so rows stay paired through the shuffle.
    X_y = np.concatenate((X, y.reshape((1, len(y))).T), axis=1)
    np.random.shuffle(X_y)
    subsample_size = n_samples if replacements else n_samples // 2
    subsets = []
    for _ in range(n_subsets):
        chosen = np.random.choice(
            range(n_samples),
            size=(subsample_size,),
            replace=replacements)
        subsets.append([X_y[chosen][:, :-1], X_y[chosen][:, -1]])
    return subsets
# Normalize the dataset X
def normalize(X, axis=-1, order=2):
    """Scale X so each vector along *axis* has unit *order*-norm.

    Zero-norm vectors are left as zeros: their norm is replaced by 1 to
    avoid division by zero.
    """
    norms = np.atleast_1d(np.linalg.norm(X, order, axis))
    norms[norms == 0] = 1
    return X / np.expand_dims(norms, axis)
# Standardize the dataset X
def standardize(X):
    """Standardize the columns of X to zero mean and unit variance.

    Note: this mutates X in place and returns the same array; columns with
    zero standard deviation are left untouched.
    """
    col_mean = X.mean(axis=0)
    col_std = X.std(axis=0)
    X_std = X
    for col, sigma in enumerate(col_std):
        if sigma:
            X_std[:, col] = (X_std[:, col] - col_mean[col]) / sigma
    return X_std
# Split the data into train and test sets
def train_test_split(X, y, test_size=0.5, shuffle=True, seed=None):
    """Split (X, y) into train and test partitions.

    *test_size* is the fraction reserved for the test set; with
    shuffle=True the data is permuted first (optionally seeded).
    Returns (x_train, x_test, y_train, y_test).
    """
    if shuffle:
        X, y = shuffle_data(X, y, seed)
    # Number of training rows: total minus floor(total * test_size).
    split_i = len(y) - int(len(y) // (1 / test_size))
    return X[:split_i], X[split_i:], y[:split_i], y[split_i:]
# Split the data into k sets of training / test data
def k_fold_cross_validation_sets(X, y, k, shuffle=True):
    """Split (X, y) into k cross-validation folds.

    Returns a (k, 4) object array whose rows are
    [X_train, X_test, y_train, y_test]. When n_samples is not divisible by
    k, the left-over samples are appended to the last fold's training data.
    """
    if shuffle:
        X, y = shuffle_data(X, y)
    n_samples = len(y)
    left_overs = {}
    n_left_overs = (n_samples % k)
    if n_left_overs != 0:
        left_overs["X"] = X[-n_left_overs:]
        left_overs["y"] = y[-n_left_overs:]
        X = X[:-n_left_overs]
        y = y[:-n_left_overs]
    X_split = np.split(X, k)
    y_split = np.split(y, k)
    sets = []
    for i in range(k):
        X_test, y_test = X_split[i], y_split[i]
        X_train = np.concatenate(X_split[:i] + X_split[i + 1:], axis=0)
        y_train = np.concatenate(y_split[:i] + y_split[i + 1:], axis=0)
        sets.append([X_train, X_test, y_train, y_test])
    # Add left-over samples to the last set as training samples.
    # BUG FIX: np.append returns a NEW array; the original discarded the
    # result, silently dropping the left-over samples.
    if n_left_overs != 0:
        sets[-1][0] = np.append(sets[-1][0], left_overs["X"], axis=0)
        sets[-1][2] = np.append(sets[-1][2], left_overs["y"], axis=0)
    # BUG FIX: np.array(sets) on ragged sub-arrays raises on NumPy >= 1.24;
    # build the (k, 4) object array explicitly instead.
    folds = np.empty((k, 4), dtype=object)
    for i, fold in enumerate(sets):
        for j in range(4):
            folds[i, j] = fold[j]
    return folds
# Making an array of nominal values into a binarized matrix
def categorical_to_binary(x):
    """One-hot encode integer labels x into an (n, max(x)+1) float matrix."""
    n_col = np.amax(x) + 1
    binarized = np.zeros((len(x), n_col))
    # Set one 1 per row via fancy indexing instead of a Python loop.
    binarized[np.arange(len(x)), x] = 1
    return binarized
# Converting from binary vectors to nominal values
def binary_to_categorical(x):
    """Decode one-hot rows back to integer labels.

    Rows that contain no 1 map to label 0 (same as a 1 in column 0).
    """
    labels = []
    for row in x:
        if 1 in row:
            labels.append(np.where(row == 1)[0][0])
        else:
            labels.append(0)
    return labels
# Converts a vector into an diagonal matrix
def make_diagonal(x):
    """Return a float matrix with the values of x along the main diagonal."""
    m = np.zeros((len(x), len(x)))
    diag = np.arange(len(x))
    # Fill the diagonal in one vectorized assignment (dtype stays float).
    m[diag, diag] = x
    return m
| mlfromscratch/utils/data_manipulation.py | 4,893 | Divide dataset based on if sample value on feature index is larger than the given threshold Return random subsets (with replacements) of the data Concatenate x and y and do a random shuffle Uses 50% of training samples without replacements 100% with replacements Normalize the dataset X Standardize the dataset X X_std = (X - X.mean(axis=0)) / X.std(axis=0) Split the data into train and test sets Split the training data from test data in the ratio specified in test_size Split the data into k sets of training / test data Add left over samples to last set as training samples Making an array of nominal values into a binarized matrix Converting from binary vectors to nominal values Converts a vector into an diagonal matrix | 726 | en | 0.839107 |
'''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.core.exceptions import *
class DataSourceFinished(StopIteration):
    """Raised when a data source has no more records to yield."""
    def __init__(self, msg=None):
        # Idiom fix: replace the fragile `cond and a or b` construct with a
        # conditional expression; the default message stays "Done".
        super().__init__("Done" if msg is None else msg)
class EmptyListDataRecordLookupException(Exception):
    """Raised when a list data record lookup is attempted on an empty list."""
    def __init__(self, index):
        # BUG FIX: the original called str.format() on a '%s' template, so
        # the index never appeared in the message; use a {} placeholder.
        super().__init__(
            "Invalid index [{}] used for list data record lookup. It is empty.".format(index))
class ListDataRecordLookupException(Exception):
    """Raised for an out-of-range index in a list data record lookup."""
    def __init__(self, index, max_index):
        # BUG FIX: the original mixed %-style placeholders (%s/%d) with
        # str.format(), so neither value was ever interpolated.
        super().__init__(
            "Invalid index [{}] used for list data record lookup. "
            "Use indices between 0 and {}".format(index, max_index))
class MapDataRecordLookupException(Exception):
    """Raised when an unknown key/header is used in a map data record lookup."""
    def __init__(self, key):
        # BUG FIX: a '%s' template was passed to str.format(), so the key
        # never appeared in the message; use a {} placeholder.
        super().__init__(
            "Invalid Key/header [{}] used for map data record lookup.".format(key))
class DataSourceConstructionError(Exception):
    """Wraps the underlying exception raised while building a data source."""
    def __init__(self, message, name, exc):
        super().__init__(message)
        self.exc = exc    # original exception, retrievable via get_Exception()
        self.name = name  # name of the data source that failed to build
    def get_Exception(self):
        """Return the wrapped original exception."""
        return self.exc
    def get_Name(self):
        """Return the name of the data source that failed to build."""
        return self.name
class InvalidTestObjectException(Exception):
    """Raised when an object is not a valid test object."""
class SessionNodesFinishedException(Exception):
    """Sentinel raised when all session nodes have been consumed."""
    def __init__(self):
        super().__init__("Done")


class SubTestsFinished(Exception):
    """Sentinel raised when all sub-tests have been consumed."""
    def __init__(self):
        super().__init__("Done")


class TestGroupsFinishedException(Exception):
    """Sentinel raised when all test groups have been consumed."""
    def __init__(self):
        super().__init__("Done")


class PickerMisConfigurationException(Exception):
    """Raised when a picker's configuration is invalid."""
    def __init__(self):
        super().__init__("Picker is misconfigured.")
# Test Result Related
class StepResultEvent(ArjunaException):
    """Base event carrying the result of a single test step.

    The step's `assert_message` becomes the exception message; the step
    object itself remains available as `self.step`.
    """
    def __init__(self, step):
        super().__init__(step.assert_message)
        self.step = step
class Pass(StepResultEvent):
    """Step result event: the step passed."""
    # NOTE(review): the parameter is named `check` while the sibling classes
    # use `step`; renaming could break keyword callers, so it is left as-is.
    def __init__(self, check):
        super().__init__(check)
class Error(StepResultEvent):
    """Step result event: the step raised an error."""
    def __init__(self, step):
        super().__init__(step)
class Failure(StepResultEvent):
    """Step result event: the step failed."""
    def __init__(self, step):
        super().__init__(step)
class DependencyNotMet(Exception):
def __init__(self, iid):
self.iid = iid | arjuna/engine/unitee/exceptions.py | 2,886 | This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Test Result Related | 715 | en | 0.83964 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities."""
import numpy
from pytorch_lightning.utilities.apply_func import move_data_to_device # noqa: F401
from pytorch_lightning.utilities.distributed import AllGatherGrad # noqa: F401
from pytorch_lightning.utilities.enums import ( # noqa: F401
_AcceleratorType,
_StrategyType,
AMPType,
DistributedType,
GradClipAlgorithmType,
LightningEnum,
ModelSummaryMode,
)
from pytorch_lightning.utilities.grads import grad_norm # noqa: F401
from pytorch_lightning.utilities.imports import ( # noqa: F401
_APEX_AVAILABLE,
_BAGUA_AVAILABLE,
_DEEPSPEED_AVAILABLE,
_FAIRSCALE_AVAILABLE,
_FAIRSCALE_FULLY_SHARDED_AVAILABLE,
_FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE,
_GROUP_AVAILABLE,
_HIVEMIND_AVAILABLE,
_HOROVOD_AVAILABLE,
_HPU_AVAILABLE,
_HYDRA_AVAILABLE,
_HYDRA_EXPERIMENTAL_AVAILABLE,
_IPU_AVAILABLE,
_IS_INTERACTIVE,
_IS_WINDOWS,
_module_available,
_OMEGACONF_AVAILABLE,
_POPTORCH_AVAILABLE,
_RICH_AVAILABLE,
_TORCH_GREATER_EQUAL_1_9,
_TORCH_GREATER_EQUAL_1_10,
_TORCH_GREATER_EQUAL_1_11,
_TORCH_QUANTIZE_AVAILABLE,
_TORCHTEXT_AVAILABLE,
_TORCHVISION_AVAILABLE,
_TPU_AVAILABLE,
_XLA_AVAILABLE,
)
from pytorch_lightning.utilities.parameter_tying import find_shared_parameters, set_shared_parameters # noqa: F401
from pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict, is_picklable # noqa: F401
from pytorch_lightning.utilities.rank_zero import ( # noqa: F401
rank_zero_deprecation,
rank_zero_info,
rank_zero_only,
rank_zero_warn,
)
# Machine epsilon for each float precision (smallest representable step
# from 1.0), exposed as module-level constants.
FLOAT16_EPSILON = numpy.finfo(numpy.float16).eps
FLOAT32_EPSILON = numpy.finfo(numpy.float32).eps
FLOAT64_EPSILON = numpy.finfo(numpy.float64).eps
| pytorch_lightning/utilities/__init__.py | 2,355 | General utilities.
Copyright The PyTorch Lightning team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. noqa: F401 noqa: F401 noqa: F401 noqa: F401 noqa: F401 noqa: F401 noqa: F401 noqa: F401 | 668 | en | 0.809176 |
"""
WSGI config for poker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# overrides it, then build the WSGI callable that servers will invoke.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'poker.settings')
application = get_wsgi_application()
| poker/wsgi.py | 387 | WSGI config for poker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ | 211 | en | 0.775142 |
#!/usr/bin/env python
# Import required modules
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import argparse
import subprocess
import ICA_AROMA_functions as aromafunc
import shutil
import classification_plots
# Change to script directory
cwd = os.path.realpath(os.path.curdir)  # remember the caller's cwd; restored at the end of the script
scriptDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(scriptDir)  # run relative to the script's own directory
#-------------------------------------------- PARSER --------------------------------------------#
# Build the command-line interface. Two mutually exclusive usage modes exist:
# generic mode (-i/-mc/-a/-w/-m) and FEAT mode (-f); -o is always required.
# NOTE(review): user-facing help text contains typos ('realingment',
# 'aggresssive', 'seperately') — left as-is here since help strings are
# runtime output, not comments.
parser = argparse.ArgumentParser(
    description=
    'Script to run ICA-AROMA v0.3 beta (\'ICA-based Automatic Removal Of Motion Artifacts\') on fMRI data. See the companion manual for further information.'
)
# Required options
reqoptions = parser.add_argument_group('Required arguments')
reqoptions.add_argument(
    '-o', '-out', dest="outDir", required=True, help='Output directory name')
# Required options in non-Feat mode
nonfeatoptions = parser.add_argument_group('Required arguments - generic mode')
nonfeatoptions.add_argument(
    '-i',
    '-in',
    dest="inFile",
    required=False,
    help='Input file name of fMRI data (.nii.gz)')
nonfeatoptions.add_argument(
    '-mc',
    dest="mc",
    required=False,
    help=
    'File name of the motion parameters obtained after motion realingment (e.g., FSL mcflirt). Note that the order of parameters does not matter, should your file not originate from FSL mcflirt. (e.g., /home/user/PROJECT/SUBJECT.feat/mc/prefiltered_func_data_mcf.par'
)
nonfeatoptions.add_argument(
    '-a',
    '-affmat',
    dest="affmat",
    default="",
    help=
    'File name of the mat-file describing the affine registration (e.g., FSL FLIRT) of the functional data to structural space (.mat file). (e.g., /home/user/PROJECT/SUBJECT.feat/reg/example_func2highres.mat'
)
nonfeatoptions.add_argument(
    '-w',
    '-warp',
    dest="warp",
    default="",
    help=
    'File name of the warp-file describing the non-linear registration (e.g., FSL FNIRT) of the structural data to MNI152 space (.nii.gz). (e.g., /home/user/PROJECT/SUBJECT.feat/reg/highres2standard_warp.nii.gz'
)
nonfeatoptions.add_argument(
    '-m',
    '-mask',
    dest="mask",
    default="",
    help=
    'File name of the mask to be used for MELODIC (denoising will be performed on the original/non-masked input data)'
)
# Required options in Feat mode
featoptions = parser.add_argument_group('Required arguments - FEAT mode')
featoptions.add_argument(
    '-f',
    '-feat',
    dest="inFeat",
    required=False,
    help=
    'Feat directory name (Feat should have been run without temporal filtering and including registration to MNI152)'
)
# Optional options
optoptions = parser.add_argument_group('Optional arguments')
optoptions.add_argument('-tr', dest="TR", help='TR in seconds', type=float)
optoptions.add_argument(
    '-den',
    dest="denType",
    default="nonaggr",
    help=
    'Type of denoising strategy: \'no\': only classification, no denoising; \'nonaggr\': non-aggresssive denoising (default); \'aggr\': aggressive denoising; \'both\': both aggressive and non-aggressive denoising (seperately)'
)
optoptions.add_argument(
    '-md',
    '-meldir',
    dest="melDir",
    default="",
    help='MELODIC directory name, in case MELODIC has been run previously.')
optoptions.add_argument(
    '-dim',
    dest="dim",
    default=0,
    help=
    'Dimensionality reduction into #num dimensions when running MELODIC (default: automatic estimation; i.e. -dim 0)',
    type=int)
optoptions.add_argument(
    '-ow',
    '-overwrite',
    dest="overwrite",
    action='store_true',
    help='Overwrite existing output',
    default=False)
print(
    '\n------------------------------- RUNNING ICA-AROMA ------------------------------- '
)
print(
    '--------------- \'ICA-based Automatic Removal Of Motion Artifacts\' --------------- \n'
)
#--------------------------------------- PARSE ARGUMENTS ---------------------------------------#
args = parser.parse_args()
# Define variables based on the type of input (i.e. Feat directory or specific input arguments), and check whether the specified files exist.
# Any missing file sets `cancel`; the script aborts after all checks ran so the
# user sees every problem at once.
cancel = False
if args.inFeat:
    inFeat = args.inFeat
    # Check whether the Feat directory exists
    if not os.path.isdir(inFeat):
        print('The specified Feat directory does not exist.')
        print(
            '\n----------------------------- ICA-AROMA IS CANCELED -----------------------------\n'
        )
        exit()
    # Define the variables which should be located in the Feat directory
    inFile = os.path.join(args.inFeat, 'filtered_func_data.nii.gz')
    mc = os.path.join(args.inFeat, 'mc', 'prefiltered_func_data_mcf.par')
    affmat = os.path.join(args.inFeat, 'reg', 'example_func2highres.mat')
    warp = os.path.join(args.inFeat, 'reg', 'highres2standard_warp.nii.gz')
    # Check whether these files actually exist
    if not os.path.isfile(inFile):
        print('Missing filtered_func_data.nii.gz in Feat directory.')
        cancel = True
    if not os.path.isfile(mc):
        # Fixed message: the file checked above is the .par motion-parameter
        # file, not a .mat file.
        print('Missing mc/prefiltered_func_data_mcf.par in Feat directory.')
        cancel = True
    if not os.path.isfile(affmat):
        print('Missing reg/example_func2highres.mat in Feat directory.')
        cancel = True
    if not os.path.isfile(warp):
        print('Missing reg/highres2standard_warp.nii.gz in Feat directory.')
        cancel = True
    # Check whether a melodic.ica directory exists
    if os.path.isdir(os.path.join(args.inFeat, 'filtered_func_data.ica')):
        melDir = os.path.join(args.inFeat, 'filtered_func_data.ica')
    else:
        melDir = args.melDir
else:
    # Generic mode: every path comes straight from the command line.
    inFile = args.inFile
    mc = args.mc
    affmat = args.affmat
    warp = args.warp
    melDir = args.melDir
    # Check whether the files exist
    if not inFile:
        print('No input file specified.')
    else:
        if not os.path.isfile(inFile):
            print('The specified input file does not exist.')
            cancel = True
    if not mc:
        print('No mc file specified.')
    else:
        if not os.path.isfile(mc):
            # Fixed doubled word in the message ("does does").
            print('The specified mc file does not exist.')
            cancel = True
    # affmat/warp are optional; validate only when given.
    if affmat:
        if not os.path.isfile(affmat):
            print('The specified affmat file does not exist.')
            cancel = True
    if warp:
        if not os.path.isfile(warp):
            print('The specified warp file does not exist.')
            cancel = True
# Parse the arguments which do not depend on whether a Feat directory has been specified
outDir = args.outDir
dim = args.dim
denType = args.denType
# Check if the mask exists, when specified.
if args.mask:
    if not os.path.isfile(args.mask):
        print('The specified mask does not exist.')
        cancel = True
# Check if the type of denoising is correctly specified, when specified
# (unknown values silently fall back to the default 'nonaggr')
if not (denType == 'nonaggr') and not (denType == 'aggr') and not (
        denType == 'both') and not (denType == 'no'):
    print(
        'Type of denoising was not correctly specified. Non-aggressive denoising will be run.'
    )
    denType = 'nonaggr'
# If the criteria for file/directory specifications have not been met. Cancel ICA-AROMA.
if cancel:
    print(
        '\n----------------------------- ICA-AROMA IS CANCELED -----------------------------\n'
    )
    exit()
#------------------------------------------- PREPARE -------------------------------------------#
# Define the FSL-bin directory (trailing '' yields a path ending in a separator)
fslDir = os.path.join(os.environ["FSLDIR"], 'bin', '')
# Create output directory if needed
if os.path.isdir(outDir) and args.overwrite is False:
    print(
        'Output directory', outDir, """already exists.
          AROMA will not continue.
          Rerun with the -overwrite option to explicitly overwrite existing output."""
    )
    exit()
elif os.path.isdir(outDir) and args.overwrite is True:
    print('Warning! Output directory', outDir,
          'exists and will be overwritten.\n')
    shutil.rmtree(outDir)
    os.makedirs(outDir)
else:
    os.makedirs(outDir)
# Get TR of the fMRI data, if not specified: read pixdim4 from the nifti header
if args.TR:
    TR = args.TR
else:
    cmd = ' '.join([
        os.path.join(fslDir, 'fslinfo'), inFile,
        '| grep pixdim4 | awk \'{print $2}\''
    ])
    TR = float(subprocess.getoutput(cmd))
# Check TR (exactly 1s is suspicious — many headers default to 1 when unset)
if TR == 1:
    print('Warning! Please check whether the determined TR (of ' + str(TR) +
          's) is correct!\n')
elif TR == 0:
    print(
        'TR is zero. ICA-AROMA requires a valid TR and will therefore exit. Please check the header, or define the TR as an additional argument.\n----------------------------- ICA-AROMA IS CANCELED -----------------------------\n'
    )
    exit()
# Define/create mask. Either by making a copy of the specified mask, or by creating a new one.
mask = os.path.join(outDir, 'mask.nii.gz')
if args.mask:
    shutil.copyfile(args.mask, mask)
else:
    # If a Feat directory is specified, and an example_func is present use example_func to create a mask
    if args.inFeat and os.path.isfile(
            os.path.join(inFeat, 'example_func.nii.gz')):
        # Brain-extract example_func with bet, keep only the binary mask
        os.system(' '.join([
            os.path.join(fslDir, 'bet'),
            os.path.join(inFeat, 'example_func.nii.gz'),
            os.path.join(outDir, 'bet'), '-f 0.3 -n -m -R'
        ]))
        os.system(' '.join(
            ['mv', os.path.join(outDir, 'bet_mask.nii.gz'), mask]))
        if os.path.isfile(os.path.join(outDir, 'bet.nii.gz')):
            os.remove(os.path.join(outDir, 'bet.nii.gz'))
    else:
        if args.inFeat:
            print(
                ' - No example_func was found in the Feat directory. A mask will be created including all voxels with varying intensity over time in the fMRI data. Please check!\n'
            )
        # Fallback: mask = voxels with non-zero temporal standard deviation
        os.system(' '.join(
            [os.path.join(fslDir, 'fslmaths'), inFile, '-Tstd -bin', mask]))
#---------------------------------------- Run ICA-AROMA ----------------------------------------#
print('Step 1) MELODIC')
aromafunc.runICA(fslDir, inFile, outDir, melDir, mask, dim, TR)
print('Step 2) Automatic classification of the components')
print(' - registering the spatial maps to MNI')
melIC = os.path.join(outDir, 'melodic_IC_thr.nii.gz')
melIC_MNI = os.path.join(outDir, 'melodic_IC_thr_MNI2mm.nii.gz')
aromafunc.register2MNI(fslDir, melIC, melIC_MNI, affmat, warp)
print(' - extracting the CSF & Edge fraction features')
edgeFract, csfFract = aromafunc.feature_spatial(fslDir, outDir, scriptDir,
                                                melIC_MNI)
print(' - extracting the Maximum RP correlation feature')
melmix = os.path.join(outDir, 'melodic.ica', 'melodic_mix')
maxRPcorr = aromafunc.feature_time_series(melmix, mc)
print(' - extracting the High-frequency content feature')
melFTmix = os.path.join(outDir, 'melodic.ica', 'melodic_FTmix')
HFC = aromafunc.feature_frequency(melFTmix, TR)
print(' - classification')
motionICs = aromafunc.classification(outDir, maxRPcorr, edgeFract, HFC,
                                     csfFract)
# classification_plots.classification_plot(os.path.join(outDir, 'classification_overview.txt'),
#                                          outDir)
if (denType != 'no'):
    print('Step 3) Data denoising')
    aromafunc.denoising(fslDir, inFile, outDir, melmix, denType, motionICs)
# Remove thresholded melodic_IC file
os.remove(melIC)
# Revert to old directory
os.chdir(cwd)
print(
    '\n----------------------------------- Finished -----------------------------------\n'
)
| thirdparty/ICA_AROMA_79x95x69/ICA_AROMA.py | 11,580 | !/usr/bin/env python Import required modules Change to script directory-------------------------------------------- PARSER -------------------------------------------- Required options Required options in non-Feat mode Required options in Feat mode Optional options--------------------------------------- PARSE ARGUMENTS --------------------------------------- Define variables based on the type of input (i.e. Feat directory or specific input arguments), and check whether the specified files exist. Check whether the Feat directory exists Define the variables which should be located in the Feat directory Check whether these files actually exist Check whether a melodic.ica directory exists Check whether the files exist Parse the arguments which do not depend on whether a Feat directory has been specified Check if the mask exists, when specified. Check if the type of denoising is correctly specified, when specified If the criteria for file/directory specifications have not been met. Cancel ICA-AROMA.------------------------------------------- PREPARE ------------------------------------------- Define the FSL-bin directory Create output directory if needed Get TR of the fMRI data, if not specified Check TR Define/create mask. Either by making a copy of the specified mask, or by creating a new one. If a Feat directory is specified, and an example_func is present use example_func to create a mask---------------------------------------- Run ICA-AROMA ---------------------------------------- classification_plots.classification_plot(os.path.join(outDir, 'classification_overview.txt'), outDir) Remove thresholded melodic_IC file Revert to old directory | 1,707 | en | 0.414131 |
# -*- coding: utf-8 -*-
# Copyright Hannah von Reth <vonreth@kde.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
### fetch functions
from CraftCore import CraftCore
from CraftDebug import deprecated
import utils
import io
import os
import urllib
import subprocess
import sys
import re
def getFile(url, destdir, filename='', quiet=None) -> bool:
    """Download file from 'url' into 'destdir'.

    Dispatch order: s3/minio scheme handlers, then wget, then curl (when a
    cert store is available on Windows), then PowerShell's WebClient, and
    finally urllib as last resort. Returns True on success.

    ``filename`` defaults to the basename of the url path; ``quiet``
    defaults to the ContinuousIntegration setting.
    """
    if quiet is None:
        quiet = CraftCore.settings.getboolean("ContinuousIntegration", "Enabled", False)
    CraftCore.log.debug("getFile called. url: %s" % url)
    if url == "":
        CraftCore.log.error("fetch: no url given")
        return False
    pUrl = urllib.parse.urlparse(url)
    if not filename:
        filename = os.path.basename(pUrl.path)
    utils.createDir(destdir)
    if pUrl.scheme == "s3":
        return s3File(url, destdir, filename)
    elif pUrl.scheme == "minio":
        return minioGet(pUrl.netloc + pUrl.path, destdir, filename)
    # curl and wget basically only work when we have a cert store on windows
    if not CraftCore.compiler.isWindows or os.path.exists(os.path.join(CraftCore.standardDirs.etcDir(), "cacert.pem")):
        if not CraftCore.settings.getboolean("General", "NoWget"):
            if CraftCore.cache.findApplication("wget"):
                return wgetFile(url, destdir, filename, quiet)
        if CraftCore.cache.findApplication("curl"):
            return curlFile(url, destdir, filename, quiet)
    if os.path.exists(os.path.join(destdir, filename)):
        return True
    powershell = CraftCore.cache.findApplication("powershell")
    if powershell:
        filename = os.path.join(destdir, filename)
        # Bug fix: the DownloadFile target must be the computed ``filename``;
        # it previously interpolated a literal placeholder string.
        return utils.system([powershell, "-NoProfile", "-ExecutionPolicy", "ByPass", "-Command",
                             f"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; (new-object net.webclient).DownloadFile(\"{url}\", \"{filename}\")"])
    else:
        def dlProgress(count, blockSize, totalSize):
            # Percentage when the server sent a length, running byte count otherwise.
            if totalSize != -1:
                percent = int(count * blockSize * 100 / totalSize)
                utils.printProgress(percent)
            else:
                sys.stdout.write(("\r%s bytes downloaded" % (count * blockSize)))
                sys.stdout.flush()

        try:
            urllib.request.urlretrieve(url, filename=os.path.join(destdir, filename),
                                       reporthook=dlProgress if CraftCore.debug.verbose() >= 0 else None)
        except Exception as e:
            CraftCore.log.warning(e)
            return False
        if CraftCore.debug.verbose() >= 0:
            sys.stdout.write("\n")
            sys.stdout.flush()
        return True
def curlFile(url, destdir, filename, quiet):
    """download file with curl from 'url' into 'destdir', if filename is given to the file specified"""
    curl = CraftCore.cache.findApplication("curl")
    # -C - resume, retry 10x, follow redirects, require TLS on ftp, fail on HTTP errors
    command = [curl, "-C", "-", "--retry", "10", "-L", "--ftp-ssl", "--fail"]
    cert = os.path.join(CraftCore.standardDirs.etcDir(), "cacert.pem")
    if os.path.exists(cert):
        command += ["--cacert", cert]
    # the default of 20 might not be enough for sourceforge ...
    command += ["--max-redirs", "50"]
    command += ["-o", os.path.join(destdir, filename)]
    command += [url]
    CraftCore.log.debug("curlfile called")
    if CraftCore.debug.verbose() < 1:
        if quiet:
            # Capture output; on CI add -v so the source host can be logged.
            with io.StringIO() as tmp:
                ciMode = CraftCore.settings.getboolean("ContinuousIntegration", "Enabled", False)
                if ciMode:
                    command += ["-v"]
                if not utils.system(command, logCommand=ciMode, stdout=tmp, stderr=subprocess.STDOUT):
                    CraftCore.log.warning(tmp.getvalue())
                    return False
                if ciMode:
                    loc = re.findall(r"Host: ([^\s]+)", tmp.getvalue())
                    if loc:
                        CraftCore.log.info(f"Downloaded from: {loc[-1]}")
                return True
        elif CraftCore.cache.checkCommandOutputFor(curl, "--progress-bar"):
            # Interactive path: show curl's own progress bar.
            command += ["--progress-bar"]
            CraftCore.log.info(f"curl {url}")
            return utils.system(command, displayProgress=True, logCommand=False, stderr=subprocess.STDOUT)
    # Verbose fallback (also reached when neither quiet nor progress-bar applies).
    command += ["-v"]
    return utils.system(command)
def wgetFile(url, destdir, filename, quiet):
    """download file with wget from 'url' into 'destdir', if filename is given to the file specified"""
    wget = CraftCore.cache.findApplication("wget")
    # -c resume partial downloads, -t 10 retry up to ten times
    command = [wget, "-c", "-t", "10"]
    cert = os.path.join(CraftCore.standardDirs.etcDir(), "cacert.pem")
    if os.path.exists(cert):
        command += ["--ca-certificate", cert]
    # the default of 20 might not be enough for sourceforge ...
    command += ["--max-redirect", "50"]
    if CraftCore.settings.getboolean("General", "EMERGE_NO_PASSIVE_FTP", False):
        command += ["--no-passive-ftp"]
    if not filename:
        command += ["-P", destdir]  # no name given: let wget pick it inside destdir
    else:
        command += ["-O", os.path.join(destdir, filename)]
    command += [url]
    if CraftCore.debug.verbose() < 1:
        if quiet:
            # Capture output; on CI log the final redirect target.
            with io.StringIO() as tmp:
                ciMode = CraftCore.settings.getboolean("ContinuousIntegration", "Enabled", False)
                if not utils.system(command, logCommand=ciMode, stdout=tmp, stderr=subprocess.STDOUT):
                    CraftCore.log.warning(tmp.getvalue())
                    return False
                if ciMode:
                    loc = re.findall(r"Location: ([^\s]+)", tmp.getvalue())
                    if loc:
                        CraftCore.log.info(f"Downloaded from: {loc[-1]}")
                return True
        elif CraftCore.cache.checkCommandOutputFor(wget, "--show-progress"):
            # Interactive path: show wget's own progress bar.
            command += ["-q", "--show-progress"]
            CraftCore.log.info(f"wget {url}")
            return utils.system(command, displayProgress=True, logCommand=False, stderr=subprocess.STDOUT)
    return utils.system(command)
def s3File(url : str, destdir : str, filename : str) ->bool:
    """Fetch an s3:// url into destdir/filename via the AWS CLI."""
    awsExecutable = CraftCore.cache.findApplication("aws")
    if awsExecutable:
        target = os.path.join(destdir, filename)
        return utils.system([awsExecutable, "s3", "cp", url, target])
    CraftCore.log.critical("aws not found, please install awscli. \"pip install awscli\" ")
    return False
def minioGet(url : str, destdir : str, filename : str) ->bool:
    """Fetch a file with the minio client ("minio" preferred on Windows, "mc" otherwise)."""
    client = CraftCore.cache.findApplication("minio") if CraftCore.compiler.isWindows else None
    if not client:
        client = CraftCore.cache.findApplication("mc")
    if not client:
        CraftCore.log.critical("minio client not found, please install minio")
        return False
    return utils.system([client, "cp", url, os.path.join(destdir, filename)])
| bin/Utils/GetFiles.py | 8,038 | download file with curl from 'url' into 'destdir', if filename is given to the file specified
download file from 'url' into 'destdir'
download file with wget from 'url' into 'destdir', if filename is given to the file specified
-*- coding: utf-8 -*- Copyright Hannah von Reth <vonreth@kde.org> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fetch functions curl and wget basically only work when we have a cert store on windows the default of 20 might not be enough for sourceforge ... the default of 20 might not be enough for sourceforge ... | 1,724 | en | 0.859509 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line administration entry point."""
    # Default the settings module; an explicit environment value wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zhekudblog.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a more actionable hint while preserving the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| src/zhekudblog/manage.py | 630 | Django's command-line utility for administrative tasks.
!/usr/bin/env python | 77 | en | 0.656913 |
import numpy as np
import tempfile
import os
import pytest
import torch
from anndata import AnnData
from scvi.dataset import (
AnnDatasetFromAnnData,
CortexDataset,
SyntheticDataset,
GeneExpressionDataset,
Dataset10X,
)
from scvi.inference import (
JointSemiSupervisedTrainer,
AlternateSemiSupervisedTrainer,
ClassifierTrainer,
UnsupervisedTrainer,
AdapterTrainer,
TotalTrainer,
TotalPosterior,
)
from scvi.inference.posterior import unsupervised_clustering_accuracy
from scvi.inference.posterior_utils import load_posterior
from scvi.inference.annotation import compute_accuracy_rf, compute_accuracy_svc
from scvi.models import VAE, SCANVI, VAEC, LDVAE, TOTALVI, AutoZIVAE
from scvi.models.distributions import ZeroInflatedNegativeBinomial, NegativeBinomial
from scvi.models.classifier import Classifier
from scvi.models.log_likelihood import log_zinb_positive, log_nb_positive
from scvi import set_seed
# Fix the global RNG seed so these tests are deterministic.
set_seed(0)
# Passed as use_cuda= to every trainer constructed below.
use_cuda = True
def test_cortex(save_path):
    """End-to-end smoke test on the Cortex dataset: VAE posterior API,
    posterior corruption/imputation, generate_parameters shapes, SCANVI
    (joint and alternate trainers), SVC/RF baselines, and a Classifier."""
    cortex_dataset = CortexDataset(save_path=save_path)
    vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
    trainer_cortex_vae = UnsupervisedTrainer(
        vae, cortex_dataset, train_size=0.5, use_cuda=use_cuda
    )
    trainer_cortex_vae.train(n_epochs=1)
    # Exercise the train-set posterior API.
    trainer_cortex_vae.train_set.reconstruction_error()
    trainer_cortex_vae.train_set.differential_expression_stats()
    trainer_cortex_vae.train_set.generate_feature_correlation_matrix(
        n_samples=2, correlation_type="pearson"
    )
    trainer_cortex_vae.train_set.generate_feature_correlation_matrix(
        n_samples=2, correlation_type="spearman"
    )
    trainer_cortex_vae.train_set.imputation(n_samples=1)
    trainer_cortex_vae.test_set.imputation(n_samples=5)
    # Corrupt the posteriors, retrain, then restore them.
    trainer_cortex_vae.corrupt_posteriors(corruption="binomial")
    trainer_cortex_vae.corrupt_posteriors()
    trainer_cortex_vae.train(n_epochs=1)
    trainer_cortex_vae.uncorrupt_posteriors()
    trainer_cortex_vae.train_set.imputation_benchmark(
        n_samples=1, show_plot=False, title_plot="imputation", save_path=save_path
    )
    # generate_parameters: check shapes with/without sampling and with give_mean.
    trainer_cortex_vae.train_set.generate_parameters()
    n_cells, n_genes = (
        len(trainer_cortex_vae.train_set.indices),
        cortex_dataset.nb_genes,
    )
    n_samples = 3
    (dropout, means, dispersions) = trainer_cortex_vae.train_set.generate_parameters()
    assert dropout.shape == (n_cells, n_genes) and means.shape == (n_cells, n_genes)
    assert dispersions.shape == (n_cells, n_genes)
    (dropout, means, dispersions) = trainer_cortex_vae.train_set.generate_parameters(
        n_samples=n_samples
    )
    assert dropout.shape == (n_samples, n_cells, n_genes)
    assert means.shape == (n_samples, n_cells, n_genes)
    (dropout, means, dispersions) = trainer_cortex_vae.train_set.generate_parameters(
        n_samples=n_samples, give_mean=True
    )
    assert dropout.shape == (n_cells, n_genes) and means.shape == (n_cells, n_genes)
    # Full-dataset posterior: generated samples keep (cells, genes, n_samples) shape.
    full = trainer_cortex_vae.create_posterior(
        vae, cortex_dataset, indices=np.arange(len(cortex_dataset))
    )
    x_new, x_old = full.generate(n_samples=10)
    assert x_new.shape == (cortex_dataset.nb_cells, cortex_dataset.nb_genes, 10)
    assert x_old.shape == (cortex_dataset.nb_cells, cortex_dataset.nb_genes)
    trainer_cortex_vae.train_set.imputation_benchmark(
        n_samples=1, show_plot=False, title_plot="imputation", save_path=save_path
    )
    # SCANVI with the joint semi-supervised trainer.
    svaec = SCANVI(
        cortex_dataset.nb_genes, cortex_dataset.n_batches, cortex_dataset.n_labels
    )
    trainer_cortex_svaec = JointSemiSupervisedTrainer(
        svaec, cortex_dataset, n_labelled_samples_per_class=3, use_cuda=use_cuda
    )
    trainer_cortex_svaec.train(n_epochs=1)
    trainer_cortex_svaec.labelled_set.accuracy()
    trainer_cortex_svaec.full_dataset.reconstruction_error()
    # SCANVI with the alternate semi-supervised trainer.
    svaec = SCANVI(
        cortex_dataset.nb_genes, cortex_dataset.n_batches, cortex_dataset.n_labels
    )
    trainer_cortex_svaec = AlternateSemiSupervisedTrainer(
        svaec, cortex_dataset, n_labelled_samples_per_class=3, use_cuda=use_cuda
    )
    trainer_cortex_svaec.train(n_epochs=1, lr=1e-2)
    trainer_cortex_svaec.unlabelled_set.accuracy()
    # Classical baselines (SVC, random forest) on the raw data splits.
    data_train, labels_train = trainer_cortex_svaec.labelled_set.raw_data()
    data_test, labels_test = trainer_cortex_svaec.unlabelled_set.raw_data()
    compute_accuracy_svc(
        data_train,
        labels_train,
        data_test,
        labels_test,
        param_grid=[{"C": [1], "kernel": ["linear"]}],
    )
    compute_accuracy_rf(
        data_train,
        labels_train,
        data_test,
        labels_test,
        param_grid=[{"max_depth": [3], "n_estimators": [10]}],
    )
    # Plain neural classifier on gene expression.
    cls = Classifier(cortex_dataset.nb_genes, n_labels=cortex_dataset.n_labels)
    cls_trainer = ClassifierTrainer(cls, cortex_dataset)
    cls_trainer.train(n_epochs=1)
    cls_trainer.train_set.accuracy()
def test_synthetic_1():
    """SCANVI on synthetic data: posterior save/load round-trip plus the
    visualization, clustering and differential-expression APIs."""
    synthetic_dataset = SyntheticDataset()
    synthetic_dataset.cell_types = np.array(["A", "B", "C"])
    svaec = SCANVI(
        synthetic_dataset.nb_genes,
        synthetic_dataset.n_batches,
        synthetic_dataset.n_labels,
    )
    trainer_synthetic_svaec = JointSemiSupervisedTrainer(
        svaec, synthetic_dataset, use_cuda=use_cuda
    )
    trainer_synthetic_svaec.train(n_epochs=1)
    trainer_synthetic_svaec.labelled_set.entropy_batch_mixing()
    # Round-trip a posterior through disk and check it loads back identically.
    with tempfile.TemporaryDirectory() as temp_dir:
        posterior_save_path = os.path.join(temp_dir, "posterior_data")
        original_post = trainer_synthetic_svaec.labelled_set.sequential()
        original_post.save_posterior(posterior_save_path)
        new_svaec = SCANVI(
            synthetic_dataset.nb_genes,
            synthetic_dataset.n_batches,
            synthetic_dataset.n_labels,
        )
        new_post = load_posterior(posterior_save_path, model=new_svaec, use_cuda=False)
        assert np.array_equal(new_post.indices, original_post.indices)
        assert np.array_equal(new_post.gene_dataset.X, original_post.gene_dataset.X)
        assert np.array_equal(
            new_post.gene_dataset.labels, original_post.gene_dataset.labels
        )
    # Exercise visualization / clustering / DE utilities.
    trainer_synthetic_svaec.full_dataset.knn_purity()
    trainer_synthetic_svaec.labelled_set.show_t_sne(n_samples=5)
    trainer_synthetic_svaec.unlabelled_set.show_t_sne(n_samples=5, color_by="labels")
    trainer_synthetic_svaec.labelled_set.show_t_sne(
        n_samples=5, color_by="batches and labels"
    )
    trainer_synthetic_svaec.labelled_set.clustering_scores()
    trainer_synthetic_svaec.labelled_set.clustering_scores(prediction_algorithm="gmm")
    trainer_synthetic_svaec.unlabelled_set.unsupervised_classification_accuracy()
    trainer_synthetic_svaec.unlabelled_set.differential_expression_score(
        synthetic_dataset.labels.ravel() == 1,
        synthetic_dataset.labels.ravel() == 2,
        n_samples=2,
        M_permutation=10,
    )
    trainer_synthetic_svaec.unlabelled_set.one_vs_all_degenes(
        n_samples=2, M_permutation=10
    )
def test_synthetic_2():
    """Smoke-test a VAEC trained with reconstruction-error early stopping."""
    dataset = SyntheticDataset()
    model = VAEC(dataset.nb_genes, dataset.n_batches, dataset.n_labels)
    stopping = {
        "early_stopping_metric": "reconstruction_error",
        "on": "labelled_set",
        "save_best_state_metric": "reconstruction_error",
    }
    trainer = JointSemiSupervisedTrainer(
        model,
        dataset,
        use_cuda=use_cuda,
        frequency=1,
        early_stopping_kwargs=stopping,
    )
    trainer.train(n_epochs=2)
def base_benchmark(gene_dataset):
    """Train a plain VAE on ``gene_dataset`` for one epoch and return the trainer."""
    model = VAE(gene_dataset.nb_genes, gene_dataset.n_batches, gene_dataset.n_labels)
    benchmark_trainer = UnsupervisedTrainer(
        model, gene_dataset, train_size=0.5, use_cuda=use_cuda
    )
    benchmark_trainer.train(n_epochs=1)
    return benchmark_trainer
def ldvae_benchmark(dataset, n_epochs, use_cuda=True):
    """Fit linearly-decoded VAEs with normal and logistic-normal latents;
    returns the trainer of the second (ln) fit."""
    # Normal latent distribution: also evaluate the marginal log-likelihood.
    model = LDVAE(
        dataset.nb_genes, n_batch=dataset.n_batches, latent_distribution="normal"
    )
    fit = UnsupervisedTrainer(model, dataset, use_cuda=use_cuda)
    fit.train(n_epochs=n_epochs)
    fit.test_set.reconstruction_error()
    fit.test_set.marginal_ll()
    # Logistic-normal latent distribution: also pull the decoder loadings.
    model = LDVAE(dataset.nb_genes, n_batch=dataset.n_batches, latent_distribution="ln")
    fit = UnsupervisedTrainer(model, dataset, use_cuda=use_cuda)
    fit.train(n_epochs=n_epochs)
    fit.test_set.reconstruction_error()
    model.get_loadings()
    return fit
def totalvi_benchmark(dataset, n_epochs, use_cuda=True):
    """Train TOTALVI on ``dataset`` and exercise the test-set posterior API;
    returns the trainer."""
    totalvae = TOTALVI(
        dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
    )
    trainer = TotalTrainer(
        totalvae, dataset, train_size=0.5, use_cuda=use_cuda, early_stopping_kwargs=None
    )
    trainer.train(n_epochs=n_epochs)
    trainer.test_set.reconstruction_error()
    trainer.test_set.marginal_ll()
    trainer.test_set.get_protein_background_mean()
    trainer.test_set.get_latent()
    trainer.test_set.generate()
    trainer.test_set.get_sample_dropout()
    # NOTE(review): the next call is duplicated verbatim — possibly one of the
    # two was meant to use a different transform_batch; confirm intent.
    trainer.test_set.get_normalized_denoised_expression(transform_batch=0)
    trainer.test_set.get_normalized_denoised_expression(transform_batch=0)
    trainer.test_set.imputation()
    trainer.test_set.get_protein_mean()
    trainer.test_set.one_vs_all_degenes(n_samples=2, M_permutation=10)
    trainer.test_set.generate_feature_correlation_matrix(n_samples=2)
    trainer.test_set.generate_feature_correlation_matrix(n_samples=2, transform_batch=0)
    return trainer
def test_synthetic_3():
    """Smoke-test AdapterTrainer on top of a pre-trained VAE."""
    dataset = SyntheticDataset()
    base_trainer = base_benchmark(dataset)
    adapter = AdapterTrainer(
        base_trainer.model, dataset, base_trainer.train_set, frequency=1
    )
    adapter.train(n_path=1, n_epochs=1)
def test_nb_not_zinb():
    """SCANVI trains with a plain negative-binomial reconstruction loss."""
    dataset = SyntheticDataset()
    model = SCANVI(
        dataset.nb_genes,
        dataset.n_batches,
        dataset.n_labels,
        labels_groups=[0, 0, 1],
        reconstruction_loss="nb",
    )
    trainer = JointSemiSupervisedTrainer(model, dataset, use_cuda=use_cuda)
    trainer.train(n_epochs=1)
def test_poisson_not_zinb():
    """SCANVI trains with a Poisson reconstruction loss."""
    dataset = SyntheticDataset()
    model = SCANVI(
        dataset.nb_genes,
        dataset.n_batches,
        dataset.n_labels,
        labels_groups=[0, 0, 1],
        reconstruction_loss="poisson",
    )
    trainer = JointSemiSupervisedTrainer(model, dataset, use_cuda=use_cuda)
    trainer.train(n_epochs=1)
def test_classifier_accuracy(save_path):
    """Train a Classifier on Cortex with accuracy-based early stopping."""
    dataset = CortexDataset(save_path=save_path)
    classifier = Classifier(dataset.nb_genes, n_labels=dataset.n_labels)
    stopping = {
        "early_stopping_metric": "accuracy",
        "save_best_state_metric": "accuracy",
    }
    trainer = ClassifierTrainer(
        classifier,
        dataset,
        metrics_to_monitor=["accuracy"],
        frequency=1,
        early_stopping_kwargs=stopping,
    )
    trainer.train(n_epochs=2)
    trainer.train_set.accuracy()
def test_LDVAE(save_path):
    """Run the LDVAE benchmark on one-batch and two-batch synthetic data (CPU only)."""
    for batch_count in (1, 2):
        dataset = SyntheticDataset(n_batches=batch_count)
        ldvae_benchmark(dataset, n_epochs=1, use_cuda=False)
def test_sampling_zl(save_path):
    """Train a classifier on samples drawn from a trained VAE (sampling_zl path)."""
    dataset = CortexDataset(save_path=save_path)
    vae = VAE(dataset.nb_genes, dataset.n_batches)
    vae_trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
    vae_trainer.train(n_epochs=2)
    # Classifier input dimension is the latent size plus one (the l variable).
    classifier = Classifier((vae.n_latent + 1), n_labels=dataset.n_labels)
    cls_trainer = ClassifierTrainer(
        classifier, dataset, sampling_model=vae, sampling_zl=True
    )
    cls_trainer.train(n_epochs=2)
    cls_trainer.test_set.accuracy()
def test_annealing_procedures(save_path):
    """Check KL warm-up behaviour for epoch-based and iteration-based annealing."""
    dataset = CortexDataset(save_path=save_path)
    vae = VAE(dataset.nb_genes, dataset.n_batches)

    # Warm-up over one epoch: must be complete after two training epochs.
    trainer = UnsupervisedTrainer(
        vae,
        dataset,
        train_size=0.5,
        use_cuda=use_cuda,
        n_epochs_kl_warmup=1,
    )
    trainer.train(n_epochs=2)
    assert trainer.kl_weight >= 0.99, "Annealing should be over"

    # Warm-up over five epochs: still in progress after two training epochs.
    trainer = UnsupervisedTrainer(
        vae,
        dataset,
        train_size=0.5,
        use_cuda=use_cuda,
        n_epochs_kl_warmup=5,
    )
    trainer.train(n_epochs=2)
    assert trainer.kl_weight <= 0.99, "Annealing should be proceeding"

    # Iteration-based warm-up: a single iteration is long past after two epochs.
    trainer = UnsupervisedTrainer(
        vae,
        dataset,
        train_size=0.5,
        use_cuda=use_cuda,
        n_iter_kl_warmup=1,
        n_epochs_kl_warmup=None,
    )
    trainer.train(n_epochs=2)
    assert trainer.kl_weight >= 0.99, "Annealing should be over"
def test_differential_expression(save_path):
    """End-to-end DE test: VAE on Cortex, posterior save/load round-trip,
    vanilla and change-mode DE scores, then the same DE calls for totalVI."""
    dataset = CortexDataset(save_path=save_path)
    n_cells = len(dataset)
    all_indices = np.arange(n_cells)
    vae = VAE(dataset.nb_genes, dataset.n_batches)
    trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
    trainer.train(n_epochs=2)
    post = trainer.create_posterior(vae, dataset, shuffle=False, indices=all_indices)

    # Round-trip the posterior through disk and check it is faithfully restored.
    with tempfile.TemporaryDirectory() as temp_dir:
        posterior_save_path = os.path.join(temp_dir, "posterior_data")
        post = post.sequential(batch_size=3)
        post.save_posterior(posterior_save_path)
        new_vae = VAE(dataset.nb_genes, dataset.n_batches)
        new_post = load_posterior(posterior_save_path, model=new_vae, use_cuda=False)
        assert new_post.data_loader.batch_size == 3
        assert np.array_equal(new_post.indices, post.indices)
        assert np.array_equal(new_post.gene_dataset.X, post.gene_dataset.X)

    # Sample scale example
    px_scales = post.scale_sampler(
        n_samples_per_cell=4, n_samples=None, selection=all_indices
    )["scale"]
    assert (
        px_scales.shape[1] == dataset.nb_genes
    ), "posterior scales should have shape (n_samples, n_genes)"

    # Differential expression different models
    idx_1 = [1, 2, 3]
    idx_2 = [4, 5, 6, 7]
    de_dataframe = post.differential_expression_score(
        idx1=idx_1,
        idx2=idx_2,
        n_samples=10,
        mode="vanilla",
        use_permutation=True,
        M_permutation=100,
    )
    de_dataframe = post.differential_expression_score(
        idx1=idx_1,
        idx2=idx_2,
        n_samples=10,
        mode="change",
        use_permutation=True,
        M_permutation=100,
        cred_interval_lvls=[0.5, 0.95],
    )
    print(de_dataframe.keys())
    # Credible interval bounds must be ordered (min <= max) at both levels.
    assert (
        de_dataframe["lfc_confidence_interval_0.5_min"]
        <= de_dataframe["lfc_confidence_interval_0.5_max"]
    ).all()
    assert (
        de_dataframe["lfc_confidence_interval_0.95_min"]
        <= de_dataframe["lfc_confidence_interval_0.95_max"]
    ).all()

    # DE estimation example: probabilities must lie in [0, 1].
    de_probabilities = de_dataframe.loc[:, "proba_de"]
    assert ((0.0 <= de_probabilities) & (de_probabilities <= 1.0)).all()

    # Test totalVI DE on the 10X PBMC protein dataset.
    sp = os.path.join(save_path, "10X")
    dataset = Dataset10X(dataset_name="pbmc_10k_protein_v3", save_path=sp)
    n_cells = len(dataset)
    all_indices = np.arange(n_cells)
    vae = TOTALVI(
        dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
    )
    trainer = TotalTrainer(
        vae, dataset, train_size=0.5, use_cuda=use_cuda, early_stopping_kwargs=None
    )
    trainer.train(n_epochs=2)
    post = trainer.create_posterior(
        vae, dataset, shuffle=False, indices=all_indices, type_class=TotalPosterior
    )
    # Differential expression different models
    idx_1 = [1, 2, 3]
    idx_2 = [4, 5, 6, 7]
    de_dataframe = post.differential_expression_score(
        idx1=idx_1,
        idx2=idx_2,
        n_samples=10,
        mode="vanilla",
        use_permutation=True,
        M_permutation=100,
    )
    de_dataframe = post.differential_expression_score(
        idx1=idx_1,
        idx2=idx_2,
        n_samples=10,
        mode="change",
        use_permutation=True,
        M_permutation=100,
    )
def test_totalvi(save_path):
    """totalVI benchmarks (1 and 2 batches), adversarial training, and a
    posterior save/load round-trip."""
    synthetic_dataset_one_batch = SyntheticDataset(n_batches=1)
    totalvi_benchmark(synthetic_dataset_one_batch, n_epochs=1, use_cuda=use_cuda)
    synthetic_dataset_two_batches = SyntheticDataset(n_batches=2)
    totalvi_benchmark(synthetic_dataset_two_batches, n_epochs=1, use_cuda=use_cuda)

    # adversarial testing
    dataset = synthetic_dataset_two_batches
    totalvae = TOTALVI(
        dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
    )
    trainer = TotalTrainer(
        totalvae,
        dataset,
        train_size=0.5,
        use_cuda=use_cuda,
        early_stopping_kwargs=None,
        use_adversarial_loss=True,
    )
    trainer.train(n_epochs=1)

    # Save the posterior to disk and reload it against a fresh model.
    with tempfile.TemporaryDirectory() as temp_dir:
        posterior_save_path = os.path.join(temp_dir, "posterior_data")
        original_post = trainer.create_posterior(
            totalvae,
            dataset,
            indices=np.arange(len(dataset)),
            type_class=TotalPosterior,
        )
        original_post.save_posterior(posterior_save_path)
        new_totalvae = TOTALVI(
            dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
        )
        new_post = load_posterior(
            posterior_save_path, model=new_totalvae, use_cuda=False
        )
        assert new_post.posterior_type == "TotalPosterior"
        assert np.array_equal(
            new_post.gene_dataset.protein_expression, dataset.protein_expression
        )
def test_autozi(save_path):
    """Smoke-test AutoZI with gene-wise and gene/label-wise dispersion and ZI."""
    dataset = SyntheticDataset(n_batches=1)
    for disp_zi in ("gene", "gene-label"):
        model = AutoZIVAE(
            n_input=dataset.nb_genes,
            dispersion=disp_zi,
            zero_inflation=disp_zi,
            n_labels=dataset.n_labels,
        )
        trainer = UnsupervisedTrainer(
            model=model, gene_dataset=dataset, train_size=0.5
        )
        trainer.train(n_epochs=2, lr=1e-2)
        trainer.test_set.elbo()
        trainer.test_set.reconstruction_error()
        trainer.test_set.marginal_ll()
def test_multibatches_features():
    """Exercise imputation with transform_batch on a four-batch dataset."""
    # Same draw order as before: one batch with counts in [1, 5), three in [1, 10).
    batch_specs = [((20, 10), 5), ((20, 10), 10), ((20, 10), 10), ((30, 10), 10)]
    data = [np.random.randint(1, high, size=shape) for shape, high in batch_specs]
    dataset = GeneExpressionDataset()
    dataset.populate_from_per_batch_list(data)
    vae = VAE(dataset.nb_genes, dataset.n_batches)
    trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
    trainer.train(n_epochs=2)
    trainer.test_set.imputation(n_samples=2, transform_batch=0)
    trainer.train_set.imputation(n_samples=2, transform_batch=[0, 1, 2])
def test_deprecated_munkres():
    """unsupervised_clustering_accuracy: partial and perfect label matchings."""
    labels = np.array([0, 1, 0, 1, 0, 1, 1, 1])
    predictions = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    reward, assignment = unsupervised_clustering_accuracy(labels, predictions)
    assert reward == 0.625
    assert (assignment == np.array([[0, 0], [1, 1]])).all()

    labels = np.array([1, 1, 2, 2, 0, 0, 3, 3])
    predictions = np.array([1, 1, 2, 2, 3, 3, 0, 0])
    reward, assignment = unsupervised_clustering_accuracy(labels, predictions)
    assert reward == 1.0
    assert (assignment == np.array([[0, 3], [1, 1], [2, 2], [3, 0]])).all()
def test_zinb_distribution():
    """Check the ZINB / NB distribution classes against the reference
    log-likelihood functions, plus sampling shapes, empirical moments, and
    input validation."""
    theta = 100.0 + torch.rand(size=(2,))
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    log_p_ref = log_zinb_positive(x, mu, theta, pi)
    dist = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    log_p_zinb = dist.log_prob(x)
    assert (log_p_ref - log_p_zinb).abs().max().item() <= 1e-8

    torch.manual_seed(0)
    s1 = dist.sample((100,))
    assert s1.shape == (100, 2)
    s2 = dist.sample(sample_shape=(4, 3))
    assert s2.shape == (4, 3, 2)

    log_p_ref = log_nb_positive(x, mu, theta)
    dist = NegativeBinomial(mu=mu, theta=theta)
    log_p_nb = dist.log_prob(x)
    assert (log_p_ref - log_p_nb).abs().max().item() <= 1e-8

    s1 = dist.sample((1000,))
    assert s1.shape == (1000, 2)
    # Empirical moments should be close to the NB mean mu and
    # standard deviation sqrt(mu + mu^2 / theta).
    assert (s1.mean(0) - mu).abs().mean() <= 1e0
    assert (s1.std(0) - (mu + mu * mu / theta) ** 0.5).abs().mean() <= 1e0

    # Batched parameters: log_prob must preserve the parameter shape.
    size = (50, 3)
    theta = 100.0 + torch.rand(size=size)
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    dist1 = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    dist2 = NegativeBinomial(mu=mu, theta=theta)
    assert dist1.log_prob(x).shape == size
    assert dist2.log_prob(x).shape == size

    # Negative means must be rejected outright.
    with pytest.raises(ValueError):
        ZeroInflatedNegativeBinomial(mu=-mu, theta=theta, zi_logits=pi)
    with pytest.warns(UserWarning):
        dist1.log_prob(-x)  # ensures neg values raise warning
    with pytest.warns(UserWarning):
        dist2.log_prob(0.5 * x)  # ensures float values raise warning
def test_anndata_loader():
    """AnnDatasetFromAnnData must read batch labels without mutating the AnnData."""
    counts = np.random.randint(low=0, high=100, size=(15, 4))
    batches = np.random.randint(low=0, high=2, size=(15,))
    adata = AnnData(X=counts, obs=dict(batch=batches))
    # Load twice: the first pass must leave the AnnData usable for the second.
    _ = AnnDatasetFromAnnData(adata, batch_label="batch")
    dataset = AnnDatasetFromAnnData(adata, batch_label="batch")
    assert (
        dataset.n_batches == 2
    ), "AnnDatasetFromAnnData should not modify the anndata object"
| tests/test_scvi.py | 22,043 | iter Sample scale example Differential expression different models DE estimation example Test totalVI DE Differential expression different models adversarial testing ensures neg values raise warning ensures float values raise warning | 233 | en | 0.520064 |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class JianshuSpiderSpiderMiddleware(object):
    """Spider middleware for the jianshu spider.

    Scrapy only calls the hooks that are defined; every hook below keeps the
    default pass-through behaviour.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also wires the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for every response entering the spider; None means "continue".
        return None

    def process_spider_output(self, response, result, spider):
        # Pass the spider's results (Requests / dicts / Items) through untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # Exceptions from the spider (or earlier middleware) are not handled
        # here; returning None defers to Scrapy's default handling.
        pass

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for start requests (no response yet);
        # must yield only Requests.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class JianshuSpiderDownloaderMiddleware(object):
    """Downloader middleware for the jianshu spider (default pass-through)."""

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also wires the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None -> keep processing the request through the remaining downloader
        # middlewares (a Response/Request return or IgnoreRequest would stop it).
        return None

    def process_response(self, request, response, spider):
        # Hand the downloader's response through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # None -> let other middlewares / default handling deal with the
        # download error (a Response or Request here would stop the chain).
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| scrapy/jianshu_spider/jianshu_spider/middlewares.py | 3,611 | -*- coding: utf-8 -*- Define here the models for your spider middleware See documentation in: https://doc.scrapy.org/en/latest/topics/spider-middleware.html Not all methods need to be defined. If a method is not defined, scrapy acts as if the spider middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each response that goes through the spider middleware and into the spider. Should return None or raise an exception. Called with the results returned from the Spider, after it has processed the response. Must return an iterable of Request, dict or Item objects. Called when a spider or process_spider_input() method (from other spider middleware) raises an exception. Should return either None or an iterable of Response, dict or Item objects. Called with the start requests of the spider, and works similarly to the process_spider_output() method, except that it doesn’t have a response associated. Must return only requests (not items). Not all methods need to be defined. If a method is not defined, scrapy acts as if the downloader middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each request that goes through the downloader middleware. Must either: - return None: continue processing this request - or return a Response object - or return a Request object - or raise IgnoreRequest: process_exception() methods of installed downloader middleware will be called Called with the response returned from the downloader. Must either; - return a Response object - return a Request object - or raise IgnoreRequest Called when a download handler or a process_request() (from other downloader middleware) raises an exception. 
Must either: - return None: continue processing this exception - return a Response object: stops process_exception() chain - return a Request object: stops process_exception() chain | 1,931 | en | 0.87019 |
#!/usr/bin/env python
# Copyright 2016 Medical Research Council Harwell.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @author Neil Horner <n.horner@har.mrc.ac.uk>
from setuptools.command import easy_install
# Runtime dependencies this setup script makes sure are importable.
dependencies = ["pyqtgraph", "appdirs", "SimpleITK", "numpy"]

for package in dependencies:
    try:
        __import__(package)  # probe whether the module is already importable
    except ImportError:
        # Not present: install it into the per-user site-packages.
        # NOTE(review): easy_install is deprecated; pip would be the modern route.
        easy_install.main(["--user", package])
    else:
        print("{0} already installed.".format(package))
| setup.py | 1,007 | !/usr/bin/env python Copyright 2016 Medical Research Council Harwell. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author Neil Horner <n.horner@har.mrc.ac.uk> try to import module If it fails, try to easy install it | 694 | en | 0.815972 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
#############################################################################
# #
# hsrp.py --- HSRP protocol support for Scapy #
# #
# Copyright (C) 2010 Mathieu RENARD mathieu.renard(at)gmail.com #
# #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation; version 2. #
# #
# This program is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
#############################################################################
# HSRP Version 1
# Ref. RFC 2281
# HSRP Version 2
# Ref. http://www.smartnetworks.jp/2006/02/hsrp_8_hsrp_version_2.html
##
# $Log: hsrp.py,v $
# Revision 0.2 2011/05/01 15:23:34 mrenard
# Cleanup code
"""
HSRP (Hot Standby Router Protocol): proprietary redundancy protocol for Cisco routers. # noqa: E501
"""
from scapy.fields import ByteEnumField, ByteField, IPField, SourceIPField, \
StrFixedLenField, XIntField, XShortField
from scapy.packet import Packet, bind_layers, bind_bottom_up
from scapy.layers.inet import DestIPField, UDP
from scapy.layers.inet6 import DestIP6Field
class HSRP(Packet):
    """HSRPv1 packet (RFC 2281): Hello/Coup/Resign/Advertise over UDP."""
    name = "HSRP"
    fields_desc = [
        ByteField("version", 0),
        ByteEnumField("opcode", 0, {0: "Hello", 1: "Coup", 2: "Resign", 3: "Advertise"}),  # noqa: E501
        ByteEnumField("state", 16, {0: "Initial", 1: "Learn", 2: "Listen", 4: "Speak", 8: "Standby", 16: "Active"}),  # noqa: E501
        ByteField("hellotime", 3),
        ByteField("holdtime", 10),
        ByteField("priority", 120),
        ByteField("group", 1),
        ByteField("reserved", 0),
        # Clear-text auth field, padded to 8 bytes ("cisco" is the RFC default).
        StrFixedLenField("auth", b"cisco" + b"\00" * 3, 8),
        IPField("virtualIP", "192.168.1.1")]

    def guess_payload_class(self, payload):
        # If the UDP datagram is longer than 28 bytes (presumably UDP header +
        # the fixed HSRPv1 body — confirm), treat the trailing bytes as an
        # MD5 authentication TLV.
        if self.underlayer.len > 28:
            return HSRPmd5
        else:
            return Packet.guess_payload_class(self, payload)
class HSRPmd5(Packet):
    """HSRP MD5 authentication TLV appended after the HSRPv1 body."""
    name = "HSRP MD5 Authentication"
    fields_desc = [
        ByteEnumField("type", 4, {4: "MD5 authentication"}),
        ByteField("len", None),  # auto-computed in post_build when left as None
        ByteEnumField("algo", 0, {1: "MD5"}),
        ByteField("padding", 0x00),
        XShortField("flags", 0x00),
        SourceIPField("sourceip", None),
        XIntField("keyid", 0x00),
        StrFixedLenField("authdigest", b"\00" * 16, 16)]

    def post_build(self, p, pay):
        # Fill in the length byte when the user did not set one.
        # NOTE(review): hex(tmp_len)[30:] is the empty string for any realistic
        # payload length, so this line effectively drops bytes 1..29 of the
        # built packet instead of writing a length byte. This matches upstream
        # behaviour but looks broken — confirm the intended encoding before
        # relying on it.
        if self.len is None and pay:
            tmp_len = len(pay)
            p = p[:1] + hex(tmp_len)[30:] + p[30:]
        return p
# Dissect HSRP on UDP port 1985 and on port 2029 (the latter is used with the
# IPv6 multicast address bound below), whichever side of the conversation
# carries the well-known port.
bind_bottom_up(UDP, HSRP, dport=1985)
bind_bottom_up(UDP, HSRP, sport=1985)
bind_bottom_up(UDP, HSRP, dport=2029)
bind_bottom_up(UDP, HSRP, sport=2029)
# Default ports when building HSRP/UDP packets.
bind_layers(UDP, HSRP, dport=1985, sport=1985)
bind_layers(UDP, HSRP, dport=2029, sport=2029)
# Default destination addresses: the multicast groups HSRP speaks to.
DestIPField.bind_addr(UDP, "224.0.0.2", dport=1985)
DestIP6Field.bind_addr(UDP, "ff02::66", dport=2029)
| lib/scapy/layers/hsrp.py | 3,766 | HSRP (Hot Standby Router Protocol): proprietary redundancy protocol for Cisco routers. # noqa: E501
This file is part of Scapy See http://www.secdev.org/projects/scapy for more information Copyright (C) Philippe Biondi <phil@secdev.org> This program is published under a GPLv2 license hsrp.py --- HSRP protocol support for Scapy Copyright (C) 2010 Mathieu RENARD mathieu.renard(at)gmail.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. HSRP Version 1 Ref. RFC 2281 HSRP Version 2 Ref. http://www.smartnetworks.jp/2006/02/hsrp_8_hsrp_version_2.html $Log: hsrp.py,v $ Revision 0.2 2011/05/01 15:23:34 mrenard Cleanup code noqa: E501 noqa: E501 | 1,545 | en | 0.759747 |
# model settings
# Faster R-CNN with a Guided-Anchoring RPN (GA-RPN) on a Caffe-style ResNet-50.
model = dict(
    type='FasterRCNN',
    pretrained='open-mmlab://resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        # Caffe-style backbone: BN affine params frozen and BN kept in eval mode.
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    # GA-RPN head: predicts anchor location (loss_loc) and shape (loss_shape)
    # in addition to the usual objectness/regression targets.
    rpn_head=dict(
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        octave_base_scale=8,
        scales_per_octave=3,
        octave_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        anchor_base_sizes=None,
        anchoring_means=[.0, .0, .0, .0],
        anchoring_stds=[0.07, 0.07, 0.14, 0.14],
        target_means=(.0, .0, .0, .0),
        target_stds=[0.07, 0.07, 0.11, 0.11],
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=81,  # presumably 80 COCO classes + background
        target_means=[0., 0., 0., 0.],
        target_stds=[0.05, 0.05, 0.1, 0.1],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        # Assigner/sampler for the guided-anchoring (anchor shape) branch.
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        # Assigner/sampler for the regular RPN objectness/regression branch.
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=-1,
        pos_weight=-1,
        center_ratio=0.2,
        ignore_ratio=0.5,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=300,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.6,
            neg_iou_thr=0.6,
            min_pos_iou=0.6,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=300,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=1e-3, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Caffe-style preprocessing: BGR order (to_rgb=False), mean subtraction only.
img_norm_cfg = dict(
    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: step decay at epochs 8 and 11 with a linear warm-up.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_faster_rcnn_r50_caffe_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py | 6,133 | model settings model training and testing settings dataset settings optimizer learning policy yapf:disable dict(type='TensorboardLoggerHook') yapf:enable runtime settings | 170 | en | 0.727131 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.exceptions import HPOneViewException
from hpOneView.oneview_client import OneViewClient
# Appliance connection settings; placeholders are overridden by the file below.
config = {
    "ip": "<oneview_ip>",
    "credentials": {
        "userName": "<oneview_administrator_name>",
        "password": "<oneview_administrator_password>",
    }
}

# Try load config from a file (if there is a config file)
config = try_load_from_file(config)

oneview_client = OneViewClient(config)

# To run this sample you must define a server hardware type uri and an enclosure group uri
enclosure_group_uri = None
server_hardware_type_uri = None

# To run the example 'get a specific storage system' you must define a storage system ID
storage_system_id = None

# To run the example 'get port model associated with a server hardware' you must define a server hardware uri
server_hardware_uri = None
try:
    # Create a server profile template to associate with the server profile
    basic_server_template = oneview_client.server_profile_templates.create(dict(
        name="ProfileTemplate101",
        serverHardwareTypeUri=server_hardware_type_uri,
        enclosureGroupUri=enclosure_group_uri
    ))
    server_template_uri = basic_server_template['uri']

    # Create a server profile
    print("\nCreate a basic connection-less assigned server profile")
    basic_profile_options = dict(
        name="Profile101",
        serverProfileTemplateUri=server_template_uri,
        serverHardwareTypeUri=server_hardware_type_uri,
        enclosureGroupUri=enclosure_group_uri
    )
    basic_profile = oneview_client.server_profiles.create(basic_profile_options)
    profile_uri = basic_profile["uri"]
    pprint(basic_profile)
except HPOneViewException as e:
    print(e.msg)
    # NOTE(review): the rest of the script keeps using basic_profile /
    # profile_uri, so if creation fails here a NameError follows.
# Update bootMode from recently created profile
print("\nUpdate bootMode from recently created profile")
profile_to_update = basic_profile.copy()
profile_to_update["bootMode"] = dict(manageMode=True, mode="BIOS")
profile_updated = oneview_client.server_profiles.update(resource=profile_to_update, id_or_uri=profile_to_update["uri"])
pprint(profile_updated)

# Patch: bring the profile back in line with its template.
print("\nUpdate the profile configuration from server profile template")
profile_updated = oneview_client.server_profiles.patch(id_or_uri=profile_uri, operation="replace",
                                                       path="/templateCompliance", value="Compliant")
pprint(profile_updated)

# Get all
print("\nGet list of all server profiles")
all_profiles = oneview_client.server_profiles.get_all()
for profile in all_profiles:
    print(' %s' % profile['name'])

# Get by property
# NOTE(review): all_profiles[1] assumes at least two profiles exist on the appliance.
print("\nGet a list of server profiles that matches the specified macType")
profile_mac_type = all_profiles[1]["macType"]
profiles = oneview_client.server_profiles.get_by('macType', profile_mac_type)
for profile in profiles:
    print(' %s' % profile['name'])

# Get by name
print("\nGet a server profile by name")
profile = oneview_client.server_profiles.get_by_name("Profile101")
pprint(profile)

# Get by uri
print("\nGet a server profile by uri")
profile = oneview_client.server_profiles.get(profile_uri)
pprint(profile)

if oneview_client.api_version <= 500:
    # Retrieve ServerProfile schema
    # This method available only for API version <= 500
    print("\nRetrieve the generated ServerProfile schema")
    schema = oneview_client.server_profiles.get_schema()
    pprint(schema)
try:
    # Server profile compliance preview
    print("\nGets the preview of manual and automatic updates required to make the server profile consistent "
          "with its template.")
    schema = oneview_client.server_profiles.get_compliance_preview(profile_uri)
    pprint(schema)
except HPOneViewException as e:
    print(e.msg)

# Get profile ports
print("\nRetrieve the port model associated with a server hardware type and enclosure group")
profile_ports = oneview_client.server_profiles.get_profile_ports(enclosureGroupUri=enclosure_group_uri,
                                                                 serverHardwareTypeUri=server_hardware_type_uri)
pprint(profile_ports)

try:
    # Get profile ports (only when a server hardware uri was configured above)
    if server_hardware_uri:
        print("\nRetrieve the port model associated with a server hardware")
        profile_ports = oneview_client.server_profiles.get_profile_ports(serverHardwareUri=server_hardware_uri)
        pprint(profile_ports)
except HPOneViewException as e:
    print(e.msg)

try:
    # Retrieve the error or status messages associated with the specified profile
    print("\nList profile status messages associated with a profile")
    messages = oneview_client.server_profiles.get_messages(profile_uri)
    pprint(messages)
except HPOneViewException as e:
    print(e.msg)

try:
    # Transform an server profile
    print("\nTransform an existing profile by supplying a new server hardware type and/or enclosure group.")
    server_transformed = oneview_client.server_profiles.get_transformation(
        basic_profile['uri'], enclosureGroupUri=enclosure_group_uri, serverHardwareTypeUri=server_hardware_type_uri)
    print("Transformation complete. Updating server profile with the new configuration.")
    profile_updated = oneview_client.server_profiles.update(server_transformed, server_transformed["uri"])
    pprint(profile_updated)
except HPOneViewException as e:
    print(e.msg)

try:
    # Get the list of networks and network sets that are available to a server profile along with their respective ports
    print("\nList all Ethernet networks associated with a server hardware type and enclosure group")
    available_networks = oneview_client.server_profiles.get_available_networks(
        enclosureGroupUri=enclosure_group_uri, serverHardwareTypeUri=server_hardware_type_uri, view='Ethernet')
    pprint(available_networks)
except HPOneViewException as e:
    print(e.msg)

try:
    # Get the all Ethernet networks associated with a server hardware type, enclosure group and scopeuris
    # NOTE(review): the URIs below are hard-coded to a specific appliance and
    # will only resolve there.
    if oneview_client.api_version >= 600:
        enclosure_group_uri = "/rest/enclosure-groups/8cf8fd62-ad9f-4946-abf7-6dac9cb59253"
        server_hardware_type_uri = "/rest/server-hardware-types/B342B5D4-387D-4DEB-ADBB-9D7256DF2A47"
        available_networks = oneview_client.server_profiles.get_available_networks(enclosureGroupUri=enclosure_group_uri,
                                                                                   serverHardwareTypeUri=server_hardware_type_uri, view='Ethernet',
                                                                                   scope_uris="\"'/rest/scopes/3bb0c754-fd38-45af-be8a-4d4419de06e9'\"")
        if len(available_networks) > 0:
            pprint(available_networks)
        else:
            print("No Server Profiles Group found.")
except HPOneViewException as e:
    print(e.msg)

try:
    # Get the list of available servers
    print("\nList all available servers associated with a server hardware type and enclosure group")
    available_servers = oneview_client.server_profiles.get_available_servers(
        enclosureGroupUri=enclosure_group_uri, serverHardwareTypeUri=server_hardware_type_uri)
    pprint(available_servers)
except HPOneViewException as e:
    print(e.msg)
try:
# List available storage systems
print("\nList available storage systems associated with the given enclosure group URI and server hardware type URI")
available_storage_systems = oneview_client.server_profiles.get_available_storage_systems(
count=25, start=0, enclosureGroupUri=enclosure_group_uri, serverHardwareTypeUri=server_hardware_type_uri)
pprint(available_storage_systems)
except HPOneViewException as e:
print(e.msg)
try:
# Get a specific storage system
if storage_system_id:
print("\nRetrieve a specific storage system associated with the given enclosure group URI, a server hardware"
" type URI and a storage system ID")
available_storage_system = oneview_client.server_profiles.get_available_storage_system(
storageSystemId=storage_system_id, enclosureGroupUri=enclosure_group_uri,
serverHardwareTypeUri=server_hardware_type_uri)
pprint(available_storage_system)
except HPOneViewException as e:
print(e.msg)
try:
# List available targets
print("\nList all available servers and bays for a given enclosure group.")
available_targets = oneview_client.server_profiles.get_available_targets(enclosureGroupUri=enclosure_group_uri)
pprint(available_targets)
except HPOneViewException as e:
print(e.msg)
# Generate a new Server Profile Template based on an existing Server Profile
new_spt = oneview_client.server_profiles.get_new_profile_template(basic_profile['uri'])
print('\nNew SPT generated:')
pprint(new_spt)
new_spt['name'] = 'spt_generated_from_sp'
new_spt = oneview_client.server_profile_templates.create(new_spt)
print('\nNew SPT created successfully.')
oneview_client.server_profile_templates.delete(new_spt)
print('\nDropped recently created SPT.')
# Delete the created server profile
print("\nDelete the created server profile")
oneview_client.server_profiles.delete(basic_profile)
print("The server profile was successfully deleted.")
# Delete the created server profile template
oneview_client.server_profile_templates.delete(basic_server_template)
# Delete all server profile (filtering)
print("\nRemove all profiles that match the name 'Profile fake'")
# Create a new profile to delete
oneview_client.server_profiles.create(dict(
name="Profile fake",
serverHardwareTypeUri=server_hardware_type_uri,
enclosureGroupUri=enclosure_group_uri
))
oneview_client.server_profiles.delete_all(filter="name='Profile fake'")
print("The server profiles were successfully deleted.")
| examples/server_profiles.py | 10,920 | -*- coding: utf-8 -*- (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Try load config from a file (if there is a config file) To run this sample you must define a server hardware type uri and an enclosure group uri To run the example 'get a specific storage system' you must define a storage system ID To run the example 'get port model associated with a server hardware' you must define a server hardware uri Create a server profile template to associate with the server profile Create a server profile Update bootMode from recently created profile Patch Get all Get by property Get by name Get by uri Retrieve ServerProfile schema This method available only for API version <= 500 Server profile compliance preview Get profile ports Get profile ports Retrieve the error or status messages associated with the specified profile Transform an server profile Get the list of networks and network sets that are available to a server profile along with their respective ports Get the all Ethernet networks associated with a server hardware type, enclosure group and scopeuris Get the list of available servers List available storage systems Get a specific storage system List available targets Generate a new Server Profile Template based on an existing Server Profile Delete the created server profile Delete the created server profile template Delete all server profile (filtering) Create a new profile to delete | 2,451 | en | 0.855731 |
import sqlite3
import pandas as pd
import numpy as np
import csv
import gzip
from collections import defaultdict
if __name__ == '__main__':
    conn = sqlite3.connect('data/instacart.db')
    c = conn.cursor()

    # Get the orders properly sorted, so we can directly
    # group by user_id, order_id and then compute the weights.
    q = """
    SELECT user_id, order_id, days_since_prior_order
    FROM orders
    ORDER BY order_number
    """
    orders = pd.read_sql(q, conn)

    # A user's first order has no prior order, so the column arrives as
    # NULL/NaN (or an empty string if stored as text). Coerce everything to
    # numeric and treat missing values as day 0. This also replaces the old
    # DataFrame.ix indexer, which was removed in pandas 1.0.
    orders['days_since_prior_order'] = pd.to_numeric(
        orders['days_since_prior_order'], errors='coerce').fillna(0)

    # Cumsum to obtain total days since *first* order, per user.
    orders_g = orders.groupby(['user_id'])['days_since_prior_order'].cumsum()
    orders['cumulative_days'] = orders_g.astype(int)

    # We need to subtract cumulative_days from the day of the order we want to
    # compute weights for, which is each user's last (maximum) order day.
    max_cum_days = orders.groupby(['user_id'])['cumulative_days'].max()
    max_cum_days = max_cum_days.reset_index()
    max_cum_days.columns = ['user_id', 'max_order_day']
    orders = pd.merge(orders, max_cum_days, on="user_id", how='left')

    # Compute weights: a yearly-periodic weight and a recency-decay weight.
    orders['w_periodic'] = (np.cos(2 * (orders['max_order_day'] - orders['cumulative_days']) / 365.0 * 3.14) + 1) / 2
    orders['w_decay'] = 1.0 / ((365 - orders['cumulative_days']) / 365.0 + 1.0)

    # Remove intermediate columns before storage (avoid duplicating data in the DB).
    res = orders.drop(['days_since_prior_order', 'cumulative_days', 'max_order_day'],
                      axis=1)

    # Insert weights into the DB and index the lookup columns.
    res.to_sql('order_weights', conn, if_exists='replace')
    c.execute("CREATE INDEX IF NOT EXISTS idx_tmp1 ON order_weights(user_id)")
    c.execute("CREATE INDEX IF NOT EXISTS idx_tmp2 ON order_weights(order_id)")

    # Persist the index DDL and release the connection.
    conn.commit()
    conn.close()
| data/external/repositories_2to3/164369/kaggle-public-master/instacart/compute_weights_2.py | 1,884 | Get the orders properly sorted, so we can directly group by user_id, order_id and then compute the weights. First day is 0 Cumsum to obtain total days since *first* order But I need to subtract cumulative_days from the actual day of the order we want to compute... which will be the maximum Compute weights Remove unwanted columns (for DB storage, let's try not do duplicate) Insert weights into the DB | 403 | en | 0.858729 |
import cv2
import numpy as np
import math
from vcam import vcam,meshGen
def nothing(x):
    """No-op callback for cv2.createTrackbar; the slider value is ignored."""
    return None
WINDOW_NAME = "output"
cv2.namedWindow(WINDOW_NAME,cv2.WINDOW_NORMAL)
cv2.resizeWindow(WINDOW_NAME,700,700)

# Creating the tracker bar for all the features. OpenCV trackbars only carry
# non-negative integers, so signed/fractional parameters are re-centered and
# rescaled when they are read back inside the loop below.
cv2.createTrackbar("X",WINDOW_NAME,500,1000,nothing)
cv2.createTrackbar("Y",WINDOW_NAME,500,1000,nothing)
cv2.createTrackbar("Z",WINDOW_NAME,0,1000,nothing)
cv2.createTrackbar("alpha",WINDOW_NAME,180,360,nothing)
cv2.createTrackbar("beta",WINDOW_NAME,180,360,nothing)
cv2.createTrackbar("gama",WINDOW_NAME,180,360,nothing)
cv2.createTrackbar("K1",WINDOW_NAME,0,100000,nothing)
cv2.createTrackbar("K2",WINDOW_NAME,0,100000,nothing)
cv2.createTrackbar("P1",WINDOW_NAME,0,100000,nothing)
cv2.createTrackbar("P2",WINDOW_NAME,0,100000,nothing)
cv2.createTrackbar("focus",WINDOW_NAME,600,1000,nothing)
cv2.createTrackbar("Sx",WINDOW_NAME,100,1000,nothing)
cv2.createTrackbar("Sy",WINDOW_NAME,100,1000,nothing)

# cap = cv2.VideoCapture(0)
# ret,img = cap.read()
img = cv2.imread("chess.png")
H,W = img.shape[:2]

# Virtual camera sized to the image, plus a flat plane mesh placed at Z = 1
# that will be projected through the camera each frame.
c1 = vcam(H=H,W=W)
plane = meshGen(H,W)
plane.Z = plane.X*0 + 1
pts3d = plane.getPlane()

while True:
    # ret, img = cap.read()
    img = cv2.imread("chess.png")
    # Translation: sliders re-centered so X/Y span [-500, 500] and Z <= 0.
    X = -cv2.getTrackbarPos("X",WINDOW_NAME) + 500
    Y = -cv2.getTrackbarPos("Y",WINDOW_NAME) + 500
    Z = -cv2.getTrackbarPos("Z",WINDOW_NAME)
    # Rotation angles in degrees, shifted into [-180, 180].
    alpha = cv2.getTrackbarPos("alpha",WINDOW_NAME) - 180
    beta = cv2.getTrackbarPos("beta",WINDOW_NAME) - 180
    gamma = -cv2.getTrackbarPos("gama",WINDOW_NAME) - 180
    # Focal length and per-axis scale factors (Sx/Sy mapped to (0, 10.01]).
    c1.focus = cv2.getTrackbarPos("focus",WINDOW_NAME) - 500
    c1.sx = (cv2.getTrackbarPos("Sx",WINDOW_NAME)+1)/100
    c1.sy = (cv2.getTrackbarPos("Sy",WINDOW_NAME)+1)/100
    # Lens distortion coefficients, scaled down from integer sliders.
    k1 = cv2.getTrackbarPos("K1",WINDOW_NAME)/100000
    k2 = cv2.getTrackbarPos("K2",WINDOW_NAME)/100000
    p1 = cv2.getTrackbarPos("P1",WINDOW_NAME)/100000
    p2 = cv2.getTrackbarPos("P2",WINDOW_NAME)/100000
    c1.KpCoeff[0] = k1
    c1.KpCoeff[1] = k2
    c1.KpCoeff[2] = p1
    c1.KpCoeff[3] = p2
    c1.set_tvec(X,Y,Z)
    c1.set_rvec(alpha,beta,gamma)
    # Project the plane through the virtual camera, derive remap grids, and
    # warp the source image to simulate the camera's view.
    pts2d = c1.project(pts3d)
    map_x,map_y = c1.getMaps(pts2d)
    output = cv2.remap(img,map_x,map_y,interpolation=cv2.INTER_LINEAR)
    M = c1.RT
    print("\n\n############## Camera Matrix ##################")
    print(M)
    cv2.imshow("output",output)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| GUI.py | 2,312 | Creating the tracker bar for all the features cap = cv2.VideoCapture(0) ret,img = cap.read() ret, img = cap.read() | 114 | en | 0.413493 |
#!/usr/bin/python
'''
Gapfilling function that utilizes pFBA and flux sampling to find most
parsimonious additional reactions to achieve minimum flux through the objective
Author: Matthew Jenior
'''
import pandas
import math
import copy
import time
import random
# Using Cobrapy 0.13.0
import cobra
import cobra.test
from cobra.flux_analysis.sampling import OptGPSampler
from cobra.manipulation.delete import *
from cobra.flux_analysis.parsimonious import add_pfba
from cobra.medium import find_boundary_types
from cobra.util import solver as sutil
# pFBA gapfiller
def pfba_gapfill(model, reaction_bag, obj=None, obj_lb=10., obj_constraint=False,
                 iters=1, tasks=None, task_lb=0.05,
                 add_exchanges=True, extracellular='e', cores=4):
    '''
    Function that utilizes iterations of pFBA solution with a universal reaction bag
    in order to gapfill a model.

    Parameters
    ----------
    model : cobra.Model
        Model to be gapfilled
    reaction_bag : cobra.Model
        Reaction bag reference to use during gapfilling
    obj : string
        Reaction ID for objective function in model to be gapfilled.
        Defaults to the model's current objective.
    obj_lb : float
        Lower bound for objective function
    obj_constraint : bool
        Sets objective as constraint which must be maximized
    tasks : list or None
        List of reaction IDs (strings) of metabolic tasks
        to set a minimum lower bound for
    task_lb : float
        Lower bound for any metabolic tasks
    iters : int
        Number of gapfilling rounds. Unique reactions from each round are
        saved and the union is added simultaneously to the model
    add_exchanges : bool
        Identifies extracellular metabolites added during gapfilling that
        are not associated with exchange reactions and creates them
    extracellular : string
        Label for extracellular compartment of model
    cores : int
        Number of processors to utilize during flux sampling

    Returns
    -------
    cobra.Model
        A deep copy of `model` extended with the gapfilled reactions,
        metabolites, and (optionally) new exchange reactions.
    '''
    start_time = time.time()

    # Save some basic network info for downstream membership testing
    orig_rxn_ids = set([str(x.id) for x in model.reactions])
    orig_cpd_ids = set([str(y.id) for y in model.metabolites])
    univ_rxn_ids = set([str(z.id) for z in reaction_bag.reactions])

    # Find overlap in model and reaction bag
    overlap_rxn_ids = univ_rxn_ids.intersection(orig_rxn_ids)

    # Get model objective reaction ID, defaulting to the current objective
    if obj is None:
        obj = get_objective(model)

    # Modify universal reaction bag inside a context so the bag is restored
    # to its original state once the `with` block exits
    new_rxn_ids = set()
    print('Creating universal model...')
    with reaction_bag as universal:

        # Remove overlapping reactions from universal bag, and reset objective if needed
        for rxn in overlap_rxn_ids:
            universal.reactions.get_by_id(rxn).remove_from_model()

        # Set objective in universal if told by user
        # Made constraint as fraction of minimum in next step
        if obj_constraint:
            universal.add_reactions([model.reactions.get_by_id(obj)])
            universal.objective = obj
            orig_rxn_ids.remove(obj)
            orig_rxns = []
            for rxn in orig_rxn_ids:
                orig_rxns.append(copy.deepcopy(model.reactions.get_by_id(rxn)))
        else:
            orig_rxns = list(copy.deepcopy(model.reactions))

        # Add pFBA to universal model and add model reactions
        add_pfba(universal)
        universal.add_reactions(orig_rxns)

        # If previous objective not set as constraint, set minimum lower bound
        if not obj_constraint:
            universal.reactions.get_by_id(obj).lower_bound = obj_lb

        # Set metabolic tasks that must carry flux in gapfilled solution
        if tasks is not None:
            for task in tasks:
                try:
                    universal.reactions.get_by_id(task).lower_bound = task_lb
                except KeyError:
                    # Missing task reactions are skipped rather than aborting
                    print(task + ' not found in model. Ignoring.')
                    continue

        # Run FBA and save solution
        print('Optimizing model with combined reactions...')
        solution = universal.optimize()

        if iters > 1:
            # Sample alternative parsimonious flux distributions and collect
            # every universal reaction that carries flux in any sample
            print('Generating flux sampling object...')
            sutil.fix_objective_as_constraint(universal, fraction=0.99)
            optgp_object = OptGPSampler(universal, processes=cores)

            # Assess the sampled flux distributions
            print('Sampling ' + str(iters) + ' flux distributions...')
            flux_samples = optgp_object.sample(iters)
            rxns = list(flux_samples.columns)
            for distribution in flux_samples.iterrows():
                for flux in range(0, len(list(distribution[1]))):
                    if abs(list(distribution[1])[flux]) > 1e-6:
                        new_rxn_ids |= set([rxns[flux]]).difference(orig_rxn_ids)
        else:
            # Single round: take every reaction carrying flux in the pFBA solution
            rxns = list(solution.fluxes.index)
            fluxes = list(solution.fluxes)
            for flux in range(0, len(fluxes)):
                if abs(fluxes[flux]) > 1e-6:
                    new_rxn_ids |= set([rxns[flux]])

    # Screen new reaction IDs: drop the objective and anything already in the model
    new_rxn_ids.discard(obj)
    new_rxn_ids.difference_update(orig_rxn_ids)

    # Get reactions and metabolites to be added to the model
    print('Gapfilling model...')
    new_rxns = copy.deepcopy([reaction_bag.reactions.get_by_id(rxn) for rxn in new_rxn_ids])
    new_cpd_ids = set()
    for rxn in new_rxns:
        new_cpd_ids |= set([str(x.id) for x in list(rxn.metabolites)])
    new_cpd_ids = new_cpd_ids.difference(orig_cpd_ids)
    new_cpds = copy.deepcopy([reaction_bag.metabolites.get_by_id(cpd) for cpd in new_cpd_ids])

    # Copy model and gapfill
    new_model = copy.deepcopy(model)
    new_model.add_metabolites(new_cpds)
    new_model.add_reactions(new_rxns)

    # Identify extracellular metabolites with no exchanges
    if add_exchanges:
        new_exchanges = extend_exchanges(new_model, new_cpd_ids, extracellular)
        if len(new_exchanges) > 0:
            new_rxn_ids |= new_exchanges

    duration = int(round(time.time() - start_time))
    print('Took ' + str(duration) + ' seconds to gapfill ' + str(len(new_rxn_ids)) + \
          ' reactions and ' + str(len(new_cpd_ids)) + ' metabolites.')

    new_obj_val = new_model.slim_optimize()
    if new_obj_val > 1e-6:
        print('Gapfilled model objective now carries flux (' + str(new_obj_val) + ').')
    else:
        print('Gapfilled model objective still does not carry flux.')

    return new_model
# Adds missing exchanges for extracellulart metbaolites
def extend_exchanges(model, cpd_ids, ex):
    '''
    Add exchange reactions for extracellular metabolites that lack one.

    Parameters
    ----------
    model : cobra.Model
        Model to be extended in place
    cpd_ids : iterable of str
        Metabolite IDs to inspect
    ex : string
        Label of the extracellular compartment

    Returns
    -------
    set of str
        IDs of the exchange reactions that were created
    '''
    model_exchanges = set(find_boundary_types(model, 'exchange', external_compartment=ex))
    new_ex_ids = set()
    for cpd_id in cpd_ids:
        cpd = model.metabolites.get_by_id(cpd_id)
        # Only extracellular metabolites may receive exchange reactions
        if str(cpd.compartment) != ex:
            continue
        # Skip metabolites that already participate in an exchange reaction
        if set(cpd.reactions) & model_exchanges:
            continue
        new_id = 'EX_' + cpd.id
        try:
            model.add_boundary(cpd, type='exchange', reaction_id=new_id, lb=-1000.0, ub=1000.0)
            new_ex_ids.add(new_id)
        except ValueError:
            # Boundary reaction already exists; leave the model unchanged
            pass
    return new_ex_ids
# Returns the reaction ID of the objective reaction
def get_objective(model):
    '''
    Return the reaction ID of the model's current objective.

    Parses the string form of the objective expression, which looks like
    "1.0*RXN - 1.0*RXN_reverse_xxxx", and strips the coefficient from the
    forward (non-reverse) term.

    Raises
    ------
    IndexError
        If the model has no objective set.
    '''
    if not list(model.objective.variables):
        raise IndexError('Model has no objective set.')
    terms = str(model.objective.expression).split()
    # The forward variable is either the first or the third whitespace-separated
    # term, depending on which side the reverse variable ended up on.
    term = terms[2] if 'reverse' in terms[0] else terms[0]
    return term.split('*')[-1]
| pfba_gapfiller.py | 7,930 | Function that utilizes iterations of pFBA solution with a universal reaction bag
in order to gapfill a model.
Parameters
----------
model : cobra.Model
Model to be gapfilled
reaction_bag : cobra.Model
Reaction bag reference to use during gapfilling
obj : string
Reaction ID for objective function in model to be gapfilled.
obj_lb : float
Lower bound for objective function
obj_constraint : bool
Sets objective as contstraint which must be maximized
tasks : list or None
List of reactions IDs (strings) of metabolic tasks
to set a minimum lower bound for
task_lb : float
Lower bound for any metabolic tasks
iters : int
Number of gapfilling rounds. Unique reactions from each round are
saved and the union is added simulatneously to the model
add_exchanges : bool
Identifies extracellular metabolites added during gapfilling that
are not associated with exchange reactions and creates them
extracellular : string
Label for extracellular compartment of model
cores : int
Number of processors to utilize during flux sampling
Gapfilling function that utilizes pFBA and flux sampling to find most
parsimonious additional reactions to achieve minimum flux through the objective
Author: Matthew Jenior
!/usr/bin/python Using Cobrapy 0.13.0 pFBA gapfiller Save some basic network info for downstream membership testing Find overlap in model and reaction bag Get model objective reaction ID Modify universal reaction bag Remove overlapping reactions from universal bag, and reset objective if needed Set objective in universal if told by user Made constraint as fraction of minimum in next step Add pFBA to universal model and add model reactionsuniversal = copy.deepcopy(universal) reset solver If previous objective not set as constraint, set minimum lower bound Set metabolic tasks that must carry flux in gapfilled solution Run FBA and save solution Assess the sampled flux distributions Screen new reaction IDs Get reactions and metabolites to be added to the model Copy model and gapfill Identify extracellular metabolites with no exchanges Adds missing exchanges for extracellulart metbaolites Returns the reaction ID of the objective reaction | 2,203 | en | 0.858558 |
import torch
import time
from audio_zen.acoustics.feature import mag_phase
from audio_zen.acoustics.mask import decompress_cIRM
from audio_zen.inferencer.base_inferencer import BaseInferencer
# for log
from utils.logger import log
print=log
def cumulative_norm(input):
    """Cumulative mean/std normalization over frequency and running time.

    For every frame t, each element is normalized by the mean and standard
    deviation of all frequency bins in frames 0..t, so the statistics are
    causal (streaming-friendly). Accepts [B, F, T] or [B, C, F, T] tensors
    and returns a tensor of the same shape.
    """
    eps = 1e-10
    assert input.ndim in (3, 4)
    orig_shape = input.shape

    # Fold channels into the batch so both ranks share one code path.
    if input.ndim == 4:
        b, c, f, t = orig_shape
        flat = input.reshape(b * c, f, t)
    else:
        b, f, t = orig_shape
        flat = input

    # Per-frame sums over frequency, then running totals over time.
    frame_sum = flat.sum(dim=1)                      # [B*C, T]
    frame_sq_sum = flat.square().sum(dim=1)          # [B*C, T]
    running_sum = frame_sum.cumsum(dim=-1)           # [B*C, T]
    running_sq_sum = frame_sq_sum.cumsum(dim=-1)     # [B*C, T]

    # Number of elements contributing to the statistics at each frame.
    counts = torch.arange(1, t + 1, dtype=input.dtype, device=input.device) * f
    counts = counts.view(1, t).expand_as(running_sum)

    mean = running_sum / counts
    var = (running_sq_sum - 2 * mean * running_sum) / counts + mean.pow(2)
    std = (var + eps).sqrt()

    normed = (flat - mean.unsqueeze(1)) / std.unsqueeze(1)
    return normed.reshape(orig_shape) if input.ndim == 4 else normed
class Inferencer(BaseInferencer):
    """Inference strategies for several speech-enhancement model variants.

    Each public method maps a noisy waveform tensor to an enhanced waveform
    (returned as a CPU numpy array). Which method runs is selected by the
    inference configuration handled in BaseInferencer.
    """

    def __init__(self, config, checkpoint_path, output_dir):
        super().__init__(config, checkpoint_path, output_dir)

    @torch.no_grad()
    def mag(self, noisy, inference_args):
        """Magnitude-domain enhancement; the noisy phase is reused as-is."""
        noisy_complex = self.torch_stft(noisy)
        noisy_mag, noisy_phase = mag_phase(noisy_complex)  # [B, F, T] => [B, 1, F, T]
        enhanced_mag = self.model(noisy_mag.unsqueeze(1)).squeeze(1)
        enhanced = self.torch_istft((enhanced_mag, noisy_phase), length=noisy.size(-1), use_mag_phase=True)
        enhanced = enhanced.detach().squeeze(0).cpu().numpy()
        return enhanced

    @torch.no_grad()
    def scaled_mask(self, noisy, inference_args):
        """Apply a model-predicted complex scaling mask to the noisy spectrum."""
        noisy_complex = self.torch_stft(noisy)
        noisy_mag, noisy_phase = mag_phase(noisy_complex)

        # [B, F, T] => [B, 1, F, T] => model => [B, 2, F, T] => [B, F, T, 2]
        noisy_mag = noisy_mag.unsqueeze(1)
        scaled_mask = self.model(noisy_mag)
        scaled_mask = scaled_mask.permute(0, 2, 3, 1)

        enhanced_complex = noisy_complex * scaled_mask
        enhanced = self.torch_istft(enhanced_complex, length=noisy.size(-1), use_mag_phase=False)
        enhanced = enhanced.detach().squeeze(0).cpu().numpy()
        return enhanced

    @torch.no_grad()
    def sub_band_crm_mask(self, noisy, inference_args):
        """Sub-band model: predict a compressed cRM per frequency band and
        apply the decompressed mask to the noisy complex spectrum."""
        pad_mode = inference_args["pad_mode"]
        n_neighbor = inference_args["n_neighbor"]

        noisy = noisy.cpu().numpy().reshape(-1)
        noisy_D = self.librosa_stft(noisy)

        noisy_real = torch.tensor(noisy_D.real, device=self.device)
        noisy_imag = torch.tensor(noisy_D.imag, device=self.device)
        noisy_mag = torch.sqrt(torch.square(noisy_real) + torch.square(noisy_imag))  # [F, T]
        n_freqs, n_frames = noisy_mag.size()

        # Unfold neighboring frequency bands around each bin for the sub-band model.
        noisy_mag = noisy_mag.reshape(1, 1, n_freqs, n_frames)
        noisy_mag_padded = self._unfold(noisy_mag, pad_mode, n_neighbor)  # [B, N, C, F_s, T] <=> [1, 257, 1, 31, T]
        noisy_mag_padded = noisy_mag_padded.squeeze(0).squeeze(1)  # [257, 31, 200] <=> [B, F_s, T]

        pred_crm = self.model(noisy_mag_padded).detach()  # [B, 2, T] <=> [F, 2, T]
        pred_crm = pred_crm.permute(0, 2, 1).contiguous()  # [B, T, 2]

        # Inverse of the cRM compression: clamp to (-lim, lim) then invert the
        # 10 * tanh-style mapping used during training.
        lim = 9.99
        pred_crm = lim * (pred_crm >= lim) - lim * (pred_crm <= -lim) + pred_crm * (torch.abs(pred_crm) < lim)
        pred_crm = -10 * torch.log((10 - pred_crm) / (10 + pred_crm))

        # Complex multiplication of the mask with the noisy spectrum.
        enhanced_real = pred_crm[:, :, 0] * noisy_real - pred_crm[:, :, 1] * noisy_imag
        enhanced_imag = pred_crm[:, :, 1] * noisy_real + pred_crm[:, :, 0] * noisy_imag

        enhanced_real = enhanced_real.cpu().numpy()
        enhanced_imag = enhanced_imag.cpu().numpy()
        enhanced = self.librosa_istft(enhanced_real + 1j * enhanced_imag, length=len(noisy))
        return enhanced

    @torch.no_grad()
    def full_band_crm_mask(self, noisy, inference_args):
        """Full-band model: predict a compressed cRM over the whole spectrum."""
        noisy_complex = self.torch_stft(noisy)
        noisy_mag, _ = mag_phase(noisy_complex)

        noisy_mag = noisy_mag.unsqueeze(1)
        t1 = time.time()
        pred_crm = self.model(noisy_mag)
        t2 = time.time()
        pred_crm = pred_crm.permute(0, 2, 3, 1)

        pred_crm = decompress_cIRM(pred_crm)
        # Complex multiplication of the mask with the noisy spectrum.
        enhanced_real = pred_crm[..., 0] * noisy_complex.real - pred_crm[..., 1] * noisy_complex.imag
        enhanced_imag = pred_crm[..., 1] * noisy_complex.real + pred_crm[..., 0] * noisy_complex.imag
        enhanced_complex = torch.stack((enhanced_real, enhanced_imag), dim=-1)
        enhanced = self.torch_istft(enhanced_complex, length=noisy.size(-1))
        enhanced = enhanced.detach().squeeze(0).cpu().numpy()

        # Real-time factor of the model forward pass alone (model time / audio time).
        rtf = (t2 - t1) / (len(enhanced) * 1.0 / self.acoustic_config["sr"])
        print(f"model rtf: {rtf}")
        return enhanced

    @torch.no_grad()
    def overlapped_chunk(self, noisy, inference_args):
        """Process long multi-channel input in 50%-overlapped chunks and
        cross-fade the chunk outputs with a Hann window."""
        sr = self.acoustic_config["sr"]
        noisy = noisy.squeeze(0)

        num_mics = 8
        chunk_length = sr * inference_args["chunk_length"]
        chunk_hop_length = chunk_length // 2
        num_chunks = int(noisy.shape[-1] / chunk_hop_length) + 1

        win = torch.hann_window(chunk_length, device=noisy.device)

        prev = None
        enhanced = None
        # Prepend a simulated silent segment so the model is not fed speech
        # right away — it handles an abrupt speech onset poorly.
        for chunk_idx in range(num_chunks):
            if chunk_idx == 0:
                pad = torch.zeros((num_mics, 256), device=noisy.device)

                chunk_start_position = chunk_idx * chunk_hop_length
                chunk_end_position = chunk_start_position + chunk_length

                # concat([(8, 256), (..., ... + chunk_length)])
                noisy_chunk = torch.cat((pad, noisy[:, chunk_start_position:chunk_end_position]), dim=1)
                enhanced_chunk = self.model(noisy_chunk.unsqueeze(0))
                enhanced_chunk = torch.squeeze(enhanced_chunk)
                enhanced_chunk = enhanced_chunk[256:]

                # Save the prior half chunk,
                cur = enhanced_chunk[:chunk_length // 2]

                # only for the 1st chunk,no overlap for the very 1st chunk prior half
                prev = enhanced_chunk[chunk_length // 2:] * win[chunk_length // 2:]
            else:
                # use the previous noisy data as the pad
                pad = noisy[:, (chunk_idx * chunk_hop_length - 256):(chunk_idx * chunk_hop_length)]

                chunk_start_position = chunk_idx * chunk_hop_length
                chunk_end_position = chunk_start_position + chunk_length

                noisy_chunk = torch.cat((pad, noisy[:8, chunk_start_position:chunk_end_position]), dim=1)
                enhanced_chunk = self.model(noisy_chunk.unsqueeze(0))
                enhanced_chunk = torch.squeeze(enhanced_chunk)
                enhanced_chunk = enhanced_chunk[256:]

                # Window the chunk to smooth the concatenation seam between chunks.
                enhanced_chunk = enhanced_chunk * win[:len(enhanced_chunk)]

                tmp = enhanced_chunk[:chunk_length // 2]
                cur = tmp[:min(len(tmp), len(prev))] + prev[:min(len(tmp), len(prev))]
                prev = enhanced_chunk[chunk_length // 2:]

            if enhanced is None:
                enhanced = cur
            else:
                enhanced = torch.cat((enhanced, cur), dim=0)

        enhanced = enhanced[:noisy.shape[1]]
        return enhanced.detach().squeeze(0).cpu().numpy()

    @torch.no_grad()
    def time_domain(self, noisy, inference_args):
        """End-to-end time-domain model: waveform in, waveform out."""
        noisy = noisy.to(self.device)
        enhanced = self.model(noisy)
        return enhanced.detach().squeeze().cpu().numpy()
if __name__ == '__main__':
    # Smoke test: cumulative_norm must preserve the input shape.
    a = torch.rand(10, 2, 161, 200)
    print(cumulative_norm(a).shape)
| speech_enhance/fullsubnet/inferencer/inferencer.py | 8,392 | for log [B, T] [B, T] [B, T] [1, T] [1, T] => [B, T] B, T B, T B, T [B, F, T] => [B, 1, F, T] [B, F, T] => [B, 1, F, T] => model => [B, 2, F, T] => [B, F, T, 2] [F, T] [B, N, C, F_s, T] <=> [1, 257, 1, 31, T] [257, 31, 200] <=> [B, F_s, T] [B, 2, T] <=> [F, 2, T] [B, T, 2] 模拟语音的静音段,防止一上来就给语音,处理的不好 concat([(8, 256), (..., ... + chunk_length)]) Save the prior half chunk, only for the 1st chunk,no overlap for the very 1st chunk prior half use the previous noisy data as the pad 使用这个窗函数来对拼接的位置进行平滑? | 498 | en | 0.749364 |
# test_codecs.py from CPython 2.7, modified for Jython
from test import test_support
import unittest
import codecs
import locale
import sys, StringIO
if not test_support.is_jython:
import _testcapi
class Queue(object):
    """
    FIFO character queue: write bytes at one end, read them back from the
    other. read(-1) (the default) drains the entire buffer.
    """
    def __init__(self):
        self._buffer = ""

    def write(self, chars):
        self._buffer = self._buffer + chars

    def read(self, size=-1):
        if size < 0:
            # Drain everything that has been written so far.
            data, self._buffer = self._buffer, ""
        else:
            # Hand back at most `size` characters, keeping the rest queued.
            data, self._buffer = self._buffer[:size], self._buffer[size:]
        return data
class ReadTest(unittest.TestCase):
    def check_partial(self, input, partialresults):
        """Feed `input` to decoders one byte at a time and assert the decoded
        prefix after each byte equals the matching entry of `partialresults`.
        Exercises StreamReader, IncrementalDecoder (including reset), and
        iterdecode for self.encoding."""
        # get a StreamReader for the encoding and feed the bytestring version
        # of input to the reader byte by byte. Read everything available from
        # the StreamReader and check that the results equal the appropriate
        # entries from partialresults.
        q = Queue()
        r = codecs.getreader(self.encoding)(q)
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            q.write(c)
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), u"")
        self.assertEqual(r.bytebuffer, "")
        self.assertEqual(r.charbuffer, u"")

        # do the check again, this time using an incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")

        # Check whether the reset method works properly
        d.reset()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")

        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            u"".join(codecs.iterdecode(encoded, self.encoding))
        )
    def test_readline(self):
        """StreamReader.readline must split on \\n, \\r\\n, lone \\r and the
        Unicode line separator U+2028, with and without keepends, for both
        short and very long lines."""
        def getreader(input):
            stream = StringIO.StringIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        def readalllines(input, keepends=True, size=None):
            # Read every line and join with "|" so line boundaries are visible
            # in assertion failures.
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)

        s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = u"foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)

        # Test long lines (multiple calls to read() in readline())
        # NOTE(review): u"\3042" looks like a typo for u"\u3042" (HIRAGANA A);
        # kept as-is to preserve behavior — confirm against upstream CPython.
        vw = []
        vwo = []
        for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
            vw.append((i*200)*u"\3042" + lineend)
            vwo.append((i*200)*u"\3042")
        self.assertEqual(readalllines("".join(vw), True), "".join(vw))
        self.assertEqual(readalllines("".join(vw), False),"".join(vwo))

        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in xrange(80):
            for lineend in u"\n \r\n \r \u2028".split():
                s = 10*(size*u"a" + lineend + u"xxx\n")
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*u"a" + lineend,
                    )
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*u"a",
                    )
    def test_bug1175396(self):
        """Regression test for CPython bug #1175396: iterating a StreamReader
        over a realistic multi-line document must yield exactly the original
        lines (no merged or split lines around \\r\\n boundaries)."""
        s = [
            '<%!--===================================================\r\n',
            '    BLOG index page: show recent articles,\r\n',
            '    today\'s articles, or articles of a specific date.\r\n',
            '========================================================--%>\r\n',
            '<%@inputencoding="ISO-8859-1"%>\r\n',
            '<%@pagetemplate=TEMPLATE.y%>\r\n',
            '<%@import=import frog.util, frog%>\r\n',
            '<%@import=import frog.objects%>\r\n',
            '<%@import=from frog.storageerrors import StorageError%>\r\n',
            '<%\r\n',
            '\r\n',
            'import logging\r\n',
            'log=logging.getLogger("Snakelets.logger")\r\n',
            '\r\n',
            '\r\n',
            'user=self.SessionCtx.user\r\n',
            'storageEngine=self.SessionCtx.storageEngine\r\n',
            '\r\n',
            '\r\n',
            'def readArticlesFromDate(date, count=None):\r\n',
            '    entryids=storageEngine.listBlogEntries(date)\r\n',
            '    entryids.reverse() # descending\r\n',
            '    if count:\r\n',
            '        entryids=entryids[:count]\r\n',
            '    try:\r\n',
            '        return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
            '    except StorageError,x:\r\n',
            '        log.error("Error loading articles: "+str(x))\r\n',
            '        self.abort("cannot load articles")\r\n',
            '\r\n',
            'showdate=None\r\n',
            '\r\n',
            'arg=self.Request.getArg()\r\n',
            'if arg=="today":\r\n',
            '    #-------------------- TODAY\'S ARTICLES\r\n',
            '    self.write("<h2>Today\'s articles</h2>")\r\n',
            '    showdate = frog.util.isodatestr() \r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'elif arg=="active":\r\n',
            '    #-------------------- ACTIVE ARTICLES redirect\r\n',
            '    self.Yredirect("active.y")\r\n',
            'elif arg=="login":\r\n',
            '    #-------------------- LOGIN PAGE redirect\r\n',
            '    self.Yredirect("login.y")\r\n',
            'elif arg=="date":\r\n',
            '    #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
            '    showdate = self.Request.getParameter("date")\r\n',
            '    self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'else:\r\n',
            '    #-------------------- RECENT ARTICLES\r\n',
            '    self.write("<h2>Recent articles</h2>")\r\n',
            '    dates=storageEngine.listBlogEntryDates()\r\n',
            '    if dates:\r\n',
            '        entries=[]\r\n',
            '        SHOWAMOUNT=10\r\n',
            '        for showdate in dates:\r\n',
            '            entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
            '            if len(entries)>=SHOWAMOUNT:\r\n',
            '                break\r\n',
            '                \r\n',
        ]
        stream = StringIO.StringIO("".join(s).encode(self.encoding))
        reader = codecs.getreader(self.encoding)(stream)
        for (i, line) in enumerate(reader):
            self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue()
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=False), u"foo")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=False), u"")
self.assertEqual(reader.readline(keepends=False), u"bar")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=False), u"baz")
self.assertEqual(reader.readline(keepends=False), u"")
# Lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=True), u"foo\r")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=True), u"\n")
self.assertEqual(reader.readline(keepends=True), u"bar\r")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=True), u"baz")
self.assertEqual(reader.readline(keepends=True), u"")
writer.write(u"foo\r\n")
self.assertEqual(reader.readline(keepends=True), u"foo\r\n")
def test_bug1098990_a(self):
s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = u"next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), u"")
def test_bug1098990_b(self):
s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = u"stillokay:bbbbxx\r\n"
s4 = u"broken!!!!badbad\r\n"
s5 = u"againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), u"")
class UTF32Test(ReadTest):
    """BOM-sniffing "utf-32" codec: byte order is taken from the stream's BOM."""
    encoding = "utf-32"
    # u"spamspam" encoded once with a little-endian BOM, once big-endian.
    spamle = ('\xff\xfe\x00\x00'
              's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = ('\x00\x00\xfe\xff'
              '\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              '\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
    def test_only_one_bom(self):
        """The stream writer must emit a single BOM, not one per write()."""
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEqual(f.read(), u"spamspam")
    def test_badbom(self):
        """An all-0xFF prefix is not a valid UTF-32 BOM and must fail."""
        s = StringIO.StringIO(4*"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO(8*"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        # Each list entry is the text expected after feeding one more byte
        # of the encoded input (driver supplied by ReadTest.check_partial).
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read
                u"", # third byte of BOM read
                u"", # fourth byte of BOM read => byteorder known
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_handlers(self):
        """'replace' yields U+FFFD for a bad byte; 'ignore' drops it."""
        self.assertEqual((u'\ufffd', 1),
                         codecs.utf_32_decode('\x01', 'replace', True))
        self.assertEqual((u'', 1),
                         codecs.utf_32_decode('\x01', 'ignore', True))
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          "\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
    """Fixed-endian "utf-32-le" codec (no BOM handling)."""
    encoding = "utf-32-le"
    def test_partial(self):
        # Expected decoded text after each additional encoded byte; one code
        # point completes every 4 bytes (no BOM, so output starts immediately).
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_simple(self):
        """A supplementary character encodes to 4 little-endian bytes."""
        self.assertEqual(u"\U00010203".encode(self.encoding), "\x03\x02\x01\x00")
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          "\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = '\x00\x00\x01\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
    """Fixed-endian "utf-32-be" codec (no BOM handling)."""
    encoding = "utf-32-be"
    def test_partial(self):
        # Expected decoded text after each additional encoded byte; one code
        # point completes every 4 bytes (no BOM, so output starts immediately).
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_simple(self):
        """A supplementary character encodes to 4 big-endian bytes."""
        self.assertEqual(u"\U00010203".encode(self.encoding), "\x00\x01\x02\x03")
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          "\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = '\x00\x01\x00\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
    """BOM-sniffing "utf-16" codec."""
    encoding = "utf-16"
    # u"spamspam" encoded once with a little-endian BOM, once big-endian.
    spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
    def test_only_one_bom(self):
        """The stream writer must emit a single BOM, not one per write()."""
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEqual(f.read(), u"spamspam")
    def test_badbom(self):
        """0xFFFF is not a valid UTF-16 BOM and must fail."""
        s = StringIO.StringIO("\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO("\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        # Expected decoded text after each additional encoded byte
        # (driver supplied by ReadTest.check_partial).
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read => byteorder known
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_handlers(self):
        """'replace' yields U+FFFD for a bad byte; 'ignore' drops it."""
        self.assertEqual((u'\ufffd', 1),
                         codecs.utf_16_decode('\x01', 'replace', True))
        self.assertEqual((u'', 1),
                         codecs.utf_16_decode('\x01', 'ignore', True))
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)
    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = u'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(test_support.unlink, test_support.TESTFN)
        with open(test_support.TESTFN, 'wb') as fp:
            fp.write(s)
        with codecs.open(test_support.TESTFN, 'U', encoding=self.encoding) as reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
    """Fixed-endian "utf-16-le" codec (no BOM handling)."""
    encoding = "utf-16-le"
    def test_partial(self):
        # Expected decoded text after each additional encoded byte;
        # one BMP code point completes every 2 bytes.
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, "\xff", "strict", True)
class UTF16BETest(ReadTest):
    """Fixed-endian "utf-16-be" codec (no BOM handling)."""
    encoding = "utf-16-be"
    def test_partial(self):
        # Expected decoded text after each additional encoded byte;
        # one BMP code point completes every 2 bytes.
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, "\xff", "strict", True)
class UTF8Test(ReadTest):
    """Incremental decoding of variable-length UTF-8 sequences."""
    encoding = "utf-8"
    def test_partial(self):
        # Expected decoded text after each additional encoded byte; the test
        # string covers 1-, 2- and 3-byte UTF-8 sequences.
        self.check_partial(
            u"\x00\xff\u07ff\u0800\uffff",
            [
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800\uffff",
            ]
        )
class UTF7Test(ReadTest):
    """Incremental decoding of UTF-7, including its '+...-' Base64 runs."""
    encoding = "utf-7"
    def test_partial(self):
        # u"a+-b": '+-' is the UTF-7 escape for a literal '+'.
        self.check_partial(
            u"a+-b",
            [
                u"a",
                u"a",
                u"a+",
                u"a+-",
                u"a+-b",
            ]
        )
    # Jython extra (test supplementary characters)
    @unittest.skipIf(not test_support.is_jython, "Jython supports surrogate pairs")
    def test_partial_supp(self):
        # Check the encoding is what we think it is
        ustr = u"x\U00023456.\u0177\U00023456\u017az"
        bstr = b'x+2E3cVg.+AXfYTdxWAXo-z'
        self.assertEqual(ustr.encode(self.encoding), bstr)
        # Expected decoded text after each encoded byte; nothing is emitted
        # while a Base64 run is open until its terminator arrives.
        self.check_partial(
            ustr,
            [
                u"x",
                u"x", # '+' added: begins Base64
                u"x",
                u"x",
                u"x",
                u"x",
                u"x",
                u"x",
                u"x\U00023456.", # '.' added: ends Base64
                u"x\U00023456.", # '+' added: begins Base64
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.\u0177\U00023456\u017a", # '-' added: ends Base64
                u"x\U00023456.\u0177\U00023456\u017az",
            ]
        )
class UTF16ExTest(unittest.TestCase):
    """Direct checks of the low-level codecs.utf_16_ex_decode() helper."""

    def test_errors(self):
        # A truncated (single-byte) input cannot be decoded strictly.
        with self.assertRaises(UnicodeDecodeError):
            codecs.utf_16_ex_decode("\xff", "strict", 0, True)

    def test_bad_args(self):
        # At least the data argument is mandatory.
        with self.assertRaises(TypeError):
            codecs.utf_16_ex_decode()
@unittest.skipIf(test_support.is_jython, "Jython has no _codecs.readbuffer_encode method")
class ReadBufferTest(unittest.TestCase):
    """codecs.readbuffer_encode() over objects exposing the buffer API."""

    def test_array(self):
        import array
        buf = array.array("c", "spam")
        # The buffer's raw bytes come back unchanged, with their length.
        self.assertEqual(codecs.readbuffer_encode(buf), ("spam", 4))

    def test_empty(self):
        self.assertEqual(codecs.readbuffer_encode(""), ("", 0))

    def test_bad_args(self):
        # Missing argument, and an object without the buffer interface.
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
@unittest.skipIf(test_support.is_jython, "Jython has no _codecs.charbuffer_encode method")
class CharBufferTest(unittest.TestCase):
    """codecs.charbuffer_encode() over character buffers."""

    def test_string(self):
        # The characters come back unchanged, with their length.
        self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))

    def test_empty(self):
        self.assertEqual(codecs.charbuffer_encode(""), ("", 0))

    def test_bad_args(self):
        # Missing argument, and an object without the char-buffer interface.
        self.assertRaises(TypeError, codecs.charbuffer_encode)
        self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
    """Tests for the BOM-skipping "utf-8-sig" codec.

    Only the FIRST encoded BOM is treated as a signature and skipped; any
    later U+FEFF must be decoded and returned as a character.
    """
    encoding = "utf-8-sig"

    def test_partial(self):
        # Expected decoded text after each additional encoded byte.  The
        # input deliberately contains a second BOM that must be emitted.
        self.check_partial(
            u"\ufeff\x00\xff\u07ff\u0800\uffff",
            [
                u"",
                u"",
                u"", # First BOM has been read and skipped
                u"",
                u"",
                u"\ufeff", # Second BOM has been read and emitted
                u"\ufeff\x00", # "\x00" read and emitted
                u"\ufeff\x00", # First byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        unicode("\xef\xbb\xbf", "utf-8-sig")

    def test_bom(self):
        # An incremental decoder must also skip the signature BOM.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = u"spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def _check_stream(self, bytestring, unistring):
        # Helper: decode *bytestring* through a stream reader using many
        # different read sizes and check the reassembled text each time.
        # (Previously this loop was duplicated verbatim in test_stream_bom
        # and test_stream_bare.)
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + range(1, 11) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(StringIO.StringIO(bytestring))
            ostream = StringIO.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bom(self):
        # Input starts with a signature BOM, which must be skipped.
        self._check_stream(codecs.BOM_UTF8 + "ABC\xC2\xA1\xE2\x88\x80XYZ",
                           u"ABC\u00A1\u2200XYZ")

    def test_stream_bare(self):
        # Input without a BOM decodes as plain UTF-8.
        self._check_stream("ABC\xC2\xA1\xE2\x88\x80XYZ",
                           u"ABC\u00A1\u2200XYZ")
class EscapeDecodeTest(unittest.TestCase):
    """Minimal coverage of codecs.escape_decode()."""

    def test_empty(self):
        # Empty input decodes to an empty string with zero bytes consumed.
        decoded, consumed = codecs.escape_decode("")
        self.assertEqual((decoded, consumed), ("", 0))
class RecodingTest(unittest.TestCase):
    """Writing through EncodedFile must not crash the interpreter at exit."""
    def test_recoding(self):
        f = StringIO.StringIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        # f2.write(u"a")
        # Must be bytes in Jython (and probably should have been in CPython)
        f2.write(b"\x00\x00\x00\x61")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
# Each entry is a (unicode, punycode) pair taken from the RFC's sample
# strings; the ASCII descriptions name the script/language of each sample.
punycode_testcases = [
    # A Arabic (Egyptian):
    (u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     "egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    (u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     "ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    (u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     "ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    (u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     u"\u0065\u0073\u006B\u0079",
     "Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    (u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     u"\u05D1\u05E8\u05D9\u05EA",
     "4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    (u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     u"\u0939\u0948\u0902",
     "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    (u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    (u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     "psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    (u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     u"\u0438",
     "b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    (u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     u"\u0061\u00F1\u006F\u006C",
     "PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    (u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     u"\u0056\u0069\u1EC7\u0074",
     "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    (u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     "3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    (u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     u"\u004F\u004E\u004B\u0045\u0059\u0053",
     "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    (u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     "Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    (u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     "2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    (u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     u"\u308B\u0035\u79D2\u524D",
     "MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    (u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     "de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    (u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     "d9juau41awczczp"),
    # (S) -> $1.00 <-
    (u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     u"\u003C\u002D",
     "-> $1.00 <--")
    ]

# Sanity check (debug aid): flag any entry that is not a 2-tuple.
for i in punycode_testcases:
    if len(i)!=2:
        print repr(i)
class PunycodeTest(unittest.TestCase):
    """Round-trip the RFC 3492 sample strings through the punycode codec."""

    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Need to convert both strings to lower case, since
            # some of the extended encodings use upper case, but our
            # code produces only lower case. Converting just puny to
            # lower is also insufficient, since some of the input characters
            # are upper case.
            encoded = uni.encode("punycode")
            self.assertEqual(encoded.lower(), puny.lower())

    def test_decode(self):
        for uni, puny in punycode_testcases:
            decoded = puny.decode("punycode")
            self.assertEqual(uni, decoded)
class UnicodeInternalTest(unittest.TestCase):
    """The deprecated "unicode_internal" codec (UCS-4-wide builds only)."""
    def test_bug1251300(self):
        # Decoding with unicode_internal used to not correctly handle "code
        # points" above 0x10ffff on UCS-4 builds.
        if sys.maxunicode > 0xffff:
            ok = [
                ("\x00\x10\xff\xff", u"\U0010ffff"),
                ("\x00\x00\x01\x01", u"\U00000101"),
                ("", u""),
            ]
            not_ok = [
                "\x7f\xff\xff\xff",
                "\x80\x00\x00\x00",
                "\x81\x00\x00\x00",
                "\x00",
                "\x00\x00\x00\x00\x00",
            ]
            for internal, uni in ok:
                # The raw data is in native byte order; flip it on LE hosts.
                if sys.byteorder == "little":
                    internal = "".join(reversed(internal))
                self.assertEqual(uni, internal.decode("unicode_internal"))
            for internal in not_ok:
                if sys.byteorder == "little":
                    internal = "".join(reversed(internal))
                self.assertRaises(UnicodeDecodeError, internal.decode,
                                  "unicode_internal")
    def test_decode_error_attributes(self):
        """UnicodeDecodeError must carry encoding, object, start and end."""
        if sys.maxunicode > 0xffff:
            try:
                "\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
            except UnicodeDecodeError, ex:
                if test_support.is_jython:
                    # Jython delegates internally to utf-32be and it shows here
                    self.assertEqual("utf-32", ex.encoding)
                else:
                    self.assertEqual("unicode_internal", ex.encoding)
                self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
                self.assertEqual(4, ex.start)
                self.assertEqual(8, ex.end)
            else:
                self.fail("UnicodeDecodeError not raised")
    def test_decode_callback(self):
        """A registered 'ignore' handler must skip the bad 4-byte unit."""
        if sys.maxunicode > 0xffff:
            codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
            decoder = codecs.getdecoder("unicode_internal")
            ab = u"ab".encode("unicode_internal")
            ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                              "UnicodeInternalTest")
            self.assertEqual((u"ab", 12), ignored)
    def test_encode_length(self):
        # Issue 3739
        encoder = codecs.getencoder("unicode_internal")
        self.assertEqual(encoder(u"a")[1], 1)
        self.assertEqual(encoder(u"\xe9\u0142")[1], 2)
        encoder = codecs.getencoder("string-escape")
        self.assertEqual(encoder(r'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Each entry is (UTF-8 input, expected nameprep output).  An expected value
# of None marks a prohibited input; a (None, None) entry is a skipped case.
nameprep_tests = [
    # 3.1 Map to nothing.
    ('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     '\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     '\xb8\x8f\xef\xbb\xbf',
     'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    ('CAFE',
     'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    ('\xc3\x9f',
     'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    ('\xc4\xb0',
     'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    ('\xc5\x83\xcd\xba',
     '\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    ('j\xcc\x8c\xc2\xa0\xc2\xaa',
     '\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    ('\xe1\xbe\xb7',
     '\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    ('\xc7\xb0',
     '\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    ('\xce\x90',
     '\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    ('\xce\xb0',
     '\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    ('\xe1\xba\x96',
     '\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    ('\xe1\xbd\x96',
     '\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (' ',
     ' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    ('\xc2\xa0',
     ' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    ('\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    ('\xe2\x80\x80',
     ' '),
    # 3.18 Zero Width Space U+200b.
    ('\xe2\x80\x8b',
     ''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    ('\xe3\x80\x80',
     ' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    ('\x10\x7f',
     '\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    ('\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    ('\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    ('\xef\xbb\xbf',
     ''),
    # 3.24 Non-ASCII control character U+1D175.
    ('\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    ('\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    ('\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    ('\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    ('\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    ('\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    ('\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    ('\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    ('\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    ('\xcd\x81',
     '\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    ('\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    ('\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    ('\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    ('\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    ('foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    ('foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    ('foo\xef\xb9\xb6bar',
     'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    ('\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    ('\xd8\xa71\xd8\xa8',
     '\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #('\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    ('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     '\xaa\xce\xb0\xe2\x80\x80',
     'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    ('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     '\x80',
     'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     '\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     '\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
@unittest.skipIf(test_support.is_jython, "FIXME: incomplete unicodedata module")
class NameprepTest(unittest.TestCase):
    """Run the libidn nameprep vectors against encodings.idna.nameprep."""
    def test_nameprep(self):
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Skipped
                continue
            # The Unicode strings are given in UTF-8
            orig = unicode(orig, "utf-8")
            if prepped is None:
                # Input contains prohibited characters
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = unicode(prepped, "utf-8")
                try:
                    self.assertEqual(nameprep(orig), prepped)
                except Exception,e:
                    # Report which numbered vector failed (they are 1-based).
                    raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
@unittest.skipIf(test_support.is_jython, "FIXME: Jython issue 2000 missing support for IDNA")
class IDNACodecTest(unittest.TestCase):
    """The "idna" codec: whole-string, stream and incremental operation."""
    def test_builtin_decode(self):
        self.assertEqual(unicode("python.org", "idna"), u"python.org")
        self.assertEqual(unicode("python.org.", "idna"), u"python.org.")
        self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
        self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
    def test_builtin_encode(self):
        self.assertEqual(u"python.org".encode("idna"), "python.org")
        self.assertEqual("python.org.".encode("idna"), "python.org.")
        self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
        self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
    def test_stream(self):
        """A stream reader over IDNA data is exhausted after read()."""
        import StringIO
        r = codecs.getreader("idna")(StringIO.StringIO("abc"))
        r.read(3)
        self.assertEqual(r.read(), u"")
    def test_incremental_decode(self):
        # iterdecode() decodes one label at a time.
        self.assertEqual(
            "".join(codecs.iterdecode("python.org", "idna")),
            u"python.org"
        )
        self.assertEqual(
            "".join(codecs.iterdecode("python.org.", "idna")),
            u"python.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
            u"pyth\xf6n.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
            u"pyth\xf6n.org."
        )
        # An incremental decoder buffers until a label is complete
        # (terminated by "." or by final=True).
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode("xn--xam", ), u"")
        self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
        self.assertEqual(decoder.decode(u"rg"), u"")
        self.assertEqual(decoder.decode(u"", True), u"org")
        decoder.reset()
        self.assertEqual(decoder.decode("xn--xam", ), u"")
        self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
        self.assertEqual(decoder.decode("rg."), u"org.")
        self.assertEqual(decoder.decode("", True), u"")
    def test_incremental_encode(self):
        self.assertEqual(
            "".join(codecs.iterencode(u"python.org", "idna")),
            "python.org"
        )
        self.assertEqual(
            "".join(codecs.iterencode(u"python.org.", "idna")),
            "python.org."
        )
        self.assertEqual(
            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
            "xn--pythn-mua.org."
        )
        self.assertEqual(
            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
            "xn--pythn-mua.org."
        )
        # An incremental encoder likewise buffers until a label is complete.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode(u"\xe4x"), "")
        self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.")
        self.assertEqual(encoder.encode(u"", True), "org")
        encoder.reset()
        self.assertEqual(encoder.encode(u"\xe4x"), "")
        self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
        self.assertEqual(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
    """Module-level codecs API: encode/decode/register/lookup/get*."""
    def test_decode(self):
        self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
                         u'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        # With no encoding argument the default (ASCII here) is used.
        self.assertEqual(codecs.decode('abc'), u'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
    def test_encode(self):
        self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, u"foo", "__spam__")
        self.assertEqual(codecs.encode(u'abc'), 'abc')
        # NOTE(review): u'\xffff' lexes as u'\xff' + "ff"; the \xff alone
        # already fails ASCII, so the assertion holds either way.
        self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)
    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")
    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")
    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")
    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")
    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as a dotless "i"
        oldlocale = locale.getlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
    """StreamReader.readlines() over a UTF-8 byte stream."""

    def setUp(self):
        self.reader = codecs.getreader('utf-8')
        self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        f = self.reader(self.stream)
        # Two lines: the first keeps its newline, the final one has none.
        expected = [u'\ud55c\n', u'\uae00']
        self.assertEqual(f.readlines(), expected)
class EncodedFileTest(unittest.TestCase):
    """codecs.EncodedFile() transcoding in both directions."""

    def test_basic(self):
        # Reading: backing bytes are UTF-8, presented to the caller re-encoded
        # as UTF-16-LE.
        backing = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
        transcoder = codecs.EncodedFile(backing, 'utf-16-le', 'utf-8')
        self.assertEqual(transcoder.read(), '\\\xd5\n\x00\x00\xae')
        # Writing: the caller supplies UTF-8, the backing file receives
        # Latin-1.
        backing = StringIO.StringIO()
        transcoder = codecs.EncodedFile(backing, 'utf-8', 'latin1')
        transcoder.write('\xc3\xbc')
        self.assertEqual(backing.getvalue(), '\xfc')
class Str2StrTest(unittest.TestCase):
    """str-to-str codecs (base64) driven through the stream-reader API."""

    def _decoded(self, consume):
        # Helper: encode one high byte with base64_codec, wrap the result in
        # a stream reader and return consume(reader).  (Previously the
        # fixture setup was duplicated in test_read and test_readline.)
        sin = "\x80".encode("base64_codec")
        reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
        return consume(reader)

    def test_read(self):
        sout = self._decoded(lambda reader: reader.read())
        self.assertEqual(sout, "\x80")
        # The result must be a byte string, not unicode.
        self.assertIsInstance(sout, str)

    def test_readline(self):
        # readline() must behave like read() for data without newlines.
        sout = self._decoded(lambda reader: reader.readline())
        self.assertEqual(sout, "\x80")
        self.assertIsInstance(sout, str)
# Encodings expected to round-trip unicode text; entries commented out with
# "FIXME: Jython issue 1066" are codecs this Jython build does not provide.
all_unicode_encodings = [
    "ascii",
    "base64_codec",
    # FIXME: Jython issue 1066: "big5",
    # FIXME: Jython issue 1066: "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    # FIXME: Jython issue 1066: "cp932",
    # FIXME: Jython issue 1066: "cp949",
    # FIXME: Jython issue 1066: "cp950",
    # FIXME: Jython issue 1066: "euc_jis_2004",
    # FIXME: Jython issue 1066: 'euc_jisx0213',
    # FIXME: Jython issue 1066: 'euc_jp',
    # FIXME: Jython issue 1066: 'euc_kr',
    # FIXME: Jython issue 1066: 'gb18030',
    # FIXME: Jython issue 1066: 'gb2312',
    # FIXME: Jython issue 1066: 'gbk',
    "hex_codec",
    "hp_roman8",
    # FIXME: Jython issue 1066: 'hz',
    # FIXME: Jython issue 1066: "idna",
    # FIXME: Jython issue 1066: 'iso2022_jp',
    # FIXME: Jython issue 1066: 'iso2022_jp_1',
    # FIXME: Jython issue 1066: 'iso2022_jp_2',
    # FIXME: Jython issue 1066: 'iso2022_jp_2004',
    # FIXME: Jython issue 1066: 'iso2022_jp_3',
    # FIXME: Jython issue 1066: 'iso2022_jp_ext',
    # FIXME: Jython issue 1066: 'iso2022_kr',
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    # FIXME: Jython issue 1066: 'johab',
    "koi8_r",
    "koi8_u",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "rot_13",
    # FIXME: Jython issue 1066: 'shift_jis',
    # FIXME: Jython issue 1066: 'shift_jis_2004',
    # FIXME: Jython issue 1066: 'shift_jisx0213',
    "tis_620",
    "unicode_escape",
    "unicode_internal",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]

# mbcs is Windows-only; include it only where the platform provides it.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")

# The following encodings work only with str, not unicode
all_string_encodings = [
    "quopri_codec",
    "string_escape",
    "uu_codec",
]

# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"

# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
    "base64_codec",
    "hex_codec",
    "punycode",
    "unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams[:]

# The following encodings only support "strict" mode
only_strict_mode = [
    "idna",
    "zlib_codec",
    "bz2_codec",
]

# bz2/zlib codecs are tested only when the backing modules are importable.
try:
    import bz2
except ImportError:
    pass
else:
    all_unicode_encodings.append("bz2_codec")
    broken_unicode_with_streams.append("bz2_codec")

try:
    import zlib
except ImportError:
    pass
else:
    all_unicode_encodings.append("zlib_codec")
    broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
    """Smoke-test every codec in all_unicode_encodings through the full
    codec API surface: one-shot encode/decode, stream reader/writer,
    incremental coders (Python and C API), and iterencode()/iterdecode()."""

    @unittest.skipIf(test_support.is_jython, "_testcapi module not present in Jython")
    def test_basics(self):
        """Round-trip a short ASCII string through every API of each codec."""
        s = u"abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            # Lookup must normalize to the canonical name (module aliases
            # drop the "_codec" suffix; "latin_1" maps to "iso8859-1").
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            (bytes, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
            (chars, size) = codecs.getdecoder(encoding)(bytes)
            self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
            if encoding not in broken_unicode_with_streams:
                # check stream reader/writer
                q = Queue()
                writer = codecs.getwriter(encoding)(q)
                encodedresult = ""
                for c in s:
                    # Feed one character at a time to exercise any buffering.
                    writer.write(c)
                    encodedresult += q.read()
                q = Queue()
                reader = codecs.getreader(encoding)(q)
                decodedresult = u""
                for c in encodedresult:
                    q.write(c)
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
            if encoding not in broken_incremental_coders:
                # check incremental decoder/encoder (fetched via the Python
                # and C API) and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = ""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    # Final call with final=True flushes any pending state.
                    encodedresult += encoder.encode(u"", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = u""
                    for c in encodedresult:
                        decodedresult += decoder.decode(c)
                    decodedresult += decoder.decode("", True)
                    self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
                    # check C API
                    encodedresult = ""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode(u"", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = u""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(c)
                    decodedresult += cdecoder.decode("", True)
                    self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
                    # check iterencode()/iterdecode()
                    result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
                    # check iterencode()/iterdecode() with empty string
                    result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
                    self.assertEqual(result, u"")
                if encoding not in only_strict_mode:
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                        cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = "".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = u"".join(decoder.decode(c) for c in encodedresult)
                        self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
                        encodedresult = "".join(cencoder.encode(c) for c in s)
                        cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = u"".join(cdecoder.decode(c) for c in encodedresult)
                        self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))

    def test_seek(self):
        """seek(0) on a StreamReader must reset codec state and buffers."""
        # all codecs should be able to encode these
        s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna": # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_streams:
                continue
            reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
            for t in xrange(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                line = reader.readline()
                self.assertEqual(s[:len(line)], line)

    def test_bad_decode_args(self):
        """Decoders must reject missing or wrongly-typed arguments."""
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            # idna/punycode accept more input types, so skip the int check.
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)

    def test_bad_encode_args(self):
        """Encoders must reject a missing argument."""
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)

    def test_encoding_map_type_initialized(self):
        """Importing a charmap codec must initialize its encoding table."""
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
    """Round-trip check for codecs that operate on byte strings only."""

    def test_basics(self):
        payload = "abc123"
        for encoding in all_string_encodings:
            encoded, consumed = codecs.getencoder(encoding)(payload)
            self.assertEqual(consumed, len(payload))
            decoded, consumed = codecs.getdecoder(encoding)(encoded)
            self.assertEqual(decoded, payload,
                             "%r != %r (encoding=%r)" % (decoded, payload, encoding))
class CharmapTest(unittest.TestCase):
    """Exercise codecs.charmap_decode with unicode-string decoding maps."""

    def test_decode_with_string_map(self):
        # (input bytes, error handler, decoding map, expected text) cases.
        # Every input is three bytes, so the consumed count is always 3.
        cases = [
            ("\x00\x01\x02", "strict", u"abc", u"abc"),
            ("\x00\x01\x02", "replace", u"ab", u"ab\ufffd"),
            ("\x00\x01\x02", "replace", u"ab\ufffe", u"ab\ufffd"),
            ("\x00\x01\x02", "ignore", u"ab", u"ab"),
            ("\x00\x01\x02", "ignore", u"ab\ufffe", u"ab"),
        ]
        for raw, errors, mapping, expected in cases:
            self.assertEqual(codecs.charmap_decode(raw, errors, mapping),
                             (expected, 3))
        # An empty map with "ignore" consumes everything and yields nothing.
        allbytes = "".join(chr(i) for i in xrange(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", u""),
            (u"", len(allbytes))
        )
class WithStmtTest(unittest.TestCase):
    """Verify that codec file/stream wrappers work as context managers."""

    def test_encodedfile(self):
        raw = StringIO.StringIO("\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as ef:
            # UTF-8 bytes for u-umlaut recoded to their latin-1 byte.
            self.assertEqual(ef.read(), "\xfc")

    def test_streamreaderwriter(self):
        raw = StringIO.StringIO("\xc3\xbc")
        info = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(raw, info.streamreader,
                                       info.streamwriter, 'strict') as stream:
            self.assertEqual(stream.read(), u"\xfc")
class BomTest(unittest.TestCase):
    """Check BOM emission rules for the UTF-16/UTF-32 family: exactly one
    BOM per file, re-emitted after seek(0), never emitted mid-file."""

    def test_seek0(self):
        data = u"1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be",
                 )
        # Remove the temp file regardless of outcome.
        self.addCleanup(test_support.unlink, test_support.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # Check that the BOM is written after a seek(0)
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
def test_main():
    """Run every codec test case in this module under test_support."""
    test_cases = (
        UTF32Test, UTF32LETest, UTF32BETest,
        UTF16Test, UTF16LETest, UTF16BETest,
        UTF8Test, UTF8SigTest, UTF7Test, UTF16ExTest,
        ReadBufferTest, CharBufferTest, EscapeDecodeTest,
        RecodingTest, PunycodeTest, UnicodeInternalTest,
        NameprepTest, IDNACodecTest, CodecsModuleTest,
        StreamReaderTest, EncodedFileTest, Str2StrTest,
        BasicUnicodeTest, BasicStrTest, CharmapTest,
        WithStmtTest, BomTest,
    )
    test_support.run_unittest(*test_cases)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
# --- extraction metadata (not part of either test module) ---
# Source A: framework/extensions/org.python.jython/Lib/test/test_codecs.py
#   (60,499 bytes) — tail of Jython's copy of CPython 2.7's test_codecs.py.
# The flattened text that originally followed here was dataset residue (the
# file's comments re-dumped without structure) and carried no code. A second,
# unrelated file — TensorFlow's Adam optimizer test — begins below with its
# own license header.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      lr=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-7):
  """NumPy reference implementation of one dense Adam step.

  Args:
    param: current parameter values.
    g_t: gradient at step `t`.
    t: zero-based step number (bias correction uses t + 1).
    m: first-moment estimate from the previous step.
    v: second-moment estimate from the previous step.
    lr: base learning rate.
    beta1: exponential decay rate for the first moment.
    beta2: exponential decay rate for the second moment.
    epsilon: small constant added to the denominator for stability.

  Returns:
    Tuple of (updated param, updated m, updated v).
  """
  step = t + 1
  # Bias-corrected effective learning rate for this step.
  alpha = lr * np.sqrt(1 - beta2**step) / (1 - beta1**step)
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  param_t = param - alpha * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t
def adam_update_numpy_amsgrad(param,
                              g_t,
                              t,
                              m,
                              v,
                              vhat,
                              lr=0.001,
                              beta1=0.9,
                              beta2=0.999,
                              epsilon=1e-7):
  """NumPy reference implementation of one dense AMSGrad Adam step.

  Identical to `adam_update_numpy` except that the denominator uses the
  running elementwise maximum of the second-moment estimate (`vhat`).

  Args:
    param: current parameter values.
    g_t: gradient at step `t`.
    t: zero-based step number (bias correction uses t + 1).
    m: first-moment estimate from the previous step.
    v: second-moment estimate from the previous step.
    vhat: running max of the second-moment estimate so far.
    lr: base learning rate.
    beta1: exponential decay rate for the first moment.
    beta2: exponential decay rate for the second moment.
    epsilon: small constant added to the denominator for stability.

  Returns:
    Tuple of (updated param, updated m, updated v, updated vhat).
  """
  step = t + 1
  alpha = lr * np.sqrt(1 - beta2**step) / (1 - beta1**step)
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  # AMSGrad: never let the denominator shrink between steps.
  vhat_t = np.maximum(vhat, v_t)
  param_t = param - alpha * m_t / (np.sqrt(vhat_t) + epsilon)
  return param_t, m_t, v_t, vhat_t
def adam_sparse_update_numpy_amsgrad(param,
                                     indices,
                                     g_t,
                                     t,
                                     m,
                                     v,
                                     vhat,
                                     lr=0.001,
                                     beta1=0.9,
                                     beta2=0.999,
                                     epsilon=1e-7):
  """NumPy reference implementation of one sparse AMSGrad Adam step.

  Only the rows selected by `indices` receive gradients; all other rows of
  `param`, `m` and `v` keep their previous values.

  Args:
    param: current parameter values, first axis indexed by `indices`.
    indices: integer indices of the rows that received gradients.
    g_t: gradient values for the selected rows.
    t: zero-based step number (bias correction uses t + 1).
    m: first-moment estimate from the previous step.
    v: second-moment estimate from the previous step.
    vhat: running elementwise max of the second-moment estimate so far.
    lr: base learning rate.
    beta1: exponential decay rate for the first moment.
    beta2: exponential decay rate for the second moment.
    epsilon: small constant added to the denominator for stability.

  Returns:
    Tuple of (updated param, updated m, updated v, updated vhat).
  """
  m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param)
  lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
  m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
  v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t
  m_t[indices] = m_t_slice
  v_t[indices] = v_t_slice
  # AMSGrad: the denominator uses the running elementwise max of v.
  vhat_t = np.maximum(vhat, v_t)
  vhat_t_slice = vhat_t[indices]
  param_t_slice = param[indices] - (
      lr_t * (m_t_slice / (np.sqrt(vhat_t_slice) + epsilon)))
  param_t[indices] = param_t_slice
  # Bug fix: previously this returned a stale copy of the incoming `vhat`
  # instead of the updated maximum, so callers feeding the result back in
  # lost the AMSGrad running max between steps.
  return param_t, m_t, v_t, vhat_t
def get_beta_accumulators(opt, dtype):
  """Return (beta_1^step, beta_2^step) for the optimizer's current step.

  Recomputes the power accumulators from the optimizer's hyperparameters and
  its local step (iterations + 1), cast to `dtype`.
  """
  step = math_ops.cast(opt.iterations + 1, dtype)
  powers = []
  for hyper_name in ("beta_1", "beta_2"):
    beta = math_ops.cast(opt._get_hyper(hyper_name), dtype)
    powers.append(math_ops.pow(beta, step))
  return tuple(powers)
class AdamOptimizerTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testSparse(self):
    """Sparse (IndexedSlices) gradients match the dense NumPy reference."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(var0_np)
        var1 = resource_variable_ops.ResourceVariable(var1_np)
        # Only rows 0 and 2 receive gradients; row 1's dense gradient is 0.0,
        # so the dense NumPy reference stays equivalent to the sparse update.
        grads0_np_indices = np.array([0, 2], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np[grads0_np_indices]),
            constant_op.constant(grads0_np_indices), constant_op.constant([3]))
        grads1_np_indices = np.array([0, 2], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np[grads1_np_indices]),
            constant_op.constant(grads1_np_indices), constant_op.constant([3]))
        opt = adam.Adam()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
        beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of Adam
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  @test_util.run_deprecated_v1
  def testSparseDevicePlacement(self):
    """All sparse-update optimizer ops can run on the GPU when one exists."""
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with self.cached_session(force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
        optimizer = adam.Adam(3.0)
        minimize_op = optimizer.minimize(g_sum, var_list=[var])
        variables.global_variables_initializer().run()
        # Success criterion is simply that this runs without a placement error.
        minimize_op.run()
  @test_util.run_deprecated_v1
  def testSparseRepeatedIndices(self):
    """Repeated indices in IndexedSlices behave like pre-aggregated grads."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        # Two 0.1 gradients on row 1 ...
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        # ... must have the same effect as a single 0.2 gradient on row 1.
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adam.Adam().apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adam.Adam().apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            self.evaluate(repeated_index_update_var))
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              self.evaluate(repeated_index_update_var))
  def doTestBasic(self, use_callable_params=False):
    """Shared body for the basic dense tests: 3 Adam steps vs. NumPy.

    Args:
      use_callable_params: if True, keep the hyperparameters as zero-arg
        callables instead of evaluating them to constants first.
    """
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph()):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        learning_rate = lambda: 0.001
        beta1 = lambda: 0.9
        beta2 = lambda: 0.999
        epsilon = lambda: 1e-8
        if not use_callable_params:
          learning_rate = learning_rate()
          beta1 = beta1()
          beta2 = beta2()
          epsilon = epsilon()
        # NOTE(review): only learning_rate is forwarded to the optimizer;
        # beta1/beta2/epsilon above are unused and Adam's defaults apply.
        opt = adam.Adam(learning_rate=learning_rate)
        if not context.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 3 steps of Adam
        for t in range(3):
          beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          if not context.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testResourceBasic(self):
    """Basic dense correctness with constant hyperparameters."""
    self.doTestBasic()
  def testBasicCallableParams(self):
    """Basic dense correctness with callable hyperparameters (eager only)."""
    with context.eager_mode():
      self.doTestBasic(use_callable_params=True)
  @test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testBasicWithAmsgrad(self):
    """Dense Adam with amsgrad=True matches the NumPy AMSGrad reference."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph()):
        # Initialize variables for numpy implementation.
        m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.Adam(amsgrad=True)
        if not context.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 3 steps of Adam
        for t in range(3):
          beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          if not context.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
              var0_np, grads0_np, t, m0, v0, v0hat)
          var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
              var1_np, grads1_np, t, m1, v1, v1hat)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes
  def testSparseWithAmsgrad(self):
    """Sparse AMSGrad: repeated indices aggregate and match the reference."""
    # dtypes.half does not work on gpu + eager.
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.cached_session():
        m0 = np.array([[0.0], [0.0]])
        v0 = np.array([[0.0], [0.0]])
        v0hat = np.array([[0.0], [0.0]])
        indices_np = np.array([1])
        indices = constant_op.constant(indices_np, dtype=dtypes.int32)
        var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
        repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
        aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
        grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
        # Two 0.1 grads on row 1 must equal a single aggregated 0.2 grad.
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]), constant_op.constant([2, 1]))
        grad_aggregated = ops.IndexedSlices(grads0_np, indices,
                                            constant_op.constant([2, 1]))
        opt_repeated = adam.Adam(amsgrad=True)
        opt_aggregated = adam.Adam(amsgrad=True)
        if not context.executing_eagerly():
          repeated_update = opt_repeated.apply_gradients(
              [(grad_repeated_index, repeated_index_update_var)])
          aggregated_update = opt_aggregated.apply_gradients(
              [(grad_aggregated, aggregated_update_var)])
        self.evaluate(variables.global_variables_initializer())
        self.assertAllClose(
            self.evaluate(aggregated_update_var),
            self.evaluate(repeated_index_update_var))
        for t in range(3):
          if not context.executing_eagerly():
            self.evaluate(repeated_update)
            self.evaluate(aggregated_update)
          else:
            opt_repeated.apply_gradients(
                [(grad_repeated_index, repeated_index_update_var)])
            opt_aggregated.apply_gradients(
                [(grad_aggregated, aggregated_update_var)])
          var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
              var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
          # Validate updated params
          self.assertAllCloseAccordingToType(
              var0_np, self.evaluate(aggregated_update_var))
          self.assertAllCloseAccordingToType(
              self.evaluate(aggregated_update_var),
              self.evaluate(repeated_index_update_var))
  @test_util.run_deprecated_v1
  def testBasicWithLearningRateDecay(self):
    """With `decay`, step t is expected to use lr / (1 + decay * t)."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph()):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        learning_rate = 0.001
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 1e-7
        decay = 0.5
        opt = adam.Adam(
            learning_rate=learning_rate,
            beta_1=beta_1,
            beta_2=beta_2,
            epsilon=epsilon,
            decay=decay)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 3 steps of Adam
        for t in range(3):
          self.evaluate(update)
          # Decayed rate the optimizer is expected to have used at step t.
          lr_np = learning_rate / (1 + decay * t)
          var0_np, m0, v0 = adam_update_numpy(
              var0_np, grads0_np, t, m0, v0, lr=lr_np)
          var1_np, m1, v1 = adam_update_numpy(
              var1_np, grads1_np, t, m1, v1, lr=lr_np)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  @test_util.run_deprecated_v1
  def testTensorLearningRate(self):
    """The learning rate may be passed as a tensor instead of a float."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        # Learning rate supplied as a constant tensor, not a Python float.
        opt = adam.Adam(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of Adam
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  @test_util.run_deprecated_v1
  def testSharing(self):
    """Two apply_gradients ops from one optimizer share the same slots."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.Adam()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          # Alternate between the two update ops; the trajectory must match
          # a single op applied every step since they share accumulators.
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam.Adam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertEqual(5, len(set(opt.variables())))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
def testSetWeightsFromV1AdamWithoutMinimize(self):
keras_v1_adam = optimizers.Adam()
keras_v2_adam = adam.Adam()
keras_v2_adam.set_weights(keras_v1_adam.get_weights())
keras_v1_iteration = keras_v1_adam.iterations
keras_v2_iteration = keras_v2_adam.iterations
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration))
def testConstructAdamWithLR(self):
opt = adam.Adam(lr=1.0)
opt_2 = adam.Adam(learning_rate=0.1, lr=1.0)
opt_3 = adam.Adam(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testConstructAdamWithEpsilonValues(self):
opt = adam.Adam(epsilon=None)
config = opt.get_config()
self.assertEqual(config["epsilon"], 1e-7)
opt = adam.Adam(epsilon=1e-8)
config = opt.get_config()
self.assertEqual(config["epsilon"], 1e-8)
# Run the TensorFlow test runner when executed directly as a script.
if __name__ == "__main__":
  test.main()
| tensorflow/python/keras/optimizer_v2/adam_test.py | 22,978 | Tests for Adam.
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Initialize variables for numpy implementation. Fetch params to validate initial values Run 3 steps of Adam Validate updated params If a GPU is available, tests that all optimizer ops can be placed on it (i.e. they have GPU kernels). pylint: disable=cell-var-from-loop Initialize variables for numpy implementation. Run 3 steps of Adam Validate updated params Initialize variables for numpy implementation. Run 3 steps of Adam Validate updated params dtypes.half does not work on gpu + eager. Validate updated params Initialize variables for numpy implementation. Run 3 steps of Adam Validate updated params Initialize variables for numpy implementation. Fetch params to validate initial values Run 3 steps of Adam Validate updated params Initialize variables for numpy implementation. Fetch params to validate initial values Run 3 steps of intertwined Adam1 and Adam2. Validate updated params There should be iteration, and two unique slot variables for v1 and v2. | 1,643 | en | 0.688031 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response hook accepted by every operation:
# it receives the pipeline response, the deserialized body, and a headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ContainerGroupsOperations:
"""ContainerGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerinstance.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the shared pipeline client, configuration and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ContainerGroupListResult"]:
        """Get a list of container groups in the specified subscription.
        Get a list of container groups in the specified subscription. This operation returns properties
        of each container group including containers, image registry credentials, restart policy, IP
        address type, OS type, state, and volumes.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the templated URL for the first page, or
            # the service-provided `next_link` verbatim for subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation link, async list of items).
            deserialized = self._deserialize('ContainerGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Run one page request through the pipeline; 200 is the only success code.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/containerGroups'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ContainerGroupListResult"]:
        """Get a list of container groups in the specified subscription and resource group.
        Get a list of container groups in a specified subscription and resource group. This operation
        returns properties of each container group including containers, image registry credentials,
        restart policy, IP address type, OS type, state, and volumes.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages reuse `next_link` as-is.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation link, async list of items).
            deserialized = self._deserialize('ContainerGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Run one page request through the pipeline; 200 is the only success code.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        container_group_name: str,
        **kwargs: Any
    ) -> "_models.ContainerGroup":
        """Get the properties of the specified container group.
        Gets the properties of the specified container group in the specified subscription and resource
        group. The operation returns the properties of each container group including containers, image
        registry credentials, restart policy, IP address type, OS type, state, and volumes.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param container_group_name: The name of the container group.
        :type container_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ContainerGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.containerinstance.models.ContainerGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only HTTP 200 is a success for GET; anything else maps to an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ContainerGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        container_group_name: str,
        container_group: "_models.ContainerGroup",
        **kwargs: Any
    ) -> "_models.ContainerGroup":
        """Issue the initial PUT of the create-or-update long-running operation.
        Polling of the operation is handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(container_group, 'ContainerGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry a ContainerGroup body.
        if response.status_code == 200:
            deserialized = self._deserialize('ContainerGroup', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ContainerGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        container_group_name: str,
        container_group: "_models.ContainerGroup",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ContainerGroup"]:
        """Create or update container groups.
        Create or update container groups with specified configurations.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param container_group_name: The name of the container group.
        :type container_group_name: str
        :param container_group: The properties of the container group to be created or updated.
        :type container_group: ~azure.mgmt.containerinstance.models.ContainerGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT. `cls=lambda x,y,z: x`
            # preserves the raw pipeline response for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                container_group_name=container_group_name,
                container_group=container_group,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('ContainerGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from the saved token instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}'}  # type: ignore
    async def update(
        self,
        resource_group_name: str,
        container_group_name: str,
        resource: "_models.Resource",
        **kwargs: Any
    ) -> "_models.ContainerGroup":
        """Update container groups.
        Updates container group tags with specified values.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param container_group_name: The name of the container group.
        :type container_group_name: str
        :param resource: The container group resource with just the tags to be updated.
        :type resource: ~azure.mgmt.containerinstance.models.Resource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ContainerGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.containerinstance.models.ContainerGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # The body is serialized as a bare Resource (tags only), not a full ContainerGroup.
        body_content = self._serialize.body(resource, 'Resource')
        body_content_kwargs['content'] = body_content
        # This is a synchronous (non-LRO) PATCH request.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ContainerGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        container_group_name: str,
        **kwargs: Any
    ) -> Optional["_models.ContainerGroup"]:
        """Issue the initial DELETE of the delete long-running operation.
        Returns the container group's last state on HTTP 200; 202/204 carry no
        body and yield ``None``. Polling is handled by :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ContainerGroup"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ContainerGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        container_group_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ContainerGroup"]:
        """Delete the specified container group.
        Delete the specified container group in the specified subscription and resource group. The
        operation does not delete other resources provided by the user, such as volumes.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param container_group_name: The name of the container group.
        :type container_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ContainerGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerinstance.models.ContainerGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial DELETE, keeping the raw
            # pipeline response for the poller via the identity `cls` hook.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                container_group_name=container_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('ContainerGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from the saved token instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}'}  # type: ignore
    async def _restart_initial(
        self,
        resource_group_name: str,
        container_group_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial POST of the restart long-running operation.
        The service replies 204 with no body; polling is handled by
        :meth:`begin_restart`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"
        # Construct URL
        url = self._restart_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/restart'}  # type: ignore
    async def begin_restart(
        self,
        resource_group_name: str,
        container_group_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Restarts all containers in a container group.
        Restarts all containers in a container group in place. If container image has updates, new
        image will be downloaded.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param container_group_name: The name of the container group.
        :type container_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial POST, keeping the raw
            # pipeline response for the poller via the identity `cls` hook.
            raw_result = await self._restart_initial(
                resource_group_name=resource_group_name,
                container_group_name=container_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Restart produces no body; only the optional `cls` hook is invoked.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from the saved token instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/restart'}  # type: ignore
async def stop(
self,
resource_group_name: str,
container_group_name: str,
**kwargs: Any
) -> None:
"""Stops all containers in a container group.
Stops all containers in a container group. Compute resources will be deallocated and billing
will stop.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param container_group_name: The name of the container group.
:type container_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.stop.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/stop'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
container_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/start'} # type: ignore
    async def begin_start(
        self,
        resource_group_name: str,
        container_group_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Starts all containers in a container group.

        Starts all containers in a container group. Compute resources will be allocated and billing
        will start.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param container_group_name: The name of the container group.
        :type container_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop all poller-related kwargs before forwarding the rest to the initial request.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._start_initial(
                resource_group_name=resource_group_name,
                container_group_name=container_group_name,
                cls=lambda x,y,z: x,  # hand the raw pipeline response to the poller
                **kwargs
            )

        # These were consumed by the initial request; keep them out of the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization hook: the operation has no body, so only the
            # optional custom `cls` callback is invoked.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'containerGroupName': self._serialize.url("container_group_name", container_group_name, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller; no new initial request was made.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/start'}  # type: ignore
| sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_container_groups_operations.py | 42,428 | ContainerGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerinstance.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
Get a list of container groups in the specified subscription.
Get a list of container groups in the specified subscription. This operation returns properties
of each container group including containers, image registry credentials, restart policy, IP
address type, OS type, state, and volumes.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
Get a list of container groups in the specified subscription and resource group.
Get a list of container groups in a specified subscription and resource group. This operation
returns properties of each container group including containers, image registry credentials,
restart policy, IP address type, OS type, state, and volumes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.ContainerGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- type: ClsType["_models.ContainerGroupListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.ContainerGroupListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.ContainerGroup"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: ClsType["_models.ContainerGroup"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.ContainerGroup"] type: Optional[str] type: ignore type: ClsType["_models.ContainerGroup"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType[Optional["_models.ContainerGroup"]] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.ContainerGroup"] type: Optional[str] type: ignore type: ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType[None] type: Optional[str] type: ignore type: 
ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType[None] type: Optional[str] type: ignore | 4,291 | en | 0.440757 |
import cmath
import math
from typing import Union

import Geometry
from Geometry import Line
from Geometry import Point
class Circle:
    """A circle in the Euclidean plane, defined by a center ``Point`` and a radius.

    The center may be supplied as a ``Point`` or as a 2-element tuple/list.
    """

    def __init__(self, center: "Union[Point, tuple, list]", radius: float):
        # Accept (x, y) pairs for convenience and normalize them to a Point.
        if isinstance(center, (tuple, list)):
            assert len(center) == 2, "Center must be a 2-tuple or list"
            center = Point(center[0], center[1])
        self.center = center
        self.radius = radius

    def area(self) -> float:
        """Return the enclosed area, pi * r**2."""
        return math.pi * self.radius ** 2

    def circumference(self) -> float:
        """Return the perimeter length, 2 * pi * r."""
        return 2 * math.pi * self.radius

    def tangent(self, p: "Point") -> "Line":
        """Return the tangent line at ``p`` (assumed to lie on the circle)."""
        x, y = p.x, p.y
        c_x, c_y = -self.center.x, -self.center.y
        return Line(x + c_x, y + c_y, x * c_x + y * c_y + c_x ** 2 + c_y ** 2 - self.radius ** 2)

    def normal(self, p: "Point") -> "Line":
        """Return the normal line at ``p``: the line through ``p`` and the center."""
        return Line.construct(p, self.center)

    def power(self, p: "Point") -> float:
        """Return the power of point ``p``: distance(center, p)**2 - r**2."""
        return Geometry.distance(self.center, p) ** 2 - self.radius ** 2

    def is_tangent(self, l: "Line") -> bool:
        """Return True if line ``l`` is tangent to the circle.

        Fix: uses a tolerance-aware comparison; exact float ``==`` on a
        computed distance almost never holds.
        """
        return math.isclose(l.distance(self.center), self.radius)

    def is_normal(self, l: "Line") -> bool:
        """Return True if line ``l`` passes through the center."""
        return l(self.center) == 0

    def equation(self) -> str:
        """Return the expanded cartesian equation of the circle as a string."""
        (x, y) = self.center  # relies on Point being unpackable into (x, y)
        return f"x^2 + 2*{-x}*x + 2*{-y}*y + y^2 + {x**2 + y**2 - self.radius**2} = 0"

    def parametric_equation(self, theta_resolution: float = 0.01, semi: bool = False):
        """Yield (x, y) samples along the circle (half of it when ``semi``).

        Fix: real-valued ``math`` trig replaces ``cmath`` so the generator
        yields float coordinate pairs instead of complex numbers.
        """
        theta = 0.0
        limit = math.pi if semi else 2 * math.pi
        while theta < limit:
            yield (self.center.x + self.radius * math.cos(theta),
                   self.center.y + self.radius * math.sin(theta))
            theta += theta_resolution

    def sector_length(self, theta: float) -> float:
        """Returns the length of a sector of the circle which subtended angle theta(radians) at center."""
        return self.radius * theta

    def sector_area(self, theta: float) -> float:
        """Returns the area of a sector of the circle which subtended angle theta(radians) at center."""
        return self.radius ** 2 * theta / 2

    def intersetion(self, other) -> "Union[Point, None]":
        """Return the touching point, the pair of intersection points, or None.

        NOTE(review): the intersecting-case angle ``asin(r2 / d)`` does not
        follow from the law of cosines, and ``Geometry.slope`` can raise
        ZeroDivisionError for vertically aligned centers — confirm the
        intended geometry before relying on this method. Behaviour kept as-is.
        """
        if isinstance(other, Circle):
            c1 = self.center
            c2 = other.center
            m = Geometry.slope(c1, c2)
            theta = cmath.atan(m)
            d = Geometry.distance(c1, c2)
            if d == self.radius + other.radius:
                # The circles touch externally at a single point.
                x = c1.x + self.radius * cmath.cos(theta)
                y = c1.y + self.radius * cmath.sin(theta)
                return Point(x, y)
            elif d < self.radius + other.radius:
                # The circles overlap: two intersection points, mirror images
                # across the line joining the centers.
                r1 = self.radius
                r2 = other.radius
                theta = cmath.asin(r2 / d)
                x = c1.x + r1 * cmath.cos(theta)
                y = c1.y + r1 * cmath.sin(theta)
                p1 = Point(x, y)
                l = Line.construct(c1, c2)
                p2 = l.image(p1)
                return (p1, p2)
            else:
                return None
        else:
            raise ValueError("Can only intersect with another circle")

    # Correctly spelled alias; the misspelled name is kept for existing callers.
    intersection = intersetion

    def __repr__(self):
        return 'Circle(center={0}, radius={1})'.format(self.center, self.radius)

    def __eq__(self, other):
        if isinstance(other, Circle):
            return self.center == other.center and self.radius == other.radius
        else:
            return False

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # NOTE(review): instances are mutable, so hashing on (center, radius)
        # is only safe if circles are never mutated after use as dict/set keys.
        return hash((self.center, self.radius))

    def __str__(self):
        return 'Circle(center={0}, radius={1})'.format(self.center, self.radius)
def construct(p0: Point, p1: Point, p2: Point) -> Circle:
    """Build the unique circle passing through three non-colinear points.

    The center is the meeting point of two perpendicular bisectors of the
    triangle's sides; the radius is its distance to any of the points.
    """
    try:
        assert not Geometry.colinear(p0, p1, p2)
    except AssertionError:
        raise AssertionError("Circle can not be constructed from three points that are colinear")
    bisector_a = Geometry.perpendicular_bisector(p0, p1)
    bisector_b = Geometry.perpendicular_bisector(p1, p2)
    center = bisector_a.intersection(bisector_b)
    return Circle(center, Geometry.distance(center, p0))
| Geometry/circle.py | 4,442 | Returns the area of a sector of the circle which subtended angle theta(radians) at center.
Returns the length of a sector of the circle which subtended angle theta(radians) at center.
try: m = Geometry.slope(self.center, p) except ZeroDivisionError: return Line(0, 1, -p.y) if m == 0: return Line(1, 0, -p.x) m = -1/m return Geometry.slope_point_line(m, p) | 372 | en | 0.556023 |
from collections import OrderedDict
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from easygraphics.dialog._indexed_order_list import IndexedOrderedDict
__all__ = ['MultipleFieldsDialog']
class MultipleFieldsDialog(QtWidgets.QDialog):
    """Dialog with multiple fields stored in a dict, with the label
    being the key and the entry being the corresponding value.

    After the dialog is confirmed, retrieve the results with
    :meth:`get_ordered_dict`.
    """

    def __init__(self, labels=None, title="Demo", masks=None):
        """Build the field grid.

        :param labels: list of field labels; None shows a two-field demo.
        :param title: window title.
        :param masks: optional per-field booleans; True masks input (password).
        """
        super(MultipleFieldsDialog, self).__init__(None,
                                                   QtCore.Qt.WindowSystemMenuHint |
                                                   QtCore.Qt.WindowTitleHint)
        self.enters = IndexedOrderedDict()
        self.setWindowTitle(title)
        # set up a special case for quick demo
        if labels is None:
            labels = ["Regular field", "Masked field"]
            masks = [False, True]
            self.setWindowTitle("MultipleFieldsDialog demo")
        if masks is not None:
            assert len(masks) == len(labels)
        layout = QtWidgets.QGridLayout()
        layout.setColumnStretch(1, 1)
        layout.setColumnMinimumWidth(1, 250)
        self._labels_ = []
        self.fields = []
        for index, choice in enumerate(labels):
            self._labels_.append(QtWidgets.QLabel())
            self._labels_[index].setText(choice)
            self.fields.append(QtWidgets.QLineEdit())
            self.fields[index].setText('')
            self.enters[choice] = ''
            if masks is not None and masks[index]:
                self.fields[index].setEchoMode(QtWidgets.QLineEdit.Password)
            layout.addWidget(self._labels_[index], index, 0)
            layout.addWidget(self.fields[index], index, 1)
        button_box = QtWidgets.QDialogButtonBox()
        confirm_button = button_box.addButton(QtWidgets.QDialogButtonBox.Ok)
        # NOTE: assumes at least one label; `index` from the loop above is
        # reused to place the button row below the last field.
        layout.addWidget(button_box, index + 1, 1)
        confirm_button.clicked.connect(self.confirm)
        self.setLayout(layout)
        self.setWindowTitle(title)
        self.show()
        self.raise_()

    def confirm(self):
        """Selection completed, set the value and close"""
        o_dict = self.enters
        for index, item in enumerate(self._labels_):
            o_dict[item.text()] = self.fields[index].text()
        self.close()

    def get_ordered_dict(self):
        """Return the label -> entered-text mapping.

        Fix: this accessor is called by the ``__main__`` demo but was never
        defined, raising AttributeError.
        """
        return self.enters
if __name__ == '__main__':
    # Manual smoke test: shows the default two-field demo dialog and prints
    # whatever the user entered.
    app = QtWidgets.QApplication([])
    dialog = MultipleFieldsDialog()
    dialog.exec_()
    # NOTE(review): `get_ordered_dict` is not defined on MultipleFieldsDialog
    # in this file — confirm the intended accessor (entries live in
    # `dialog.enters`).
    print(dialog.get_ordered_dict())
| easygraphics/dialog/multifields.py | 2,501 | Dialog with multiple fields stored in a dict, with the label
being the key and the entry being the corresponding value
Selection completed, set the value and close
set up a special case for quick demo | 202 | en | 0.884103 |
from musicautobot.numpy_encode import *
from musicautobot.config import *
from musicautobot.music_transformer import *
from musicautobot.utils.midifile import *
from musicautobot.utils.file_processing import process_all
from musicautobot.numpy_encode import *
from musicautobot.config import *
from musicautobot.music_transformer import *
from musicautobot.utils.midifile import *
from musicautobot.utils.file_processing import process_all
import random
def create_databunch(files, data_save_name, path):
    """Return a cached MusicDataBunch if present, otherwise build and cache one.

    :param files: midi/numpy file paths to encode when no cache exists.
    :param data_save_name: pickle filename of the cached databunch under `path`.
    :param path: base directory (pathlib.Path) for the cache.
    """
    save_file = path / data_save_name
    if save_file.exists():
        data = load_data(path, data_save_name)
    else:
        save_file.parent.mkdir(exist_ok=True, parents=True)
        # Fix: dropped the unused `vocab = MusicVocab.create()` local —
        # MusicDataBunch.from_files derives its vocab from the processors.
        processors = [OpenNPFileProcessor(), MusicItemProcessor()]
        data = MusicDataBunch.from_files(files, path, processors=processors, encode_position=True)
        data.save(data_save_name)
    return data
def timeout_func(data, seconds):
    """Callback invoked when processing `data` exceeds the allotted time budget."""
    print('Timeout:', seconds)
def process_metadata(midi_file):
    """Encode one midi file to .npy under numpy_path, mirroring its relative path.

    Skips work when the target file already exists or encoding fails.
    """
    target = numpy_path / midi_file.relative_to(midi_path).with_suffix('.npy')
    target.parent.mkdir(parents=True, exist_ok=True)
    if target.exists():
        return
    encoded = transform_midi(midi_file)
    if encoded is None:
        return
    np.save(target, encoded)
def transform_midi(midi_file):
    """Encode a midi file to a numpy note encoding, or None if it is unusable.

    Uses the module-level `min_variation` and `cutoff` settings; `traceback`
    is presumably provided by the star imports above — TODO confirm.
    """
    input_path = midi_file
    # Part 1: Filter out midi tracks (drums, repetitive instruments, etc.)
    try:
        # if duet_only and num_piano_tracks(input_path) not in [1, 2]: return None
        input_file = compress_midi_file(input_path, min_variation=min_variation, cutoff=cutoff) # remove non note tracks and standardize instruments
        if input_file is None: return None
    except Exception as e:
        # Known parser failure modes are matched by message text and skipped silently.
        if 'badly form' in str(e): return None # ignore badly formatted midi errors
        if 'out of range' in str(e): return None # ignore badly formatted midi errors
        print('Error parsing midi', input_path, e)
        return None
    # Part 2. Compress rests and long notes
    stream = file2stream(input_file) # 1.
    try:
        chordarr = stream2chordarr(stream) # 2. max_dur = quarter_len * sample_freq (4). 128 = 8 bars
    except Exception as e:
        print('Could not encode to chordarr:', input_path, e)
        print(traceback.format_exc())
        return None
    # Part 3. Compress song rests - Don't want songs with really long pauses
    # (this happens because we filter out midi tracks).
    chord_trim = trim_chordarr_rests(chordarr)
    chord_short = shorten_chordarr_rests(chord_trim)
    delta_trim = chord_trim.shape[0] - chord_short.shape[0]
    # if delta_trim > 500:
    #     print(f'Removed {delta_trim} rests from {input_path}. Skipping song')
    #     return None
    chordarr = chord_short
    # Part 4. Chord array to numpy, rejecting encodings that fail validation.
    npenc = chordarr2npenc(chordarr)
    if not is_valid_npenc(npenc, input_path=input_path):
        return None
    return npenc
# Location of your midi files
midi_path = Path('data/midi/TPD')
# Location of preprocessed numpy files
numpy_path = Path('data/numpy/preprocessed data')
# Location of models and cached dataset
data_path = Path('data/cached')
data_save_name = 'TPD_musicitem_data_save.pkl'

# num_tracks = [1, 2] # number of tracks to support
cutoff = 5 # max instruments
min_variation = 3 # minimum number of different midi notes played
# max_dur = 128

midi_files = get_files(midi_path, '.mid', recurse=True)

print('Loading model...')
batch_size = 1
encode_position = True
# When positional encoding is on, add the batch transform that supplies it.
dl_tfms = [batch_position_tfm] if encode_position else []
data = load_data(data_path, data_save_name, bs=batch_size, encode_position=encode_position, dl_tfms=dl_tfms)

config = default_config()
config['encode_position'] = encode_position
learn = music_model_learner(data, config=config.copy())
# Train for 4 epochs with the one-cycle policy, then persist the weights.
learn.fit_one_cycle(4)
learn.save('TPD_model')
| transformer code/train.py | 3,930 | Get outfile and check if it exists Part 1: Filter out midi tracks (drums, repetitive instruments, etc.) if duet_only and num_piano_tracks(input_path) not in [1, 2]: return None remove non note tracks and standardize instruments ignore badly formatted midi errors ignore badly formatted midi errors Part 2. Compress rests and long notes 1. 2. max_dur = quarter_len * sample_freq (4). 128 = 8 bars Part 3. Compress song rests - Don't want songs with really long pauses (this happens because we filter out midi tracks). if delta_trim > 500: print(f'Removed {delta_trim} rests from {input_path}. Skipping song') return None Part 3. Chord array to numpy Location of your midi files Location of preprocessed numpy files Location of models and cached dataset num_tracks = [1, 2] number of tracks to support max instruments minimum number of different midi notes played max_dur = 128 | 906 | en | 0.80039 |
import logging
from typing import Dict, List, Tuple
import aiosqlite
from btcgreen.server.address_manager import (
BUCKET_SIZE,
NEW_BUCKET_COUNT,
NEW_BUCKETS_PER_ADDRESS,
AddressManager,
ExtendedPeerInfo,
)
log = logging.getLogger(__name__)
class AddressManagerStore:
    """
    Metadata table:
    - private key
    - new table count
    - tried table count
    Nodes table:
    * Maps entries from new/tried table to unique node ids.
    - node_id
    - IP, port, together with the IP, port of the source peer.
    New table:
    * Stores node_id, bucket for each occurrence in the new table of an entry.
    * Once we know the buckets, we can also deduce the bucket positions.
    Every other information, such as tried_matrix, map_addr, map_info, random_pos,
    be deduced and it is not explicitly stored, instead it is recalculated.
    """

    # Open aiosqlite connection; supplied by `create` and shared with the caller.
    db: aiosqlite.Connection

    @classmethod
    async def create(cls, connection) -> "AddressManagerStore":
        """Wrap `connection` and ensure the three backing tables exist."""
        self = cls()
        self.db = connection
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_metadata(key text,value text)")
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_nodes(node_id int,value text)")
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_new_table(node_id int,bucket int)")
        await self.db.commit()
        return self

    async def clear(self) -> None:
        """Delete every row from all three tables in a single commit."""
        cursor = await self.db.execute("DELETE from peer_metadata")
        await cursor.close()
        cursor = await self.db.execute("DELETE from peer_nodes")
        await cursor.close()
        cursor = await self.db.execute("DELETE from peer_new_table")
        await cursor.close()
        await self.db.commit()

    async def get_metadata(self) -> Dict[str, str]:
        """Return the metadata table as a key -> value dict."""
        cursor = await self.db.execute("SELECT key, value from peer_metadata")
        metadata = await cursor.fetchall()
        await cursor.close()
        return {key: value for key, value in metadata}

    async def is_empty(self) -> bool:
        """Return True when no serialized address manager is stored."""
        metadata = await self.get_metadata()
        # Without a serialized private key there is nothing to restore.
        if "key" not in metadata:
            return True
        if int(metadata.get("new_count", 0)) > 0:
            return False
        if int(metadata.get("tried_count", 0)) > 0:
            return False
        return True

    async def get_nodes(self) -> List[Tuple[int, ExtendedPeerInfo]]:
        """Return all (node_id, ExtendedPeerInfo) rows from the nodes table."""
        cursor = await self.db.execute("SELECT node_id, value from peer_nodes")
        nodes_id = await cursor.fetchall()
        await cursor.close()
        return [(node_id, ExtendedPeerInfo.from_string(info_str)) for node_id, info_str in nodes_id]

    async def get_new_table(self) -> List[Tuple[int, int]]:
        """Return all (node_id, bucket) occurrence rows of the new table."""
        cursor = await self.db.execute("SELECT node_id, bucket from peer_new_table")
        entries = await cursor.fetchall()
        await cursor.close()
        return [(node_id, bucket) for node_id, bucket in entries]

    async def set_metadata(self, metadata) -> None:
        """Upsert an iterable of (key, value) pairs into peer_metadata."""
        for key, value in metadata:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_metadata VALUES(?, ?)",
                (key, value),
            )
            await cursor.close()
        await self.db.commit()

    async def set_nodes(self, node_list) -> None:
        """Upsert (node_id, ExtendedPeerInfo) pairs into peer_nodes."""
        for node_id, peer_info in node_list:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_nodes VALUES(?, ?)",
                (node_id, peer_info.to_string()),
            )
            await cursor.close()
        await self.db.commit()

    async def set_new_table(self, entries) -> None:
        """Upsert (node_id, bucket) occurrence pairs into peer_new_table."""
        for node_id, bucket in entries:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_new_table VALUES(?, ?)",
                (node_id, bucket),
            )
            await cursor.close()
        await self.db.commit()

    async def serialize(self, address_manager: AddressManager):
        """Persist `address_manager`, renumbering nodes compactly.

        New-table entries are numbered first (0..new_count-1), tried entries
        after them; the new-table occurrence rows reference the new numbering.
        """
        metadata = []
        nodes = []
        new_table_entries = []
        metadata.append(("key", str(address_manager.key)))

        # Renumber new-table nodes to a compact 0-based range.
        unique_ids = {}
        count_ids = 0
        for node_id, info in address_manager.map_info.items():
            unique_ids[node_id] = count_ids
            if info.ref_count > 0:
                assert count_ids != address_manager.new_count
                nodes.append((count_ids, info))
                count_ids += 1
        metadata.append(("new_count", str(count_ids)))

        # Tried nodes are appended after the new ones in the same id space.
        tried_ids = 0
        for node_id, info in address_manager.map_info.items():
            if info.is_tried:
                assert info is not None
                assert tried_ids != address_manager.tried_count
                nodes.append((count_ids, info))
                count_ids += 1
                tried_ids += 1
        metadata.append(("tried_count", str(tried_ids)))

        # Record every occupied slot of the new matrix as (renumbered id, bucket).
        for bucket in range(NEW_BUCKET_COUNT):
            for i in range(BUCKET_SIZE):
                if address_manager.new_matrix[bucket][i] != -1:
                    index = unique_ids[address_manager.new_matrix[bucket][i]]
                    new_table_entries.append((index, bucket))

        await self.clear()
        await self.set_metadata(metadata)
        await self.set_nodes(nodes)
        await self.set_new_table(new_table_entries)

    async def deserialize(self) -> AddressManager:
        """Rebuild an AddressManager from stored rows, recomputing derived state.

        Tried/new counts and matrix positions are recalculated; stored nodes
        whose bucket slot is already taken are dropped.
        """
        address_manager = AddressManager()
        metadata = await self.get_metadata()
        nodes = await self.get_nodes()
        new_table_entries = await self.get_new_table()
        address_manager.clear()

        address_manager.key = int(metadata["key"])
        address_manager.new_count = int(metadata["new_count"])
        # address_manager.tried_count = int(metadata["tried_count"])
        # tried_count is recomputed below instead of trusted from storage.
        address_manager.tried_count = 0

        # Ids below new_count are new-table nodes (see `serialize` numbering).
        new_table_nodes = [(node_id, info) for node_id, info in nodes if node_id < address_manager.new_count]
        for n, info in new_table_nodes:
            address_manager.map_addr[info.peer_info.host] = n
            address_manager.map_info[n] = info
            info.random_pos = len(address_manager.random_pos)
            address_manager.random_pos.append(n)
        address_manager.id_count = len(new_table_nodes)

        tried_table_nodes = [(node_id, info) for node_id, info in nodes if node_id >= address_manager.new_count]
        # lost_count = 0
        for node_id, info in tried_table_nodes:
            # Recompute each tried node's bucket slot; first claimant wins.
            tried_bucket = info.get_tried_bucket(address_manager.key)
            tried_bucket_pos = info.get_bucket_position(address_manager.key, False, tried_bucket)
            if address_manager.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
                info.random_pos = len(address_manager.random_pos)
                info.is_tried = True
                id_count = address_manager.id_count
                address_manager.random_pos.append(id_count)
                address_manager.map_info[id_count] = info
                address_manager.map_addr[info.peer_info.host] = id_count
                address_manager.tried_matrix[tried_bucket][tried_bucket_pos] = id_count
                address_manager.id_count += 1
                address_manager.tried_count += 1
            # else:
            #     lost_count += 1
        # address_manager.tried_count -= lost_count

        # Re-populate the new matrix from the stored occurrence rows, honouring
        # the per-address occurrence cap.
        for node_id, bucket in new_table_entries:
            if node_id >= 0 and node_id < address_manager.new_count:
                info = address_manager.map_info[node_id]
                bucket_pos = info.get_bucket_position(address_manager.key, True, bucket)
                if address_manager.new_matrix[bucket][bucket_pos] == -1 and info.ref_count < NEW_BUCKETS_PER_ADDRESS:
                    info.ref_count += 1
                    address_manager.new_matrix[bucket][bucket_pos] = node_id

        # Drop entries that ended up referenced by neither table.
        for node_id, info in list(address_manager.map_info.items()):
            if not info.is_tried and info.ref_count == 0:
                address_manager.delete_new_entry_(node_id)
        address_manager.load_used_table_positions()
        return address_manager
| btcgreen/server/address_manager_store.py | 8,148 | Metadata table:
- private key
- new table count
- tried table count
Nodes table:
* Maps entries from new/tried table to unique node ids.
- node_id
- IP, port, together with the IP, port of the source peer.
New table:
* Stores node_id, bucket for each occurrence in the new table of an entry.
* Once we know the buckets, we can also deduce the bucket positions.
Every other information, such as tried_matrix, map_addr, map_info, random_pos,
be deduced and it is not explicitly stored, instead it is recalculated.
address_manager.tried_count = int(metadata["tried_count"]) lost_count = 0 else: lost_count += 1 address_manager.tried_count -= lost_count | 654 | en | 0.815059 |
# Generated by Django 3.0.5 on 2020-05-13 09:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated grant-management schema cleanup.

    Replaces ProjectSocialMedia with ProjectSocialNetwork, renames/normalizes
    publication and dataset date fields, and tightens several help texts.
    """

    dependencies = [
        ('project_core', '0118_calls_need_to_be_part_of_a_funding_instrument'),
        ('grant_management', '0041_allows_media_to_not_be_related_blog_post'),
    ]

    operations = [
        # New model that supersedes ProjectSocialMedia (deleted below).
        migrations.CreateModel(
            name='ProjectSocialNetwork',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
                ('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
                ('url', models.URLField(blank=True, help_text='Address of social media entry (e.g. https://twitter.com/SwissPolar)', null=True)),
                ('project', models.ForeignKey(help_text='Choose related project', on_delete=django.db.models.deletion.PROTECT, to='project_core.Project')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Normalize date field naming: dataset uses `published_date` like publication.
        migrations.RenameField(
            model_name='dataset',
            old_name='date_published',
            new_name='published_date',
        ),
        # Publication: replace the datetime field with a plain date field.
        migrations.RemoveField(
            model_name='publication',
            name='date_time_published',
        ),
        migrations.AddField(
            model_name='publication',
            name='published_date',
            field=models.DateField(blank=True, help_text='Date of the publication', null=True),
        ),
        # Help-text / length adjustments only from here on.
        migrations.AlterField(
            model_name='dataset',
            name='doi',
            field=models.CharField(blank=True, help_text='DOI reference for entry', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='dataset',
            name='title',
            field=models.CharField(help_text='Dataset title', max_length=1000),
        ),
        migrations.AlterField(
            model_name='publication',
            name='doi',
            field=models.CharField(blank=True, help_text='DOI reference for entry', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='publication',
            name='reference',
            field=models.CharField(blank=True, help_text='Journal reference for entry', max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='publication',
            name='title',
            field=models.CharField(help_text='Publication title', max_length=1000),
        ),
        migrations.AlterField(
            model_name='socialnetwork',
            name='name',
            field=models.CharField(help_text='Please enter social network title (e.g. Twitter, Facebook, Instagram, Blog)', max_length=100),
        ),
        # Old model removed after its replacement was created above.
        migrations.DeleteModel(
            name='ProjectSocialMedia',
        ),
        migrations.AddField(
            model_name='projectsocialnetwork',
            name='social_network',
            field=models.ForeignKey(help_text='Choose the related social network', on_delete=django.db.models.deletion.PROTECT, to='grant_management.SocialNetwork'),
        ),
    ]
| ProjectApplication/grant_management/migrations/0042_improves_project_data_publications_social_media_types.py | 3,387 | Generated by Django 3.0.5 on 2020-05-13 09:46 | 45 | en | 0.61429 |
import contextlib
import os.path
import subprocess
import pytest
from pre_commit import parse_shebang
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
from pre_commit.util import cmd_output_b
from testing.auto_namedtuple import auto_namedtuple
# Absolute path to the directory containing this helper module; test fixture
# files live under <TESTING_DIR>/resources (see get_resource_path below).
TESTING_DIR = os.path.abspath(os.path.dirname(__file__))
def docker_is_running() -> bool:  # pragma: win32 no cover
    """Return True when the docker daemon responds to ``docker ps``."""
    try:
        cmd_output_b('docker', 'ps')
        return True
    except CalledProcessError:  # pragma: no cover
        return False
def get_resource_path(path):
    """Return the absolute path of a fixture file under testing/resources."""
    resources_dir = os.path.join(TESTING_DIR, 'resources')
    return os.path.join(resources_dir, path)
def cmd_output_mocked_pre_commit_home(
        *args, tempdir_factory, pre_commit_home=None, env=None, **kwargs,
):
    """Run *args* through cmd_output with PRE_COMMIT_HOME pointed at a temp dir.

    Returns ``(returncode, output-with-normalized-newlines, None)``.
    """
    if pre_commit_home is None:
        pre_commit_home = tempdir_factory.get()
    kwargs.setdefault('stderr', subprocess.STDOUT)
    base_env = os.environ if env is None else env
    # Don't want to write to the real home directory's pre-commit cache.
    full_env = dict(base_env, PRE_COMMIT_HOME=pre_commit_home)
    ret, out, _ = cmd_output(*args, env=full_env, **kwargs)
    return ret, out.replace('\r\n', '\n'), None
# Skip markers for tests whose external toolchain may be absent in the
# environment (coursier, docker, swift); evaluated once at import time.
skipif_cant_run_coursier = pytest.mark.skipif(
    os.name == 'nt' or parse_shebang.find_executable('cs') is None,
    reason="coursier isn't installed or can't be found",
)
skipif_cant_run_docker = pytest.mark.skipif(
    os.name == 'nt' or not docker_is_running(),
    reason="Docker isn't running or can't be accessed",
)
skipif_cant_run_swift = pytest.mark.skipif(
    parse_shebang.find_executable('swift') is None,
    reason="swift isn't installed or can't be found",
)
# Tests known to fail on Windows.
xfailif_windows = pytest.mark.xfail(os.name == 'nt', reason='windows')
def run_opts(
    all_files=False,
    files=(),
    color=False,
    verbose=False,
    hook=None,
    remote_branch='',
    local_branch='',
    from_ref='',
    to_ref='',
    remote_name='',
    remote_url='',
    hook_stage='commit',
    show_diff_on_failure=False,
    commit_msg_filename='',
    checkout_type='',
    is_squash_merge='',
    rewrite_command='',
):
    """Return an auto_namedtuple of run options with the given overrides."""
    # A full-repo run and an explicit file list are mutually exclusive.
    assert not (all_files and files)
    opts = {
        'all_files': all_files,
        'files': files,
        'color': color,
        'verbose': verbose,
        'hook': hook,
        'remote_branch': remote_branch,
        'local_branch': local_branch,
        'from_ref': from_ref,
        'to_ref': to_ref,
        'remote_name': remote_name,
        'remote_url': remote_url,
        'hook_stage': hook_stage,
        'show_diff_on_failure': show_diff_on_failure,
        'commit_msg_filename': commit_msg_filename,
        'checkout_type': checkout_type,
        'is_squash_merge': is_squash_merge,
        'rewrite_command': rewrite_command,
    }
    return auto_namedtuple(**opts)
@contextlib.contextmanager
def cwd(path):
    """Temporarily chdir into *path*, always restoring the previous cwd."""
    saved_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(saved_cwd)
def git_commit(*args, fn=cmd_output, msg='commit!', all_files=True, **kwargs):
    """Run ``git commit`` via *fn*; return (returncode, normalized output)."""
    kwargs.setdefault('stderr', subprocess.STDOUT)
    cmd = ['git', 'commit', '--allow-empty', '--no-gpg-sign', *args]
    if all_files:  # allow skipping `-a` with `all_files=False`
        cmd.append('-a')
    if msg is not None:  # allow skipping `-m` with `msg=None`
        cmd.extend(('-m', msg))
    ret, out, _ = fn(*cmd, **kwargs)
    return ret, out.replace('\r\n', '\n')
| testing/util.py | 3,408 | pragma: win32 no cover pragma: no cover Don't want to write to the home directory These are mutually exclusive allow skipping `-a` with `all_files=False` allow skipping `-m` with `msg=None` | 189 | en | 0.86301 |
"""Database exceptions."""
class BaseError(Exception):
    """Base exception for this package; catch it to handle any package error."""
class NotFoundError(BaseError):
    """Raised when a requested item was not found in the database."""
| database/open_alchemy/package_database/exceptions.py | 175 | The base exception.
When an item was not found in the database.
Database exceptions. | 84 | en | 0.906384 |
import sys
import os
import re
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
warnings.filterwarnings('ignore',
r'.+ distutils\b.+ deprecated',
DeprecationWarning)
def warn_distutils_present():
    """Warn when stdlib distutils was imported before this shim could act."""
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils.")
def clear_distutils():
    """Drop the stdlib distutils package (and all submodules) from sys.modules."""
    if 'distutils' not in sys.modules:
        return
    warnings.warn("Setuptools is replacing distutils.")
    # Snapshot the matching names first, then delete them.
    stale = [name for name in sys.modules if re.match(r'distutils\b', name)]
    for name in stale:
        del sys.modules[name]
def enabled():
    """
    Allow selection of distutils by environment variable.

    Returns True only when SETUPTOOLS_USE_DISTUTILS is set to 'local'.
    """
    return os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') == 'local'
def ensure_local_distutils():
    """Install setuptools' bundled distutils into sys.modules as 'distutils'."""
    clear_distutils()
    distutils = importlib.import_module('setuptools._distutils')
    distutils.__name__ = 'distutils'
    sys.modules['distutils'] = distutils
    # sanity check that submodules load as expected (from the local copy)
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.
    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if not enabled():
        return
    warn_distutils_present()
    ensure_local_distutils()
class DistutilsMetaFinder:
    """Meta-path finder that serves setuptools' distutils for `import distutils`.

    Dispatches `find_spec` to a `spec_for_<fullname>` method when one exists,
    so only the module names with dedicated handlers are intercepted.
    """

    def find_spec(self, fullname, path, target=None):
        if path is not None:
            # Only top-level imports are of interest (path is None for those).
            return
        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        """Return a spec whose loader yields setuptools._distutils."""
        import importlib.abc
        import importlib.util

        class DistutilsLoader(importlib.abc.Loader):

            def create_module(self, spec):
                return importlib.import_module('setuptools._distutils')

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader('distutils', DistutilsLoader())

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        # Shadow the class method on this instance so the distutils hook is
        # disabled for the rest of this interpreter session.
        self.spec_for_distutils = lambda: None

    @staticmethod
    def pip_imported_during_build():
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback
        return any(
            frame.f_globals['__file__'].endswith('setup.py')
            for frame, line in traceback.walk_stack(None)
        )
# Singleton finder instance shared by add_shim/remove_shim.
DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    """Install the finder at the front of sys.meta_path."""
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def remove_shim():
    """Remove the finder; a no-op when it is not installed."""
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass
| DatabaseControlWrapper_JE/venv/Lib/site-packages/_distutils_hack/__init__.py | 3,816 | Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
Allow selection of distutils by environment variable.
Detect if pip is being imported in a build script. Ref #2355.
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
PyPy for 3.6 unconditionally imports distutils, so bypass the warning https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.pyL250 sanity check that submodules load as expected | 584 | en | 0.737584 |
## Start of header boilerplate #################################################
from aocbase import readInput
import re
import collections
def lineParse(s, f, fp):
    """Match *s* against the compiled regex *fp* and map *f* over the groups.

    Returns a tuple of the converted capture groups.
    Raises ValueError when the line does not match.  (The previous
    ``raise s`` raised a bare string, which is itself a TypeError in
    Python 3 and hid the offending line.)
    """
    m = fp.match(s)
    if m is None:
        raise ValueError("line does not match pattern: {!r}".format(s))
    return tuple(map(f, m.groups()))
def fileParse(inp):
    """Return the raw puzzle input as a list of its lines (newlines stripped)."""
    lines = inp.splitlines()
    return list(lines)
## End of header boilerplate ###################################################
def mapMaze(s):
    """Build ``{(x, y): char}`` for every non-space character of the ASCII maze."""
    return {
        (x, y): c
        for y, line in enumerate(s)
        for x, c in enumerate(line)
        if c != ' '
    }
def findTeleporters(mz):
    """Wire portal pairs into *mz* (mutated in place) and return the AA/ZZ cells.

    A portal is a pair of adjacent uppercase letters next to a '.' tile.  For
    every two-letter name that occurs twice, each portal's inner letter cell is
    replaced by the coordinates (a tuple) of the partner portal's '.' tile, so
    that stepping onto the letter teleports.  Returns the letter cells adjacent
    to the walkable tiles of the unpaired 'AA' (start) and 'ZZ' (goal) portals.
    """
    loc = dict()
    for (x, y), value in mz.items():
        if isinstance(value, str) and value.isupper():
            for dx, dy in ((0,1),(1,0),(-1,0),(0,-1)):
                nxtChar = mz.get((x+dx, y+dy), '')
                if isinstance(nxtChar, tuple): continue
                prevChar = mz.get((x-dx, y-dy), '')
                if (nxtChar.isupper() and prevChar == '.'):
                    # portal names read left-to-right / top-to-bottom
                    name = (mz[min(x, x+dx),min(y, y+dy)] +
                            mz[max(x+dx, x), max(y+dy, y)])
                    if name not in loc:
                        loc[name] = list()
                    # record (letter cell next to '.', the '.' tile itself)
                    loc[name].append(((x, y), (x-dx, y-dy)))
    for l in loc.values():
        if len(l) != 2: continue
        # cross-link: each letter cell now holds the partner's walkable tile
        mz[l[0][0]] = l[1][1]
        mz[l[1][0]] = l[0][1]
    return loc['AA'][0][0], loc['ZZ'][0][0]
def findTeleportersD(mz):
    """findTeleporters plus an is-outer flag appended to every portal tuple.

    A portal within 2 tiles of the maze bounding box is "outer"; the flag is
    consumed by colorMapD, where outer portals decrease the recursion level
    (blocked at level 0) and inner ones increase it.
    """
    start, stop = findTeleporters(mz)
    minx = min((c[0] for c in mz.keys()))
    maxx = max((c[0] for c in mz.keys()))
    miny = min((c[1] for c in mz.keys()))
    maxy = max((c[1] for c in mz.keys()))
    portalLocations = [key for key in mz.keys() if isinstance(mz[key], tuple)]
    for x, y in portalLocations:
        mz[x, y] = mz[x, y] + (abs(x - minx) < 2 or abs(x - maxx) < 2 or abs(y - miny) < 2 or abs(y - maxy) < 2,)
    return start, stop
def colorMap(mz, start, stop):
    """Breadth-first shortest distance from *start* to *stop* in the portal maze.

    Portal letter cells hold the partner tile's coordinates (tuples) and are
    followed transparently.  Returns None when *stop* is unreachable.
    NOTE(review): the returned ``v[x,y] - 1`` compensates for the portal
    letter cells at both ends — presumably because start/stop are letter
    cells, not '.' tiles; confirm against known AoC inputs.
    """
    d = collections.deque()
    d.append(start)
    v = dict()
    v[start] = 0
    while len(d)>0:
        cur = d.popleft()
        x, y = cur
        for dx, dy in ((0,1),(1,0),(-1,0),(0,-1)):
            nx, ny = x+dx, y+dy
            if (nx, ny) == stop:
                return v[x,y] -1
            if (nx, ny) not in mz:
                continue
            if isinstance(mz[nx, ny], tuple):
                # walked onto a portal letter: jump to the partner tile
                nx, ny = mz[nx, ny]
            if mz[nx, ny] != '.':
                continue
            # skip if already reached at least as cheaply
            if (nx, ny) in v and v[x, y] + 1 >= v[nx, ny]:
                continue
            v[nx, ny] = v[x, y]+1
            d.append((nx, ny))
def colorMapD(mz, start, stop):
    """BFS over (x, y, recursion level) for the part-2 recursive maze.

    Outer portals step one level up (blocked at level 0), inner portals one
    level down (capped at maxLevel = number of portal pairs); success requires
    reaching *stop* at level 0.  Returns None when unreachable.
    """
    maxLevel = len([t for t in mz.values() if isinstance(t, tuple)])//2
    d = collections.deque()
    d.append(start+(0, ))
    v = dict()
    v[start+(0, )] = 0
    while len(d)>0:
        cur = d.popleft()
        x, y, lvl = cur
        for dx, dy in ((0,1),(1,0),(-1,0),(0,-1)):
            nx, ny, nlvl = x+dx, y+dy, lvl
            if (nx, ny) == stop and lvl == 0:
                # -1 compensates for the portal letter cells at both ends
                return v[x,y,lvl] - 1
            if (nx, ny) not in mz:
                continue
            if isinstance(mz[nx, ny], tuple):
                nx, ny, outer = mz[nx, ny]
                if outer:
                    if lvl == 0:
                        # outer portals act as walls at the outermost level
                        continue
                    nlvl -= 1
                else:
                    if lvl == maxLevel:
                        # recursing deeper than the number of pairs cannot help
                        continue
                    nlvl += 1
            if mz[nx, ny] != '.':
                continue
            if (nx, ny, nlvl) in v and v[x, y, lvl] + 1 >= v[nx, ny, nlvl]:
                continue
            v[nx, ny, nlvl] = v[x, y, lvl]+1
            d.append((nx, ny, nlvl))
def part1(pinp):
    """Solve part 1: shortest AA -> ZZ path length through the flat maze."""
    maze = mapMaze(pinp)
    start, goal = findTeleporters(maze)
    return colorMap(maze, start, goal)
def part2(pinp):
    """Solve part 2: shortest AA -> ZZ path through the recursive maze."""
    maze = mapMaze(pinp)
    start, goal = findTeleportersD(maze)
    return colorMapD(maze, start, goal)
## Start of footer boilerplate #################################################
if __name__ == "__main__":
inp = readInput()
## Update for input specifics ##############################################
parseInp = fileParse(inp)
print("Input is '" + str(parseInp[:10])[:100] +
('...' if len(parseInp)>10 or len(str(parseInp[:10]))>100 else '') + "'")
print("Solution to part 1: {}".format(part1(parseInp)))
print("Solution to part 2: {}".format(part2(parseInp)))
## End of footer boilerplate ###################################################
| Dyr-El-python/day20.py | 4,465 | Start of header boilerplate End of header boilerplate Start of footer boilerplate Update for input specifics End of footer boilerplate | 138 | en | 0.462101 |
import inspect
import warnings
from abc import ABCMeta, abstractmethod
from mmcv_custom.fileio.zipreader import ZipReader
class BaseStorageBackend(metaclass=ABCMeta):
    """Abstract class of storage backends.

    All backends need to implement two apis: `get()` and `get_text()`.
    `get()` reads the file as a byte stream and `get_text()` reads the file
    as texts.
    """

    @abstractmethod
    def get(self, filepath):
        """Read ``filepath`` and return its contents as bytes-like data."""
        pass

    @abstractmethod
    def get_text(self, filepath):
        """Read ``filepath`` and return its contents as text."""
        pass
class CephBackend(BaseStorageBackend):
    """Ceph storage backend.

    Args:
        path_mapping (dict|None): path mapping dict from local path to Petrel
            path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
            be replaced by `dst`. Default: None.
    """

    def __init__(self, path_mapping=None):
        try:
            import ceph
            warnings.warn('Ceph is deprecate in favor of Petrel.')
        except ImportError:
            raise ImportError('Please install ceph to enable CephBackend.')
        self._client = ceph.S3Client()
        assert isinstance(path_mapping, dict) or path_mapping is None
        self.path_mapping = path_mapping

    def get(self, filepath):
        """Fetch ``filepath`` from Ceph and return it as a memoryview of bytes."""
        filepath = str(filepath)
        if self.path_mapping is not None:
            # rewrite each local path prefix to its configured Ceph equivalent
            for k, v in self.path_mapping.items():
                filepath = filepath.replace(k, v)
        value = self._client.Get(filepath)
        value_buf = memoryview(value)
        return value_buf

    def get_text(self, filepath):
        # text reads are not supported by this backend
        raise NotImplementedError
class PetrelBackend(BaseStorageBackend):
    """Petrel storage backend (for internal use).

    Args:
        path_mapping (dict|None): path mapping dict from local path to Petrel
            path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
            be replaced by `dst`. Default: None.
    """

    def __init__(self, path_mapping=None):
        try:
            from petrel_client import client
        except ImportError:
            raise ImportError('Please install petrel_client to enable '
                              'PetrelBackend.')
        self._client = client.Client()
        assert isinstance(path_mapping, dict) or path_mapping is None
        self.path_mapping = path_mapping

    def get(self, filepath):
        """Fetch ``filepath`` from Petrel and return it as a memoryview of bytes."""
        filepath = str(filepath)
        if self.path_mapping is not None:
            # rewrite each local path prefix to its configured Petrel equivalent
            for k, v in self.path_mapping.items():
                filepath = filepath.replace(k, v)
        value = self._client.Get(filepath)
        value_buf = memoryview(value)
        return value_buf

    def get_text(self, filepath):
        # text reads are not supported by this backend
        raise NotImplementedError
class MemcachedBackend(BaseStorageBackend):
    """Memcached storage backend.

    Attributes:
        server_list_cfg (str): Config file for memcached server list.
        client_cfg (str): Config file for memcached client.
        sys_path (str | None): Additional path to be appended to `sys.path`.
            Default: None.
    """

    def __init__(self, server_list_cfg, client_cfg, sys_path=None):
        if sys_path is not None:
            import sys
            sys.path.append(sys_path)
        try:
            import mc
        except ImportError:
            raise ImportError(
                'Please install memcached to enable MemcachedBackend.')
        self.server_list_cfg = server_list_cfg
        self.client_cfg = client_cfg
        self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
                                                      self.client_cfg)
        # mc.pyvector serves as a pointer to a memory cache, reused per fetch
        self._mc_buffer = mc.pyvector()

    def get(self, filepath):
        """Fetch ``filepath`` from memcached via the shared buffer."""
        filepath = str(filepath)
        import mc
        self._client.Get(filepath, self._mc_buffer)
        value_buf = mc.ConvertBuffer(self._mc_buffer)
        return value_buf

    def get_text(self, filepath):
        # text reads are not supported by this backend
        raise NotImplementedError
class LmdbBackend(BaseStorageBackend):
    """Lmdb storage backend.

    Args:
        db_path (str): Lmdb database path.
        readonly (bool, optional): Lmdb environment parameter. If True,
            disallow any write operations. Default: True.
        lock (bool, optional): Lmdb environment parameter. If False, when
            concurrent access occurs, do not lock the database. Default: False.
        readahead (bool, optional): Lmdb environment parameter. If False,
            disable the OS filesystem readahead mechanism, which may improve
            random read performance when a database is larger than RAM.
            Default: False.

    Attributes:
        db_path (str): Lmdb database path.
    """

    def __init__(self,
                 db_path,
                 readonly=True,
                 lock=False,
                 readahead=False,
                 **kwargs):
        try:
            import lmdb
        except ImportError:
            raise ImportError('Please install lmdb to enable LmdbBackend.')
        self.db_path = str(db_path)
        # The environment handle is opened once and shared by all get() calls.
        self._client = lmdb.open(
            self.db_path,
            readonly=readonly,
            lock=lock,
            readahead=readahead,
            **kwargs)

    def get(self, filepath):
        """Get values according to the filepath.

        Args:
            filepath (str | obj:`Path`): Here, filepath is the lmdb key.
        """
        filepath = str(filepath)
        # one read-only transaction per lookup; keys are ascii-encoded
        with self._client.begin(write=False) as txn:
            value_buf = txn.get(filepath.encode('ascii'))
        return value_buf

    def get_text(self, filepath):
        # text reads are not supported by this backend
        raise NotImplementedError
def is_zip_path(path):
    """Return True when *path* points inside a zip archive (``foo.zip@inner``)."""
    return path.find('.zip@') != -1
class HardDiskBackend(BaseStorageBackend):
    """Default backend reading files straight from the local filesystem."""

    def get(self, filepath):
        """Return the bytes of *filepath*; zip-internal paths go via ZipReader."""
        filepath = str(filepath)
        if not is_zip_path(filepath):
            with open(filepath, 'rb') as f:
                return f.read()
        return ZipReader.read(filepath)

    def get_text(self, filepath):
        """Return the text of *filepath* (platform default encoding)."""
        with open(str(filepath), 'r') as f:
            return f.read()
class FileClient(object):
    """A general file client to access files in different backends.

    The client loads a file or text from a path using the configured backend
    and returns it as a binary buffer. Additional backend classes can be
    registered under a name via ``register_backend``.

    Attributes:
        backend (str): The storage backend type. Options are "disk", "ceph",
            "memcached", "lmdb" and "petrel".
        client (:obj:`BaseStorageBackend`): The backend object.
    """

    # name -> backend class registry; extended through register_backend()
    _backends = {
        'disk': HardDiskBackend,
        'ceph': CephBackend,
        'memcached': MemcachedBackend,
        'lmdb': LmdbBackend,
        'petrel': PetrelBackend,
    }

    def __init__(self, backend='disk', **kwargs):
        if backend not in self._backends:
            raise ValueError(
                f'Backend {backend} is not supported. Currently supported ones'
                f' are {list(self._backends.keys())}')
        self.backend = backend
        # remaining kwargs are forwarded to the backend's constructor
        self.client = self._backends[backend](**kwargs)

    @classmethod
    def register_backend(cls, name, backend):
        """Register *backend* (a BaseStorageBackend subclass) under *name*."""
        if not inspect.isclass(backend):
            raise TypeError(
                f'backend should be a class but got {type(backend)}')
        if not issubclass(backend, BaseStorageBackend):
            raise TypeError(
                f'backend {backend} is not a subclass of BaseStorageBackend')
        cls._backends[name] = backend

    def get(self, filepath):
        """Read *filepath* via the configured backend (bytes-like result)."""
        return self.client.get(filepath)

    def get_text(self, filepath):
        """Read *filepath* as text via the configured backend."""
        return self.client.get_text(filepath)
| mmcv_custom/fileio/file_client.py | 7,801 | Abstract class of storage backends.
All backends need to implement two apis: `get()` and `get_text()`.
`get()` reads the file as a byte stream and `get_text()` reads the file
as texts.
Ceph storage backend.
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
A general file client to access files in different backend.
The client loads a file or text in a specified backend from its path
and return it as a binary file. it can also register other backend
accessor with a given name and backend class.
Attributes:
backend (str): The storage backend type. Options are "disk", "ceph",
"memcached" and "lmdb".
client (:obj:`BaseStorageBackend`): The backend object.
Raw hard disks storage backend.
Lmdb storage backend.
Args:
db_path (str): Lmdb database path.
readonly (bool, optional): Lmdb environment parameter. If True,
disallow any write operations. Default: True.
lock (bool, optional): Lmdb environment parameter. If False, when
concurrent access occurs, do not lock the database. Default: False.
readahead (bool, optional): Lmdb environment parameter. If False,
disable the OS filesystem readahead mechanism, which may improve
random read performance when a database is larger than RAM.
Default: False.
Attributes:
db_path (str): Lmdb database path.
Memcached storage backend.
Attributes:
server_list_cfg (str): Config file for memcached server list.
client_cfg (str): Config file for memcached client.
sys_path (str | None): Additional path to be appended to `sys.path`.
Default: None.
Petrel storage backend (for internal use).
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
Get values according to the filepath.
Args:
filepath (str | obj:`Path`): Here, filepath is the lmdb key.
mc.pyvector servers as a point which points to a memory cache | 2,160 | en | 0.726984 |
r"""
This app is used to invert the styleGAN series synthesis network. We find
the matching latent vector w for given images so that we can manipulate
images in the latent feature space.
Ref: https://github.com/rosinality/stylegan2-pytorch/blob/master/projector.py # noqa
"""
import argparse
import os
import sys
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from mmcv import Config
from mmcv.runner import load_checkpoint
from PIL import Image
from torch import optim
from torchvision import transforms
from torchvision.utils import save_image
from tqdm import tqdm
# yapf: disable
sys.path.append(os.path.abspath(os.path.join(__file__, '../..'))) # isort:skip # noqa
from mmgen.apis import set_random_seed # isort:skip # noqa
from mmgen.models import build_model # isort:skip # noqa
from mmgen.models.architectures.lpips import PerceptualLoss # isort:skip # noqa
# yapf: enable
def parse_args():
    """Build and parse the CLI arguments for StyleGAN latent projection."""
    parser = argparse.ArgumentParser(
        description='Image projector to the StyleGAN-based generator latent \
spaces')
    parser.add_argument('config', help='evaluation config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        'files',
        metavar='FILES',
        nargs='+',
        help='path to image files to be projected')
    parser.add_argument(
        '--results-path', type=str, help='path to store projection results.')
    parser.add_argument(
        '--use-cpu',
        action='store_true',
        help='whether to use cpu device for sampling')
    parser.add_argument('--seed', type=int, default=2021, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--sample-model',
        type=str,
        default='ema',
        help='use which mode (ema/orig) in sampling.')
    parser.add_argument(
        '--lr-rampup',
        type=float,
        default=0.05,
        help='proportion of the learning rate warmup iters in the total iters')
    parser.add_argument(
        '--lr-rampdown',
        type=float,
        default=0.25,
        help='proportion of the learning rate decay iters in the total iters')
    parser.add_argument(
        '--lr', type=float, default=0.1, help='maximum learning rate')
    parser.add_argument(
        '--noise',
        type=float,
        default=0.05,
        help='strength of the noise level')
    parser.add_argument(
        '--noise-ramp',
        type=float,
        default=0.75,
        help='proportion of the noise level decay iters in the total iters',
    )
    parser.add_argument(
        '--total-iters', type=int, default=1000, help='optimize iterations')
    parser.add_argument(
        '--noise-regularize',
        type=float,
        default=1e5,
        help='weight of the noise regularization',
    )
    parser.add_argument(
        '--mse', type=float, default=0, help='weight of the mse loss')
    parser.add_argument(
        '--n-mean-latent',
        type=int,
        default=10000,
        help='sampling times to obtain the mean latent')
    parser.add_argument(
        '--w-plus',
        action='store_true',
        help='allow to use distinct latent codes to each layers',
    )
    args = parser.parse_args()
    return args
def noise_regularize(noises):
    """Return the noise regularization term.

    For each noise map, sums the squared mean self-correlation with its
    1-pixel horizontal and vertical shifts, repeated over a 2x-downsampled
    pyramid until the map is 8x8 or smaller.
    """
    loss = 0
    for noise in noises:
        size = noise.shape[2]
        while True:
            shifted_w = torch.roll(noise, shifts=1, dims=3)
            shifted_h = torch.roll(noise, shifts=1, dims=2)
            loss = (loss + (noise * shifted_w).mean().pow(2) +
                    (noise * shifted_h).mean().pow(2))
            if size <= 8:
                break
            # average-pool 2x2 to the next pyramid level
            noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2]).mean([3, 5])
            size //= 2
    return loss
def noise_normalize_(noises):
    """Standardize every noise tensor in place to zero mean and unit std."""
    for tensor in noises:
        # compute both statistics before mutating the tensor
        mu, sigma = tensor.mean(), tensor.std()
        tensor.data.add_(-mu).div_(sigma)
def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    """Cosine-decay learning-rate schedule with linear warmup, t in [0, 1]."""
    decay = min(1, (1 - t) / rampdown)
    cosine = 0.5 - 0.5 * np.cos(decay * np.pi)
    warmup = min(1, t / rampup)
    return initial_lr * (cosine * warmup)
def latent_noise(latent, strength):
    """Return *latent* perturbed by Gaussian noise scaled by *strength*."""
    perturbation = torch.randn_like(latent) * strength
    return latent + perturbation
def main():
    """Project the input images into the generator's latent space.

    Loads the model from config/checkpoint, optimizes one latent code (and
    the per-layer injected noises) per image by minimizing an LPIPS loss plus
    noise regularization and optional MSE, then writes the projected images
    and a ``project_result.pt`` with the latents/noises to --results-path.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # set random seeds
    if args.seed is not None:
        print('set random seed to', args.seed)
        set_random_seed(args.seed, deterministic=args.deterministic)
    # build the model and load checkpoint
    model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # sanity check for models without ema
    if not model.use_ema:
        args.sample_model = 'orig'
    if args.sample_model == 'ema':
        generator = model.generator_ema
    else:
        generator = model.generator
    mmcv.print_log(f'Sampling model: {args.sample_model}', 'mmgen')
    generator.eval()
    device = 'cpu'
    if not args.use_cpu:
        generator = generator.cuda()
        device = 'cuda'
    # cap working resolution at 256 — presumably for the perceptual loss;
    # TODO(review) confirm against the LPIPS implementation
    img_size = min(generator.out_size, 256)
    transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    # read images
    imgs = []
    for imgfile in args.files:
        img = Image.open(imgfile).convert('RGB')
        img = transform(img)
        img = img[[2, 1, 0], ...]  # reverse channel order (RGB <-> BGR)
        imgs.append(img)
    imgs = torch.stack(imgs, 0).to(device)
    # get mean and standard deviation of style latents
    with torch.no_grad():
        noise_sample = torch.randn(
            args.n_mean_latent, generator.style_channels, device=device)
        latent_out = generator.style_mapping(noise_sample)
        latent_mean = latent_out.mean(0)
        latent_std = ((latent_out - latent_mean).pow(2).sum() /
                      args.n_mean_latent)**0.5
    # start optimization from the mean latent, one row per input image
    latent_in = latent_mean.detach().clone().unsqueeze(0).repeat(
        imgs.shape[0], 1)
    if args.w_plus:
        # W+ space: a distinct latent per synthesis layer
        latent_in = latent_in.unsqueeze(1).repeat(1, generator.num_latents, 1)
    latent_in.requires_grad = True
    # define lpips loss
    percept = PerceptualLoss(use_gpu=device.startswith('cuda'))
    # initialize layer noises
    noises_single = generator.make_injected_noise()
    noises = []
    for noise in noises_single:
        noises.append(noise.repeat(imgs.shape[0], 1, 1, 1).normal_())
    for noise in noises:
        noise.requires_grad = True
    optimizer = optim.Adam([latent_in] + noises, lr=args.lr)
    pbar = tqdm(range(args.total_iters))
    # run optimization
    for i in pbar:
        t = i / args.total_iters
        lr = get_lr(t, args.lr, args.lr_rampdown, args.lr_rampup)
        optimizer.param_groups[0]['lr'] = lr
        # latent-space noise injection, annealed to zero over noise_ramp
        noise_strength = latent_std * args.noise * max(
            0, 1 - t / args.noise_ramp)**2
        latent_n = latent_noise(latent_in, noise_strength.item())
        img_gen = generator([latent_n],
                            input_is_latent=True,
                            injected_noise=noises)
        batch, channel, height, width = img_gen.shape
        if height > 256:
            # average-pool the render down to <=256 before the losses
            factor = height // 256
            img_gen = img_gen.reshape(batch, channel, height // factor, factor,
                                      width // factor, factor)
            img_gen = img_gen.mean([3, 5])
        p_loss = percept(img_gen, imgs).sum()
        n_loss = noise_regularize(noises)
        mse_loss = F.mse_loss(img_gen, imgs)
        loss = p_loss + args.noise_regularize * n_loss + args.mse * mse_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        noise_normalize_(noises)
        pbar.set_description(
            f' perceptual: {p_loss.item():.4f}, noise regularize:'
            f'{n_loss.item():.4f}, mse: {mse_loss.item():.4f}, lr: {lr:.4f}')
    results = generator([latent_in.detach().clone()],
                        input_is_latent=True,
                        injected_noise=noises)
    # rescale value range to [0, 1]
    results = ((results + 1) / 2)
    results = results[:, [2, 1, 0], ...]  # reverse channel order back for saving
    results = results.clamp_(0, 1)
    mmcv.mkdir_or_exist(args.results_path)
    # save projection results
    # NOTE(review): 'img' below stores the last optimization step's (possibly
    # downsampled) render, not the full-resolution `results` tensor.
    result_file = {}
    for i, input_name in enumerate(args.files):
        noise_single = []
        for noise in noises:
            noise_single.append(noise[i:i + 1])
        result_file[input_name] = {
            'img': img_gen[i],
            'latent': latent_in[i],
            'injected_noise': noise_single,
        }
        img_name = os.path.splitext(
            os.path.basename(input_name))[0] + '-project.png'
        save_image(results[i], os.path.join(args.results_path, img_name))
    torch.save(result_file, os.path.join(args.results_path,
                                         'project_result.pt'))
# Script entry point: run the projection when executed directly.
if __name__ == '__main__':
    main()
| apps/stylegan_projector.py | 9,274 | This app is used to invert the styleGAN series synthesis network. We find
the matching latent vector w for given images so that we can manipulate
images in the latent feature space.
Ref: https://github.com/rosinality/stylegan2-pytorch/blob/master/projector.py # noqa
yapf: disable isort:skip noqa isort:skip noqa isort:skip noqa isort:skip noqa yapf: enable set cudnn_benchmark set random seeds build the model and load checkpoint sanity check for models without ema read images get mean and standard deviation of style latents define lpips loss initialize layer noises run optimization rescale value range to [0, 1] save projection results | 650 | en | 0.682301 |
def main(x):
    """Read an *x*-line maze from stdin and print every path to the exit.

    The exit is the cell marked 'e' (replaced by '-'); the walk starts at
    the top-left corner, which is marked 'S'.
    """
    matrix = []
    exit_path = []
    for row_idx in range(x):
        row = list(input())
        if 'e' in row:
            exit_col = row.index('e')
            exit_path.extend((row_idx, exit_col))
            row[exit_col] = '-'
        matrix.append(row)
    matrix[0][0] = 'S'
    searching_path(matrix, [], exit_path, 0, 0)
def searching_path(m, path, exit_path, i, j):
    """DFS from (i, j); print the move-letter string of every route that
    reaches the exit cell given by *exit_path* (= [row, col]).

    Visited cells are marked in *m* with their entry direction letter, so
    each cell is entered at most once per overall search (only the exit
    mark is reset).
    """
    rows, cols = len(m), len(m[0])
    exit_row, exit_col = exit_path
    if (i, j) == (exit_row, exit_col):
        # Reached the exit: emit the collected moves plus the final letter,
        # then make the exit walkable again for other routes.
        print(''.join(path[1:]) + m[i][j])
        m[exit_row][exit_col] = '-'
        return
    path.append(m[i][j])
    # Explore neighbors in the original order: down, right, left, up.
    for di, dj, mark in ((1, 0, 'D'), (0, 1, 'R'), (0, -1, 'L'), (-1, 0, 'U')):
        ni, nj = i + di, j + dj
        if 0 <= ni < rows and 0 <= nj < cols and m[ni][nj] == '-':
            m[ni][nj] = mark
            searching_path(m, path, exit_path, ni, nj)
    # Backtrack: this cell is no longer on the current path.
    path.pop()
main(3)  # read a 3-line maze from stdin and print all paths to its exit
| Recursion/labyrinth.py | 1,475 | If destination is reached print explore move down move right move left move up if none of the above is explorable or invalid index backtrack | 140 | en | 0.637323 |
from scrapy.utils.project import get_project_settings
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
# Declarative base class shared by all ORM models in this module.
Base = declarative_base()
def db_connect():
    """
    Performs database connection using database settings from settings.py.
    Returns sqlalchemy engine instance
    """
    # CONNECTION_STRING is read from the Scrapy project settings.
    return create_engine(get_project_settings().get("CONNECTION_STRING"))
def create_table(engine):
    """Create all tables declared on Base's metadata (skips existing tables)."""
    Base.metadata.create_all(engine)
class Parliament(Base):
    """SQLAlchemy model for a member of parliament scraped by this project."""
    __tablename__ = "parliament"
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Person fields are stored as scraped text (no typed date columns).
    name = Column("name", String)
    date_born = Column("date_born", String)
    place_born = Column("place_born", String, nullable=True)
    profession = Column("profession", String, nullable=True)
    lang = Column("lang", String, nullable=True)
    party = Column("party", String, nullable=True)
    email = Column("email", String, nullable=True)
    url = Column("url", String, nullable=True)
    education = Column("education", String, nullable=True)
    # NOTE(review): 'pp' and 'dob' are undocumented and appear to overlap with
    # party/date_born — presumably raw source fields; confirm against the spider.
    pp = Column("pp", String)
    dob = Column("dob", String)
| src/parliamentbg/parliamentbg/models.py | 1,255 | Sqlalchemy deals model
create tables
Performs database connection using database settings from settings.py.
Returns sqlalchemy engine instance | 142 | en | 0.59689 |
import usocket as socket
import ustruct as struct
from ubinascii import hexlify
class MQTTException(Exception):
    """Raised when the broker rejects an operation (e.g. non-zero CONNACK code)."""
    pass
class MQTTClient:
def __init__(
self,
client_id,
server,
port=0,
user=None,
password=None,
keepalive=0,
ssl=False,
ssl_params={},
):
if port == 0:
port = 8883 if ssl else 1883
self.client_id = client_id
self.sock = None
self.server = server
self.port = port
self.ssl = ssl
self.ssl_params = ssl_params
self.pid = 0
self.cb = None
self.user = user
self.pswd = password
self.keepalive = keepalive
self.lw_topic = None
self.lw_msg = None
self.lw_qos = 0
self.lw_retain = False
def _send_str(self, s):
self.sock.write(struct.pack("!H", len(s)))
self.sock.write(s)
def _recv_len(self):
n = 0
sh = 0
while 1:
b = self.sock.read(1)[0]
n |= (b & 0x7F) << sh
if not b & 0x80:
return n
sh += 7
    def set_callback(self, f):
        # Register *f* as the handler for incoming PUBLISH messages.
        # NOTE(review): assumed signature f(topic, msg) — the consumer
        # (wait_msg) is not visible in this chunk; confirm.
        self.cb = f
def set_last_will(self, topic, msg, retain=False, qos=0):
assert 0 <= qos <= 2
assert topic
self.lw_topic = topic
self.lw_msg = msg
self.lw_qos = qos
self.lw_retain = retain
    def connect(self, clean_session=True):
        """Open the (optionally TLS) socket and perform the MQTT CONNECT handshake.

        Returns the CONNACK "session present" flag (0 or 1); raises
        MQTTException when the broker returns a non-zero CONNACK code.
        """
        self.sock = socket.socket()
        addr = socket.getaddrinfo(self.server, self.port)[0][-1]
        self.sock.connect(addr)
        if self.ssl:
            import ussl
            self.sock = ussl.wrap_socket(self.sock, **self.ssl_params)
        # 0x10 = CONNECT; the remaining-length bytes are filled in below
        premsg = bytearray(b"\x10\0\0\0\0\0")
        # variable header: protocol name "MQTT", level 4 (3.1.1), flags, keepalive
        msg = bytearray(b"\x04MQTT\x04\x02\0\0")
        sz = 10 + 2 + len(self.client_id)
        msg[6] = clean_session << 1
        if self.user is not None:
            sz += 2 + len(self.user) + 2 + len(self.pswd)
            msg[6] |= 0xC0  # username + password flags
        if self.keepalive:
            assert self.keepalive < 65536
            msg[7] |= self.keepalive >> 8
            msg[8] |= self.keepalive & 0x00FF
        if self.lw_topic:
            sz += 2 + len(self.lw_topic) + 2 + len(self.lw_msg)
            # will flag + will QoS bits
            msg[6] |= 0x4 | (self.lw_qos & 0x1) << 3 | (self.lw_qos & 0x2) << 3
            msg[6] |= self.lw_retain << 5
        i = 1
        # encode the remaining length as an MQTT variable-length integer
        while sz > 0x7F:
            premsg[i] = (sz & 0x7F) | 0x80
            sz >>= 7
            i += 1
        premsg[i] = sz
        self.sock.write(premsg, i + 2)
        self.sock.write(msg)
        # print(hex(len(msg)), hexlify(msg, ":"))
        self._send_str(self.client_id)
        if self.lw_topic:
            self._send_str(self.lw_topic)
            self._send_str(self.lw_msg)
        if self.user is not None:
            self._send_str(self.user)
            self._send_str(self.pswd)
        resp = self.sock.read(4)
        assert resp[0] == 0x20 and resp[1] == 0x02  # CONNACK, length 2
        if resp[3] != 0:
            raise MQTTException(resp[3])
        return resp[2] & 1
def disconnect(self):
self.sock.write(b"\xe0\0")
self.sock.close()
def ping(self):
self.sock.write(b"\xc0\0")
def publish(self, topic, msg, retain=False, qos=0):
pkt = bytearray(b"\x30\0\0\0")
pkt[0] |= qos << 1 | retain
sz = 2 + len(topic) + len(msg)
if qos > 0:
sz += 2
assert sz < 2097152
i = 1
while sz > 0x7F:
pkt[i] = (sz & 0x7F) | 0x80
sz >>= 7
i += 1
pkt[i] = sz
# print(hex(len(pkt)), hexlify(pkt, ":"))
self.sock.write(pkt, i + 1)
self._send_str(topic)
if qos > 0:
self.pid += 1
pid = self.pid
struct.pack_into("!H", pkt, 0, pid)
self.sock.write(pkt, 2)
self.sock.write(msg)
if qos == 1:
while 1:
op = self.wait_msg()
if op == 0x40:
sz = self.sock.read(1)
assert sz == b"\x02"
rcv_pid = self.sock.read(2)
rcv_pid = rcv_pid[0] << 8 | rcv_pid[1]
if pid == rcv_pid:
return
elif qos == 2:
assert 0
def subscribe(self, topic, qos=0):
assert self.cb is not None, "Subscribe callback is not set"
pkt = bytearray(b"\x82\0\0\0")
self.pid += 1
struct.pack_into("!BH", pkt, 1, 2 + 2 + len(topic) + 1, self.pid)
# print(hex(len(pkt)), hexlify(pkt, ":"))
self.sock.write(pkt)
self._send_str(topic)
self.sock.write(qos.to_bytes(1, "little"))
while 1:
op = self.wait_msg()
if op == 0x90:
resp = self.sock.read(4)
# print(resp)
assert resp[1] == pkt[2] and resp[2] == pkt[3]
if resp[3] == 0x80:
raise MQTTException(resp[3])
return
# Wait for a single incoming MQTT message and process it.
# Subscribed messages are delivered to a callback previously
# set by .set_callback() method. Other (internal) MQTT
# messages processed internally.
def wait_msg(self):
res = self.sock.read(1)
self.sock.setblocking(True)
if res is None:
return None
if res == b"":
raise OSError(-1)
if res == b"\xd0": # PINGRESP
sz = self.sock.read(1)[0]
assert sz == 0
return None
op = res[0]
if op & 0xF0 != 0x30:
return op
sz = self._recv_len()
topic_len = self.sock.read(2)
topic_len = (topic_len[0] << 8) | topic_len[1]
topic = self.sock.read(topic_len)
sz -= topic_len + 2
if op & 6:
pid = self.sock.read(2)
pid = pid[0] << 8 | pid[1]
sz -= 2
msg = self.sock.read(sz)
self.cb(topic, msg)
if op & 6 == 2:
pkt = bytearray(b"\x40\x02\0\0")
struct.pack_into("!H", pkt, 2, pid)
self.sock.write(pkt)
elif op & 6 == 4:
assert 0
# Checks whether a pending message from server is available.
# If not, returns immediately with None. Otherwise, does
# the same processing as wait_msg.
def check_msg(self):
self.sock.setblocking(False)
return self.wait_msg()
| components/py_engine/micropython-lib/micropython/umqtt.simple/umqtt/simple.py | 6,479 | print(hex(len(msg)), hexlify(msg, ":")) print(hex(len(pkt)), hexlify(pkt, ":")) print(hex(len(pkt)), hexlify(pkt, ":")) print(resp) Wait for a single incoming MQTT message and process it. Subscribed messages are delivered to a callback previously set by .set_callback() method. Other (internal) MQTT messages processed internally. PINGRESP Checks whether a pending message from server is available. If not, returns immediately with None. Otherwise, does the same processing as wait_msg. | 486 | en | 0.785338 |
from bcipy.feedback.visual.visual_feedback import VisualFeedback
from psychopy import core
from bcipy.helpers.load import load_json_parameters
from bcipy.display.display_main import init_display_window
# Demo script: present one visual-feedback trial and report its timing.
# Requires a working display; runs top-to-bottom with no entry-point guard.

# Load a parameters file
parameters = load_json_parameters(
    'bcipy/parameters/parameters.json', value_cast=True)
# Open the task window described by the parameters.
display = init_display_window(parameters)
clock = core.Clock()
# Start Visual Feedback
visual_feedback = VisualFeedback(
    display=display, parameters=parameters, clock=clock)
# Trial configuration: show stimulus 'A' compared against assertion 'B',
# with a red "Incorrect:" message.
stimulus = 'A'
assertion = 'B'
message = 'Incorrect:'
visual_feedback.message_color = 'red'
timing = visual_feedback.administer(
    stimulus, compare_assertion=assertion, message=message)
print(timing)
print(visual_feedback._type())
display.close()
| bcipy/feedback/demo/demo_visual_feedback.py | 749 | Load a parameters file Start Visual Feedback | 44 | ml | 0.05392 |
# NOTE(review): appears to be an auto-generated .NET API stub — every method
# body is a `pass` placeholder; do not add logic here.
class RevitLinkOperations(object,IDisposable):
    """
    This class is used to extend the IExternalResourceServer interface with methods to support operations
    specifically related to Revit links.
    """

    def Dispose(self):
        """ Dispose(self: RevitLinkOperations) """
        pass

    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: RevitLinkOperations,disposing: bool) """
        pass

    def SetGetLocalPathForOpenCallback(self,makeLocalCopyForOpen):
        """
        SetGetLocalPathForOpenCallback(self: RevitLinkOperations,makeLocalCopyForOpen: IGetLocalPathForOpenCallback)

        Sets the IGetLocalPathForOpenCallback that will support the "Open (and Unload)"
        command for Revit links
        obtained from an IExternalResourceServer.

        makeLocalCopyForOpen: The IGetLocalPathForOpenCallback that will support the "Open (and Unload)"
        command.
        """
        pass

    def SetOnLocalLinkSharedCoordinatesSavedCallback(self,onLocalLinkSharedCoordinatesSaved):
        """
        SetOnLocalLinkSharedCoordinatesSavedCallback(self: RevitLinkOperations,onLocalLinkSharedCoordinatesSaved: IOnLocalLinkSharedCoordinatesSavedCallback)

        Sets the callback that will be called when the Revit user saves new shared
        coordinate
        settings to a linked document obtained from an
        IExternalResourceServer.

        onLocalLinkSharedCoordinatesSaved: An IOnLocalLinkSharedCoordinatesSavedCallback object that can respond when the
        user
        saves new shared coordinates to a Revit link document obtained from
        IExternalResourceServer.
        """
        pass

    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass

    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass

    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass

    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: RevitLinkOperations) -> bool
    """
| release/stubs.min/Autodesk/Revit/DB/__init___parts/RevitLinkOperations.py | 2,401 | This class is used to extend the IExternalResourceServer interface with methods to support operations
specifically related to Revit links.
Dispose(self: RevitLinkOperations)
ReleaseUnmanagedResources(self: RevitLinkOperations,disposing: bool)
SetGetLocalPathForOpenCallback(self: RevitLinkOperations,makeLocalCopyForOpen: IGetLocalPathForOpenCallback)
Sets the IGetLocalPathForOpenCallback that will support the "Open (and Unload)"
command for Revit links
obtained from an IExternalResourceServer.
makeLocalCopyForOpen: The IGetLocalPathForOpenCallback that will support the "Open (and Unload)"
command.
SetOnLocalLinkSharedCoordinatesSavedCallback(self: RevitLinkOperations,onLocalLinkSharedCoordinatesSaved: IOnLocalLinkSharedCoordinatesSavedCallback)
Sets the callback that will be called when the Revit user saves new shared
coordinate
settings to a linked document obtained from an
IExternalResourceServer.
onLocalLinkSharedCoordinatesSaved: An IOnLocalLinkSharedCoordinatesSavedCallback object that can respond when the
user
saves new shared coordinates to a Revit link document obtained from
IExternalResourceServer.
__enter__(self: IDisposable) -> object
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature
__repr__(self: object) -> str | 1,538 | en | 0.56408 |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.SceneSwitch,
"description",
"""
Chooses between multiple input scene, passing through the
chosen input to the output.
""",
plugs = {
"index" : [
"description",
"""
The index of the input which is passed through. A value
of 0 chooses the first input, 1 the second and so on. Values
larger than the number of available inputs wrap back around to
the beginning.
"""
]
}
)
GafferUI.PlugValueWidget.registerCreator( GafferScene.SceneSwitch, "in[0-9]*", None )
| python/GafferSceneUI/SceneSwitchUI.py | 2,393 | Copyright (c) 2013, Image Engine Design Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of John Haddon nor the names of any other contributors to this software may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,584 | en | 0.889322 |
"""Generates the pins file for the SWM320"""
from __future__ import print_function
import re
import sys
import argparse
class Pin:
    """A single GPIO pin plus the identifiers needed to emit its C definition."""

    def __init__(self, name, port, pbit, preg, IRQn):
        self.name = name
        self.port = port
        self.pbit = pbit
        self.preg = preg
        self.IRQn = IRQn

    def print(self):
        """Emit the C `pin_obj_t` definition for this pin on stdout."""
        fields = (self.name, self.name, self.port, self.pbit, self.preg, self.IRQn)
        definition = 'pin_obj_t pin_{:4s} = PIN({:4s}, {:5s}, {:5s}, {:s}, {:10s});\n'.format(*fields)
        print(definition)

    def print_header(self, hdr_file):
        """Write the extern declaration for this pin into *hdr_file*."""
        declaration = 'extern pin_obj_t pin_{:4s};\n'.format(self.name)
        hdr_file.write(declaration)
class Pins:
    """Collection of Pin objects parsed from the chip's port header file."""

    # Matches lines such as "#define PORTA_PIN3_GPIO ..." in the port header.
    # Compiled once instead of re-matching the pattern string per line.
    _GPIO_DEFINE_RE = re.compile(r'#define PORT([ABCD])_PIN(\d+)_GPIO')

    def __init__(self):
        self.pins = []  # list of Pin

    def find_pin(self, port, pbit):
        """Return the Pin on *port*/*pbit*, or None if not registered."""
        for pin in self.pins:
            if pin.port == port and pin.pbit == pbit:
                return pin
        return None

    def find_pin_by_name(self, name):
        """Return the Pin named *name*, or None if not registered."""
        for pin in self.pins:
            if pin.name == name:
                return pin
        return None

    def parse_af_file(self, filename):
        """Scan the chip port header and register one Pin per GPIO define.

        Duplicate defines for an already-registered pin name are ignored.
        """
        with open(filename, 'r') as f:
            for line in f.readlines():
                match = self._GPIO_DEFINE_RE.match(line)
                if not match:
                    continue
                name = 'P' + match.group(1) + match.group(2)
                # Reuse the existing lookup instead of an inline scan
                # (the original duplicated find_pin_by_name here).
                if self.find_pin_by_name(name) is not None:
                    continue
                preg = 'PIN_BIT_BAND(%s, %2s)' % ('GPIO' + match.group(1), match.group(2))
                pin = Pin(name, 'GPIO' + match.group(1), 'PIN' + match.group(2), preg,
                          'GPIO%s_IRQn' % match.group(1))
                self.pins.append(pin)

    def print(self):
        """Print the C pin definitions and the MicroPython locals dict table."""
        for pin in self.pins:
            pin.print()
        print('')
        print('STATIC const mp_rom_map_elem_t pins_locals_dict_table[] = {')
        for pin in self.pins:
            print(' {{ MP_ROM_QSTR(MP_QSTR_{:5s}), MP_ROM_PTR(&pin_{:5s}) }},'.format(pin.name, pin.name))
        print('};')
        print('')
        print('MP_DEFINE_CONST_DICT(pins_locals_dict, pins_locals_dict_table);')

    def print_header(self, hdr_filename):
        """Write extern declarations for all pins to *hdr_filename*."""
        with open(hdr_filename, 'wt') as hdr_file:
            for pin in self.pins:
                pin.print_header(hdr_file)

    def print_qstr(self, qstr_filename):
        """Write Q(<name>) qstr entries for all pins to *qstr_filename*."""
        with open(qstr_filename, 'wt') as qstr_file:
            for pin in self.pins:
                print('Q({})'.format(pin.name), file=qstr_file)
def main():
    """Parse command-line options and generate the pins C source (stdout),
    qstr header, and pin header files for the SWM320 board."""
    parser = argparse.ArgumentParser(
        prog="make-pins.py",
        usage="%(prog)s [options] [command]",
        description="Generate board specific pin file"
    )
    parser.add_argument(
        "-a", "--af",
        dest="af_filename",
        help="Specifies the alternate function file for the chip",
        # NOTE(review): "SWM3200_port.h" looks like a typo for "SWM320_port.h" — confirm.
        default="../chip/SWM3200_port.h"
    )
    parser.add_argument(
        "-p", "--prefix",
        dest="prefix_filename",
        help="Specifies beginning portion of generated pins file",
        default="SWM320_prefix.c"
    )
    parser.add_argument(
        "-q", "--qstr",
        dest="qstr_filename",
        help="Specifies name of generated qstr header file",
        default="../build-SWM320Lite/pins_qstr.h"
    )
    parser.add_argument(
        "-r", "--hdr",
        dest="hdr_filename",
        help="Specifies name of generated pin header file",
        default="../build-SWM320Lite/pins.h"
    )
    args = parser.parse_args(sys.argv[1:])

    pins = Pins()

    # Generated C source goes to stdout; record the inputs used as comments.
    print('// This file was automatically generated by make-pins.py')
    print('//')
    if args.af_filename:
        print('// --af {:s}'.format(args.af_filename))
        pins.parse_af_file(args.af_filename)

    if args.prefix_filename:
        # Emit the hand-written prefix verbatim before the generated tables.
        print('// --prefix {:s}'.format(args.prefix_filename))
        print('')
        with open(args.prefix_filename, 'r') as prefix_file:
            print(prefix_file.read())

    pins.print()
    pins.print_qstr(args.qstr_filename)
    pins.print_header(args.hdr_filename)
# Script entry point.
if __name__ == "__main__":
    main()
| ports/swm320/boards/make-pins.py | 4,087 | Holds the information associated with a pin.
Generates the pins file for the SWM320
list of Pin | 97 | en | 0.866856 |
# pylint: disable=no-member, redefined-outer-name
"""
Annalist resource types module
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import logging
# log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL
# """
# Each resource type URI or CURIE is associated with a list of one or more file
# extensions and MIME content-types.
#
# The first of each list indicates the value used when creating or serving a
# resource of the indicated type. Any other values given are alternatives
# that are accepted as supplying a resource that is compatible with the type.
#
# File extensions and MIME types are presented as pairs so that an extension
# can be inferred when a MIME content-type is given, and vice versa.
# """
# Map from resource-type URI/CURIE to its accepted (file extension,
# MIME content-type) pairs; the first pair in each list is the preferred one
# (see the module comment above).
resource_types = (
    { ANNAL.CURIE.Metadata:
      [ ("jsonld",  "application/ld+json")
      , ("json",    "application/json")
      ]
    , ANNAL.CURIE.Text:
      [ ("txt",     "text/plain")
      ]
    , ANNAL.CURIE.Richtext:
      [ ("md",      "text/markdown")
      , ("txt",     "text/plain")
      ]
    , ANNAL.CURIE.Image:
      [ ("image",   "image/*")        # Default extension
      , ("png",     "image/png")
      , ("jpg",     "image/jpeg")
      , ("jpeg",    "image/jpeg")
      , ("gif",     "image/gif")
      , ("tiff",    "image/tiff")
      , ("svg",     "image/svg")
      , ("pdf",     "application/pdf")
      ]
    , ANNAL.CURIE.Audio:
      [ ("audio",   "audio/*")        # Default extension
      , ("mp3",     "audio/mpeg")
      , ("mp4",     "audio/mp4")
      , ("wav",     "audio/wav")
      , ("ogg",     "audio/ogg")
      #@@ needs fleshing out?
      ]
    , ANNAL.CURIE.Resource:
      [ ("md",      "text/markdown")
      , ("txt",     "text/plain")
      , ("png",     "image/png")
      , ("jpg",     "image/jpeg")
      , ("jpeg",    "image/jpeg")
      , ("gif",     "image/gif")
      , ("tiff",    "image/tiff")
      , ("svg",     "image/svg")
      , ("pdf",     "application/pdf")
      ]
    })

# Fallback pair list used when a type URI is not present in resource_types.
default_types = [("dat", "application/octet-stream")]
def file_extension(typeuri):
    """
    Returns preferred file extension for resource type

    >>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
    True
    >>> file_extension(ANNAL.CURIE.Richtext) == "md"
    True
    """
    ext_mime_pairs = resource_types.get(typeuri, default_types)
    preferred_ext, _mime = ext_mime_pairs[0]
    return preferred_ext
def content_type(typeuri):
    """
    Returns preferred MIME content-type for resource type

    >>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
    True
    >>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
    True
    """
    ext_mime_pairs = resource_types.get(typeuri, default_types)
    _ext, preferred_mime = ext_mime_pairs[0]
    return preferred_mime
def file_extension_for_content_type(typeuri, content_type):
    """
    Returns file extension for given content-type as an instance of a given type URI,
    or None.

    >>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
    True
    >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
    True
    >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
    True
    >>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
    True
    """
    candidates = resource_types.get(typeuri, default_types)
    return next(
        (ext for ext, mime in candidates if mime == content_type),
        None
        )
def content_type_for_file_extension(typeuri, file_extension):
    """
    Returns content-type for given file extension as an instance of a given type URI,
    or None.

    >>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
    True
    >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
    True
    >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
    True
    >>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
    True
    """
    candidates = resource_types.get(typeuri, default_types)
    return next(
        (mime for ext, mime in candidates if ext == file_extension),
        None
        )
if __name__ == "__main__":
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
# End.
| src/annalist_root/annalist/resourcetypes.py | 4,340 | Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
Annalist resource types module
pylint: disable=no-member, redefined-outer-name import logging log = logging.getLogger(__name__) """ Each resource type URI or CURIE is associated with a list of one or more file extensions and MIME content-types. The first of each list indicates the value used when creating or serving a resource of the indicated type. Any other values given are alternatives that are accepted as supplying a resource that is compatible with the type. File extensions and MIME types are presented as pairs so that an extension can be inferred when a MIME content-type is given, and vice versa. """ Default extension Default extension@@ needs fleshing out? End. | 1,927 | en | 0.519032 |
import logging
import os
from collections import defaultdict
from typing import Dict
from typing import List
from typing import Union
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachecontrol.controller import logger as cache_control_logger
from cachy import CacheManager
from html5lib.html5parser import parse
from poetry.core.packages import Dependency
from poetry.core.packages import Package
from poetry.core.packages import dependency_from_pep_508
from poetry.core.packages.utils.link import Link
from poetry.core.semver import VersionConstraint
from poetry.core.semver import VersionRange
from poetry.core.semver import parse_constraint
from poetry.core.semver.exceptions import ParseVersionError
from poetry.core.version.markers import parse_marker
from poetry.locations import REPOSITORY_CACHE_DIR
from poetry.utils._compat import Path
from poetry.utils._compat import to_str
from poetry.utils.helpers import download_file
from poetry.utils.helpers import temporary_directory
from poetry.utils.patterns import wheel_file_re
from ..inspection.info import PackageInfo
from .exceptions import PackageNotFound
from .remote_repository import RemoteRepository
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
# Quieten cachecontrol's verbose HTTP-cache logging.
cache_control_logger.setLevel(logging.ERROR)

logger = logging.getLogger(__name__)
class PyPiRepository(RemoteRepository):
CACHE_VERSION = parse_constraint("1.0.0")
def __init__(self, url="https://pypi.org/", disable_cache=False, fallback=True):
super(PyPiRepository, self).__init__(url.rstrip("/") + "/simple/")
self._base_url = url
self._disable_cache = disable_cache
self._fallback = fallback
release_cache_dir = REPOSITORY_CACHE_DIR / "pypi"
self._cache = CacheManager(
{
"default": "releases",
"serializer": "json",
"stores": {
"releases": {"driver": "file", "path": str(release_cache_dir)},
"packages": {"driver": "dict"},
},
}
)
self._cache_control_cache = FileCache(str(release_cache_dir / "_http"))
self._name = "PyPI"
@property
def session(self):
return CacheControl(requests.session(), cache=self._cache_control_cache)
def find_packages(self, dependency): # type: (Dependency) -> List[Package]
"""
Find packages on the remote server.
"""
constraint = dependency.constraint
if constraint is None:
constraint = "*"
if not isinstance(constraint, VersionConstraint):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (
constraint.max is not None
and constraint.max.is_prerelease()
or constraint.min is not None
and constraint.min.is_prerelease()
):
allow_prereleases = True
try:
info = self.get_package_info(dependency.name)
except PackageNotFound:
self._log(
"No packages found for {} {}".format(dependency.name, str(constraint)),
level="debug",
)
return []
packages = []
ignored_pre_release_packages = []
for version, release in info["releases"].items():
if not release:
# Bad release
self._log(
"No release information found for {}-{}, skipping".format(
dependency.name, version
),
level="debug",
)
continue
try:
package = Package(info["info"]["name"], version)
except ParseVersionError:
self._log(
'Unable to parse version "{}" for the {} package, skipping'.format(
version, dependency.name
),
level="debug",
)
continue
if package.is_prerelease() and not allow_prereleases:
if constraint.is_any():
# we need this when all versions of the package are pre-releases
ignored_pre_release_packages.append(package)
continue
if not constraint or (constraint and constraint.allows(package.version)):
packages.append(package)
self._log(
"{} packages found for {} {}".format(
len(packages), dependency.name, str(constraint)
),
level="debug",
)
return packages or ignored_pre_release_packages
def package(
self,
name, # type: str
version, # type: str
extras=None, # type: (Union[list, None])
): # type: (...) -> Package
return self.get_release_info(name, version).to_package(name=name, extras=extras)
def search(self, query):
results = []
search = {"q": query}
response = requests.session().get(self._base_url + "search", params=search)
content = parse(response.content, namespaceHTMLElements=False)
for result in content.findall(".//*[@class='package-snippet']"):
name = result.find("h3/*[@class='package-snippet__name']").text
version = result.find("h3/*[@class='package-snippet__version']").text
if not name or not version:
continue
description = result.find("p[@class='package-snippet__description']").text
if not description:
description = ""
try:
result = Package(name, version, description)
result.description = to_str(description.strip())
results.append(result)
except ParseVersionError:
self._log(
'Unable to parse version "{}" for the {} package, skipping'.format(
version, name
),
level="debug",
)
return results
def get_package_info(self, name): # type: (str) -> dict
"""
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
)
def _get_package_info(self, name): # type: (str) -> dict
data = self._get("pypi/{}/json".format(name))
if data is None:
raise PackageNotFound("Package [{}] not found.".format(name))
return data
def get_release_info(self, name, version): # type: (str, str) -> PackageInfo
"""
Return the release information given a package name and a version.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
if self._disable_cache:
return PackageInfo.load(self._get_release_info(name, version))
cached = self._cache.remember_forever(
"{}:{}".format(name, version), lambda: self._get_release_info(name, version)
)
cache_version = cached.get("_cache_version", "0.0.0")
if parse_constraint(cache_version) != self.CACHE_VERSION:
# The cache must be updated
self._log(
"The cache for {} {} is outdated. Refreshing.".format(name, version),
level="debug",
)
cached = self._get_release_info(name, version)
self._cache.forever("{}:{}".format(name, version), cached)
return PackageInfo.load(cached)
def find_links_for_package(self, package):
json_data = self._get("pypi/{}/{}/json".format(package.name, package.version))
if json_data is None:
return []
links = []
for url in json_data["urls"]:
h = "sha256={}".format(url["digests"]["sha256"])
links.append(Link(url["url"] + "#" + h))
return links
def _get_release_info(self, name, version): # type: (str, str) -> dict
self._log("Getting info for {} ({}) from PyPI".format(name, version), "debug")
json_data = self._get("pypi/{}/{}/json".format(name, version))
if json_data is None:
raise PackageNotFound("Package [{}] not found.".format(name))
info = json_data["info"]
data = PackageInfo(
name=info["name"],
version=info["version"],
summary=info["summary"],
platform=info["platform"],
requires_dist=info["requires_dist"],
requires_python=info["requires_python"],
files=info.get("files", []),
cache_version=str(self.CACHE_VERSION),
)
try:
version_info = json_data["releases"][version]
except KeyError:
version_info = []
for file_info in version_info:
data.files.append(
{
"file": file_info["filename"],
"hash": "sha256:" + file_info["digests"]["sha256"],
}
)
if self._fallback and data.requires_dist is None:
self._log("No dependencies found, downloading archives", level="debug")
# No dependencies set (along with other information)
# This might be due to actually no dependencies
# or badly set metadata when uploading
# So, we need to make sure there is actually no
# dependencies by introspecting packages
urls = defaultdict(list)
for url in json_data["urls"]:
# Only get sdist and wheels if they exist
dist_type = url["packagetype"]
if dist_type not in ["sdist", "bdist_wheel"]:
continue
urls[dist_type].append(url["url"])
if not urls:
return data.asdict()
info = self._get_info_from_urls(urls)
data.requires_dist = info.requires_dist
if not data.requires_python:
data.requires_python = info.requires_python
return data.asdict()
def _get(self, endpoint): # type: (str) -> Union[dict, None]
try:
json_response = self.session.get(self._base_url + endpoint)
except requests.exceptions.TooManyRedirects:
# Cache control redirect loop.
# We try to remove the cache and try again
self._cache_control_cache.delete(self._base_url + endpoint)
json_response = self.session.get(self._base_url + endpoint)
if json_response.status_code == 404:
return None
json_data = json_response.json()
return json_data
def _get_info_from_urls(self, urls): # type: (Dict[str, List[str]]) -> PackageInfo
# Checking wheels first as they are more likely to hold
# the necessary information
if "bdist_wheel" in urls:
# Check fo a universal wheel
wheels = urls["bdist_wheel"]
universal_wheel = None
universal_python2_wheel = None
universal_python3_wheel = None
platform_specific_wheels = []
for wheel in wheels:
link = Link(wheel)
m = wheel_file_re.match(link.filename)
if not m:
continue
pyver = m.group("pyver")
abi = m.group("abi")
plat = m.group("plat")
if abi == "none" and plat == "any":
# Universal wheel
if pyver == "py2.py3":
# Any Python
universal_wheel = wheel
elif pyver == "py2":
universal_python2_wheel = wheel
else:
universal_python3_wheel = wheel
else:
platform_specific_wheels.append(wheel)
if universal_wheel is not None:
return self._get_info_from_wheel(universal_wheel)
info = None
if universal_python2_wheel and universal_python3_wheel:
info = self._get_info_from_wheel(universal_python2_wheel)
py3_info = self._get_info_from_wheel(universal_python3_wheel)
if py3_info.requires_dist:
if not info.requires_dist:
info.requires_dist = py3_info.requires_dist
return info
py2_requires_dist = set(
dependency_from_pep_508(r).to_pep_508()
for r in info.requires_dist
)
py3_requires_dist = set(
dependency_from_pep_508(r).to_pep_508()
for r in py3_info.requires_dist
)
base_requires_dist = py2_requires_dist & py3_requires_dist
py2_only_requires_dist = py2_requires_dist - py3_requires_dist
py3_only_requires_dist = py3_requires_dist - py2_requires_dist
# Normalizing requires_dist
requires_dist = list(base_requires_dist)
for requirement in py2_only_requires_dist:
dep = dependency_from_pep_508(requirement)
dep.marker = dep.marker.intersect(
parse_marker("python_version == '2.7'")
)
requires_dist.append(dep.to_pep_508())
for requirement in py3_only_requires_dist:
dep = dependency_from_pep_508(requirement)
dep.marker = dep.marker.intersect(
parse_marker("python_version >= '3'")
)
requires_dist.append(dep.to_pep_508())
info.requires_dist = sorted(list(set(requires_dist)))
if info:
return info
# Prefer non platform specific wheels
if universal_python3_wheel:
return self._get_info_from_wheel(universal_python3_wheel)
if universal_python2_wheel:
return self._get_info_from_wheel(universal_python2_wheel)
if platform_specific_wheels and "sdist" not in urls:
# Pick the first wheel available and hope for the best
return self._get_info_from_wheel(platform_specific_wheels[0])
return self._get_info_from_sdist(urls["sdist"][0])
def _get_info_from_wheel(self, url): # type: (str) -> PackageInfo
self._log(
"Downloading wheel: {}".format(urlparse.urlparse(url).path.rsplit("/")[-1]),
level="debug",
)
filename = os.path.basename(urlparse.urlparse(url).path.rsplit("/")[-1])
with temporary_directory() as temp_dir:
filepath = Path(temp_dir) / filename
self._download(url, str(filepath))
return PackageInfo.from_wheel(filepath)
def _get_info_from_sdist(self, url): # type: (str) -> PackageInfo
self._log(
"Downloading sdist: {}".format(urlparse.urlparse(url).path.rsplit("/")[-1]),
level="debug",
)
filename = os.path.basename(urlparse.urlparse(url).path)
with temporary_directory() as temp_dir:
filepath = Path(temp_dir) / filename
self._download(url, str(filepath))
return PackageInfo.from_sdist(filepath)
    def _download(self, url, dest):  # type: (str, str) -> None
        """Download *url* to the local path *dest* using this repository's
        HTTP session (``self.session``)."""
        return download_file(url, dest, session=self.session)
def _log(self, msg, level="info"):
getattr(logger, level)("<debug>{}:</debug> {}".format(self._name, msg))
| venv/Lib/site-packages/poetry/repositories/pypi_repository.py | 16,303 | Find packages on the remote server.
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
Return the release information given a package name and a version.
The information is returned from the cache if it exists
or retrieved from the remote server.
type: (Dependency) -> List[Package] Bad release we need this when all versions of the package are pre-releases type: str type: str type: (Union[list, None]) type: (...) -> Package type: (str) -> dict type: (str) -> dict type: (str, str) -> PackageInfo The cache must be updated type: (str, str) -> dict No dependencies set (along with other information) This might be due to actually no dependencies or badly set metadata when uploading So, we need to make sure there is actually no dependencies by introspecting packages Only get sdist and wheels if they exist type: (str) -> Union[dict, None] Cache control redirect loop. We try to remove the cache and try again type: (Dict[str, List[str]]) -> PackageInfo Checking wheels first as they are more likely to hold the necessary information Check fo a universal wheel Universal wheel Any Python Normalizing requires_dist Prefer non platform specific wheels Pick the first wheel available and hope for the best type: (str) -> PackageInfo type: (str) -> PackageInfo type: (str, str) -> None | 1,376 | en | 0.787626 |
#Hkr
import msvcrt
import os
import sys
import random
from ctypes import windll, byref, wintypes
from ctypes.wintypes import SMALL_RECT
# Win32 handle identifier passed to GetStdHandle for standard output.
STDOUT = -11
# Desired console window size: WIN_X columns by WIN_Y rows.
WIN_X = 100
WIN_Y = 60
hdl = windll.kernel32.GetStdHandle(STDOUT)
rect = wintypes.SMALL_RECT(0, 0, WIN_X, WIN_Y)  # (left, top, right, bottom)
windll.kernel32.SetConsoleWindowInfo(hdl, True, byref(rect))
# Empty os.system() call -- commonly used to make the Windows console
# start honouring ANSI escape sequences.
os.system('')
# msvcrt extended scan codes used below (sent after a 224 prefix byte):
# 72 = up arrow, 75 = left, 80 = down, 77 = right.
def Hello():
    """Interactive paint mode: arrow keys move the cursor and paint a
    randomly coloured cell at the new position; Enter resets the screen.

    Loops forever reading raw keypresses via msvcrt; only terminated by
    killing the process.
    """
    posx = 3  # cursor column (CSI position is written as row;col)
    posy = 3  # cursor row
    sys.stdout.write('WELCOME! press any key to continue\n')
    while True:
        key = ord(msvcrt.getch())
        if key == 13:
            # Enter: full terminal reset (clears the screen).
            sys.stdout.write('\x1Bc')
        elif key == 224:
            # 224 prefixes extended keys; the next byte is the scan code.
            key = ord(msvcrt.getch())
            if key == 72:    # up
                posy -= 1
            elif key == 75:  # left
                posx -= 1
            elif key == 80:  # down
                posy += 1
            elif key == 77:  # right
                posx += 1
            # Clamp the cursor to the window.  posx is the column and must
            # be bounded by WIN_X, posy is the row bounded by WIN_Y; the
            # original code had these two upper limits swapped.
            if posx < 0:
                posx = 0
            if posy < 0:
                posy = 0
            if posx > WIN_X - 2:
                posx = WIN_X - 2
            if posy > WIN_Y - 2:
                posy = WIN_Y - 2
            sys.stdout.write(f"\x1B[48;2;{random.randrange(0,255)};{random.randrange(0,255)};{random.randrange(0,255)}m\x1B[{posy};{posx}H \n")
def Exit():
    """Menu action: terminate the program with exit status 0."""
    # sys.exit is the correct way to end a script; the exit() builtin is
    # installed by the site module and intended for interactive use only
    # (it is absent under `python -S`).
    sys.exit(0)
selected = 0  # index of the currently highlighted menu entry
# Menu label -> action callable; order defines on-screen order.
menu = {
    "run":Hello,
    "exit":Exit
}
sys.stdout.write('WELCOME! press any key to continue\n\033[?25h')
while True:
    key = ord(msvcrt.getch())
    # 'w' (119) moves the highlight up, 's' (115) down (wrapping via %),
    # Enter (13) clears the screen and invokes the selected action.
    if key == 119:
        selected = (selected-1) % len(menu)
    elif key == 115:
        selected = (selected+1) % len(menu)
    elif key == 13:
        sys.stdout.write('\x1Bc')
        menu[list(menu.keys())[selected]]()
    # Redraw: reset the terminal, then print every entry, marking the
    # highlighted one with '>'.
    sys.stdout.write('\x1Bc')
    for elem_num in range(len(menu)):
        if elem_num == selected:
            sys.stdout.write(f'> {list(menu.keys())[elem_num]}\n')
        else:
            sys.stdout.write(f' {list(menu.keys())[elem_num]}\n')
# Unreachable: the loop above never breaks (Exit raises SystemExit).
sys.stdout.write('\x1Bc')
from django.utils.cache import get_conditional_response
from django.utils.http import http_date, parse_http_date_safe, unquote_etag
class ConditionalGetMiddleware(object):
    """
    Handles conditional GET operations. When the outgoing response carries
    an ETag or Last-Modified header and the request supplied If-None-Match
    or If-Modified-Since, the response may be replaced by an
    HttpNotModified. Also stamps the Date header and, for non-streaming
    responses, an explicit Content-Length.
    """
    def process_response(self, request, response):
        # Every response gets a Date header.
        response['Date'] = http_date()
        # Streaming responses have no materialized body to measure.
        if not (response.streaming or response.has_header('Content-Length')):
            response['Content-Length'] = str(len(response.content))

        etag = response.get('ETag')
        last_modified = response.get('Last-Modified')
        if last_modified:
            last_modified = parse_http_date_safe(last_modified)

        # Without either validator there is nothing conditional to do.
        if not (etag or last_modified):
            return response

        return get_conditional_response(
            request,
            etag=unquote_etag(etag),
            last_modified=last_modified,
            response=response,
        )
| venv/lib/python2.7/site-packages/django/middleware/http.py | 1,141 | Handles conditional GET operations. If the response has an ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified.
Also sets the Date and Content-Length response-headers. | 249 | en | 0.915949 |
from setuptools import setup,find_packages
import os
import shutil
# Remove any stale dist folder so the build starts from a clean slate.
if os.path.exists("dist"):
    shutil.rmtree("dist")
def readme():
    """Return the package long description read from README.rst."""
    with open('README.rst') as handle:
        return handle.read()
# Single source of truth for the release version; write_version_py()
# below copies it into the generated version.py module.
VERSION = '1.0.53'
def write_version_py(filename='SigProfilerTopography/version.py'):
    """Generate a version module so the installed package can report its
    own version.

    Overwrites *filename* with ``short_version`` and ``version``
    assignments derived from the module-level VERSION constant.
    """
    # Copied from numpy setup.py
    cnt = """
# THIS FILE IS GENERATED FROM SIGPROFILERTOPOGRAPHY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
"""
    # Use a context manager so the file is closed even if the write fails
    # (the original open/write/close leaked the handle on error).
    with open(filename, 'w') as fh:
        fh.write(cnt % {'version': VERSION,})


write_version_py()
# Distribution metadata; install_requires pins minimum versions of the
# scientific stack and the sibling SigProfiler tools.
setup(name="SigProfilerTopography",
      version=VERSION,
      author="Burcak Otlu",
      author_email="burcakotlu@eng.ucsd.edu",
      description="SigProfilerTopography provides topography analyses for substitutions, dinucleotides and indels for all given samples.",
      url="https://github.com/AlexandrovLab/SigProfilerTopography",
      license='UCSD',
      packages=find_packages(),
      install_requires=[
          "SigProfilerMatrixGenerator>=1.1.27",
          "SigProfilerSimulator>=1.1.2",
          "XlsxWriter>=1.3.7",
          "pandas>=1.1.5",
          "numpy>=1.20.1",
          "matplotlib>=2.2.2",
          "scipy>=1.1.0",
          "statsmodels>=0.9.0",
          "fastrand>=1.2",
          "psutil>=5.6.3"],
      include_package_data=True,
      zip_safe=False)
| setup.py | 1,300 | remove the dist folder first if exists Copied from numpy setup.py | 65 | en | 0.539109 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmts.endpoint import endpoint_data
class SubmitFpCompareJobRequest(RpcRequest):
    """RPC request wrapper for the MTS ``SubmitFpCompareJob`` operation
    (API version 2014-06-18).

    Each get_*/set_* pair below reads or writes one query parameter of
    the underlying request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Mts', '2014-06-18', 'SubmitFpCompareJob', 'mts')
        self.set_method('POST')
        # Attach endpoint resolution data when the base class exposes the
        # corresponding attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_ResourceOwnerId(self):
        """Return the ResourceOwnerId query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        """Store ResourceOwnerId as a query parameter."""
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_FpDBId(self):
        """Return the FpDBId query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('FpDBId')

    def set_FpDBId(self, FpDBId):
        """Store FpDBId as a query parameter."""
        self.add_query_param('FpDBId', FpDBId)

    def get_MasterMedia(self):
        """Return the MasterMedia query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('MasterMedia')

    def set_MasterMedia(self, MasterMedia):
        """Store MasterMedia as a query parameter."""
        self.add_query_param('MasterMedia', MasterMedia)

    def get_UserData(self):
        """Return the UserData query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('UserData')

    def set_UserData(self, UserData):
        """Store UserData as a query parameter."""
        self.add_query_param('UserData', UserData)

    def get_QueryMedia(self):
        """Return the QueryMedia query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('QueryMedia')

    def set_QueryMedia(self, QueryMedia):
        """Store QueryMedia as a query parameter."""
        self.add_query_param('QueryMedia', QueryMedia)

    def get_ResourceOwnerAccount(self):
        """Return the ResourceOwnerAccount query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        """Store ResourceOwnerAccount as a query parameter."""
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        """Return the OwnerAccount query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        """Store OwnerAccount as a query parameter."""
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        """Return the OwnerId query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        """Store OwnerId as a query parameter."""
        self.add_query_param('OwnerId', OwnerId)

    def get_PipelineId(self):
        """Return the PipelineId query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('PipelineId')

    def set_PipelineId(self, PipelineId):
        """Store PipelineId as a query parameter."""
        self.add_query_param('PipelineId', PipelineId)

    def get_MatchedFrameStorage(self):
        """Return the MatchedFrameStorage query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('MatchedFrameStorage')

    def set_MatchedFrameStorage(self, MatchedFrameStorage):
        """Store MatchedFrameStorage as a query parameter."""
        self.add_query_param('MatchedFrameStorage', MatchedFrameStorage)
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowWhitelistResponse(SdkResponse):
    """Response model for the ShowWhitelist API.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the wire format.
    """

    sensitive_list = []

    openapi_types = {
        'whitelist': 'WhitelistResp'
    }

    attribute_map = {
        'whitelist': 'whitelist'
    }

    def __init__(self, whitelist=None):
        """ShowWhitelistResponse - a model defined in huaweicloud sdk"""
        super().__init__()
        self._whitelist = None
        self.discriminator = None
        if whitelist is not None:
            self.whitelist = whitelist

    @property
    def whitelist(self):
        """Whitelist payload of this response.

        :rtype: WhitelistResp
        """
        return self._whitelist

    @whitelist.setter
    def whitelist(self, whitelist):
        """Set the whitelist payload of this response.

        :type: WhitelistResp
        """
        self._whitelist = whitelist

    def to_dict(self):
        """Serialize the model into a plain dictionary."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            elif name in self.sensitive_list:
                # Mask sensitive scalar fields.
                result[name] = "****"
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal when they share type and attributes."""
        return isinstance(other, ShowWhitelistResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | 2,876 | Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
ShowWhitelistResponse - a model defined in huaweicloud sdk
Returns true if both objects are not equal
For `print` and `pprint`
Returns the model properties as a dict
Returns the string representation of the model
Gets the whitelist of this ShowWhitelistResponse.
:return: The whitelist of this ShowWhitelistResponse.
:rtype: WhitelistResp
Sets the whitelist of this ShowWhitelistResponse.
:param whitelist: The whitelist of this ShowWhitelistResponse.
:type: WhitelistResp
coding: utf-8 | 765 | en | 0.672647 |
"""optik.option_parser
Provides the OptionParser and Values classes.
"""
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Optik/option_parser.py 0.96.90.D001 2005/02/15 20:11:37 knight"
# Original Optik revision this is based on:
__Optik_revision__ = "option_parser.py,v 1.38.2.1 2002/07/23 01:51:14 gward Exp"
# Copyright (c) 2001 Gregory P. Ward. All rights reserved.
# See the README.txt distributed with Optik for licensing terms.
# created 2001/10/17, GPW (from optik.py)
import sys, os
import string
import types
from SCons.Optik.option import Option, NO_DEFAULT
from SCons.Optik.errors import OptionConflictError, OptionValueError, BadOptionError
def get_prog_name ():
    """Return the bare filename of the running script (sys.argv[0])."""
    return os.path.split(sys.argv[0])[1]
# Sentinel values: pass as an option's help text (or as the parser's
# usage string) to suppress the corresponding output entirely.
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
    """Simple attribute container holding the parsed option values."""

    def __init__ (self, defaults=None):
        # Seed the instance attributes from the supplied defaults mapping.
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)

    def _update_careful (self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if dict.has_key(attr):
                dval = dict[attr]
                # None values never override an existing attribute.
                if dval is not None:
                    setattr(self, attr, dval)

    def _update_loose (self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)

    def _update (self, dict, mode):
        # Dispatch on the update mode; anything other than "careful" or
        # "loose" is a programming error.
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError, "invalid update mode: %s" % (repr(mode),)

    def read_module (self, modname, mode="careful"):
        # Import the named module and absorb its top-level names.
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)

    def read_file (self, filename, mode="careful"):
        # NOTE(review): executes the file as Python code -- only use with
        # trusted input.
        vars = {}
        execfile(filename, vars)
        self._update(vars, mode)

    def ensure_value (self, attr, value):
        # Set 'attr' to 'value' only if it is missing or None, then return
        # the (possibly pre-existing) attribute value.
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)
class OptionParser:
    """
    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      usage : string
        a usage string for your program.  Before it is displayed
        to the user, "%prog" will be expanded to the name of
        your program (os.path.basename(sys.argv[0])).
      option_list : [Option]
        the list of all options accepted on the command-line of
        this program
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appears in this
        dictionary multiple times.
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary.
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination.
      allow_interspersed_args : boolean = true
        if true, positional arguments may be interspersed with options.
        Assuming -a and -b each take a single argument, the command-line
          -ablah foo bar -bboo baz
        will be interpreted the same as
          -ablah -bboo -- foo bar baz
        If this flag were false, that command line would be interpreted as
          -ablah -- foo bar -bboo baz
        -- ie. we stop processing options as soon as we see the first
        non-option argument.  (This is the tradition followed by
        Python's getopt module, Perl's Getopt::Std, and other argument-
        parsing libraries, but it is generally annoying to users.)
      rargs : [string]
        the argument list currently being parsed.  Only set when
        parse_args() is active, and continually trimmed down as
        we consume arguments.  Mainly there for the benefit of
        callback options.
      largs : [string]
        the list of leftover arguments that we have skipped while
        parsing options.  If allow_interspersed_args is false, this
        list is always empty.
      values : Values
        the set of option values currently being accumulated.  Only
        set when parse_args() is active.  Also mainly for callbacks.

    Because of the 'rargs', 'largs', and 'values' attributes,
    OptionParser is not thread-safe.  If, for some perverse reason, you
    need to parse command-line arguments simultaneously in different
    threads, use different OptionParser instances.
    """

    standard_option_list = []

    def __init__ (self,
                  usage=None,
                  option_list=None,
                  option_class=Option,
                  version=None,
                  conflict_handler="error"):
        self.set_usage(usage)
        self.option_class = option_class
        self.version = version
        self.set_conflict_handler(conflict_handler)
        self.allow_interspersed_args = 1

        # Create the various lists and dicts that constitute the
        # "option list".  See class docstring for details about
        # each attribute.
        self._create_option_list()

        # Populate the option list; initial sources are the
        # standard_option_list class attribute, the 'option_list'
        # argument, and the STD_VERSION_OPTION global (if 'version'
        # supplied).
        self._populate_option_list(option_list)

        self._init_parsing_state()

    # -- Private methods -----------------------------------------------
    # (used by the constructor)

    def _create_option_list (self):
        self.option_list = []
        self._short_opt = {}        # single letter -> Option instance
        self._long_opt = {}         # long option -> Option instance
        self.defaults = {}          # maps option dest -> default value

    def _populate_option_list (self, option_list):
        if self.standard_option_list:
            self.add_options(self.standard_option_list)
        if option_list:
            self.add_options(option_list)

    def _init_parsing_state (self):
        # These are set in parse_args() for the convenience of callbacks.
        self.rargs = None
        self.largs = None
        self.values = None

    # -- Simple modifier methods ---------------------------------------

    def set_usage (self, usage):
        # None selects the default template; SUPPRESS_USAGE disables the
        # usage message entirely.
        if usage is None:
            self.usage = "usage: %prog [options]"
        elif usage is SUPPRESS_USAGE:
            self.usage = None
        else:
            self.usage = usage

    def enable_interspersed_args (self):
        self.allow_interspersed_args = 1

    def disable_interspersed_args (self):
        self.allow_interspersed_args = 0

    def set_conflict_handler (self, handler):
        if handler not in ("ignore", "error", "resolve"):
            raise ValueError, "invalid conflict_resolution value %s" % (repr(handler),)
        self.conflict_handler = handler

    def set_default (self, dest, value):
        self.defaults[dest] = value

    def set_defaults (self, **kwargs):
        self.defaults.update(kwargs)

    def get_default_values(self):
        # Fresh Values instance seeded with the registered defaults.
        return Values(self.defaults)

    # -- Option-adding methods -----------------------------------------

    def _check_conflict (self, option):
        # Collect every (opt_string, existing_option) pair that clashes
        # with the new option, then apply the configured conflict policy.
        conflict_opts = []
        for opt in option._short_opts:
            if self._short_opt.has_key(opt):
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if self._long_opt.has_key(opt):
                conflict_opts.append((opt, self._long_opt[opt]))

        if conflict_opts:
            handler = self.conflict_handler
            if handler == "ignore":     # behaviour for Optik 1.0, 1.1
                pass
            elif handler == "error":    # new in 1.2
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % string.join( map( lambda x: x[0], conflict_opts),", "),
                    option)
            elif handler == "resolve":  # new in 1.2
                # Strip the clashing strings from the previously registered
                # option; drop it entirely once it has no strings left.
                for (opt, c_option) in conflict_opts:
                    if len(opt)>2 and opt[:2]=="--":
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        self.option_list.remove(c_option)

    def add_option (self, *args, **kwargs):
        """add_option(Option)
           add_option(opt_str, ..., kwarg=val, ...)

        Register a new Option, either pre-built or constructed here from
        option strings and keyword arguments.
        """
        if type(args[0]) is types.StringType:
            option = apply(self.option_class,args, kwargs)
        elif len(args) == 1 and not kwargs:
            option = args[0]
            if not isinstance(option, Option):
                raise TypeError, "not an Option instance: %s" % (repr(option),)
        else:
            raise TypeError, "invalid arguments"

        self._check_conflict(option)

        self.option_list.append(option)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

        if option.dest is not None:     # option has a dest, we need a default
            if option.default is not NO_DEFAULT:
                self.defaults[option.dest] = option.default
            elif not self.defaults.has_key(option.dest):
                self.defaults[option.dest] = None

    def add_options (self, option_list):
        for option in option_list:
            self.add_option(option)

    # -- Option query/removal methods ----------------------------------

    def get_option (self, opt_str):
        # Short options take precedence; returns None if unknown.
        return (self._short_opt.get(opt_str) or
                self._long_opt.get(opt_str))

    def has_option (self, opt_str):
        return (self._short_opt.has_key(opt_str) or
                self._long_opt.has_key(opt_str))

    def remove_option (self, opt_str):
        # Remove the option owning 'opt_str' together with ALL of its
        # option strings, short and long.
        option = self._short_opt.get(opt_str)
        if option is None:
            option = self._long_opt.get(opt_str)
        if option is None:
            raise ValueError("no such option %s" % (repr(opt_str),))

        for opt in option._short_opts:
            del self._short_opt[opt]
        for opt in option._long_opts:
            del self._long_opt[opt]
        self.option_list.remove(option)

    # -- Option-parsing methods ----------------------------------------

    def _get_args (self, args):
        if args is None:
            return sys.argv[1:]
        else:
            return args[:]              # don't modify caller's list

    def parse_args (self, args=None, values=None):
        """
        parse_args(args : [string] = sys.argv[1:],
                   values : Values = None)
        -> (values : Values, args : [string])

        Parse the command-line options found in 'args' (default:
        sys.argv[1:]).  Any errors result in a call to 'error()', which
        by default prints the usage message to stderr and calls
        sys.exit() with an error message.  On success returns a pair
        (values, args) where 'values' is an Values instance (with all
        your option values) and 'args' is the list of arguments left
        over after parsing options.
        """
        rargs = self._get_args(args)
        if values is None:
            values = self.get_default_values()

        # Store the halves of the argument list as attributes for the
        # convenience of callbacks:
        #   rargs
        #     the rest of the command-line (the "r" stands for
        #     "remaining" or "right-hand")
        #   largs
        #     the leftover arguments -- ie. what's left after removing
        #     options and their arguments (the "l" stands for "leftover"
        #     or "left-hand")
        self.rargs = rargs
        self.largs = largs = []
        self.values = values

        try:
            # NOTE(review): _process_args always returns None, so 'stop'
            # is effectively unused.
            stop = self._process_args(largs, rargs, values)
        except (BadOptionError, OptionValueError), err:
            self.error(err.msg)

        args = largs + rargs
        return self.check_values(values, args)

    def check_values (self, values, args):
        """
        check_values(values : Values, args : [string])
        -> (values : Values, args : [string])

        Check that the supplied option values and leftover arguments are
        valid.  Returns the option values and leftover arguments
        (possibly adjusted, possibly completely new -- whatever you
        like).  Default implementation just returns the passed-in
        values; subclasses may override as desired.
        """
        return (values, args)

    def _process_args (self, largs, rargs, values):
        """_process_args(largs : [string],
                         rargs : [string],
                         values : Values)

        Process command-line arguments and populate 'values', consuming
        options and arguments from 'rargs'.  If 'allow_interspersed_args' is
        false, stop at the first non-option argument.  If true, accumulate any
        interspersed non-option arguments in 'largs'.
        """
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            if arg == "--":
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # process a single long option (possibly with value(s))
                self._process_long_opt(rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                # process a cluster of short options (possibly with
                # value(s) for the last one only)
                self._process_short_opts(rargs, values)
            elif self.allow_interspersed_args:
                largs.append(arg)
                del rargs[0]
            else:
                return                  # stop now, leave this arg in rargs

        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                             ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!

    def _match_long_opt (self, opt):
        """_match_long_opt(opt : string) -> string

        Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbrevation for.  Raises BadOptionError if
        'opt' doesn't unambiguously match any long option string.
        """
        return _match_abbrev(opt, self._long_opt)

    def _process_long_opt (self, rargs, values):
        arg = rargs.pop(0)

        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = string.split(arg,"=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = 1
        else:
            opt = arg
            had_explicit_value = 0

        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error("%s option requires a value" % opt)
                else:
                    self.error("%s option requires %d values"
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                # Multi-value option: consume 'nargs' arguments as a tuple.
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]

        elif had_explicit_value:
            self.error("%s option does not take a value" % opt)

        else:
            value = None

        option.process(opt, value, values, self)

    def _process_short_opts (self, rargs, values):
        arg = rargs.pop(0)
        stop = 0
        i = 1
        # Walk the characters of a cluster such as "-xvf"; only the last
        # option in the cluster may take a value.
        for ch in arg[1:]:
            opt = "-" + ch
            option = self._short_opt.get(opt)
            i = i+1                     # we have consumed a character

            if not option:
                self.error("no such option: %s" % opt)
            if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    rargs.insert(0, arg[i:])
                    stop = 1

                nargs = option.nargs
                if len(rargs) < nargs:
                    if nargs == 1:
                        self.error("%s option requires a value" % opt)
                    else:
                        self.error("%s option requires %s values"
                                   % (opt, nargs))
                elif nargs == 1:
                    value = rargs.pop(0)
                else:
                    value = tuple(rargs[0:nargs])
                    del rargs[0:nargs]

            else:                       # option doesn't take a value
                value = None

            option.process(opt, value, values, self)

            if stop:
                break

    # -- Output/error methods ------------------------------------------

    def error (self, msg):
        """error(msg : string)

        Print a usage message incorporating 'msg' to stderr and exit.
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(sys.stderr)
        sys.stderr.write("\nSCons error: %s\n" % msg)
        sys.exit(2)

    def print_usage (self, file=None):
        """print_usage(file : file = stdout)

        Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurence of the string "%prog" in
        self.usage is replaced with the name of the current program
        (basename of sys.argv[0]).  Does nothing if self.usage is empty
        or not defined.
        """
        if file is None:
            file = sys.stdout
        if self.usage:
            usage = string.replace(self.usage,"%prog", get_prog_name())
            file.write(usage + "\n")

    def print_version (self, file=None):
        """print_version(file : file = stdout)

        Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurence
        of "%prog" in self.version is replaced by the current program's
        name.  Does nothing if self.version is empty or undefined.
        """
        if file is None:
            file = sys.stdout
        if self.version:
            version = string.replace(self.version,"%prog", get_prog_name())
            file.write(version+"\n")

    def print_help (self, file=None):
        """print_help(file : file = stdout)

        Print an extended help message, listing all options and any
        help text provided with them, to 'file' (default stdout).
        """
        # SCons: don't import wrap_text from distutils, use the
        # copy we've included below, so we can avoid being dependent
        # on having the right version of distutils installed.
        #from distutils.fancy_getopt import wrap_text

        if file is None:
            file = sys.stdout

        self.print_usage(file)

        # The help for each option consists of two parts:
        #   * the opt strings and metavars
        #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
        #   * the user-supplied help string
        #     eg. ("turn on expert mode", "read data from FILENAME")
        #
        # If possible, we write both of these on the same line:
        #   -x      turn on expert mode
        #
        # But if the opt string list is too long, we put the help
        # string on a second line, indented to the same column it would
        # start in if it fit on the first line.
        #   -fFILENAME, --file=FILENAME
        #           read data from FILENAME
        file.write("Options:\n")
        width = 78                      # assume 80 cols for now

        option_help = []                # list of (string, string) tuples
        lengths = []

        # NOTE(review): max(lengths) below raises if no options were
        # registered (or all were suppressed).
        for option in self.option_list:
            takes_value = option.takes_value()
            if takes_value:
                metavar = option.metavar or string.upper(option.dest)

            opts = []                   # list of "-a" or "--foo=FILE" strings
            if option.help is SUPPRESS_HELP:
                continue

            if takes_value:
                for sopt in option._short_opts:
                    opts.append(sopt + ' ' + metavar)
                for lopt in option._long_opts:
                    opts.append(lopt + "=" + metavar)
            else:
                for opt in option._short_opts + option._long_opts:
                    opts.append(opt)

            opts = string.join(opts,", ")
            option_help.append((opts, option.help))
            lengths.append(len(opts))

        max_opts = min(max(lengths), 26)

        for (opts, help) in option_help:
            # how much to indent lines 2 .. N of help text
            indent_rest = 2 + max_opts + 2
            help_width = width - indent_rest

            if len(opts) > max_opts:
                opts = " " + opts + "\n"
                indent_first = indent_rest
            else:                       # start help on same line as opts
                opts = " %-*s " % (max_opts, opts)
                indent_first = 0
            file.write(opts)

            if help:
                help_lines = wrap_text(help, help_width)
                file.write( "%*s%s\n" % (indent_first, "", help_lines[0]))
                for line in help_lines[1:]:
                    file.write(" %*s%s\n" % (indent_rest, "", line))
            elif opts[-1] != "\n":
                file.write("\n")

# class OptionParser
# class OptionParser
def _match_abbrev (s, wordmap):
    """_match_abbrev(s : string, wordmap : {string : Option}) -> string

    Return the string key in 'wordmap' for which 's' is an unambiguous
    abbreviation.  If 's' is found to be ambiguous or doesn't match any of
    'words', raise BadOptionError.
    """
    # Is there an exact match?
    if wordmap.has_key(s):
        return s
    else:
        # Isolate all words with s as a prefix.
        possibilities = []
        ls = len(s)
        for word in wordmap.keys():
            if len(word)>=ls and word[:ls]==s:
                possibilities.append(word)
        # No exact match, so there had better be just one possibility.
        if len(possibilities) == 1:
            return possibilities[0]
        elif not possibilities:
            raise BadOptionError("no such option: %s" % s)
        else:
            # More than one possible completion: ambiguous prefix.
            raise BadOptionError("ambiguous option: %s (%s?)"
                                 % (s, string.join(possibilities,", ")))
# SCons: Include a snarfed copy of wrap_text(), so we're not dependent
# on the right version of distutils being installed.
import re

# Translation table mapping every whitespace character to a plain space
# (used by wrap_text to normalize input before wrapping).
WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
def wrap_text (text, width):
    """wrap_text(text : string, width : int) -> [string]

    Split 'text' into multiple lines of no more than 'width' characters
    each, and return the list of strings that results.  Tabs are
    expanded and all whitespace collapsed to single spaces first; words
    may be broken at hyphens, and a single chunk longer than 'width' is
    hard-split at the line width.
    """
    if text is None:
        return []
    if len(text) <= width:
        return [text]
    text = string.expandtabs(text)
    text = string.translate(text, WS_TRANS)
    # Split into word / space-run / hyphen-run chunks; a chunk is either
    # all-whitespace or contains no whitespace at all.
    chunks = re.split(r'( +|-+)', text)
    # NOTE: under Python 2 filter() returns a list, which the loop below
    # relies on (it mutates chunks with `del chunks[0]`).
    chunks = filter(None, chunks)  # ' - ' results in empty strings
    lines = []
    while chunks:
        cur_line = []  # list of chunks (to-be-joined)
        cur_len = 0    # length of current line
        # Greedily pull chunks onto the current line until it is full.
        while chunks:
            l = len(chunks[0])
            if cur_len + l <= width:  # can squeeze (at least) this chunk in
                cur_line.append(chunks[0])
                del chunks[0]
                cur_len = cur_len + l
            else:  # this line is full
                # drop last chunk if all space
                if cur_line and cur_line[-1][0] == ' ':
                    del cur_line[-1]
                break
        if chunks:  # any chunks left to process?
            # if the current line is still empty, then we had a single
            # chunk that's too big to fit on a line -- so we break
            # it up at the line width
            if cur_len == 0:
                cur_line.append(chunks[0][0:width])
                chunks[0] = chunks[0][width:]
            # all-whitespace chunks at the end of a line can be discarded
            # (and we know from the re.split above that if a chunk has
            # *any* whitespace, it is *all* whitespace)
            if chunks[0][0] == ' ':
                del chunks[0]
        # and store this line in the list-of-all-lines -- as a single
        # string, of course!
        lines.append(string.join(cur_line, ''))
    # while chunks
    return lines
# wrap_text ()
| scons-local-0.96.90/SCons/Optik/option_parser.py | 26,744 | Original Optik revision this is based on: Copyright (c) 2001 Gregory P. Ward. All rights reserved. See the README.txt distributed with Optik for licensing terms. created 2001/10/17, GPW (from optik.py) Create the various lists and dicts that constitute the "option list". See class docstring for details about each attribute. Populate the option list; initial sources are the standard_option_list class attribute, the 'option_list' argument, and the STD_VERSION_OPTION global (if 'version' supplied). -- Private methods ----------------------------------------------- (used by the constructor) single letter -> Option instance long option -> Option instance maps option dest -> default value These are set in parse_args() for the convenience of callbacks. -- Simple modifier methods --------------------------------------- -- Option-adding methods ----------------------------------------- behaviour for Optik 1.0, 1.1 new in 1.2 new in 1.2 option has a dest, we need a default -- Option query/removal methods ---------------------------------- -- Option-parsing methods ---------------------------------------- don't modify caller's list Store the halves of the argument list as attributes for the convenience of callbacks: rargs the rest of the command-line (the "r" stands for "remaining" or "right-hand") largs the leftover arguments -- ie. what's left after removing options and their arguments (the "l" stands for "leftover" or "left-hand") We handle bare "--" explicitly, and bare "-" is handled by the standard arg handler since the short arg case ensures that the len of the opt string is greater than 1. process a single long option (possibly with value(s)) process a cluster of short options (possibly with value(s) for the last one only) stop now, leave this arg in rargs Say this is the original argument list: [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] ^ (we are about to process arg(i)). 
Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of [arg0, ..., arg(i-1)] (any options and their arguments will have been removed from largs). The while loop will usually consume 1 or more arguments per pass. If it consumes 1 (eg. arg is an option that takes no arguments), then after _process_arg() is done the situation is: largs = subset of [arg0, ..., arg(i)] rargs = [arg(i+1), ..., arg(N-1)] If allow_interspersed_args is false, largs will always be *empty* -- still a subset of [arg0, ..., arg(i-1)], but not a very interesting subset! Value explicitly attached to arg? Pretend it's the next argument. we have consumed a character Any characters left in arg? Pretend they're the next arg, and stop consuming characters of arg. option doesn't take a value -- Output/error methods ------------------------------------------ SCons: don't import wrap_text from distutils, use the copy we've included below, so we can avoid being dependent on having the right version of distutils installed.from distutils.fancy_getopt import wrap_text The help for each option consists of two parts: * the opt strings and metavars eg. ("-x", or "-fFILENAME, --file=FILENAME") * the user-supplied help string eg. ("turn on expert mode", "read data from FILENAME") If possible, we write both of these on the same line: -x turn on expert mode But if the opt string list is too long, we put the help string on a second line, indented to the same column it would start in if it fit on the first line. -fFILENAME, --file=FILENAME read data from FILENAME assume 80 cols for now list of (string, string) tuples list of "-a" or "--foo=FILE" strings how much to indent lines 2 .. N of help text start help on same line as opts class OptionParser Is there an exact match? Isolate all words with s as a prefix. No exact match, so there had better be just one possibility. More than one possible completion: ambiguous prefix. 
SCons: Include a snarfed copy of wrap_text(), so we're not dependent on the right version of distutils being installed. ' - ' results in empty strings list of chunks (to-be-joined) length of current line can squeeze (at least) this chunk in this line is full drop last chunk if all space any chunks left to process? if the current line is still empty, then we had a single chunk that's too big too fit on a line -- so we break down and break it up at the line width all-whitespace chunks at the end of a line can be discarded (and we know from the re.split above that if a chunk has *any* whitespace, it is *all* whitespace) and store this line in the list-of-all-lines -- as a single string, of course! while chunks wrap_text () | 4,655 | en | 0.78633 |
from django.urls import path

# URL routes for the account app.  Django requires this module-level name
# to exist for the app's URLconf to be include()-able; concrete routes are
# appended here as views are implemented, e.g.:
#     path("register", views.register, name="register"),
urlpatterns = []
# ] | django_forum_engine/account/urls.py | 77 | urlpatterns = [ path("/register", ) ] | 41 | en | 0.310642 |
from django.contrib import admin

from .models import UserProfile, ProfileFeedItem

# Expose the profile models in the Django admin site.
for _model in (UserProfile, ProfileFeedItem):
    admin.site.register(_model)
| profiles_api/admin.py | 182 | Register your models here. | 26 | en | 0.957485 |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from collections import namedtuple
from .utils import at_bits
GEN_GLOBALPOOL = "CNN_GlobalPool"
# /** \brief CNN_GlobalPool
# * Generator for Global Pooling (Max or Average)
# *
# \param Name: Name of the generated user kernel
# \param Ctrl: Overide generator default options (TileOrientation, Parallel Features), Def=(TILE_HOR, 1)
# \param In_DataSize: 1: byte, 2: half word, 4: word
# \param Out_DataSize: 1: byte, 2: half word, 4: word
# \param In_Q: In fixed point format
# \param Out_Q: Out fixed point format
# \param In_InL3: 0: In is in L2, 1: In is in L3 memory
# \param Out_InL3: 0: Out is in L2, 1: Out is in L3 memory
# \param InFeat: Number of input feature's maps
# \param OutFeat: Number of output feature's maps (InFeat has to be equal to OutFeat for these generators
# \param Width: Number of columns of a given feature map
# \param Height: Number of lines of a given feature map
# \param PoolOper: KOP_GLOBAL_MAXPOOL or KOP_GLOBAL_AVGPOOL
# Parameters consumed by the AT global-pooling kernel generator.
GlobalPoolATParam = namedtuple('GlobalPoolATParam', [
    "GlobalPoolOper"
])


def gen_globalpool_at_params(params):
    """Map a pooling node's parameters onto the AT kernel-op selector.

    ``params.pool_type == "average"`` selects the average-pool kernel op;
    any other value selects the max-pool kernel op.
    """
    if params.pool_type == "average":
        oper = "KOP_GLOBAL_AVGPOOL"
    else:
        oper = "KOP_GLOBAL_MAXPOOL"
    return GlobalPoolATParam(GlobalPoolOper=oper)
def gen_at_globalpool(code_block, name, in_q, out_q,
                      in_dim, out_dim, at_globalpool, gen_ctrl=None, at_ver=3):
    """Emit a CNN_GlobalPool generator call for one layer into code_block.

    Writes a single line of AT model code; the argument layout follows the
    CNN_GlobalPool signature documented above.  ``at_ver >= 3`` adds the
    In_Q/Out_Q fixed-point positions to the call.

    Parameters: in_q/out_q are quantization descriptors (at_bits() extracts
    the container size; presumably .q is the fractional-bit count -- TODO
    confirm against at_bits' definition); in_dim/out_dim expose a .shape
    whose index 0 is the feature count and 1, 2 the spatial dims (assumed
    from the argument positions -- confirm against the generator docs).
    """
    if gen_ctrl is None:
        gen_ctrl = "0"
    else:
        # Only the default control block is supported so far.
        raise NotImplementedError("genctrl is not yet implemented")
    if at_ver < 3:
        # Legacy signature: no explicit In_Q/Out_Q arguments.
        code_block.write('{}("{}", {}, {}, {}, 1, 1, {}, {}, {}, {}, {});',
                         GEN_GLOBALPOOL, name, gen_ctrl,
                         at_bits(in_q), at_bits(out_q), in_dim.shape[0], out_dim.shape[0],
                         in_dim.shape[1], in_dim.shape[2], at_globalpool.GlobalPoolOper)
    else:
        code_block.write('{}("{}", {}, {}, {}, {}, {}, 1, 1, {}, {}, {}, {}, {});',
                         GEN_GLOBALPOOL, name, gen_ctrl,
                         at_bits(in_q), at_bits(
                             out_q), in_q.q, out_q.q, in_dim.shape[0], out_dim.shape[0],
                         in_dim.shape[1], in_dim.shape[2], at_globalpool.GlobalPoolOper)
| tools/nntool/generation/at_generators/cnn_global_pool.py | 3,089 | Copyright (C) 2020 GreenWaves Technologies, SAS This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. /** \brief CNN_GlobalPool * Generator for Global Pooling (Max or Average) * \param Name: Name of the generated user kernel \param Ctrl: Overide generator default options (TileOrientation, Parallel Features), Def=(TILE_HOR, 1) \param In_DataSize: 1: byte, 2: half word, 4: word \param Out_DataSize: 1: byte, 2: half word, 4: word \param In_Q: In fixed point format \param Out_Q: Out fixed point format \param In_InL3: 0: In is in L2, 1: In is in L3 memory \param Out_InL3: 0: Out is in L2, 1: Out is in L3 memory \param InFeat: Number of input feature's maps \param OutFeat: Number of output feature's maps (InFeat has to be equal to OutFeat for these generators \param Width: Number of columns of a given feature map \param Height: Number of lines of a given feature map \param PoolOper: KOP_GLOBAL_MAXPOOL or KOP_GLOBAL_AVGPOOL | 1,698 | en | 0.810533 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# My site is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""Invenio digital library framework."""
import os
from setuptools import find_packages, setup
# Long description shown on PyPI; read with an explicit encoding and a
# context manager so the file handle is closed deterministically (the
# previous bare open() leaked the handle until GC).
with open('README.rst', encoding='utf-8') as readme_file:
    readme = readme_file.read()

packages = find_packages()

# Get the version string.  Cannot be done with an import: importing the
# package here could fail before its dependencies are installed.
g = {}
with open(os.path.join('my_site', 'version.py'), 'rt', encoding='utf-8') as fp:
    exec(fp.read(), g)
version = g['__version__']
setup(
    name='my-site',
    version=version,
    description=__doc__,
    long_description=readme,
    keywords='my-site Invenio',
    license='MIT',
    author='CERN',
    author_email='info@my-site.com',
    url='https://github.com/my-site/my-site',
    packages=packages,
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    # Invenio discovers site functionality through these entry points.
    entry_points={
        'console_scripts': [
            'my-site = invenio_app.cli:cli',
        ],
        # Flask extensions loaded into the UI application.
        'invenio_base.apps': [
            'my_site_records = my_site.records:Mysite',
        ],
        # UI blueprints (views/templates).
        'invenio_base.blueprints': [
            'my_site = my_site.theme.views:blueprint',
            'my_site_records = my_site.records.views:blueprint',
        ],
        'invenio_assets.webpack': [
            'my_site_theme = my_site.theme.webpack:theme',
        ],
        'invenio_config.module': [
            'my_site = my_site.config',
        ],
        'invenio_i18n.translations': [
            'messages = my_site',
        ],
        # Extensions loaded into the REST API application.
        'invenio_base.api_apps': [
            'my_site = my_site.records:Mysite',
            'authors = my_site.authors:Authors',
        ],
        # Persistent-identifier fetchers/minters for the authors module.
        'invenio_pidstore.fetchers': [
            'authid = my_site.authors.fetchers:author_pid_fetcher',
        ],
        'invenio_pidstore.minters': [
            'authid = my_site.authors.minters:author_pid_minter',
        ],
        # JSONSchema and Elasticsearch mapping registries.
        'invenio_jsonschemas.schemas': [
            'my_site = my_site.records.jsonschemas',
            'authors = my_site.authors.jsonschemas',
        ],
        'invenio_search.mappings': [
            'records = my_site.records.mappings',
            'authors = my_site.authors.mappings',
        ],
    },
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Development Status :: 3 - Alpha',
    ],
)
| 08-data-models-from-scratch/solution/my-site/setup.py | 2,672 | Invenio digital library framework.
-*- coding: utf-8 -*- Copyright (C) 2019 CERN. My site is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. Get the version string. Cannot be done with import! | 273 | en | 0.764769 |
# This file is part of
# the galxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
# Absolute directory containing this conftest module.
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
# Root of the bundled test-data tree.
TEST_DATA_PATH = PATH / "test_data"
# Particle tables dumped from a real simulated galaxy (dark/star/gas).
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
    """
    Rotation matrix of a transformation around X axis.

    Parameters
    ----------
    theta : `float`
        Rotation angle in radians

    Returns
    -------
    A : `np.ndarray`
        Rotation matrix, with shape (3, 3)
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c],
    ])
def rot_matrix_yaxis(theta=0):
    """
    Rotation matrix of a transformation around Y axis.

    Parameters
    ----------
    theta : `float`
        Rotation angle in radians

    Returns
    -------
    A : `np.ndarray`
        Rotation matrix, with shape (3, 3)
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c],
    ])
def rot_matrix_zaxis(theta=0):
    """
    Rotation matrix of a transformation around Z axis.

    Parameters
    ----------
    theta : `float`
        Rotation angle in radians

    Returns
    -------
    A : `np.ndarray`
        Rotation matrix, with shape (3, 3)
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ])
def rotate(pos, vel, matrix):
    """
    Rotate.

    Applies the rotation `matrix` to a set of particles positions `pos` and
    velocities `vel`.

    Parameters
    ----------
    pos : `np.ndarray`, shape = (N_part, 3)
        Positions of particles
    vel : `np.ndarray`, shape = (N_part, 3)
        Velocities of particles
    matrix : `np.ndarray`
        Rotation matrix, with shape (3, 3)

    Returns
    -------
    pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated, positions of particles
    vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated, velocities of particles
    """
    # Row vectors: each particle row is multiplied by the matrix.
    return pos @ matrix, vel @ matrix
def distance(x, y, z, m):
    """
    Distances calculator.

    Calculate pairwise coordinate differences between particles.

    Parameters
    ----------
    x, y, z : array-like, length N_part
        Positions
    m : array-like, length N_part
        Masses.  Unused by the computation; kept for backward
        compatibility of the signature.

    Returns
    -------
    dx, dy, dz : `np.ndarray`, shape = (N_part, N_part)
        Antisymmetric difference matrices with
        ``dx[i, j] == x[j] - x[i]`` and a zero diagonal.
    """
    # Broadcasting builds all N^2 pairwise differences in one vectorized
    # pass instead of the previous Python double loop.
    x, y, z = (np.asarray(a, dtype=float).ravel() for a in (x, y, z))
    dx = x[np.newaxis, :] - x[:, np.newaxis]
    dy = y[np.newaxis, :] - y[:, np.newaxis]
    dz = z[np.newaxis, :] - z[:, np.newaxis]
    return dx, dy, dz
def epot(x, y, z, m, eps=0.0):
    """
    Potential energy with python.

    Parameters
    ----------
    x, y, z : array-like, length N_part
        Positions
    m : array-like, length N_part
        Masses
    eps : `float`
        Softening radius

    Returns
    -------
    Upot : `np.ndarray`, shape = (N_part,)
        Specific potential energy of each particle,
        ``Upot[j] = G * sum_{i != j} m[i] / dist(i, j)``.
    """
    # G presumably in the (km/s)^2 kpc / Msun unit system used elsewhere
    # in the test data -- TODO confirm.
    G = 4.299e-6
    x, y, z = (np.asarray(a, dtype=float).ravel() for a in (x, y, z))
    m = np.asarray(m, dtype=float).ravel()
    n = len(m)
    # Pairwise separations (softened by eps), computed by broadcasting
    # instead of the previous O(N^2) Python double loop.
    dx = x[np.newaxis, :] - x[:, np.newaxis]
    dy = y[np.newaxis, :] - y[:, np.newaxis]
    dz = z[np.newaxis, :] - z[:, np.newaxis]
    dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
    # Pair potential G*m_i*m_j/r_ij for i != j.  The diagonal stays zero
    # (no self-interaction), which also avoids the 0/0 the full matrix
    # would produce when eps == 0.
    U = np.zeros((n, n))
    off = ~np.eye(n, dtype=bool)
    U[off] = G * np.outer(m, m)[off] / dist[off]
    # Column sums of U / m give the specific potential per particle.
    Upot = np.sum(U / m, axis=0)
    return Upot
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
    """
    Galaxy parameter for test.

    Return a factory building a dict of random Galaxy keyword parameters
    (mass, position and velocity arrays for stars, dark matter and gas).
    """

    def make(stars, gas, dm, seed):
        rng = np.random.default_rng(seed=seed)
        params = {}
        # Components are drawn in the fixed order stars, dark matter, gas;
        # within each component x, y, z, vx, vy, vz are drawn before the
        # mass, keeping the RNG stream identical for a given seed.
        for suffix, n_part in (("s", stars), ("dm", dm), ("g", gas)):
            draws = {key: rng.random(n_part)
                     for key in ("x", "y", "z", "vx", "vy", "vz", "m")}
            params["m_" + suffix] = draws["m"]
            for key in ("x", "y", "z", "vx", "vy", "vz"):
                params[key + "_" + suffix] = draws[key]
        return params

    return make
@pytest.fixture(scope="session")
def solid_disk():
    """
    Mock solid disk.

    Creates a mock solid disc of particles with masses and velocities.
    """

    def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
        rng = np.random.RandomState(seed=seed)
        # Draw radii, then azimuths, then vertical positions -- this order
        # keeps the RandomState stream identical for a given seed.
        radius = (rmax - rmin) * rng.random_sample(size=N_part) + rmin
        azimuth = 2 * np.pi * rng.random_sample(size=N_part)
        mass = 1.0e8 * np.ones_like(radius)
        cos_phi = np.cos(azimuth)
        sin_phi = np.sin(azimuth)
        x = radius * cos_phi
        y = radius * sin_phi
        z = rng.random_sample(size=N_part) - 0.5
        # Solid-body rotation: tangential speed omega * r.
        xdot = -omega * radius * sin_phi
        ydot = omega * radius * cos_phi
        zdot = np.zeros_like(xdot)
        pos = np.vstack((x, y, z)).T
        vel = np.vstack((xdot, ydot, zdot)).T
        return mass, pos, vel

    return make
@pytest.fixture(scope="session")
def mock_dm_halo():
    """
    Mock dark matter Halo.

    Creates a mock DM halo of particles with masses and positions.
    """

    def make(N_part=1000, rmax=100, seed=55):
        rng = np.random.RandomState(seed=seed)
        # Draw order (radius, polar cosine, azimuth) is part of the
        # fixture's reproducibility contract -- do not reorder.
        radius = rng.random_sample(size=N_part) * rmax
        cos_theta = rng.random_sample(size=N_part) * 2.0 - 1
        azimuth = 2 * np.pi * rng.random_sample(size=N_part)
        sin_theta = np.sqrt(1 - cos_theta ** 2)
        mass = 1.0e10 * np.ones_like(radius)
        # Spherical -> Cartesian.
        x = radius * sin_theta * np.cos(azimuth)
        y = radius * sin_theta * np.sin(azimuth)
        z = radius * cos_theta
        return mass, np.vstack((x, y, z)).T

    return make
@pytest.fixture
def disc_zero_angle(solid_disk):
    """Disc with no angle of inclination."""
    # (mass, pos, vel) straight from the factory, unrotated.
    return solid_disk(N_part=1000)
@pytest.fixture
def disc_xrotation(solid_disk):
    """Disc rotated over x axis."""
    mass, pos, vel = solid_disk(N_part=1000)
    angle = 0.3 * np.pi * np.random.RandomState(seed=42).random()
    matrix = rot_matrix_xaxis(theta=angle)
    return mass, pos @ matrix, vel @ matrix, matrix
@pytest.fixture
def disc_yrotation(solid_disk):
    """Disc rotated over y axis."""
    mass, pos, vel = solid_disk(N_part=1000)
    angle = 0.3 * np.pi * np.random.RandomState(seed=42).random()
    matrix = rot_matrix_yaxis(theta=angle)
    return mass, pos @ matrix, vel @ matrix, matrix
@pytest.fixture
def disc_zrotation(solid_disk):
    """Disc rotated over z axis."""
    mass, pos, vel = solid_disk(N_part=1000)
    angle = 0.3 * np.pi * np.random.RandomState(seed=42).random()
    matrix = rot_matrix_zaxis(theta=angle)
    return mass, pos @ matrix, vel @ matrix, matrix
@pytest.fixture
def disc_particles(solid_disk):
    """Solid disc without velocities."""
    mass, pos, _vel = solid_disk(N_part=100)
    x, y, z = pos[:, 0], pos[:, 1], pos[:, 2]
    return x, y, z, mass
@pytest.fixture
def disc_particles_all(solid_disk):
    """Solid disc with velocities."""
    # Two independent discs: one plays the stars, one the gas.
    stars = solid_disk(N_part=100)
    gas = solid_disk(N_part=100)
    return (*stars, *gas)
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
    """Spherical mock halo."""

    def make(N_part=100, seed=None):
        # Velocities use a separate RandomState so the halo positions
        # (seeded inside mock_dm_halo) stay reproducible on their own.
        rng = np.random.RandomState(seed=seed)
        mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
        vel_dm = rng.random_sample(size=(N_part, 3))
        return mass_dm, pos_dm, vel_dm

    return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
    """Mock galaxy.

    Assemble a core.Galaxy from the two mock discs (stars + gas) and a
    mock DM halo, attaching the unit conventions (Msun, kpc, km/s) the
    Galaxy constructor expects.
    """
    (mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
    mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
    g = core.Galaxy(
        # Stellar component.
        m_s=mass_s * u.M_sun,
        x_s=pos_s[:, 0] * u.kpc,
        y_s=pos_s[:, 1] * u.kpc,
        z_s=pos_s[:, 2] * u.kpc,
        vx_s=vel_s[:, 0] * (u.km / u.s),
        vy_s=vel_s[:, 1] * (u.km / u.s),
        vz_s=vel_s[:, 2] * (u.km / u.s),
        # Dark-matter component.
        m_dm=mass_dm * u.M_sun,
        x_dm=pos_dm[:, 0] * u.kpc,
        y_dm=pos_dm[:, 1] * u.kpc,
        z_dm=pos_dm[:, 2] * u.kpc,
        vx_dm=vel_dm[:, 0] * (u.km / u.s),
        vy_dm=vel_dm[:, 1] * (u.km / u.s),
        vz_dm=vel_dm[:, 2] * (u.km / u.s),
        # Gas component.
        m_g=mass_g * u.M_sun,
        x_g=pos_g[:, 0] * u.kpc,
        y_g=pos_g[:, 1] * u.kpc,
        z_g=pos_g[:, 2] * u.kpc,
        vx_g=vel_g[:, 0] * (u.km / u.s),
        vy_g=vel_g[:, 1] * (u.km / u.s),
        vz_g=vel_g[:, 2] * (u.km / u.s),
    )
    return g
@pytest.fixture
def mock_real_galaxy():
    """Mock real galaxy.

    Build a core.Galaxy from the real-simulation particle tables under
    TEST_DATA_REAL_PATH.  Column layout (assumed from the indexing below):
    0 = mass in 1e10 Msun, 1-3 = x/y/z in kpc, 4-6 = vx/vy/vz in km/s.
    """
    dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
    s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
    g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
    gal = core.Galaxy(
        # Stellar component; masses stored in units of 1e10 Msun.
        m_s=s[:, 0] * 1e10 * u.M_sun,
        x_s=s[:, 1] * u.kpc,
        y_s=s[:, 2] * u.kpc,
        z_s=s[:, 3] * u.kpc,
        vx_s=s[:, 4] * (u.km / u.s),
        vy_s=s[:, 5] * (u.km / u.s),
        vz_s=s[:, 6] * (u.km / u.s),
        # Dark-matter component.
        m_dm=dm[:, 0] * 1e10 * u.M_sun,
        x_dm=dm[:, 1] * u.kpc,
        y_dm=dm[:, 2] * u.kpc,
        z_dm=dm[:, 3] * u.kpc,
        vx_dm=dm[:, 4] * (u.km / u.s),
        vy_dm=dm[:, 5] * (u.km / u.s),
        vz_dm=dm[:, 6] * (u.km / u.s),
        # Gas component.
        m_g=g[:, 0] * 1e10 * u.M_sun,
        x_g=g[:, 1] * u.kpc,
        y_g=g[:, 2] * u.kpc,
        z_g=g[:, 3] * u.kpc,
        vx_g=g[:, 4] * (u.km / u.s),
        vy_g=g[:, 5] * (u.km / u.s),
        vz_g=g[:, 6] * (u.km / u.s),
    )
    return gal
| tests/conftest.py | 11,807 | Solid disc without velocities.
Solid disc with velocities.
Disc rotated over x axis.
Disc rotated over y axis.
Disc with no angle of inclination.
Disc rotated over z axis.
Distances calculator.
Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
Spherical mock halo.
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
Mock galaxy.
Mock real galaxy.
Galaxy parameter for test.
This return a function of a dictionary with random params of a Galaxy
object
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
Rotated, positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
Rotated, velocities of particles
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
Fixtures input data.
This file is part of the galxy-chop project (https://github.com/vcristiani/galaxy-chop) Copyright (c) 2020, Valeria Cristiani License: MIT Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt ============================================================================= IMPORTS ============================================================================= ============================================================================= PATHS ============================================================================= ============================================================================= Defining utility functions for mocking data ============================================================================= ============================================================================= Fixtures ============================================================================= | 3,079 | en | 0.494867 |
import asyncio
from typing import (
Dict,
Iterable,
Optional,
Sequence,
Tuple,
cast,
)
from cancel_token import (
CancelToken,
)
from eth_utils import ValidationError, to_tuple
from eth.exceptions import (
BlockNotFound,
)
from eth2.beacon.helpers import (
compute_start_slot_of_epoch,
)
from eth2.beacon.chains.base import (
BaseBeaconChain,
)
from eth2.beacon.types.attestations import (
Attestation,
)
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
)
from eth2.beacon.typing import (
Epoch,
Slot,
HashTreeRoot,
Version,
SigningRoot,
)
from eth2.beacon.constants import (
ZERO_SIGNING_ROOT,
)
from libp2p import (
initialize_default_swarm,
)
from libp2p.typing import TProtocol
from libp2p.crypto.keys import (
KeyPair,
)
from libp2p.host.basic_host import (
BasicHost,
)
from libp2p.network.network_interface import (
INetwork,
)
from libp2p.network.stream.net_stream_interface import (
INetStream,
)
from libp2p.peer.id import (
ID,
)
from libp2p.peer.peerinfo import (
PeerInfo,
)
from libp2p.peer.peerstore import (
PeerStore,
)
from libp2p.pubsub.pubsub import (
Pubsub,
)
from libp2p.pubsub.gossipsub import (
GossipSub,
)
from libp2p.security.base_transport import BaseSecureTransport
from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
from libp2p.stream_muxer.abc import IMuxedConn
from libp2p.stream_muxer.mplex.exceptions import MplexStreamEOF, MplexStreamReset
from libp2p.stream_muxer.mplex.mplex import MPLEX_PROTOCOL_ID, Mplex
from multiaddr import (
Multiaddr,
protocols,
)
import ssz
from p2p.service import (
BaseService,
)
from .configs import (
GOSSIPSUB_PROTOCOL_ID,
GoodbyeReasonCode,
GossipsubParams,
PUBSUB_TOPIC_BEACON_BLOCK,
PUBSUB_TOPIC_BEACON_ATTESTATION,
REQ_RESP_BEACON_BLOCKS,
REQ_RESP_GOODBYE,
REQ_RESP_HELLO,
REQ_RESP_RECENT_BEACON_BLOCKS,
ResponseCode,
)
from .exceptions import (
HandshakeFailure,
ReadMessageFailure,
RequestFailure,
WriteMessageFailure,
)
from .messages import (
Goodbye,
HelloRequest,
BeaconBlocksRequest,
BeaconBlocksResponse,
RecentBeaconBlocksRequest,
RecentBeaconBlocksResponse,
)
from .topic_validators import (
get_beacon_attestation_validator,
get_beacon_block_validator,
)
from .utils import (
make_rpc_v1_ssz_protocol_id,
make_tcp_ip_maddr,
read_req,
read_resp,
write_req,
write_resp,
)
from dataclasses import dataclass
import operator
from eth_utils.toolz import first
# Concrete libp2p protocol IDs for each req/resp method, derived from the
# base method names with the v1 SSZ-encoding suffix applied.
REQ_RESP_HELLO_SSZ = make_rpc_v1_ssz_protocol_id(REQ_RESP_HELLO)
REQ_RESP_GOODBYE_SSZ = make_rpc_v1_ssz_protocol_id(REQ_RESP_GOODBYE)
REQ_RESP_BEACON_BLOCKS_SSZ = make_rpc_v1_ssz_protocol_id(REQ_RESP_BEACON_BLOCKS)
REQ_RESP_RECENT_BEACON_BLOCKS_SSZ = make_rpc_v1_ssz_protocol_id(
    REQ_RESP_RECENT_BEACON_BLOCKS
)
@dataclass
class Peer:
    """A handshaked remote peer: its chain status plus request helpers.

    Fields mirror the remote side's hello payload; the request_* methods
    delegate to the owning Node's req/resp machinery.
    """
    node: "Node"
    _id: ID
    fork_version: Version  # noqa: E701
    finalized_root: SigningRoot
    finalized_epoch: Epoch
    head_root: HashTreeRoot
    head_slot: Slot

    @classmethod
    def from_hello_request(
            cls, node: "Node", peer_id: ID, request: HelloRequest) -> "Peer":
        """Build a Peer snapshot from the remote side's hello payload."""
        return cls(
            node=node,
            _id=peer_id,
            fork_version=request.fork_version,
            finalized_root=request.finalized_root,
            finalized_epoch=request.finalized_epoch,
            head_root=request.head_root,
            head_slot=request.head_slot,
        )

    async def request_beacon_blocks(self,
                                    start_slot: Slot,
                                    count: int,
                                    step: int = 1) -> Tuple[BaseBeaconBlock, ...]:
        """Request a range of blocks anchored at this peer's head root."""
        return await self.node.request_beacon_blocks(
            self._id,
            head_block_root=self.head_root,
            start_slot=start_slot,
            count=count,
            step=step,
        )

    async def request_recent_beacon_blocks(
            self,
            block_roots: Sequence[HashTreeRoot]) -> Tuple[BaseBeaconBlock, ...]:
        """Request specific recent blocks from this peer by their roots."""
        return await self.node.request_recent_beacon_blocks(self._id, block_roots)
class PeerPool:
    """In-memory registry of handshaked peers, keyed by libp2p peer ID."""

    peers: Dict[ID, Peer]

    def __init__(self) -> None:
        self.peers = {}

    def add(self, peer: Peer) -> None:
        self.peers[peer._id] = peer

    def remove(self, peer_id: ID) -> None:
        del self.peers[peer_id]

    def __contains__(self, peer_id: ID) -> bool:
        return peer_id in self.peers

    def __len__(self) -> int:
        return len(self.peers)

    def get_best(self, field: str) -> Peer:
        """Return the peer with the highest value of attribute ``field``."""
        by_field = operator.attrgetter(field)
        ranked = sorted(self.peers.values(), key=by_field, reverse=True)
        # next(iter(...)) mirrors toolz.first, incl. StopIteration when empty.
        return next(iter(ranked))

    def get_best_head_slot_peer(self) -> Peer:
        """Return the peer advertising the highest head slot."""
        return self.get_best("head_slot")
class Node(BaseService):
    """A libp2p-backed beacon-chain network service.

    Owns the libp2p host, the gossipsub pubsub instance and the pool of
    peers that have completed the hello handshake.
    """

    # Flipped to True once start() has wired up the host and pubsub.
    _is_started: bool = False

    key_pair: KeyPair
    listen_ip: str
    listen_port: int
    host: BasicHost
    pubsub: Pubsub
    # Optional multiaddrs supplied at construction time.
    bootstrap_nodes: Optional[Tuple[Multiaddr, ...]]
    preferred_nodes: Optional[Tuple[Multiaddr, ...]]
    chain: BaseBeaconChain
    # Peers that completed the hello exchange; replaced with a PeerPool
    # instance in __init__.
    handshaked_peers: PeerPool = None
    def __init__(
            self,
            key_pair: KeyPair,
            listen_ip: str,
            listen_port: int,
            chain: BaseBeaconChain,
            security_protocol_ops: Dict[TProtocol, BaseSecureTransport] = None,
            muxer_protocol_ops: Dict[TProtocol, IMuxedConn] = None,
            gossipsub_params: Optional[GossipsubParams] = None,
            cancel_token: CancelToken = None,
            bootstrap_nodes: Tuple[Multiaddr, ...] = None,
            preferred_nodes: Tuple[Multiaddr, ...] = None) -> None:
        """Construct the libp2p swarm, host and gossipsub router.

        security_protocol_ops / muxer_protocol_ops / gossipsub_params
        default to plaintext security, mplex muxing and the stock
        gossipsub parameters respectively.  Scheduling of start() is
        kicked off here via run_task; actual listening begins when the
        service runs.
        """
        super().__init__(cancel_token)
        self.listen_ip = listen_ip
        self.listen_port = listen_port
        self.key_pair = key_pair
        self.bootstrap_nodes = bootstrap_nodes
        self.preferred_nodes = preferred_nodes
        # TODO: Add key and peer_id to the peerstore
        if security_protocol_ops is None:
            # No encryption by default -- plaintext transport.
            security_protocol_ops = {
                PLAINTEXT_PROTOCOL_ID: InsecureTransport(key_pair)
            }
        if muxer_protocol_ops is None:
            muxer_protocol_ops = {MPLEX_PROTOCOL_ID: Mplex}
        network: INetwork = initialize_default_swarm(
            key_pair=key_pair,
            transport_opt=[self.listen_maddr],
            muxer_opt=muxer_protocol_ops,
            sec_opt=security_protocol_ops,
            peerstore_opt=None,  # let the function initialize it
            disc_opt=None,  # no routing required here
        )
        self.host = BasicHost(network=network, router=None)
        if gossipsub_params is None:
            gossipsub_params = GossipsubParams()
        gossipsub_router = GossipSub(
            protocols=[GOSSIPSUB_PROTOCOL_ID],
            degree=gossipsub_params.DEGREE,
            degree_low=gossipsub_params.DEGREE_LOW,
            degree_high=gossipsub_params.DEGREE_HIGH,
            time_to_live=gossipsub_params.FANOUT_TTL,
            gossip_window=gossipsub_params.GOSSIP_WINDOW,
            gossip_history=gossipsub_params.GOSSIP_HISTORY,
            heartbeat_interval=gossipsub_params.HEARTBEAT_INTERVAL,
        )
        self.pubsub = Pubsub(
            host=self.host,
            router=gossipsub_router,
            my_id=self.peer_id,
        )
        self.chain = chain
        self.handshaked_peers = PeerPool()
        # Schedule the async start-up (listening, subscriptions) on the
        # service's task runner.
        self.run_task(self.start())
@property
def is_started(self) -> bool:
    """Whether `start()` has completed."""
    return self._is_started
async def _run(self) -> None:
    """Service main loop: log that we are up, then block until cancelled."""
    self.logger.info("libp2p node %s is up", self.listen_maddr)
    await self.cancellation()
async def start(self) -> None:
    """Start listening, dial preferred nodes and subscribe to gossip topics."""
    # host
    self._register_rpc_handlers()
    # TODO: Register notifees
    await self.host.get_network().listen(self.listen_maddr)
    await self.connect_preferred_nodes()
    # TODO: Connect bootstrap nodes?
    # pubsub
    await self.pubsub.subscribe(PUBSUB_TOPIC_BEACON_BLOCK)
    await self.pubsub.subscribe(PUBSUB_TOPIC_BEACON_ATTESTATION)
    self._setup_topic_validators()
    self._is_started = True
def _setup_topic_validators(self) -> None:
    """Attach a validator to each gossip topic we subscribed to."""
    topic_validator_pairs = (
        (PUBSUB_TOPIC_BEACON_BLOCK, get_beacon_block_validator(self.chain)),
        (PUBSUB_TOPIC_BEACON_ATTESTATION, get_beacon_attestation_validator(self.chain)),
    )
    for topic, validator in topic_validator_pairs:
        # Third argument is False for both topics — presumably an
        # "is_async_validator" flag; confirm against the Pubsub API.
        self.pubsub.set_topic_validator(topic, validator, False)
async def dial_peer(self, ip: str, port: int, peer_id: ID) -> None:
    """Connect to peer ``peer_id`` at ``ip``:``port`` over TCP/IPv4."""
    peer_info = PeerInfo(
        peer_id=peer_id,
        addrs=[make_tcp_ip_maddr(ip, port)],
    )
    await self.host.connect(peer_info)
async def dial_peer_maddr(self, maddr: Multiaddr) -> None:
    """Dial a peer given its full multiaddr (ip4/tcp/p2p components)."""
    await self.dial_peer(
        ip=maddr.value_for_protocol(protocols.P_IP4),
        port=maddr.value_for_protocol(protocols.P_TCP),
        peer_id=ID.from_base58(maddr.value_for_protocol(protocols.P_P2P)),
    )
async def connect_preferred_nodes(self) -> None:
    """Concurrently dial every configured preferred node.

    No-op when no preferred nodes are configured.
    """
    if self.preferred_nodes is None or len(self.preferred_nodes) == 0:
        return
    # Wrap coroutines in tasks explicitly: passing bare coroutines to
    # `asyncio.wait` is deprecated since Python 3.8 and rejected in 3.11+.
    # Semantics are unchanged — dial failures are not raised here.
    await asyncio.wait([
        asyncio.ensure_future(self.dial_peer_maddr(node_maddr))
        for node_maddr in self.preferred_nodes
    ])
async def disconnect_peer(self, peer_id: ID) -> None:
    """Drop `peer_id` from the handshaked pool and close the connection.

    Idempotent: a second call for the same peer only logs.
    """
    if peer_id not in self.handshaked_peers:
        self.logger.debug("Already disconnected from %s", peer_id)
        return
    self.logger.debug("Disconnect from %s", peer_id)
    self.handshaked_peers.remove(peer_id)
    await self.host.disconnect(peer_id)
async def broadcast_beacon_block(self, block: BaseBeaconBlock) -> None:
    """Publish an SSZ-encoded block on the beacon-block gossip topic."""
    await self._broadcast_data(PUBSUB_TOPIC_BEACON_BLOCK, ssz.encode(block))
async def broadcast_attestation(self, attestation: Attestation) -> None:
    """Publish an SSZ-encoded attestation on the attestation gossip topic."""
    await self._broadcast_data(PUBSUB_TOPIC_BEACON_ATTESTATION, ssz.encode(attestation))
async def _broadcast_data(self, topic: str, data: bytes) -> None:
    """Publish raw bytes to a pubsub topic."""
    await self.pubsub.publish(topic, data)
@property
def peer_id(self) -> ID:
    """The libp2p peer ID of our host."""
    return self.host.get_id()
@property
def listen_maddr(self) -> Multiaddr:
    """TCP/IP multiaddr built from `listen_ip` and `listen_port`."""
    return make_tcp_ip_maddr(self.listen_ip, self.listen_port)
@property
def listen_maddr_with_peer_id(self) -> Multiaddr:
    """Listen multiaddr with the `/p2p/<base58-peer-id>` suffix appended."""
    return self.listen_maddr.encapsulate(Multiaddr(f"/p2p/{self.peer_id.to_base58()}"))
@property
def peer_store(self) -> PeerStore:
    """The peerstore of the underlying swarm."""
    return self.host.get_network().peerstore
async def close(self) -> None:
    """Close every network listener server and wait for them to shut down."""
    # FIXME: Add `tear_down` to `Swarm` in the upstream
    network = self.host.get_network()
    for listener in network.listeners.values():
        listener.server.close()
        await listener.server.wait_closed()
    # TODO: Add `close` in `Pubsub`
def _register_rpc_handlers(self) -> None:
    """Register a stream handler for each req/resp protocol we speak."""
    self.host.set_stream_handler(REQ_RESP_HELLO_SSZ, self._handle_hello)
    self.host.set_stream_handler(REQ_RESP_GOODBYE_SSZ, self._handle_goodbye)
    self.host.set_stream_handler(REQ_RESP_BEACON_BLOCKS_SSZ, self._handle_beacon_blocks)
    self.host.set_stream_handler(
        REQ_RESP_RECENT_BEACON_BLOCKS_SSZ,
        self._handle_recent_beacon_blocks,
    )
#
# RPC Handlers
#
# TODO: Add a wrapper or decorator to handle the exceptions in handlers,
# to close the streams safely. Probably starting from: if the function
# returns successfully, then close the stream. Otherwise, reset the stream.
# TODO: Handle the reputation of peers. Deduct their scores and even disconnect when they
# misbehave.
# TODO: Register notifee to the `Network` to
# - Record peers' joining time.
# - Disconnect peers when they fail to join in a certain amount of time.
async def _validate_hello_req(self, hello_other_side: HelloRequest) -> None:
    """Validate a peer's hello message against our own chain state.

    :raises ValidationError: when the fork versions differ, or when the
        peer's claimed finalized root disagrees with ours at the same epoch.
    """
    state_machine = self.chain.get_state_machine()
    state = self.chain.get_head_state()
    config = state_machine.config
    if hello_other_side.fork_version != state.fork.current_version:
        raise ValidationError(
            "`fork_version` mismatches: "
            f"hello_other_side.fork_version={hello_other_side.fork_version}, "
            f"state.fork.current_version={state.fork.current_version}"
        )
    # Can not validate the checkpoint with `finalized_epoch` higher than ours
    if hello_other_side.finalized_epoch > state.finalized_checkpoint.epoch:
        return
    # Get the finalized root at `hello_other_side.finalized_epoch`
    # Edge case where nothing is finalized yet
    if (
        hello_other_side.finalized_epoch == 0 and
        hello_other_side.finalized_root == ZERO_SIGNING_ROOT
    ):
        return
    finalized_epoch_start_slot = compute_start_slot_of_epoch(
        hello_other_side.finalized_epoch,
        config.SLOTS_PER_EPOCH,
    )
    finalized_root = self.chain.get_canonical_block_root(
        finalized_epoch_start_slot)
    if hello_other_side.finalized_root != finalized_root:
        raise ValidationError(
            "`finalized_root` mismatches: "
            f"hello_other_side.finalized_root={hello_other_side.finalized_root}, "
            f"hello_other_side.finalized_epoch={hello_other_side.finalized_epoch}, "
            f"our `finalized_root` at the same `finalized_epoch`={finalized_root}"
        )
def _make_hello_packet(self) -> HelloRequest:
    """Build our hello message from the current head and finalized checkpoint."""
    state = self.chain.get_head_state()
    head = self.chain.get_canonical_head()
    checkpoint = state.finalized_checkpoint
    return HelloRequest(
        fork_version=state.fork.current_version,
        finalized_root=checkpoint.root,
        finalized_epoch=checkpoint.epoch,
        head_root=head.hash_tree_root,
        head_slot=head.slot,
    )
def _compare_chain_tip_and_finalized_epoch(self,
                                           peer_finalized_epoch: Epoch,
                                           peer_head_slot: Slot) -> None:
    """Log when the peer's chain appears to be ahead of ours."""
    our_checkpoint = self.chain.get_head_state().finalized_checkpoint
    our_head = self.chain.get_canonical_head()
    # Ahead means: a higher finalized epoch, or the same finalized epoch
    # with a higher head slot.
    peer_is_ahead = (
        peer_finalized_epoch > our_checkpoint.epoch
        or (
            peer_finalized_epoch == our_checkpoint.epoch
            and peer_head_slot > our_head.slot
        )
    )
    if peer_is_ahead:
        # TODO: kickoff syncing process with this peer
        self.logger.debug("Peer's chain is ahead of us, start syncing with the peer.")
async def _handle_hello(self, stream: INetStream) -> None:
    """Respond to an inbound hello handshake on `stream`.

    On success the peer is added to `handshaked_peers`; on any failure the
    stream is reset/closed and the peer is disconnected.
    """
    # TODO: Find out when we should respond the `ResponseCode`
    # other than `ResponseCode.SUCCESS`.
    peer_id = stream.mplex_conn.peer_id
    self.logger.debug("Waiting for hello from the other side")
    try:
        hello_other_side = await read_req(stream, HelloRequest)
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        # NOTE(review): if `read_req` raises an exception type not listed
        # above, `has_error` is unbound here (NameError); also `return`
        # inside `finally` suppresses any in-flight exception — confirm
        # both are intended.
        if has_error:
            await self.disconnect_peer(peer_id)
            return
    self.logger.debug("Received the hello message %s", hello_other_side)
    try:
        await self._validate_hello_req(hello_other_side)
    except ValidationError as error:
        self.logger.info(
            "Handshake failed: hello message %s is invalid: %s",
            hello_other_side,
            str(error)
        )
        await stream.reset()
        await self.say_goodbye(peer_id, GoodbyeReasonCode.IRRELEVANT_NETWORK)
        await self.disconnect_peer(peer_id)
        return
    hello_mine = self._make_hello_packet()
    self.logger.debug("Sending our hello message %s", hello_mine)
    try:
        await write_resp(stream, hello_mine, ResponseCode.SUCCESS)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            self.logger.info(
                "Handshake failed: failed to write message %s",
                hello_mine,
            )
            await self.disconnect_peer(peer_id)
            return
    if peer_id not in self.handshaked_peers:
        peer = Peer.from_hello_request(self, peer_id, hello_other_side)
        self.handshaked_peers.add(peer)
        self.logger.debug(
            "Handshake from %s is finished. Added to the `handshake_peers`",
            peer_id,
        )
        # Check if we are behind the peer
        self._compare_chain_tip_and_finalized_epoch(
            hello_other_side.finalized_epoch,
            hello_other_side.head_slot,
        )
    await stream.close()
async def say_hello(self, peer_id: ID) -> None:
    """Initiate the hello handshake with `peer_id`.

    On success the peer is added to `handshaked_peers`.

    :raises HandshakeFailure: on write/read failure, non-SUCCESS response
        code, or an invalid hello from the peer.
    """
    hello_mine = self._make_hello_packet()
    self.logger.debug(
        "Opening new stream to peer=%s with protocols=%s",
        peer_id,
        [REQ_RESP_HELLO_SSZ],
    )
    stream = await self.host.new_stream(peer_id, [REQ_RESP_HELLO_SSZ])
    self.logger.debug("Sending our hello message %s", hello_mine)
    try:
        await write_req(stream, hello_mine)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        # NOTE(review): same `has_error`-unbound / raise-in-finally caveat
        # as `_handle_hello` — confirm intended.
        if has_error:
            await self.disconnect_peer(peer_id)
            error_msg = f"fail to write request={hello_mine}"
            self.logger.info("Handshake failed: %s", error_msg)
            raise HandshakeFailure(error_msg)
    self.logger.debug("Waiting for hello from the other side")
    try:
        resp_code, hello_other_side = await read_resp(stream, HelloRequest)
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            await self.disconnect_peer(peer_id)
            self.logger.info("Handshake failed: fail to read the response")
            raise HandshakeFailure("fail to read the response")
    self.logger.debug(
        "Received the hello message %s, resp_code=%s",
        hello_other_side,
        resp_code,
    )
    # TODO: Handle the case when `resp_code` is not success.
    if resp_code != ResponseCode.SUCCESS:
        # TODO: Do something according to the `ResponseCode`
        error_msg = (
            "resp_code != ResponseCode.SUCCESS, "
            f"resp_code={resp_code}, error_msg={hello_other_side}"
        )
        self.logger.info("Handshake failed: %s", error_msg)
        await stream.reset()
        await self.disconnect_peer(peer_id)
        raise HandshakeFailure(error_msg)
    hello_other_side = cast(HelloRequest, hello_other_side)
    try:
        await self._validate_hello_req(hello_other_side)
    except ValidationError as error:
        error_msg = f"hello message {hello_other_side} is invalid: {str(error)}"
        self.logger.info(
            "Handshake failed: %s. Disconnecting %s",
            error_msg,
            peer_id,
        )
        await stream.reset()
        await self.say_goodbye(peer_id, GoodbyeReasonCode.IRRELEVANT_NETWORK)
        await self.disconnect_peer(peer_id)
        raise HandshakeFailure(error_msg) from error
    if peer_id not in self.handshaked_peers:
        peer = Peer.from_hello_request(self, peer_id, hello_other_side)
        self.handshaked_peers.add(peer)
        self.logger.debug(
            "Handshake to peer=%s is finished. Added to the `handshake_peers`",
            peer_id,
        )
        # Check if we are behind the peer
        self._compare_chain_tip_and_finalized_epoch(
            hello_other_side.finalized_epoch,
            hello_other_side.head_slot,
        )
    await stream.close()
async def _handle_goodbye(self, stream: INetStream) -> None:
    """Handle an inbound goodbye message and disconnect the peer.

    The peer is disconnected whether or not the goodbye could be read; the
    stream is only closed cleanly when the read succeeded.
    """
    peer_id = stream.mplex_conn.peer_id
    self.logger.debug("Waiting for goodbye from %s", peer_id)
    try:
        goodbye = await read_req(stream, Goodbye)
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    if not has_error:
        # Log only on success: `goodbye` is unbound when the read failed,
        # so logging it unconditionally raised NameError before.
        self.logger.debug("Received the goodbye message %s", goodbye)
        await stream.close()
    await self.disconnect_peer(peer_id)
async def say_goodbye(self, peer_id: ID, reason: GoodbyeReasonCode) -> None:
    """Send a goodbye message to `peer_id` and disconnect.

    Best-effort: write failures are swallowed; the peer is disconnected
    either way.
    """
    goodbye = Goodbye(reason)
    self.logger.debug(
        "Opening new stream to peer=%s with protocols=%s",
        peer_id,
        [REQ_RESP_GOODBYE_SSZ],
    )
    stream = await self.host.new_stream(peer_id, [REQ_RESP_GOODBYE_SSZ])
    self.logger.debug("Sending our goodbye message %s", goodbye)
    try:
        await write_req(stream, goodbye)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    if not has_error:
        await stream.close()
    await self.disconnect_peer(peer_id)
@to_tuple
def _get_blocks_from_canonical_chain_by_slot(
    self,
    slot_of_requested_blocks: Sequence[Slot],
) -> Iterable[BaseBeaconBlock]:
    """Yield the canonical block at each requested slot, skipping slots
    for which no canonical block exists."""
    for requested_slot in slot_of_requested_blocks:
        try:
            block = self.chain.get_canonical_block_by_slot(requested_slot)
        except BlockNotFound:
            continue
        yield block
@to_tuple
def _get_blocks_from_fork_chain_by_root(
    self,
    start_slot: Slot,
    peer_head_block: BaseBeaconBlock,
    slot_of_requested_blocks: Sequence[Slot],
) -> Iterable[BaseBeaconBlock]:
    """Collect requested blocks on a non-canonical fork by walking parent
    roots from `peer_head_block` back towards `start_slot`.

    NOTE(review): blocks are yielded newest-to-oldest here, unlike the
    canonical-chain variant — confirm callers tolerate the ordering.
    """
    # Peer's head block is on a fork chain,
    # start getting the requested blocks by
    # traversing the history from the head.
    # `slot_of_requested_blocks` starts with earliest slot
    # and end with most recent slot, so we start traversing
    # from the most recent slot.
    cur_index = len(slot_of_requested_blocks) - 1
    block = peer_head_block
    if block.slot == slot_of_requested_blocks[cur_index]:
        yield block
        cur_index -= 1
    while block.slot > start_slot and cur_index >= 0:
        try:
            block = self.chain.get_block_by_root(block.parent_root)
        except (BlockNotFound, ValidationError):
            # This should not happen as we only persist block if its
            # ancestors are also in the database.
            break
        else:
            # Skip over requested slots that are empty on this fork
            # (i.e. higher than the current ancestor's slot).
            while block.slot < slot_of_requested_blocks[cur_index]:
                if cur_index > 0:
                    cur_index -= 1
                else:
                    break
            if block.slot == slot_of_requested_blocks[cur_index]:
                yield block
def _validate_start_slot(self, start_slot: Slot) -> None:
    """Reject requests whose `start_slot` predates our latest finalized slot."""
    config = self.chain.get_state_machine().config
    state = self.chain.get_head_state()
    finalized_epoch_start_slot = compute_start_slot_of_epoch(
        epoch=state.finalized_checkpoint.epoch,
        slots_per_epoch=config.SLOTS_PER_EPOCH,
    )
    if start_slot >= finalized_epoch_start_slot:
        return
    raise ValidationError(
        f"`start_slot`({start_slot}) lower than our"
        f" latest finalized slot({finalized_epoch_start_slot})"
    )
def _get_requested_beacon_blocks(
    self,
    beacon_blocks_request: BeaconBlocksRequest,
    requested_head_block: BaseBeaconBlock,
) -> Tuple[BaseBeaconBlock, ...]:
    """Resolve a beacon-blocks request into the blocks we can serve.

    Computes the requested slots (`start_slot + i * step`), drops slots past
    the requested head, then serves from the canonical chain when the head
    matches it, otherwise from the fork chain.

    :raises ValidationError: when the head is off-canonical and `start_slot`
        predates our latest finalized slot.
    """
    slot_of_requested_blocks = tuple(
        beacon_blocks_request.start_slot + i * beacon_blocks_request.step
        for i in range(beacon_blocks_request.count)
    )
    self.logger.info("slot_of_requested_blocks: %s", slot_of_requested_blocks)
    # Drop slots beyond the peer's requested head block.
    slot_of_requested_blocks = tuple(
        filter(lambda slot: slot <= requested_head_block.slot, slot_of_requested_blocks)
    )
    if len(slot_of_requested_blocks) == 0:
        return tuple()
    # We have the peer's head block in our database,
    # next check if the head block is on our canonical chain.
    try:
        canonical_block_at_slot = self.chain.get_canonical_block_by_slot(
            requested_head_block.slot
        )
    except BlockNotFound:
        # Fix: the previous code logged `canonical_block_at_slot` here,
        # which is unbound when the lookup raised (NameError). It also
        # returned from inside `finally`, silently swallowing in-flight
        # exceptions; the restructure below removes both hazards.
        self.logger.debug(
            "The requested head block is not on our canonical chain "
            "requested_head_block: %s",
            requested_head_block,
        )
        block_match = False
    else:
        block_match = canonical_block_at_slot == requested_head_block
    if block_match:
        # Peer's head block is on our canonical chain
        return self._get_blocks_from_canonical_chain_by_slot(
            slot_of_requested_blocks
        )
    # Peer's head block is not on our canonical chain
    # Validate `start_slot` is greater than our latest finalized slot
    self._validate_start_slot(beacon_blocks_request.start_slot)
    return self._get_blocks_from_fork_chain_by_root(
        beacon_blocks_request.start_slot,
        requested_head_block,
        slot_of_requested_blocks,
    )
async def _handle_beacon_blocks(self, stream: INetStream) -> None:
    """Serve a `BeaconBlocks` request over `stream`.

    Only handshaked peers are served. Responds INVALID_REQUEST when the
    requested head slot precedes `start_slot` or when slot validation
    fails; responds SUCCESS with an empty list when we lack the requested
    head block; otherwise responds SUCCESS with the resolved blocks.
    """
    peer_id = stream.mplex_conn.peer_id
    if peer_id not in self.handshaked_peers:
        self.logger.info(
            "Processing beacon blocks request failed: not handshaked with peer=%s yet",
            peer_id,
        )
        await stream.reset()
        return
    self.logger.debug("Waiting for beacon blocks request from the other side")
    try:
        beacon_blocks_request = await read_req(stream, BeaconBlocksRequest)
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            return
    self.logger.debug("Received the beacon blocks request message %s", beacon_blocks_request)
    try:
        requested_head_block = self.chain.get_block_by_hash_tree_root(
            beacon_blocks_request.head_block_root
        )
    except (BlockNotFound, ValidationError) as error:
        self.logger.info("Sending empty blocks, reason: %s", error)
        # We don't have the chain data peer is requesting
        requested_beacon_blocks: Tuple[BaseBeaconBlock, ...] = tuple()
    else:
        # Check if slot of specified head block is greater than specified start slot
        if requested_head_block.slot < beacon_blocks_request.start_slot:
            reason = (
                f"Invalid request: head block slot({requested_head_block.slot})"
                f" lower than `start_slot`({beacon_blocks_request.start_slot})"
            )
            try:
                await write_resp(stream, reason, ResponseCode.INVALID_REQUEST)
                has_error = False
            except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
                has_error = True
                if isinstance(error, WriteMessageFailure):
                    await stream.reset()
                elif isinstance(error, MplexStreamEOF):
                    await stream.close()
            finally:
                if has_error:
                    self.logger.info(
                        "Processing beacon blocks request failed: failed to write message %s",
                        reason,
                    )
                    return
            await stream.close()
            return
        else:
            try:
                requested_beacon_blocks = self._get_requested_beacon_blocks(
                    beacon_blocks_request, requested_head_block
                )
            except ValidationError as val_error:
                reason = "Invalid request: " + str(val_error)
                try:
                    await write_resp(stream, reason, ResponseCode.INVALID_REQUEST)
                    has_error = False
                except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
                    has_error = True
                    if isinstance(error, WriteMessageFailure):
                        await stream.reset()
                    elif isinstance(error, MplexStreamEOF):
                        await stream.close()
                finally:
                    if has_error:
                        self.logger.info(
                            "Processing beacon blocks request failed: "
                            "failed to write message %s",
                            reason,
                        )
                        return
                await stream.close()
                return
    # TODO: Should it be a successful response if peer is requesting
    # blocks on a fork we don't have data for?
    beacon_blocks_response = BeaconBlocksResponse(blocks=requested_beacon_blocks)
    self.logger.debug("Sending beacon blocks response %s", beacon_blocks_response)
    try:
        await write_resp(stream, beacon_blocks_response, ResponseCode.SUCCESS)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            self.logger.info(
                "Processing beacon blocks request failed: failed to write message %s",
                beacon_blocks_response,
            )
            return
    self.logger.debug(
        "Processing beacon blocks request from %s is finished",
        peer_id,
    )
    await stream.close()
async def request_beacon_blocks(self,
                                peer_id: ID,
                                head_block_root: HashTreeRoot,
                                start_slot: Slot,
                                count: int,
                                step: int) -> Tuple[BaseBeaconBlock, ...]:
    """Request `count` blocks from `peer_id`, every `step` slots starting
    at `start_slot`, under the chain identified by `head_block_root`.

    :raises RequestFailure: when not handshaked with the peer, or when the
        request/response exchange fails or returns a non-SUCCESS code.
    """
    if peer_id not in self.handshaked_peers:
        error_msg = f"not handshaked with peer={peer_id} yet"
        self.logger.info("Request beacon block failed: %s", error_msg)
        raise RequestFailure(error_msg)
    beacon_blocks_request = BeaconBlocksRequest(
        head_block_root=head_block_root,
        start_slot=start_slot,
        count=count,
        step=step,
    )
    self.logger.debug(
        "Opening new stream to peer=%s with protocols=%s",
        peer_id,
        [REQ_RESP_BEACON_BLOCKS_SSZ],
    )
    stream = await self.host.new_stream(peer_id, [REQ_RESP_BEACON_BLOCKS_SSZ])
    self.logger.debug("Sending beacon blocks request %s", beacon_blocks_request)
    try:
        await write_req(stream, beacon_blocks_request)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            error_msg = f"fail to write request={beacon_blocks_request}"
            self.logger.info("Request beacon blocks failed: %s", error_msg)
            raise RequestFailure(error_msg)
    self.logger.debug("Waiting for beacon blocks response")
    try:
        resp_code, beacon_blocks_response = await read_resp(stream, BeaconBlocksResponse)
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            self.logger.info("Request beacon blocks failed: fail to read the response")
            raise RequestFailure("fail to read the response")
    self.logger.debug(
        "Received beacon blocks response %s, resp_code=%s",
        beacon_blocks_response,
        resp_code,
    )
    if resp_code != ResponseCode.SUCCESS:
        error_msg = (
            "resp_code != ResponseCode.SUCCESS, "
            f"resp_code={resp_code}, error_msg={beacon_blocks_response}"
        )
        self.logger.info("Request beacon blocks failed: %s", error_msg)
        await stream.reset()
        raise RequestFailure(error_msg)
    await stream.close()
    beacon_blocks_response = cast(BeaconBlocksResponse, beacon_blocks_response)
    return beacon_blocks_response.blocks
async def _handle_recent_beacon_blocks(self, stream: INetStream) -> None:
    """Serve a `RecentBeaconBlocks` request: look up each requested block
    root and respond with the blocks we have (unknown roots are omitted)."""
    peer_id = stream.mplex_conn.peer_id
    if peer_id not in self.handshaked_peers:
        self.logger.info(
            "Processing recent beacon blocks request failed: not handshaked with peer=%s yet",
            peer_id,
        )
        await stream.reset()
        return
    self.logger.debug("Waiting for recent beacon blocks request from the other side")
    try:
        recent_beacon_blocks_request = await read_req(stream, RecentBeaconBlocksRequest)
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            return
    self.logger.debug(
        "Received the recent beacon blocks request message %s",
        recent_beacon_blocks_request,
    )
    recent_beacon_blocks = []
    for block_root in recent_beacon_blocks_request.block_roots:
        try:
            block = self.chain.get_block_by_hash_tree_root(block_root)
        except (BlockNotFound, ValidationError):
            # Unknown or invalid root: silently skip it.
            pass
        else:
            recent_beacon_blocks.append(block)
    recent_beacon_blocks_response = RecentBeaconBlocksResponse(blocks=recent_beacon_blocks)
    self.logger.debug("Sending recent beacon blocks response %s", recent_beacon_blocks_response)
    try:
        await write_resp(stream, recent_beacon_blocks_response, ResponseCode.SUCCESS)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            self.logger.info(
                "Processing recent beacon blocks request failed: failed to write message %s",
                recent_beacon_blocks_response,
            )
            return
    self.logger.debug(
        "Processing recent beacon blocks request from %s is finished",
        peer_id,
    )
    await stream.close()
async def request_recent_beacon_blocks(
        self,
        peer_id: ID,
        block_roots: Sequence[HashTreeRoot]) -> Tuple[BaseBeaconBlock, ...]:
    """Request the blocks with the given roots from `peer_id`.

    :raises RequestFailure: when not handshaked with the peer, or when the
        request/response exchange fails or returns a non-SUCCESS code.
    """
    if peer_id not in self.handshaked_peers:
        error_msg = f"not handshaked with peer={peer_id} yet"
        self.logger.info("Request recent beacon block failed: %s", error_msg)
        raise RequestFailure(error_msg)
    recent_beacon_blocks_request = RecentBeaconBlocksRequest(block_roots=block_roots)
    self.logger.debug(
        "Opening new stream to peer=%s with protocols=%s",
        peer_id,
        [REQ_RESP_RECENT_BEACON_BLOCKS_SSZ],
    )
    stream = await self.host.new_stream(peer_id, [REQ_RESP_RECENT_BEACON_BLOCKS_SSZ])
    self.logger.debug("Sending recent beacon blocks request %s", recent_beacon_blocks_request)
    try:
        await write_req(stream, recent_beacon_blocks_request)
        has_error = False
    except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, WriteMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            error_msg = f"fail to write request={recent_beacon_blocks_request}"
            self.logger.info("Request recent beacon blocks failed: %s", error_msg)
            raise RequestFailure(error_msg)
    self.logger.debug("Waiting for recent beacon blocks response")
    try:
        resp_code, recent_beacon_blocks_response = await read_resp(
            stream,
            RecentBeaconBlocksResponse,
        )
        has_error = False
    except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
        has_error = True
        if isinstance(error, ReadMessageFailure):
            await stream.reset()
        elif isinstance(error, MplexStreamEOF):
            await stream.close()
    finally:
        if has_error:
            self.logger.info("Request recent beacon blocks failed: fail to read the response")
            raise RequestFailure("fail to read the response")
    self.logger.debug(
        "Received recent beacon blocks response %s, resp_code=%s",
        recent_beacon_blocks_response,
        resp_code,
    )
    if resp_code != ResponseCode.SUCCESS:
        error_msg = (
            "resp_code != ResponseCode.SUCCESS, "
            f"resp_code={resp_code}, error_msg={recent_beacon_blocks_response}"
        )
        self.logger.info("Request recent beacon blocks failed: %s", error_msg)
        await stream.reset()
        raise RequestFailure(error_msg)
    await stream.close()
    recent_beacon_blocks_response = cast(
        RecentBeaconBlocksResponse,
        recent_beacon_blocks_response,
    )
    return recent_beacon_blocks_response.blocks
| trinity/protocol/bcc_libp2p/node.py | 41,629 | noqa: E701 TODO: Add key and peer_id to the peerstore let the function initialize it no routing required here host TODO: Register notifees TODO: Connect bootstrap nodes? pubsub FIXME: Add `tear_down` to `Swarm` in the upstream TODO: Add `close` in `Pubsub` RPC Handlers TODO: Add a wrapper or decorator to handle the exceptions in handlers, to close the streams safely. Probably starting from: if the function returns successfully, then close the stream. Otherwise, reset the stream. TODO: Handle the reputation of peers. Deduct their scores and even disconnect when they behave. TODO: Register notifee to the `Network` to - Record peers' joining time. - Disconnect peers when they fail to join in a certain amount of time. Can not validate the checkpoint with `finalized_epoch` higher than ours Get the finalized root at `hello_other_side.finalized_epoch` Edge case where nothing is finalized yet TODO: kickoff syncing process with this peer TODO: Find out when we should respond the `ResponseCode` other than `ResponseCode.SUCCESS`. Check if we are behind the peer TODO: Handle the case when `resp_code` is not success. TODO: Do something according to the `ResponseCode` Check if we are behind the peer If peer's head block is on our canonical chain, start getting the requested blocks by slots. Peer's head block is on a fork chain, start getting the requested blocks by traversing the history from the head. `slot_of_requested_blocks` starts with earliest slot and end with most recent slot, so we start traversing from the most recent slot. This should not happen as we only persist block if its ancestors are also in the database. We have the peer's head block in our database, next check if the head block is on our canonical chain. 
Peer's head block is on our canonical chain Peer's head block is not on our canonical chain Validate `start_slot` is greater than our latest finalized slot We don't have the chain data peer is requesting Check if slot of specified head block is greater than specified start slot TODO: Should it be a successful response if peer is requesting blocks on a fork we don't have data for? | 2,134 | en | 0.848365 |
import re

# Raw string: '\s' in a plain literal is an invalid escape sequence
# (DeprecationWarning today, a SyntaxError in future Python versions).
whitespace_re = re.compile(r'\s+')


def pare(text, size, etc='...'):
    '''Pare text to have maximum size and add etc to the end if it's
    changed'''
    size = int(size)
    text = text.strip()
    if len(text) > size:
        # Only back up to a word boundary when the cut falls mid-word
        # (no whitespace around position `size`).
        to_be_stripped = not whitespace_re.findall(text[size - 1:size + 2])
        text = text[:size]
        if to_be_stripped:
            half = size // 2
            last = None
            # Find the last whitespace run in the second half of the cut text.
            for mo in whitespace_re.finditer(text[half:]):
                last = mo
            if last is not None:
                text = text[:half + last.start() + 1]
        return text.rstrip() + etc
    else:
        return text
| iktomi/utils/text.py | 682 | Pare text to have maximum size and add etc to the end if it's
changed
strip the last word or not | 98 | en | 0.894552 |
from django.apps import AppConfig
class UsersConfig(AppConfig):
    # Full Python path of the application package.
    name = 'nomadgram.users'
    # Human-readable name (shown e.g. in the Django admin).
    verbose_name = "Users"

    def ready(self):
        """Override this to put in:
        Users system checks
        Users signal registration
        """
        # Imported for its side effect: registering the `user_signed_up`
        # signal handler once the app registry is ready.
        from .signals import user_signed_up  # noqa: F401
| nomadgram/users/apps.py | 307 | Override this to put in:
Users system checks
Users signal registration | 70 | en | 0.708613 |
class BotError(Exception):
    """Base error for all bot exceptions."""


class BotAppError(BotError):
    """Bot application error.

    Subclasses `BotError` (previously `Exception` directly) so callers can
    catch every bot failure with a single `except BotError`.
    """


class BotApiError(BotError):
    """Bot API error.

    Subclasses `BotError` for the same reason as `BotAppError`.
    """
Bot App Error.
Base bot error. | 45 | es | 0.32019 |
"""
Auto-generated class for JobResult
"""
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from . import client_support
class JobResult(object):
    """
    auto-generated. don't touch.

    NOTE(review): hand-refactored to drive the identical per-property
    boilerplate from one spec table; re-check against the generator
    template before regenerating this module.
    """

    # Required properties and the datatypes accepted for each, in the
    # order they were originally declared.
    _PROPERTY_SPECS = (
        ('data', [str]),
        ('id', [str]),
        ('level', [int]),
        ('name', [EnumJobResultName]),
        ('startTime', [int]),
        ('state', [EnumJobResultState]),
        ('stderr', [str]),
        ('stdout', [str]),
    )

    @staticmethod
    def create(data, id, level, name, startTime, state, stderr, stdout):
        """
        :type data: str
        :type id: str
        :type level: int
        :type name: EnumJobResultName
        :type startTime: int
        :type state: EnumJobResultState
        :type stderr: str
        :type stdout: str
        :rtype: JobResult
        """
        return JobResult(
            data=data,
            id=id,
            level=level,
            name=name,
            startTime=startTime,
            state=state,
            stderr=stderr,
            stdout=stdout,
        )

    def __init__(self, json=None, **kwargs):
        if json is None and not kwargs:
            raise ValueError('No data or kwargs present')

        class_name = 'JobResult'
        create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
        required_error = '{cls}: missing required property {prop}'

        data = json or kwargs
        # Every property is required; `val_factory` coerces the raw value
        # into the declared datatype(s).
        for property_name, datatypes in self._PROPERTY_SPECS:
            val = data.get(property_name)
            if val is None:
                raise ValueError(required_error.format(cls=class_name, prop=property_name))
            try:
                setattr(self, property_name, client_support.val_factory(val, datatypes))
            except ValueError as err:
                raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))

    def __str__(self):
        return self.as_json(indent=4)

    def as_json(self, indent=0):
        return client_support.to_json(self, indent=indent)

    def as_dict(self):
        return client_support.to_dict(self)
| pyclient/zeroos/orchestrator/client/JobResult.py | 5,199 | auto-generated. don't touch.
:type data: str
:type id: str
:type level: int
:type name: EnumJobResultName
:type startTime: int
:type state: EnumJobResultState
:type stderr: str
:type stdout: str
:rtype: JobResult
Auto-generated class for JobResult | 247 | en | 0.482067 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.