text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 00:17:55 2020
@author: mehrdad
"""
import json
import numpy as np
import pandas as pd
import time
import math
#import blist
import tslib.mining
import tslib.common
import tslib.trip_detection
import tslib.trip
STORE_RESULTS = False
#output_folder = './data/output'
#all_modes = {'WALK':0, 'RUN':0, 'BUS': 0, 'TRAM':0, 'RAIL':0, 'FERRY':0,
# 'CAR':0, 'SUBWAY':0, 'BICYCLE':0, 'EBICYCLE':0}
#all_modes_df = pd.DataFrame(data=all_modes.values(), index=all_modes.keys())
#from pyfiles.common.modalchoice import ModalChoice
# ----------------------------------------------------------------------------------------
def combine_sequential_modes(multimodal_summary):
    """Collapse consecutive duplicate modes in a '->'-separated mode summary.

    RUN legs are first normalized to WALK (substring replace, matching the
    original behavior), then runs of identical consecutive modes are merged
    into a single occurrence, e.g. 'WALK->RUN->BUS->BUS' -> 'WALK->BUS'.

    Args:
        multimodal_summary (str): modes joined by '->', e.g. 'WALK->BUS->WALK'.

    Returns:
        str: the summary with consecutive duplicates collapsed.
    """
    modes = multimodal_summary.replace('RUN', 'WALK').split('->')
    # Keep a mode only when it differs from the one immediately before it.
    # (The previous implementation used the string 'none' as the "no previous
    # mode" sentinel, which silently dropped a literal leading 'none' leg.)
    combined = [mode for i, mode in enumerate(modes) if i == 0 or mode != modes[i - 1]]
    return '->'.join(combined)
def fix_ebike_in_computed_multimodes(data):
    """Relabel BICYCLE as EBICYCLE, in place, for trips whose overall mode is EBICYCLE.

    Trip computation emits 'BICYCLE' in both `multimodal_summary` and
    `distance_by_mode` even for e-bike trips; this rewrites those strings for
    the affected rows of `data` via DataFrame.update().

    Args:
        data: trips DataFrame with 'mode', 'multimodal_summary' and
            'distance_by_mode' (JSON string) columns.
    """
    print("fix_ebike_in_computed_multimodes() ...")
    start = time.time()
    # Only rows whose detected overall mode is e-bike need fixing.
    ebikes = data[data['mode'] == 'EBICYCLE']
    new_multimodal = ebikes.multimodal_summary.apply(lambda x: x.replace('BICYCLE', 'EBICYCLE'))
    data.update(new_multimodal)
    # The distance_by_mode JSON may already contain an 'EBICYCLE' key, e.g.:
    #   '{"RUN": 0.0, "WALK": 0.0, "EBICYCLE": 0.0, "BICYCLE": 20427.21}'
    # Rename it to 'ERASED' first so the BICYCLE -> EBICYCLE substitution below
    # cannot produce a duplicate/garbled key ('EEBICYCLE').
    new_d_by_mode = ebikes.distance_by_mode.apply(lambda x: x.replace('EBICYCLE', 'ERASED'))
    new_d_by_mode = new_d_by_mode.apply(lambda x: x.replace('BICYCLE', 'EBICYCLE'))
    data.update(new_d_by_mode)
    # TODO: If planning to also update time_by_mode, the time values should be
    # reduced according to ebike speed
    end = time.time()
    print("elapsed", end - start)
def compute_modes_distance_shares_per_trip(data):
    """Compute, per trip, the distance share of each travel mode.

    Args:
        data: DataFrame indexed by (user, trip) or (user, trip, plan_id) with:
            distance_by_mode: JSON string mapping mode name -> distance,
            distance: total trip distance.

    Returns:
        DataFrame with the same index levels and columns:
            max_mode: mode with the largest distance share ('UNDEFINED' when
                all per-mode distances are zero),
            max_mode_share: that mode's share of the per-mode distance sum,
            mode_shares: array of the non-zero shares of the trip,
            total_d_error: trip distance minus the per-mode distance sum.
    """
    # Parse the per-mode distance JSON strings into dicts.
    temp = data.distance_by_mode.apply(json.loads)
    d_df = temp.to_frame()
    d_df['distance'] = data.distance
    start = time.time()
    users = []
    trip_ids = []
    plan_ids = []
    mode_shares = []
    max_mode_shares = []
    max_modes = []
    total_d_errors = []
    # The number of index levels decides the output index shape below.
    # Taking it from the index itself (rather than from the last value of the
    # loop variable, as before) also works when `data` is empty, which
    # previously raised NameError.
    nlevels = d_df.index.nlevels
    for trip in d_df.itertuples():
        users.append(trip.Index[0])
        trip_ids.append(trip.Index[1])
        if nlevels == 3:  # computed_trips carry an extra plan_id level
            plan_ids.append(trip.Index[2])
        total_distance = trip.distance
        d = trip.distance_by_mode
        dvals = np.array(list(d.values()))
        total = math.fsum(dvals)  # fsum: accurate float summation
        if total > 0:
            shares = dvals / total
            max_share = shares.max()
            max_mode = list(d.keys())[shares.argmax()]
            shares = shares[shares > 0]  # keep only modes actually used
        else:
            # No recorded per-mode distance: the trip has no dominant mode.
            max_share = 0
            max_mode = 'UNDEFINED'
            shares = []
        mode_shares.append(shares)
        max_mode_shares.append(max_share)
        max_modes.append(max_mode)
        total_d_errors.append(total_distance - total)
    all_data = {'max_mode': max_modes,
                'max_mode_share': max_mode_shares,
                'mode_shares': mode_shares,
                'total_d_error': total_d_errors}
    if nlevels == 3:  # for computed_trips
        all_index = [users, trip_ids, plan_ids]
    else:
        all_index = [users, trip_ids]
    mode_distance_shares = pd.DataFrame(index=all_index, data=all_data)
    end = time.time()
    print("compute_modes_distance_shares_per_trip(): elapsed", end - start)
    return mode_distance_shares
# -----------------------------------------
def compute_mainmode_per_trip(mode_distance_shares):
    """Derive the main (dominant) mode of each trip from its distance shares.

    A trip whose largest share is positive but below the configured minimum
    (tslib.mining.MIN_DISTANCE_SHARE_OF_MAINMODE) gets mainmode 'SMALL_SHARE'
    with share 0; RUN is folded into WALK. Adds 'mainmode' and
    'mainmode_share' columns in place and returns the same DataFrame.
    """
    t0 = time.time()
    main_modes = []
    main_shares = []
    for row in mode_distance_shares.itertuples():
        min_share = tslib.mining.MIN_DISTANCE_SHARE_OF_MAINMODE
        share = row.max_mode_share
        if 0 < share < min_share:
            # Share too small: we don't have a main-mode for this trip.
            mode, share = 'SMALL_SHARE', 0
        else:
            mode = 'WALK' if row.max_mode == 'RUN' else row.max_mode
        main_modes.append(mode)
        main_shares.append(share)
    mode_distance_shares['mainmode'] = main_modes
    mode_distance_shares['mainmode_share'] = main_shares
    t1 = time.time()
    print("elapsed", t1 - t0)
    return mode_distance_shares
# -----------------------------------------------------------
def get_all_mode_shares(mode_distance_shares):
    """Flatten every trip's mode distance shares into one single-column frame.

    Returns:
        DataFrame with a 'mode_distance_share' column listing the share
        values of all trips in order, for later statistics.
    """
    t0 = time.time()
    # One flat list of shares across all trips (each row holds an iterable).
    all_shares = [s for row in mode_distance_shares.itertuples() for s in row.mode_shares]
    result = pd.DataFrame(data={'mode_distance_share': all_shares})
    t1 = time.time()
    print("elapsed", t1 - t0)
    return result
# ---------------------------------------
def combine_samemode_leg_sequences(trips):
    """Add a 'multimodal_summary_combined' column to `trips`, in place, where
    modes repeated right after each other in multimodal_summary are collapsed
    into a single leg."""
    combined = trips.multimodal_summary.apply(combine_sequential_modes)
    trips['multimodal_summary_combined'] = combined
def compute_mainmodes_for_observed(trips):
    """Recompute and attach main-mode columns for observed trips, in place.

    Derives per-mode distance shares from distance_by_mode, picks the dominant
    mode per trip, and overwrites the 'mode' column with it; the original
    value is preserved in 'old_mode'. Also adds 'mainmode' and
    'mainmode_share' columns.
    """
    print("compute_mainmodes_for_observed(): Given ", len(trips), "trip records")
    mode_distance_shares = compute_modes_distance_shares_per_trip(trips)
    mode_distance_shares = compute_mainmode_per_trip(mode_distance_shares)
    # optional?: share_values_history_df = get_all_mode_shares(mode_distance_shares)
    # Update the records:
    trips['old_mode'] = trips['mode']
    trips['mode'] = mode_distance_shares['mainmode']
    trips['mainmode'] = mode_distance_shares['mainmode']
    trips['mainmode_share'] = mode_distance_shares['mainmode_share']
    if STORE_RESULTS:
        store_filename_suffix = 'observed'
        print("saving to file ...")
        mode_distance_shares.to_csv('./trips/output/' + 'mode_distance_shares_' + store_filename_suffix + '.csv')
        # share_values_history_df.to_csv('./trips/output/share_values_history_df_'+store_filename_suffix+'.csv')
def compute_mainmodes_for_computed(trips):
    """Attach main-mode information to computed (planned) trips, in place.

    Distance shares supply 'mainmode_share'; the mainmode itself is then
    assigned by the PT- and non-PT-specific routines in tslib.trip_detection.
    """
    print("compute_mainmodes_for_computed(): Given ", len(trips), "trip records")
    mode_distance_shares = compute_modes_distance_shares_per_trip(trips)
    mode_distance_shares = compute_mainmode_per_trip(mode_distance_shares)
    trips['mainmode_share'] = mode_distance_shares['mainmode_share']
    tslib.trip_detection.compute_mainmode_of_PT_trips(trips)
    tslib.trip_detection.compute_mainmode_of_non_PT_trips(trips)
def fix_alts_with_misplaced_plan_id(session_data):
    """Correct the 'mainmode' of computed trip alternatives whose plan_id and
    detected mainmode disagree (e.g. PT plans that came back as a pure WALK
    itinerary). Mutates session_data.computed_trips in place.
    """
    computed_trips_ = session_data.computed_trips
    # POSSIBLE FIXES
    # See: X.multimodal_summary.value_counts() of following datasets:
    # also:
    #  np.histogram(pt_alts_by_planid.car_distance/pt_alts_by_planid.distance, bins=[0, 0.01, 0.3, 0.7, 1])
    #  np.histogram(pt_alts_by_planid.bike_distance/pt_alts_by_planid.distance, bins=[0, 0.01, 0.3, 0.7, 1])
    #  np.histogram(pt_alts_by_planid.walk_distance/pt_alts_by_planid.distance, bins=[0, 0.3, 0.7, 1])
    #  np.histogram(pt_alts_by_planid.pt_distance/pt_alts_by_planid.distance, bins=[0, 0.3, 0.7, 1])
    walk_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [1])  # supposed to be walk alts
    # OK, but very few CAR, BIKE and PT
    #   for which, bike and pt should be fine because they have the correct mainmode ??
    bike_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [2])  # supposed to be bike alts
    # OK, but very few CAR and PT
    car_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [3])  # supposed to be car alts
    # Has ~400 PT, 60 WALK
    #   The PT ones without SMALL_SHARE are fine already
    pt_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [4,5,6])  # only can be PT alts
    # test: (pt_alts_by_planid[pt_alts_by_planid.mainmode == 'WALK']).multimodal_summary.value_counts()
    # 2551 are only WALK leg, and apparently those trips already have plan_id=1 WALK computed:
    #   Implies PT is not available or not possible? ... see the distances
    #   Update computed_trips as 'mainmode' = PT_PLANNED_AS_WALK ?!!
    # the rest have at least one PT leg.
    # How to fix computed_trips?
    #   Is it ok if for a 'PT' trip, actual motorized distance is only e.g. 20%?
    # 12191 records mainmode == SMALL_SHARE ***
    #   Update SMALL_SHARE to a PT mode
    #     to the largest share ?!
    #     to dft.old_mode.value_counts() ?!
    #     to 'MULTI_PT_MODES' or 'PT_SMALL_SHARE' and then add 'MULTI_PT_MODES', etc. to PT_MODES ?!

    # Make the corrections:
    # PT alts:
    # .1: PT plans whose whole itinerary is a single WALK leg -> PT_PLANNED_AS_WALK
    revise = pt_alts_by_planid[(pt_alts_by_planid.multimodal_summary == 'WALK') & (pt_alts_by_planid.mainmode != 'PT_PLANNED_AS_WALK')]
    computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_PLANNED_AS_WALK'
    # .2: PT plans with no dominant mode -> PT_SMALL_SHARE
    revise = pt_alts_by_planid[pt_alts_by_planid.mainmode == 'SMALL_SHARE']
    # revise['pt_d_share'] = revise.pt_distance/revise.distance
    # revise[['old_mode', 'multimodal_summary', 'pt_distance', 'pt_d_share']]
    # dft.multimodal_summary
    computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_SMALL_SHARE'
    # Where mainmode was incorrectly classified as 'WALK' because the walk leg
    # had the largest distance-share, yet the itinerary contains a PT leg.
    revise = pt_alts_by_planid[(pt_alts_by_planid.multimodal_summary.apply(tslib.trip.has_pt_leg)) &\
                               (~ pt_alts_by_planid.mainmode.isin(tslib.trip_detection.PT_MODES))]
    computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_SMALL_SHARE'
    # .3: car plans that actually contain a PT leg and no dominant mode -> PT_SMALL_SHARE
    revise = car_alts_by_planid[car_alts_by_planid.multimodal_summary.apply(tslib.trip.has_pt_leg) &\
                               (car_alts_by_planid.mainmode == 'SMALL_SHARE')]
    computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_SMALL_SHARE'
# ======================================================
def save_to_file(session_data):
    """Persist observed and computed trips to the configured data-out folder."""
    print("Saving trips to file ...")
    output_folder = session_data.settings.DATAOUT_FOLDER
    tslib.common.save_dataframe_to_file(output_folder,'observed_trips', session_data.observed_trips)
    tslib.common.save_dataframe_to_file(output_folder,'computed_trips', session_data.computed_trips)
def load_data_with_fixed_modes(session_data):
    """Load previously saved observed/computed trips (with fixed modes) from
    the configured data-store folder into session_data, in place."""
    print("Loading trips from file ...")
    data_storage_folder = session_data.settings.DATASTORE_FOLDER
    session_data.observed_trips = tslib.common.load_dataframe_from_file(data_storage_folder,'observed_trips')
    session_data.computed_trips = tslib.common.load_dataframe_from_file(data_storage_folder,'computed_trips')
| 12,410 | 4,548 |
from abc import ABC, abstractmethod
import gym
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import time
from setcpp import SmoothnessDPL1Cost, SmoothnessDPPairGridCost, SmoothnessGreedy
import tqdm
from typing import Optional, Tuple, NoReturn
from data.synthia import Frame, append_xyz_to_depth_map
from devices.light_curtain import LCReturn
from lc_planner.planner import PlannerRT
from lc_planner.config import LCConfig
import utils
########################################################################################################################
# region Base Env class
########################################################################################################################
class Env(ABC, gym.Env):
    """
    Base class for implementing environments for safety envelope tracking.
    This is intended to mimic the OpenAI Gym wrapper.
    """
    def __init__(self,
                 lc_config: LCConfig,
                 thetas: np.ndarray,
                 min_range: float,
                 max_range: float,
                 use_random_curtain: bool,
                 random_curtain_updates_main_curtain: bool,
                 random_curtain_cache_file: str,
                 random_curtain_sampling: str,
                 random_curtain_spacing_power: float,
                 vertical_range: Tuple[float, float],
                 r_hit_intensity_thresh: int,
                 r_recession: float,
                 pp_smoothing: Optional[str],
                 tracking_rtol: float,
                 tracking_atol: float,
                 baseline_config: dict,
                 debug: bool = False):
        """
        Args:
            lc_config (LCConfig): light curtain device/camera configuration.
            thetas (np.ndarray, shape=(C,)): camera ray angles, one per camera column.
            min_range (float): minimum curtain placement range.
            max_range (float): maximum curtain placement range; also used as the
                "infinitely far" sentinel in the safety envelope.
            use_random_curtain (bool): whether step() places random curtains.
            random_curtain_updates_main_curtain (bool): if True, random curtain
                hits pull the main (forecasting) curtain closer.
            random_curtain_cache_file (str): path of the random curtain cache file.
            random_curtain_sampling (str): sampling strategy passed to the planner.
            random_curtain_spacing_power (float): exponent controlling range
                spacing for random curtains (see get_ranges()).
            vertical_range (Tuple[float, float]): (min, max) height band; pixels
                outside it are ignored by the ground-truth safety envelope.
            r_hit_intensity_thresh (int): intensity threshold for counting a random
                curtain return as a hit.  # presumably used by subclasses -- confirm
            r_recession (float): distance the main curtain recedes behind random
                curtain hit locations.
            pp_smoothing (Optional[str]): post-processing smoothing method; one of
                "heuristic_global", "heuristic_greedy", "planner_global".
            tracking_rtol (float): relative tolerance used by done().
            tracking_atol (float): absolute tolerance used by done().
            baseline_config (dict): config for the handcrafted baseline policy.
            debug (bool): if True, visualize some random curtains at init.
        """
        # one ray angle per camera column
        assert len(thetas) == lc_config.CAMERA_PARAMS['width']
        self._lc_config = lc_config
        self._thetas = thetas
        self._min_range = min_range
        self._max_range = max_range
        self._use_random_curtain = use_random_curtain
        self._random_curtain_updates_main_curtain = random_curtain_updates_main_curtain
        self._random_curtain_sampling = random_curtain_sampling
        self._random_curtain_spacing_power = random_curtain_spacing_power
        self._vertical_range = vertical_range
        self._r_hit_intensity_thresh = r_hit_intensity_thresh
        self._r_recession = r_recession
        self._pp_smoothing = pp_smoothing
        self._rtol = tracking_rtol
        self._atol = tracking_atol
        self._debug = debug

        # config for handcrafted baseline policy
        self.baseline_config = baseline_config

        # options
        self._RANGES_PER_RAY_V2 = 1000  # discretization; ~1.9cm apart -- TODO confirm spacing

        # random curtain generator (loads from cache file if it exists)
        self.rand_curtain_gen = RandomCurtainGenerator(cache_file=random_curtain_cache_file)
        ranges = self.get_ranges(self.min_range, self.max_range, self._RANGES_PER_RAY_V2,
                                 self._random_curtain_spacing_power)
        if not self.rand_curtain_gen.has_curtains:
            plannerV2 = PlannerRT(self._lc_config, ranges, self.C, version=2)
            self.rand_curtain_gen.generate(planner=plannerV2, sampling=self._random_curtain_sampling)

        # smoothing
        self._SMOOTHNESS = 0.05
        smoothness_args = (self.C, self.min_range, self.max_range, self._SMOOTHNESS)
        self._smoothnessDPL1Cost = SmoothnessDPL1Cost(*smoothness_args)
        self._smoothnessDPPairGridCost = SmoothnessDPPairGridCost(*smoothness_args)
        self._smoothnessGreedy = SmoothnessGreedy(*smoothness_args)

        # stores the most recently obtained intensities.
        # this is used by heuristic_greedy smoothing to define a priority over camera rays.
        # this should be initialized in self.reset()
        self.intensities = None  # (C,)

        if self._debug:
            # visualize 20 random curtains
            # NOTE(review): plannerV2 is only bound when the cache file did not
            # exist above; with a warm cache this block raises NameError -- confirm.
            for i in range(20):
                design_pts = plannerV2._planner.randomCurtainDiscrete(self._random_curtain_sampling)
                design_pts = np.array(design_pts, dtype=np.float32)  # (C, 3)
                plt.plot(design_pts[:, 0], design_pts[:, 1])
                plt.ylim(0, 20)
                plt.xlim(-7, 7)
                plt.title("power: {}, vel: {}, acc: {}".format(
                    self._random_curtain_spacing_power,
                    self._lc_config.LASER_PARAMS["max_omega"],
                    self._lc_config.LASER_PARAMS["max_alpha"]), fontsize='xx-large')
                plt.tight_layout()
                plt.show()

    @property
    def thetas(self):
        return self._thetas  # (C,) in degrees and in increasing order in [-fov/2, fov/2]

    @property
    def min_range(self):
        return self._min_range

    @property
    def max_range(self):
        return self._max_range

    @property
    def H(self):
        return self._lc_config.CAMERA_PARAMS['height']  # number of camera rows

    @property
    def C(self):
        return self._lc_config.CAMERA_PARAMS['width']  # number of camera columns

    @staticmethod
    def get_ranges(min_range, max_range, num_ranges, power):
        """Return `num_ranges` values spanning [min_range, max_range].

        `power` controls the spacing: power=1 is uniform; power>1 concentrates
        values near min_range.
        """
        # generate numbers between 0 and 1
        unit_spacing = np.linspace(0, 1, num_ranges, dtype=np.float32)  # (R,)
        unit_spacing = np.power(unit_spacing, power)  # (R,)
        ranges = min_range + (max_range - min_range) * unit_spacing  # (R,)
        return ranges

    def safety_envelope(self,
                        frame: Frame) -> np.ndarray:
        """
        Computes ground truth safety envelope from the ground truth depth map in the frame.
        The safety envelope for each camera column is the smallest bev range value across all pixels in that column.
        Args:
            frame (Frame): frame containing ground truth depth.
        Returns:
            se_ranges: (np.ndarray, dtype=np.float32, shape=(C,)) the ranges of the ground truth safety envelope,
                       one per camera ray.
        """
        depth = frame.depth.copy()  # (H, C)

        # append x, y, z to depth
        P2 = frame.calib["P2"][:3, :3]  # (3, 3) camera intrinsics
        cam_xyz = append_xyz_to_depth_map(depth[:, :, None], P2)  # (H, C, 3); axis 2 is (x, y, z) in cam frame
        cam_x, cam_y, cam_z = cam_xyz[:, :, 0], cam_xyz[:, :, 1], cam_xyz[:, :, 2]  # all are (H, C)
        bev_range = np.sqrt(np.square(cam_x) + np.square(cam_z))  # (H, C) sqrt(x**2 + z**2)

        # we do not care about objects beyond "max_range"
        bev_range = bev_range.clip(max=self.max_range)  # (H, C)

        # pixels that are outside the vertical range are assumed to be infinitely far away
        # (note that cam_y points downwards)
        vrange_min, vrange_max = self._vertical_range
        outside_vrange_mask = (-cam_y < vrange_min) | (-cam_y > vrange_max)  # (H, C)
        bev_range[outside_vrange_mask] = self.max_range

        # closest in-band pixel per column defines the envelope
        se_ranges = bev_range.min(axis=0)  # (C,)
        return se_ranges.astype(np.float32)

    def augment_frame_data(self, frame: Frame) -> NoReturn:
        """Compute the gt safety envelope and add it to the frame"""
        se_ranges = self.safety_envelope(frame)  # (C,)
        se_design_pts = utils.design_pts_from_ranges(se_ranges, self.thetas)  # (C, 2)
        frame.annos["se_ranges"] = se_ranges
        frame.annos["se_design_pts"] = se_design_pts

    ####################################################################################################################
    # region Env API functions
    ####################################################################################################################

    def reset(self,
              vid: Optional[int] = None,
              start: Optional[int] = None) -> np.ndarray:
        """Resets the state of the environment, returns the initial envelope and also initializes self.intensities.
        Args:
            vid (int): video id.
            start (int): start frame of video.
        Returns:
            init_envelope (np.ndarray, dtype=np.float32, shape=(C,)): the initial envelope.
        """
        raise NotImplementedError

    def step(self,
             action: Optional[np.ndarray],
             score: Optional[float] = None,
             get_gt: bool = False) -> Tuple[LCReturn, bool, dict]:
        """
        Compute the observations from the current step.
        This is derived by placing the light curtain computed from observations in the previous timestep,
        in the current frame.
        Args:
            action (np.ndarray, dtype=np.float32, shape=(C,)): Ranges of the light curtain.
                This is optional; if None, then the ground truth action will be used instead (for behavior cloning).
            score (Optional[float]): the score of the front curtain that needs to be published.
            get_gt (bool): whether to compute gt_action or not
        Returns:
            observation (LCReturn): agent's observation of the current environment. This is the return from the front
                light curtain. Always returns a valid observation, even when end=True.
            end (bool): is True for the last valid observation in the episode. No further calls to step() should be
                made after a end=True has been returned.
            info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
                {
                    'gt_action' (optional): (np.ndarray, dtype=float32, shape=(C,))
                                            the light curtain placement that should be considered `ground
                                            truth' for the previous timestep. This is what `action' should
                                            ideally be equal to.
                    'ss_action' (optional): (np.ndarray, dtype=np.float32, shape=(C,))
                                            partial ground truth self-supervision signal generated by random
                                            light curtains. note that the mask is equal to
                                            (ss_action < self.max_range).
                }
        """
        self.env_step_begin()
        info = {}

        ################################################################################################################
        # region Random curtain
        ################################################################################################################
        if self._use_random_curtain:
            # place random curtain and move f_curtain to wherever hits are observed
            r_curtain, r_hits = self.env_place_r_curtain()

            # compute self-supervision signal
            # these are the ranges of the random curtain for those camera rays where a hit was observed.
            # rays that did not observe a hit are masked out.
            ss_action = r_curtain.copy()  # (C,)
            ss_action[~r_hits] = self.max_range
            info['ss_action'] = ss_action
        # endregion
        ################################################################################################################
        # region Pre-processing forecasting curtain
        ################################################################################################################
        if action is not None:
            # clip curtain between min and max range
            f_curtain = action.clip(min=self.min_range, max=self.max_range)  # (C,)

            if self._use_random_curtain and self._random_curtain_updates_main_curtain:
                # update f_curtain by moving it to locations where the random curtain observed returns
                # update only those locations where the random curtain detected objects *closer* than the main curtain
                r_update_mask = r_hits & (r_curtain < f_curtain)  # (C,)
                f_curtain[r_update_mask] = r_curtain[r_update_mask] - self._r_recession

                # since f_curtain is being updated, self.intensities must also be updated.
                # furthermore, the locations of random curtain hits should get the highest priority
                self.intensities[r_update_mask] = 1.1
        # endregion
        ################################################################################################################
        # region Smoothing forecasting curtain
        ################################################################################################################
        if action is not None:
            if self._pp_smoothing == "heuristic_global":
                # heuristic smoothing: difference between ranges on consecutive rays shouldn't exceed a threshold
                # global optimization: minimizes the sum of L1 differences across all rays using DP
                if self._use_random_curtain and self._random_curtain_updates_main_curtain:
                    # when using random curtains, the cost will be hierarchical:
                    # (sum of L1 costs over rays in r_update_mask, sum of L1 costs over rays outside r_update_mask)
                    # this priorities being close to the locations updated by r_curtain more than the other locations.
                    ranges = np.array(self._smoothnessDPPairGridCost.getRanges(), dtype=np.float32)  # (R,)
                    flat_cost = np.abs(ranges.reshape(-1, 1) - f_curtain)  # (R, C)

                    # hierarchical cost
                    #   - (L1cost, 0): if on ray in r_update_mask
                    #   - (0, L1cost): if on ray outside r_update_mask
                    pair_cost = np.zeros([len(ranges), self.C, 2], dtype=np.float32)  # (R, C, 2)
                    pair_cost[:, r_update_mask, 0] = flat_cost[:, r_update_mask]
                    pair_cost[:, ~r_update_mask, 1] = flat_cost[:, ~r_update_mask]

                    f_curtain = np.array(self._smoothnessDPPairGridCost.smoothedRanges(pair_cost), dtype=np.float32)  # (C,)
                else:
                    f_curtain = np.array(self._smoothnessDPL1Cost.smoothedRanges(f_curtain), dtype=np.float32)  # (C,)
            elif self._pp_smoothing == "heuristic_greedy":
                # heuristic smoothing: difference between ranges on consecutive rays shouldn't exceed a threshold
                # greedy optimization: greedily smoothes ranges while iterating over rays prioritized by largest weights
                f_curtain = np.array(self._smoothnessGreedy.smoothedRanges(f_curtain, self.intensities), dtype=np.float32)  # (C,)
            elif self._pp_smoothing == "planner_global":
                # create L1 cost function
                # NOTE(review): self.plannerV2 is not assigned in this base class's
                # __init__ (only a local plannerV2 is) -- presumably a subclass sets
                # it before this branch runs; confirm.
                ranges = self.plannerV2.ranges  # (R,)
                cmap = -np.abs(ranges.reshape(-1, 1) - f_curtain)  # (R, C)
                design_pts = self.plannerV2.get_design_points(cmap)  # (C, 2)
                assert design_pts.shape == (self.plannerV2.num_camera_angles, 2)
                f_curtain = np.linalg.norm(design_pts, axis=1)  # (C,)
            else:
                raise Exception(f"env.pp_smoothing must be " +
                                "\"heuristic_global\" or \"heuristic_greedy\" or \"planner_global\"")
        # endregion
        ################################################################################################################
        # region GT-action and placing forecasting curtain
        ################################################################################################################
        if (action is None) and (get_gt == False):
            raise Exception("Must compute gt_action in behavior cloning")

        # the next line gets the ground truth action for the previous timestep
        # in the ideal policy, `action' should match this `gt_action'
        if get_gt:
            info['gt_action'] = self.env_current_gt_action()  # (C,)

        # if action is set to None (for eg. in behavior cloning), use the ground truth action instead
        if action is None:
            f_curtain = info['gt_action']

        # placing forecasting curtain
        obs: LCReturn = self.env_place_f_curtain(f_curtain, score=score)

        # the next line updates self.intensities
        self.intensities = obs.bev_intensities() / 255.0

        # the next line computes `end', which checks whether another env.step() call can be made
        end = self.env_end()

        time.sleep(0)  # interrupt, useful for RealEnv
        return obs, end, info

    def done(self,
             f_curtain: np.ndarray,
             se_ranges: np.ndarray) -> bool:
        """
        Whether the episode transitions to the terminal state or not.
        Done is true when the curtain has moved too far away from the safety envelope on any camera ray i.e.
        abs(f_curtain - se_ranges) > (atol + rtol * se_ranges) for any camera ray
        Args:
            f_curtain (np.ndarray, dtype=float32, shape=(C,)): curtain placement
            se_ranges (np.ndarray, dtype=float32, shape=(C,)): ground truth safety envelope.
        Returns:
            done (bool): whether f_curtain is too far away from se_ranges on any camera ray.
        """
        # the next line computes the mask over rays; only these rays should count towards termination
        # (rays where the envelope sits at max_range have no object in range)
        mask = se_ranges < self.max_range  # (C,)
        f_curtain = f_curtain[mask]  # (C',)
        se_ranges = se_ranges[mask]  # (C',)

        # bad_rays = np.abs(f_curtain - se_ranges) > self._atol + self._rtol * se_ranges  # (C')
        # frac_bad_rays = bad_rays.sum() / mask.sum().clip(min=1)
        # return frac_bad_rays >= 0.5

        return np.any(np.abs(f_curtain - se_ranges) > self._atol + self._rtol * se_ranges)

    def render(self, mode='human'):
        # rendering is a no-op in this base class
        pass

    # endregion
    ####################################################################################################################
    # region Env-specific helper functions for step()
    ####################################################################################################################

    @abstractmethod
    def env_step_begin(self) -> NoReturn:
        """
        Env-specific helper function for step().
        Any pre-processing that needs to be done at the start of the step() function.
        """
        raise NotImplementedError

    @abstractmethod
    def env_place_r_curtain(self) -> Tuple[np.ndarray, np.ndarray]:
        """
        Env-specific helper function for step().
        Places a random curtain and gets return.
        Returns:
            r_curtain (np.ndarray, dtype=float32, shape=(C,)): ranges of the unified random curtain
            r_hits (np.ndarray, dtype=bool, shape=(C,)): mask where hits were found in the unified random curtain
        """
        raise NotImplementedError

    @abstractmethod
    def env_place_f_curtain(self,
                            f_curtain: np.ndarray,
                            score: Optional[float]) -> LCReturn:
        """
        Env-specific helper function for step().
        Places a forecasting curtain and gets return.
        Args:
            f_curtain (np.ndarray, dtype=float32, shape=(C,)): ranges of the forecasting curtain
            score (Optional[float]): score from the previous timestep. SimEnv uses this to publish score to Kittiviewer.
        Returns:
            f_return (LCReturn): Forecasting curtain return.
        """
        raise NotImplementedError

    @abstractmethod
    def env_current_gt_action(self) -> np.ndarray:
        """
        Env-specific helper function for step().
        Computes the current gt_action.
        Returns:
            gt_action (np.ndarray, dtype=float32, shape=(C,)): current gt action
        """
        raise NotImplementedError

    @abstractmethod
    def env_end(self) -> bool:
        """Computes the end flag, which checks whether another env.step() call can be made"""
        raise NotImplementedError

    # endregion
    ####################################################################################################################
    # region Legacy helper functions
    ####################################################################################################################

    def _debug_visualize_curtains(self, f_curtain, r_curtain):
        """Plot the forecasting curtain (blue) and random curtain (red) in BEV."""
        design_pts = utils.design_pts_from_ranges(f_curtain, self.thetas)
        x, z = design_pts[:, 0], design_pts[:, 1]
        plt.plot(x, z, c='b')

        design_pts = utils.design_pts_from_ranges(r_curtain, self.thetas)
        x, z = design_pts[:, 0], design_pts[:, 1]
        plt.plot(x, z, c='r')

        plt.ylim(0, 21)
        plt.show()

    def _random_curtain(self,
                        r_type: str = "linear") -> np.ndarray:
        """Computes a random curtain across the entire scene
        Args:
            r_type (str): type of the random curtain. Options are (1) "uniform", (2) "linear".
        Returns:
            curtain (np.ndarray, dtype=np.float32, shape=(C,)): range per camera ray that may not correspond to a
                valid curtain.
        """
        limits_lo = np.ones(self.C, dtype=np.float32) * 0.5 * self.min_range  # (C,)
        limits_hi = np.ones(self.C, dtype=np.float32) * self.max_range  # (C,)

        if r_type == "uniform":
            curtain = np.random.uniform(low=limits_lo, high=limits_hi)  # (C,)
        elif r_type == "linear":
            # sampling in squared-range space gives density proportional to range
            curtain = np.sqrt(np.random.uniform(low=np.square(limits_lo), high=np.square(limits_hi)))  # (C,)
        else:
            raise Exception("r_type must be one of [uniform/linear]")

        return curtain

    # endregion
    ####################################################################################################################
####################################################################################################################
# endregion
########################################################################################################################
# region Random curtain generator class
########################################################################################################################
class RandomCurtainGenerator:
    """Generates, caches, and cyclically serves random light curtain profiles.

    Curtains are generated once with a planner, written to a text cache file
    (one curtain per line: C space-separated ranges), then served via next().
    """
    def __init__(self,
                 cache_file: str):
        self.curtains = None  # (N, C) cached curtains; None until loaded
        self.ptr = 0  # index of the next curtain served by next()
        self.cache_file = Path(cache_file)
        if self.cache_file.exists():
            self.load_from_cache_file()

    @property
    def has_curtains(self):
        # True once curtains have been loaded from the cache file.
        return self.curtains is not None

    def load_from_cache_file(self):
        """Load all cached curtains into memory as float32."""
        self.curtains = np.loadtxt(self.cache_file).astype(np.float32)  # (N, C)
        utils.cprint(f'Loaded {len(self.curtains)} random curtains from cache!', color='yellow')

    def generate(self,
                 planner: PlannerRT,
                 sampling: str,
                 num_curtains: int=1000):
        """Generate `num_curtains` random curtains, write them to the cache
        file, then load them into memory.

        Raises:
            AssertionError: if the cache file already exists.
        """
        assert not self.cache_file.exists(), "Cannot generate curtains if cache file already exists"
        with open(self.cache_file, 'w') as f:
            for _ in tqdm.trange(num_curtains, desc='Creating random curtain cache ...'):
                while True:
                    curtain = self.generate_curtain_from_planner(planner, sampling)
                    # don't save degenerate curtains 90% of whose rays are behind 3m
                    if not ((curtain < 3.0).mean() > 0.9):
                        break
                print(' '.join([str(e) for e in curtain]), file=f)
        self.load_from_cache_file()

    @staticmethod
    def generate_curtain_from_planner(planner, sampling):
        """Sample one random curtain from the planner; return per-ray BEV ranges (C,)."""
        r_curtain = planner._planner.randomCurtainDiscrete(sampling)
        r_curtain = np.array(r_curtain, dtype=np.float32)  # (C, 3)
        # BEV range = norm of the first two coordinates -- presumably (x, z); confirm.
        r_curtain = np.linalg.norm(r_curtain[:, :2], axis=1)  # (C,)
        return r_curtain

    def next(self):
        """Return the next cached curtain, wrapping around at the end."""
        curtain = self.curtains[self.ptr]
        self.ptr += 1
        if self.ptr == len(self.curtains):
            self.ptr = 0
        return curtain  # (C,)
# endregion
########################################################################################################################
| 24,054 | 6,806 |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Example Plugin that displays some dynamic content (a clock) and examples of
text formatting."""
from datetime import datetime
from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.formatted_text import (
FormattedText,
HTML,
merge_formatted_text,
)
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.layout import FormattedTextControl, Window, WindowAlign
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from pw_console.plugin_mixin import PluginMixin
from pw_console.widgets import ToolbarButton, WindowPane, WindowPaneToolbar
from pw_console.get_pw_console_app import get_pw_console_app
# Helper class used by the ClockPane plugin for displaying dynamic text,
# handling key bindings and mouse input. See the ClockPane class below for the
# beginning of the plugin implementation.
class ClockControl(FormattedTextControl):
    """Example prompt_toolkit UIControl for displaying formatted text.

    This is the prompt_toolkit class that is responsible for drawing the clock,
    handling keybindings if in focus, and mouse input.
    """
    def __init__(self, clock_pane: 'ClockPane', *args, **kwargs) -> None:
        # Reference back to the plugin pane that owns this control.
        self.clock_pane = clock_pane

        # Set some custom key bindings to toggle the view mode and wrap lines.
        key_bindings = KeyBindings()

        # If you press the v key this _toggle_view_mode function will be run.
        @key_bindings.add('v')
        def _toggle_view_mode(_event: KeyPressEvent) -> None:
            """Toggle view mode."""
            self.clock_pane.toggle_view_mode()

        # If you press the w key this _toggle_wrap_lines function will be run.
        @key_bindings.add('w')
        def _toggle_wrap_lines(_event: KeyPressEvent) -> None:
            """Toggle line wrapping."""
            self.clock_pane.toggle_wrap_lines()

        # Include the key_bindings keyword arg when passing to the parent class
        # __init__ function.
        kwargs['key_bindings'] = key_bindings
        # Call the parent FormattedTextControl.__init__
        super().__init__(*args, **kwargs)

    def mouse_handler(self, mouse_event: MouseEvent):
        """Mouse handler for this control."""
        # If the user clicks anywhere this function is run.

        # Mouse positions relative to this control. x is the column starting
        # from the left side as zero. y is the row starting with the top as
        # zero.
        _click_x = mouse_event.position.x
        _click_y = mouse_event.position.y

        # Mouse click behavior usually depends on if this window pane is in
        # focus. If not in focus, then focus on it when left clicking. If
        # already in focus then perform the action specific to this window.

        # If not in focus, change focus to this clock pane and do nothing else.
        if not has_focus(self.clock_pane)():
            if mouse_event.event_type == MouseEventType.MOUSE_UP:
                get_pw_console_app().focus_on_container(self.clock_pane)
            # Mouse event handled, return None.
            return None

        # If code reaches this point, this window is already in focus.
        # On left click
        if mouse_event.event_type == MouseEventType.MOUSE_UP:
            # Toggle the view mode.
            self.clock_pane.toggle_view_mode()
            # Mouse event handled, return None.
            return None

        # Mouse event not handled, return NotImplemented.
        return NotImplemented
class ClockPane(WindowPane, PluginMixin):
    """Example Pigweed Console plugin window that displays a clock.
    The ClockPane is a WindowPane based plugin that displays a clock and some
    formatted text examples. It inherits from both WindowPane and
    PluginMixin. It can be added on console startup by calling: ::
        my_console.add_window_plugin(ClockPane())
    For an example see:
    https://pigweed.dev/pw_console/embedding.html#adding-plugins
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, pane_title='Clock', **kwargs)
        # Some toggle settings to change view and wrap lines.
        self.view_mode_clock: bool = True
        self.wrap_lines: bool = False
        # Counter variable to track how many times the background task runs.
        self.background_task_update_count: int = 0
        # ClockControl is responsible for rendering the dynamic content provided
        # by self._get_formatted_text() and handle keyboard and mouse input.
        # Using a control is always necessary for displaying any content that
        # will change.
        self.clock_control = ClockControl(
            self,  # This ClockPane class
            self._get_formatted_text,  # Callable to get text for display
            # These are FormattedTextControl options.
            # See the prompt_toolkit docs for all possible options
            # https://python-prompt-toolkit.readthedocs.io/en/latest/pages/reference.html#prompt_toolkit.layout.FormattedTextControl
            show_cursor=False,
            focusable=True,
        )
        # Every FormattedTextControl object (ClockControl) needs to live inside
        # a prompt_toolkit Window() instance. Here is where you specify
        # alignment, style, and dimensions. See the prompt_toolkit docs for all
        # options:
        # https://python-prompt-toolkit.readthedocs.io/en/latest/pages/reference.html#prompt_toolkit.layout.Window
        self.clock_control_window = Window(
            # Set the content to the clock_control defined above.
            content=self.clock_control,
            # Make content left aligned
            align=WindowAlign.LEFT,
            # These two set to false make this window fill all available space.
            dont_extend_width=False,
            dont_extend_height=False,
            # Content inside this window will have its lines wrapped if
            # self.wrap_lines is True.
            wrap_lines=Condition(lambda: self.wrap_lines),
        )
        # Create a toolbar for display at the bottom of this clock window. It
        # will show the window title and buttons.
        self.bottom_toolbar = WindowPaneToolbar(self)
        # Add a button to toggle the view mode.
        self.bottom_toolbar.add_button(
            ToolbarButton(
                key='v',  # Key binding for this function
                description='View Mode',  # Button name
                # Function to run when clicked.
                mouse_handler=self.toggle_view_mode,
            ))
        # Add a checkbox button to display if wrap_lines is enabled.
        self.bottom_toolbar.add_button(
            ToolbarButton(
                key='w',  # Key binding for this function
                description='Wrap',  # Button name
                # Function to run when clicked.
                mouse_handler=self.toggle_wrap_lines,
                # Display a checkbox in this button.
                is_checkbox=True,
                # lambda that returns the state of the checkbox
                checked=lambda: self.wrap_lines,
            ))
        # self.container is the root container that contains objects to be
        # rendered in the UI, one on top of the other.
        self.container = self._create_pane_container(
            # Display the clock window on top...
            self.clock_control_window,
            # and the bottom_toolbar below.
            self.bottom_toolbar,
        )
        # This plugin needs to run a task in the background periodically and
        # uses self.plugin_init() to set which function to run, and how often.
        # This is provided by PluginMixin. See the docs for more info:
        # https://pigweed.dev/pw_console/plugins.html#background-tasks
        self.plugin_init(
            plugin_callback=self._background_task,
            # Run self._background_task once per second.
            plugin_callback_frequency=1.0,
            plugin_logger_name='pw_console_example_clock_plugin',
        )
    def _background_task(self) -> bool:
        """Function run in the background for the ClockPane plugin.

        Returns True so the UI is redrawn every tick (once per second)."""
        self.background_task_update_count += 1
        # Make a log message for debugging purposes. For more info see:
        # https://pigweed.dev/pw_console/plugins.html#debugging-plugin-behavior
        self.plugin_logger.debug('background_task_update_count: %s',
                                 self.background_task_update_count)
        # Returning True in the background task will force the user interface to
        # re-draw.
        # Returning False means no updates required.
        return True
    def toggle_view_mode(self):
        """Toggle the view mode between the clock and formatted text example."""
        self.view_mode_clock = not self.view_mode_clock
        self.redraw_ui()
    def toggle_wrap_lines(self):
        """Enable or disable line wrapping/truncation."""
        self.wrap_lines = not self.wrap_lines
        self.redraw_ui()
    def _get_formatted_text(self):
        """This function returns the content that will be displayed in the user
        interface depending on which view mode is active."""
        if self.view_mode_clock:
            return self._get_clock_text()
        return self._get_example_text()
    def _get_clock_text(self):
        """Create the time with some color formatting."""
        # pylint: disable=no-self-use
        # Get the date and time. ('time' is just a local name here; the time
        # module is not imported in this file.)
        date, time = datetime.now().isoformat(sep='_',
                                              timespec='seconds').split('_')
        # Formatted text is represented as (style, text) tuples.
        # For more examples see:
        # https://python-prompt-toolkit.readthedocs.io/en/latest/pages/printing_text.html
        # These styles are selected using class names and start with the
        # 'class:' prefix. For all classes defined by Pigweed Console see:
        # https://cs.opensource.google/pigweed/pigweed/+/main:pw_console/py/pw_console/style.py;l=189
        # Date in cyan matching the current Pigweed Console theme.
        date_with_color = ('class:theme-fg-cyan', date)
        # Time in magenta
        time_with_color = ('class:theme-fg-magenta', time)
        # No color styles for line breaks and spaces.
        line_break = ('', '\n')
        space = ('', ' ')
        # Concatenate the (style, text) tuples.
        return FormattedText([
            line_break,
            space,
            space,
            date_with_color,
            space,
            time_with_color,
        ])
    def _get_example_text(self):
        """Examples of how to create formatted text."""
        # pylint: disable=no-self-use
        # Make a list to hold all the formatted text to display.
        fragments = []
        # Some spacing vars
        wide_space = ('', '    ')
        space = ('', ' ')
        newline = ('', '\n')
        # HTML() is a shorthand way to style text. See:
        # https://python-prompt-toolkit.readthedocs.io/en/latest/pages/printing_text.html#html
        # This formats 'Foreground Colors' as underlined:
        fragments.append(HTML('<u>Foreground Colors</u>\n'))
        # Standard ANSI colors examples
        fragments.append(
            FormattedText([
                # These tuples follow this format:
                #   (style_string, text_to_display)
                ('ansiblack', 'ansiblack'),
                wide_space,
                ('ansired', 'ansired'),
                wide_space,
                ('ansigreen', 'ansigreen'),
                wide_space,
                ('ansiyellow', 'ansiyellow'),
                wide_space,
                ('ansiblue', 'ansiblue'),
                wide_space,
                ('ansimagenta', 'ansimagenta'),
                wide_space,
                ('ansicyan', 'ansicyan'),
                wide_space,
                ('ansigray', 'ansigray'),
                wide_space,
                newline,
                ('ansibrightblack', 'ansibrightblack'),
                space,
                ('ansibrightred', 'ansibrightred'),
                space,
                ('ansibrightgreen', 'ansibrightgreen'),
                space,
                ('ansibrightyellow', 'ansibrightyellow'),
                space,
                ('ansibrightblue', 'ansibrightblue'),
                space,
                ('ansibrightmagenta', 'ansibrightmagenta'),
                space,
                ('ansibrightcyan', 'ansibrightcyan'),
                space,
                ('ansiwhite', 'ansiwhite'),
                space,
            ]))
        fragments.append(HTML('\n<u>Background Colors</u>\n'))
        fragments.append(
            FormattedText([
                # Here's an example of a style that specifies both background
                # and foreground colors. The background color is prefixed with
                # 'bg:'. The foreground color follows that with no prefix.
                ('bg:ansiblack ansiwhite', 'ansiblack'),
                wide_space,
                ('bg:ansired', 'ansired'),
                wide_space,
                ('bg:ansigreen', 'ansigreen'),
                wide_space,
                ('bg:ansiyellow', 'ansiyellow'),
                wide_space,
                ('bg:ansiblue ansiwhite', 'ansiblue'),
                wide_space,
                ('bg:ansimagenta', 'ansimagenta'),
                wide_space,
                ('bg:ansicyan', 'ansicyan'),
                wide_space,
                ('bg:ansigray', 'ansigray'),
                wide_space,
                ('', '\n'),
                ('bg:ansibrightblack', 'ansibrightblack'),
                space,
                ('bg:ansibrightred', 'ansibrightred'),
                space,
                ('bg:ansibrightgreen', 'ansibrightgreen'),
                space,
                ('bg:ansibrightyellow', 'ansibrightyellow'),
                space,
                ('bg:ansibrightblue', 'ansibrightblue'),
                space,
                ('bg:ansibrightmagenta', 'ansibrightmagenta'),
                space,
                ('bg:ansibrightcyan', 'ansibrightcyan'),
                space,
                ('bg:ansiwhite', 'ansiwhite'),
                space,
            ]))
        # These themes use Pigweed Console style classes. See full list in:
        # https://cs.opensource.google/pigweed/pigweed/+/main:pw_console/py/pw_console/style.py;l=189
        fragments.append(HTML('\n\n<u>Current Theme Foreground Colors</u>\n'))
        fragments.append([
            ('class:theme-fg-red', 'class:theme-fg-red'),
            newline,
            ('class:theme-fg-orange', 'class:theme-fg-orange'),
            newline,
            ('class:theme-fg-yellow', 'class:theme-fg-yellow'),
            newline,
            ('class:theme-fg-green', 'class:theme-fg-green'),
            newline,
            ('class:theme-fg-cyan', 'class:theme-fg-cyan'),
            newline,
            ('class:theme-fg-blue', 'class:theme-fg-blue'),
            newline,
            ('class:theme-fg-purple', 'class:theme-fg-purple'),
            newline,
            ('class:theme-fg-magenta', 'class:theme-fg-magenta'),
            newline,
        ])
        fragments.append(HTML('\n<u>Current Theme Background Colors</u>\n'))
        fragments.append([
            ('class:theme-bg-red', 'class:theme-bg-red'),
            newline,
            ('class:theme-bg-orange', 'class:theme-bg-orange'),
            newline,
            ('class:theme-bg-yellow', 'class:theme-bg-yellow'),
            newline,
            ('class:theme-bg-green', 'class:theme-bg-green'),
            newline,
            ('class:theme-bg-cyan', 'class:theme-bg-cyan'),
            newline,
            ('class:theme-bg-blue', 'class:theme-bg-blue'),
            newline,
            ('class:theme-bg-purple', 'class:theme-bg-purple'),
            newline,
            ('class:theme-bg-magenta', 'class:theme-bg-magenta'),
            newline,
        ])
        fragments.append(HTML('\n<u>Theme UI Colors</u>\n'))
        fragments.append([
            ('class:theme-fg-default', 'class:theme-fg-default'),
            space,
            ('class:theme-bg-default', 'class:theme-bg-default'),
            space,
            ('class:theme-bg-active', 'class:theme-bg-active'),
            space,
            ('class:theme-fg-active', 'class:theme-fg-active'),
            space,
            ('class:theme-bg-inactive', 'class:theme-bg-inactive'),
            space,
            ('class:theme-fg-inactive', 'class:theme-fg-inactive'),
            newline,
            ('class:theme-fg-dim', 'class:theme-fg-dim'),
            space,
            ('class:theme-bg-dim', 'class:theme-bg-dim'),
            space,
            ('class:theme-bg-dialog', 'class:theme-bg-dialog'),
            space,
            ('class:theme-bg-line-highlight', 'class:theme-bg-line-highlight'),
            space,
            ('class:theme-bg-button-active', 'class:theme-bg-button-active'),
            space,
            ('class:theme-bg-button-inactive',
             'class:theme-bg-button-inactive'),
            space,
        ])
        # Return all formatted text lists merged together.
        return merge_formatted_text(fragments)
| 17,884 | 4,835 |
import numpy as np
import superfast
def test_sum():
    """The C extension's row-major sum must match NumPy's own sum."""
    matrix = np.array([[1, 2, 3], [2, 3, 4]]).astype('float64')
    expected = matrix.sum()
    assert expected == superfast.sum_row_major_order(matrix)
| 163 | 68 |
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIM(nn.Module):
    """Layer computing a per-pixel SSIM-based dissimilarity map.

    Uses 3x3 average pooling (with reflection padding, so spatial size is
    preserved) to estimate local means/variances, and returns
    (1 - SSIM) / 2 clamped to [0, 1]: 0 = identical, 1 = maximally different.
    """

    def __init__(self):
        super(SSIM, self).__init__()
        # Separate pooling layers for each local statistic (no parameters).
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        # Standard SSIM stabilization constants.
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y):
        # Reflection-pad so the 3x3 pools keep the original spatial size.
        x = self.refl(x)
        y = self.refl(y)
        # Local means.
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        # Local (co)variances via E[x^2] - E[x]^2.
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        numerator = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        denominator = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
        # Map SSIM in [-1, 1] to a dissimilarity in [0, 1].
        return torch.clamp((1 - numerator / denominator) / 2, 0, 1)
def upsample(x, scale_factor=2, mode="nearest"):
    """Upsample a tensor's spatial dimensions via interpolation.

    Generalized (backward-compatibly) from a fixed 2x nearest-neighbor
    upsample: callers may now choose the factor and interpolation mode.

    Args:
        x: input tensor of shape (N, C, H, W).
        scale_factor: spatial multiplier (default 2, the original behavior).
        mode: interpolation mode accepted by F.interpolate (default "nearest").

    Returns:
        Tensor of shape (N, C, H * scale_factor, W * scale_factor).
    """
    return F.interpolate(x, scale_factor=scale_factor, mode=mode)
class ConvBlock(nn.Module):
    """3x3 convolution followed by an ELU non-linearity."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        # Conv3x3 pads internally, so spatial size is preserved.
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, x):
        return self.nonlin(self.conv(x))
class Conv1x1(nn.Module):
    """Pointwise (1x1) convolution wrapper; bias disabled by default."""

    def __init__(self, in_channels, out_channels, bias=False):
        super(Conv1x1, self).__init__()
        # int() casts tolerate float channel counts from upstream arithmetic.
        self.conv = nn.Conv2d(int(in_channels), int(out_channels),
                              kernel_size=1, stride=1, bias=bias)

    def forward(self, x):
        return self.conv(x)
class Conv3x3(nn.Module):
    """3x3 convolution that preserves spatial size via 1-pixel padding.

    Padding is reflection by default; pass use_refl=False for zero padding.
    """

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        pad_cls = nn.ReflectionPad2d if use_refl else nn.ZeroPad2d
        self.pad = pad_cls(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        return self.conv(self.pad(x))
class Conv5x5(nn.Module):
    """5x5 convolution that preserves spatial size via 2-pixel padding.

    Padding is reflection by default; pass use_refl=False for zero padding.
    """

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv5x5, self).__init__()
        pad_cls = nn.ReflectionPad2d if use_refl else nn.ZeroPad2d
        self.pad = pad_cls(2)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5)

    def forward(self, x):
        return self.conv(self.pad(x))
class CRPBlock(nn.Module):
    """Chained residual pooling block.

    Runs n_stages of (5x5 max-pool -> 1x1 conv) on a running tensor and
    accumulates each stage's output back onto the input residually.
    """

    def __init__(self, in_planes, out_planes, n_stages):
        super(CRPBlock, self).__init__()
        # Stage convs are stored as numbered attributes ('1_pointwise', ...)
        # so state_dict keys stay stable across checkpoints.
        for stage in range(n_stages):
            planes_in = in_planes if stage == 0 else out_planes
            setattr(self, '{}_{}'.format(stage + 1, 'pointwise'),
                    Conv1x1(planes_in, out_planes, False))
        self.stride = 1
        self.n_stages = n_stages
        # Stride-1 pooling with padding keeps the spatial size unchanged.
        self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)

    def forward(self, x):
        top = x
        for stage in range(self.n_stages):
            top = self.maxpool(top)
            top = getattr(self, '{}_{}'.format(stage + 1, 'pointwise'))(top)
            x = top + x
        return x
def compute_depth_errors(gt, pred):
    """Standard monocular-depth evaluation metrics.

    Args:
        gt: ground-truth depth tensor (strictly positive values assumed,
            since log and division by gt are used).
        pred: predicted depth tensor, same shape as gt.

    Returns:
        (abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3) scalar tensors, where
        a1/a2/a3 are the fractions of pixels with max-ratio below 1.25^k.
    """
    ratio = torch.max(gt / pred, pred / gt)
    a1 = (ratio < 1.25).float().mean()
    a2 = (ratio < 1.25 ** 2).float().mean()
    a3 = (ratio < 1.25 ** 3).float().mean()

    diff = gt - pred
    rmse = torch.sqrt((diff ** 2).mean())
    rmse_log = torch.sqrt(((torch.log(gt) - torch.log(pred)) ** 2).mean())
    abs_rel = (torch.abs(diff) / gt).mean()
    sq_rel = ((diff ** 2) / gt).mean()

    return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
import numpy as np
from plato.core import as_floatx, create_shared_variable, symbolic, add_update
from theano import tensor as tt
class FutureWeightGradCalculator(object):
    """Estimates weight gradients from kp/kd-coded signals, keeping decayed
    running sums of past input and error activity as Theano shared state.

    NOTE(review): built on plato's @symbolic/add_update machinery; the state
    updates below appear to assume one compute_grad call per time step —
    confirm against the training loop.
    """
    def __init__(self, kp, kd, shapes):
        """
        :param kp: Proportional coding gain (scalar).
        :param kd: Derivative coding gain (scalar).
        :param shapes: A tuple that specifies (minibatch_size, n_in, n_out)
        """
        self.kp = kp
        self.kd = kd
        # Geometric decay factor applied to accumulated past activity.
        self.r = kd/as_floatx(kp+kd)
        # Normalization constant for the gradient estimate.
        self.scale = (1./as_floatx(kp**2 + 2*kp*kd))
        # Running (decayed) sums of past input / error signals.
        self.x_past = create_shared_variable(np.zeros((shapes[0], shapes[1])))
        self.e_past = create_shared_variable(np.zeros((shapes[0], shapes[2])))
    @symbolic
    def compute_grad(self, xc, ec, x_true = None, e_true = None):
        """
        :param xc: Current coded input, shape (minibatch_size, n_in).
        :param ec: Current coded error, shape (minibatch_size, n_out).
        :param x_true: Optional true input signal; when given, the "past"
            input is reconstructed from it instead of the running sum
            (and the running sum is left untouched).
        :param e_true: Optional true error signal; analogous to x_true.
        :return: Symbolic weight-gradient estimate of shape (n_in, n_out).
        """
        # Either decay the stored past activity or reconstruct it exactly
        # from the provided true signal.
        x_past = self.x_past*self.r if x_true is None else x_true*(self.kp+self.kd)-xc
        e_past = self.e_past*self.r if e_true is None else e_true*(self.kp+self.kd)-ec
        w_grad = self.scale * (xc.T.dot(e_past+ec) + x_past.T.dot(ec))
        # Accumulate state only when running in the estimated (non-true) mode.
        if x_true is None:
            add_update(self.x_past, x_past + xc)
        if e_true is None:
            add_update(self.e_past, e_past + ec)
        return w_grad
@symbolic
def past_weight_grad_calculator2(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):
    """
    This attempt never really got off the ground. It doesn't work

    Kept for reference only: an abandoned experiment at a spike-gated
    running-sum weight-gradient estimator. Note the dead code below
    (`dw`, and the commented-out alternatives) — do not use.
    """
    kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
    n_samples, n_in, n_out = shapes
    # Per-signal decay ratios.
    rx = kd_x/(kp_x+kd_x)
    re = kd_e/(kp_e+kd_e)
    # Decayed running representations of input / error signals.
    xr = create_shared_variable(np.zeros((n_samples, n_in)))
    er = create_shared_variable(np.zeros((n_samples, n_out)))
    # xr_new = xr*rx + xs/(kp_x+kd_x)
    # er_new = er*re + es/(kp_e+kd_e)
    arr = rx*re/(1-rx*re)
    xr_new = xr*arr + xs/(kp_x+kd_x)
    er_new = er*arr + es/(kp_e+kd_e)
    # Cumulative sums, reset (zeroed) wherever a spike occurs.
    xsum = create_shared_variable(np.zeros((n_samples, n_in)))
    esum = create_shared_variable(np.zeros((n_samples, n_out)))
    xsum_new = xsum+xr_new
    esum_new = esum+er_new
    # Masks: 1 where there is NO spike, 0 where there is one.
    x_nospikes = tt.eq(xs, 0)
    e_nospikes = tt.eq(es, 0)
    # NOTE(review): dw is computed but never returned — dead code.
    dw = xs.T.dot(esum_new) + xsum_new.T.dot(es)
    add_update(xr, xr_new)
    add_update(er, er_new)
    add_update(xsum, xsum_new*x_nospikes)
    add_update(esum, esum_new*e_nospikes)
    # Returns the estimate built from the PRE-update running sums.
    return xs.T.dot(er) + xr.T.dot(es)
    # return xr.T.dot(er)
    # return dw
import logging
from cart import Cart
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.utils import translation
from mailtemplates.models import EMailTemplate
from payment.models import PrePayment
from payment.services.paypal import paypal
from shop.checkout_wizard import condition_step_3, CheckoutWizardBase
from shop.models import Product, Order
from django.http import Http404, HttpResponseNotAllowed
from django.shortcuts import redirect, render_to_response, render
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger(__name__)
@never_cache
def index_view(request):
    """Render the cart index page."""
    context = RequestContext(request)
    return render_to_response("cuescience_shop/cart/index.html", context)
@never_cache
def add_view(request, product_id):
    """Add one unit of a product to the session cart, then redirect.

    The redirect target comes from the ``next`` query parameter ("/" if
    absent). 404s when the product does not exist; GET only.
    """
    if request.method != "GET":
        return HttpResponseNotAllowed(["GET"])
    redirect_to = request.GET.get("next", "/")
    cart = Cart(request)
    try:
        product = Product.objects.get(pk=product_id)
    except Product.DoesNotExist:
        raise Http404
    cart.add(product, product.price)
    return redirect(redirect_to)
@never_cache
def remove_view(request, product_id):
    """Remove a product from the session cart, then redirect.

    The redirect target comes from the ``next`` query parameter ("/" if
    absent). 404s when the product does not exist; POST only.
    """
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    redirect_to = request.GET.get("next", "/")
    cart = Cart(request)
    try:
        product = Product.objects.get(pk=product_id)
    except Product.DoesNotExist:
        raise Http404
    cart.remove(product)
    return redirect(redirect_to)
@never_cache
def update_view(request):
    """Update cart item quantities from posted "quantity-<pk>" fields.

    A quantity of 0 deletes the item; an unchanged, missing, or
    non-numeric value leaves the item untouched. Redirects to the
    ``next`` query parameter ("/" if absent). POST only.
    """
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    redirect_to = request.GET.get("next", "/")
    cart = Cart(request)
    for item in cart:
        raw_quantity = request.POST.get("quantity-{0}".format(item.product.pk), None)
        if raw_quantity is None:
            continue
        # Fix: the original called int() on unvalidated form input, so a
        # non-numeric value raised an unhandled ValueError (500). Skip such
        # values instead.
        try:
            quantity = int(raw_quantity)
        except (TypeError, ValueError):
            continue
        if quantity == item.quantity:
            continue
        if quantity == 0:
            item.delete()
            continue
        item.quantity = quantity
        item.save()
    return redirect(redirect_to)
class CheckoutWizard(CheckoutWizardBase):
    # Multi-step checkout: client data (step 0), shipping address (step 1),
    # optional billing address (step 2), payment selection (step "4").
    template_name = "cuescience_shop/cart/wizard.html"
    def create_paypalpayment(self, cart):
        """Build a PayPal transaction from the cart items (plus a shipping
        line item with sku 0) and create the payment via the PayPal service.

        Returns the service's payment-creation result object.
        """
        paypalservice = paypal.PayPalService()
        transaction = paypal.Transaction(total=cart.summary())
        for cart_item in cart:
            print("ITEM {0}".format(cart_item))
            product = cart_item.product
            item = paypal.Item(product.title, cart_item.get_unit_price(), cart_item.quantity, "EUR", sku=product.id)
            transaction.item_list.append(item)
        #TODO add translation
        item = paypal.Item("Versand / Shipping", cart.shipping_costs(), 1, "EUR", sku=0)
        transaction.item_list.append(item)
        domain = get_current_site(self.request)
        payment_result = paypalservice.create_payment(transaction, domain)
        return payment_result
    def done(self, form_list, **kwargs):
        """Final wizard step: persist client, addresses and order, create the
        selected payment, send the confirmation e-mail, and render the
        success/failure template.
        """
        cart = Cart(self.request)
        cart.create_cart()
        order = Order(cart=cart.cart)
        # Step 0: client form (saved after addresses are attached).
        client = form_list[0].save(commit=False)
        # Step 1: shipping address; reused as billing address unless step 3's
        # condition says a separate billing address was entered.
        address = form_list[1].save()
        client.shipping_address = address
        billing_address = address
        if condition_step_3(self):
            billing_address = form_list[2].save()
        client.billing_address = billing_address
        client.save()
        order.client = client
        payment_option = self.get_cleaned_data_for_step("4").get("payment_options", None)
        print ("PAYMENT {0}".format(self.get_cleaned_data_for_step("4")))
        # Language code selects the localized mail template, e.g. "DE_...".
        language = translation.get_language().upper()
        if payment_option == "PayPal":
            result = self.create_paypalpayment(cart)
            order.payment = result.paypal_payment_db
            order.save()
            # we need to do the checkout after saving the order,
            # if something went wrong
            cart.check_out()
            mail_result = EMailTemplate.objects.send("{0}_ORDER_SUCCESS_PAYPAL".format(language), client.email,
                                       {"order": order, "billing_address": billing_address,
                                        "shipping_address": address,
                                        "paypal_url": order.payment.approval_url})
            # NOTE(review): 'result.payment.error' here vs 'result.payment.errors'
            # in the elif below — the attribute names look inconsistent; confirm
            # which one the PayPal service result actually exposes.
            if result.payment.error:
                logger.error("PayPal payment went wrong! Errors: {0}".format(result.payment.error))
                return render(self.request, "cuescience_shop/failure_paypal.html", {"order": order})
            elif not result.payment.errors and order.payment.approval_url:
                return render(self.request, "cuescience_shop/success_paypal.html", {"order": order})
        elif payment_option == "Prepayment":
            payment = PrePayment()
            payment.save()
            order.payment = payment
            order.save()
            cart.check_out()
            mail_result = EMailTemplate.objects.send("{0}_ORDER_SUCCESS_PREPAYMENT".format(language), client.email,
                                       {"order": order, "billing_address": billing_address,
                                        "shipping_address": address})
            return render(self.request, "cuescience_shop/success.html", {"order": order})
        # Fallback (unknown payment option or unhandled PayPal state):
        # re-render the cart index page.
        return render_to_response("cuescience_shop/cart/index.html", RequestContext(self.request))
| 5,603 | 1,572 |
class Solution:
    def strWithout3a3b(self, A: int, B: int) -> str:
        """Greedily build a string of A 'a's and B 'b's with no 'aaa'/'bbb'.

        Each step emits up to two of the majority letter, then zero to two
        of the minority letter, and recurses on the remainder.
        """
        if A == 0 and B == 0:
            return ''
        if A >= B:
            take_a = 2 if A >= 2 else 1
            # Take two 'b's only when 'a' alone can no longer stay ahead.
            if A - take_a - B < 1 and B >= 2:
                take_b = 2
            elif B:
                take_b = 1
            else:
                take_b = 0
            return 'a' * take_a + 'b' * take_b + self.strWithout3a3b(A - take_a, B - take_b)
        take_b = 2 if B >= 2 else 1
        if B - take_b - A < 1 and A >= 2:
            take_a = 2
        elif A:
            take_a = 1
        else:
            take_a = 0
        return 'b' * take_b + 'a' * take_a + self.strWithout3a3b(A - take_a, B - take_b)
| 498 | 202 |
"""Clean API"""
import logging
from pathlib import Path
from . import readers
log = logging.getLogger(__name__)
def kinetic_model(src, dst=None, params=None, model='srtmb_basis', input_interp_method='linear',
                  w=None, r1=1, k2p=0.000250, beta_lim=None, n_beta=40, linear_phase_start=500,
                  linear_phase_end=None, km_outputs=None, thr=0.1, fig=False):
    """
    Fit a kinetic model voxel-wise to a dynamic PET NIfTI image and save the
    resulting parametric images (plus the model fit) as NIfTI files.

    Args:
      src (Path or str): input patient directory or filename
      dst (Path or str): output directory (default: `src` directory)
      params (Path or str): config (relative to `src` directory)
      model (str): any model from `niftypad.models` (see `niftypad.models.NAMES`)
      input_interp_method (str): the interpolation method for getting reference input:
        linear, cubic, exp_1, exp_2, feng_srtm
      w (ndarray): weights for weighted model fitting
      r1 (float): a pre-chosen value between 0 and 1 for r1, used in srtmb_asl_basis
      k2p (float): a pre-chosen value for k2p, in second^-1, used in
        srtmb_k2p_basis, logan_ref_k2p, mrtm_k2p
      beta_lim (list[int]): [beta_min, beta_max] for setting the lower and upper limits
        of beta values in basis functions, used in srtmb_basis, srtmb_k2p_basis, srtmb_asl_basis
      n_beta (int): number of beta values/basis functions, used in
        srtmb_basis, srtmb_k2p_basis, srtmb_asl_basis
      linear_phase_start (int): start time of linear phase in seconds, used in logan_ref,
        logan_ref_k2p, mrtm, mrtm_k2p
      linear_phase_end (int): end time of linear phase in seconds, used in logan_ref,
        logan_ref_k2p, mrtm, mrtm_k2p
      km_outputs (list[str]): the kinetic parameters to save, e.g. ['R1', 'k2', 'BP']
      thr (float): threshold value between 0 and 1. Used to mask out voxels with mean value
        over time exceeding `thr * max(image value)`
      fig (bool): whether to show a figure to check model fitting
    """
    # Heavy dependencies imported lazily so the module stays cheap to import.
    import nibabel as nib
    import numpy as np
    from niftypad import basis
    from niftypad.image_process.parametric_image import image_to_parametric
    from niftypad.models import get_model_inputs
    from niftypad.tac import Ref
    # Resolve `src` to a concrete .nii file: a directory means "take the
    # first *.nii found inside it".
    src_path = Path(src)
    if src_path.is_dir():
        fpath = next(src_path.glob('*.nii'))
    else:
        fpath = src_path
        src_path = fpath.parent
    log.debug("file:%s", fpath)
    if dst is None:
        dst_path = src_path
    else:
        dst_path = Path(dst)
        assert dst_path.is_dir()
    # Frame timing (dt) and reference-region TAC come from the sidecar meta.
    meta = readers.find_meta(src_path, filter(None, [params, fpath.stem]))
    dt = np.asarray(meta['dt'])
    ref = np.asarray(meta['ref'])
    ref = Ref(ref, dt)
    # change ref interpolation to selected method
    ref.run_interp(input_interp_method=input_interp_method)
    # NOTE(review): this debug message describes the glob that already ran
    # above — consider moving it before the glob.
    log.debug("looking for first `*.nii` file in %s", src_path)
    img = nib.load(fpath)
    # pet_image = img.get_fdata(dtype=np.float32)
    pet_image = np.asanyarray(img.dataobj)
    # basis functions
    if beta_lim is None:
        # Default beta range is specified per-minute, converted to per-second.
        beta_lim = [0.01 / 60, 0.3 / 60]
    # change ref.inputf1cubic -> ref.input_interp_1
    b = basis.make_basis(ref.input_interp_1, dt, beta_lim=beta_lim, n_beta=n_beta, w=w, k2p=k2p)
    if km_outputs is None:
        km_outputs = ['R1', 'k2', 'BP']
    # change ref.inputf1cubic -> ref.input_interp_1
    # Superset of inputs; get_model_inputs() picks the ones `model` needs.
    user_inputs = {
        'dt': dt, 'ref': ref, 'inputf1': ref.input_interp_1, 'w': w, 'r1': r1, 'k2p': k2p,
        'beta_lim': beta_lim, 'n_beta': n_beta, 'b': b, 'linear_phase_start': linear_phase_start,
        'linear_phase_end': linear_phase_end, 'fig': fig}
    model_inputs = get_model_inputs(user_inputs, model)
    # log.debug("model_inputs:%s", model_inputs)
    parametric_images_dict, pet_image_fit = image_to_parametric(pet_image, dt, model, model_inputs,
                                                                km_outputs, thr=thr)
    # NOTE(review): these f-strings produce names like '<stem>_<model>_<kp>_.nii'
    # — the '_' immediately before fpath.suffix looks unintended; confirm what
    # downstream consumers expect before changing it.
    for kp in parametric_images_dict:
        nib.save(nib.Nifti1Image(parametric_images_dict[kp], img.affine),
                 f"{dst_path / fpath.stem}_{model}_{kp}_{fpath.suffix}")
    nib.save(nib.Nifti1Image(pet_image_fit, img.affine),
             f"{dst_path / fpath.stem}_{model}_fit_{fpath.suffix}")
| 4,178 | 1,534 |
"""Raw CLI commands."""
from typing import IO
import click
from ..._templates import artifact_builder, replication_listener, source_region_core
from ..._util.workers_zip import build_and_write_workers
from .add import add_to_deployment
from .init import init_project
_TEMPLATES = {"builder": artifact_builder, "listener": replication_listener, "core-source": source_region_core}
@click.group("raw")
def raw_cli():
    """Raw Accretion commands. Not recommended for direct use."""
    # Group callback is intentionally empty; subcommands are registered
    # below via raw_cli.add_command(...) and the @raw_cli.command decorators.
@raw_cli.command()
@click.argument("template_type", type=click.Choice(_TEMPLATES.keys()))
@click.argument("output", type=click.File(mode="w", encoding="utf-8"))
def generate(template_type: str, output: IO):
    """Generate a template.
    OUTPUT : Where to write the template?
    \f
    :param str template_type: The type of template to generate.
    :param str output: Where to write the template?
    """
    # Look up the requested template module, build it, and write it out
    # as indented JSON in a single expression.
    output.write(_TEMPLATES[template_type].build().to_json(indent=4))
@raw_cli.command()
@click.argument("output", type=click.File(mode="wb"))
def build_workers(output: IO):
    """Build the workers zip file.
    OUTPUT : Where to write the zip?
    \f
    :param str output: Where to write the workers zip?
    """
    # Delegates entirely to the shared helper, which assembles the workers
    # package and writes it to the opened binary stream.
    build_and_write_workers(outfile=output)
# Attach subcommands implemented in sibling modules to the "raw" group.
raw_cli.add_command(add_to_deployment)
raw_cli.add_command(init_project)
| 1,360 | 435 |
class ZiggeoConfig:
    """Default Ziggeo API endpoints and client tuning values."""

    def __init__(self):
        # Primary endpoints (us-east-1 / global).
        self.server_api_url = "https://srv-api.ziggeo.com"
        self.api_url = "https://api-us-east-1.ziggeo.com"
        self.cdn_url = "https://video-cdn.ziggeo.com"
        # Region-keyed endpoint overrides ("r1" maps to eu-west-1).
        self.regions = {"r1": "https://srv-api-eu-west-1.ziggeo.com"}
        self.api_regions = {"r1": "https://api-eu-west-1.ziggeo.com"}
        self.cdn_regions = {"r1": "https://video-cdn-eu-west-1.ziggeo.com"}
        # Client tuning: retry factor and request timeout in seconds.
        self.resilience_factor = 5
        self.request_timeout = 60
# Config fragment in the mmcv/mmengine `_base_` inheritance style: inherit
# from the base optimizer config, then override the optimizer settings.
# (Presumably loaded through the framework's Config machinery rather than
# imported directly — TODO confirm which framework consumes this file.)
_base_ = './optimizer.py'
optimizer = dict(
    type='Adam',   # replaces the base config's optimizer type
    lr=0.003       # learning rate override
)
| 77 | 38 |
# Notebook-export preamble: hides code cells in the HTML export and renders
# a styled table of contents. Requires an IPython/Jupyter environment.
from IPython.display import display
from IPython.display import HTML
import IPython.core.display as di
# This line will hide code by default when the notebook is exported as HTML
di.display_html('<script>jQuery(function() {if (jQuery("body.notebook_app").length == 0) { jQuery(".input_area").toggle(); jQuery(".prompt").toggle();}});</script>', raw=True)
# This line will add a button to toggle visibility of code blocks, for use with the HTML export version
di.display_html('''<button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Show/hide code</button>''', raw=True)
# Inject the table-of-contents CSS. Fix: the notebook exporter had mangled
# 'width: 100%;' into a get_ipython().run_line_magic() call inside this
# string literal; restored to plain CSS.
di.display_html("""
<style>
#customers {
font-family: "Trebuchet MS", Arial, Helvetica, sans-serif;
border-collapse: collapse;
width: 100%;
}
#customers td, #customers th {
border: 1px solid #ddd;
padding: 8px;
text-align: center;
}
.content:nth-child(even){background-color: #f2f2f2;}
.content:hover{background-color:#C7C9C7;}
#customers th {
padding-top: 12px;
padding-bottom: 12px;
text-align: center;
color: white;
}
.first{
background-color: #4B6D80;
font-size:20px;
}
.second{
background-color: #71A4BF;
}
.third{
background-color: #B1D0E8;
color: white;
}
#customers a {
color: black;
padding: 10px 20px;
text-align: center;
text-decoration: none;
text-decoration-line: none;
text-decoration-style: solid;
text-decoration-color: currentcolor;
text-decoration-thickness: auto;
display: inline-block;
font-size: 16px;
margin-left: 20px;
}
</style>
""", raw=True)
# Render the table of contents itself (anchor links into the notebook).
# Fix: removed a stray '"' after "Importing Require Libraries".
di.display_html("""
<table id="customers">
<thead class="first">
<th colspan=5>Table of contents</th>
<tbody>
<tr>
<td colspan=5 class="cell"><a href='#Importing-Require'>Importing Require Libraries</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataLoad'>Load</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataInsights'>Exploration Data - Data Insights</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#SummaryStatistics'>Exploration Data - Summary Statistics</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataLoad'>Data Cleaning</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataVisualization'>Data Visualization</a></td>
</tr>
<tr>
<td class="cell"><a href='#missing-value'>check missing values</a></td>
<td class="cell"><a href='#correlation'>correlation</a></td>
<td class="cell"><a href='#'>Correlation Heat Maps - Seaborn</a></td>
<td class="cell"><a href='#Outliers'>Outliers</a></td>
<td class="cell"><a href='#distribution-Skewness'>distribution-Skewness</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#Prediction'>Prediction Age and pay - Linear Regression</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#Comments-on-results'>Comments on results</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#References'>References</a></td>
</tr>
</tbody>
</table>
""", raw=True)
# Credit-card dataset exploration and a small linear-regression experiment.
# Notebook-exported script: top-level expressions (df.head() etc.) only
# display output when run in an IPython/Jupyter session.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm  # Predict
import statsmodels.formula.api as smf  # Predict
from sklearn import datasets, linear_model  # Learn
from sklearn.metrics import mean_squared_error  # Learn
get_ipython().run_line_magic("matplotlib", " inline")
df = pd.read_csv('dataset/credit_cards_dataset.csv', sep=',')
df.head()
df.shape
df.columns.values
df.info()
df.describe()
df.AGE.unique()
df.LIMIT_BAL.unique()
df.MARRIAGE.value_counts()
# - This tells us count of each MARRIAGE score in descending order.
# - "MARRIAGE" has most values concentrated in the categories 2, 1 .
# - Only a few observations made for the categories 3 & 0
## DATA CLEANING
### On the Dataset description , we don't have "MARRIAGE Status" = 0, so we need to clean up these values
df = df.loc[df["MARRIAGE"].isin([1, 2])]
df
# Data Visualization
sns.heatmap(df.isnull(), cbar=False, yticklabels=False, cmap='viridis')
plt.figure(figsize=(6, 4))
sns.heatmap(df.corr(), cmap='Blues', annot=False)
plt.figure(figsize=(6, 4))
sns.heatmap(df.corr(), cmap='Blues', annot=True)
# Quality correlation matrix
k = 12  # number of variables for heatmap
cols = df.corr().nlargest(k, 'LIMIT_BAL')['LIMIT_BAL'].index
cm = df[cols].corr()
plt.figure(figsize=(10, 6))
sns.heatmap(cm, annot=True, cmap='viridis')
l = df.columns.values
number_of_columns = 12
# Fix: the original computed len(l)-1/number_of_columns, which due to
# operator precedence is len(l) - (1/12) rather than the intended row
# count; use integer division of the whole expression.
number_of_rows = (len(l) - 1) // number_of_columns
plt.figure(figsize=(number_of_columns, 5 * number_of_rows))
for i in range(0, len(l)):
    plt.subplot(number_of_rows + 1, number_of_columns, i + 1)
    sns.set_style('whitegrid')
    sns.boxplot(df[l[i]], color='green', orient='v')
    plt.tight_layout()
plt.figure(figsize=(2 * number_of_columns, 5 * number_of_rows))
for i in range(0, len(l)):
    plt.subplot(number_of_rows + 1, number_of_columns, i + 1)
    sns.distplot(df[l[i]], kde=True)
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.2, random_state=4)
results1 = smf.ols('AGE ~ PAY_0 + PAY_2 + PAY_3 + PAY_4 ', data=df).fit()
print(results1.summary())
y = train["AGE"]
cols = ["PAY_0", "PAY_2", "PAY_3", "PAY_4"]
X = train[cols]
regr = linear_model.LinearRegression()
regr.fit(X, y)
ytrain_pred = regr.predict(X)
# Fix: the exporter had mangled the '%.2f' format specifiers in the two
# prints below into invalid get_ipython().run_line_magic() calls; restored
# the original %-formatted strings.
print("In-sample Mean squared error: %.2f"
      % mean_squared_error(y, ytrain_pred))
ytest = test["AGE"]
cols = ["PAY_0", "PAY_2", "PAY_3", "PAY_4"]
Xtest = test[cols]
ypred = regr.predict(Xtest)
print("Out-of-sample Mean squared error: %.2f"
      % mean_squared_error(ytest, ypred))
from dataclasses import dataclass
from typing import Dict
@dataclass(frozen=True)
class Facility:
    """Immutable record describing a (candidate) supply facility.

    Frozen, so instances are hashable and safe to share between callers.
    NOTE(review): field semantics below are inferred from names only —
    confirm against the code that constructs these objects.
    """

    name: str  # identifier of the facility
    exists: bool  # presumably True if the facility is already built — confirm
    build_cost: float  # presumably one-time cost to open the facility — confirm
    supply: float  # capacity this facility can provide (units unknown from here)
    transport_cost: Dict[str, float]  # customer to cost
| 228 | 72 |
#!/usr/bin/python
# vim:set et sw=4:
#
# Originally from:
# http://cvs.fedoraproject.org/viewvc/F-13/ca-certificates/certdata2pem.py?revision=1.1&content-type=text%2Fplain&view=co
#
# certdata2pem.py - converts certdata.txt into PEM format.
#
# Copyright (C) 2009 Philipp Kern <pkern@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
# USA.
import base64
import os.path
import re
import sys
import textwrap
# Dirty stream parser for NSS certdata.txt: reads stdin and collects one
# attribute dictionary per object block into `objects`.
objects = []
# Parser state: past BEGINDATA / inside a MULTILINE_OCTAL value / inside an object.
in_data, in_multiline, in_obj = False, False, False
# `field_type` replaces the original name `type`, which shadowed the builtin.
field, field_type, value, obj = None, None, None, dict()
for line in sys.stdin:
    # Ignore the file header until the BEGINDATA marker.
    if not in_data:
        if line.startswith('BEGINDATA'):
            in_data = True
        continue
    # Ignore comment lines.
    if line.startswith('#'):
        continue
    # Empty lines are significant if we are inside an object: they end it.
    if in_obj and len(line.strip()) == 0:
        objects.append(obj)
        obj = dict()
        in_obj = False
        continue
    if len(line.strip()) == 0:
        continue
    if in_multiline:
        if not line.startswith('END'):
            if field_type == 'MULTILINE_OCTAL':
                # Decode octal escapes like \102 into raw characters.
                line = line.strip()
                for octal in re.finditer(r'\\([0-3][0-7][0-7])', line):
                    value += chr(int(octal.group(1), 8))
            else:
                value += line
            continue
        obj[field] = value
        in_multiline = False
        continue
    # A CKA_CLASS attribute starts a new object.
    if line.startswith('CKA_CLASS'):
        in_obj = True
    line_parts = line.strip().split(' ', 2)
    if len(line_parts) > 2:
        field, field_type = line_parts[0:2]
        value = ' '.join(line_parts[2:])
    elif len(line_parts) == 2:
        field, field_type = line_parts
        value = None
    else:
        # FIX: was the Python-2-only statement `raise NotImplementedError, '...'`,
        # a syntax error under Python 3; the call form works in both.
        raise NotImplementedError('line_parts < 2 not supported.')
    if field_type == 'MULTILINE_OCTAL':
        in_multiline = True
        value = ""
        continue
    obj[field] = value
# Flush a trailing object not terminated by a blank line.
if obj:
    objects.append(obj)
# Build up trust database: maps CKA_LABEL -> True for every cert trusted
# as an SSL server-auth delegator.
trust = dict()
for obj in objects:
    # Only CKO_NETSCAPE_TRUST records carry the CKA_TRUST_* attributes.
    if obj['CKA_CLASS'] != 'CKO_NETSCAPE_TRUST':
        continue
    # For some reason, OpenSSL on Maemo has a bug where if we include
    # this certificate, and it winds up as the last certificate in the file,
    # then OpenSSL is unable to verify the server certificate. For now,
    # we'll just omit this particular CA cert, since it's not one we need
    # for crash reporting.
    # This is likely to be fragile if the NSS certdata.txt changes.
    # The bug is filed upstream:
    # https://bugs.maemo.org/show_bug.cgi?id=10069
    if obj['CKA_LABEL'] == '"ACEDICOM Root"':
        continue
    # We only want certs that are trusted for SSL server auth
    if obj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NETSCAPE_TRUSTED_DELEGATOR':
        trust[obj['CKA_LABEL']] = True
# Emit every trusted certificate to stdout in PEM format.
for obj in objects:
    if obj['CKA_CLASS'] == 'CKO_CERTIFICATE':
        # Skip certs whose label was not marked trusted above.
        if not obj['CKA_LABEL'] in trust or not trust[obj['CKA_LABEL']]:
            continue
        sys.stdout.write("-----BEGIN CERTIFICATE-----\n")
        # NOTE(review): Python-2-only — CKA_VALUE is a str built via chr();
        # under Python 3 b64encode would require bytes. Confirm interpreter.
        sys.stdout.write("\n".join(textwrap.wrap(base64.b64encode(obj['CKA_VALUE']), 64)))
        sys.stdout.write("\n-----END CERTIFICATE-----\n\n")
| 3,834 | 1,283 |
# Build/record timestamp — presumably written by an automated job on host
# "flin"; informational only as far as this file shows.
timestamp = "02-03-2022 13:12:15 on flin (by mightqxc)"
| 56 | 33 |
import json
import os
def read_config_file(filename):
    """
    Load and return a configuration from the supplied filename / path.

    Parameters
    ----------
    filename: string
        The name/path of the config file to load.

    Returns
    ----------
    config: object
        The resulting configuration loaded from the JSON file

    Raises
    ----------
    IOError
        If the file extension is not .json (checked case-insensitively).
    FileNotFoundError
        If the file does not exist.
    """
    # FIX: removed a leftover debug print of the extension.
    # Generalized: extension check is now case-insensitive (.JSON works too).
    if filename.split('.')[-1].lower() not in ['json']:
        raise IOError('Only json type are supported now!')
    if not os.path.exists(filename):
        raise FileNotFoundError('Config file does not exist!')
    with open(filename, 'r') as f:
        config = json.load(f)
    return config
# ------------------------------
# 26. Remove Duplicates from Sorted Array
#
# Description:
# Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
# Do not allocate extra space for another array, you must do this in place with constant memory.
#
# For example,
# Given input array nums = [1,1,2],
#
# Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. It doesn't matter what you leave beyond the new length.
#
# Version: 1.0
# 09/17/17 by Jianfa
# ------------------------------
class Solution(object):
    """LeetCode 26 — Remove Duplicates from Sorted Array."""

    def removeDuplicates(self, nums):
        """
        Remove duplicates from a sorted list in place; return the new length.

        FIX: the original popped duplicates one by one (list.pop(i) shifts
        the tail, O(n^2) overall). This uses the two-pointer technique the
        trailing note in this file recommends: `write` marks the end of the
        de-duplicated prefix while the loop scans once — O(n). The tail is
        then truncated so callers observe the same final list as before.

        :type nums: List[int]
        :rtype: int
        """
        write = 0  # next slot for a unique value
        for value in nums:
            # The first element is always kept; afterwards keep only values
            # that differ from the last value written (input is sorted).
            if write == 0 or nums[write - 1] != value:
                nums[write] = value
                write += 1
        del nums[write:]  # keep full equivalence with the pop()-based version
        return write
# Used for test
# Ad-hoc smoke test: prints the de-duplicated length (4) for a sample input.
if __name__ == "__main__":
    sample = [1, 1, 1, 2, 3, 4, 4, 4, 4]
    print(Solution().removeDuplicates(sample))
# ------------------------------
# Good idea from other solution:
# Actually there is no need to really remove value from the list. As the last sentence said
# "It doesn't matter what you leave beyond the new length." So we can just modify the first several
# numbers which is the length of unique values, but leave other values behind unchanged. We set two
# runner: a fast runner and a slow runner. As long as a different value is met, modify the corresponding
# value in position of slow runner, otherwise move the fast runner.
# Here is a link for reference:
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/solution/ | 1,944 | 560 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Troels Agergaard Jacobsen

# Package metadata — presumably consumed by setup tooling / --version output.
__version__ = '0.2.0'
__description__ = 'Command line utility to backup Evernote notes and notebooks.'
| 197 | 75 |
"""Plot clustered spikes
Usage:
python ncpsort.cluster_synthetic_data.inference_plot_synthetic \
--inference_dir ./inference_synthetic_N-1000/cluster_S-150-beam_NCP-10000 \
--min_cls_size 50 --plot_type overlay
or --inference_dir --min_cls_size 50 --plot_type tsne
"""
import numpy as np
import torch
import time
import json
import argparse
import os
from ncpsort.utils.spike_utils import get_chan_nbrs, select_template_channels, template_window
from ncpsort.utils.plotting import DEFAULT_COLORS
from ncpsort.utils.plotting import plot_spike_clusters_and_gt_in_rows
from ncpsort.utils.plotting import plot_spike_clusters_and_templates_overlay
from ncpsort.utils.plotting import plot_raw_and_encoded_spikes_tsne
# Command-line interface: which inference results to plot and how.
parser = argparse.ArgumentParser(description='Plot inference results.')
parser.add_argument('--inference_dir', type=str)  # dir holding infer_params.json and data_ncp/
parser.add_argument('--min_cls_size', type=int, default=0)  # smallest cluster size to draw
parser.add_argument('--topn', type=int, default=1)  # number of top clusterings to plot
parser.add_argument('--plot_mfm', action="store_const", const=True, default=False)  # also overlay MFM clustering
parser.add_argument('--plot_type', type=str, default="overlay")  # "overlay" or "tsne" (see main below)
if __name__ == "__main__":
    # Entry point: load inference parameters, then plot every result file in
    # <inference_dir>/data_ncp either as channel overlays or as t-SNE scatter.
    args = parser.parse_args()
    do_corner_padding = True  # NOTE(review): never read below — dead variable?
    output_dir = args.inference_dir
    with open(os.path.join(output_dir, "infer_params.json"), "r") as f:
        infer_params = json.load(f)
    min_cls_size = args.min_cls_size
    templates = None
    templates_use = None
    templates_name = None  # stays None; only formatted into dir/plot names
    # Hard-coded overrides of the stored parameters (neighbor geometry).
    infer_params['nbr_dist'] = 70
    infer_params['n_nbr'] = 7
    print("parameters:\n", json.dumps(infer_params, indent=2))
    # Fixed 7-electrode probe geometry (x, y positions in the same units
    # expected by get_chan_nbrs — presumably micrometers; confirm).
    geom = np.array([
        [-585.0, 270.0],
        [-645.0, 270.0],
        [-525.0, 270.0],
        [-615.0, 210.0],
        [-555.0, 210.0],
        [-615.0, 330.0],
        [-555.0, 330.0]]
    )
    chans_with_nbrs, chan_to_nbrs = get_chan_nbrs(geom, infer_params['nbr_dist'], infer_params['n_nbr'], keep_less_nbrs=False)
    print("{} channels used:".format(len(chans_with_nbrs)))
    print(chans_with_nbrs)
    topn = args.topn
    data_dir = os.path.join(output_dir, "data_ncp")
    # fig_dir_by_row = os.path.join(output_dir, "figures_by_row")
    # if not os.path.isdir(fig_dir_by_row): os.mkdir(fig_dir_by_row)
    # Output directories encode the filter settings in their names.
    fig_dir_overlay = os.path.join(output_dir, "figs_overlay_min-cls-{}_temp-{}".format(min_cls_size, templates_name))
    if not os.path.isdir(fig_dir_overlay): os.mkdir(fig_dir_overlay)
    fig_dir_vert_overlay = os.path.join(output_dir, "figs_overlay_vertical_min-cls-{}_temp-{}".format(min_cls_size, templates_name))
    if not os.path.isdir(fig_dir_vert_overlay): os.mkdir(fig_dir_vert_overlay)
    if args.plot_mfm:
        mfm_dir = os.path.join(infer_params['data_name'], "cluster_mfm", "data_mfm")
    input_dir = infer_params['data_name']
    # NOTE(review): rstrip(".npz") strips any trailing '.', 'n', 'p', 'z'
    # characters, not just the suffix — fragile for names ending in those.
    fnames_list = [x.rstrip(".npz") for x in os.listdir(os.path.join(input_dir, "data_input")) if x.endswith(".npz")]
    fnames_list = sorted(fnames_list)
    for fname in fnames_list:
        if args.plot_mfm:
            # Locate the matching MFM clustering for this input file.
            mfm_fname = [x for x in os.listdir(mfm_dir) if fname in x and x.endswith(".npy")]
            mfm_fname = mfm_fname[0].rstrip(".npy")
            npy_fname = os.path.join(mfm_dir, "{}.npy".format(mfm_fname))
            mfm_clusters = np.load(npy_fname)
            mfm_name = "MFM"
        else:
            mfm_clusters = None
            mfm_name = None
        print("Plotting {}:".format(fname))
        npz_fname = os.path.join(data_dir, "{}_ncp.npz".format(fname))
        npz = np.load(npz_fname)
        clusters, nll, data_arr, gt_labels = npz['clusters'], npz['nll'], npz['data_arr'], npz['gt_labels']
        # plot_spike_clusters_and_gt_in_rows(
        #     css, nll, data_arr, gt_labels, topn=topn,
        #     figdir=fig_dir_by_row, fname_postfix=fname,
        #     plot_params={"spacing":1.25, "width":0.9, "vscale":1.5, "subplot_adj":0.9},
        #     downsample=3)
        temp_in_ch = None  # no templates are plotted in this script
        templates_name = "{} templates".format(templates_name) if templates_name else None
        nbr_channels = np.arange(len(geom))
        if args.plot_type == 'overlay':
            # Overlay plot in the true probe geometry...
            plot_spike_clusters_and_templates_overlay(
                clusters, nll, data_arr, geom, nbr_channels, DEFAULT_COLORS, topn=topn,
                extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
                min_cls_size=min_cls_size, templates=temp_in_ch, template_name=templates_name,
                figdir=fig_dir_overlay, fname_postfix=fname, size_single=(9,6),
                plot_params={"time_scale":1.1, "scale":8., "alpha_overlay":0.1})
            # ...and again with channels stacked in a single vertical column.
            n_ch = len(nbr_channels)
            vertical_geom = np.stack([np.zeros(n_ch), - np.arange(n_ch) * 12 * 7]).T
            plot_spike_clusters_and_templates_overlay(
                clusters, nll, data_arr, vertical_geom, np.arange(n_ch), DEFAULT_COLORS, topn=topn,
                extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
                min_cls_size=min_cls_size, templates=temp_in_ch, template_name=templates_name,
                figdir=fig_dir_vert_overlay, fname_postfix=fname, size_single=(2.5,18), vertical=True,
                plot_params={"time_scale":1.1, "scale":8., "alpha_overlay":0.1})
        elif args.plot_type == 'tsne':
            fig_dir_tsne = os.path.join(output_dir, "figs_tsne_min-cls-{}".format(min_cls_size))
            if not os.path.isdir(fig_dir_tsne): os.mkdir(fig_dir_tsne)
            # Encoded spikes come from a fixed encoder checkpoint (it-18600).
            tsne_dir = os.path.join(infer_params['data_name'], "spike_encoder_it-18600/data_encoder")
            fname = [x for x in os.listdir(tsne_dir) if fname in x and x.endswith(".npz")]
            data_encoded = np.load(os.path.join(tsne_dir, "{}".format(fname[0])))
            data_encoded = data_encoded['encoded_spikes']
            fname = fname[0].rstrip("_encoded_spikes.npz")
            plot_raw_and_encoded_spikes_tsne(
                clusters, nll, data_arr, data_encoded, DEFAULT_COLORS, topn=topn,
                extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
                min_cls_size=min_cls_size, sort_by_count=True,
                figdir=fig_dir_tsne, fname_postfix=fname, size_single=(6,6),
                tsne_params={'seed': 0, 'perplexity': 30},
                plot_params={'pt_scale': 1}, show=False
            )
| 6,373 | 2,454 |
"""
Find (old) loanwords between two languages
"""
from ast import literal_eval
from functools import partial
from logging import getLogger
from pandas import DataFrame, Series, concat, read_csv
from tqdm import tqdm
from panphon.distance import Distance
from loanpy.helpers import gensim_multiword
from loanpy.adrc import Adrc
logger = getLogger(__name__)
class NoPhonMatch(Exception):
    """Raised by Search.loans when no phonological matches are found."""
    pass
def read_data(path2forms, adrc_col):
    """
    Load one column of predicted adapted/reconstructed words from a CLDF
    forms.csv file and return it as a flat pandas Series, one word per
    element (words may be regular expressions). Called by
    loanpy.loanfinder.Search.__init__.

    Rows flagged by loanpy.adrc.Adrc.adapt / .reconstruct with keywords
    such as "not old" or "wrong phonotactics" are discarded; ", "-separated
    predictions are split and exploded so every prediction gets its own
    element, sharing the index of its source row.

    :param path2forms: path to CLDF's forms.csv, or None (returns None so
        the Search class can be instantiated without data).
    :type path2forms: pathlib.PosixPath | str | None
    :param adrc_col: name of the column holding the predictions.
    :type adrc_col: str
    :return: one predicted word per element, or None if no path was given.
    :rtype: pandas.core.series.Series | None
    """
    if path2forms is None:
        return None
    # Red-flag keywords written into the column by adapt()/reconstruct().
    flagged = "wrong clusters|wrong phonotactics|not old|wrong vowel harmony"
    # Read only the needed column — keeps memory low for the work ahead.
    predictions = read_csv(path2forms, encoding="utf-8",
                           usecols=[adrc_col]).fillna("")[adrc_col]
    predictions = predictions[~predictions.str.contains(flagged)]
    # Reconstructed words contain no ", ", so splitting leaves them intact.
    return predictions.str.split(", ").explode()
def gen(iterable1, iterable2, function, prefix="Calculating", *args):
    """
    Lazily apply *function* to pairs drawn from two iterables, showing a
    tqdm progress bar labelled *prefix*. Called by
    loanpy.loanfinder.Search.loans to compute phonological and semantic
    distances.

    :param iterable1: first iterable; zipped with *iterable2* (its length
        drives the progress bar).
    :type iterable1: iterable
    :param iterable2: second iterable; zipped with *iterable1*.
    :type iterable2: iterable
    :param function: callable applied to each (element1, element2) pair.
    :type function: function
    :param prefix: label shown by the progress bar.
    :type prefix: str, default="Calculating"
    :param args: extra positional arguments forwarded to *function*
        (keyword arguments are deliberately not supported).
    :return: the successive results of *function*.
    :rtype: generator object
    """
    tracked = tqdm(iterable1, prefix)
    for left, right in zip(tracked, iterable2):
        yield function(left, right, *args)
class Search():
"""
Define the two word lists, the measurements to \
calculate phonological distance and semantic similarity \
and the thresholds below or above which to accept matches.
:param path2donordf: The path to forms.csv of the \
donor language containing a column of predicted adaptations into \
the recipient language.
:type path2donordf: pathlib.PosixPath | str | None, \
default=None
:param path2recipdf: The path to forms.csv of the \
recipient language, containing a column of \
predicted backward-reconstructions stored as regular expressions.
:type path2recipdf: pathlib.PosixPath | str | None, \
default=None
:param donorcol: The name of the column in the donor \
language's forms.csv containing a column of predicted adaptations into \
the tentative recipient language.
:type donorcol: str, default="ad"
:param recipcol: The name of the column in the recipient \
language's forms.csv containing a column of words in that language. When \
searching for old loanwords, this column can consist of regular \
expressions \
that represent backward reconstructions of present-day words.
:type recipcol: str, default="rc"
:param phondist: The maximal phonological distance between two words. \
By default, matches have to be identical.
:type phondist: int, default=0
:param phondist_msr: The name of the phonological distance measure, \
which has to be a method of panphon.distance.Distance
:type phondist_msr: "doglo_prime_distance" | \
"dolgo_prime_distance_div_maxlen" | \
"fast_levenshtein_distance" | \
"fast_levenshtein_distance_div_maxlen" | \
"feature_difference" | \
"feature_edit_distance" | \
"feature_edit_distance_div_maxlen" | \
"hamming_feature_edit_distance" | \
"hamming_feature_edit_distance_div_maxlen" | \
"hamming_substitution_cost" | \
"jt_feature_edit_distance" | \
"jt_feature_edit_distance_div_maxlen" | \
"jt_hamming_feature_edit_distance" | \
"jt_hamming_feature_edit_distance_div_maxlen" | \
"jt_weighted_feature_edit_distance" | \
"jt_weighted_feature_edit_distance_div_maxlen" | \
"levenshtein_distance", default="hamming_feature_edit_distance"
:param semsim: The minimal semantic similarity between the \
meaning of words. By default, meanings have to be identical.
:type semsim: int (float between -1 and 1 for gensim), default=1
:param semsim_msr: The function with which to measure semantic \
similarity.
:type semsim_msr: function of type func(a: str, b: str) -> int, \
default=loanpy.helpers.gensim_multiword
:param scdictlist_ad: list of correspondence dictionaries between \
tentative donor and recipient language generated with \
loanpy.qfysc.get_sound_corresp. Not a dictionary, therefore sequence \
important. \
Will be used in loanpy.loanfinder.Search.likeliestphonmatch to \
calculate likelihood \
(NSE) from predicted adaptation vs source word.
:type scdictlist_ad: None | list of 6 dicts. Dicts 0, 1, 2 \
capture phonological \
correspondences, dicts 3, 4, 5 phonotactic ones. dict0/dict3: the actual \
correspondences, dict1/dict4: How often they occur in the data, \
dict2/dict5: list of \
cognates in which they occur. default=None
:param scdictlist_rc: list of correspondence dictionaries between \
present-day language and past stage of that language generated with \
loanpy.qfysc.get_sound_corresp. Not a dictionary, therefore sequence \
important. \
Will be used in loanpy.loanfinder.Search.likeliestphonmatch to \
calculate likelihood \
(NSE) from predicted reconstruction vs source word.
:type scdictlist_rc: None | list of 6 dicts. Dicts 0, 1, 2 \
capture phonological \
correspondences, dicts 3, 4, 5 phonotactic ones. dict0/dict3: the actual \
correspondences, dict1/dict4: How often they occur in the data, \
dict2/dict5: list of \
cognates in which they occur. default=None
:Example:
>>> from pathlib import Path
>>> from loanpy.loanfinder import Search, __file__
>>> path2rec = Path(__file__).parent / "tests" \
/ "input_files"/ "hun.csv"
>>> path2don = Path(__file__).parent / "tests" \
/ "input_files"/ "got.csv"
>>> path2sc_ad = Path(__file__).parent / "tests" / "input_files" / \
"sc_ad_3cogs.txt"
>>> path2sc_rc = Path(__file__).parent / "tests" / "input_files" / \
"sc_rc_3cogs.txt"
>>> search_obj = Search(\
path2donordf=path2don, \
path2recipdf=path2rec, \
scdictlist_ad=path2sc_ad, \
scdictlist_rc=path2sc_rc)
How to plug in different semantic similarity measurement function, \
e.g. BERT:
>>> from loanpy import loanfinder
>>> from loanpy.helpers import plug_in_model
>>> # pip install transformers==4.19.2
>>> from sentence_transformers import SentenceTransformer
>>> from sklearn.metrics.pairwise import cosine_similarity
>>> plug_in_model(SentenceTransformer("bert-base-nli-mean-tokens"))
>>> def bert_similarity(sentence1, sentence2):
>>> return float(\
cosine_similarity(helpers.model.encode([sentence1]), \
helpers.model.encode([sentence2])))
>>> path2rec = Path(__file__).parent / "tests" \
/ "input_files"/ "hun.csv"
>>> path2don = Path(__file__).parent / "tests" \
/ "input_files"/ "got.csv"
>>> path2sc_ad = Path(__file__).parent / "tests" / "input_files" / \
"sc_ad_3cogs.txt"
>>> path2sc_rc = Path(__file__).parent / "tests" / "input_files" / \
"sc_rc_3cogs.txt"
>>> # plug in bert_similarity here into param <semsim_msr>
>>> search_obj = Search(path2donordf=path2don, path2recipdf=path2rec, \
scdictlist_ad=path2sc_ad, scdictlist_rc=path2sc_rc, \
semsim_msr=bert_similarity)
"""
def __init__(self, path2donordf=None, path2recipdf=None, donorcol="ad",
             recipcol="rc",
             phondist=0, phondist_msr="hamming_feature_edit_distance",
             semsim=1, semsim_msr=gensim_multiword,
             scdictlist_ad=None, scdictlist_rc=None):
    """Set up word lists, distance measures and thresholds (see class doc)."""
    # pandas Series of predicted adapted donor words in which to search
    self.search_in = read_data(path2donordf, donorcol)
    # pd Series of reg-exes of reconstructed recipient words to search for
    self.search_for = read_data(path2recipdf, recipcol)
    # path to donor and recipient forms.csv to read extra infos later
    self.donpath, self.recpath = path2donordf, path2recipdf
    # names of the columns containing adapted and reconstructed words
    self.doncol, self.reccol = donorcol, recipcol  # used in postprocessing
    self.phondist = phondist  # maximal phonological distance of a match
    # resolve the measure name to a bound method of panphon's Distance
    self.phondist_msr = getattr(Distance(), phondist_msr)
    self.semsim = semsim  # minimal semantic similarity of a match
    self.semsim_msr = semsim_msr  # semantic similarity measuring function
    # normalised sum of examples (NSE) for adaptations and reconstructions
    self.get_nse_ad = Adrc(scdictlist=scdictlist_ad, mode="adapt").get_nse
    self.get_nse_rc = Adrc(scdictlist=scdictlist_rc,
                           mode="reconstruct").get_nse
def phonmatch(self, search_for, index, dropduplicates=True):
    """
    Check if a regular expression is contained in a wordlist and replace
    it with a number. The wordlist is a pandas Series object that gets
    initiated in loanpy.loanfinder.Search. To pass a wordlist in through
    the parameter of this function, use
    loanpy.loanfinder.Search.phonmatch_small

    :param search_for: The regular expression for which to search in the
        donor language.
    :type search_for: str
    :param index: The number with which to replace a match. (This number
        will be used to merge the rest of the recipient language's data
        frame, so it should represent its index there.)
    :type index: idx
    :param dropduplicates: If set to True, this will drop matches that
        have the same index in the wordlist. (There's one adapted
        donor-word per row, but its index is the same as the original
        donor word's from which it was adapted. Therefore, one recipient
        word can match with the same donor word through multiple
        adaptations. Since the semantics are the same for all of those
        matches, the first match can be picked and duplicates dropped
        safely. This saves a lot of time and energy. Later,
        loanpy.loanfinder.Search.likeliestphonmatch calculates the
        likeliest phonological matches, but only for those phonological
        matches whose semantics already matched.)
    :type dropduplicates: bool, default=True
    :return: a pandas data frame containing phonological matches. The
        index indicates the position (row) of the word in the data frame
        assigned to loanpy.loanfinder.Search.search_in. The column
        "recipdf_idx" indicates the position of the word in the word list
        of the recipient language (the value passed to param <index>).
    :rtype: pandas.core.frame.DataFrame

    :Example:

    >>> from pathlib import Path
    >>> from loanpy.loanfinder import Search, __file__
    >>> path2read_data = Path(__file__).parent / "tests" / \
"input_files" / "ad_read_data.csv"
    >>> search_obj = Search(path2donordf=path2read_data, donorcol="col1")
    >>> search_obj.phonmatch(search_for="(b|c)?lub", index=99,
    >>> dropduplicates=False)
        match  recipdf_idx
    1   blub   99
    1   club   99
    """
    # maximal phonetic distance == 0 means only identical words are matches
    if self.phondist == 0:  # will drop all non-identical elements
        matched = self.search_in[self.search_in.str.match(search_for)]
    else:  # will otherwise drop everything above the max distance
        # FIX: bind the target word in a *local* partial. The old code
        # assigned the partial back to self.phondist_msr, permanently
        # mutating the instance and re-wrapping the measure on every
        # subsequent call.
        msr_for_target = partial(self.phondist_msr, target=search_for)
        matched = self.search_in[
            self.search_in.apply(msr_for_target) <= self.phondist]
    # creates new col "recipdf_idx" - keys to the input df
    dfphonmatch = DataFrame({"match": matched, "recipdf_idx": index})
    # this makes things more economical. dropping redundancies
    if dropduplicates is True:
        dfphonmatch = dfphonmatch[~dfphonmatch.index.duplicated(
            keep='first')]
    # returns a pandas data frame
    return dfphonmatch
def loans(self, write_to=False, postprocess=False, merge_with_rest=False):
    """
    Searches for phonological matches and calculates their semantic
    similarity. Returns candidate list of loans.

    :param write_to: indicate if results should be written to file. \
If yes, provide path.
    :type write_to: pathlib.PosixPath | str | None | False, \
default=False
    :param postprocess: Indicate if results should be post-processed. See \
loanpy.loanfinder.Search.postprocess for more details
    :type postprocess: bool, default=False
    :param merge_with_rest: Indicate if additional info from input \
data frame columns should be copied into the output data frame. \
Helps with quick debugging sometimes. See \
loanpy.loanfinder.Search.merge_with_rest for more details
    :type merge_with_rest: bool, default=False
    :returns: data frame with potential loanwords
    :rtype: pandas.core.frame.DataFrame
    """
    # find phonological matches: one phonmatch() call per reconstruction
    dfmatches = concat(gen(self.search_for, self.search_for.index,
                           self.phonmatch,
                           "searching for phonological matches: "))
    # raise exception if no matches found
    if len(dfmatches) == 0:
        raise NoPhonMatch("no phonological matches found")
    # add translations for semantic comparison: merge recipient meanings
    # by recipdf_idx, donor meanings by the matches' own index
    dfmatches = dfmatches.merge(read_csv(self.recpath, encoding="utf-8",
                                         usecols=["Meaning"]).fillna(""),
                                left_on="recipdf_idx", right_index=True)
    dfmatches = dfmatches.merge(read_csv(self.donpath, encoding="utf-8",
                                         usecols=["Meaning"]).fillna(""),
                                left_index=True, right_index=True)
    # calculate semantic similarity of phonological matches; the column
    # is named after the measuring function (e.g. "gensim_multiword")
    dfmatches[self.semsim_msr.__name__] = list(gen(dfmatches["Meaning_x"],
                                                   dfmatches["Meaning_y"],
                                                   self.semsim_msr,
                                                   "calculating semantic \
similarity of phonological matches: "))
    # sorting and cutting off words with too low semantic similarity
    # NOTE(review): message lacks a space before "and" — cosmetic only
    logger.warning("cutting off by semsim=" +
                   str(self.semsim) +
                   "and ranking by semantic similarity")
    dfmatches = dfmatches[dfmatches[
        self.semsim_msr.__name__] >= self.semsim]
    dfmatches = dfmatches.sort_values(by=self.semsim_msr.__name__,
                                      ascending=False)
    # 3 optional extra steps indicated in params, skipped by default
    if postprocess:
        dfmatches = self.postprocess(dfmatches)
    if merge_with_rest:
        dfmatches = self.merge_with_rest(dfmatches)
    if write_to:
        dfmatches.to_csv(write_to, encoding="utf-8", index=False)
        logger.warning(f"file written to {write_to}")
    logger.warning(f"done. Insert date and time later here.")
    return dfmatches
def postprocess(self, dfmatches):
    """
    Will replace every phonological match in the output data frame with
    its most likely version (as scored by
    loanpy.loanfinder.Search.likeliestphonmatch).

    :param dfmatches: The entire data frame with potential loanwords
    :type dfmatches: pandas.core.frame.DataFrame
    :returns: the same data frame but with likelier adaptations of donor
        words
    :rtype: pandas.core.frame.DataFrame
    """
    logger.warning(f"postprocessing...")
    # read in data for likeliestphonmatch, i.e. col Segments in both,
    # donor and recipient data frames
    dfmatches = dfmatches.merge(read_csv(self.recpath, encoding="utf-8",
                                         usecols=["Segments",
                                                  self.reccol]).fillna(""),
                                left_on="recipdf_idx", right_index=True)
    dfmatches = dfmatches.merge(read_csv(self.donpath, encoding="utf-8",
                                         usecols=["Segments",
                                                  self.doncol]).fillna(""),
                                left_index=True, right_index=True)
    # strip the space-separated segment notation down to plain words
    dfmatches["Segments_x"] = [i.replace(" ", "")
                               for i in dfmatches["Segments_x"]]
    dfmatches["Segments_y"] = [i.replace(" ", "")
                               for i in dfmatches["Segments_y"]]
    # calculate likeliest phonological matches, one result row per match
    newcols = concat([self.likeliestphonmatch(ad, rc, segd, segr)
                      for ad, rc, segd, segr
                      in zip(dfmatches[self.doncol],
                             dfmatches[self.reccol],
                             dfmatches["Segments_y"],
                             dfmatches["Segments_x"])])
    del dfmatches["match"]  # delete non-likeliest matches
    newcols.index = dfmatches.index  # otherwise concat wont work
    dfmatches = concat([dfmatches, newcols], axis=1)  # add new cols
    # delete redundant data (the merged-in helper columns)
    del (dfmatches["Segments_x"], dfmatches[self.reccol],
         dfmatches["Segments_y"], dfmatches[self.doncol])
    return dfmatches  # same structure as input df
def likeliestphonmatch(self, donor_ad, recip_rc, donor_segment,
recip_segment):
"""
Called by loanpy.loanfinder.postprocess. \
Calculates the nse of recip_rc-recip_segment \
and donor_ad-donor_segment, adds them together \
and picks the word pair with the highest sum. \
Adds 2*4 columns from loanpy.adrc.Adrc.get_nse.
:param donor_ad: adapted words in the donor data frame
:type donor_ad: str (not a regular expression, words separated by ", ")
:param recip_rc: a reconstructed word
:type recip_rc: str (regular expression)
:param donor_segment: the original (non-adapted) donor word
:type donor_segment: str
:param recip_segment: the original (non-reconstructed) recipient word
:type recip_segment: str
:returns: The likeliest phonological match
:rtype: pandas.core.series.Series
:Example:
>>> from pathlib import Path
>>> from pandas import DataFrame
>>> from loanpy.loanfinder import Search, __file__
>>> PATH2SC_AD = Path(__file__).parent / "tests" \
/ "input_files" / "sc_ad_likeliest.txt"
>>> PATH2SC_RC = Path(__file__).parent / "tests" \
/ "input_files" / "sc_rc_likeliest.txt"
>>> PATH2READ_DATA = Path(__file__).parent / "tests" \
/ "input_files" / "ad_read_data.csv"
>>> search_obj = Search(
>>> PATH2READ_DATA, donorcol="col1",
>>> scdictlist_ad=PATH2SC_AD, scdictlist_rc=PATH2SC_RC)
>>> search_obj.likeliestphonmatch(donor_ad="a, blub, \
club", recip_rc="(b|c)?lub",
>>> donor_segment="elub", recip_segment="dlub")
match nse_rc se_rc ... distr_ad\
align_ad nse_combined
0 blub 10.0 50 ... [0, 0, 10, 10, 0] \
['e<V', 'C<b', 'l<l', 'u<u', 'b<b'] 14.0
[1 rows x 10 columns]
"""
# step 1: serach for phonological matches between
# reconstructed reg-ex and list of predicted adaptations
dfph = self.phonmatch_small(Series(donor_ad.split(", "), name="match"),
recip_rc, dropduplicates=False)
# get the nse score between original and predictions
# and write to new columns
# cols se_rc, lst_rc, se_ad, lst_ad are just extra info for the user
dfph = DataFrame([(wrd,) + self.get_nse_rc(recip_segment, wrd) +
self.get_nse_ad(donor_segment, wrd)
for wrd in dfph["match"]],
columns=["match", "nse_rc", "se_rc", "distr_rc",
"align_rc", "nse_ad", "se_ad", "distr_ad",
"align_ad"])
# add combined nse
dfph["nse_combined"] = dfph["nse_rc"] + dfph["nse_ad"]
# get idx of max combined, keep only that idx (=likeliest match)
dfph = dfph[dfph.index == dfph["nse_combined"].idxmax()]
return dfph
def phonmatch_small(self, search_in, search_for, index=None,
dropduplicates=True):
"""
Same as loanpy.loanfinder.Search.phonmatch but search_in \
has to be added as a parameter. Found this \
to be the most elegant solution b/c \
loanpy.loanfinder.Search.likeliestphonmatch() inputs lots of \
small and very different search_in-dfs, while loans() inputs one big df.
:param search_in: The iterable to search within
:type search_in: pandas.core.series.Series
:param search_for: See loanpy.loanfinder.Search.phonmatch
:type search_for: str
:param index: See loanpy.loanfinder.Search.phonmatch
:type index: str | None, default=None
:param dropduplicates: See loanpy.loanfinder.Search.phonmatch
:type dropduplicates: bool, default=True
:returns: See loanpy.loanfinder.Search.phonmatch
:rtype: pandas.core.series.Series
"""
# for inline comments see loanpy.loanfinder.Search.phonmatch
if self.phondist == 0:
matched = search_in[search_in.str.match(search_for)]
else:
self.phondist_msr = partial(self.phondist_msr, target=search_for)
matched = search_in[
search_in.apply(self.phondist_msr) <= self.phondist]
dfphonmatch = DataFrame({"match": matched, "recipdf_idx": index})
if dropduplicates is True:
dfphonmatch = dfphonmatch[
~dfphonmatch.index.duplicated(keep='first')]
return dfphonmatch
def merge_with_rest(self, dfmatches):
"""
Merges the output data frame with the remaining columns \
from both input data frames. This helps to inspect results quickly manually.
:param dfmatches: The output data frame
:type dfmatches: pandas.core.frame.DataFrame
:returns: same data frame with extra cols added from both \
input forms.csv
:rtype: pandas.core.frame.DataFrame
"""
logger.warning("Merging with remaining columns from input data frames")
# avoid duplicates
dfmatches = dfmatches.drop(["Meaning_x", "Meaning_y"], axis=1)
dfmatches = dfmatches.merge(read_csv(self.donpath,
encoding="utf-8").fillna(""),
left_index=True, right_index=True)
dfmatches = dfmatches.merge(read_csv(self.recpath,
encoding="utf-8").fillna(""),
left_on="recipdf_idx", right_index=True)
dfmatches = dfmatches.sort_values(by=self.semsim_msr.__name__,
ascending=False) # unsorted by merge
return dfmatches
| 27,920 | 8,483 |
import scipy
from scipy.io import loadmat
import random
import numpy as np
from sklearn.metrics import zero_one_loss
from sklearn.naive_bayes import BernoulliNB,MultinomialNB,GaussianNB
import matplotlib.pyplot as plt
from sklearn.feature_selection import mutual_info_classif
import os
# Load the Windows-documentation bag-of-words dataset (sparse CSC matrices)
# and densify it for the Bernoulli naive Bayes classifier.
data = loadmat('../data/XwindowsDocData.mat')
Xtrain = scipy.sparse.csc_matrix.toarray(data['xtrain'])
Xtest = scipy.sparse.csc_matrix.toarray(data['xtest'])
# Labels come back from loadmat as (n, 1) column vectors; ravel() to 1-D
# to avoid sklearn's DataConversionWarning and fit on a proper label array.
ytrain = data['ytrain'].ravel()
ytest = data['ytest'].ravel()

model = BernoulliNB()
model.fit(Xtrain, ytrain)

# zero_one_loss already returns a scalar misclassification rate, so the
# former np.mean() wrapper was redundant.
err_train = zero_one_loss(ytrain, model.predict(Xtrain))
err_test = zero_one_loss(ytest, model.predict(Xtest))
print('misclassification rates on train = '+str(err_train*100) +
      ' pc, on test = '+str(err_test*100)+' pc\n')

C = np.unique(data['ytrain']).size
print()
# Plot the class-conditional densities P(x_j = 1 | y = c) per class and
# save each figure under ../figures/.
for i in range(0, C):
    plt.bar(np.arange(0, 600, 1), np.exp(model.feature_log_prob_)[i, :])
    plt.title(r'$P(x_j=1 \mid y='+str(i+1)+')$')
    fileName = 'naiveBayesBow'+str(i+1)+'ClassCond'
    plt.savefig(r'../figures/'+fileName)
    plt.show()
| 1,172 | 473 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code parses date/times, so please
#
# pip install python-dateutil
#
# To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = remote_config_event_data_from_dict(json.loads(json_string))
from typing import Optional, Any, Union, TypeVar, Type, cast
from datetime import datetime
import dateutil.parser
T = TypeVar("T")
def from_str(x: Any) -> str:
    """Return *x* unchanged after asserting it is a string."""
    assert isinstance(x, str)
    return x
def from_none(x: Any) -> Any:
    """Return *x* unchanged after asserting it is None."""
    assert x is None
    return x
def from_union(fs, x):
    """Return f(x) for the first converter in *fs* that accepts *x*.

    Converters signal rejection by raising; the first success wins.
    Raises AssertionError when every converter rejects *x*.
    """
    for f in fs:
        try:
            return f(x)
        except Exception:
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; only converter failures
            # should be ignored here.
            pass
    assert False
def from_int(x: Any) -> int:
    """Return *x* if it is a true int; bools are explicitly rejected."""
    assert isinstance(x, int)
    assert not isinstance(x, bool)
    return x
def from_datetime(x: Any) -> datetime:
    """Parse *x* (a date/time string in any format dateutil understands)
    into a datetime object."""
    return dateutil.parser.parse(x)
def to_class(c: Type[T], x: Any) -> dict:
    """Serialize *x* through its to_dict(), asserting it is a *c* instance."""
    assert isinstance(x, c)
    instance = cast(Any, x)
    return instance.to_dict()
class UpdateUser:
    """Aggregation of all metadata fields about the account that performed the update."""

    # email: account email address; image_url: avatar URL; name: display name.
    email: Optional[str]
    image_url: Optional[str]
    name: Optional[str]

    def __init__(self, email: Optional[str], image_url: Optional[str], name: Optional[str]) -> None:
        self.email = email
        self.image_url = image_url
        self.name = name

    @staticmethod
    def from_dict(obj: Any) -> 'UpdateUser':
        """Build an UpdateUser from a parsed-JSON dict (camelCase keys)."""
        assert isinstance(obj, dict)
        str_or_none = [from_str, from_none]
        return UpdateUser(
            from_union(str_or_none, obj.get("email")),
            from_union(str_or_none, obj.get("imageUrl")),
            from_union(str_or_none, obj.get("name")),
        )

    def to_dict(self) -> dict:
        """Serialize back to a JSON-ready dict with camelCase keys."""
        str_or_none = [from_str, from_none]
        return {
            "email": from_union(str_or_none, self.email),
            "imageUrl": from_union(str_or_none, self.image_url),
            "name": from_union(str_or_none, self.name),
        }
class RemoteConfigEventData:
    """The data within all Firebase Remote Config events."""

    """The user-provided description of the corresponding Remote Config template."""
    description: Optional[str]
    """Only present if this version is the result of a rollback, and will be the
    version number of the Remote Config template that was rolled-back to.
    """
    rollback_source: Optional[str]
    """Where the update action originated."""
    update_origin: Union[int, None, str]
    """When the Remote Config template was written to the Remote Config server."""
    update_time: Optional[datetime]
    """What type of update was made."""
    update_type: Union[int, None, str]
    """Aggregation of all metadata fields about the account that performed the update."""
    update_user: Optional[UpdateUser]
    """The version number of the version's corresponding Remote Config template."""
    version_number: Optional[str]

    def __init__(self, description: Optional[str], rollback_source: Optional[str], update_origin: Union[int, None, str], update_time: Optional[datetime], update_type: Union[int, None, str], update_user: Optional[UpdateUser], version_number: Optional[str]) -> None:
        self.description = description
        self.rollback_source = rollback_source
        self.update_origin = update_origin
        self.update_time = update_time
        self.update_type = update_type
        self.update_user = update_user
        self.version_number = version_number

    @staticmethod
    def from_dict(obj: Any) -> 'RemoteConfigEventData':
        """Deserialize a parsed-JSON dict (camelCase keys) into an instance.

        Each field accepts the union of types listed in its converter chain;
        missing keys become None via obj.get() + from_none.
        """
        assert isinstance(obj, dict)
        description = from_union([from_str, from_none], obj.get("description"))
        rollback_source = from_union([from_str, from_none], obj.get("rollbackSource"))
        update_origin = from_union([from_int, from_str, from_none], obj.get("updateOrigin"))
        update_time = from_union([from_datetime, from_none], obj.get("updateTime"))
        update_type = from_union([from_int, from_str, from_none], obj.get("updateType"))
        update_user = from_union([UpdateUser.from_dict, from_none], obj.get("updateUser"))
        version_number = from_union([from_str, from_none], obj.get("versionNumber"))
        return RemoteConfigEventData(description, rollback_source, update_origin, update_time, update_type, update_user, version_number)

    def to_dict(self) -> dict:
        """Serialize back to a JSON-ready dict with camelCase keys."""
        result: dict = {}
        result["description"] = from_union([from_str, from_none], self.description)
        result["rollbackSource"] = from_union([from_str, from_none], self.rollback_source)
        result["updateOrigin"] = from_union([from_int, from_str, from_none], self.update_origin)
        # datetimes are written out as ISO-8601 strings
        result["updateTime"] = from_union([lambda x: x.isoformat(), from_none], self.update_time)
        result["updateType"] = from_union([from_int, from_str, from_none], self.update_type)
        result["updateUser"] = from_union([lambda x: to_class(UpdateUser, x), from_none], self.update_user)
        result["versionNumber"] = from_union([from_str, from_none], self.version_number)
        return result
def remote_config_event_data_from_dict(s: Any) -> RemoteConfigEventData:
    """Top-level entry point: convert parsed JSON into a RemoteConfigEventData."""
    event = RemoteConfigEventData.from_dict(s)
    return event
def remote_config_event_data_to_dict(x: RemoteConfigEventData) -> Any:
    """Top-level entry point: serialize a RemoteConfigEventData to a JSON-ready dict."""
    serialized = to_class(RemoteConfigEventData, x)
    return serialized
| 5,961 | 1,788 |
"""
bluey_beacon.py
This program connects to the bluey 1.1 Nordic nRF52 dev board
made by Electronut Labs. The firmware on Bluey has to be running the
"bluey beacon" example.
This code is expected to be run on a Raspberry Pi 3.
It assumes that you have bluez installed - it uses hcidump and hcitool.
The code reads advertisement packets from bluey, parses the
Temperature, Humidity, and Ambient Light information and
posts that to freeboard.io dashboard via dweet.io.
Electronut Labs
electronut.in
References:
1. Ruuvi Project.
https://github.com/ttu/ruuvitag-sensor/
"""
import re
import sys
import os
import urllib2
import time
import subprocess
# constant: full range of a 16-bit sensor reading (2**16)
pow_16 = 65536.0

# decode temperature
def decodeT(temp_val):
    """Map a raw 16-bit reading onto degrees Celsius in [-40, 125]."""
    fraction = temp_val / pow_16
    return fraction * 165 - 40
# decode humidity
def decodeH(humid_val):
    """Map a raw 16-bit reading onto relative humidity in percent."""
    fraction = humid_val / pow_16
    return fraction * 100
# decode ambient light
def decodeL(adc_ch0, adc_ch1):
    """Convert the two light-sensor ADC channels to a lux estimate.

    Piecewise-linear formula from the sensor datasheet; the channel
    ratio selects which combination applies. A negative ratio falls
    through every branch and yields the 999.99 sentinel.
    """
    channel_ratio = adc_ch1 / float(adc_ch0)
    if 0 <= channel_ratio <= 0.52:
        return (0.0315 * adc_ch0) - (0.0593 * adc_ch0 * pow(channel_ratio, 1.4))
    if 0.52 < channel_ratio <= 0.65:
        return (0.0229 * adc_ch0) - (0.0291 * adc_ch1)
    if 0.65 < channel_ratio <= 0.80:
        return (0.0157 * adc_ch0) - (0.0180 * adc_ch1)
    if 0.80 < channel_ratio <= 1.30:
        return (0.00338 * adc_ch0) - (0.00260 * adc_ch1)
    if channel_ratio > 1.30:
        return 0
    return 999.99
# decode T/H/L data
def decodeData(x1, x2, x3, x4):
    """Decode the four raw advertisement words into (T, H, L)."""
    temperature = decodeT(x1)
    humidity = decodeH(x2)
    light = decodeL(x3, x4)
    return (temperature, humidity, light)
"""
This class uses hcitool and hcidump to parse BLE adv data.
"""
class BLEScanner:
    """Drive `hcitool lescan` and `hcidump --raw` subprocesses and yield
    reassembled raw BLE advertisement packets as hex strings."""

    # Popen handles for the two helper processes; set by start().
    hcitool = None
    hcidump = None

    def start(self):
        """Reset hci0 and launch the scanner/dump subprocesses (requires sudo)."""
        print('Start receiving broadcasts')
        # subprocess.DEVNULL only exists on Python 3; fall back to os.devnull.
        DEVNULL = subprocess.DEVNULL if sys.version_info > (3, 0) else open(os.devnull, 'wb')
        subprocess.call('sudo hciconfig hci0 reset', shell = True, stdout = DEVNULL)
        self.hcitool = subprocess.Popen(['sudo', '-n', 'hcitool', 'lescan', '--duplicates'], stdout = DEVNULL)
        self.hcidump = subprocess.Popen(['sudo', '-n', 'hcidump', '--raw'], stdout=subprocess.PIPE)

    def stop(self):
        """Send SIGINT to both subprocesses."""
        print('Stop receiving broadcasts')
        subprocess.call(['sudo', 'kill', str(self.hcidump.pid), '-s', 'SIGINT'])
        subprocess.call(['sudo', '-n', 'kill', str(self.hcitool.pid), '-s', "SIGINT"])

    def get_lines(self):
        """Generator over complete packets read from hcidump's stdout.

        hcidump spreads one packet over several lines: a line starting
        with '> ' opens a new inbound packet and continuation lines are
        appended with whitespace stripped. A packet is yielded when the
        *next* one begins, so the very first value yielded is None.
        """
        data = None
        try:
            print("reading hcidump...\n")
            #for line in hcidump.stdout:
            while True:
                line = self.hcidump.stdout.readline()
                line = line.decode()
                if line.startswith('> '):
                    yield data
                    data = line[2:].strip().replace(' ', '')
                elif line.startswith('< '):
                    # outbound packet: discard anything accumulated so far
                    data = None
                else:
                    if data:
                        data += line.strip().replace(' ', '')
        except KeyboardInterrupt as ex:
            print("kbi")
            return
        except Exception as ex:
            print(ex)
            return
# main() function
def main():
    """Scan BLE advertisements and forward bluey sensor data to dweet.io.

    Expects the target device MAC address as the single CLI argument.
    Loops forever over packets from the scanner; the trailing stop()
    call is only reached if the generator loop is ever exited.
    """
    # use sys.argv if needed
    if len(sys.argv) < 2:
        print('Usage: python bluey-beacon.py MACADDR')
        exit(0)
    # BUGFIX: this was a Python-2 print *statement* ("print 'starting...'"),
    # which is a SyntaxError under Python 3; the call form works on both.
    print('starting...')
    deviceId = sys.argv[1]
    scanner = BLEScanner()
    scanner.start()
    # dweet.io base URL
    baseURL = "https://dweet.io/dweet/for/bluey-beacon-0001?"
    data = None
    while True:
        for line in scanner.get_lines():
            if line:
                # MAC is at offset 14 in reversed byte order; payload at 26.
                found_mac = line[14:][:12]
                reversed_mac = ''.join(
                    reversed([found_mac[i:i + 2] for i in range(0, len(found_mac), 2)]))
                mac = ':'.join(a+b for a,b in zip(reversed_mac[::2], reversed_mac[1::2]))
                data = line[26:]
                if mac == deviceId and len(data) == 66:
                    #print(mac, data)
                    if u'626C756579' in data:  # hex for ASCII "bluey"
                        data2 = data[24:50]
                        #print(data)
                        x1 = int(data2[0:4], 16)
                        x2 = int(data2[4:8], 16)
                        x3 = int(data2[8:12], 16)
                        x4 = int(data2[12:16], 16)
                        #print("%x %x %x %x\n" % (x1, x2, x3, x4))
                        T, H, L = decodeData(x1, x2, x3, x4)
                        dweetURL = baseURL + "T=%.2f&&H=%.1f&&L=%d" % (T, H, L)
                        print(dweetURL)
                        try:
                            f = urllib2.urlopen(dweetURL)
                            res = f.read()
                            print(res)
                            f.close()
                        except Exception:
                            # BUGFIX: was a bare ``except:``, which also
                            # swallowed KeyboardInterrupt during the upload.
                            print("dweet failed!")
    scanner.stop()
    exit(0)
# call main
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 5,051 | 1,797 |
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 12:43:42 2019
@author: solale
In this version I will try to shrink the network and reduce the tensorization
"""
# Multilayer Perceptron
import pandas
import numpy
# fix random seed for reproducibility
# NOTE: this seeds NumPy only; TensorFlow keeps its own RNG
# (the set_random_seed call below is commented out).
seed = 7
numpy.random.seed(seed)
# from tensorflow import set_random_seed
# set_random_seed(2)
import tensorflow
import tensorflow.keras
import math
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2DTranspose,Input, Reshape, Conv2D, Flatten
from tensorflow.keras.layers import Dense
from sklearn.metrics import mean_squared_error
# from tensorflow.keras.layers.merge import concatenate
from tensorflow.keras.layers import concatenate
import argparse
# from tensorflow.keras.utils.np_utils import to_categorical
from tensorflow.keras import utils
# import tensorflow as tf
from sklearn import preprocessing
from keras_ex.gkernel import GaussianKernel
# https://github.com/darecophoenixx/wordroid.sblo.jp/tree/master/lib/keras_ex/gkernel
def custom_loss_1(y_true, y_pred):
    """Regression part of the loss: MSE over the first four outputs
    (the MMSE scores at baseline/6/12/24 months)."""
    regression_true = y_true[:, 0:4]
    regression_pred = y_pred[:, 0:4]
    return tensorflow.keras.losses.mean_squared_error(regression_true,
                                                      regression_pred)
def custom_loss_2 (y_true, y_pred):
    # Classification part of the loss: categorical cross-entropy over the
    # last 4 columns (one-hot diagnosis).
    # NOTE(review): relies on the module-level global `n` (defined further
    # down in this script) as the class-loss weight.
    B = tensorflow.keras.losses.categorical_crossentropy(y_true[:,-4:], y_pred[:,-4:])
    return n*B
def custom_loss (y_true, y_pred):
    # Joint loss: MSE on the 4 regression targets (MMSE scores) plus
    # categorical cross-entropy on the 4-way one-hot diagnosis, weighted m:n.
    # NOTE(review): `n` is a module-level global defined further below.
    A = tensorflow.keras.losses.mean_squared_error(y_true[:,0:4], y_pred[:,0:4])
    B = tensorflow.keras.losses.categorical_crossentropy(y_true[:,-4:], y_pred[:,-4:])
    m=1
    return((m*A)+ (n*B))
########################## argument getting
# NOTE(review): command-line parsing is disabled; the hyperparameters
# below are hard-coded instead.
#parser = argparse.ArgumentParser()
#parser.add_argument("--i", )
#parser.add_argument("--j", )
#parser.add_argument("--k", )
#parser.add_argument("--m", )
#a = parser.parse_args()
##
#i=int(a.i)
#j=int(a.j)
#k=int(a.k)
#####
i = 64 #64 -- number of Conv2D filters in the fusion head
# j=16
# k=64
n = 75 #105 -- weight of the classification term in custom_loss
n_splits=10  # stratified CV folds
max_epochs=500
BatchSize=350
N_AlternatingControler=2  # divisor for per-phase epoch budget
###################### ######################
# Reading Multi Modal Y for train and Test
# train data
AllDataset = pandas.read_csv('./XY_BLD_Converter', low_memory=False)
AllDataset = AllDataset.set_index(AllDataset.RID)
AllDataset = AllDataset.fillna(0)
# NOTE(review): fillna(0) runs *before* this map, so rows with a missing
# DX become 0 and then map to NaN (0 is not a key); LabelEncoder is then
# applied on top of the already-integer mapping -- verify this is intended.
AllDataset['DX'] = AllDataset['DX'].map({'NL':0, 'MCI':1, 'Converter':2, 'Dementia':3})
le = preprocessing.LabelEncoder()
AllDataset['DX'] = le.fit_transform(AllDataset['DX'])
###################### MRI ######################
# MRI volumetrics as inputs; targets are MMSE at 0/6/12/24 months + DX.
MRI_X = AllDataset.loc[:,['Ventricles', 'Hippocampus', 'WholeBrain', 'Entorhinal', 'Fusiform', 'MidTemp', 'ICV']]
MRI_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
MRI_RID = AllDataset.RID
# normalize data (mean-centered, scaled by range)
MRI_X = (MRI_X - MRI_X.mean())/ (MRI_X.max() - MRI_X.min())
###################### PET ######################
PET_X = AllDataset.loc[:,['FDG', 'PIB', 'AV45']]
PET_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
PET_RID = AllDataset.RID
# normalize data
PET_X = (PET_X - PET_X.mean()) / (PET_X.max() - PET_X.min())
###################### COG ######################
COG_X = AllDataset.loc[:, ['RAVLTimmediate', 'RAVLTlearning', 'RAVLTforgetting', 'RAVLTpercforgetting','FAQ',
                      'EcogPtMem', 'EcogPtLang', 'EcogPtVisspat', 'EcogPtPlan', 'EcogPtOrgan', 'EcogPtDivatt', 'EcogPtTotal',
                      'EcogSPMem', 'EcogSPLang', 'EcogSPVisspat', 'EcogSPPlan', 'EcogSPOrgan', 'EcogSPDivatt', 'EcogSPTotal']]#'CDRSB', 'MOCA',
COG_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
COG_RID = AllDataset.RID
# normalize data (z-score for the cognitive battery, unlike the others)
COG_X = (COG_X - COG_X.mean()) / (COG_X.std())
###################### CSF ######################
CSF_X = AllDataset.loc[:,['ABETA', 'PTAU', 'TAU']]
CSF_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
CSF_RID = AllDataset.RID
# normalize data
CSF_X = (CSF_X - CSF_X.mean()) / (CSF_X.max() - CSF_X.min())
###################### RF ######################
# Risk factors: age/education (range-scaled), APOE4 (shifted), gender one-hot.
# RF_X = AllDataset.loc[:,['AGE', 'PTEDUCAT', 'APOE4','female','male']]
# RF_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
# RF_RID = AllDataset.RID
# # normalize data
# RF_X.AGE = (RF_X - RF_X.mean()) / (RF_X.max() - )
RF_X_1 = AllDataset.loc[:,['AGE','PTEDUCAT']]
# normalize age and years of education
RF_X_1 = (RF_X_1 - RF_X_1.mean()) / (RF_X_1.max() - RF_X_1.min())
RF_X_1=RF_X_1.fillna(0)
# normalize apoe4
RF_X_A = AllDataset.loc[:,['APOE4']]
RF_X_A=RF_X_A-1
RF_X_A=RF_X_A.fillna(0)
# normalize gender
RF_X_gender = AllDataset.loc[:,['female','male']]
# RF_X_sex[RF_X_sex=='Male']=-1
# RF_X_sex[RF_X_sex=='Female']=1
RF_X_gender=RF_X_gender.fillna(0)
#construct RF
RF_X = pandas.concat([RF_X_1, RF_X_A, RF_X_gender], axis=1)
##############################################
from tensorflow.keras.layers import Dropout
import numpy as np
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from scipy.stats import pearsonr, spearmanr
# FCN specifications
# NOTE(review): units_L2/units_L3 appear unused in the network below -- verify.
units_L2 = 25
units_L3 = 7
####################################### MRI FCN ###############################################
# Per-modality autoencoder-style branch: input -> 2x-wide linear layer ->
# linear projection back to the input dimension.
# NOTE(review): every hiddenXXX2 = hiddenXXX1 is a plain alias (no second
# layer); presumably a remnant of a deeper variant.
# mri FCN
MRI_inp_dim = MRI_X.shape[1]
MRI_visible = Input(shape=(MRI_inp_dim,))
hiddenMRI1 = Dense(2*MRI_inp_dim, kernel_initializer='normal', activation='linear')(MRI_visible)
hiddenMRI2 = hiddenMRI1
MRI_output = Dense(MRI_inp_dim, kernel_initializer='normal', activation='linear')(hiddenMRI2)
####################################### PET FCN ###############################################
PET_inp_dim = PET_X.shape[1]
PET_visible = Input(shape=(PET_inp_dim,))
hiddenPET1 = Dense(2*PET_inp_dim, kernel_initializer='normal', activation='linear')(PET_visible)
hiddenPET2=hiddenPET1
PET_output = Dense(PET_inp_dim, kernel_initializer='normal', activation='linear')(hiddenPET2)
####################################### COG FCN ###############################################
# cog FCN
COG_inp_dim = COG_X.shape[1]
COG_visible = Input(shape=(COG_inp_dim,))
hiddenCOG1 = Dense(2*COG_inp_dim, kernel_initializer='normal', activation='linear')(COG_visible)
hiddenCOG2=hiddenCOG1
COG_output = Dense(COG_inp_dim, kernel_initializer='normal', activation='linear')(hiddenCOG2)
####################################### CSF FCN ###############################################
CSF_inp_dim = CSF_X.shape[1]
CSF_visible = Input(shape=(CSF_inp_dim,))
hiddenCSF1 = Dense(2*CSF_inp_dim, kernel_initializer='normal', activation='linear')(CSF_visible)
hiddenCSF2=hiddenCSF1
CSF_output = Dense(CSF_inp_dim, kernel_initializer='normal', activation='linear')(hiddenCSF2)
####################################### RF FCN ###############################################
RF_inp_dim = RF_X.shape[1]
RF_visible = Input(shape=(RF_inp_dim,))
hiddenRF1 = Dense(2*RF_inp_dim, kernel_initializer='normal', activation='linear')(RF_visible)
hiddenRF2=hiddenRF1
RF_output = Dense(RF_inp_dim, kernel_initializer='normal', activation='linear')(hiddenRF2)
#################################### Concat FCN ###############################################
# Fusion head: concatenate all branches, Gaussian-kernel layer to 100 units,
# reshape to a 10x10 "image", two parallel transposed convolutions, then a
# Conv2D feature extractor feeding both output heads.
merge = concatenate([MRI_output, PET_output, COG_output, CSF_output, RF_output])#
# print(merge.shape[1])
# interpretation layer
# hidden1 = Dense(100, activation='relu')(merge)
hidden1 = GaussianKernel(100, merge.shape[1], kernel_gamma="auto", name='gkernel1')(merge)
# hidden1 = Dropout(0.1)(hidden1)
hidden1_reshape = Reshape((10, 10, 1))(hidden1)
layer2D_1 = Conv2DTranspose(filters=10, kernel_size=(3,3), strides=(1, 1), padding="same")(hidden1_reshape)
layer2D_2 = Conv2DTranspose(filters=10, kernel_size=(3,3), strides=(1, 1), dilation_rate=(2,2),padding="same")(hidden1_reshape)
#layer2D_3 = Conv2DTranspose(filters=10, kernel_size=(3,3), strides=(1, 1), dilation_rate=(3,3), padding="same")(hidden1_reshape)
layer2D_4 = concatenate([layer2D_1,layer2D_2])#concatenate([layer2D_1,layer2D_2,layer2D_3])
# input layer
visible = layer2D_4
# first feature extractor
conv1 = Conv2D(i, kernel_size=3)(visible)#relu
conv1 = Dropout(0.1)(conv1)
flat1 = Flatten()(conv1)
## cutting out from hidden1 output
# prediction output: 4 regression targets + 4-way softmax, concatenated so
# the custom joint loss can slice them apart again.
output_reg = Dense(4, activation='relu',kernel_regularizer=tensorflow.keras.regularizers.l1(0.01))(flat1)#relu
outout_class = Dense(4, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.l1(0.01))(flat1)#softmax
output=concatenate([output_reg, outout_class])
categorical_labels = utils.to_categorical(COG_Y.iloc[:,-1], num_classes=4)
X_all=[MRI_X.values, PET_X.values, COG_X.values, CSF_X.values, RF_X.values]#
YTrain = COG_Y
YTrain1 = YTrain.reset_index()
Y_Train = pandas.concat ([YTrain1[['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24']], pandas.DataFrame(categorical_labels)], axis=1)
Y_all=Y_Train
# Accumulators for the cross-validation loop below.
AccScores = []
AccDetails=[]
All_Predicts_class=[]
All_Truth_class=[]
All_Predicts_reg=[]
All_Truth_reg=[]
# NOTE(review): initialized with one all-zero column; the reporting code
# slices [:,1:] to drop it again.
AllRegErrors = np.zeros(shape=(4,1),dtype='float16')
# NOTE(review): X_all/Y_all are re-assigned identically here (redundant).
X_all=[MRI_X.values, PET_X.values, COG_X.values, CSF_X.values, RF_X.values]#
Y_all=Y_Train
All_RMSE=np.zeros(shape=(4,1),dtype='float16')
model = Model(inputs= [MRI_visible, PET_visible, COG_visible, CSF_visible, RF_visible], outputs=output) #
#keras.utils.plot_model(model,to_file='model-final.png', show_shapes=True)
OPTIMIZER_1=tensorflow.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
OPTIMIZER_2=tensorflow.keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# Snapshot the untrained weights so every CV fold restarts from scratch.
model.save_weights('SavedInitialWeights.h5')
# Early stopping on validation loss with a 20-epoch patience window.
callback_stop = tensorflow.keras.callbacks.EarlyStopping(
        monitor='val_loss', min_delta=0, patience=20, verbose=0,
        mode='auto', baseline=None, restore_best_weights=False
        )
# Epoch budget per optimizer phase when alternating optimizers.
# BUGFIX: was np.int(...), a deprecated NumPy alias removed in NumPy >= 1.24;
# the builtin int() is the direct replacement.
max_epochs_Alternating = int(max_epochs / N_AlternatingControler)
import matplotlib.pyplot as plt
# Stratified K-fold cross-validation (single repeat; widen range(0,1) for
# repeated CV). Each fold reloads the initial weights so folds are independent.
for repeator in range(0,1):
    #print('Repeat No: ', repeator+1)
    # define n_splits-fold cross validation test harness
    kfold = StratifiedKFold(n_splits, shuffle=True, random_state=repeator)
    FoldCounter=0
    for train, test in kfold.split(X_all[1], COG_Y.iloc[:,-1].values):
        FoldCounter=FoldCounter+1
        model.load_weights('SavedInitialWeights.h5')
        X_train_here=[X_all[0][train], X_all[1][train], X_all[2][train], X_all[3][train], X_all[4][train]]#
        print('---Repeat No: ', repeator+1, ' ---Fold No: ', FoldCounter)
        # model.compile(loss=custom_loss, optimizer=OPTIMIZER_1)
        # History = model.fit(X_train_here, Y_all.values[train],
        # epochs= max_epochs_Alternating, batch_size=BatchSize, verbose=0)#250-250
        model.compile(loss=custom_loss, optimizer=OPTIMIZER_2)
        History = model.fit(X_train_here, Y_all.values[train], validation_split=0.1,
                            epochs= 2*max_epochs_Alternating, batch_size=BatchSize,
                            callbacks=[callback_stop], verbose=0)#250-250
        # Save the per-fold train/validation loss curves to disk.
        plt.plot(History.history['loss'])
        plt.plot(History.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.grid()
        plt.legend(['train', 'val'], loc='upper left')
        plt.savefig('Fold_'+str(FoldCounter)+'_History.png')
        plt.close()
        # for iters in range(N_AlternatingControler):
        #     # Fit the model
        #     if np.random.rand() < 0.5:
        #         model.compile(loss=custom_loss, optimizer=OPTIMIZER_1)
        #         print(iters)
        #     else:
        #         model.compile(loss=custom_loss, optimizer=OPTIMIZER_2)
        #     History = model.fit(X_train_here, Y_all.values[train], epochs= max_epochs_Alternating, batch_size=BatchSize, verbose=0)#250-250
        X_test_here=[X_all[0][test], X_all[1][test], X_all[2][test], X_all[3][test], X_all[4][test]]#
        Y_Validation=model.predict(X_test_here)
        # Regression metrics: RMSE per MMSE horizon (0/6/12/24 months).
        MSE_0 = mean_squared_error(Y_all.iloc[test, 0], Y_Validation[:, 0])#/Y_Pred_MultiModal.shape[0]
        MSE_6 = mean_squared_error(Y_all.iloc[test, 1], Y_Validation[:, 1])#/Y_Pred_MultiModal.shape[0]
        MSE_12 = mean_squared_error(Y_all.iloc[test, 2], Y_Validation[:, 2])#/Y_Pred_MultiModal.shape[0]
        MSE_24 = mean_squared_error(Y_all.iloc[test, 3], Y_Validation[:, 3])#/Y_Pred_MultiModal.shape[0]
        All_RMSE[0]=math.sqrt(MSE_0)
        All_RMSE[1]=math.sqrt(MSE_6)
        All_RMSE[2]=math.sqrt(MSE_12)
        All_RMSE[3]=math.sqrt(MSE_24)
        print([math.sqrt(MSE_0), math.sqrt(MSE_6), math.sqrt(MSE_12), math.sqrt(MSE_24)])
        AllRegErrors=np.append(AllRegErrors,All_RMSE,axis=1)
        #rho1, pval1 = spearmanr(Y_Pred_MultiModal[:, 0], Y_all.iloc[:, 0])
        ##### Classification
        All_Predicts_class.append(Y_Validation[:,-4:])
        All_Predicts_reg.append(Y_Validation[:,0:4])
        All_Truth_class.append(COG_Y.iloc[test,-1])
        All_Truth_reg.append(Y_all.iloc[test, 0:4])
        # Diagnosis prediction = argmax over the softmax columns.
        DX_pred = np.argmax(Y_Validation[:,-4:], axis=1)
        DX_real= COG_Y.iloc[test,-1]
        score=accuracy_score(DX_real, DX_pred)
        print (accuracy_score(DX_real, DX_pred))
        AccScores.append(score*100)
        # target_names = ['class 0', 'class 1', 'class 2', 'class 3']
        target_names = ['CN', 'MCI_nc', 'MCI_c', 'AD']
        class_names = target_names
        Details=classification_report(DX_real, DX_pred, target_names=target_names,output_dict=True)
        print(classification_report(DX_real, DX_pred, target_names=target_names))
        AccDetails.append(Details)
        #print >> f1, classification_report(DX_real, DX_pred, target_names=target_names)
print('#########################################################################')
print('#########################################################################')
print(i, n)
print('Average Result:')
print('########')
# The leading all-zero column of AllRegErrors is a placeholder from its
# initialization, hence the [:,1:] slices everywhere below.
print('Mean of RMSE : ', np.mean(AllRegErrors[:,1:],1))
print('Mean of RMSE ALL: ', np.mean(AllRegErrors[:,1:]))
print('Mean of accuracy : ',np.mean(AccScores))
print(' --------------------- ')
print('std of RMSE : ', np.std(AllRegErrors[:,1:],1))
print('std of RMSE ALL: ', np.std(AllRegErrors[:,1:]))
print('std of accuracy : ',np.std(AccScores))
# Collect per-class precision/recall/f1/support across folds from the
# classification_report dicts.
AD_precision=[]; MCI_nc_precision=[]; MCI_c_precision=[]; CN_precision=[];
AD_recall=[]; MCI_nc_recall=[]; MCI_c_recall=[]; CN_recall=[]
AD_f1=[]; MCI_nc_f1=[]; MCI_c_f1=[]; CN_f1=[]
AD_support=[]; MCI_nc_support=[]; MCI_c_support=[]; CN_support=[]
# NOTE(review): this loop variable `i` clobbers the hyperparameter `i`
# (already printed above, so harmless here, but fragile).
for i in range(len(AccDetails)):
    Details=AccDetails[i]
    A=Details['AD']['precision']
    AD_precision.append(A)
    A=Details['MCI_c']['precision']
    MCI_c_precision.append(A)
    A=Details['MCI_nc']['precision']
    MCI_nc_precision.append(A)
    A=Details['CN']['precision']
    CN_precision.append(A)
    A=Details['AD']['recall']
    AD_recall.append(A)
    A=Details['MCI_c']['recall']
    MCI_c_recall.append(A)
    A=Details['MCI_nc']['recall']
    MCI_nc_recall.append(A)
    A=Details['CN']['recall']
    CN_recall.append(A)
    A=Details['AD']['f1-score']
    AD_f1.append(A)
    A=Details['MCI_c']['f1-score']
    MCI_c_f1.append(A)
    A=Details['MCI_nc']['f1-score']
    MCI_nc_f1.append(A)
    A=Details['CN']['f1-score']
    CN_f1.append(A)
    A=Details['AD']['support']
    AD_support.append(A)
    A=Details['MCI_c']['support']
    MCI_c_support.append(A)
    A=Details['MCI_nc']['support']
    MCI_nc_support.append(A)
    A=Details['CN']['support']
    CN_support.append(A)
print(' --------------------- ')
print(' --------------------- ')
print('Mean of precision of AD : ', np.mean(AD_precision))
print('Mean of precision of MCI_c : ', np.mean(MCI_c_precision))
print('Mean of precision of MCI_nc : ', np.mean(MCI_nc_precision))
print('Mean of precision of CN : ', np.mean(CN_precision))
print(' --------------------- ')
print('std of precision of AD : ', np.std(AD_precision))
print('std of precision of MCI_c : ', np.std(MCI_c_precision))
print('std of precision of MCI_nc : ', np.std(MCI_nc_precision))
print('std of precision of CN : ', np.std(CN_precision))
print(' --------------------- ')
print(' --------------------- ')
print('Mean of recall of AD : ', np.mean(AD_recall))
print('Mean of recall of MCI_c : ', np.mean(MCI_c_recall))
print('Mean of recall of MCI_nc : ', np.mean(MCI_nc_recall))
print('Mean of recall of CN : ', np.mean(CN_recall))
print(' --------------------- ')
print('std of recall of AD : ', np.std(AD_recall))
print('std of recall of MCI_c : ', np.std(MCI_c_recall))
print('std of recall of MCI_nc : ', np.std(MCI_nc_recall))
print('std of recall of CN : ', np.std(CN_recall))
print(' --------------------- ')
print(' --------------------- ')
print('Mean of f1-score of AD : ', np.mean(AD_f1))
print('Mean of f1-score of MCI_c : ', np.mean(MCI_c_f1))
print('Mean of f1-score of MCI_nc : ', np.mean(MCI_nc_f1))
print('Mean of f1-score of CN : ', np.mean(CN_f1))
print(' --------------------- ')
print('std of f1-score of AD : ', np.std(AD_f1))
print('std of f1-score of MCI_c : ', np.std(MCI_c_f1))
print('std of f1-score of MCI_nc : ', np.std(MCI_nc_f1))
print('std of f1-score of CN : ', np.std(CN_f1))
print(' --------------------- ')
print(' --------------------- ')
print('Mean of support of AD : ', np.mean(AD_support))
print('Mean of support of MCI_c : ', np.mean(MCI_c_support))
print('Mean of support of MCI_nc : ', np.mean(MCI_nc_support))
print('Mean of support of CN : ', np.mean(CN_support))
print(' --------------------- ')
print('std of support of AD : ', np.std(AD_support))
print('std of support of MCI_c : ', np.std(MCI_c_support))
print('std of support of MCI_nc : ', np.std(MCI_nc_support))
print('std of support of CN : ', np.std(CN_support))
print('#########################################################################')
print('#########################################################################')
# Persist all raw results for later analysis.
DataDict={"AccScores":AccScores,"AccDetails":AccDetails,"AllRegErrors":AllRegErrors
          , "All_Predicts_class": All_Predicts_class , "All_Truth_class": All_Truth_class,
          "All_Predicts_reg": All_Predicts_reg , "All_Truth_reg": All_Truth_reg}
import pickle
pickle.dump(DataDict,open("pkl_Results_Combined_2.pkl","wb"))
| 18,374 | 7,293 |
#!/usr/bin/env python3
#
# Calculates sentiment polarity scores over the progression of a speech
# and writes them to STDOUT. The output can be parsed by gnuplot.
#
# Original author: Bastian Rieck
from textblob import TextBlob
import os
import sys
"""
Calculates sentiments over the progression of a given speech. The
results of this function are scaled such that the total *time* of
the speech lies between [0,1].
"""
def make_sentiment_curve(text, title):
    """Print a gnuplot-readable sentiment-polarity curve for *text*.

    Each sentence contributes one "t polarity" sample, t being the sentence
    position scaled to [0, 1]; a second, constant curve holds the mean
    polarity over the whole speech.
    """
    blob = TextBlob(text)
    n = len(blob.sentences)
    if n == 0:
        # FIX: an empty text used to raise ZeroDivisionError on the mean.
        print("\"%s\"" % title)
        print("\n")
        return
    polarities = []
    print("\"%s\"" % title)
    for index, sentence in enumerate(blob.sentences):
        polarity = sentence.sentiment.polarity
        # FIX: a one-sentence text used to raise ZeroDivisionError (n-1 == 0);
        # place the single sample at t = 0.
        t = index / (n - 1) if n > 1 else 0.0
        polarities.append(polarity)
        print(t, polarity)
    # Try to mitigate issues with floating point numbers; I am pretty sure
    # that this should *not* be that relevant here, though.
    sum_polarities = sum(sorted(polarities))
    mean_polarity = sum_polarities / n
    print("\n")
    print("\"%s\"" % title)
    print("0.0 %f" % mean_polarity)
    print("1.0 %f" % mean_polarity)
    print("\n")
"""
Extracts a year and a name from a filename.
"""
def get_year_and_name(filename):
    """Split a filename like '1961_John_F_Kennedy.txt' into (year, name)."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    readable = stem.replace("_", " ")
    # First four characters are the year; the name starts after the separator.
    return readable[:4], readable[5:]
"""
main
"""
if __name__ == "__main__":
    # Each CLI argument is a speech transcript file; emit one curve per file.
    for speech_path in sys.argv[1:]:
        year, name = get_year_and_name(speech_path)
        with open(speech_path) as handle:
            contents = handle.read()
        make_sentiment_curve(contents, "%s (%s)" % (name, year))
| 1,641 | 579 |
import numpy as np
import matplotlib.pyplot as plt
import warnings
import pandas as pd
import scipy.stats as st
import statsmodels as sm
import seaborn as sns
import math
import collections
from collections import Counter
import statistics
import matplotlib
# By state level - Fuels emission factors - 1999 to 2017
# Higher heating values used to convert reported fuel quantities to mmBTU.
HHV_Coal= 19.73 # Coke Coal HHV (mm BTU/ short ton) Source: Emission Factors for Greenhouse Gas Inventories
HHV_Gas= 1.033 # Natural gas HHV (mmBtu/ mcf) Source: Emission Factors for Greenhouse Gas Inventories
HHV_Pet= 0.145*42 # Crude Oil (close to distilled Oil) HHV (mmBtu/ barrel) Source: Emission Factors for Greenhouse Gas Inventories
# Input Files
# EIA state-level annual tables, fetched from GitHub -- note these loads
# require network access at import time.
emissions_regions = pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/emission_annual_state.csv')
generation_regions = pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/annual_generation_state.csv')
consumption_regions = pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/consumption_annual_state.csv')
US_states= pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/US_states.csv')
# Characteristics of Inputs
# Module-level column aliases shared by EF() and EGEF_state() below.
states= US_states['States']
emissions_regions_year= emissions_regions['Year']
consumption_regions_year= consumption_regions['YEAR']
generation_regions_year= generation_regions['YEAR']
emissions_regions_state= emissions_regions['State']
consumption_regions_state= consumption_regions['STATE']
generation_regions_state= generation_regions['STATE']
emissions_regions_type= emissions_regions['Producer Type']
consumption_regions_type= consumption_regions['TYPE OF PRODUCER']
generation_regions_type= generation_regions['TYPE OF PRODUCER']
emissions_regions_source= emissions_regions['Energy Source']
consumption_regions_source= consumption_regions['ENERGY SOURCE']
generation_regions_source= generation_regions['ENERGY SOURCE']
emissions_regions_CO2= emissions_regions['CO2'] # metric tones
emissions_regions_SO2= emissions_regions['SO2'] # metric tones
emissions_regions_NOx= emissions_regions['Nox'] # metric tones
consumption_regions_fuels= consumption_regions['CONSUMPTION for ELECTRICITY'] #Short tones, barrel, Mcf
generation_regions_fuels= generation_regions['GENERATION (Megawatthours)'] #Short tones, barrel, Mcf
def EF(year, Fuel, emissions_regions_xxx, Electric_scale, HHV):
    """Build per-state emission-factor dictionaries for one fuel and year.

    Parameters:
        year -- data year to select (e.g. 2017).
        Fuel -- energy-source name as spelled in the EIA tables.
        emissions_regions_xxx -- pollutant column to aggregate (CO2/SO2/NOx,
            metric tons).
        Electric_scale -- producer-type filter string.
        HHV -- higher heating value converting native fuel units to mmBTU.

    Returns (dict_c, dict_e, dict_g, EF_st, GE_st): consumption [mmBTU],
    emissions [kg], generation [MWh], consumption-based factor [kg/mmBTU]
    and generation-based factor [kg/MWh], each keyed by state code.
    Reads the module-level *_regions tables and `states`.
    """
    emission = []
    emission_state = []
    consumption = []
    consumption_state = []
    generation = []
    generation_state = []
    for k in range(50):
        for i in range(len(emissions_regions)):
            if (emissions_regions_year[i] == year and emissions_regions_state[i] == states[k]
                    and emissions_regions_type[i] == Electric_scale and emissions_regions_source[i] == Fuel):
                emission.append(emissions_regions_xxx[i] * 1000)  # metric tons -> kg
                emission_state.append(states[k])
        for j in range(len(consumption_regions)):
            if (consumption_regions_year[j] == year and consumption_regions_state[j] == states[k]
                    and consumption_regions_type[j] == Electric_scale and consumption_regions_source[j] == Fuel):
                consumption.append(int(consumption_regions_fuels[j]) * HHV)  # native unit -> mmBTU
                consumption_state.append(states[k])
        for m in range(len(generation_regions)):
            if (generation_regions_year[m] == year and generation_regions_state[m] == states[k]
                    and generation_regions_type[m] == Electric_scale and generation_regions_source[m] == Fuel):
                generation.append(int(generation_regions_fuels[m]))  # MWh from this fuel
                generation_state.append(states[k])
    # Map state -> value; like the original loops, a duplicate state keeps
    # the last record seen.
    dict_c = dict(zip(consumption_state, consumption))
    dict_e = dict(zip(emission_state, emission))
    dict_g = dict(zip(generation_state, generation))
    # States with no record for this fuel get an explicit zero.
    for k in range(50):
        dict_c.setdefault(states[k], 0)
        dict_e.setdefault(states[k], 0)
        dict_g.setdefault(states[k], 0)
    EF_st = {k: dict_e[k] / dict_c[k] for k in dict_e.keys() & dict_c if dict_c[k]}
    # FIX: the generation-based factor divides by dict_g[k], so it must be
    # guarded by nonzero *generation*; the original filtered on dict_c[k]
    # and could raise ZeroDivisionError when a state consumed fuel but
    # generated nothing from it.
    GE_st = {k: dict_e[k] / dict_g[k] for k in dict_e.keys() & dict_g if dict_g[k]}
    return dict_c, dict_e, dict_g, EF_st, GE_st  # mmBTU, kg, MWh
# 2017 CO2 results per fuel for utility-scale producers.
EF_coal_results = EF(2017, 'Coal', emissions_regions_CO2, 'Total Electric Power Industry', HHV_Coal) #kg CO2/ mmBTU
EF_gas_results = EF(2017, 'Natural Gas', emissions_regions_CO2, 'Total Electric Power Industry', HHV_Gas) #kg CO2/ mmBTU
EF_pet_results = EF(2017, 'Petroleum', emissions_regions_CO2, 'Total Electric Power Industry', HHV_Pet) #kg CO2/ mmBTU
# Element [3] is the consumption-based factor, [4] the generation-based one.
EF_coal_list=list(EF_coal_results[3].values()) #kg CO2/mm BTU
EF_gas_list=list(EF_gas_results[3].values()) #kg CO2/mm BTU
EF_pet_list=list(EF_pet_results[3].values()) #kg CO2/mm BTU
GE_coal_list=list(EF_coal_results[4].values()) #kg CO2/MWh
GE_gas_list=list(EF_gas_results[4].values()) #kg CO2/MWh
GE_pet_list=list(EF_pet_results[4].values()) #kg CO2/MWh
# Keep only values within per-fuel plausibility bands (kg CO2/mmBTU);
# outliers presumably stem from sparse state records -- TODO confirm bands.
EF_coal_list = [i for i in EF_coal_list if 85 < i < 120]
EF_gas_list = [i for i in EF_gas_list if 43 < i < 63]
EF_pet_list = [i for i in EF_pet_list if 50 < i < 100]
def printinfo(list_EF):
    """Print dispersion statistics for a sample; returns None (print's value)."""
    spread = statistics.stdev(list_EF)
    center = statistics.mean(list_EF)
    return print(
        "/STD: ", round(spread, 2),
        "/Mean: ", round(center, 2),
        "/Median: ", round(statistics.median(list_EF), 2),
        "/Coef of variation %: ", round(spread * 100 / center, 2),
        "/Relative Range: ", round((max(list_EF) - min(list_EF)) / center, 2))
#Electricity CO2 EF
# Number of histogram bins used by fit_and_plot() below.
bins=20
def fit_and_plot(dist, data, bins=20):
    """Fit *dist* (a scipy.stats distribution) to *data*.

    Parameters:
        dist -- a scipy.stats continuous distribution (has .fit()).
        data -- 1-D sample to fit.
        bins -- histogram bin count (generalized from the module-level
            constant; the default matches the original global of 20).

    Returns (x, y, params, arg, loc, scale): histogram bin midpoints, the
    density histogram, the raw fit tuple and its shape/loc/scale split.
    """
    params = dist.fit(data)
    arg = params[:-2]
    loc = params[-2]
    scale = params[-1]
    # FIX: the original also built a linspace/bin_centers pair and evaluated
    # dist.pdf on it, but never used either -- dead code removed.
    y, edges = np.histogram(data, bins=bins, density=True)
    # Convert bin edges to midpoints.
    x = (edges + np.roll(edges, -1))[:-1] / 2.0
    return x, y, params, arg, loc, scale
# Monte-Carlo setup: one simulation of 10,000 random draws per state.
num_simulations=1
num_reps=10000
# Fit one candidate distribution per fuel to the screened factor samples.
coal_params=fit_and_plot(st.levy_stable, EF_coal_list)
gas_params=fit_and_plot(st.lognorm, EF_gas_list)
pet_params=fit_and_plot(st.johnsonsu, EF_pet_list)
def EGEF_state(state):
    """Monte-Carlo sample of the electricity CO2 emission factor for *state*.

    Draws num_reps random per-fuel factors from the fitted distributions,
    weights them by the state's 2017 fuel consumption (mmBTU) and divides by
    total 2017 generation (MWh); the 2.20462 factor converts kg to lb, so
    the result is presumably lb CO2/MWh -- TODO confirm units.
    Returns a numpy array of num_reps samples.
    """
    state_stats = []
    #electricty_generation_total_state = generation_regions[generation_regions['STATE']==state][generation_regions['YEAR']==2017][generation_regions['TYPE OF PRODUCER']=='Total Electric Power Industry'][generation_regions['ENERGY SOURCE']=='Total']['GENERATION (Megawatthours)']
    electricty_generation_total_state = generation_regions[
        (generation_regions['STATE']==state) &
        (generation_regions['YEAR']==2017) &
        (generation_regions['TYPE OF PRODUCER']=='Total Electric Power Industry') &
        (generation_regions['ENERGY SOURCE']=='Total')]['GENERATION (Megawatthours)']
    for i in range(num_simulations):
        # Choose random inputs for the uncertain inputs: Coal, Natural gas, Petroleum.
        # [2] is the raw scipy fit tuple returned by fit_and_plot().
        coal_EF_rd = st.levy_stable.rvs(alpha=coal_params[2][0], beta=coal_params[2][1], loc= coal_params[2][2] , scale= coal_params[2][3] , size=num_reps)
        gas_EF_rd = st.lognorm.rvs(s=gas_params[2][0], loc= gas_params[2][1] , scale= gas_params[2][2] , size=num_reps)
        pet_EF_rd = st.johnsonsu.rvs(a=pet_params[2][0], b=pet_params[2][1], loc= pet_params[2][2] , scale= pet_params[2][3] , size=num_reps)
        state_stats.append((coal_EF_rd*EF_coal_results[0][state] + gas_EF_rd*EF_gas_results[0][state] + pet_EF_rd*EF_pet_results[0][state])*2.20462/float(electricty_generation_total_state)) # EF_Electriicty (lb/MWh) Average distribution of fuels in the U.S.
    # num_simulations is 1, so only the first (single) simulation is returned.
    data_new= state_stats
    return data_new[0]
| 8,174 | 3,183 |
import os #tools for working with the CLI
import logging #needed for logging
import pigpio #needed for GPIO control
import time #needed for function timing
import threading #needed for OLED data continuous updating
import csv #needed for temporary data logging
import config as global_vars #import global variable initialization module
from pigpio_dht import DHT22 #temp and humidity sensor
from datetime import datetime #needed for control timing
from CalibrationAndDiagnostics.helpers import * #import helper functions and classes
##Create a class for handling variable target values, including default target values
class target:
    'This class creates and accesses the Target.ini file'
    # Target.ini lives in a Data/ directory next to this module.
    PROJECT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
    PATH = "%s/Data/Target.ini" % PROJECT_DIRECTORY

    def __init__(self):
        """Create Target.ini with default targets when it does not exist yet.

        FIX: file handles are now managed with with-blocks; the original
        opened the file twice and relied on manual close() calls.
        """
        if not os.path.isfile(target.PATH):
            # ConfigParser is expected from the helpers star-import at the
            # top of this file -- confirm it is re-exported there.
            self.Config = ConfigParser()
            for section in ('Water', 'Soil', 'Light', 'Temp', 'Humidity'):
                self.Config.add_section(section)
            self.Config.set('Light', 'Hours', '16')       #lighting hours per day
            self.Config.set('Light', 'Time', '12:00')     #lighting start time
            self.Config.set('Water', 'Water', '12:00')    #watering start time
            self.Config.set('Soil', 'Soil', '25')         #target soil moisture
            self.Config.set('Temp', 'Temp', '70')         #target temperature
            self.Config.set('Humidity', 'Humidity', '55') #target humidity
            # "w+" creates the file; the with-block guarantees it is closed.
            with open(target.PATH, "w+") as configfile:
                self.Config.write(configfile)

    #param - parameter to be adjusted (Water, Soil, Hours, etc)
    #parent - config section to look in (Light, Water, Soil, etc)
    def getTarget(self, param, parent=None):
        """Return the stored value for *param* (in section *parent*, or in
        the section named after *param* itself); None when the lookup fails."""
        self.Config = ConfigParser()
        self.Config.read(target.PATH)
        try:
            if parent == None:
                return self.Config.get(param, param)
            else:
                return self.Config.get(parent, param)
        except Exception as e:
            logging.error("Failed to get target value: %s" % e)
            return None

    #param - parameter to be adjusted (Water, Soil, Hours, etc)
    #value - new target value to be added
    #parent - config section to look in (Light, Water, Soil, etc)
    def setTarget(self, param, value, parent=None):
        """Persist *value* for *param* into Target.ini; 'Failed' on error.

        FIX: the original opened Target.ini with "w+" -- truncating it and
        leaking the handle -- *before* validating the update, so a failed
        Config.set() left the file emptied. The file is now rewritten only
        after the in-memory update succeeds.
        """
        self.Config = ConfigParser()
        self.Config.read(target.PATH)
        try:
            if parent == None:
                self.Config.set(param, param, str(value))  #param doubles as the section name
            else:
                self.Config.set(parent, param, str(value))
        except Exception as e:
            logging.error("Failed to set target value: %s" % e)
            return 'Failed'
        with open(target.PATH, 'w') as configfile:
            self.Config.write(configfile)
##Create a class which displays key data periodically
class dataGlance(threading.Thread):
    """Cycles the temperature, humidity and soil readings on the OLED
    display until the global exit flag is raised."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.pins = pinout()  # pin map helper (from helpers star-import)
        self.oled = oled_utility(128, 32, self.pins.getAddr('OLED'))

    def _interruptible_pause(self):
        """Sleep ~10 s in 10 ms slices, bailing out early on the exit flag."""
        for _ in range(0, 1000):
            if global_vars.data_glance_exit_flag:
                break
            time.sleep(0.01)

    def run(self):
        """Loop over the three readings, pausing between each."""
        while not global_vars.data_glance_exit_flag:
            self.oled.write_center(global_vars.current_temp, title="Temp")
            self._interruptible_pause()
            self.oled.write_center(global_vars.current_humidity, title="Humidity")
            self._interruptible_pause()
            self.oled.write_center(global_vars.current_soil, title="Soil")
            self._interruptible_pause()
##Create a class which collects and stores data as fast as the sensors allow
class dataCollect(threading.Thread):
    # Polls the DHT22, soil-moisture and float sensors into global_vars and
    # appends a CSV log row roughly every 5 minutes.

    #Create a function to initialize thread and data variables
    def __init__(self, TEMP, FLOAT):
        threading.Thread.__init__(self)
        self.FLOAT = FLOAT  # GPIO pin of the float sensor
        #Initialize DHT 22
        self.DHT_SENSOR = DHT22(TEMP)
        #initialize pigpio
        self.pi = pigpio.pi() #Initialize pigpio
        #Attempt to initialize sensor data
        try:
            [global_vars.current_temp, global_vars.current_humidity] = getTempHumidity(self.DHT_SENSOR)
            global_vars.current_soil = getSoilMoisture()
            global_vars.current_float = getFloat(self.pi, self.FLOAT)
        except Exception as e:
            logging.error("Failed one or more sensor readings: %s" % e) #exception block to prevent total failure if any sensor fails a reading
            #Reinitialize sensor with higher timeout
            self.DHT_SENSOR = DHT22(TEMP, timeout_secs=5)

    #Create a function to run the thread
    def run(self):
        timer = 0 #create a timer for logging  # NOTE(review): never used afterwards
        prev_light = global_vars.currently_lighting #store initial value of light
        #temporary code to make a csv of sensor data
        PROJECT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) #Get current directory for log files and for pin file
        path = "%s/Data/SensorData.csv" % PROJECT_DIRECTORY
        prev_log_time = int(time.strftime("%M")) #store the minute that data is logged
        #Create a loop to constantly check and update the sensor data values
        while True:
            #Get current sensor values
            try:
                [global_vars.current_temp, global_vars.current_humidity] = getTempHumidity(self.DHT_SENSOR)
                global_vars.current_soil = getSoilMoisture()
                global_vars.current_float = getFloat(self.pi, self.FLOAT)
            except Exception as e:
                logging.error("Failed one or more sensor readings: %s" % e) #exception block to prevent total failure if any sensor fails a reading
            #Check if it has been 5 minutes since last log
            # The second clause handles the minute counter wrapping past :59.
            if int(time.strftime("%M")) >= prev_log_time + 5 or (prev_log_time >= 56 and int(time.strftime("%M")) >= 5-(60-prev_log_time) and int(time.strftime("%M")) < 10):
                prev_log_time = int(time.strftime("%M")) #reset log time
                events = [] #create empty list of events
                #check if pump occured, then reset pump flag if it did
                if global_vars.pumped == True:
                    events.append("Pumped") #add "pumped" to events list
                    global_vars.pumped = False #reset pump flag
                #check if lighting status changed
                if global_vars.currently_lighting != prev_light:
                    #determine whether lights were turned on or off based on initial state
                    if prev_light == True:
                        events.append("Light Off")
                    else:
                        events.append("Light On")
                    prev_light = global_vars.currently_lighting #set previous lighting to the current value
                data_row = [datetime.now(), global_vars.current_temp, global_vars.current_humidity, global_vars.current_soil]
                data_row.extend(events)
                #temporary code to write to csv
                with open(path, mode='a') as data:
                    data_writer = csv.writer(data)
                    data_writer.writerow(data_row)
            time.sleep(5) #give the sensors a 5 second rest
##Create a class which adjusts target parameters based on the OLED menu and stores the values
class targetAdjust(threading.Thread):
    """One-shot thread: let the user pick a target via the OLED menu and
    persist the chosen value to Target.ini."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.target = target()  # accessor for the Target.ini file

    def run(self):
        [self.user_choice, self.node] = target_select()
        if self.user_choice is not None:
            # Light settings are nested under the "Light" section; everything
            # else uses the parameter name as its own section.
            if self.node.parent.option == "Light":
                self.target.setTarget(self.node.option, self.user_choice, parent="Light")
            else:
                self.target.setTarget(self.node.option, self.user_choice)
        # Debounce so the adjustment menu is not immediately re-entered.
        time.sleep(1)
##Create a class responsible for all aspects of actuator control
class actuatorControl(threading.Thread):
    """Drives the grow light, water pump and fans from the targets stored in
    Target.ini and the latest sensor readings in global_vars."""

    def __init__(self, pi, PUMP, LIGHT, FAN_ONE, FAN_TWO):
        threading.Thread.__init__(self)
        self.target = target()  # accessor for Target.ini values
        self.pi = pi            # pigpio handle used for all GPIO writes
        # GPIO pin numbers for each actuator
        self.pump = PUMP
        self.light = LIGHT
        self.fan_one = FAN_ONE
        self.fan_two = FAN_TWO

    def endTime(self, start, hours):
        """Return the 'HH:MM' string *hours* after *start* ('HH:MM').

        FIXES: hours is parsed with float() so fractional values such as
        "16.5" work (the original used int(hours), which raised ValueError
        on decimals despite its comment promising decimal support), and a
        minute overflow now carries into the hour before the midnight wrap.
        """
        minutes = int(60 * float(hours))
        remaining_minutes = minutes % 60
        whole_hours = (minutes - remaining_minutes) / 60
        start_hour = int(start[0:2])
        start_minute = int(start[3:5])
        end_hour = int(start_hour + whole_hours)
        end_minute = int(start_minute + remaining_minutes)
        if end_minute > 59:   # carry minute overflow into the hour
            end_minute = end_minute - 60
            end_hour = end_hour + 1
        if end_hour > 23:     # wrap past midnight
            end_hour = end_hour - 24
        return "{:02d}:{:02d}".format(end_hour, end_minute)

    def _pump_until_moist(self):
        """Run the pump until soil moisture reaches its target or ~40 s pass."""
        target_soil = self.target.getTarget("Soil")
        t = 0
        while t <= 40 and global_vars.current_soil < int(target_soil):
            global_vars.pumped = True  # flag the watering for the data logger
            self.pi.write(self.pump, 1)
            t = t + 1
            time.sleep(1)
        self.pi.write(self.pump, 0)

    def run(self):
        """Control loop: light by schedule, pump at watering time, fans on
        temperature/humidity overshoot."""
        float_down = 0  # consecutive waterings attempted with the float down
        while True:
            # ---- LIGHT CONTROL ----
            try:
                current_time = time.strftime("%H:%M")
                target_time = self.target.getTarget("Time", parent="Light")
                target_hours = self.target.getTarget("Hours", parent="Light")
                end_time = self.endTime(target_time, target_hours)
                # The lighting window may wrap past midnight (end < start),
                # hence the extra disjuncts; start == end means 24 h on.
                if ((target_time <= current_time < end_time)
                        or (current_time >= target_time and end_time < target_time)
                        or (current_time < end_time and end_time < target_time)
                        or (target_time == end_time)):
                    self.pi.write(self.light, 1)
                    global_vars.currently_lighting = True
                else:
                    self.pi.write(self.light, 0)
                    global_vars.currently_lighting = False
            except Exception as e:
                logging.error("Failed to control light, reattempting: %s" % e)
            # NOTE(review): poll interval placed at loop level from the
            # flattened original -- confirm it was not inside the except.
            time.sleep(10)
            # ---- PUMP CONTROL ----
            try:
                current_time = time.strftime("%H:%M")
                if current_time == self.target.getTarget("Water"):
                    if global_vars.current_float != 0:
                        # Reservoir float is up: safe to water.
                        float_down = 0
                        self._pump_until_moist()
                    elif float_down < 4:
                        # Float is down: allow up to 4 more waterings (days)
                        # before refusing to run the pump dry.
                        float_down = float_down + 1
                        self._pump_until_moist()
            except Exception as e:
                logging.error("Failed to control pump: %s" % e)
            # ---- FAN CONTROL ----
            try:
                target_humidity = int(self.target.getTarget("Humidity"))
                target_temp = int(self.target.getTarget("Temp"))
                # temp == 0 presumably signals a failed reading; run the fans
                # to be safe in that case.
                fans_on = (global_vars.current_temp > target_temp
                           or global_vars.current_humidity > target_humidity
                           or global_vars.current_temp == 0)
                level = 1 if fans_on else 0
                self.pi.write(self.fan_one, level)
                self.pi.write(self.fan_two, level)
            except Exception as e:
                logging.error("Failed to control temp or humidity: %s" % e)
| 16,757 | 4,662 |
def _partial_sums(values):
    """Sum of all elements except one, for each element in *values*."""
    total = sum(values)
    return [total - v for v in values]

# Read five space-separated integers and print the smallest and largest sum
# of any four of them.
# FIXES: the original shadowed the builtins `list` and `sum`, and computed
# each 4-element sum with an O(n^2) rotate-and-sum loop; total-minus-each is
# equivalent and linear.
nums = list(map(int, input().split(" ")))
partials = _partial_sums(nums)
print("{} {}".format(min(partials), max(partials)))
| 257 | 110 |
import instrumenter
import unittest
class TestIfExpr(unittest.TestCase):
    """Verifies that a conditional expression is rewritten into a
    stepper_lib.if_expr(...) call by the instrumenter."""

    def test_if_expr(self):
        source = "5 if x > 7 else 10"
        expected = "stepper_lib.if_expr(x > 7, 5, 10)"
        instrumented = instrumenter.instrument(source, "ifexpr").strip()
        self.assertEqual(instrumented, expected)
if __name__ == '__main__':
unittest.main() | 352 | 145 |
"""
This module provides Run.SummaryList data access object
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class SummaryList(DBFormatter):
    """Run.SummaryList DAO: largest lumi section number for a run,
    optionally restricted to one dataset."""

    def __init__(self, logger, dbi, owner=""):
        DBFormatter.__init__(self, logger, dbi)
        # MySQL (and an empty owner) use unqualified table names.
        self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else ""
        self.sql = """SELECT MAX(LUMI_SECTION_NUM) AS MAX_LUMI
                      FROM {owner}FILE_LUMIS FL""".format(owner=self.owner)

    def execute(self, conn, dataset="", run_num=-1, transaction=False):
        """Return the formatted rows for *run_num* (and *dataset* if given)."""
        binds = {"run_num": run_num}
        where_clause = "WHERE RUN_NUM=:run_num"
        if dataset:
            join_clause = (
                "JOIN {owner}FILES FS ON FS.FILE_ID=FL.FILE_ID "
                "JOIN {owner}DATASETS DS ON FS.DATASET_ID=DS.DATASET_ID"
            ).format(owner=self.owner)
            where_clause += " AND DS.DATASET=:dataset"
            binds["dataset"] = dataset
            sql = " ".join((self.sql, join_clause, where_clause))
        else:
            sql = " ".join((self.sql, where_clause))
        cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
        rows = []
        for cursor in cursors:
            rows.extend(self.formatCursor(cursor, size=100))
        return rows
| 1,394 | 464 |
from rewx.core import *
| 25 | 10 |
import pytest
@pytest.fixture
def setup(kube):
    # Placeholder fixture: it pulls in the `kube` fixture (presumably
    # provided by a kube pytest plugin or conftest -- confirm) but performs
    # no setup of its own yet.
    pass
def test_something(setup, kube):
assert True == True
| 117 | 43 |
from ..protocol import Proto
import abc
from typing import Any
class Events(abc.ABC):
    """Dispatch table mapping protocol event types to handler callbacks.

    Subclasses implement the abstract ``on_*`` handlers; incoming events are
    routed through ``__call__``.
    """

    def __init__(self):
        # NOTE(review): Proto.ON_RECONNECT is absent from this map even
        # though on_reconnect() exists below -- presumably the connection
        # layer invokes it directly; confirm against the caller.
        self._evmap = {
            Proto.ON_NODE_STATUS: self.on_node_status,
            Proto.ON_WARN: self.on_warning,
            Proto.ON_WATCH_INI: self.on_watch_init,
            Proto.ON_WATCH_UPD: self.on_watch_update,
            Proto.ON_WATCH_DEL: self.on_watch_delete,
            Proto.ON_WATCH_STOP: self.on_watch_stop,
        }

    def __call__(self, tp: Proto, data: Any) -> None:
        """Dispatch *data* to the handler registered for event type *tp*.

        Raises KeyError for an unregistered event type.
        """
        handler = self._evmap.get(tp)
        if handler is None:
            # FIX: the original `self._evmap.get(tp)(data)` raised a cryptic
            # "'NoneType' object is not callable" for unknown event types.
            raise KeyError('no event handler registered for {!r}'.format(tp))
        handler(data)

    @abc.abstractmethod
    def on_reconnect(self) -> None:
        """On re-connect
        Called after a re-concect is finished (including authentication)
        """
        pass

    @abc.abstractmethod
    def on_node_status(self, status: str) -> None:
        """On node status
        status: String containing a `new` node status.
        Optional values:
        - OFFLINE
        - CONNECTING
        - BUILDING
        - SHUTTING_DOWN
        - SYNCHRONIZING
        - AWAY
        - AWAY_SOON
        - READY
        """
        pass

    @abc.abstractmethod
    def on_warning(self, warn: dict) -> None:
        """On warning
        warn: a dictionary with `warn_msg` and `warn_code`. for example:
        {
            "warn_msg": "some warning message"
            "warn_code": 1
        }
        """
        pass

    @abc.abstractmethod
    def on_watch_init(self, data: dict) -> None:
        """On watch init.
        Initial data from a single thing. for example:
        {
            "#": 123,
            "name": "ThingsDB!",
            ...
        }
        """
        pass

    @abc.abstractmethod
    def on_watch_update(self, data: dict) -> None:
        """On watch update.
        Updates for a thing with ID (#). One event may contain more than one
        job. for example:
        {
            "#": 123,
            "jobs": [
                {
                    "set": {
                        "answer": 42
                    }
                }
            ]
        }
        """
        pass

    @abc.abstractmethod
    def on_watch_delete(self, data: dict) -> None:
        """On watch delete.
        The thing is removed from the collection (and garbage collected).
        for example:
        {
            "#": 123
        }
        """
        pass

    @abc.abstractmethod
    def on_watch_stop(self, data: dict) -> None:
        """On watch stop.
        The thing is not watched anymore due to either call to `unwatch()`, or
        by a unwatch request (REQ_UNWATCH). This event is *not* triggered when
        a connection to a node has been lost.
        for example:
        {
            "#": 123
        }
        """
        pass
| 2,852 | 843 |
import pygame
import time
import random
pygame.init()
# display
display_width = 1024
display_height = 768
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Relâmpago Marquinhos')
# colors
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
# car
# Sprite is loaded from the working directory; its size drives the bounds.
carImg = pygame.image.load('racecar.png')
car_width = carImg.get_width()
car_height = carImg.get_height()
# boundaries
# Largest top-left coordinates that keep the sprite fully on screen.
display_x_boundary = display_width - car_width
display_y_boundary = display_height - car_height
clock = pygame.time.Clock()
def car(x,y):
    """Blit the car sprite with its top-left corner at (x, y)."""
    position = (x, y)
    gameDisplay.blit(carImg, position)
def draw_block(block_x, block_y, block_width, block_height, color):
    """Draw the falling obstacle as a filled rectangle."""
    rect = [block_x, block_y, block_width, block_height]
    pygame.draw.rect(gameDisplay, color, rect)
def text_objects(text, font):
    """Render *text* in black; return (surface, bounding rect)."""
    rendered = font.render(text, True, black)
    return rendered, rendered.get_rect()
def message_display(text):
    """Show *text* centred on screen in a large font and flip the display."""
    headline_font = pygame.font.Font('freesansbold.ttf',50)
    surface, rect = text_objects(text, headline_font)
    rect.center = (round(display_width/2), round(display_height/2))
    gameDisplay.blit(surface, rect)
    pygame.display.update()
def explode():
    """Show the crash message."""
    message_display('Explosion!')
def game_loop():
    """Main loop: steer the car with the arrow keys, dodge the falling
    block; a collision triggers a skid, reaching the bottom edge explodes."""
    car_x = round((display_width * 0.45))
    car_y = round((display_height * 0.8))
    car_x_change = 0
    car_y_change = 0
    car_speed = 10
    block_width = 100
    block_height = 100
    block_x = random.randrange(0, display_width - block_width)
    block_y = -block_height  # start just above the visible area
    block_speed = 7
    gameExit = False
    crashed = False
    while not gameExit:
        gameDisplay.fill(white)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
            #print(event)
            ############################
            # Steering is ignored while the car is skidding after a crash.
            if not crashed:
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_LEFT:
                        car_x_change = -car_speed
                    elif event.key == pygame.K_RIGHT:
                        car_x_change = car_speed
                    elif event.key == pygame.K_UP:
                        car_y_change = -car_speed
                    elif event.key == pygame.K_DOWN:
                        car_y_change = car_speed
                if event.type == pygame.KEYUP:
                    if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                        car_x_change = 0
                    if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
                        car_y_change = 0
        #######################
        # Apply motion, then clamp the car inside the window.
        car_x += car_x_change
        car_y += car_y_change
        block_y += block_speed
        if car_x > display_x_boundary:
            car_x = display_x_boundary
        elif car_x < 0:
            car_x = 0
        if car_y > display_y_boundary:
            car_y = display_y_boundary
        elif car_y < 0:
            car_y = 0
        # Axis-aligned overlap test between car and block.
        if car_y < block_y + block_height and car_y > block_y:
            #print('y crossover')
            if car_x > block_x and car_x < block_x + block_width or car_x + car_width > block_x and car_x + car_width < block_x + block_width:
                #print('x crossover')
                car_y = block_y + block_height  # push the car below the block
                crashed = True
        # NOTE(review): indentation inferred -- explosion fires whenever the
        # car sits on the bottom edge; confirm it was not nested deeper.
        if car_y >= display_y_boundary:
            explode()
        if crashed:
            # Skid: decay horizontal speed toward zero, force downward drift;
            # once |car_x_change| < 1 the skid ends and control returns.
            if car_x_change < 1 and car_x_change >= 0 or car_x_change <= 0 and car_x_change > -1:
                car_x_change = 0
                crashed = False
            car_x_change = car_x_change / 1.05
            car_y_change = round(car_speed / 1.1)
            print(car_x_change)
            print(car_speed)
            # print(car_y_change)
        car(car_x,car_y)
        draw_block(block_x, block_y, block_width, block_height, black)
        #print("Blk X: {} - {}; Blk Y: {} - {}".format(block_x, (block_x + block_width), block_y, (block_y + block_height)))
        # Recycle the block at a new random column once it leaves the screen.
        if block_y > display_height:
            block_y = 0 - block_height
            block_x = random.randrange(0, display_width - block_width)
        #print("Car X: {} - {}; Car Y: {} - {}".format(car_x, (car_x + car_width), car_y, (car_y + car_height)))
        #print(keys_disabled)
        pygame.display.update()
        clock.tick(60)
game_loop()
pygame.quit()
quit() | 4,650 | 1,619 |
"""Base classes to be used in FSM."""
import copy
from datetime import datetime
from typing import Dict, Iterator, List, Tuple, Type # noqa
import pytz
from tuco.exceptions import (
TucoAlreadyLockedError,
TucoEventNotFoundError,
TucoInvalidStateChangeError,
TucoInvalidStateHolderError,
)
from tuco.locks import MemoryLock
from tuco.locks.base import BaseLock # noqa
from tuco.meta import FSMBase
from tuco.properties import Event, FinalState, State, Timeout
__all__ = ("FSM",)
mockable_utcnow = datetime.utcnow # Easier to write tests
class FSM(metaclass=FSMBase):
    """Class that handle event transitions.
    Your state machines should extend from this.
    """
    #: The default initial state is "new" but can be overridden
    initial_state = "new"
    # Names of the attributes read/written on the container (state holder).
    state_attribute = "current_state"
    date_attribute = "current_state_date"
    id_field = "id"
    # Escape-hatch state: the current_state setter skips state_allowed() for it.
    fatal_state = "fatal_error"
    lock_class = MemoryLock  # type: Type[BaseLock]
    # Maps state name -> State instance; presumably populated by the FSMBase
    # metaclass -- confirm in tuco.meta.
    _states = None  # type: Dict[str, State]
def __init__(self, container_object) -> None:
"""Initialize the container object with the initial state."""
self.container_object = container_object
for field in (self.state_attribute, self.date_attribute, self.id_field):
if not hasattr(container_object, field):
raise TucoInvalidStateHolderError(
"Required field {!r} not found inside {!r}.".format(field, container_object)
)
if self.current_state is None:
self.current_state = self.initial_state
self.lock = self.lock_class(self, self.id_field)
    def __enter__(self) -> "FSM":
        """Lock the state machine and return it for use as a context manager."""
        self.lock.lock()
        return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""If TucoAlreadyLockedError did not throw, unlock the machine."""
if exc_type and issubclass(exc_type, TucoAlreadyLockedError):
return
self.lock.unlock()
def __repr__(self) -> str:
"""Basic representation."""
return "<{} - current_state {!r} with holder {} - ID {!r}>".format(
self.__class__.__name__,
self.current_state,
self.container_object.__class__.__name__,
getattr(self.container_object, self.id_field),
)
    @property
    def current_time(self) -> datetime:
        """Return utcnow and should be extended if you care about time zone or something else."""
        # Indirection through the module-level alias lets tests patch time.
        return mockable_utcnow()
@property
def current_state_date(self) -> datetime:
"""Return current date stored in object."""
return getattr(self.container_object, self.date_attribute)
@property
def current_state(self) -> str:
"""Return the current state stored in object."""
return getattr(self.container_object, self.state_attribute)
@current_state.setter
def current_state(self, new_state) -> None:
"""Set a state on container object."""
call_on_change = bool(self.current_state)
old_state = copy.copy(self.container_object)
if new_state != self.fatal_state:
if not self.state_allowed(new_state):
raise TucoInvalidStateChangeError(
"Old state {!r}, new state {!r}.".format(self.current_state, new_state)
)
setattr(self.container_object, self.state_attribute, new_state)
setattr(self.container_object, self.date_attribute, self.current_time)
for command in self.current_state_instance.on_enter:
command(self.container_object)
else:
setattr(self.container_object, self.state_attribute, new_state)
setattr(self.container_object, self.date_attribute, self.current_time)
if call_on_change:
self._call_on_change(old_state, self.container_object)
def state_allowed(self, state_name) -> bool:
"""Check if the transition to the new state is allowed."""
if self.current_state is None and state_name == self.initial_state:
return True
if isinstance(self.current_state_instance, FinalState):
return False
if self.current_state_instance.timeout and self.current_state_instance.timeout.target_state == state_name:
return True
if any(event for event in self.possible_events if event.target_state == state_name):
return True
current_state = self.current_state_instance
if current_state.error and current_state.error.target_state == state_name:
return True
for event in current_state.events:
if event.error and event.error.target_state == state_name:
return True
return False
@property
def current_state_instance(self) -> State:
"""Return the current `State` instance."""
return self._states[self.current_state]
@property
def possible_events(self) -> List[Event]:
"""Return all possible events for the current state."""
return self.possible_events_from_state(self.current_state)
@classmethod
def possible_events_from_state(cls, state_name) -> List[Event]:
"""Return all possible events from a specific state.
:param state_name: State to check
"""
state = cls._states[state_name]
return getattr(state, "events", [])
def _get_event(self, event_name) -> Event:
"""Get an event inside current state based on it's name."""
for event in self.possible_events:
if event.event_name == event_name:
return event
raise TucoEventNotFoundError(
"Event {!r} not found in {!r} on current state {!r}".format(
event_name, [event.event_name for event in self.possible_events], self.current_state
)
)
def event_allowed(self, event_name) -> bool:
"""Check if is possible to run an event.
:param event_name: Event to check.
"""
try:
self._get_event(event_name)
except TucoEventNotFoundError:
return False
return True
def _trigger_error(self, event) -> None:
"""Search for an error handler inside event, and then inside state."""
if event.error:
error = event.error
else:
error = self._states[self.current_state].error
if not error:
return
for command in error.commands:
command(self.container_object)
self.current_state = error.target_state
def trigger(self, event_name, *args, **kwargs) -> bool:
"""Trigger an event and call its commands with specified arguments..
:param event_name: Event to execute.
"""
event = self._get_event(event_name)
for command in event.commands:
try:
return_value = command(self.container_object, *args, **kwargs)
except Exception as e:
self._call_on_error(e, event.target_state)
raise
if not return_value:
self._trigger_error(event)
return False
self.current_state = event.target_state
return True
def trigger_timeout(self) -> bool:
"""Trigger timeout if it's possible."""
timeout = self.current_state_instance.timeout
if not timeout:
return False
if datetime.utcnow().replace(tzinfo=pytz.UTC) < (self.current_state_date + timeout.timedelta):
return False
for command in timeout.commands:
try:
command(self.container_object)
except Exception as e:
self._call_on_error(e, timeout.target_state)
raise
self.current_state = timeout.target_state
return True
@classmethod
def get_all_states(cls) -> Dict[str, State]:
"""List all states for this state machine."""
return cls._states
@classmethod
def get_all_timeouts(cls) -> Iterator[Tuple[str, Timeout]]:
"""List all configured timeouts for this state machine."""
for state_name, state in cls._states.items():
if isinstance(state, FinalState) or not state.timeout:
continue
yield (state_name, state.timeout)
@classmethod
def get_all_finals(cls) -> Iterator[FinalState]:
"""List all configured final states for this state machine."""
for state_name, state in cls._states.items():
if isinstance(state, FinalState):
yield state_name
def _call_on_change(self, old_state, new_state) -> None:
"""If on_change function exists, call it.
:param old_state: A shallow copy of the holder object.
:param new_state: The changed version of the object holder.
"""
function = getattr(self, "_on_change_event", None)
if function:
function(old_state, new_state)
def _call_on_error(self, exception, new_state) -> None:
"""If on_error function exists, call it."""
function = getattr(self, "_on_error_event", None)
if function:
function(self.current_state, new_state, exception)
@classmethod
def generate_graph(cls, file_format="svg") -> str:
"""Generate a SVG graph."""
from .graph_builder import generate_from_class
return generate_from_class(cls, file_format)
| 9,521 | 2,639 |
from PIL import Image
import torch
import numpy as np
# Flat [r, g, b, r, g, b, ...] palettes for PIL Image.putpalette, one per
# dataset; color index i colors class label i.
cityscapes_palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0,
70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
#camvid_palette = [128, 128, 128, 128, 0, 0, 192, 192, 128, 128, 64, 128, 60, 40, 222, 128, 128, 0, 192, 128, 128, 64,
# 64,
# 128, 64, 0, 128, 64, 64, 0, 0, 128, 192]
# CamVid palette: all classes black except index 8 (white) -- a binary mask view.
camvid_palette = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
0, 255, 255, 255, 0, 0, 0, 0, 0, 0]
pascal_PALETTE = [120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50,
4, 200, 3, 120, 120, 80, 140, 140, 140, 204, 5, 255,
230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7,
150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82,
143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3,
0, 102, 200, 61, 230, 250, 255, 6, 51, 11, 102, 255,
255, 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220,
255, 9, 92, 112, 9, 255, 8, 255, 214, 7, 255, 224,
255, 184, 6, 10, 255, 71, 255, 41, 10, 7, 255, 255,
224, 255, 8, 102, 8, 255, 255, 61, 6, 255, 194, 7,
255, 122, 8, 0, 255, 20, 255, 8, 41, 255, 5, 153,
6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255,
140, 140, 140, 250, 10, 15, 20, 255, 0, 31, 255, 0,
255, 31, 0, 255, 224, 0, 153, 255, 0, 0, 0, 255,
255, 71, 0, 0, 235, 255, 0, 173, 255, 31, 0, 255]
# NOTE: the ADE palette starts with the same 48 colors as pascal_PALETTE and
# then extends to 150 classes.
ade_PALETTE = [120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50,
4, 200, 3, 120, 120, 80, 140, 140, 140, 204, 5, 255,
230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7,
150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82,
143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3,
0, 102, 200, 61, 230, 250, 255, 6, 51, 11, 102, 255,
255, 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220,
255, 9, 92, 112, 9, 255, 8, 255, 214, 7, 255, 224,
255, 184, 6, 10, 255, 71, 255, 41, 10, 7, 255, 255,
224, 255, 8, 102, 8, 255, 255, 61, 6, 255, 194, 7,
255, 122, 8, 0, 255, 20, 255, 8, 41, 255, 5, 153,
6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255,
140, 140, 140, 250, 10, 15, 20, 255, 0, 31, 255, 0,
255, 31, 0, 255, 224, 0, 153, 255, 0, 0, 0, 255,
255, 71, 0, 0, 235, 255, 0, 173, 255, 31, 0, 255,
11, 200, 200, 255, 82, 0, 0, 255, 245, 0, 61, 255,
0, 255, 112, 0, 255, 133, 255, 0, 0, 255, 163, 0,
255, 102, 0, 194, 255, 0, 0, 143, 255, 51, 255, 0,
0, 82, 255, 0, 255, 41, 0, 255, 173, 10, 0, 255,
173, 255, 0, 0, 255, 153, 255, 92, 0, 255, 0, 255,
255, 0, 245, 255, 0, 102, 255, 173, 0, 255, 0, 20,
255, 184, 184, 0, 31, 255, 0, 255, 61, 0, 71, 255,
255, 0, 204, 0, 255, 194, 0, 255, 82, 0, 10, 255,
0, 112, 255, 51, 0, 255, 0, 194, 255, 0, 122, 255,
0, 255, 163, 255, 153, 0, 0, 255, 10, 255, 112, 0,
143, 255, 0, 82, 0, 255, 163, 255, 0, 255, 235, 0,
8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255,
255, 0, 31, 0, 184, 255, 0, 214, 255, 255, 0, 112,
92, 255, 0, 0, 224, 255, 112, 224, 255, 70, 184, 160,
163, 0, 255, 153, 0, 255, 71, 255, 0, 255, 0, 163,
255, 204, 0, 255, 0, 143, 0, 255, 235, 133, 255, 0,
255, 0, 235, 245, 0, 255, 255, 0, 122, 255, 245, 0,
10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255,
255, 255, 0, 0, 153, 255, 0, 41, 255, 0, 255, 204,
41, 0, 255, 41, 255, 0, 173, 0, 255, 0, 245, 255,
71, 0, 255, 122, 0, 255, 0, 255, 184, 0, 92, 255,
184, 255, 0, 0, 133, 255, 255, 214, 0, 25, 194, 194,
102, 255, 0, 92, 0, 255]
# Mapillary Vistas palette (66 classes); deliberately not zero-padded below.
mvd_palette = [165, 42, 42, 0, 192, 0, 196, 196, 196, 190, 153, 153, 180, 165, 180, 90, 120, 150, 102, 102, 156, 128, 64, 255, 140, 140, 200, 170, 170, 170, 250, 170, 160, 96, 96, 96, 230, 150, 140, 128, 64, 128, 110, 110, 110, 244, 35, 232, 150, 100, 100, 70, 70, 70, 150, 120, 90, 220, 20, 60, 255, 0, 0, 255, 0, 100, 255, 0, 200, 200, 128, 128, 255, 255, 255, 64, 170, 64, 230, 160, 50, 70, 130, 180, 190, 255, 255, 152, 251, 152, 107, 142, 35, 0, 170, 30, 255, 255, 128, 250, 0, 30, 100, 140, 180, 220, 220, 220, 220, 128, 128, 222, 40, 40, 100, 170, 30, 40, 40, 40, 33, 33, 33, 100, 128, 160, 142, 0, 0, 70, 100, 150, 210, 170, 100, 153, 153, 153, 128, 128, 128, 0, 0, 80, 250, 170 , 30, 192, 192, 192, 220, 220, 0, 140, 140, 20, 119, 11, 32, 150, 0, 255, 0, 60, 100, 0, 0, 142, 0, 0, 90, 0, 0, 230, 0, 80, 100, 128, 64, 64, 0, 0, 110, 0, 0, 70, 0, 0 , 192, 32, 32, 32, 120, 10, 10, 0, 0, 0]
def _zero_pad_palette(palette):
    """Extend a flat [r, g, b, ...] palette in place with zeros up to the
    full 256 * 3 values expected by PIL's Image.putpalette."""
    palette.extend([0] * (256 * 3 - len(palette)))

# The same pad-with-zeros loop was copy-pasted four times; use one helper.
_zero_pad_palette(pascal_PALETTE)
_zero_pad_palette(ade_PALETTE)
_zero_pad_palette(cityscapes_palette)
_zero_pad_palette(camvid_palette)
# mvd_palette is deliberately not padded (its padding code was commented out
# in the original source).
def pas_colorize_mask(mask):
    """Render a 2-D array of Pascal VOC class indices as a palettized PIL image."""
    paletted = Image.fromarray(np.uint8(mask)).convert('P')
    paletted.putpalette(pascal_PALETTE)
    return paletted
def ade_colorize_mask(mask):
    """Render a 2-D array of ADE20K class indices as a palettized PIL image."""
    paletted = Image.fromarray(np.uint8(mask)).convert('P')
    paletted.putpalette(ade_PALETTE)
    return paletted
def cityscapes_colorize_mask(mask):
    """Render a 2-D array of Cityscapes class indices as a palettized PIL image."""
    paletted = Image.fromarray(np.uint8(mask)).convert('P')
    paletted.putpalette(cityscapes_palette)
    return paletted
def camvid_colorize_mask(mask):
    """Render a 2-D array of CamVid class indices as a palettized PIL image."""
    paletted = Image.fromarray(np.uint8(mask)).convert('P')
    paletted.putpalette(camvid_palette)
    return paletted
def mvd_colorize_mask(mask):
    """Render a 2-D array of Mapillary Vistas class indices as a palettized PIL image."""
    paletted = Image.fromarray(np.uint8(mask)).convert('P')
    paletted.putpalette(mvd_palette)
    return paletted
class VOCColorize(object):
    """Map a 2-D array of Pascal VOC class labels to a (3, H, W) uint8 color image."""
    def __init__(self, n=22):
        """Build the first `n` colors of the VOC color map.

        Fix: the original always generated voc_color_map(22) regardless of
        `n`, so any n > 22 silently produced a truncated color map. Color i
        depends only on i, so generating n colors is fully backward
        compatible for n <= 22.
        """
        self.cmap = voc_color_map(n)
        self.cmap = torch.from_numpy(self.cmap[:n])
    def __call__(self, gray_image):
        """Colorize `gray_image` (H, W int labels); label 255 is void -> white."""
        size = gray_image.shape
        color_image = np.zeros((3, size[0], size[1]), dtype=np.uint8)
        for label in range(0, len(self.cmap)):
            mask = (label == gray_image)
            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]
        # handle void
        mask = (255 == gray_image)
        color_image[0][mask] = color_image[1][mask] = color_image[2][mask] = 255
        return color_image
def voc_color_map(N=256, normalized=False):
    """Build the standard Pascal VOC color map.

    Returns an (N, 3) array: uint8 values in [0, 255] by default, or
    float32 values in [0, 1] when `normalized`. Color i is derived by
    spreading the bits of i across the three channels.
    """
    def bit(value, idx):
        return (value >> idx) & 1

    cmap = np.zeros((N, 3), dtype='float32' if normalized else 'uint8')
    for index in range(N):
        r = g = b = 0
        code = index
        for shift in range(7, -1, -1):
            r |= bit(code, 0) << shift
            g |= bit(code, 1) << shift
            b |= bit(code, 2) << shift
            code >>= 3
        cmap[index] = (r, g, b)
    return cmap / 255 if normalized else cmap
| 7,962 | 5,298 |
from django.conf import settings
from rest_framework.settings import import_from_string
from .mixins import FileContentMixin, DocStringContentMixin, MarkupProcessMixin, NoProcessMixin, SafeProcessMixin
APIDOC_DEFAULT_DOCUMENTER_CLASSES = getattr(
settings,
'APIDOC_DEFAULT_DOCUMENTER_CLASSES',
['rest_framework_apidoc.apidoc.MDDocStringsDocumenter']
)
def get_view_description(view_cls, html=False, request=None):
    """Return the first non-empty description produced by the view's documenters.

    Uses the view's own `documenter_classes` when present; otherwise
    instantiates the classes configured by the
    APIDOC_DEFAULT_DOCUMENTER_CLASSES setting. Returns "" when no documenter
    produces anything.
    """
    if hasattr(view_cls, 'documenter_classes'):
        documenters = [cls() for cls in view_cls.documenter_classes]
    else:
        # Fix: the setting name reported by import_from_string on failure was
        # "APIDOC_DEFAULT_DOCUMENTER_CLASS" (missing the trailing "ES").
        documenters = [
            import_from_string(cls, "APIDOC_DEFAULT_DOCUMENTER_CLASSES")()
            for cls in APIDOC_DEFAULT_DOCUMENTER_CLASSES
        ]
    for documenter in documenters:
        description = documenter.get_description(view_cls, html, request)
        if description:
            return description
    return ""
class Documenter(object):
    """Base documenter: subclasses supply get_content() and process()."""
    def get_description(self, view_cls, html=True, request=None):
        """Return the content for `view_cls`, processed when html is True.

        Fix: the plain-text branch called get_content(..., request=None),
        silently discarding the request that the caller passed in.
        """
        if html:
            return self.process(self.get_content(view_cls, html, request))
        return self.get_content(view_cls, html, request)
# Concrete documenters. Each one pairs a content source (a sidecar file with
# `extension`, or the view docstring) with a rendering strategy (markup
# conversion, plain text, or safe raw HTML) via the mixins.
class RSTFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
    extension = ".rst"
    markup = "restructuredtext"
class RSTDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
    markup = "restructuredtext"
class MDFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
    extension = ".md"
    markup = "markdown"
class MDDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
    markup = "markdown"
class TextileFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
    extension = ".textile"
    markup = "textile"
class TextileDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
    markup = "textile"
class TxtFilesDocumenter(Documenter, FileContentMixin, NoProcessMixin):
    extension = ".txt"
class TxtDocStringsDocumenter(Documenter, DocStringContentMixin, NoProcessMixin):
    pass
class HtmlFilesDocumenter(Documenter, FileContentMixin, SafeProcessMixin):
    extension = ".html"
class HtmlDocStringsDocumenter(Documenter, DocStringContentMixin, SafeProcessMixin):
    pass
| 2,342 | 744 |
from malcolm.modules.ADCore.parts import ExposureDetectorDriverPart
class AndorDriverPart(ExposureDetectorDriverPart):
    """Andor-specific detector driver part.

    Works around the driver sometimes reporting a wrong acquirePeriod after
    configuration by rewriting it from exposure + readout time.
    """
    def setup_detector(self, child, completed_steps, steps_to_do, params=None):
        # Run the generic setup first and wait for it to complete, so that
        # `exposure` holds its final value before acquirePeriod is rewritten.
        fs = super(AndorDriverPart, self).setup_detector(
            child, completed_steps, steps_to_do, params)
        child.wait_all_futures(fs)
        # Need to reset acquirePeriod as it's sometimes wrong
        fs = child.acquirePeriod.put_value_async(
            child.exposure.value + self.readout_time.value)
        return fs
| 541 | 166 |
import json
from typing import Callable, TypeVar, cast
from .constants import CUSTOM_LOG_FORMAT, CUSTOM_EVENT_NAME_MAP, CUSTOM_PAGE_NAME_MAP
from datetime import datetime
import logging
from airflow.settings import TIMEZONE
from airflow.utils.session import create_session
import functools
T = TypeVar("T", bound=Callable)
_logger = logging.getLogger(__name__)
def access_log(event, page, msg):
    '''
    Decorator that writes an access-log line for the wrapped endpoint so the
    logs can be scraped and analysed; see constants.py for the log format and
    the legal values of `event` and `page`.
    Example:
        @access_log('VIEW', 'CURVES', 'view the curve-comparison page')
    '''
    def decorator(func: T) -> T:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Log the raw call arguments before invoking the wrapped view.
            _logger.info(repr(args))
            _logger.info(repr(kwargs))
            ret = func(*args, **kwargs)
            # NOTE(review): imported inside the wrapper, presumably because
            # flask_login needs the app to be set up first -- confirm.
            from flask_login import current_user  # noqa: F401
            # Fall back to 'anonymous' when there is no active logged-in user.
            full_msg = CUSTOM_LOG_FORMAT.format(
                datetime.now(tz=TIMEZONE).strftime("%Y-%m-%d %H:%M:%S"),
                current_user if current_user and current_user.is_active else 'anonymous',
                getattr(current_user, 'last_name', '') if current_user and current_user.is_active else 'anonymous',
                CUSTOM_EVENT_NAME_MAP[event], CUSTOM_PAGE_NAME_MAP[page], msg
            )
            _logger.info(full_msg)
            return ret
        return cast(T, wrapper)
    return decorator
| 1,336 | 471 |
import os
import re
from setuptools import setup
currentPath = os.path.abspath(os.path.dirname(__file__))
def find_version(filename):
    """Extract the package __version__ string from `filename`.

    Only the first 2048 bytes are scanned, so the assignment must sit near
    the top of the module. Raises RuntimeError when no version is found.
    """
    with open(filename, 'r') as source:
        head = source.read(2048)
    for line in head.splitlines():
        found = re.fullmatch(r'__version__ = [\'"]([\w\d.\-]*)[\'"]', line)
        if found:
            return found.group(1)
    raise RuntimeError('Unable to find version string.')
# Get the long description from the README file
with open(os.path.join(currentPath, 'README.md'), 'r') as f:
    longDescription = f.read()
longDescription = '\n' + longDescription
# Requirement groups; 'dev' and 'all' unions are assembled in extras_require.
REQUIREMENTS = {
    'core': [
        'PyQt5',
        'click',
        'pyyaml',
    ],
    'test': [
        'pytest',
        'pytest-cov',
    ],
    'dev': [
        # 'requirement-for-development-purposes-only',
    ],
    'doc': [
    ],
}
setup(name='pyqt5ac',
      version=find_version('pyqt5ac.py'),
      description='Python module to automatically compile UI and RC files in PyQt5 to Python files',
      long_description=longDescription,
      long_description_content_type='text/markdown',
      author='Addison Elliott',
      author_email='addison.elliott@gmail.com',
      url='https://github.com/addisonElliott/pyqt5ac',
      license='MIT License',
      install_requires=REQUIREMENTS['core'],
      extras_require={
          **REQUIREMENTS,
          # The 'dev' extra is the union of 'test' and 'doc', with an option
          # to have explicit development dependencies listed.
          'dev': [req
                  for extra in ['dev', 'test', 'doc']
                  for req in REQUIREMENTS.get(extra, [])],
          # The 'all' extra is the union of all requirements.
          'all': [req for reqs in REQUIREMENTS.values() for req in reqs],
      },
      python_requires='>=3',
      py_modules=['pyqt5ac'],
      entry_points={
          'console_scripts': ['pyqt5ac = pyqt5ac:cli']
      },
      keywords='pyqt pyqt5 qt qt5 qt auto compile generate ui rc pyuic5 pyrcc5 resource designer creator automatic',
      classifiers=[
          'License :: OSI Approved :: MIT License',
          'Topic :: Scientific/Engineering',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8'
      ],
      project_urls={
          'Source': 'https://github.com/addisonElliott/pyqt5ac',
          'Tracker': 'https://github.com/addisonElliott/pyqt5ac/issues',
      }
      )
| 2,674 | 854 |
from setuptools import setup
# Packaging metadata for the pydale transfer-learning package.
setup(
    name='pydale',
    version='0.1.0a1',
    description='A Transfer Learning Python package',
    url='https://github.com/sz144/TPy',
    author='Shuo Zhou',
    author_email='szhou20@sheffield.ac.uk',
    license='MIT License',
    packages=['pydale'],
    install_requires=['numpy', 'scipy', 'pandas',
                      'scikit-learn', 'cvxopt', 'osqp'
                      ],
    classifiers=[
        "Programming Language :: Python :: 3",
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 799 | 254 |
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import kendalltau,chi2_contingency, pearsonr
import pandas as pd
def plot_var_num(dataset,variabile):
    """ Plot a numeric variable.
    --------------
    Parameters:
    dataset: the dataframe to read from
    variabile: name of the column to plot
    --------------
    Output
    Draws a histogram and a boxplot of the variable side by side.
    When assigned to a Python name, returns the describe() summary
    (quartiles, mean, min, max) of the distribution."""
    descrizione = dataset[variabile].describe()
    plt.figure(figsize = (14,8))
    plt.subplot(1,2,1)
    sns.histplot(dataset[variabile], color = "red")
    plt.title("Istogramma della variabile {}".format(variabile))
    plt.subplot(1,2,2)
    sns.boxplot(dataset[variabile])
    plt.title("Boxplot della variabile {}".format(variabile))
    plt.show()
    return descrizione
def dipendenza_correlazione(dataset, variabile1, variabile2, p_value_lv = 0.05):
    """ Test the dependence between two variables.
    ---------------------
    Parameters:
    dataset: the dataframe to read from
    variabile1, variabile2: names of the columns to test
    p_value_lv: significance level
    ---------------------
    Output:
    A one-row DataFrame whose statistic depends on the column dtypes:
    * both int64  -> Pearson correlation coefficient and p-value
    * both object -> chi-squared test p-value on the contingency table
    * mixed       -> Kendall's tau correlation and p-value
    plus a boolean column "sign_<p_value_lv>" flagging p_value < p_value_lv.
    NOTE(review): float64 columns fall through to the Kendall branch because
    the dtype checks only match "int64" -- confirm this is intended.
    """
    x = dataset[variabile1]
    y = dataset[variabile2]
    # Fix: each statistic was computed twice (once for the coefficient, once
    # for the p-value) and the significance logic was triplicated.
    if x.dtypes == "int64" and y.dtypes == "int64":
        corr, p_value = pearsonr(x, y)
        data = pd.DataFrame(data = {"corr": [corr], "p_value": [p_value]}, index = ["Pearson"])
    elif x.dtypes == "object" and y.dtypes == "object":
        p_value = chi2_contingency(pd.crosstab(x, y))[1]
        data = pd.DataFrame(data = {"p_value": [p_value]}, index = ["chi2"])
    else:
        correlation, p_value = kendalltau(x, y)
        data = pd.DataFrame(data = {"correlation": [correlation], "p_value": [p_value]},
                            index = ["Kendall"])
    data["sign_{}".format(p_value_lv)] = [bool(data["p_value"][0] < p_value_lv)]
    return data
def analisi_variabili_categoriche(dataset, variabile1, variabile2, normalize = "index"):
    """ Produces two plots:
    * the barplot of variabile1
    * the barplot of variabile1 conditioned on variabile2
    * When assigned to a Python name, returns the share of observations
    for each category of variabile1
    * When assigned to a Python name, also returns the contingency table
    between variabile1 and variabile2
    ----------------------------
    Parameters:
    dataset: the dataframe to read from
    variabile1: name of the column to plot
    variabile2: name of the conditioning column
    normalize: how to normalize the contingency table
    ["index" by row, "column" by column, "all" both, False for no
    normalization]
    """
    conteggio = dataset[variabile1].value_counts()/len(dataset)
    tabella = pd.crosstab(dataset[variabile1], dataset[variabile2], normalize = normalize)
    plt.figure(figsize =(15,22))
    plt.subplot(2,1,1)
    sns.countplot(y = dataset[variabile1])
    plt.title("""Barplot della variabile "{}" """.format(variabile1))
    plt.subplot(2,1,2)
    sns.countplot(y = dataset[variabile1], hue = dataset[variabile2])
    plt.title("""Barplot della variabile "{}" condizionata alla variabile {} """.format(variabile1,variabile2))
    return conteggio,tabella
def unificazione_categorie(dataset, variabile, categoria_da_trasf, trasformazione):
    """ Merge two or more categories of one column into a single category.
    ------------------
    Parameters:
    dataset: the dataframe to transform
    variabile: name of the column whose categories are rewritten
    categoria_da_trasf: list of the categories to rewrite
    trasformazione: the category to assign in their place
    ------------------
    Output:
    A new dataframe with the categories of `variabile` unified; the input
    dataframe is left untouched.
    Fix: the original called dataset.replace(...), which rewrote matching
    values in *every* column, not only in `variabile` as documented.
    """
    dataset = dataset.copy()
    dataset[variabile] = dataset[variabile].replace(list(categoria_da_trasf), trasformazione)
    return dataset
def histplot_per_categorie(dataset, variabile1, variabile_divisione):
    """ Draws one histogram of the variable of interest per category of the
    splitting variable, i.e. the dataset is partitioned by the categories
    of variabile_divisione
    -----------------
    Parameters:
    dataset: the dataframe to read from
    variabile1: name of the numeric column whose distribution is of interest
    variabile_divisione: name of the column whose categories partition
    the dataset
    -------------------
    """
    lunghezza = len(dataset[variabile_divisione].value_counts().index)
    plt.figure(figsize = (20,10))
    for i in range(1,lunghezza+1):
        plt.subplot(1, lunghezza,i)
        data = dataset[dataset[variabile_divisione] == dataset[variabile_divisione].value_counts().index[i-1]]
        sns.histplot(data[variabile1])
        plt.title("Istogramma della variabile '{}', data la categoria {}".format(variabile1,
        dataset[variabile_divisione].value_counts().index[i-1]))
def histplot_1_per_categorie(dataset, variabile1, variabile_divisione, x = 1, y = 0):
    """ Draws one histogram of `variabile1` per category of
    `variabile_divisione`, partitioning the dataset by those categories.
    -----------------
    Parameters:
    dataset: the dataframe to read from
    variabile1: name of the numeric column whose distribution is of interest
    variabile_divisione: name of the column whose categories partition
    the dataset
    x,y: subplot grid rows and columns respectively
    (Note: x * y must be greater than or equal to the number of categories
    of the splitting variable; with the default y == 0 all histograms are
    placed on a single row, one column per category)
    -------------------"""
    categorie = dataset[variabile_divisione].value_counts().index
    # Fix: the two branches of the original were identical except for the
    # subplot grid; compute the grid once and keep a single loop.
    righe, colonne = (1, len(categorie)) if y == 0 else (x, y)
    plt.figure(figsize = (20,10))
    for posizione, categoria in enumerate(categorie, start=1):
        plt.subplot(righe, colonne, posizione)
        data = dataset[dataset[variabile_divisione] == categoria]
        sns.histplot(data[variabile1])
        plt.title("Istogramma della variabile '{}', data la categoria {}".format(variabile1, categoria))
| 8,738 | 2,745 |
class NoMatchException(Exception):
    """
    This exception is thrown when we fail to find a Spotify equivalent for a google play music track
    Args:
        message (str): Description of the exception
    """
    # Fix: removed the redundant __init__ that only forwarded to
    # Exception.__init__; the base class already stores the message.
class SpotifyMalformedTrackException(Exception):
    """
    This exception is thrown when we attempt to create a SpotifyTrack using a malformed track object
    Args:
        message (str): Description of the exception
    """
    # Fix: removed the redundant __init__ that only forwarded to
    # Exception.__init__; the base class already stores the message.
| 592 | 155 |
from django.urls import path
from . import views
# Routes: auth pages, HTML views, and a small JSON API under api/v1/.
urlpatterns = [
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),
    path("", views.index, name="index"),
    path("open", views.openPage, name="openPage"),
    path("close", views.closePage, name="closePage"),
    path("news", views.news, name="news"),
    path("stocks", views.stocks, name="stocks"),
    # Stock detail page keyed by ticker symbol, e.g. /stocks/AAPL.
    path("stocks/<str:symbol>", views.stockinfo, name="stockinfo"),
    path("api/v1/open", views.open, name="open"),
    path("api/v1/close", views.close, name="close")
]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates the Images table, one row per photograph
    # with the serial-number related EXIF/MakerNote fields extracted from it.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Images',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('img_file_name', models.CharField(max_length=200)),
                ('EXIF_LensSerialNumber', models.CharField(max_length=200)),
                ('MakerNote_SerialNumberFormat', models.CharField(max_length=200)),
                ('EXIF_BodySerialNumber', models.CharField(max_length=200)),
                ('MakerNote_InternalSerialNumber', models.CharField(max_length=200)),
                ('MakerNote_SerialNumber', models.CharField(max_length=200)),
                ('Image_BodySerialNumber', models.CharField(max_length=200)),
                ('add_date', models.DateTimeField(auto_now_add=True)),
                ('pub_date', models.DateTimeField(verbose_name=b'date published')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 1,198 | 338 |
# Collections namedTuple nr7
import collections
from collections import namedtuple
# Fix: field names were passed as a dict ({"x": 0, ...}); namedtuple only
# iterates the keys, and relying on dict iteration for the field *order*
# is fragile -- pass an explicit sequence of names instead.
Point = namedtuple("Point", ["x", "y", "z"])
newP = Point(3, 4, 5)
print(newP.x, newP.y, newP.z)
print(newP._fields)
# _replace returns a new tuple with the given fields changed.
newP = newP._replace(x=6)
print(newP)
# _make builds a Point from any iterable.
p2 = Point._make(["a", "b", "c"])
print(p2)
| 295 | 131 |
import os
from pathlib import Path
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from jive.AJIVE import AJIVE
from jive.PCA import PCA
import warnings
import time, datetime
from random import shuffle
warnings.filterwarnings(action='once')
def getVarianceExplained(original, joint, individual, label):
    """Fractions of the squared Frobenius norm of `original` explained by the
    joint and individual AJIVE components, plus the residual.

    Returns a 3x1 DataFrame indexed Residual/Individual/Joint with the single
    column `label`.
    """
    from numpy.linalg import norm
    total = norm(original) ** 2
    joint_share = norm(joint) ** 2 / total
    individual_share = norm(individual) ** 2 / total
    shares = [1 - joint_share - individual_share, individual_share, joint_share]
    return pd.DataFrame(shares, index=['Residual', 'Individual', 'Joint'], columns=[label])
def plotVarianceExplained(df, figsize=[10,6]):
    """Stacked bar chart of the variance shares from getVarianceExplained.

    NOTE(review): plt.figure() creates a figure here, but df.plot.bar(...)
    with figsize= opens its own new figure, so the returned handle is
    presumably the empty first figure -- verify before relying on it.
    """
    # NOTE: mutable default argument figsize=[10,6] is shared across calls;
    # harmless here since it is never mutated.
    var_plot = plt.figure(figsize=figsize, facecolor='w')
    df.plot.bar(stacked=True, figsize=figsize, table=True)
    plt.xticks([])
    plt.tight_layout()
    return var_plot
# Command-line driver: split one matrix randomly in half and run AJIVE on
# the two halves, saving a diagnostic plot, and the variance-explained table.
parser = argparse.ArgumentParser(description='Run AJIVE')
parser.add_argument('-a', required=True, type=str, help='input matrix 1')
parser.add_argument('-ra', required=True, type=int, help='initial signal rank 1')
parser.add_argument('-rb', required=True, type=int, help='initial signal rank 2')
parser.add_argument('-n', required=True, type=str, help='name prefix')
parser.add_argument('-o', required=True, type=str, help='output files path')
args = parser.parse_args()
a_path = args.a
ra = args.ra
rb = args.rb
name_prefix = args.n
output_dir = Path(args.o)
#Create output directory if it doesn't exist
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
#Read in files
a = pd.read_csv(a_path, index_col=0)
# Shuffle the rows so the two halves below form a random split.
a = a.sample(frac=1)
#Randomly split in half
a1 = a.iloc[:a.shape[0]//2]
a2 = a.iloc[a.shape[0]//2:]
# AJIVE blocks are features-by-samples here, hence the transposes.
a = a1.T
b = a2.T
#Run AJIVE
jive_start = time.time()
ajive = AJIVE(init_signal_ranks={'A': ra, 'B': rb})
ajive.fit(blocks={'A': a, 'B': b})
jive_end = time.time()
jive_time = str(datetime.timedelta(seconds=jive_end-jive_start))
print('AJIVE time: ' + jive_time)
#Diagnostic Plot
sns.set_context('notebook', font_scale=1)
diag_plot = plt.figure(0, figsize=[10,10])
ajive.plot_joint_diagnostic()
diag_plot.savefig(os.path.join(output_dir, name_prefix + '_diagnostic.png'))
#Save AJIVE matrices
a_joint_full = pd.DataFrame(ajive.blocks['A'].joint.full_, index=a.index, columns=a.columns)
a_individual_full = pd.DataFrame(ajive.blocks['A'].individual.full_, index=a.index, columns=a.columns)
b_joint_full = pd.DataFrame(ajive.blocks['B'].joint.full_, index=b.index, columns=b.columns)
b_individual_full = pd.DataFrame(ajive.blocks['B'].individual.full_, index=b.index, columns=b.columns)
#Variance Plot
plt_df = getVarianceExplained(a, a_joint_full, a_individual_full, 'A').join(getVarianceExplained(b, b_joint_full, b_individual_full, 'B')).T
plt_df.to_csv(os.path.join(output_dir, name_prefix + '_var_explained.csv'))
| 2,879 | 1,073 |
from ..limit import *
class Any(Limit):
    """Composite limit that counts as exhausted as soon as *any* child is."""
    slug = 'any'
    name = 'Limit: Any'
    def __init__(self, **kwargs):
        super().__init__(None)
        self.args = kwargs.values()
        # Children share this composite's limits mapping.
        for arg in self.args:
            arg.limits = self.limits
    def exhausted(self) -> bool:
        """True when at least one child limit is exhausted."""
        return any(arg.exhausted() for arg in self.args)
    def left(self) -> dict:
        """Merged remaining-capacity info from all children."""
        return {k: v for arg in self.args for k, v in arg.left().items()}
    def __info__(self):
        return {
            'slug': self.slug,
            'name': self.name,
            # Fix: `arg.__info()` was name-mangled to `arg._Any__info`
            # inside this class body and raised AttributeError at runtime;
            # the child method is `__info__`.
            '*args': [arg.__info__() for arg in self.args]
        }
# Public API of this module.
__all__ = [
    'Any'
]
| 643 | 214 |
# Triangle analyzer: read three segment lengths and report whether they can
# form a triangle (triangle inequality: each side strictly shorter than the
# sum of the other two).
print('-=-' * 15)
print('Analisador de Triângulos')
print('-=-' * 15)
sega = float(input('Primeiro Segmento: '))
segb = float(input('Segundo Segmento: '))
segc = float(input('Terceiro Segmento: '))
if sega < segb + segc and segb < sega + segc and segc < sega + segb:
    print('Os segmentos acima PODEM FORMAR um triângulo!')
else:
    # Fixed typo in user-facing message: "segmenstos" -> "segmentos".
    print('Os segmentos acima NÃO PODEM FORMAR um triângulo!')
| 396 | 167 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.urls import include, path
from django.contrib.auth import views as auth_views
urlpatterns = [
    path('login', auth_views.LoginView.as_view(), name='vcn-account-login'),
    # Fixed: the extra-kwargs dict `{'next_page': '/'}` passed as path()'s
    # third argument ends up in the view's self.kwargs and is ignored by the
    # class-based LogoutView, which reads its `next_page` class attribute.
    # Configure it via as_view() so the post-logout redirect actually works.
    path('logout', auth_views.LogoutView.as_view(next_page='/'), name='vcn-account-logout'),
    path('', include('dj_vcn_accounts.urls', namespace='dj_vcn_accounts')),
]
| 445 | 152 |
# /bin/hacken GameJam 2019, f0wL
"""This example lights up the third NeoPixel while button A is being pressed, and lights up the
eighth NeoPixel while button B is being pressed."""
from adafruit_circuitplayground.express import cpx
import random
import time
import math
cpx.pixels.brightness = 0.1

# NOTE(review): Circuit Playground Express has 10 NeoPixels (0-9) but
# randrange(1, 10) yields 1-9, so pixel 0 is never lit — confirm intended.
while True:
    rnd = random.randrange(1, 10, 1)    # pixel index
    rnd1 = random.randrange(0, 255, 1)  # red
    rnd2 = random.randrange(0, 255, 1)  # green
    rnd3 = random.randrange(0, 255, 1)  # blue
    cpx.pixels[rnd] = (rnd1, rnd2, rnd3)
    # Fixed: the original used `rnd < 5` / `rnd > 5`, so rnd == 5 triggered
    # neither branch and both buttons were dead for that draw; `else`
    # closes the gap.
    if rnd < 5:
        if cpx.button_b:
            cpx.play_file("dog.wav")
    else:
        if cpx.button_a:
            cpx.play_file("dog.wav")
    cpx.pixels.fill((0, 0, 0))
| 736 | 292 |
'''
For testing with the text version
'''
# 9x9 Sudoku grid; 0 marks an empty cell. Only the top-left cell is
# pre-filled, so this is essentially an empty puzzle for exercising solve().
b = [
    [1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
]
def print_board(board):
    """Print the board to stdout, ruling off the 3x3 sub-grids."""
    n_rows = len(board)
    n_cols = len(board[0])
    for r in range(n_rows):
        # Horizontal rule between bands of three rows.
        if r % 3 == 0 and r != 0:
            print("- - - - - - - - - - - -")
        for c in range(n_cols):
            # Vertical rule between stacks of three columns.
            if c % 3 == 0 and c != 0:
                print(" | ", end="")
            if c == 8:
                print(board[r][c])          # last column: value + newline
            else:
                print(board[r][c], end=" ")  # value + separating space
def find_empty_cell(board):
    """
    Return (row, col) of the first empty cell (value 0), scanning row-major;
    return None when the board is full.

    Returning None instead of the original False is the idiomatic "no
    result" sentinel; both are falsy, so truthiness-based callers (e.g.
    solve()) are unaffected.
    """
    for row in range(len(board)):
        for col in range(len(board[0])):
            if board[row][col] == 0:
                return (row, col)
    return None
def is_valid(board, target, pos):
    """
    Return True if `target` may legally occupy `pos` (row, col): the value
    must not appear elsewhere in the same row, column, or 3x3 sub-grid.
    The cell at `pos` itself is excluded from every comparison.
    """
    row, col = pos
    # Check row and col (skipping the cell at `pos` itself)
    for i in range(len(board[0])):
        if (col != i and target == board[row][i]) or \
           (row != i and target == board[i][col]):
            return False
    # Check sub-grid. Fixed: the original compared the *local* sub-grid
    # indices (i, j) against the absolute `pos`, so in every sub-grid except
    # the top-left one the cell was wrongly compared against itself; we
    # compare absolute coordinates instead.
    brow = row // 3
    bcol = col // 3
    for i in range(3):
        for j in range(3):
            cell = (brow * 3 + i, bcol * 3 + j)
            if cell != pos and target == board[cell[0]][cell[1]]:
                return False
    return True
def solve(board):
    """
    Solve the Sudoku `board` in place via backtracking.

    Empty cells hold 0. Returns True when solved; on failure returns False
    with the board restored to its input state.
    """
    empty = find_empty_cell(board)
    if not empty:
        return True  # no empty cell left: solved
    row, col = empty
    # Candidate digits are 1-9. Fixed: the original iterated range(0, 10);
    # trying 0 can "fill" a cell with the empty marker and then recurse on
    # the very same cell forever once no other zeros remain in its row,
    # column and sub-grid.
    for digit in range(1, 10):
        if is_valid(board, digit, (row, col)):
            board[row][col] = digit
            if solve(board):
                return True
            board[row][col] = 0  # backtrack
    return False
from .context import Context as _Context
from .user import User
from .document import Document
from .order import Order
from .order_fulfillment import OrderFulfillment
from .prices import public as prices
from .catalog import public as catalog
from .taxes import public as tax
from .core.enums import DocumentStatus as status
from .core.enums import Gender as gender
from .core.enums import License as license
from .core.enums import PriceType as pricetype
from .core.enums import CatalogType as catalogtype
from .core.enums import TaxType as taxtype
from .core.enums import BookBinding as binding
from .core.enums import IsbnType as _isbn
from .core.enums import Subscription as subscription
from .core.enums import PublicationType as publication
from .core.enums import OnixStyle as _onixstyle
from .core.enums import OnixType as _onixtype
from .core.enums import OnixStatus as _onixstatus
from .context.onix import OnixProduct as _onixproduct
from .core.enums import AdultFlag as adult_flag
from .core.enums import ChildrenFlag as children_flag
from .core.enums import AcademicCategory as academic_category
from .core.enums import Language as language
from .core.enums import DRM as drm
from .core.enums import VLBCategory as vlb_category
from .core.enums import Country as country
from .core.enums import ContributorRole as role
from .core.enums import BisacCode as bisac
from .core.enums import Currency as currency
from .core.enums import EBookFileType as _ebook_filetype
from .core.enums import PreviewFileType as _preview_filetype
from .core.enums import FileType as _filetype
from .core.enums import EventTarget as _event_target
from .core.enums import EventAction as _event_action
from .core.enums import EventType as _event_type
from .core.enums import EventResult as _event_result
from .core.enums import ProvisionRuleAlgorithm as _provision_algorithm
from .core.enums import ProvisionRuleRole as _provision_role
from .core.enums import ProvisionChannelType as _provision_channel_type
from .core.enums import ProvisionChannelBase as _provision_channel_base
from .core.enums import UsersSearchType as users_search_type
from .core.enums import PreviewDisplayMode as _preview_display_mode
from .core.enums import PreviewTOCVisible as _preview_toc_visible
from .core.enums import ProfileShow as _profile_show
from .core.enums import AssetsCoverType as _assets_cover_type
from .core.enums import OrderItemType as _order_item_type
from .core.enums import ShippingType as _shipping_type
from .core.enums import ShippingStatus as _shipping_status
from .core.enums import ShippingLevel as _shipping_level
from .core.enums import ProcessingType as processing
from .assets import AssetNotReady, AssetExpired, AssetNotFound
from .gjp import ObjectNotFound, TemporaryNotAvailable, AssetCreationError
from .order import Address as _address
from .order import Person as _person
from .content import Content
class order(object):
    # Namespace: order-related value/enum aliases for the public API.
    address = _address
    seller = _person
    buyer = _person

    class item(object):
        type = _order_item_type

    class shipping(object):
        type = _shipping_type
        status = _shipping_status
        level = _shipping_level


class isbn(object):
    # Namespace: ISBN enum values grouped by product form.
    class book(object):
        all = _isbn.book

    class ebook(object):
        all = _isbn.ebook
        epub = _isbn.epub
        pdf = _isbn.pdf
        mobi = _isbn.mobi
        ibooks = _isbn.ibooks

    class audiobook(object):
        all = _isbn.audiobook

    class software(object):
        all = _isbn.software


class files(object):
    # Namespace: file-type enums.
    ebook_filetype = _ebook_filetype
    preview_filetype = _preview_filetype
    filetype = _filetype


class assets(object):
    # Namespace: asset-related enums.
    cover = _assets_cover_type


class events(object):
    # Namespace: event enums.
    target = _event_target
    action = _event_action
    type = _event_type
    result = _event_result


class onix(object):
    # Namespace: ONIX export enums and product wrapper.
    style = _onixstyle
    type = _onixtype
    status = _onixstatus
    product = _onixproduct


class provision(object):
    # Namespace: provisioning-rule enums.
    algorithm = _provision_algorithm
    role = _provision_role

    class channel(object):
        type = _provision_channel_type
        base = _provision_channel_base


class preview(object):
    # Namespace: preview-display enums.
    display_mode = _preview_display_mode
    toc_visible = _preview_toc_visible
def context(api_key=None,
            host="api.openpublishing.com",
            auth=None,
            timeout=30,
            proxies=None,
            verify=None,
            cert=None,
            log=None,
            validate_json = False):
    """
    Generate Context object.

    Keyword arguments destined for the underlying `requests` calls (auth,
    timeout, proxies, verify, cert) are forwarded only when explicitly set;
    the host is normalized to an https:// URL (plain http:// is upgraded).
    """
    candidates = {
        'auth': auth,
        'timeout': timeout,
        'proxies': proxies,
        'verify': verify,
        'cert': cert,
    }
    requests_kwargs = {key: value
                       for key, value in candidates.items()
                       if value is not None}

    if host.startswith('http://'):
        host = 'https://' + host[len('http://'):]
    elif not host.startswith('https://'):
        host = 'https://' + host

    return _Context(host=host,
                    api_key=api_key,
                    log=log,
                    validate_json=validate_json,
                    requests_kwargs=requests_kwargs)
class profile(object):
    # Namespace: profile-visibility enum.
    show = _profile_show
| 5,321 | 1,555 |
"""
Parse, diff, and upload sample manifests.
Manifests are listings of known samples with associated barcodes and other,
minimal metadata. They are usually produced by the lab processing collection
tubes and contain the link between encounter survey data and molecular biology
results.
The workflow for processing new or updated manifests is generally:
parse → diff (usually) → upload → etl
The first three correspond to subcommands below; the last to the "manifest"
subcommand of the "etl" command.
"""
import click
import fnmatch
import logging
import pandas
import re
import yaml
from functools import reduce
from deepdiff import DeepHash
from hashlib import sha1
from os import chdir
from os.path import dirname
from typing import Iterable, List, Optional, Set, Tuple, Union
from id3c.cli import cli
from id3c.cli.io import LocalOrRemoteFile, urlopen
from id3c.cli.io.google import *
from id3c.cli.io.pandas import read_excel
from id3c.db.session import DatabaseSession
from id3c.json import dump_ndjson, load_ndjson
from id3c.utils import format_doc
LOG = logging.getLogger(__name__)

# Record key under which per-row provenance (workbook, sha1sum, sheet, row)
# is stored; ignored when diffing manifests.
PROVENANCE_KEY = "_provenance"

# Destination field names managed by the parser itself; extra_columns may
# not target these.
RESERVED_COLUMNS = {"sample", "collection", "date"}
@cli.group("manifest", help = __doc__)
def manifest():
pass
@manifest.command("parse")
@click.argument("workbook", metavar = "<filepath>")
@click.option("--sheet",
metavar = "<name>",
help = "Name of the workbook sheet to read",
required = True)
@click.option("--sample-column",
metavar = "<column>",
help = "Name of the single column containing sample barcodes. "
"Must match exactly; shell-style glob patterns are supported.",
required = False)
@click.option("--collection-column",
metavar = "<column>",
help = "Name of the single column containing collection barcodes. "
"Must match exactly; shell-style glob patterns are supported.",
required = False)
@click.option("--date-column",
metavar = "<column>",
help = "Name of the single column containing the sample collected date.",
required = False)
@click.option("--sample-type",
metavar = "<type>",
help = "The type of sample within this manifest. "
"Only applicable to samples from self-test kits.",
type=click.Choice(["utm", "rdt"]),
required = False)
@click.option("--extra-column", "extra_columns",
metavar = "<field>:<column>|<field>:{…}",
help = "Name of an additional <column> to extract into manifest record <field>. "
"Must match exactly; shell-style glob patterns are supported. "
"May be specified multiple times. "
"Option value is parsed as a YAML fragment, so additional options supported by the sibling command \"parse-with-config\" may be inlined for testing, but you're likely better off using a config file at that point.",
multiple = True)
@click.option("--row-filter",
metavar = "<query>",
help = "The pandas query to filter rows (using the python engine) in the manifest. "
"Column names refer to columns in the manifest itself. "
"Example: `corrective action`.notnull() and `corrective action`.str.lower().str.startswith(\"discard\") ",
required = False)
def parse(**kwargs):
"""
Parse a single manifest workbook sheet.
<filepath> must be a path or URL to an Excel workbook or Google Sheets
spreadsheet with at least one sheet in it, identified by name using the required option --sheet.
Supported URL schemes include http[s]:// and s3://, as well as others.
The --sample-column option specifies the name of the column
containing the sample barcode. The --collection-column option specifies
the name of the column containing the collection barcode. You must supply one
or both of those options.
The --date-column specifies the name of the column containing the sample collected date.
Other columns may be extracted into the manifest records as desired using the
--extra-column option.
The row-filter entry specifies a pandas query to filter
(using the python engine) rows in the manifest. Column names refer to columns
in the manifest itself.
Example: `corrective action`.notnull() and `corrective action`.str.lower().str.startswith("discard")
Manifest records are output to stdout as newline-delimited JSON records.
You will likely want to redirect stdout to a file.
"""
kwargs["extra_columns"] = [
(dst, yaml.safe_load(src))
for dst, src
in [arg.split(":", 1) for arg in kwargs["extra_columns"]]
]
manifest = _parse(**kwargs)
dump_ndjson(manifest)
@manifest.command("parse-using-config")
@click.argument("config_file",
metavar = "<config.yaml>",
type = click.File("r"))
def parse_using_config(config_file):
"""
Parse multiple manifest sheets specified by a config file.
<config.yaml> must be a file with at least one YAML document in it. Each
document corresponds closely to the command-line options taken by the
"parse" command (a sibling to this command). For example, the following
configuration contains two documents:
\b
---
workbook: OneDrive/SFS Prospective Samples 2018-2019.xlsx
sheet: HMC
sample_column: "Barcode ID*"
date_column: "Coll_date"
extra_columns:
collection:
name: "Collection ID*"
barcode: true
aliquots:
name: "Aliquot [ABC]"
multiple: true
date: "Collection date*"
aliquot_date: "Date aliquoted"
racks:
name: "Rack [ABC]*"
multiple: true
notes: "Notes"
\b
---
workbook: OneDrive/SFS Retrospective Samples 2018-2019.xlsx
sheet: HMC
sample_column: "Barcode ID*"
extra_columns:
aliquots:
name: "Aliquot [ABC]"
multiple: true
date: "Collection date*"
aliquot_date: "Date aliquoted"
racks:
name: "Rack [ABC]*"
multiple: true
test_results: "Test ResulTS"
...
A YAML document can also contain a list of workbooks that share the same
format:
\b
---
workbooks:
- s3://bucketname/seattleflu/bbi/2020_2021_sfs_aliquoting_01.xlsx
- s3://bucketname/seattleflu/bbi/2020_2021_sfs_aliquoting_02.xlsx
- s3://bucketname/seattleflu/bbi/2020_2021_sfs_aliquoting_03.xlsx
sheet: aliquoting
sample_column: sample_id
extra_columns:
barcode: sample_id
collection_date: collection_date
mrn: mrn
accession_no: accession
sample_origin: sample_origin
...
The sample_column entry specifies the name of the column
containing the sample barcode. The collection_column entry specifies
the name of the column containing the collection barcode. You must supply one
or both of those entries.
The date_column specifies the name of the column containing the sample collected date.
The row_filter entry specifies a pandas query to filter
(using the python engine) rows in the manifest. Column names refer to columns
in the manifest itself.
Example: `corrective action`.notnull() and `corrective action`.str.lower().str.startswith("discard")
The key: value pairs in "extra_columns" name destination record fields (as
the key) and source columns (as the value). For most source columns, a
simple string name (or shell-glob pattern) is enough. Other behaviour is
available by using a dictionary value.
To collect values from multiple source columns into one record field,
specify a dictionary like:
\b
field:
name: column_[abc]
multiple: true
To mark a field as containing unique barcodes, similar to the built-in
"sample_column" option, specify a dictionary like:
\b
field:
name: column
barcode: true
Barcode fields are checked for duplicates and any records containing a
duplicated value are dropped with a warning.
Relative paths in <config.yaml> are treated relative to the containing
directory of the configuration file itself.
All manifest records parsed are output to stdout as newline-delimited JSON
records. You will likely want to redirect stdout to a file.
"""
configs = list(yaml.safe_load_all(config_file))
if config_file.name != "<stdin>":
config_dir = dirname(config_file.name)
# dirname is the empty string if we're in the same directory as the
# config file.
if config_dir:
chdir(config_dir)
for config in configs:
kwargs_list = []
try:
workbooks = config.get("workbooks") or [config["workbook"]]
for workbook in workbooks:
kwargs = {
"workbook": workbook,
"sheet": config["sheet"],
"sample_column": config.get("sample_column"),
"collection_column": config.get("collection_column"),
"date_column": config.get("date_column"),
"extra_columns": list(config.get("extra_columns", {}).items()),
"sample_type": config.get("sample_type"),
"row_filter" : config.get("row_filter")
}
kwargs_list.append(kwargs)
except KeyError as key:
LOG.error(f"Required key «{key}» missing from config {config}")
raise key from None
for kwargs in kwargs_list:
dump_ndjson(_parse(**kwargs))
def _parse(*,
           workbook,
           sheet,
           sample_column = None,
           collection_column = None,
           date_column = None,
           # NOTE(review): mutable default — safe only because extra_columns
           # is never mutated here (only iterated).
           extra_columns: List[Tuple[str, Union[str, dict]]] = [],
           sample_type = None,
           row_filter: Optional[str] = None):
    """
    Internal function powering :func:`parse` and :func:`parse_using_config`.

    Reads one sheet of *workbook* (local path, URL, or Google Sheets
    document), maps its columns to manifest record fields, runs QC, and
    returns the records as a list of dicts, each carrying provenance under
    PROVENANCE_KEY.
    """
    if not sample_column and not collection_column:
        raise ValueError("You must specify the sample_column, the collection_column, or both.")

    disallowed_extra_columns = {dst for dst, src in extra_columns} & RESERVED_COLUMNS
    assert len(disallowed_extra_columns) == 0, \
        f"A reserved column name has been configured in extra_columns: {disallowed_extra_columns}"

    # Used to capture internal provenance metadata for data tracing
    digest = None

    # Determine if the workbook URL is for a Google Document and if so
    # retrieve the Google Sheets file as an Excel spreadsheet. Otherwise,
    # retrieve it using urlopen.
    google_docs_document_id = extract_document_id_from_google_url(workbook)

    if google_docs_document_id:
        LOG.debug(f"Reading Google Sheets document «{workbook}»")
        with export_file_from_google_drive(google_docs_document_id, GoogleDriveExportFormat.EXCEL) as file:
            workbook_bytes = file.read()
        # For Google documents the digest is derived from the etag, not the
        # exported bytes (which may vary between exports).
        etag = get_document_etag(google_docs_document_id)
        digest = sha1(etag.encode()).hexdigest()
    else:
        LOG.debug(f"Reading Excel workbook «{workbook}»")
        with urlopen(workbook, "rb") as file:
            workbook_bytes = file.read()
        digest = sha1(workbook_bytes).hexdigest()

    LOG.debug(f"Parsing sheet «{sheet}» in workbook «{workbook}»")

    # Read all columns as strings using our pandas wrapper
    manifest = read_excel(workbook_bytes, sheet_name = sheet)
    LOG.debug(f"Columns in manifest: {list(manifest.columns)}")

    # Strip leading/trailing spaces from values and replace missing values and
    # empty strings (possibly from stripping) with None so they are converted
    # to null in JSON.
    #
    # Note that the two .replace() calls can't be combined because the first
    # instance of NA → None will change the column dtype from string → object
    # and render subsequent comparisons to NA invalid.
    manifest = manifest.apply(
        lambda column: (
            column
                .str.strip()
                .replace({pandas.NA: ""})
                .replace({"": None, "na": None})))

    # If a filter query was provided filter the manifest rows
    # using the python engine.
    if row_filter:
        manifest = manifest.query(row_filter, engine="python")

    # Construct parsed manifest by copying columns from source to destination.
    # This approach is used to allow the same source column to end up as
    # multiple destination columns.
    parsed_manifest = pandas.DataFrame()

    column_map: List[Tuple[str, dict]] = []

    if sample_column:
        column_map += [("sample", {"name": sample_column, "barcode": True})]
    if collection_column:
        column_map += [("collection", {"name": collection_column, "barcode": True})]
    if date_column:
        column_map += [("date", {"name": date_column})]

    # Normalize extra_columns specs: plain strings become {"name": ...}.
    column_map += [
        (dst, src) if isinstance(src, dict) else (dst, {"name":src})
        for dst, src
        in extra_columns
        if src]

    for dst, src in column_map:
        if src.get("multiple"):
            # Collect all matching columns into one list-valued field.
            parsed_manifest[dst] = select_columns(manifest, src["name"]).apply(list, axis="columns")
        else:
            parsed_manifest[dst] = select_column(manifest, src["name"])

    # Set of columns names for barcodes
    barcode_columns = {dst for dst, src in column_map if src.get("barcode")}

    parsed_manifest = perform_qc(sample_column, collection_column, barcode_columns, parsed_manifest)

    # Add sample type for kit related samples
    if sample_type:
        parsed_manifest["sample_type"] = sample_type

    parsed_manifest[PROVENANCE_KEY] = list(
        map(lambda index: {
                "workbook": workbook,
                "sha1sum": digest,
                "sheet": sheet,
                # Account for header row and convert from 0-based to 1-based indexing
                "row": index + 2,
            }, parsed_manifest.index))

    # Return a standard list of dicts instead of a DataFrame
    return parsed_manifest.to_dict(orient = "records")
@manifest.command("diff")
@click.argument("manifest_a",
metavar = "<manifest-a.ndjson>",
type = LocalOrRemoteFile("r"))
@click.argument("manifest_b",
metavar = "<manifest-b.ndjson>",
type = LocalOrRemoteFile("r"))
@format_doc(PROVENANCE_KEY = PROVENANCE_KEY)
def diff(manifest_a, manifest_b):
"""
Compare two manifests and output new or changed records.
<manifest-a.ndjson> and <manifest-b.ndjson> must be newline-delimited JSON
files produced by the "parse" or "parse-using-config" commands which are
siblings to this command.
Records in <manifest-b.ndjson> which do not appear in <manifest-a.ndjson>
will be output to stdout. The internal provenance-tracking field,
"{PROVENANCE_KEY}", is ignored for the purposes of comparison.
"""
manifest_a_hashes = {
deephash(record)
for record in load_ndjson(manifest_a) }
new_or_changed = (
record for record in load_ndjson(manifest_b)
if deephash(record) not in manifest_a_hashes )
dump_ndjson(new_or_changed)
@manifest.command("upload")
@click.argument("manifest_file",
metavar = "<manifest.ndjson>",
type = LocalOrRemoteFile("r"))
def upload(manifest_file):
"""
Upload manifest records into the database receiving area.
<manifest.ndjson> must be a newline-delimited JSON file produced by this
command's sibling commands.
Once records are uploaded, the manifest ETL routine will reconcile the
manifest records with known identifiers and existing samples.
"""
db = DatabaseSession()
try:
LOG.info(f"Copying sample manifest records from {manifest_file.path}")
row_count = db.copy_from_ndjson(("receiving", "manifest", "document"), manifest_file)
LOG.info(f"Received {row_count:,} manifest records")
LOG.info("Committing all changes")
db.commit()
except:
LOG.info("Rolling back all changes; the database will not be modified")
db.rollback()
raise
def select_column(table: pandas.DataFrame, name: str) -> pandas.Series:
    """
    Select the single column matching *name* in *table*.

    *table* must be a :class:`pandas.DataFrame`.

    *name* must be a string, which may contain shell-style wildcards and
    pattern matching.

    Matching is performed case-insensitively. An `AssertionError` is raised
    if no columns are found or if more than one column is found.

    Returns a :class:`pandas.Series` column from *table*.
    """
    subset = select_columns(table, name)

    assert len(subset.columns) == 1, f"More than one column name matching «{name}»: {subset.columns}"

    return subset[subset.columns[0]]


def select_columns(table: pandas.DataFrame, name: str) -> pandas.DataFrame:
    """
    Select one or more columns matching *name* in *table*.

    *table* must be a :class:`pandas.DataFrame`.

    *name* must be a string, which may contain shell-style wildcards and
    pattern matching.

    Matching is performed case-insensitively. An `AssertionError` is raised
    if no columns are found.

    Returns a :class:`pandas.DataFrame` containing a subset of columns in
    *table*.
    """
    # fnmatch.translate turns the shell glob into an anchored regex.
    pattern = re.compile(fnmatch.translate(name), re.IGNORECASE)

    matches = [column for column in table.columns.astype(str) if pattern.match(column)]

    assert matches, f"No column name matching «{name}» found; column names are: {list(table.columns)}"

    return table[matches]
def perform_qc(sample_column: str, collection_column: str, barcode_columns: Set[str],
               parsed_manifest: pandas.DataFrame) -> pandas.DataFrame:
    """
    Run quality control on the manifest data, dropping rows which violate
    our standards for complete and accurate data.
    """
    # First drop rows lacking the required barcode data entirely, then drop
    # rows whose barcodes collide with another row's.
    qc_passed = drop_missing_barcodes(sample_column, collection_column, parsed_manifest)
    return deduplicate_barcodes(qc_passed, barcode_columns)
def drop_missing_barcodes(sample_column: str, collection_column: str,
                          parsed_manifest: pandas.DataFrame) -> pandas.DataFrame:
    """
    Drop rows lacking required barcode data.

    Which rows survive depends on which barcode columns are configured: when
    both *sample_column* and *collection_column* are set, a row is dropped
    only if both its "sample" and "collection" values are missing; when
    exactly one is set, rows missing that column's value are dropped.

    >>> drop_missing_barcodes(sample_column='sample', collection_column=None, \
            parsed_manifest=pandas.DataFrame([['aa', 'foo'], [None, 'bar']], \
            columns=['sample', 'other']))['other'].tolist()
    ['foo']
    """
    if sample_column and collection_column:
        return parsed_manifest.dropna(subset = ["sample", "collection"], how='all')
    if sample_column:
        return parsed_manifest.dropna(subset = ["sample"])
    if collection_column:
        return parsed_manifest.dropna(subset = ["collection"])
    return parsed_manifest
def deduplicate_barcodes(df: pandas.DataFrame, columns: Iterable) -> pandas.DataFrame:
    """
    Check all barcode columns for duplicates and drops records that have
    duplicated barcodes.

    Null values are never counted as duplicates.  A row is kept only if it
    survives deduplication in *every* listed column.

    >>> deduplicate_barcodes(pandas.DataFrame([['aa', 'bb', 'foo'], ['aa', 'cc', 'bar']], \
            columns=['sample', 'collection', 'other']), columns=['sample', 'collection'])
    Empty DataFrame
    Columns: [sample, collection, other]
    Index: []

    >>> deduplicate_barcodes(pandas.DataFrame([['aa', 'bb', 'foo'], ['aa', 'cc', 'bar']], \
            columns=['sample', 'collection', 'other']), columns=['collection'])
      sample collection other
    0     aa         bb   foo
    1     aa         cc   bar

    >>> deduplicate_barcodes(pandas.DataFrame([['aa', 'bb', 'foo'], ['aa', 'cc', 'bar'], \
            ['bb', 'aa', 'baz']], columns=['sample', 'collection', 'other']), \
            columns=['sample', 'collection'])
      sample collection other
    2     bb         aa   baz
    """
    deduplicated = df

    for column in columns:
        # Drop null values so they don't get counted as duplicates
        col = df[column].dropna()

        # Find duplicates within column
        duplicates = col[col.duplicated(keep=False)]

        # If duplicates are found, drop rows with duplicate barcodes
        if len(duplicates) > 0:
            LOG.warning(f"Found duplicate barcodes in column «{column}»")
            dup_barcodes = list(duplicates.unique())
            LOG.warning(f"Duplicated barcodes: {dup_barcodes}")
            LOG.warning(f"Dropping records with duplicate barcodes")
            # Rows of *df* whose value in this column is unique or null...
            deduplicated_df = df[(~df[column].duplicated(keep=False)) \
                | (df[column].isnull())][column].to_frame()
            # ...intersected with the survivors of previous columns, so a
            # row must pass every column's check to be kept.
            common_idx = deduplicated.index.intersection(deduplicated_df.index)
            deduplicated = deduplicated.loc[common_idx]

    return deduplicated
def deephash(record):
    """
    Return a :class:`DeepHash` of the given manifest *record*, ignoring
    the provenance information.

    Used by :func:`diff` so that re-parsing the same workbook (with new
    provenance metadata) does not make every record look changed.
    """
    return DeepHash(record, exclude_paths = {f"root['{PROVENANCE_KEY}']"})[record]
| 22,234 | 6,293 |
# Generated by Django 3.0.4 on 2020-04-12 14:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine csone.GciNotes.upload as a CharField(max_length=5000)."""

    # Must run after 0007, which introduced the upload field.
    dependencies = [
        ('csone', '0007_gcinotes_upload'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gcinotes',
            name='upload',
            # NOTE(review): default='test' looks like a placeholder left in
            # this generated migration — confirm it is intended.
            field=models.CharField(default='test', max_length=5000),
        ),
    ]
| 419 | 156 |
import logging
from typing import Optional, Dict, Any, List, Tuple, NamedTuple
import torch
from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
from mlcomponents.seqdecoding import SeqDecoder
from mlcomponents.seqencoder import SequenceEncoder
class EncoderDecoder(BaseComponent):
    """
    Composes a SequenceEncoder and a SeqDecoder into a trainable
    encoder-decoder (seq2seq) component over token sequences.
    """
    LOGGER = logging.getLogger('EncoderDecoder')

    def __init__(self, name: str, input_sequence_encoder: SequenceEncoder,
                 output_sequence_decoder: SeqDecoder,
                 hyperparameters: Optional[Dict[str, Any]] = None) -> None:
        super(EncoderDecoder, self).__init__(name, hyperparameters)
        self.__input_sequence_encoder = input_sequence_encoder
        self.__output_sequence_decoder = output_sequence_decoder

    @classmethod
    def default_hyperparameters(cls) -> Dict[str, Any]:
        # No hyperparameters of its own; the sub-components carry theirs.
        return { }

    def _finalize_component_metadata_and_model(self) -> None:
        pass

    @property
    def input_sequence_encoder(self):
        return self.__input_sequence_encoder

    @property
    def output_sequence_decoder(self):
        return self.__output_sequence_decoder

    def _load_metadata_from_sample(self, data_to_load: Edit) -> None:
        # Let each sub-component accumulate its metadata (e.g. vocabulary)
        # from the raw sample.
        self.__input_sequence_encoder.load_metadata_from_sample(data_to_load.input_sequence)
        self.__output_sequence_decoder.load_metadata_from_sample(SeqDecoder.InputOutputSequence(
            input_sequence=data_to_load.input_sequence,
            output_sequence=data_to_load.output_sequence
        ))

    # Pair of tensorized (input, output) representations for one sample.
    TensorizedData = NamedTuple('EncoderDecoderTensorizedData', [
        ('input_sequence', Any),
        ('output_sequence', Any),
    ])

    def load_data_from_sample(self, data_to_load: Edit) -> Optional['EncoderDecoder.TensorizedData']:
        # Input sequences are wrapped in START/END markers before tensorization.
        return self.TensorizedData(
            input_sequence=self.__input_sequence_encoder.load_data_from_sample([SeqDecoder.START] + data_to_load.input_sequence + [SeqDecoder.END]),
            output_sequence=self.__output_sequence_decoder.load_data_from_sample(SeqDecoder.InputOutputSequence(
                input_sequence=[SeqDecoder.START] + data_to_load.input_sequence + [SeqDecoder.END],
                output_sequence=data_to_load.output_sequence
            ))
        )

    def initialize_minibatch(self) -> Dict[str, Any]:
        return {
            'input_sequences': self.__input_sequence_encoder.initialize_minibatch(),
            'output_sequences': self.__output_sequence_decoder.initialize_minibatch(),
        }

    def extend_minibatch_by_sample(self, datapoint: 'EncoderDecoder.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
        # Returns False as soon as either sub-component signals its part of
        # the minibatch is full.
        continue_extending = self.__input_sequence_encoder.extend_minibatch_by_sample(
            datapoint=datapoint.input_sequence,
            accumulated_minibatch_data=accumulated_minibatch_data['input_sequences'])
        continue_extending &= self.__output_sequence_decoder.extend_minibatch_by_sample(
            datapoint=datapoint.output_sequence,
            accumulated_minibatch_data=accumulated_minibatch_data['output_sequences'])
        return continue_extending

    def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
        return {
            'input_sequences': self.__input_sequence_encoder.finalize_minibatch(accumulated_minibatch_data['input_sequences']),
            'output_sequences': self.__output_sequence_decoder.finalize_minibatch(accumulated_minibatch_data['output_sequences'])
        }

    def forward(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any]):
        # Encode (also retrieving the embedded input tokens, which the
        # decoder consumes) and delegate to the decoder, whose forward
        # returns the training loss.
        input_encoding = self.__input_sequence_encoder.forward(
            input_sequence_data=input_sequences,
            return_embedded_sequence=True
        )
        memories, memories_lengths, output_state, input_sequence_token_embeddings = input_encoding

        decoder_loss = self.__output_sequence_decoder.forward(memories=memories, memories_lengths=memories_lengths,
                                                              initial_state=output_state,
                                                              input_sequence_token_embeddings=input_sequence_token_embeddings,
                                                              **output_sequences)
        return decoder_loss

    def greedy_decode(self, input_sequences: Dict[str, Any],
                      ground_input_sequences: List[List[str]], max_length: int=50) -> List[Tuple[List[List[str]], List[float]]]:
        # Inference only: no gradients needed.
        with torch.no_grad():
            ground_input_sequences, initial_state, memories, memory_lengths = self.__prepare_decoding(ground_input_sequences,
                                                                                                      input_sequences)
            return self.__output_sequence_decoder.greedy_decode(memories, memory_lengths,
                                                                initial_state=initial_state, max_length=max_length,
                                                                memories_str_representations=[[SeqDecoder.START] + g + [SeqDecoder.END] for g in ground_input_sequences])

    def beam_decode(self, input_sequences: Dict[str, Any],
                    ground_input_sequences: List[List[str]], max_length: int=150) -> List[Tuple[List[List[str]], List[float]]]:
        # Inference only: no gradients needed.
        with torch.no_grad():
            ground_input_sequences, initial_state, memories, memory_lengths = self.__prepare_decoding(ground_input_sequences,
                                                                                                      input_sequences)
            return self.__output_sequence_decoder.beam_decode(memories, memory_lengths,
                                                              initial_state=initial_state, max_length=max_length,
                                                              memories_str_representations=[[SeqDecoder.START] + g + [SeqDecoder.END] for g in ground_input_sequences],
                                                              )

    def __prepare_decoding(self, ground_input_sequences, input_sequences):
        # Encode without the embedded-sequence extra output; note the return
        # order is (ground_input_sequences, initial_state, memories,
        # memory_lengths), matching the callers' unpacking.
        memories, memory_lengths, output_state = self.__input_sequence_encoder.forward(
            input_sequence_data=input_sequences)
        return ground_input_sequences, output_state, memories, memory_lengths

    def compute_likelihood(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any],
                           return_debug_info: bool = False):
        # Scores the given output sequences under the model (no gradients).
        with torch.no_grad():
            memories, memories_lengths, output_state = self.__input_sequence_encoder.forward(input_sequence_data=input_sequences)
            return self.__output_sequence_decoder.compute_likelihood(memories=memories,
                                                                     memories_lengths=memories_lengths,
                                                                     initial_state=output_state,
                                                                     return_debug_info= return_debug_info,
                                                                     **output_sequences)
| 7,467 | 2,049 |
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
""": A class containing tick mark style information."""
__version__ = "$Revision: #1 $"
#===========================================================================
from . import types as S
from .MplBasicLineStyle import MplBasicLineStyle
from .MplTextStyle import MplTextStyle
import matplotlib.axis as mplaxis
#===========================================================================
__all__ = [ 'MplTickStyle' ]
#===========================================================================
class MplTickStyle( S.SubStyle ):
    """: Style properties for managing matplotlib axis tick elements.

    A matplotlib Tick bundles several artists: the tick mark and label on the
    primary edge (tick1line/label1), their counterparts on the secondary edge
    (tick2line/label2), and the grid line. Each gets its own sub-style below.
    """

    labels = S.property.SubStyle( MplTextStyle, doc = """
The style properties for any text labels placed at tick marks along the
primary axis edge.
If this is on the X-Axis, then the primary edge is the bottom.
If this is on the Y-Axis, then the primary edge is the left.
= SEE ALSO
- :ref:`MplTextStyle <mplStyle_MplTextStyle>`
""" )

    secondaryLabels = S.property.SubStyle( MplTextStyle, doc = """
The style properties for any text labels placed at tick marks along the
secondary axis edge.
If this is on the X-Axis, then the secondary edge is the top.
If this is on the Y-Axis, then the secondary edge is the right.
= SEE ALSO
- :ref:`MplTextStyle <mplStyle_MplTextStyle>`
""" )

    marks = S.property.SubStyle( MplBasicLineStyle, doc = """
The style properties for the tick marks along the primary axis edge.
If this is on the X-Axis, then the primary edge is the bottom.
If this is on the Y-Axis, then the primary edge is the left.
= SEE ALSO
- :ref:`MplBasicLineStyle <mplStyle_MplBasicLineStyle>`
""" )

    secondaryMarks = S.property.SubStyle( MplBasicLineStyle, doc = """
The style properties for the tick marks along the secondary axis edge.
If this is on the X-Axis, then the secondary edge is the top.
If this is on the Y-Axis, then the secondary edge is the right.
= SEE ALSO
- :ref:`MplBasicLineStyle <mplStyle_MplBasicLineStyle>`
""" )

    grid = S.property.SubStyle( MplBasicLineStyle, doc = """
The style properties for the grid lines.
Grid lines are present for each tick mark. This means that if there is no
tick locator for an axis, then there are no ticks to use for grid lines.
Setting the visibility of the tick marks to True will ensure that a tick
locator is present to use for generating grid lines.
= SEE ALSO
- :ref:`MplBasicLineStyle <mplStyle_MplBasicLineStyle>`
""" )

    # Scalar tick geometry; applied to both primary and secondary marks.
    length = S.property.Float( min = 0.0, doc = """
The length of the ticks (in points).
""" )

    width = S.property.Float( min = 0.0, doc = """
The width of the ticks (in points).
""" )

    pad = S.property.Float( doc = """
The spacing between the ticks and their labels (in points).
""" )

    #-----------------------------------------------------------------------
    def apply( self, obj, defaults = {}, **kwargs ):
        """: Apply this style to the given object using the supplied defaults.

        = NOTE
        - This can apply to any matplotlib Tick.

        = INPUT VARIABLES
        - obj       The object to apply the style to.
        - defaults  Keyword-value dictionary with defaults values to use if a
                    property value is not specified.
        - kwargs    Keyword-value dictionary whose values will supercede
                    any values set by the properties of this sub-style.
        """
        if not isinstance( obj, mplaxis.Tick ):
            msg = "Unable to apply this sub-style to the given element." \
                  "Expected a matplotlib 'Tick' and instead received the " \
                  "following:\n%s" % (obj,)
            raise Exception( msg )

        # Labels: style the primary label artist; an explicit 'visible' value
        # is mirrored onto the Tick's own on/off flag, since visibility is
        # controlled by the Tick rather than the text artist.
        subKwargs = kwargs.get( 'labels', {} )
        subDefaults = S.lib.resolveDefaults( defaults, ['text', 'labels'] )
        self.labels.apply( obj.label1, subDefaults, **subKwargs )

        value = self.labels.getValue( 'visible', subDefaults, **subKwargs )
        if value is not None:
            obj.label1On = value

        # Secondary Labels (top / right edge), same pattern as above.
        subKwargs = kwargs.get( 'secondaryLabels', {} )
        subDefaults = S.lib.resolveDefaults( defaults,
                                 ['text', 'labels', 'secondaryLabels'] )
        self.secondaryLabels.apply( obj.label2, subDefaults, **subKwargs )

        value = self.secondaryLabels.getValue( 'visible',
                                               subDefaults, **subKwargs )
        if value is not None:
            obj.label2On = value

        # marks (primary tick line)
        subKwargs = kwargs.get( 'marks', {} )
        subDefaults = S.lib.resolveDefaults( defaults, ['marks'] )
        self.marks.apply( obj.tick1line, subDefaults, **subKwargs )

        value = self.marks.getValue( 'visible', subDefaults, **subKwargs )
        if value is not None:
            obj.tick1On = value

        # Secondary Marks (secondary tick line)
        subKwargs = kwargs.get( 'secondaryMarks', {} )
        subDefaults = S.lib.resolveDefaults( defaults, ['secondaryMarks'] )
        self.secondaryMarks.apply( obj.tick2line, subDefaults, **subKwargs )

        value = self.secondaryMarks.getValue( 'visible',
                                              subDefaults, **subKwargs )
        if value is not None:
            obj.tick2On = value

        # Grid line attached to this tick.
        subKwargs = kwargs.get( 'grid', {} )
        subDefaults = S.lib.resolveDefaults( defaults, ['grid'] )
        self.grid.apply( obj.gridline, subDefaults, **subKwargs )

        value = self.grid.getValue( 'visible', subDefaults, **subKwargs )
        if value is not None:
            obj.gridOn = value

        # Activate the grid as appropriate
        #FUTURE: This should be here using Tick.major, but matplotlib
        #FUTURE: needs to be fixed first.
        #FUTURE obj.grid( self.grid.visible )
        #FUTURE: Setup minor tick locators (as necessary)

        # Length: Tick stores tick length in the private '_size' attribute and
        # renders the marks as markers, hence the set_markersize calls.
        value = self.getValue( 'length', defaults, **kwargs )
        if value is not None:
            obj._size = value
            obj.tick1line.set_markersize( obj._size )
            obj.tick2line.set_markersize( obj._size )

        # Width (marker edge width of both tick lines)
        value = self.getValue( 'width', defaults, **kwargs )
        if value is not None:
            obj._width = value
            obj.tick1line.set_markeredgewidth( obj._width )
            obj.tick2line.set_markeredgewidth( obj._width )

        # Pad (distance between tick and its label)
        value = self.getValue( 'pad', defaults, **kwargs )
        if value is not None:
            obj.set_pad( value )
#-----------------------------------------------------------------------
| 8,195 | 2,444 |
#!/usr/bin/env python3
# encoding: utf-8
"""
tests.test_path_newsonkrumm2009
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Based on the data available at:
https://www.microsoft.com/en-us/research/publication/hidden-markov-map-matching-noise-sparseness/
Notes:
* There is a 'bug' in the map available from the website.
Multiple segments (streets) in the map are not connected but have overlappen, but
disconnected, nodes.
For example, the following nodes are on the same location and
should be connected because the given path runs over this road:
- 884147801204 and 884148400033
- 884148100260 and 884148001002
* The path is missing a number of observations. For those parts non-emitting nodes are required.
This occurs at:
- 2770:2800 (index 2659 is start)
- 2910:2929
:author: Wannes Meert
:copyright: Copyright 2018 DTAI, KU Leuven and Sirris.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import os
import sys
import logging
import pickle
from pathlib import Path
import csv
from datetime import datetime
from itertools import product
import pytest
import leuvenmapmatching as mm
from leuvenmapmatching.matcher import base
from leuvenmapmatching.matcher.distance import DistanceMatcher
from leuvenmapmatching.map.sqlite import SqliteMap
import leuvenmapmatching.visualization as mm_viz
MYPY = False
if MYPY:
    # Imported only for static type checking; avoids a runtime dependency.
    from typing import List, Tuple

logger = mm.logger
# All test resources live under rsrc/newson_krumm_2009 next to this file.
this_path = Path(os.path.realpath(__file__)).parent / "rsrc" / "newson_krumm_2009"
gps_data = this_path / "gps_data.txt"
gps_data_pkl = gps_data.with_suffix(".pkl")  # cached, parsed GPS trace
ground_truth_route = this_path / "ground_truth_route.txt"
road_network = this_path / "road_network.txt"
road_network_db = road_network.with_suffix(".sqlite")  # cached road-network db
directory = None  # output directory for debug plots; set in __main__ when run manually
base.default_label_width = 34
def read_gps(route_fn):
    """Read a tab-separated GPS trace file into a list of (lat, lon, timestamp)."""
    points = []
    with route_fn.open("r") as handle:
        rows = csv.reader(handle, delimiter='\t')
        next(rows)  # skip header line
        for row in rows:
            date, time, lat, lon = row[:4]
            timestamp = datetime.strptime(date + " " + time, '%d-%b-%Y %H:%M:%S')
            points.append((float(lat), float(lon), timestamp))
    logger.debug(f"Read GPS trace of {len(points)} points")
    return points
def read_paths(paths_fn):
    """Read the ground-truth route file into a list of (path_id, traversal) int pairs."""
    result = []
    with paths_fn.open("r") as handle:
        rows = csv.reader(handle, delimiter='\t')
        next(rows)  # skip header line
        for row in rows:
            path_id, traversal = row[:2]
            result.append((int(path_id), int(traversal)))
    logger.debug(f"Read correct trace of {len(result)} nodes")
    return result
def parse_linestring(line):
    # type: (str) -> List[Tuple[float, float]]
    """Parse a WKT-style 'LINESTRING (lon lat, lon lat, ...)' into (lat, lon) floats.

    WKT stores coordinates as "lon lat"; the returned tuples are flipped to the
    (lat, lon) order used throughout this module.
    """
    inner = line[line.index("(") + 1:line.index(")")]
    coordinate_pairs = (pair.split(" ") for pair in inner.split(", "))
    return [(float(lat), float(lon)) for lon, lat in coordinate_pairs]
def read_map(map_fn):
    """Build a SqliteMap road network from the tab-separated Newson-Krumm map file.

    Each input row is one street segment: a from/to node pair plus a WKT
    LINESTRING of inner coordinates. Inner coordinates get freshly generated
    synthetic node ids, and every consecutive pair of nodes becomes an edge
    (both directions when the segment is two-way).
    """
    logger.debug(f"Reading map ...")
    mmap = SqliteMap("road_network", use_latlon=True, dir=this_path)
    node_cnt = 0
    edge_cnt = 0
    # new_node_id = 1000000000000
    # Synthetic ids for inner nodes start at 1; real map node ids are ~12 digits,
    # so the two ranges cannot collide (asserted at the end).
    new_node_id = 1
    with map_fn.open("r") as map_f:
        reader = csv.reader(map_f, delimiter='\t')
        next(reader)  # skip header line
        for row in reader:
            eid, nf, nt, twoway, speed, length, innernodes = row
            eid = int(eid)
            nf = int(nf)
            nt = int(nt)
            length = int(length)
            twoway = int(twoway)
            speed = float(speed)
            if twoway == 0:
                twoway = False
            elif twoway == 1:
                twoway = True
            else:
                raise Exception(f"Unknown value for twoway: {twoway}")
            innernodes = parse_linestring(innernodes)
            # Add endpoint nodes to map; they may be shared by several rows,
            # hence ignore_doubles. Indexing/committing is deferred for speed.
            mmap.add_node(nf, innernodes[0], ignore_doubles=True, no_index=True, no_commit=True)
            mmap.add_node(nt, innernodes[-1], ignore_doubles=True, no_index=True, no_commit=True)
            node_cnt += 2
            prev_node = nf
            assert(length < 1000)
            idx = 1
            for innernode in innernodes[1:-1]:
                # innernode_id = nf * 1000 + idx
                innernode_id = new_node_id
                new_node_id += 1
                mmap.add_node(innernode_id, innernode, no_index=True, no_commit=True)  # Should not be double
                node_cnt += 1
                mmap.add_edge(prev_node, innernode_id, speed=speed, edge_type=0,
                              path=eid, pathnum=idx, no_index=True, no_commit=True)
                edge_cnt += 1
                if twoway:
                    # Negative pathnum marks the reverse direction of the segment.
                    mmap.add_edge(innernode_id, prev_node, speed=speed, edge_type=0,
                                  path=eid, pathnum=-idx, no_index=True, no_commit=True)
                    edge_cnt += 1
                prev_node = innernode_id
                idx += 1
            # Close the chain with the final edge to the 'to' node.
            mmap.add_edge(prev_node, nt, speed=speed, edge_type=0,
                          path=eid, pathnum=idx, no_index=True, no_commit=True)
            edge_cnt += 1
            if twoway:
                mmap.add_edge(nt, prev_node, speed=speed, edge_type=0,
                              path=eid, pathnum=-idx, no_index=True, no_commit=True)
                edge_cnt += 1
            if node_cnt % 100000 == 0:
                # Commit in batches to keep sqlite transactions small.
                mmap.db.commit()
    logger.debug(f"... done: {node_cnt} nodes and {edge_cnt} edges")
    mmap.reindex_nodes()
    mmap.reindex_edges()
    # Synthetic ids must stay below the real (12-digit) map node ids.
    assert(new_node_id < 100000000000)
    return mmap
def correct_map(mmap):
    """Add edges between nodes with degree > 2 that are on the exact same location.

    This ignore that with bridges, the roads might not be connected. But we need a correct
    because the dataset has a number of interrupted paths.
    """
    def correct_edge(labels):
        # Keep only the original map node ids (> 1e11); the synthetic inner-node
        # ids generated by read_map() are all below that threshold.
        labels = [label for label in labels if label > 100000000000]
        logger.info(f"Add connections between {labels}")
        # product(..., repeat=2) yields both directions (and self-pairs) so the
        # co-located nodes become mutually reachable.
        for l1, l2 in product(labels, repeat=2):
            mmap.add_edge(l1, l2, edge_type=1)

    mmap.find_duplicates(func=correct_edge)
def load_data():
    """Load the ground-truth paths, the road network and the GPS route.

    The road network (sqlite) and the parsed GPS trace (pickle) are cached on
    disk after the first run.

    Returns (paths, map_con, route) where route is a list of plain (lat, lon)
    pairs -- timestamps are stripped before returning.
    """
    max_route_length = None  # 200
    # Paths
    paths = read_paths(ground_truth_route)
    # Map: reuse the sqlite db when present, otherwise build and correct it.
    if road_network_db.exists():
        map_con = SqliteMap.from_file(road_network_db)
        logger.debug(f"Read road network from db file {road_network_db} ({map_con.size()} nodes)")
    else:
        map_con = read_map(road_network)
        correct_map(map_con)
        logger.debug(f"Create road network to db file {map_con.db_fn} ({map_con.size()} nodes)")
    # Route: cached as a pickle of (lat, lon, ts) triples.
    if gps_data_pkl.exists():
        with gps_data_pkl.open("rb") as ifile:
            route = pickle.load(ifile)
        logger.debug(f"Read gps route from file ({len(route)} points)")
    else:
        route = read_gps(gps_data)
        if max_route_length:
            route = route[:max_route_length]
        with gps_data_pkl.open("wb") as ofile:
            pickle.dump(route, ofile)
    # Drop the timestamps; callers receive coordinate pairs only.
    route = [(lat, lon) for lat, lon, _ in route]
    return paths, map_con, route
def test_route_slice1():
    """Match a 5-point slice of the route around the first gap in observations.

    The slice starts at index 2657 (near the gap documented in the module
    docstring); non-emitting states must bridge the missing observations.
    """
    if directory:
        # matplotlib is only needed when debug plotting is enabled.
        import matplotlib.pyplot as plt
    nodes, map_con, route = load_data()
    zoom_path = True
    matcher = DistanceMatcher(map_con, min_prob_norm=0.001,
                              max_dist=200,
                              dist_noise=6, dist_noise_ne=12,
                              obs_noise=30, obs_noise_ne=150,
                              non_emitting_states=True)
    route_slice = route[2657:2662]
    matcher.match(route_slice)
    path_pred = matcher.path_pred_onlynodes
    path_sol = [172815, 172816, 172817, 172818, 172819, 172820, 172821, 172822, 172823, 172824,
                172825, 172826, 172827, 172828, 172829, 172830, 884148100261, 172835, 172836,
                172837, 884148100254, 172806, 884148100255, 172807]  # Can change when building db
    # Only the length is compared: synthetic node ids can differ between db builds.
    assert len(path_pred) == len(path_sol)
def test_bug1():
    """Regression test: non-emitting states on a tiny three-node map.

    The observations run alongside the two-edge path 1->2->3; the matcher
    should recover exactly those two edges.
    """
    map_con = SqliteMap("map", use_latlon=True)
    map_con.add_nodes([
        (1, (47.590439915657, -122.238368690014)),
        (2, (47.5910192728043, -122.239519357681)),
        (3, (47.5913706421852, -122.240168452263))
    ])
    map_con.add_edges([
        (1, 2),
        (2, 3)
    ])
    path = [
        # (47.59043333, -122.2384167),
        (47.59058333, -122.2387),
        (47.59071667, -122.2389833),
        (47.59086667, -122.2392667),
        (47.59101667, -122.23955),
        (47.59115, -122.2398333)
    ]
    path_sol = [(1, 2), (2, 3)]
    matcher = DistanceMatcher(map_con, min_prob_norm=0.001,
                              max_dist=200, obs_noise=4.07,
                              non_emitting_states=True)
    matcher.match(path, unique=True)
    path_pred = matcher.path_pred
    if directory:
        # Optional debug plotting, only when an output directory is configured.
        import matplotlib.pyplot as plt
        matcher.print_lattice_stats()
        logger.debug("Plotting post map ...")
        fig = plt.figure(figsize=(100, 100))
        ax = fig.get_axes()
        mm_viz.plot_map(map_con, matcher=matcher, use_osm=True, ax=ax,
                        show_lattice=False, show_labels=True, show_graph=True, zoom_path=True,
                        show_matching=True)
        plt.savefig(str(directory / "test_newson_bug1.png"))
        plt.close(fig)
        logger.debug("... done")
    assert path_pred == path_sol, f"Edges not equal:\n{path_pred}\n{path_sol}"
@pytest.mark.skip(reason="Takes a long time")
def test_route():
    """Match the full Newson-Krumm GPS trace and score it against the ground truth.

    Predicted nodes are cached in nodes_pred.pkl. When slice_route is set, only
    that part of the trace is matched (useful for debugging specific gaps).
    """
    if directory:
        import matplotlib.pyplot as plt
    else:
        plt = None
    paths, map_con, route = load_data()
    # BUG FIX: load_data() already strips timestamps and returns (lat, lon)
    # pairs, so the former `route = [(lat, lon) for lat, lon, _ in route]`
    # here raised ValueError (cannot unpack a 2-tuple into three names).
    zoom_path = True
    # zoom_path = slice(2645, 2665)
    slice_route = None
    # slice_route = slice(650, 750)
    # slice_route = slice(2657, 2662)  # First location where some observations are missing
    # slice_route = slice(2770, 2800)  # Observations are missing
    # slice_route = slice(2910, 2950)  # Interesting point
    # slice_route = slice(2910, 2929)  # Interesting point
    # slice_route = slice(6825, 6833)  # Outlier observation
    # if directory is not None:
    #     logger.debug("Plotting pre map ...")
    #     mm_viz.plot_map(map_con_latlon, path=route_latlon, use_osm=True,
    #                     show_lattice=False, show_labels=False, show_graph=False, zoom_path=zoom_path,
    #                     filename=str(directory / "test_newson_route.png"))
    #     logger.debug("... done")
    matcher = DistanceMatcher(map_con, min_prob_norm=0.0001,
                              max_dist=200,
                              dist_noise=15, dist_noise_ne=30,
                              obs_noise=30, obs_noise_ne=150,
                              non_emitting_states=True)
    if slice_route is None:
        # Full-route run: reuse cached predicted nodes when available.
        pkl_fn = this_path / "nodes_pred.pkl"
        if pkl_fn.exists():
            with pkl_fn.open("rb") as pkl_file:
                logger.debug(f"Reading predicted nodes from pkl file")
                route_nodes = pickle.load(pkl_file)
        else:
            matcher.match(route)
            route_nodes = matcher.path_pred_onlynodes
            with pkl_fn.open("wb") as pkl_file:
                pickle.dump(route_nodes, pkl_file)
        from leuvenmapmatching.util.evaluation import route_mismatch_factor
        print(route_nodes[:10])
        # route_edges = map_con.nodes_to_paths(route_nodes)
        # print(route_edges[:10])
        grnd_paths, _ = zip(*paths)
        print(grnd_paths[:10])
        route_paths = map_con.nodes_to_paths(route_nodes)
        print(route_paths[:10])
        logger.debug(f"Compute route mismatch factor")
        factor, cnt_matches, cnt_mismatches, total_length, mismatches = \
            route_mismatch_factor(map_con, route_paths, grnd_paths, window=None, keep_mismatches=True)
        logger.debug(f"factor = {factor}, "
                     f"cnt_matches = {cnt_matches}/{cnt_mismatches} of {len(grnd_paths)}/{len(route_paths)}, "
                     f"total_length = {total_length}\n"
                     f"mismatches = " + " | ".join(str(v) for v in mismatches))
    else:
        # Partial run for debugging a specific slice of the trace.
        _, last_idx = matcher.match(route[slice_route])
        logger.debug(f"Last index = {last_idx}")
        # matcher.match(route[2657:2662])  # First location where some observations are missing
        # matcher.match(route[2770:2800])  # Observations are missing
        # matcher.match(route[2910:2950])  # Interesting point
        # matcher.match(route[2910:2929])  # Interesting point
        # matcher.match(route[6000:])
        path_pred = matcher.path_pred_onlynodes
        if directory:
            matcher.print_lattice_stats()
            logger.debug("Plotting post map ...")
            fig = plt.figure(figsize=(200, 200))
            ax = fig.get_axes()
            mm_viz.plot_map(map_con, matcher=matcher, use_osm=True, ax=ax,
                            show_lattice=False, show_labels=True, zoom_path=zoom_path,
                            show_matching=True, show_graph=False)
            plt.savefig(str(directory / "test_newson_route_matched.png"))
            plt.close(fig)
            logger.debug("... done")
            logger.debug("Best path:")
            for m in matcher.lattice_best:
                logger.debug(m)
        print(path_pred)
@pytest.mark.skip(reason="Takes a too long")
def test_bug2():
    """Match a Leuven GPS trace against a map fetched live from OpenStreetMap.

    Skipped by default: it downloads map data and the expected solution below
    is tied to a specific OSM snapshot.
    """
    from leuvenmapmatching.util.openstreetmap import locations_to_map
    map_con = SqliteMap("map", use_latlon=True, dir=directory)
    path = [
        (50.87205, 4.66089), (50.874550000000006, 4.672980000000001), (50.87538000000001, 4.67698),
        (50.875800000000005, 4.6787600000000005), (50.876520000000006, 4.6818), (50.87688000000001, 4.683280000000001),
        (50.87814, 4.68733), (50.87832, 4.68778), (50.87879, 4.68851), (50.87903000000001, 4.68895),
        (50.879560000000005, 4.689170000000001), (50.87946, 4.6900900000000005),
        (50.879290000000005, 4.6909600000000005), (50.87906, 4.6921800000000005), (50.87935, 4.6924),
        (50.879720000000006, 4.69275), (50.88002, 4.6930700000000005), (50.880430000000004, 4.693440000000001),
        (50.880660000000006, 4.69357), (50.880660000000006, 4.6936100000000005), (50.88058, 4.694640000000001),
        (50.88055000000001, 4.69491), (50.88036, 4.696160000000001), (50.88009, 4.697550000000001),
        (50.87986, 4.6982800000000005), (50.879720000000006, 4.698790000000001), (50.87948, 4.699730000000001),
        (50.87914000000001, 4.6996400000000005), (50.87894000000001, 4.6995000000000005),
        (50.878800000000005, 4.699350000000001), (50.8785, 4.6991000000000005), (50.87841, 4.6990300000000005)
    ]
    # Build the map from OSM around the given locations (network access!).
    locations_to_map(path, map_con, filename=directory / "osm.xml")
    # Expected matched edge sequence, expressed as OSM node-id pairs.
    # NOTE(review): these ids depend on the OSM snapshot at download time.
    path_sol = [(5777282112, 2633552218), (2633552218, 5777282111), (5777282111, 5777282110), (5777282110, 1642021707),
                (1642021707, 71361087), (71361087, 71364203), (71364203, 1151697757), (1151697757, 1647339017),
                (1647339017, 1647339030), (1647339030, 2058510349), (2058510349, 2633552212), (2633552212, 1380538577),
                (1380538577, 1439572271), (1439572271, 836434313), (836434313, 2633771041), (2633771041, 5042874484),
                (5042874484, 5042874485), (5042874485, 2518922583), (2518922583, 2659762546), (2659762546, 5777282063),
                (5777282063, 2633771037), (2633771037, 2633771035), (2633771035, 2633771033), (2633771033, 1151668705),
                (1151668705, 2633771094), (2633771094, 1151668722), (1151668722, 1151668724), (1151668724, 5543948222),
                (5543948222, 2058481517), (2058481517, 16933576), (16933576, 5543948221), (5543948221, 2518923620),
                (2518923620, 5543948020), (5543948020, 5543948019), (5543948019, 18635886), (18635886, 18635887),
                (18635887, 1036909153), (1036909153, 2658942230), (2658942230, 1001099975), (1001099975, 16933574),
                (16933574, 1125604152), (1125604152, 5543948238), (5543948238, 1125604150), (1125604150, 1125604148),
                (1125604148, 2634195334), (2634195334, 2087854243), (2087854243, 5543948237), (5543948237, 160226603),
                (160226603, 180130266), (180130266, 5543948227), (5543948227, 5543948226), (5543948226, 1195681902),
                (1195681902, 101135392), (101135392, 2606704673), (2606704673, 18635977), (18635977, 1026111708),
                (1026111708, 1026111631), (1026111631, 16571375), (16571375, 2000680621), (2000680621, 999580042),
                (999580042, 16571370), (16571370, 2000680620), (2000680620, 5078692402), (5078692402, 5543948008),
                (5543948008, 16571371), (16571371, 999579936), (999579936, 2639836143), (2639836143, 5543948014),
                (5543948014, 5222992316), (5222992316, 30251323), (30251323, 159701080), (159701080, 3173217124),
                (3173217124, 1165209673), (1165209673, 1380538689), (1380538689, 2878334668), (2878334668, 2871137399),
                (2871137399, 2876902981), (2876902981, 2873624508), (2873624508, 2873624509), (2873624509, 2899666507),
                (2899666507, 2899666518), (2899666518, 2899666513), (2899666513, 2903073945), (2903073945, 2903073951),
                (2903073951, 1380538681), (1380538681, 2914810627), (2914810627, 2914810618), (2914810618, 2914810607),
                (2914810607, 2914810604), (2914810604, 2914810483), (2914810483, 2914810462), (2914810462, 2914810464),
                (2914810464, 1312433523), (1312433523, 20918594), (20918594, 2634267817), (2634267817, 2967425445),
                (2967425445, 3201523879), (3201523879, 157217466), (157217466, 2963305939), (2963305939, 3201523877),
                (3201523877, 3889275909), (3889275909, 3889275897), (3889275897, 157255077), (157255077, 30251882),
                (30251882, 157245624), (157245624, 1150903673), (1150903673, 4504936404)]
    matcher = DistanceMatcher(map_con, min_prob_norm=0.001,
                              max_dist=200, obs_noise=4.07,
                              non_emitting_states=True)
    nodes, idx = matcher.match(path, unique=True)
    path_pred = matcher.path_pred
    if directory:
        # Optional debug plotting, only when an output directory is configured.
        import matplotlib.pyplot as plt
        matcher.print_lattice_stats()
        logger.debug("Plotting post map ...")
        fig = plt.figure(figsize=(100, 100))
        ax = fig.get_axes()
        mm_viz.plot_map(map_con, matcher=matcher, use_osm=True, ax=ax,
                        show_lattice=False, show_labels=True, show_graph=False, zoom_path=True,
                        show_matching=True)
        plt.savefig(str(directory / "test_newson_bug1.png"))
        plt.close(fig)
        logger.debug("... done")
    assert path_pred == path_sol, f"Edges not equal:\n{path_pred}\n{path_sol}"
if __name__ == "__main__":
    # Manual entry point: enable debug logging on stdout and write plots next
    # to this file (or to $TESTDIR when set), then run one of the tests below.
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))
    directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
    print(f"Saving files to {directory}")
    # test_route()
    # test_route_slice1()
    # test_bug1()
    test_bug2()
| 19,035 | 8,932 |
# Generated by Django 2.0.1 on 2019-06-07 01:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.0.1): adds Document.experimentacao_id
    and re-declares Document.aula_id, both as nullable FKs with CASCADE delete.

    NOTE(review): ForeignKey fields whose names already end in '_id' produce
    database columns like 'experimentacao_id_id' -- confirm this is intentional.
    """

    dependencies = [
        ('core', '0065_auto_20190606_2240'),
    ]

    operations = [
        # New optional link from Document to Experimentacao.
        migrations.AddField(
            model_name='document',
            name='experimentacao_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Experimentacao', verbose_name='Experimentacao'),
        ),
        # Document -> Aula link redefined as nullable with CASCADE delete.
        migrations.AlterField(
            model_name='document',
            name='aula_id',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Aula', verbose_name='Aula'),
        ),
    ]
| 785 | 274 |
from janus.qm_wrapper.qm_wrapper import QMWrapper
from janus.qm_wrapper.psi4_wrapper import Psi4Wrapper
| 104 | 38 |
import urllib
import urllib2
import sys
import batch_interface
import json
if len(sys.argv) == 1:
print 'Usage: python %s <command> [parameter=value] ...' % sys.argv[0]
print 'Try python %s batch/help to see list of functions' % sys.argv[0]
print '\tor pythor %s batch/reload to reload functions' % sys.argv[0]
sys.exit(0)
command = sys.argv[1]
post_arg_list = [x.split('=') for x in sys.argv[2:]]
post_args = {k:v for k,v in post_arg_list}
post_data = urllib.urlencode(post_args)
url = 'http://127.0.0.1:%s/%s' % (batch_interface.PORT, command)
print 'Querying %s' % url
response = urllib2.urlopen(url, data=post_data)
msg = json.loads(response.read())
print 'Status: '+ msg['status']
print msg['message']
| 729 | 276 |
#-------------------------------------------------------------------------------
# Name: cache.py
# Purpose:
#
# Author: xuming
#
# Created: 23-01-2011
# Copyright: (c) xuming 2011
# Licence: GPL
#-------------------------------------------------------------------------------
#!/usr/bin/env python
"""A simple cache warp for micolog
The main purpose of this module is to design a common layer to deal with all
methods which need been cached!
"""
from google.appengine.api import memcache
from utils import format_date
from datetime import datetime
from settings import ENABLE_MEMCACHE
def vcache(key="", time=0, args=()):
    """
    Cache decorator for a normal method which returns some object.
    example::

        @vcache("blog.hotposts", args=('count'))
        def hotposts(self, count=8):
            return Entry.all().filter('entrytype =', 'post').filter("published =", True).order('-readtimes').fetch(count)

    args:
        key: key name for memcache
        args: the list of cached (keyword) argument names appended to the key
        time: relative number of seconds from current time.
    """
    def _decorate(method):
        def _wrapper(*cargs, **kwargs):
            if not ENABLE_MEMCACHE:
                return method(*cargs, **kwargs)
            skey = key
            # Per-instance key suffix when the object defines one.
            if hasattr(cargs[0], "vkey"):
                skey = key + cargs[0].vkey
            for arg in args:
                # Fixed: dict.has_key() was removed in Python 3; 'in' works everywhere.
                if arg in kwargs:
                    skey += "_" + str(arg) + "_" + str(kwargs[arg])
            result = memcache.get(skey)
            # NOTE: a method that legitimately returns None is recomputed on
            # every call, since None is indistinguishable from a cache miss.
            if result is None:
                result = method(*cargs, **kwargs)
                memcache.set(skey, result, time)
            return result
        return _wrapper
    return _decorate
def cache(key="", time=0):
    """
    Cache decorator for request handler methods, such as: get or post.
    It caches the rendered web page (body, last-modified date, status code
    and headers) under key + request.path_qs.
    example::

        @cache(time=600)
        def get(self, tags=None):

    args:
        key: optional key name. request.path_qs as default.
        time: relative number of seconds from current time.
    """
    def _decorate(method):
        def _wrapper(*args, **kwargs):
            if not ENABLE_MEMCACHE:
                method(*args, **kwargs)
                return
            # args[0] is the BaseRequestHandler instance.
            request = args[0].request
            response = args[0].response
            skey = key + request.path_qs
            # Cached value layout: (body, last_modified[, status_code[, headers]]).
            cached = memcache.get(skey)
            if cached:
                response.last_modified = cached[1]
                if len(cached) >= 3:
                    response.set_status(cached[2])
                if len(cached) >= 4:
                    # Fixed: the original reused 'skey' as the loop variable here,
                    # shadowing the cache key.
                    for header_name, value in cached[3].items():
                        response.headers[header_name] = value
                response.out.write(cached[0])
            else:
                if 'last-modified' not in response.headers:
                    response.last_modified = format_date(datetime.utcnow())
                method(*args, **kwargs)
                result = response.body
                status_code = response.status_int
                memcache.set(skey, (result, response.last_modified, status_code, response.headers), time)
        return _wrapper
    return _decorate
import json
from typing import Optional

from fastapi import FastAPI, HTTPException
import redis

from models.team_member import Team_Member
app = FastAPI()
# Redis connection used by all endpoints; keys look like "TeamMember_<id>".
# NOTE(review): 'charset' and 'errors' are legacy redis-py constructor
# arguments -- confirm the installed redis-py version still accepts them.
r = redis.StrictRedis(host='localhost', port=6379, db=0, password="sOmE_sEcUrE_pAsS", socket_timeout=None, connection_pool=None, charset='utf-8', errors='strict', unix_socket_path=None)
@app.get("/")
def read_root():
    """Health-check style root endpoint."""
    greeting = {"Hello": "World"}
    return greeting
@app.get("/teammember/{item_id}")
def read_teammember(item_id: int):
    """Fetch a team member by id from redis, merged with {"item_id": id}.

    Returns 404 when the key does not exist (the original code crashed with a
    TypeError inside json.loads, surfacing as a 500, because redis returns
    None for missing keys).
    """
    head = {"item_id": item_id}
    data = r.get(f"TeamMember_{item_id}")
    if data is None:
        raise HTTPException(status_code=404, detail=f"TeamMember {item_id} not found")
    memberData = json.loads(data)
    # dict union (Python 3.9+): stored fields override the synthesized head on conflict.
    return head | memberData
@app.put("/teammember/{item_id}")
def put_teammember(item_id: int, item: Team_Member):
    """Store a team member under "TeamMember_<id>"; returns the redis SET result."""
    record = {"item_id": item_id, **item.dict()}
    return r.set(f"TeamMember_{item_id}", json.dumps(record))
@app.get("/teammembers/")
def get_list_all_members():
    # TODO: not implemented -- presumably should scan redis for "TeamMember_*"
    # keys and return all stored members. Currently always returns None
    # (serialized as a JSON null response).
    return None
'''
the numbers are not always the same length
23
* 4
4 * 3 + 20 * 4
'''
def multiply_two_numbers(A, B):
| 133 | 52 |
import pygame as pg
class Keyboard:
    """Tracks pressed/released state for a fixed set of pygame keys.

    Only the keys listed in reset_keys() are tracked; events for any other
    key are ignored.
    """

    def __init__(self):
        self.reset_keys()

    def reset_keys(self):
        """(Re)initialize all tracked keys to the unpressed state."""
        self.keys = {
            pg.K_ESCAPE: False,
            pg.K_LEFT: False,
            pg.K_RIGHT: False,
            pg.K_DOWN: False,
            pg.K_UP: False,
            pg.K_a: False,
            pg.K_b: False,
        }

    def press(self, key):
        """Mark a tracked key as pressed; untracked keys are ignored.

        BUG FIX: dict item assignment never raises KeyError, so the original
        try/except was dead code and untracked keys were silently *added* to
        self.keys instead of being ignored.
        """
        if key in self.keys:
            self.keys[key] = True

    def release(self, key):
        """Mark a tracked key as released; untracked keys are ignored (same fix)."""
        if key in self.keys:
            self.keys[key] = False

    def update(self, events):
        """Apply a batch of pygame events (KEYDOWN/KEYUP) to the key states."""
        for e in events:
            if e.type == pg.KEYDOWN:
                self.press(e.key)
            elif e.type == pg.KEYUP:
                self.release(e.key)

    def is_pressed(self, key):
        """Return True if a tracked key is currently pressed; False otherwise."""
        return self.keys.get(key, False)
| 915 | 286 |
from . import serialize_parameters
from SmartFramework.string.encodings import ascii_printables
from SmartFramework.tools.dictionaries import sorted_dict, sorted_filtered # ,filtered
from SmartFramework.tools.objects import (
isInstance,
class_has_method,
ismethod_methoddescriptor_or_function,
) # ,hasMethod
from SmartFramework.tools.functions import cached_one_arg_func
from inspect import isclass, signature
import types
from pybase64 import b64decode
from apply import apply
from pickle import PicklingError
from copyreg import __newobj__, __newobj_ex__, dispatch_table
import blosc
from collections.abc import MutableSequence, Mapping
import sys
from importlib import import_module
# numpy support is optional: numpy-aware serialization is enabled only when
# the package is importable.
try:
    import numpy

    use_numpy = True
except ImportError:  # narrowed from a bare 'except' that hid unrelated errors
    use_numpy = False
ascii_printables_ = ascii_printables  # re-exported just to silence an unused-import warning
not_memorized_types = (int, float)

# --- PLUGINS API -------------------------------

# encoding -------------
dispatch_table  # pickle plugins (used by serializejson when no serializejson plugin or method exists)
serializejson_ = {}  # serializejson plugins
serializejson_builtins = {}  # serializejson plugins active even if strict_pickle is True
encoder_parameters = {}  # encoder extra parameters for plugins, with their default value
getters = (
    {}
)  # getters for dumped classes. keys are strings corresponding to the qualified name, values are True (for automatic getter detection) or a dictionary of {"attribute": "getAttribute"}
property_types = {property}  # property types
# Map e.g. "blosc_zstd" -> "zstd" ("blosclz" keeps its name) for every codec blosc supports.
blosc_compressions = {(name if name == "blosclz" else "blosc_" + name): name for name in blosc.cnames}

# encoding & decoding --------------
properties = (
    {}
)  # properties for loaded classes. keys are classes, values are True (for automatic property detection) or a list of ["attribute1", "attribute2", ...]

# decoding ---------------------
authorized_classes = set()  # qualified names of classes authorized to be loaded
setters = (
    {}
)  # setters for loaded classes. keys are strings corresponding to the qualified name, values are True (for automatic setter detection) or a dictionary of {"attribute": "setAttribute"}
constructors = (
    {}
)  # custom constructors for loaded classes. keys are strings corresponding to the class qualified name, value is the constructor
decoder_parameters = {}  # decoder extra parameters for plugins, with their default value
consts = {}  # dictionary associating const strings to const values
# @profile
def getstate(
    self,
    *,
    split_dict_slots=True,
    keep=None,
    add=None,
    remove=None,
    filter_="_",
    properties=False,
    getters=False,
    extra_getters=None,
    sort_keys=True,
    lasts=None,
    last_classes=None,
    remove_default_values=False,
    default_values=None,
):
    """Generic __getstate__ implementation returning the state of an object.

    Args:
        split_dict_slots: keep __dict__ and __slots__ states separate for
            pickle compatibility; when both parts are non-empty the result is
            a (dict_state, slots_state) 2-tuple.
        keep: names of attributes/properties/getters to keep (also their order
            when sort_keys is False).
        add: names of attributes to keep even if the filter would drop them.
        remove: names of attributes to drop even if the filter keeps them.
        filter_: (bool or str) drop attributes starting with this prefix
            ("_" by default; True also means "_").
        properties: False (none), True (all, guessed by introspection) or a
            list/tuple of property names to save.
        getters: False (none), True (all, guessed by introspection) or a dict
            {"attribute": "getter"}; a tuple key retrieves several attribute
            values from a single getter call (None entries are skipped).
        extra_getters: dict of additional getters merged into *getters*;
            useful when getters is True and introspection misses some.
        sort_keys: sort the __dict__ part alphabetically (True by default).
            If False, the restoration order of attributes may be arbitrary.
        lasts: names of attributes to move to the end of the state.
        last_classes: classes whose instance-valued attributes are moved to
            the end, so referenced objects are serialized earlier and can be
            stored as references (e.g. Qt layouts).
        remove_default_values: drop entries equal to the class defaults for
            lighter, more readable output; use *add* to force-keep one.
        default_values: dict of {"attribute": default_value}, or None to
            build the defaults from a new no-argument instance of the class.

    Returns:
        A dict, or a (dict_state, slots_state) tuple when split_dict_slots
        applies and slots/properties/getters state is present.
    """
    if filter_ is True:
        filter_ = "_"
    _getattribute = self.__getattribute__
    _dict = getattr(self, "__dict__", {})
    has_slots = hasattr(self, "__slots__")
    if (properties is True) or (getters is True):
        slots, class_properties, class_getters, _ = slots_properties_getters_setters_from_class(type(self))
        if properties is True:
            properties = class_properties
        if getters is True:
            getters = class_getters
    else:
        # BUGFIX: always bind `slots` (empty when the object has no __slots__)
        # so the `keep` branch below cannot raise NameError.
        slots = slots_from_class(type(self)) if has_slots else ()
    if getters is False:
        getters = {}
    if extra_getters:
        getters.update(extra_getters)
    if split_dict_slots and (has_slots or properties or getters):
        state_dict = dict()
        state_slots = dict()
        splited_dict_slots = True
    else:
        # single shared dict: dict and slot entries end up in the same mapping
        state_dict = state_slots = dict()
        splited_dict_slots = False
    if keep is not None:
        for key in keep:
            if key in getters:
                state_slots[key] = _getattribute(getters[key])()
            # BUGFIX: guard with `properties and` — properties may be False and
            # `key in False` raises TypeError.
            elif properties and key in properties:
                # storing in state_slots lets pickle restore properties through
                # load_build's setattr loop on the slot-state part
                state_slots[key] = _getattribute(key)
            elif key in slots:
                if hasattr(self, key):
                    state_slots[key] = _getattribute(key)
            elif key in _dict:
                state_dict[key] = _dict[key]
            else:
                # also works for attributes only reachable via __getattr__()
                state_slots[key] = _getattribute(key)
    else:
        if remove is None:
            remove = set()
        if add is None:
            add = set()
        # get properties ------------------
        if properties:
            for key in properties:
                if key not in remove:
                    state_slots[key] = _getattribute(key)
        # get getters -------------
        if getters:
            for key, getter_name in getters.items():
                if type(key) is tuple:
                    # a single getter call returning several attribute values
                    result = _getattribute(getter_name)()
                    for k, value in zip(key, result):
                        if k is not None and k not in remove:
                            state_slots[k] = value
                elif key not in remove:
                    try:
                        state_slots[key] = _getattribute(getter_name)()
                    except TypeError:
                        # inspect sometimes fails to parse the getter signature
                        pass
        # get __slots__ attributes ----------------
        check_prop_getters = bool(state_slots)
        if has_slots:
            if remove or filter_ or check_prop_getters:
                for key in slots:
                    if (
                        key not in remove
                        and hasattr(self, key)
                        and ((key in add) or not (filter_ and key.startswith(filter_)))
                        and not (
                            check_prop_getters
                            and (key in state_slots or (key.startswith("_") and key[1:] in state_slots))
                        )
                    ):
                        state_slots[key] = _getattribute(key)
            else:
                for key in slots:
                    if hasattr(self, key):
                        state_slots[key] = _getattribute(key)
        # get __dict__ attributes -----------------
        if remove or filter_ or check_prop_getters:
            for key in _dict:
                if (
                    key not in remove
                    and ((key in add) or not (filter_ and key.startswith(filter_)))
                    and not (
                        check_prop_getters and (key in state_slots or (key.startswith("_") and key[1:] in state_slots))
                    )
                ):
                    state_dict[key] = _dict[key]
        else:
            if not state_slots or splited_dict_slots:
                # no copy — NOTE(review): later reorderings then mutate the live
                # __dict__ (historical behavior, kept as-is)
                state_dict = _dict
            else:
                state_dict.update(_dict)
    if remove_default_values is True:
        if default_values is None:
            default_values = default_state_from_class(type(self))
        for key, default_value in default_values.items():
            if key not in add:
                if key in state_dict:
                    if state_dict[key] == default_value:
                        if state_dict is _dict:
                            state_dict = _dict.copy()
                        del state_dict[key]
                elif splited_dict_slots and key in state_slots:
                    if state_slots[key] == default_value:
                        del state_slots[key]
    if sort_keys and state_dict:
        state_dict = sorted_dict(state_dict)
    if last_classes is not None:
        # BUGFIX: isinstance() requires a type or tuple of types (a set raises
        # TypeError), and the dict must not be mutated while iterating it —
        # take a snapshot of the keys to move first.
        if not isinstance(last_classes, tuple):
            last_classes = tuple(last_classes)
        for key in [k for k, v in state_dict.items() if isinstance(v, last_classes)]:
            state_dict[key] = state_dict.pop(key)  # move to the end
    if lasts is not None:
        for key in lasts:
            if key in state_dict:
                state_dict[key] = state_dict.pop(key)  # move to the end
    if splited_dict_slots and state_slots:
        if sort_keys:
            state_dict = sorted_dict(state_dict)
        if state_dict:
            # may fail later if self has no __dict__ and no __setstate__
            return state_dict, state_slots
        if state_slots:
            return None, state_slots
    return state_dict
# def getstate_factory(split_dict_slots = True, keep=None, add= None, remove=None, filter_="_", properties=False, getters=False, sort_keys = True, remove_default_values = False):
# return lambda self :getstate(self ,split_dict_slots = split_dict_slots, keep=keep, add= add, remove=remove, filter_=filter_, properties=properties, getters=getters, sort_keys = sort_keys, remove_default_values = remove_default_values)
def setstate(
    self,
    state,
    properties=False,
    setters=False,
    extra_setters=None,
    restore_default_values=False,
    default_values=None,
    order=None,
):
    """Generic __setstate__ implementation restoring the state of an object.

    Args:
        self: object instance to restore.
        state: dict of the state to restore, or a (dict_state, slots_state)
            2-tuple as produced by pickle (merged into one dict here).
        properties: False (none), True (all, guessed by introspection) or a
            list/tuple of property names restored with setattr().
        setters: False (none), True (guessed by introspection from setXxx /
            set_xxx / setxxx method names) or a dict {"attribute": "setter"},
            e.g. {"a": "setA", ("c", "d"): "setCD"}; tuple keys call
            multi-attribute setters. The dict form is fastest and finest.
        extra_setters: dict of extra setters, e.g. {"c": "restore_c"}; useful
            when setters is True and introspection misses some of them.
        restore_default_values: reset attributes absent from *state* to their
            default values. Useful when __init__() is not called and the state
            was encoded with remove_default_values=True.
        default_values: dict of {"attribute": default_value}, or None to
            discover defaults from a new no-argument instance of the class.
        order: None to restore in the state dict's key order, or a list/tuple
            giving the restoration order. An attribute belonging to a
            multi-attribute setter triggers that setter the first time one of
            its attributes occurs.

    Raises:
        Exception: if *state* is not a dict (nor a pickle-style 2-tuple), or
            if *order* does not cover every key left in *state*.
    """
    passed_dict_setters = isinstance(setters, dict)
    # pickle may hand us a (dict_state, slots_state) tuple instead of a dict
    # (won't come from json unless strict_pickle=True)
    if isinstance(state, tuple) and len(state) == 2:
        dict_state, state = state
        if dict_state:
            state.update(dict_state)
    if not type(state) is dict:
        raise Exception("try to restore object to a no dictionary state and without __setstate__ method")
    if restore_default_values:
        if default_values is None:
            default_values = default_state_from_class(type(self))
        for key, default_value in default_values.items():
            if key not in state:
                state[key] = default_value
    if setters is True or properties is True or hasattr(self, "__slots__"):
        slots, class_properties, _, class_setters = slots_properties_getters_setters_from_class(type(self))
        if setters is True:
            setters = class_setters
        if properties is True:
            properties = class_properties
        setattr_ = set(slots)  # slot attributes must be restored with setattr()
    else:
        if setters is False:
            setters = {}
        setattr_ = set()
    if extra_setters:
        setters.update(extra_setters)
    if properties:
        setattr_.update(properties)  # properties are restored with setattr() too
    if setters is False:
        setters = set()
    intern = sys.intern
    # map each member of a tuple-keyed (multi-attribute) setter to its tuple;
    # the tuple entry is replaced by False once the setter has been called
    attribut_to_multi_attributs = {}
    if passed_dict_setters or extra_setters:
        for attribut in setters:
            if isinstance(attribut, tuple):
                for attr in attribut:
                    attribut_to_multi_attributs[attr] = attribut
    if order is None:
        for attribut, value in state.items():
            if attribut in setattr_:
                setattr(self, attribut, value)  # works for __dict__ attributes, slots and properties
            elif attribut in attribut_to_multi_attributs:
                attributs = attribut_to_multi_attributs[attribut]
                if attributs:
                    values = [state[k] for k in attributs]
                    # BUGFIX: mark every member done (not only the current one),
                    # otherwise the setter was called once per member.
                    for attr in attributs:
                        attribut_to_multi_attributs[attr] = False
                    getattr(self, setters[attributs])(*values)
            elif attribut in setters:
                getattr(self, setters[attribut])(value)
            else:
                # intern keys: many identical attribute names across objects
                self.__dict__[intern(attribut)] = value
    else:
        for attribut in order:
            # BUGFIX: dict.popitem() takes no argument (TypeError); pop(key)
            # consumes the entry while keeping the leftover check below valid.
            value = state.pop(attribut)
            if attribut in setattr_:
                setattr(self, attribut, value)  # works for __dict__ attributes, slots and properties
            elif attribut in attribut_to_multi_attributs:
                attributs = attribut_to_multi_attributs[attribut]
                if attributs:
                    values = []
                    for k in attributs:
                        if k == attribut:
                            values.append(value)  # already popped: state[k] no longer exists
                        else:
                            values.append(state[k])
                    # BUGFIX: mark every member done (see order=None branch).
                    for attr in attributs:
                        attribut_to_multi_attributs[attr] = False
                    getattr(self, setters[attributs])(*values)
            elif attribut in setters:
                getattr(self, setters[attribut])(value)
            else:
                self.__dict__[intern(attribut)] = value
        if state:
            raise Exception(f"{list(state.keys())} are not in __setstate__ order parameter")
class Reference:
    """Wraps an object together with an optional supplementary string."""

    def __init__(self, obj, sup_str=""):
        self.sup_str = sup_str
        self.obj = obj
# --- INTERNAL ---------------------------------------
# --- Conversion Class <-> qualified name ------------------------
# Cache of qualified-name string -> class/callable; shares storage with the
# user-facing `constructors` dict so registered constructors short-circuit lookup.
class_from_class_str_dict = constructors
class_from_class_str_dict["base64.b64decode"] = lambda string_b64: b64decode(
    string_b64, validate=True
)  # pybase64 shortcut to speed up base64 decoding
def class_from_class_str(
    string,
):  # deliberately not cached, so class_from_class_str_dict can be patched by users
    """Resolve a qualified name like "module.sub.Class" to the object it names.

    Falls back to attribute lookup on the parent module when the last path
    component is a nested attribute rather than a module-level name, and to
    the builtins namespace when the string contains no dot.

    Raises:
        ModuleNotFoundError / AttributeError when the name cannot be resolved.
    """
    path_name = string.rsplit(".", 1)
    if len(path_name) == 2:
        path, name = path_name
        try:
            return getattr(import_module(path), name)
        except ModuleNotFoundError:
            # "package.Class.nested": import the parent module, walk attributes
            path_name2 = path.rsplit(".", 1)
            if len(path_name2) == 2:
                path2, name2 = path_name2
                return getattr(getattr(import_module(path2), name2), name)
            raise
    else:
        # BUGFIX: __builtins__ is a dict in imported modules but a *module* in
        # __main__, so indexing it was context-dependent; the builtins module
        # behaves consistently everywhere.
        import builtins

        return getattr(builtins, string)
@cached_one_arg_func
def module_str_from_class_str(class_str):
    """Return the longest dotted prefix of *class_str* that names an already
    imported module (an entry of sys.modules), or None when no prefix does."""
    prefix = class_str
    while True:
        cut = prefix.rfind(".")
        if cut == -1:
            return None
        prefix = prefix[:cut]
        if prefix in sys.modules:
            return prefix
@cached_one_arg_func
def class_str_from_class(class_):
    """Return the qualified-name string used to serialize *class_*.

    "__main__" is deliberately left as-is: remapping it would break resolution
    when the defining script is the one being executed, since that module
    still holds the class definitions.
    """
    module = class_.__module__
    if module is None:
        raise AttributeError(f"{class_.__name__}.__module__ is None")
    if module != "builtins":
        return f"{module}.{class_.__qualname__}"
    # builtin classes serialize as their bare qualname, except ModuleType
    # (whose __module__ is "builtins" but which must resolve via types).
    if class_ is types.ModuleType:
        return "types.ModuleType"
    return class_.__qualname__
# --- Introspection ------------------------------
@cached_one_arg_func
def default_state_from_class(class_):
    """Build a fresh no-argument instance of *class_* and return its state.

    Used to detect attribute values equal to their defaults (for stripping on
    dump or restoring on load); requires class_() to accept zero arguments.
    """
    fresh = class_()
    return getstate(
        fresh,
        split_dict_slots=False,
        properties=True,
        getters=True,
        sort_keys=False,
        remove_default_values=False,
    )
@cached_one_arg_func
def slots_properties_getters_setters_from_class(class_):
    """Introspect *class_* and return (slots, properties, getters, setters).

    - slots: every slot name declared anywhere in the MRO.
    - properties: names of data descriptors matching `property_types`.
    - getters: {"attribute": "getterName"} guessed from setter names.
    - setters: {"attribute": "setterName"} guessed from setXxx/set_xxx names.
    Attributes that turn out to be properties are removed from getters/setters.
    """
    slots = []
    properties = []
    getters = {}
    setters = {}
    property_types_ = tuple(property_types)  # isinstance() needs a tuple
    for base_class in class_.__mro__:
        # slots: getattr with a default because a base class may not
        # (re)define __slots__ at all; "__dict__" pseudo-slot is skipped
        for slot in getattr(
            base_class, "__slots__", ()
        ):
            if slot != "__dict__":
                slots.append(slot)
        for key, value in base_class.__dict__.items():
            if isinstance(value, property_types_):
                # only keep data descriptors (both __get__ and __set__)
                if hasattr(value, "__set__") and hasattr(value, "__get__"):
                    properties.append(key)
            elif (
                key.startswith("set") and len(key) > 3 and ismethod_methoddescriptor_or_function(value)
            ):
                # derive the attribute name from the setter name:
                # "set_x" -> "x", "setABC" -> "ABC", "setAbc" -> "abc"
                c = key[3]
                if c == "_":
                    attribut_name = key[4:]
                elif len(key) > 4 and key[4].isupper():
                    attribut_name = key[3:]
                else:
                    attribut_name = c.lower() + key[4:]
                if (
                    attribut_name not in setters
                ):  # set_x and setX may both exist in different base classes; keep the first found
                    setters[attribut_name] = key
                    # guess the matching getter: "x", "getX"/"get_x", "isX"
                    for getter_name in (attribut_name, "g" + key[1:], "is" + key[3:]):
                        getter_methode = getattr(base_class, getter_name, None)
                        if getter_methode is not None:
                            # NOTE(review): `getter_methode not in getters` compares a
                            # method object against the dict's attribute-name keys, so
                            # it is almost always True — `attribut_name not in getters`
                            # looks intended; confirm before changing.
                            if getter_methode not in getters and ismethod_methoddescriptor_or_function(
                                getter_methode
                            ):
                                getters[attribut_name] = getter_name
                                break
    # a property wins over a guessed getter/setter pair with the same name
    for property_ in properties:
        if property_ in setters:
            del setters[property_]
        if property_ in getters:
            del getters[property_]
    return slots, sorted(properties), sorted_dict(getters), sorted_dict(setters)
@cached_one_arg_func
def slots_from_class(class_):
    """Collect every slot name declared anywhere in the MRO of *class_*.

    getattr with a default is used because a base class may not define
    __slots__ at all; the "__dict__" pseudo-slot is skipped.
    """
    collected = []
    for klass in class_.__mro__:
        for name in getattr(klass, "__slots__", ()):
            if name != "__dict__":
                collected.append(name)
    return collected
@cached_one_arg_func
def setters_names_from_class(class_):
    """Map attribute names to setter-method names ("x" -> "setX" / "set_x").

    Walks the single-inheritance chain recursively via __base__; entries found
    on the class itself override those inherited from its base.
    """
    if class_.__base__ is None:
        setters = {}
    else:
        setters = setters_names_from_class(class_.__base__).copy()
    for name in class_.__dict__:
        if name.startswith("set") and len(name) > 3:
            tail = name[3]
            attribut_name = name[4:] if tail == "_" else tail.lower() + name[4:]
            setters[attribut_name] = name
    return setters
# --- Dump -------------------------------------------
# @profile
def tuple_from_instance(obj, protocol=4):
    """Return the 6-tuple (class_, initArgs, state, listitems, dictitems, newArgs) for *obj*.

    Similar to __reduce__ for new-style objects, but the second element may be
    None, meaning no __init__() call is needed at unpickling time.
    Lookup order: serializejson builtin plugins first; then (unless
    strict_pickle) serializejson plugins and the __serializejson__ method;
    finally pickle's reduce machinery.
    """

    def _padded(tuple_):
        # plugins may return fewer than 6 elements; pad the tail with None
        if len(tuple_) < 6:
            tuple_ += (None,) * (6 - len(tuple_))
        return tuple_

    # builtins plugins (active even when strict_pickle is True) ------
    tuple_from_type = serializejson_builtins.get(obj.__class__)
    if tuple_from_type is not None:
        return _padded(tuple_from_type(obj))
    if not serialize_parameters.strict_pickle:
        # user plugins ------
        tuple_from_type = serializejson_.get(obj.__class__)
        if tuple_from_type is not None:
            return _padded(tuple_from_type(obj))
        # __serializejson__ method ------
        tuple_from_type = getattr(obj, "__serializejson__", None)
        if tuple_from_type is not None:
            return _padded(tuple_from_type())
    # pickle cases --------------------------------
    reduced = reduce(obj, protocol)
    return tuple_from_reduce(*reduced, obj=obj)
def tuple_from_reduce(func, args, state=None, listitems=None, dictitems=None, obj=None):
    """Normalize a pickle reduce tuple into serializejson's 6-tuple.

    Returns (class_, initArgs, state, listitems, dictitems, newArgs) where
    initArgs are the arguments for __init__ and newArgs those for __new__.
    Outside strict_pickle mode, the state dict is additionally merged with the
    slots part, enriched with property/getter values (requires *obj*) and
    sorted/filtered for output.
    """
    initArgs = None
    newArgs = None
    if func is __newobj__ or func is __newobj_ex__:
        # __reduce_ex__/__reduce__ were most likely not overridden and come
        # from object's inherited implementations
        if func is __newobj__:
            class_ = args[0]
            if serialize_parameters.strict_pickle:
                if not hasattr(class_, "__new__"):
                    raise PicklingError("args[0] from __newobj__ args has no __new__")
                if obj is not None and class_ is not obj.__class__:
                    raise PicklingError("args[0] from __newobj__ args has the wrong class")
            if len(args) > 1:
                newArgs = args[1:]  # reduce not reimplemented: initArgs must not be used
        else:  # func is __newobj_ex__
            class_, new_largs, new_kwargs = args
            if serialize_parameters.strict_pickle:
                if not hasattr(class_, "__new__"):
                    raise PicklingError("args[0] from {} args has no __new__".format(getattr(func, "__name__", "")))
                if obj is not None and class_ is not obj.__class__:
                    raise PicklingError(
                        "args[0] from {} args has the wrong class".format(getattr(func, "__name__", ""))
                    )
            if new_largs:
                # fold positional new_largs into new_kwargs by parameter name
                # NOTE(review): iterating new_largs directly yields values, not
                # (index, value) pairs — enumerate(new_largs) looks intended;
                # confirm before relying on this path.
                new_parameters_names = list(signature(class_.__new__).parameters)
                for index, new_arg in new_largs:
                    new_kwargs[new_parameters_names[index]] = new_arg
            newArgs = new_kwargs
        if not serialize_parameters.strict_pickle:
            if type(state) is tuple:
                # CAUTION: nothing guarantees a custom 2-tuple state wasn't
                # intended; assume the pickle (dict, slots) convention only
                # when the class defines neither __getstate__ nor __setstate__
                if not class_has_method(class_, "__getstate__") or not class_has_method(class_, "__setstate__"):
                    __dict__, state = state
                    if __dict__:
                        state.update(__dict__)  # merge slots and __dict__ into a single dict
            if type(state) is dict:
                if not class_has_method(class_, "__getstate__"):
                    # __reduce_ex__ already collected slots and __dict__ attributes;
                    # add properties and getters values (needs obj)
                    _getters = serialize_parameters.getters
                    if _getters is True:
                        _getters = getters.get(class_, True)
                    elif type(_getters) is dict:
                        _getters = _getters.get(class_, False)
                    _properties = serialize_parameters.properties
                    if _properties is True:
                        _properties = properties.get(class_, True)
                    elif type(_properties) is dict:
                        _properties = _properties.get(class_, False)
                    if _getters is True or _properties is True:
                        (
                            class_slots,
                            class_properties,
                            class_getters,
                            class_setters,
                        ) = slots_properties_getters_setters_from_class(class_)
                    _getattribute = obj.__getattribute__
                    _dict = getattr(obj, "__dict__", None)
                    if _getters is True:
                        _getters = class_getters
                    if _properties is True:
                        _properties = class_properties
                    if _properties:
                        if state is _dict:
                            state = state.copy()  # don't mutate the live __dict__
                        for key in _properties:
                            if "_" + key in state:
                                del state["_" + key]
                            # storing the property value lets pickle restore it
                            # through load_build's setattr on the slot state
                            state[key] = _getattribute(
                                key
                            )
                    if _getters:
                        if state is _dict:
                            state = state.copy()
                        for key, getter_name in _getters.items():
                            try:
                                state[key] = _getattribute(getter_name)()
                            except TypeError:
                                pass
                    # (default-value stripping removed here: it would require
                    # calling __init__ and thus a reimplemented reduce())
                    attributes_filter = serialize_parameters.attributes_filter
                    if type(attributes_filter) is set:
                        attributes_filter = class_ in attributes_filter
                    state = sorted_filtered(state, attributes_filter)
    elif func is apply:
        class_, initLargs, initArgs = args
    else:
        class_ = func
        initArgs = args
    if dictitems and not isinstance(dictitems, dict):
        dictitems = dict(dictitems)
    if listitems and not isinstance(listitems, list):
        listitems = list(listitems)
    return class_, initArgs, state, listitems, dictitems, newArgs
def reduce(obj, protocol):
    """Return pickle's reduce tuple for *obj*.

    Lookup order mirrors pickle.Pickler.save(): copyreg's dispatch_table
    first, then obj.__reduce_ex__(protocol), then obj.__reduce__(). In
    strict_pickle mode the result is validated like pickle does.

    Raises:
        PicklingError / ValueError on unsupported or malformed reduce results.
    """
    # Check private dispatch table if any, or else copyreg.dispatch_table
    reduce_func = dispatch_table.get(obj.__class__)
    if reduce_func is not None:
        reduced = reduce_func(obj)
    else:
        # Check for a __reduce_ex__ method, fall back to __reduce__
        reduce_func = getattr(obj, "__reduce_ex__", None)
        if reduce_func is not None:
            reduced = reduce_func(protocol)  # calls __getstate__() when __reduce__() was not reimplemented
        else:
            reduce_func = getattr(obj, "__reduce__", None)
            if reduce_func is not None:
                reduced = reduce_func()
            else:
                raise PicklingError("Can't pickle %r object: %r" % (type(obj).__name__, obj))
    if isinstance(reduced, str):
        # TODO: revisit — pickle treats a str result as "save as global"
        raise ValueError("{} reduce's methode return a string. It is not yet suported by serializejson")
    if serialize_parameters.strict_pickle:
        # Check for string returned by reduce(), meaning "save as global"
        # NOTE(review): unreachable — the ValueError above already rejected
        # every str result; confirm the intended precedence.
        if isinstance(reduced, str):
            return reduced
        # Assert that reduce_func() returned a tuple
        if not isinstance(reduced, tuple):
            raise PicklingError("%s must return string or tuple" % reduce_func)
        # Assert that it returned an appropriately sized tuple
        if not (2 <= len(reduced) <= 5):
            raise PicklingError("Tuple returned by %s must have " "two to five elements" % reduce_func)
        if not callable(reduced[0]):
            raise PicklingError(
                f"first element returned by {obj.__class__.__name__}.__reduce_ex__() or {obj.__class__.__name__}.__reduce__() must be callable"
            )
    return reduced
def generic__reduce_ex__(self, protocol=4):
    """Fallback __reduce_ex__ building (callable, args, state, listitems, dictitems).

    Defers to a genuine __reduce_ex__ / __reduce__ override when the class has
    one; otherwise assembles the state from __getstate__ / __dict__ /
    __slots__ and the Mapping / MutableSequence container protocols.
    """
    cls = self.__class__
    if hasattr(self, "__reduce_ex__") and cls.__reduce_ex__ is not object.__reduce_ex__:
        return self.__reduce_ex__(protocol)
    if hasattr(self, "__reduce__") and cls.__reduce__ is not object.__reduce__:
        return self.__reduce__()
    listitems = dictitems = None
    if hasattr(self, "__getstate__"):
        state = self.__getstate__()
    elif hasattr(self, "__slots__"):
        # pickle convention: (dict_state, slots_state) 2-tuple
        slot_values = {
            name: self.__getattribute__(name)
            for name in slots_from_class(type(self))
            if hasattr(self, name)
        }
        state = (getattr(self, "__dict__", None), slot_values)
    else:
        state = getattr(self, "__dict__", None)
    if isinstance(self, Mapping):
        dictitems = iter(self.items())
    if isinstance(self, MutableSequence):
        listitems = iter(self)
    if hasattr(self, "__getnewargs_ex__"):
        largs, kwargs = self.__getnewargs_ex__()
        return __newobj_ex__, (cls, largs, kwargs), state, listitems, dictitems
    if hasattr(self, "__getnewargs__"):
        return __newobj__, (cls, *self.__getnewargs__()), state, listitems, dictitems
    return __newobj__, (cls,), state, listitems, dictitems
def _onlyOneDimSameTypeNumbers(list_or_tuple):
    """True when the sequence is non-empty and every element has exactly the
    same numeric/bool type as the first element; False otherwise."""
    if not len(list_or_tuple):
        return False
    first_type = type(list_or_tuple[0])
    if first_type not in _bool_int_and_float_types:
        return False
    return all(type(item) is first_type for item in list_or_tuple)
def _onlyOneDimNumbers(list_or_tuple):
    """True when the sequence is non-empty and contains only bool/int/float
    scalars (including numpy scalar types when numpy is available)."""
    if not len(list_or_tuple):
        return False
    return all(type(item) in _bool_int_and_float_types for item in list_or_tuple)
# Scalar types recognized by the _onlyOneDim* helpers above. When numpy is
# importable, its fixed-width scalar types are included so sequences of numpy
# scalars can be serialized as plain JSON number lists.
if use_numpy:
    _bool_int_and_float_types = set(
        (
            float,
            int,
            bool,
            numpy.bool_,
            numpy.int8,
            numpy.int16,
            numpy.int32,
            numpy.int64,
            numpy.uint8,
            numpy.uint16,
            numpy.uint32,
            numpy.uint64,
            numpy.float32,
            numpy.float64,
        )
    )
else:
    _bool_int_and_float_types = set(
        (
            float,
            int,
            bool,
        )
    )
# --- Load -------------------------------------------------------
def const(self):
    """Return the registered constant value for the const string *self*.

    Raises KeyError when the string was never registered in `consts`.
    """
    return consts[self]
    # raise ValueError(f"{self} : {value} don't seems to be a const value")
# @profile
def instance(
    __class__=object, __init__=None, __state__=None, __new__=None, __initArgs__=None, __items__=None, **argsSup
):
    """Create (or complete) an instance of an object.

    Accepted call forms:
        instance(dictionary)
        instance(**dictionary)
        instance(class_, __init__, __state__)
        instance(class_, __init__, **attributesDict)
        instance(class_(*__init__), __state__)
        instance(class_(*__init__), **attributesDict)
        instance(__class__=..., __init__=..., attribute1=..., attribute2=...)
    """
    if __initArgs__ is not None:
        __init__ = __initArgs__  # backward compatibility with old json files
    inst = None
    if type(__class__) is str:
        # `==` used on purpose: `is` breaks with array.array('i', [1, 2, 3])
        # and `==` cannot compare numpy.dtype() safely the other way round
        if (
            __class__ == "type"
        ):
            if __init__ == "NoneType":
                return type(None)
            elif __init__:
                return class_from_class_str(__init__)
            else:
                return type
        try:
            # fast path: fetch the class directly from the string cache
            class_ = class_from_class_str_dict[__class__]
        except:
            # cache miss — NOTE(review): bare except; presumably meant to catch
            # KeyError only, confirm before narrowing
            class_ = class_from_class_str_dict[__class__] = class_from_class_str(__class__)
    else:
        if type(__class__) is dict:
            # a dictionary was passed directly as first argument
            return instance(**__class__)
        elif isclass(__class__):
            class_ = __class__
        elif isInstance(__class__):  # happens with serializeRepr
            inst = __class__
            class_ = inst.__class__
        else:
            # runtime string kept verbatim (French) — changing it would alter behavior
            raise Exception(
                "erreure lors de la creation d'instance le premier parametre de Instance() n'est ni une classe , ni string representant un classe , ni une instance, ni un dictionnaire, ni un callable (fonction)"
            )
    if inst is None:
        if __new__ is not None or __init__ is None:
            __new__type = type(__new__)
            if __new__type in (list, tuple):
                inst = class_.__new__(class_, *__new__)
            elif __new__type is dict:
                inst = class_.__new__(class_, **__new__)
            elif __new__ is not None:
                inst = class_.__new__(class_, __new__)  # when braces have been removed during serialization
            else:
                inst = class_.__new__(class_)  # __init__ is None
            # never happens with pickle; can happen with a __serializejson__
            # method or a plugin
            if (
                __init__ is not None
            ):
                __init__type = type(__init__)
                if __init__type in (list, tuple):
                    inst.__init__(*__init__)
                elif __init__type is dict:
                    inst.__init__(**__init__)
                else:
                    inst.__init__(__init__)  # when braces have been removed during serialization
        elif __init__ is not None:
            __init__type = type(__init__)
            if __init__type in (list, tuple):
                inst = class_(*__init__)
            elif __init__type is dict:
                inst = class_(**__init__)
            else:
                inst = class_(__init__)  # when braces have been removed during serialization
    if __items__:
        # container content: dict-like gets update(), sequence-like extend()
        try:
            inst.update(__items__)
        except:
            inst.extend(__items__)
    if argsSup:
        __state__ = argsSup
    if __state__:
        if hasattr(inst, "__setstate__"):
            # hasattr (not hasMethod) needed: sklearn.tree._tree.Tree exposes
            # __setstate__ without it being detected as a method
            inst.__setstate__(__state__)
        else:
            _setters = serialize_parameters.setters
            if _setters is True:
                _setters = setters.get(class_, True)
            elif type(_setters) is dict:
                _setters = _setters.get(class_, False)
            _properties = serialize_parameters.properties
            if _properties is True:
                _properties = properties.get(class_, True)
            elif type(_properties) is dict:
                _properties = _properties.get(class_, False)
            setstate(inst, __state__, setters=_setters, properties=_properties)
    return inst
valid_char_for_var_name = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_")
def from_name(path, accept_dict_as_object=False, **variables):
    """Evaluate a qualified-name expression and return the value it refers to.

    Works like ``eval`` but restricted (and therefore safer): only attribute
    qualification with "." and indexing with [] are accepted.
    WARNING: this function has not been fully tested; tests should be written!
    examples :
        variable.attribute
        variable["key"]
        variable['key']
        variable[variable2]
        variable.attribute.attribute
        variable.attribute["key"]
        variable.attribute['key']
        variable.attribute[variable2]
        variable["key"].attribute
        variable["key"]["key"]
        variable["key"]['key']
        variable["key"][variable2]
    a priori it does NOT work with :
        variable[variable2.attribute]
    """
    # return(ast.literal_eval(path))
    # return(eval(path,{},variables))
    # current = root
    # Character-by-character parser state:
    current = None  # value resolved so far along the path
    in_simple_quotes = False  # inside '...' within []
    in_double_quotes = False  # inside "..." within []
    in_squares = False  # inside [...]
    # in_curly = False
    is_first = True  # first path element has not been resolved yet
    # is_var = False
    in_var = False  # current element is a bare identifier
    in_attribute = False  # currently reading an attribute name after "."
    first_ch_of_square = False  # next char is the first one inside []
    backslash_escape = False  # previous char was a backslash inside quotes
    element_chars = []  # accumulator for the current element's characters
    for i, ch in enumerate(path):
        if in_squares:
            if first_ch_of_square:
                first_ch_of_square = False
                # if ch == "{":
                #     in_curly = True
                #     is_var = True
                # el
                in_double_quotes = False
                in_simple_quotes = False
                in_number = False
                in_var = False
                # The first character inside [] decides how the index is read:
                # quoted string key, integer index, or variable name.
                if ch == '"':  # "
                    in_double_quotes = True
                elif ch == "'":
                    in_simple_quotes = True
                elif ch.isdigit():
                    in_number = True
                    element_chars.append(ch)
                else:
                    in_var = True
                    element_chars.append(ch)
                    # raise Exception("%s is not a valid path in json")
            else:
                # if in_curly:
                #     if ch == "}":
                #         in_curly = False
                #     else:
                #         element_chars.append(ch)
                # el
                if in_number:
                    if ch.isdigit():
                        element_chars.append(ch)
                    elif ch == "]":
                        in_squares = False
                        if element_chars:
                            # Integer index: apply it to the current value.
                            index = int("".join(element_chars))
                            current = current[index]
                            is_first = False
                            element_chars = []
                        else:
                            raise Exception("%s is not a valid path in json")
                elif in_simple_quotes:
                    if backslash_escape:
                        # we must have just seen a backslash; reset that flag and continue
                        # NOTE(review): the escaped character itself is dropped, not
                        # appended -- confirm whether escapes should be preserved.
                        backslash_escape = False
                    elif ch == "\\":  # \
                        backslash_escape = True  # we are in a quote and we see a backslash; escape next char
                    elif ch == "'":
                        in_simple_quotes = False
                    else:
                        element_chars.append(ch)
                elif in_double_quotes:
                    if backslash_escape:
                        # we must have just seen a backslash; reset that flag and continue
                        backslash_escape = False
                    elif ch == "\\":  # \
                        backslash_escape = True  # we are in a quote and we see a backslash; escape next char
                    elif ch == '"':
                        in_double_quotes = False
                    else:
                        element_chars.append(ch)
                elif ch == "]":
                    # Closing a string-key or variable index.
                    if element_chars:
                        key = "".join(element_chars)
                        if in_var:
                            # Resolve the variable name from the supplied
                            # variables, falling back to builtins.
                            # NOTE(review): __builtins__ is a dict only when this
                            # module is imported; when run as a script it is a
                            # module and indexing it would fail -- confirm.
                            key = variables[key] if key in variables else __builtins__[key]
                        current = current[key]
                        # is_first = False
                        element_chars = []
                    else:
                        raise Exception("%s is not a valid path in json")
                    in_squares = False
                    in_var = False
                elif in_var:
                    if ch in valid_char_for_var_name:
                        element_chars.append(ch)
                    else:
                        raise Exception("%s is not a valid path in json")
                # elif in_curly:
                #     if ch == '}':
                #         in_curly = False
                #     else :
                #         element_chars.append(ch)
                # elif ch == '{':
                #     in_curly = True
                #     is_curly = True
        elif ch == "[":
            # Opening an index: first resolve whatever element preceded it.
            # is_var = False
            if element_chars:
                element = "".join(element_chars)
                if is_first:
                    if in_var:
                        current = variables[element] if element in variables else __builtins__[element]
                    else:
                        raise Exception("firts element of path must be a name_of_variable")
                    is_first = False
                else:
                    if in_var:
                        element = variables[element] if element in variables else __builtins__[element]
                    current = _getattr(
                        current, element, accept_dict_as_object
                    )  # works with slots and properties, but not getters
                element_chars = []
            in_squares = True
            in_var = False
            first_ch_of_square = True
        elif ch == ".":
            # Attribute access: resolve the preceding element first.
            if element_chars:
                element = "".join(element_chars)
                if is_first:
                    if in_var:
                        current = variables[element] if element in variables else __builtins__[element]
                    else:
                        raise Exception("firts element of path must be a name_of_variable")
                    is_first = False
                else:
                    if in_var:
                        element = variables[element] if element in variables else __builtins__[element]
                    current = _getattr(
                        current, element, accept_dict_as_object
                    )  # works with slots and properties, but not getters
                element_chars = []
            in_var = False
            in_attribute = True
        elif in_attribute:
            element_chars.append(ch)
        else:
            element_chars.append(ch)
            in_var = True
    if element_chars:  # we are on the last element of the path
        element = "".join(element_chars)
        if is_first:
            if in_var:
                current = variables[element] if element in variables else __builtins__[element]
            else:
                raise Exception("firts element of path must be a name_of_variable")
        else:
            if in_var:
                element = variables[element] if element in variables else __builtins__[element]
            current = _getattr(
                current, element, accept_dict_as_object
            )  # works with slots and properties, but not getters
    return current
def _getattr(obj, attribut, accept_dict_as_object):
if accept_dict_as_object and type(obj) is dict and "__class__" in obj:
return obj[attribut]
else:
try:
return getattr(obj, attribut) # permet de marcher avec slot et properties,mais pas getters
except:
for methode in [f"get{attribut}", f"get_{attribut}", f"get{attribut[0].upper()}{attribut[1:]}"]:
if hasattr(obj, methode):
return getattr(obj, methode)()
raise
def _get_getters(extra_getters):
if isinstance(extra_getters, bool):
return extra_getters
else:
getters_ = getters.copy()
if isinstance(extra_getters, dict):
for class_, class_getters in extra_getters.items():
getters_[class_] = class_getters
if isinstance(extra_getters, (list, set, tuple)):
for class_ in extra_getters:
getters_[class_] = True
elif extra_getters is not None:
raise TypeError(
"Encoder getters argument must be None, bool, list, tuple, set or dict, not '%s'" % type(extra_getters)
)
return getters_
def _get_setters(extra_setters):
if isinstance(extra_setters, bool):
return extra_setters
elif extra_setters is None:
return setters
else:
setters_ = setters.copy()
if isinstance(extra_setters, dict):
for class_, class_setters in extra_setters.items():
setters_[class_] = class_setters
if isinstance(extra_setters, (list, set, tuple)):
for class_ in extra_setters:
setters_[class_] = True
else:
raise TypeError(
"Decoder setters argument must be None, bool, list, tuple, set or dict, not '%s'" % type(extra_setters)
)
return setters_
def _get_properties(extra_properties):
if isinstance(extra_properties, bool):
return extra_properties
elif extra_properties is None:
return properties
else:
properties_ = properties.copy()
if isinstance(extra_properties, dict):
for class_, class_properties in extra_properties.items():
properties_[class_] = class_properties
if isinstance(extra_properties, (list, set, tuple)):
for class_ in extra_properties:
properties_[class_] = True
else:
raise TypeError(
"Decoder properties argument must be None, bool, list, tuple, set or dict, not '%s'"
% type(extra_properties)
)
return properties_
# --- Import of plugins -------------------------------------------------------
from . import plugins # needed for plugin regord
| 53,243 | 14,677 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0189-Rotate-Array.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-02
=================================================================="""
import sys
import time
from typing import List
"""
LeetCode - 0189 - (Medium) - Rotate Array
https://leetcode.com/problems/rotate-array/
Description:
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: nums = [1,2,3,4,5,6,7], k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: nums = [-1,-100,3,99], k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Constraints:
    1 <= nums.length <= 10^5
    -2^31 <= nums[i] <= 2^31 - 1
    0 <= k <= 10^5
Follow up:
Try to come up with as many solutions as you can.
There are at least three different ways to solve this problem.
Could you do it in-place with O(1) extra space?
"""
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """Rotate `nums` to the right by `k` steps, in place.

        Do not return anything, modify nums in-place instead.
        """
        # Nothing to do for non-lists, empty/single-element lists, or k <= 0.
        if not isinstance(nums, list) or len(nums) <= 1 or k <= 0:
            return
        self._rotate_double_reverse(nums, k)

    def _rotate_double_reverse(self, nums: List[int], k: int) -> None:
        """Rotate via three reversals. O(n) time, O(1) extra space.

        Reversing the whole list, then reversing the first k elements and the
        remaining len(nums) - k elements separately, yields the right-rotated
        array.
        """
        len_num = len(nums)
        if k > len_num:
            k %= len_num  # avoid unnecessary full rotations
        nums.reverse()
        self._reverse_list_in_place(nums, 0, k - 1)
        self._reverse_list_in_place(nums, k, len_num - 1)

    @staticmethod
    def _reverse_list_in_place(nums: List[int], start_index: int, end_index: int) -> None:
        """Reverse nums[start_index:end_index + 1] in place."""
        while start_index < end_index:
            nums[start_index], nums[end_index] = nums[end_index], nums[start_index]
            start_index += 1
            end_index -= 1
def main():
    """Run Solution.rotate on a sample input and report the answer and timing."""
    # Example 1: Output: [5,6,7,1,2,3,4]
    nums = [1, 2, 3, 4, 5, 6, 7]
    k = 3
    # Example 2: Output: [3,99,-1,-100]
    # nums = [-1, -100, 3, 99]
    # k = 2
    # Example 3: Output: [4, 5, 6, 1, 2, 3]
    # nums = [1, 2, 3, 4, 5, 6]
    # k = 3
    solver = Solution()
    # Time the in-place rotation.
    start = time.process_time()
    solver.rotate(nums, k)
    ans = nums
    end = time.process_time()
    # show answer
    print('\nAnswer:')
    print(ans)
    # show time consumption
    print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
    sys.exit(main())
| 5,112 | 1,928 |
'''
One fcn returns the first nonrepeated character from string, s.
The other returns the first repeated character from the string.
'''
def print_dict(d):
    """Pretty-print each key/value pair of d, one per indented line."""
    for key, value in d.items():
        print('\t', key, '=> ', value)
def char_count_dict(s):
    """Return a mapping from each character of s to its number of occurrences.

    Uses collections.Counter, the standard-library tool for frequency
    counting.  Counter is a dict subclass, so callers that expect a plain
    dict keep working; as a bonus, looking up an absent character now yields
    0 instead of raising KeyError.
    """
    from collections import Counter
    return Counter(s)
def first_nonrepeated_char(s, d):
    """Return the first character of s whose count in d is exactly 1, else None."""
    return next((ch for ch in s if d[ch] == 1), None)
def first_repeated_char(s):
    """Return the first character that appears a second time in s, else None."""
    seen = set()
    for ch in s:
        if ch in seen:
            return ch
        seen.add(ch)
    return None
#s = 'Where have I heard that?'
#s = 'Wherefore art thou Romeo?'
# Demo driver: count the characters of the sample sentence (case-folded) and
# show its first non-repeated and first repeated characters.
s = 'Grilled cheeses are great with mustard.'
s = s.lower()
d = char_count_dict(s)
print('\n string: ', s)
print('\n\t 1st nonrepeated char: ', first_nonrepeated_char(s, d))
print('\t 1st repeated char: ', first_repeated_char(s), '\n')
| 921 | 334 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiCateringPosDishcateTransferModel(object):
    """Request model for moving dishes between categories at a POS shop."""

    def __init__(self):
        self._cate_id = None
        self._cook_id = None
        self._dish_ids = None
        self._shop_id = None

    @property
    def cate_id(self):
        return self._cate_id

    @cate_id.setter
    def cate_id(self, value):
        self._cate_id = value

    @property
    def cook_id(self):
        return self._cook_id

    @cook_id.setter
    def cook_id(self, value):
        self._cook_id = value

    @property
    def dish_ids(self):
        return self._dish_ids

    @dish_ids.setter
    def dish_ids(self, value):
        # Only list inputs are accepted; the list is shallow-copied.
        if isinstance(value, list):
            self._dish_ids = [item for item in value]

    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict."""
        params = dict()
        for field_name in ('cate_id', 'cook_id', 'dish_ids', 'shop_id'):
            value = getattr(self, field_name)
            if not value:
                continue
            if field_name == 'dish_ids' and isinstance(value, list):
                # Serialize any nested model elements in place, as before.
                for index, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[index] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[field_name] = value.to_alipay_dict()
            else:
                params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; return None for a falsy input."""
        if not d:
            return None
        o = KoubeiCateringPosDishcateTransferModel()
        for field_name in ('cate_id', 'cook_id', 'dish_ids', 'shop_id'):
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
| 2,652 | 926 |
from django.contrib import admin
# Register your models here.
from user_manager_app.models import Attendance, Support
# Expose both user-manager models in the Django admin site.
for model in (Attendance, Support):
    admin.site.register(model)
| 181 | 50 |
import torch
import torchvision
import torchvision.transforms as transforms
import albumentations
import numpy as np
# from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets , transforms
import torchvision
from Wrapper_EVAI_Pytorch.dataloader import albumentation as A
from Wrapper_EVAI_Pytorch.utils.helper import *
from Wrapper_EVAI_Pytorch.utils.gradcam import *
from Wrapper_EVAI_Pytorch.utils.plot_metrics import *
from Wrapper_EVAI_Pytorch.utils.test import *
from Wrapper_EVAI_Pytorch.utils.train import *
from Wrapper_EVAI_Pytorch.models import resnet
class main():
    """Convenience wrapper around CIFAR-10 data loading, model setup,
    training/evaluation and Grad-CAM based inspection.
    """

    def __init__(self, device):
        # CIFAR-10 class labels, in label-index order.
        self.classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
        self.device = device
        # Per-epoch metric histories, filled in by train_model().
        self.train_losses = []
        self.test_losses = []
        self.train_accuracy = []
        self.test_accuracy = []
        self.plot_train_acc = []
        self.lrs = []

    def dataloading_aubumentation(self, mean, std, batch_size):
        """Build augmented CIFAR-10 train/test datasets and data loaders."""
        albu_obj = A.CIFAR10Albumentation()
        train_transform = albu_obj.train_transform(mean, std)
        test_transform = albu_obj.test_transform(mean, std)
        trainset = torchvision.datasets.CIFAR10(root='/content', train=True, download=True, transform=train_transform)
        testset = torchvision.datasets.CIFAR10(root='/content', train=False, download=True, transform=test_transform)
        self.train_dataloader = torch.utils.data.DataLoader(trainset, num_workers=2, shuffle=True, batch_size=batch_size)
        self.test_dataloader = torch.utils.data.DataLoader(testset, num_workers=2, shuffle=True, batch_size=batch_size)
        self.trainset = trainset
        self.testset = testset

    def show_augmented_img(self, no_of_images):
        """Plot `no_of_images` augmented training images."""
        helper.plot_images(self.trainset, no_of_images, self.classes)

    def model(self, model_name, set_seed_no, show_summery):
        """Create the requested network, optionally seeding and printing a summary.

        Raises:
            ValueError: if `model_name` is not a supported architecture.
        """
        if model_name == 'resnet34':
            net = resnet.ResNet34()
        else:
            # Bug fix: an unsupported name previously crashed later with
            # UnboundLocalError on `net`; fail with an explicit error instead.
            raise ValueError("unsupported model_name: %r (expected 'resnet34')" % (model_name,))
        self.net = net
        if set_seed_no is not None:
            set_seed(set_seed_no, True)
        if show_summery:
            model_summary(self.net, (3, 32, 32))
        return net

    def train_model(self, optimizer, epochs, lam_reg, schedular, criterian, show_plots=True):
        """Run `epochs` of training and evaluation, optionally plotting metrics."""
        for epoch in range(epochs):
            train(self.net, self.device, self.train_dataloader, optimizer, epoch, self.train_accuracy, self.train_losses, lam_reg, schedular, criterian, self.lrs)
            test(self.net, self.device, self.test_dataloader, self.test_accuracy, self.test_losses, criterian)
        if show_plots:
            plot_metrics([self.train_accuracy, self.train_losses, self.test_accuracy, self.test_losses])
            conf_matrix = compute_confusion_matrix(self.net, self.test_dataloader, self.device)
            plot_confusion_matrix(conf_matrix)

    def examination(self, no_of_images):
        """Show Grad-CAM visualizations for the first 10 misclassified test images."""
        wrong_pred = wrong_predictions(self.net, self.test_dataloader, no_of_images, self.device, self.classes)
        target_layers = ["layer1", "layer2", "layer3", "layer4"]
        gradcam_output, probs, predicted_classes = generate_gradcam(wrong_pred[:10], self.net, target_layers, self.device)
        plot_gradcam(gradcam_output, target_layers, self.classes, (3, 32, 32), predicted_classes, wrong_pred[:10])
| 3,557 | 1,274 |
"""CveDetail Class"""
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
class CveDetails(HaloEndpoint):
    """Endpoint wrapper for CVE detail lookups.

    Args:
        session (:class:`cloudpassage.HaloSession`): This will define how you
            interact with the Halo API, including proxy settings and API keys
            used for authentication.

    Keyword args:
        endpoint_version (int): Endpoint version override.
    """

    objects_name = "cve_details"
    default_endpoint_version = 1

    def endpoint(self):
        """Return endpoint for API requests."""
        return "/v{}/{}".format(self.endpoint_version, self.objects_name)

    def describe(self, cve_id):
        """Return complete information on one Common Vulnerability and
        Exposure (CVE), as defined by the National Institute of Standards
        and Technology (NIST).

        Args:
            cve_id (str): CVE number

        Returns:
            dict: Dictionary object describing the details of the
                Common Vulnerability and Exposure specified by CVE number.
        """
        helper = HttpHelper(self.session)
        return helper.get("%s/%s" % (self.endpoint(), cve_id))

    def list_all(self):
        """Not implemented for this object."""
        raise NotImplementedError

    def create(self):
        """Not implemented for this object."""
        raise NotImplementedError

    def delete(self):
        """Not implemented for this object."""
        raise NotImplementedError

    def update(self):
        """Not implemented for this object."""
        raise NotImplementedError
| 1,733 | 459 |
import pytest
import tempfile
import shutil
from baldir_markdown_lib import read_source_file, parse_source_listing_start, import_code_snippet, format_markdown_snippet, split_against_source_listing_tags, pre_process_markdown_file_in_place, pre_process_markdown_text, pre_process_markdown_file_to_string, verify
def test_verify_mismatch_after_pre_processing():
    """verify() must report False for a file whose snippet is not inlined yet."""
    assert verify('markdown-sample-without-snippet.md') == False
def test_verify_match_after_pre_processing():
    """verify() must report True for a file whose snippet is already inlined."""
    assert verify('markdown-sample.md') == True
def test_pre_process_markdown_file_in_place():
    """pre_process_markdown_file_in_place must inline the snippet into the file itself."""
    md_temp_file_path = shutil.copy(
        'markdown-sample-without-snippet.md', tempfile.mkdtemp()+'/markdown-sample.md')
    pre_process_markdown_file_in_place(md_temp_file_path)
    # Use a context manager so the file handle is closed even if the read fails.
    with open(md_temp_file_path) as file_pre_processed:
        text_pre_processed = file_pre_processed.read()
    assert text_pre_processed == """Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.
<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>
```java
    System.out.println("Hello world");
```
<sourceListingEnd/>
end"""
def test_pre_process_markdown_file_to_string():
    """Pre-processing to a string must inline the snippet without mutating the file."""
    md_temp_file_path = shutil.copy(
        'markdown-sample-without-snippet.md', tempfile.mkdtemp()+'/markdown-sample.md')
    assert pre_process_markdown_file_to_string(md_temp_file_path) == """Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.
<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>
```java
    System.out.println("Hello world");
```
<sourceListingEnd/>
end"""
def test_pre_process_markdown_text():
    """Pre-processing raw markdown text must insert the fenced code snippet."""
    markdown_text = """Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.
<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>
<sourceListingEnd/>
end"""
    result_as_string = pre_process_markdown_text(markdown_text)
    print(result_as_string)
    assert result_as_string == """Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.
<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>
```java
    System.out.println("Hello world");
```
<sourceListingEnd/>
end"""
def test_read_source_file():
    """read_source_file must return the raw markdown file contents."""
    result = read_source_file('./markdown-sample.md')
    assert result == 'Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.\n\n<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>\n```java\n    System.out.println("Hello world");\n```\n<sourceListingEnd/>\n\nend'
def test_parse_source_listing_start():
    """The start tag's attributes must be parsed into a dict."""
    result = parse_source_listing_start(
        '<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>')
    assert result == {'from': '5', 'lang': 'java',
                      'source': './MyJavaFile.java', 'to': '5'}
def test_import_code_snippet_one_line():
    """A from == to range imports exactly one source line."""
    code_snippet = import_code_snippet(
        {'from': '5', 'lang': 'java', 'source': './MyJavaFile.java', 'to': '5'})
    assert code_snippet == '    System.out.println("Hello world");'
def test_import_code_snippet_range():
    """A multi-line range keeps the source's own newlines."""
    code_snippet = import_code_snippet(
        {'from': '4', 'lang': 'java', 'source': './MyJavaFile.java', 'to': '5'})
    assert code_snippet == '\n    System.out.println("Hello world");'
def test_import_code_snippet_whole_file():
    """A range spanning the whole file imports every line."""
    code_snippet = import_code_snippet(
        {'from': '1', 'lang': 'java', 'source': './MyJavaFile.java', 'to': '9'})
    assert code_snippet == """public class MyJavaFile {
    public static void main(String[] args){
        System.out.println("Hello world");
    }
}"""
def test_format_markdown_snippet():
    """The imported snippet must be wrapped in a fenced block tagged with the language."""
    formatted_snippet = format_markdown_snippet(
        {'from': '1', 'lang': 'java', 'source': './MyJavaFile.java', 'to': '9'})
    assert formatted_snippet == """```java
public class MyJavaFile {
    public static void main(String[] args){
        System.out.println("Hello world");
    }
}
```"""
def test_split_against_source_listing_tags():
    """The markdown must be split into before/start-tag/between/after parts."""
    md_text = """Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.
<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>
```java
    System.out.println("Hello world");
```
<sourceListingEnd/>
end"""
    splitted_text = split_against_source_listing_tags(md_text)
    assert splitted_text['text_before_start_tag'] == """Markdown preprocessor should replace code snippet between `sourceListingStart` and `sourceListingEnd` with code from the source file.
"""
    assert splitted_text['start_tag'] == '<sourceListingStart source="./MyJavaFile.java" from="5" to="5" lang="java"/>'
    assert splitted_text['text_between_start_and_end_tags'] == """
```java
    System.out.println("Hello world");
```
"""
    assert splitted_text['text_after_end_tag'] == """
end"""
| 5,245 | 1,720 |
"""Automated task runner"""
from typing import Any, Callable, Dict, Optional
from loguru import logger
from shadow.clone import ShadowClone
from shadow.helpers.state import ShadowState
class ShadowBot:
    """Automated task runner: delegates named tasks to ShadowClone workers."""

    def __init__(self):
        """Set the default name, attach the state machine and the clone registry."""
        self.name: Optional[str] = None
        self.state: ShadowState = ShadowState()
        # Maps a signal name to the ShadowClone that performs its task.
        self.clones: Dict[str, ShadowClone] = {}

    def rename(self, new_name: Optional[str] = None):
        """Name setter."""
        self.name = new_name
        logger.debug(f"New name set: {self.name}")

    def activate(self):
        """Transition state from dead to alive and notify observers."""
        logger.debug("Activating")
        self.state.revive()
        self.state.notify("State changed from dead to alive")

    def deactivate(self):
        """Transition state from alive to dead and notify observers."""
        logger.debug("Deactivating")
        self.state.kill()
        # Bug fix: the notification previously said "dead to alive",
        # copy-pasted from activate().
        self.state.notify("State changed from alive to dead")

    def alive(self):
        """Return True if the current state is alive."""
        is_alive: bool = self.state.is_alive
        return is_alive

    def dead(self):
        """Return True if the current state is dead."""
        is_dead: bool = self.state.is_dead
        return is_dead

    def add_task(
        self, signal: str, task: Callable, task_args: Optional[Dict[str, Any]] = None
    ):
        """Delegate a task to a new ShadowClone, callable later via `signal`.

        `task_args` defaults to None instead of a mutable `{}` (shared
        mutable-default pitfall); None is treated as "no arguments".
        """
        if signal not in self.clones:
            clone: ShadowClone = ShadowClone()
            clone.assign(func=task, **(task_args or {}))  # type: ignore
            # Clone performs the task when the signal is called.
            self.clones[signal] = clone

    def remove_task(self, signal: str):
        """Remove the clone attached to `signal`, if any."""
        if signal in self.clones:
            del self.clones[signal]

    def check_task(self, signal: str):
        """Return True if there is a task attached to `signal`."""
        return signal in self.clones

    def run(self, signal: str, wait: bool = False):
        """Perform the task attached to `signal` and return the result.

        When `wait` is True the call blocks until the result is available.
        """
        shadowclone: ShadowClone = self.clones[signal]
        result: Optional[Any] = shadowclone.perform(block=True) if wait else shadowclone.perform()
        logger.debug(f"Result compiled: {result}")
        return result

    def get_result(self, signal: str):
        """Return the last result for the task attached to `signal`, or False."""
        if signal in self.clones:
            # Check the clone's history for a result.
            result: Any = self.clones[signal].check_history()
            if result is not None:
                return result
        # No result
        return False
| 3,014 | 867 |
from dataclasses import dataclass
from typing import List, Optional, Union
@dataclass
class PayloadSender:
    # Sender's phone number and display name.
    phone: int
    name: str
@dataclass
class BaseModel:
    # Common payload envelope: who sent it and its unique identifier.
    sender: PayloadSender
    payload_id: str
@dataclass
class PayloadContactName:
    # Structured name; formatted_name is the display form.
    first_name: str
    formatted_name: str
    last_name: Optional[str] = None
@dataclass
class PayloadContactPhone:
    # A phone entry together with its kind (e.g. home/work).
    phone: Optional[str] = None
    type: Optional[str] = None
@dataclass
class PayloadContactAddress:
    # Postal address components; all optional.
    city: Optional[str] = None
    country: Optional[str] = None
    country_code: Optional[str] = None
    state: Optional[str] = None
    street: Optional[str] = None
    type: Optional[str] = None
    zip: Optional[str] = None
@dataclass
class PayloadContactEmail:
    # An e-mail entry together with its kind.
    email: Optional[str] = None
    type: Optional[str] = None
@dataclass
class PayloadContactOrganization:
    # Bug fix: was `Union[str] = None`.  Union with a single member is just
    # `str`, which contradicted the None default; Optional[str] is the
    # intended type and matches the sibling payload dataclasses.
    company: Optional[str] = None
@dataclass
class PayloadContactUrl:
    # A URL entry together with its kind.
    url: Optional[str] = None
    type: Optional[str] = None
@dataclass
class PayloadContactInformation:
    # Instant-messaging entry: service name and the user's id on that service.
    service: Optional[str] = None
    user_id: Optional[str] = None
@dataclass
class PayloadContact:
    # One contact card; every group of fields is optional.
    emails: Optional[List[PayloadContactEmail]] = None
    ims: Optional[List[PayloadContactInformation]] = None
    name: Optional[PayloadContactName] = None
    org: Optional[PayloadContactOrganization] = None
    phones: Optional[List[PayloadContactPhone]] = None
    urls: Optional[List[PayloadContactUrl]] = None
    addresses: Optional[List[PayloadContactAddress]] = None
@dataclass
class PayloadContacts(BaseModel):
    # Payload carrying a list of contact cards.
    contacts: List[PayloadContact]
| 1,597 | 488 |
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import ntpath
import time
from . import util
from . import html
import scipy.misc
from io import BytesIO
def save_images(webpage, visuals, image_path, win_size=512):
    """Save each visual as a JPEG in the webpage's image dir and add one row.

    Labels starting with 'output' share the file label 'output' while the full
    label is kept as the image caption.
    """
    image_dir = webpage.get_image_dir()
    name = os.path.splitext(ntpath.basename(image_path[0]))[0]
    webpage.add_header(name)

    ims, txts, links = [], [], []
    for label, image_numpy in visuals.items():
        fulllabel = label
        if label.startswith('output'):
            label = 'output'
        image_name = '%s_%s.jpg' % (name, label)
        util.save_image(image_numpy, os.path.join(image_dir, image_name))
        ims.append(image_name)
        txts.append(fulllabel)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=win_size)
| 1,501 | 486 |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_swin.ipynb (unless otherwise specified).
__all__ = ['SwinT']
# Cell
#hide
import timm
from nbdev.showdoc import *
from fastai.vision.all import *
# Cell
class SwinT:
    """Sets up a Swin Transformer model.

    The architecture is selected by `arch` and the number of output classes by
    `num_classes`. `get_model` returns a pretrained model by default, or a
    freshly initialised one when `pretrained` is False.
    """

    def __init__(self, arch, num_classes, pretrained=True):
        self.arch = arch
        self.pretrained = pretrained
        self.num_classes = num_classes

    def get_model(self):
        """Build and return the Swin Transformer wrapped in an nn.Sequential."""
        backbone = timm.create_model(self.arch, pretrained=self.pretrained, num_classes=self.num_classes)
        return nn.Sequential(backbone)
import colander
from authorize.exceptions import AuthorizeInvalidError
class BaseAPI(object):
    """Common base for Authorize API wrappers: holds the client and validates input."""

    def __init__(self, api):
        self.api = api
        self.config = api.config

    def _deserialize(self, schema, params=None):
        """Deserialize `params` through the given colander `schema`.

        `params` now defaults to None instead of a mutable `{}` (the shared
        mutable-default pitfall); None is replaced by a fresh dict per call,
        which is backward-compatible for all callers.

        Raises:
            AuthorizeInvalidError: if the schema rejects the parameters.
        """
        if params is None:
            params = {}
        try:
            deserialized = schema.deserialize(params)
        except colander.Invalid as e:
            # Chain the original colander error for easier debugging.
            raise AuthorizeInvalidError(e) from e
        return deserialized
| 407 | 115 |
import stable_baselines.common.tf_util as tf_util
from mpi4py import MPI
from stable_baselines import logger
from stable_baselines.common.cmd_util import make_mujoco_env, mujoco_arg_parser
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.trpo_mpi import TRPO
def train(env_id, num_timesteps, seed):
    """Train a TRPO agent on the given MuJoCo environment under MPI.

    Only rank 0 configures full logging; all other workers are silenced.
    Each worker derives its own seed from `seed` and its MPI rank so parallel
    workers do not collect identical rollouts.
    """
    with tf_util.single_threaded_session():
        rank = MPI.COMM_WORLD.Get_rank()
        if rank == 0:
            logger.configure()
        else:
            # Non-root workers: no output formats and logging disabled.
            logger.configure(format_strs=[])
            logger.set_level(logger.DISABLED)
        # Per-worker seed offset keeps exploration decorrelated across ranks.
        workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
        env = make_mujoco_env(env_id, workerseed)
        model = TRPO(
            MlpPolicy,
            env,
            timesteps_per_batch=1024,
            max_kl=0.01,
            cg_iters=10,
            cg_damping=0.1,
            entcoeff=0.0,
            gamma=0.99,
            lam=0.98,
            vf_iters=5,
            vf_stepsize=1e-3,
        )
        model.learn(total_timesteps=num_timesteps)
        env.close()
def main():
    # Parse the MuJoCo command-line arguments and launch TRPO training.
    args = mujoco_arg_parser().parse_args()
    train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
    main()
| 1,237 | 446 |
import sys
sys.path.append("..")
from common import *
def parse(data):
    """Parse a day-16 style input into (conditions, my ticket, nearby tickets)."""
    # Filter before stripping, exactly as before: raw lines of length <= 1
    # are dropped, then the survivors are stripped.
    lines = [raw.strip() for raw in data.split("\n") if len(raw) > 1]
    conds = {}
    # The first 20 lines are field rules of the form "name: a-b or c-d".
    for i in range(20):
        field_name, _, ranges_text = lines[i].partition(":")
        parts = ranges_text.split("or")
        conds[field_name] = ([int(v) for v in parts[0].split("-")],
                             [int(v) for v in parts[1].split("-")])
    myticket = [int(n) for n in lines[21].split(",")]
    nearbytickets = [[int(n) for n in lines[l].split(",")] for l in range(23, len(lines))]
    return (conds, myticket, nearbytickets)
# Read and parse the puzzle input (aoci/p/bi come from the shared `common` helpers).
data = aoci(parse);
p(data[0]);
count = 0
error_rate = 0
# Part 1: a value is invalid if it satisfies no field's two ranges; sum all
# invalid values across nearby tickets (and count them).
for tic in data[2]:
    for val in tic:
        flag = False
        for field in data[0]:
            c = data[0][field]
            # bi(x, lo, hi) presumably tests lo <= x <= hi -- TODO confirm in common
            if bi(val,c[0][0],c[0][1]) or bi(val,c[1][0],c[1][1]):
                flag = True
        if not flag:
            count += 1
            error_rate += val
print(error_rate)
print(count)
import time
import json
import datetime
from flask import request
from flask import render_template
from flask_restful import Resource
from . import buyer, apibuyer
from AppStore.models import Class_Info
@buyer.route('/buyer/index/')
def index():
    """Plain-text smoke-test endpoint for the buyer blueprint."""
    return 'Hello World! This is buyer index.'
@buyer.route('/api/buyer/page/')
def page():
    """Render the buyer page template."""
    return render_template('buyer/page.html')
@buyer.route('/buyer/data/')
def demo_data():
    """Render the buyer demo data page template."""
    return render_template('buyer/datapage.html')
@apibuyer.resource('/api/buyer/')
class BuyerApi(Resource):
    """REST resource exposing Class_Info records under /api/buyer/."""
    def __init__(self, *args, **kwargs):
        """Prepare the response envelope shared by all HTTP verbs."""
        super(BuyerApi, self).__init__(*args, **kwargs)
        # NOTE(review): assumes Flask-RESTful constructs a fresh resource per
        # request, so 'data' starts empty each call — confirm, otherwise
        # get() accumulates duplicates across requests.
        self.ret = {
            'code': 200,
            'version': 1.0,
            'frame': 'flask 1.1.1',
            'data': []
        }
    def get(self):
        """Return all Class_Info rows as a list of JSON-serializable dicts."""
        class_all = Class_Info.query.all()
        print(type(class_all))  # <class 'list'>
        for obj in class_all:
            obj_data = {
                'class_num': obj.class_num,
                'class_name': obj.class_name,
                # Dates are stringified so the envelope is JSON-serializable.
                'entrance_time': obj.entrance_time.strftime('%Y-%m-%d'),
                'college': obj.college,
            }
            # print(type(obj.entrance_time))
            # new_date = obj.entrance_time.strftime('%Y-%m-%d')
            # print(new_date)
            # print(type(new_date))
            self.ret['data'].append(obj_data)
        # print(self.ret)
        return self.ret
    def post(self):
        """Create one Class_Info row from the submitted form data.

        NOTE(review): inputs are stored unvalidated; entrance_time is the raw
        form string — confirm the model/save() converts it to a date.
        """
        data = request.form
        class_obj = Class_Info()
        class_obj.class_num = data.get('class_num')
        class_obj.class_name = data.get('class_name')
        class_obj.entrance_time = data.get('entrance_time')
        class_obj.college = data.get('college')
        class_obj.save()
        self.ret['data'] = '保存成功'
        return self.ret
    def put(self):
        """Not implemented; returns the empty response envelope."""
        return self.ret
    def delete(self):
        """Not implemented; returns the empty response envelope."""
        return self.ret
| 1,931 | 633 |
from flask_restful import Api
import info
def add_route(api: Api) -> None:
    """Register the info resource at the API root path."""
    api.add_resource(info.InfoRoute, '/')
| 115 | 43 |
from integrations.github.resources.base import GitHubListResource
class FollowersListResource(GitHubListResource):
    """List resource for the followers of a GitHub user."""
    # Endpoint template; {user_slug} is filled in by the base resource.
    endpoint = 'users/{user_slug}/followers'
| 219 | 64 |
# tests/test_provider_unicell_kind.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:21 UTC)
def test_provider_import():
    """The generated provider module must be importable."""
    import terrascript.provider.unicell.kind
def test_resource_import():
    """The generated kind_cluster resource class must be importable."""
    from terrascript.resource.unicell.kind import kind_cluster
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.unicell.kind
#
# t = terrascript.provider.unicell.kind.kind()
# s = str(t)
#
# assert 'https://github.com/unicell/terraform-provider-kind' in s
# assert '0.0.2-u2' in s
| 681 | 230 |
import queryCiteFile
import librarybase
import pywikibot
from epmclib.getPMCID import getPMCID
from epmclib.exceptions import IDNotResolvedException
import queue
import threading
import time
def rununthreaded(start=10513):
    """Sequentially add every PMC-cited paper to librarybase.

    Parameters:
        start: index into the citation list to resume from; the default
            keeps the previously hard-coded resume point.
    """
    citefile = queryCiteFile.CiteFile()
    citations = citefile.findRowsWithIDType('pmc')
    # idx is relative to the resume point, matching the original behavior.
    for idx, citation in enumerate(citations[start:]):
        addpaper(idx, citation)
def runthreaded():
    """Add papers with 10 worker threads fed from the global queue q.

    BUGFIX: the original passed ``target=worker()`` — calling worker()
    immediately in the main thread (which blocks forever on q.get())
    instead of handing the function to the Thread. The target is now
    passed uncalled.
    """
    threads = []
    for i in range(10):
        t = threading.Thread(target=worker)
        t.start()
        threads.append(t)
    citefile = queryCiteFile.CiteFile()
    citations = citefile.findRowsWithIDType('pmc')
    # Each queue item is an (index, citation-row) tuple from enumerate.
    for citation in enumerate(citations[10513:]):
        q.put(citation)
    q.join()
    # One None sentinel per worker so every thread can exit its loop.
    for i in range(10):
        q.put(None)
    for t in threads:
        t.join()
def worker():
    """Queue consumer: pull (idx, citation) tuples from the global queue q
    and add each paper, until a None sentinel arrives.

    BUGFIX: the original unpacked ``q.get()`` unconditionally, so the None
    sentinels pushed by runthreaded() raised TypeError inside the thread
    instead of shutting it down cleanly.
    """
    while True:
        item = q.get()
        if item is None:
            q.task_done()
            break
        idx, citation = item
        addpaper(idx, citation)
        q.task_done()
def addpaper( idx, citation ):
    """Resolve one PMC citation via Europe PMC and create the librarybase
    item if it does not exist yet.

    Parameters:
        idx: progress counter used only for logging.
        citation: a citation row; citation[5] is the PMC id. None is a
            no-op (threaded sentinel support).
    """
    start=time.time()
    print(citation)
    if citation is None:
        return
    print('trying to add {} number {}'.format(citation[5], idx))
    site = pywikibot.Site("librarybase", "librarybase")
    item = librarybase.JournalArticlePage(site)
    pmcidobj = getPMCID(citation[5])
    try:
        # NOTE(review): "getBBasicMetadata" looks like a typo'd method name —
        # confirm it matches epmclib's actual API.
        pmcidobj.getBBasicMetadata()
    except IDNotResolvedException:
        print('Couldn\'t find in EPMC:' + citation[5])
        return
    metadata = pmcidobj.metadata
    print("Got metadata in:" + str(time.time()-start))
    # Only create the item when no article with this pmcid exists yet.
    if not item.articleAlreadyExists(metadata['pmcid']):
        print('Item doesn\'t seem to exist. Setting metadata for: ' + metadata['pmcid'])
        item.setMetaData(metadata)
        print("set metadata in" + str(time.time()-start))
    else:
        print("{} already exists. Doing nothing".format(metadata['pmcid']))
# Work queue shared between runthreaded() and its worker threads.
q = queue.Queue()

# The script runs the sequential pipeline by default; call runthreaded()
# instead for the threaded variant.
rununthreaded()
from day18.script1 import parse_matrix, read_char_matrix, alpha_lower, solve_world
# We could use the same logic as part 1 with a state made of a distance and 4 (x, y) pairs (one for each bot)
# This works with examples, but there are too many combinations for the real input so it runs for very long
#
# To get it quicker, we will resolve the 4 sub-mazes independently, assuming we have all keys from other 3 mazes.
# Then we sum the 4 results.
#
# There could be cases where this logic does not work (basically if the keys from other mazes that we assume we have
# cannot be obtained in the order we assumed, because they require the current bot to get keys in a different order)
#
# I did not have such problematic cases with my input file and this logic gave the correct result.
def solve(world):
    """Sum the independent solutions of the four sub-mazes.

    world is a flat 8-tuple (world1, keys1, ..., world4, keys4) as
    returned by parse().
    """
    sub_worlds = world[0::2]
    sub_keys = world[1::2]
    return sum(solve_world(w, k) for w, k in zip(sub_worlds, sub_keys))
def _quadrant(matrix, rows, cols):
    """Extract the sub-matrix covering rows x cols and collect its keys.

    Returns (sub_matrix, keys) where keys are the lowercase key cells
    found in that region, in row-major order.
    """
    sub = []
    keys = []
    for i in rows:
        row = []
        for j in cols:
            cell = matrix[i][j]
            row.append(cell)
            if cell in alpha_lower:
                keys.append(cell)
        sub.append(row)
    return sub, keys


def parse(file_name):
    """Read the maze and split it into 4 independent quadrant sub-mazes.

    Returns the flat 8-tuple (world1, keys1, ..., world4, keys4) consumed
    by solve(). The four near-identical extraction loops of the original
    are factored into the _quadrant helper.
    """
    matrix = read_char_matrix(file_name)
    middle_i = (len(matrix) + 1) // 2
    middle_j = (len(matrix[0]) + 1) // 2
    # Row/column index ranges of the four quadrants (the split is slightly
    # asymmetric for odd sizes, matching the original behavior).
    top = range(middle_i)
    bottom = range(middle_i, len(matrix))
    left = range(middle_j)
    right = range(middle_j, len(matrix[0]))
    mx1, keys1 = _quadrant(matrix, top, left)
    mx2, keys2 = _quadrant(matrix, top, right)
    mx3, keys3 = _quadrant(matrix, bottom, left)
    mx4, keys4 = _quadrant(matrix, bottom, right)
    return parse_matrix(mx1), keys1, \
           parse_matrix(mx2), keys2, \
           parse_matrix(mx3), keys3, \
           parse_matrix(mx4), keys4
if __name__ == '__main__':
    # Part 2 answer: sum of the four independent sub-maze solutions.
    print(solve(parse("data2.txt")))
| 2,491 | 827 |
import numpy as np
from igakit.cad import circle, Pi
def make_crv(p, u):
    """Build a unit quarter-circle NURBS curve of degree p.

    The arc is rotated by Pi/4, degree-elevated from 2 to p, and refined
    with the knot list u.
    """
    crv = circle(radius=1, angle=Pi/2)
    crv.rotate(Pi/4)
    crv.elevate(0, p - 2)
    crv.refine(0, u)
    return crv
def check_crv(c):
    """Sample the curve and report whether every point lies on the unit
    circle (radius numerically close to 1)."""
    lo, hi = c.breaks(0)[[0, -1]]
    params = np.linspace(lo, hi, 100)
    x, y, z = c(params).T
    radii = np.hypot(x, y)
    return np.allclose(radii, 1)
def test_clamp():
    """Exercise unclamp/clamp round trips over degrees and refinements.

    BUGFIX: the original called check_crv(...) and discarded its boolean
    result, so the on-circle verification could never fail; the results
    are now asserted.
    """
    for p in range(2,6):
        for u in ([],[0.5],[1/3.0,2/3.0],[0.1,0.9]):
            c = make_crv(p,u)
            assert check_crv(c)
            for continuity in range(c.degree[0]):
                for side in (0, 1, None):
                    cc = c.copy()
                    cc.unclamp(0, continuity=continuity, side=side)
                    assert check_crv(cc)
                    cc.clamp(0, side=side)
                    assert check_crv(cc)
                    cc.clamp(0)
                    assert check_crv(cc)
                    # Clamping back must restore the original knots/control net.
                    assert np.allclose(cc.knots[0], c.knots[0])
                    assert np.allclose(cc.array, c.array)
if __name__ == '__main__':
    # Allow running this test module directly, without a test runner.
    test_clamp()
| 1,029 | 422 |
import vim
# Vim options treated as booleans by _opt (read via vim.eval('&name') == '1').
# NOTE(review): 'buftype', 'equalprg' and 'mouse' look like *string* options
# in Vim — confirm against :help and move them to _STR_OPTS if so.
_BOOL_OPTS = set(('allowrevins', 'altkeymap', 'antialias', 'autochdir', 'arabic', 'arabicshape',
                  'autoindent', 'autoread', 'autowrite', 'backup', 'ballooneval', 'binary',
                  'bioskey', 'bomb', 'buflisted', 'buftype', 'cindent', 'compatible', 'confirm',
                  'conskey', 'copyindent', 'cscoperelative', 'cscopetag', 'cscopeverbose',
                  'cursorbind', 'cursorcolumn', 'cursorline', 'delcombine', 'diff', 'digraph',
                  'edcompatible', 'endofline', 'equalalways', 'equalprg', 'errorbells', 'esckeys',
                  'expandtab', 'exrc', 'fkmap', 'foldenable', 'fsync', 'gdefault', 'guipty',
                  'hidden', 'hlsearch', 'hkmap', 'hkmapp', 'icon', 'ignorecase', 'imcmdline', 'imdisable',
                  'incsearch', 'infercase', 'insertmode', 'joinspaces', 'lazyredraw', 'linebreak', 'lisp',
                  'list', 'loadplugins', 'macatsui', 'magic', 'modeline', 'modifiable', 'modified',
                  'more', 'mouse', 'mousefocus', 'mousehide', 'number', 'opendevice', 'paste',
                  'preserveindent', 'previewwindow', 'prompt',
                  ))
# Vim options treated as integers (read via int(vim.eval('&name'), 0)).
_NUM_OPTS = set(('aleph', 'balloondelay', 'cmdheight', 'cmdwinheight', 'columns', 'concellevel',
                 'cscopepathcomp', 'cscopetagorder', 'foldcolumn', 'foldlevel', 'foldlevelstart',
                 'foldminlines', 'foldnestmax', 'guiheadroom', 'history', 'iminsert', 'imsearch',
                 'laststatus', 'lines', 'linespace', 'matchtime', 'maxcombine', 'maxfuncdepth',
                 'maxmem', 'maxmempattern', 'maxmemtot', 'menuitems', 'modelines', 'mousetime',
                 'mzquantum', 'numberwidth', 'previewheight', 'pumheight',
                 ))
# Vim options treated as raw strings (read via vim.eval('&name')).
# NOTE(review): 'helpheight' looks like a *number* option — confirm.
_STR_OPTS = set(('ambiwidth', 'background', 'backspace', 'backupcopy', 'backupdir', 'backupext',
                 'backupskip', 'balloonexpr', 'breakat', 'browsedir', 'bufhidden', 'casemap',
                 'cdpath', 'cedit', 'charconvert', 'cinkeys', 'cinoptions', 'cinwords',
                 'clipboard', 'colorcolumn', 'comments', 'commentstring', 'complete',
                 'completefunc', 'completeopt', 'concealcursor', 'cpoptions', 'cryptmethod',
                 'cscopeprg', 'cscopequickfix', 'debug', 'define', 'dictionary', 'diffexpr',
                 'diffopt', 'directory', 'display', 'eadirection', 'encoding', 'errorfile',
                 'errorformat', 'eventignore', 'fileencoding', 'fileencodings', 'fileformat',
                 'fileformats', 'filetype', 'fillchars', 'foldclose', 'foldexpr', 'foldignore',
                 'foldmarker', 'foldmethod', 'foldopen', 'foldtext', 'formatoptions',
                 'formatlistpat', 'formatprg', 'formatexpr', 'grepformat', 'grepprg',
                 'guicursor', 'guifont', 'guifontset', 'guifontwide', 'guioptions',
                 'guitablabel', 'guitabtooltip', 'helpfile', 'helpheight', 'helplang',
                 'highlight', 'iconstring', 'imactivatekey', 'include', 'includeexpr', 'indentexpr',
                 'indentkeys', 'isfname', 'isindent', 'iskeyword', 'isprint', 'key', 'keymap',
                 'keymodel', 'keywordprg', 'langmap', 'langmenu', 'lispwords', 'listchars',
                 'makeef', 'makeprg', 'matchpairs', 'mkspellmem', 'mousemodel', 'mouseshape',
                 'nrformats', 'omnifunc', 'operatorfunc', 'osfiletype', 'paragraphs', 'pastetoggle',
                 'patchexpr', 'patchmode', 'path', 'printdevice', 'printencoding', 'printexpr',
                 'printfont', 'printheader', 'printmbcharset', 'printmbfont', 'printoptions',
                 'quoteescape',
                 ))
class _opt(object):
    """Attribute-style access to Vim options: ``vim.opt.number`` reads the
    'number' option, ``vim.opt.number = True`` sets it.

    Option names are classified by the _BOOL_OPTS / _NUM_OPTS / _STR_OPTS
    sets above; unknown names raise AttributeError.
    """
    def __getattr__(self, name):
        if name in _BOOL_OPTS:
            return vim.eval('&' + name) == '1'
        elif name in _NUM_OPTS:
            # base 0 accepts plain, hex and octal numeric strings.
            return int(vim.eval('&' + name), 0)
        elif name in _STR_OPTS:
            return vim.eval('&' + name)
        # BUGFIX: the original silently returned None for unknown names,
        # hiding typos; follow the Python attribute protocol instead.
        raise AttributeError(name)
    def __setattr__(self, name, val):
        if name in _BOOL_OPTS:
            if val:
                vim.command('set %s' % name)
            else:
                vim.command('set no%s' % name)
        elif name in _NUM_OPTS:
            # BUGFIX: numeric assignments were silently dropped before.
            vim.command("let &%s = %d" % (name, int(val)))
        elif name in _STR_OPTS:
            # BUGFIX: string assignments were silently dropped before.
            # :let with a single-quoted Vim string avoids :set escaping
            # rules; a quote inside the value is escaped by doubling it.
            vim.command("let &%s = '%s'" % (name, str(val).replace("'", "''")))
        else:
            raise AttributeError(name)
# Install the accessor on the vim module so callers can use vim.opt.<name>.
vim.opt = _opt()
| 4,261 | 1,343 |
#!/bin/python
# coding=utf-8
import schedule
import time
from subprocess import call
# Referências:
# https://pypi.org/project/schedule/
# https://stackoverflow.com/questions/373335/how-do-i-get-a-cron-like-scheduler-in-python
# https://www.geeksforgeeks.org/python-schedule-library/
def postgres_backup_00_h():
    """Run the daily Postgres backup shell script (scheduled at 00:00).

    Launch failures are caught and logged so the scheduler loop keeps
    running.
    """
    print("#################### inicio postgres_backup_00_h ####################")
    # BUGFIX: the timestamp and footer lines said "postgres_backup_60_min",
    # a leftover from a copy-pasted hourly variant; they now match this
    # function's name.
    print("postgres_backup_00_h : {}".format(time.ctime()))
    try:
        call(['sh', '/scripts/postgres_backup.sh'])
    except Exception as e:
        print('problema ao executar postgres_backup.sh')
        print(e)
    print("#################### fim postgres_backup_00_h ####################")
if __name__ == "__main__":
    print("#################### tasks.py iniciado ####################")
    # Schedule postgres_backup_00_h() to run daily at 00:00.
    schedule.every().day.at("00:00").do(postgres_backup_00_h)
    # Polling loop: check for pending jobs once per second.
    while True:
        schedule.run_pending()
        time.sleep(1)
| 997 | 350 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/statestream
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import importlib
import SharedArray
from statestream.utils.helper import is_scalar_shape
from statestream.utils.shared_memory_layout import SharedMemoryLayout
from statestream.meta.network import get_item_type
from statestream.meta.network import S2L
from statestream.meta.neuron_pool import np_shm_layout, np_init
from statestream.meta.synapse_pool import sp_shm_layout, sp_init
def shared_layout(net, param):
    """Generates shared-memory layout from net and param.

    Parameters:
        net: network dictionary with neuron_pools, synapse_pools,
            plasticities and interfaces sections.
        param: core parameters; len(param["core"]["temporal_memory"])
            determines the number of tmem slots per item.

    Returns a dict mapping item name -> layout dict containing a "tmem"
    list plus the item-type specific entries ("state", "parameter",
    "variables", and "updates" for plasticities).
    """
    layout = {}
    for t in ["np", "sp", "plast", "if"]:
        for i,I in net[S2L(t)].items():
            # Begin with empty layout.
            layout[i] = {}
            # Empty tmem layout structure. Rest will be filled during shm creation.
            layout[i]["tmem"] = []
            for tmem in range(len(param["core"]["temporal_memory"])):
                layout[i]["tmem"].append({"parameter": {}, "variables": {}})
                if t == "plast":
                    layout[i]["tmem"][tmem]["updates"] = {}
            # Get item shm data layout.
            if t == "np":
                layout[i].update(np_shm_layout(i, net, param))
            elif t == "sp":
                layout[i].update(sp_shm_layout(i, net, param))
            elif t == "plast":
                # Plasticity layouts are provided by the type-specific module.
                plast_shm_layout \
                    = getattr(importlib.import_module("statestream.meta.plasticities." + I["type"]),
                              "plast_shm_layout")
                layout[i].update(plast_shm_layout(i, net, param))
                layout[i]["updates"] = {}
                # Updates re-use the layout of the parameter they target.
                for par in I["parameter"]:
                    if par[1] not in layout[i]["updates"]:
                        layout[i]["updates"][par[1]] = {}
                    shml = layout[par[1]]["parameter"][par[2]]
                    layout[i]["updates"][par[1]][par[2]] = shml
            elif t == "if":
                # Interface layouts are provided by the type-specific module.
                if_shm_layout \
                    = getattr(importlib.import_module("statestream.interfaces.process_if_" + I["type"]),
                              "if_shm_layout")
                layout[i].update(if_shm_layout(i, net, param))
    return layout
class SharedMemory(object):
    def __init__(self, net, param, session_id=None, force_id=None):
        """Create or attach all shared memory for a statestream session.

        Parameters:
            net: network dictionary (neuron_pools, synapse_pools,
                plasticities, interfaces).
            param: core parameter dictionary.
            session_id: attach to this already-created session; with None,
                allocate shared memory for a new session.
            force_id: only used when session_id is None; forces this new
                session id instead of taking the first free one.
        """
        self.net = net
        self.param = param
        # Get list of all existing shared memory arrays.
        shm_list = SharedArray.list()
        shm_list_name = []
        for i in range(len(shm_list)):
            # SharedArray returns bytes names under Python 3.
            if sys.version[0] == "2":
                shm_list_name.append(shm_list[i].name)
            elif sys.version[0] == "3":
                shm_list_name.append(shm_list[i].name.decode("utf-8"))
        # Initially start with invalid session id.
        self.session_id = None
        # Begin with empty structure holding the entire layout.
        self.dat = {}
        # Estimate of bytes reserved in shared memory.
        self.log_lines = []
        self.log_bytes = []
        # Build layout.
        # ---------------------------------------------------------------------
        self.layout = shared_layout(self.net, self.param)
        # Dependent on given session_id initialize shared memory.
        # ---------------------------------------------------------------------
        if session_id is None:
            if force_id is None:
                # Determine next free session id.
                for tmp_session_id in range(2**10):
                    id_taken = False
                    for i in range(len(shm_list)):
                        if shm_list_name[i].find("statestream." + str(tmp_session_id) + ".") != -1:
                            id_taken = True
                            break
                    # Take the first free session id and break.
                    if not id_taken:
                        self.session_id = tmp_session_id
                        session_name = "statestream." + str(self.session_id) + "."
                        break
            else:
                self.session_id = force_id
                session_name = "statestream." + str(self.session_id) + "."
            # Allocate all shared memory.
            # ---------------------------------------------------------
            for t in ["np", "sp", "plast", "if"]:
                for i, I in self.net[S2L(t)].items():
                    # Allocate process identifiers.
                    shm_name = session_name + "core.proc_id." + i
                    SharedArray.create(shm_name, 1, dtype=np.int32)
                    # Allocate shm for neuron pool states.
                    if t == "np":
                        shm_name = session_name + "net." + i + ".state"
                        SharedArray.create(shm_name,
                                           self.layout[i]["state"].shape,
                                           dtype=self.layout[i]["state"].dtype)
                        # Allocate also tmem for np states.
                        for tmem in range(len(param["core"]["temporal_memory"])):
                            # tmem slots share the layout of the live state.
                            self.layout[i]["tmem"][tmem]["state"] = self.layout[i]["state"]
                            tmem_shm_name = session_name + "net.tmem." + str(tmem) + "." + i + ".state"
                            SharedArray.create(tmem_shm_name,
                                               self.layout[i]["state"].shape,
                                               dtype=self.layout[i]["state"].dtype)
                    # Allocate parameters and variables (incl. tmem).
                    for T in ["parameter", "variables"]:
                        shm_name = session_name + "net." + i + "." + T
                        for d,d_l in self.layout[i][T].items():
                            dat_name = shm_name + "." + d
                            # Scalars are stored as 1-element arrays.
                            if is_scalar_shape(d_l.shape):
                                SharedArray.create(dat_name, 1, dtype=d_l.dtype)
                            else:
                                SharedArray.create(dat_name, d_l.shape, dtype=d_l.dtype)
                            # Allocate also tmem for parameters / variables.
                            for tmem in range(len(param["core"]["temporal_memory"])):
                                self.layout[i]["tmem"][tmem][T][d] = d_l
                                tmem_shm_name = session_name + "net.tmem." + str(tmem) \
                                                + "." + i + "." + T + "." + d
                                if is_scalar_shape(d_l.shape):
                                    SharedArray.create(tmem_shm_name, 1, dtype=d_l.dtype)
                                else:
                                    SharedArray.create(tmem_shm_name, d_l.shape, dtype=d_l.dtype)
            # Allocate shm for plasticity updates (incl. tmem).
            for i,I in self.net["plasticities"].items():
                shm_name = session_name + "net." + i + ".updates."
                for par in I["parameter"]:
                    # Updates share the layout of the targeted parameter.
                    shml = self.layout[par[1]]["parameter"][par[2]]
                    dat_name = shm_name + par[0] + "." + par[1] + "." + par[2]
                    if is_scalar_shape(shml.shape):
                        SharedArray.create(dat_name, 1, dtype=shml.dtype)
                    else:
                        SharedArray.create(dat_name, shml.shape, dtype=shml.dtype)
                    # Allocate also tmem for updates.
                    for tmem in range(len(param["core"]["temporal_memory"])):
                        if par[1] not in self.layout[i]["tmem"][tmem]["updates"]:
                            self.layout[i]["tmem"][tmem]["updates"][par[1]] = {}
                        self.layout[i]["tmem"][tmem]["updates"][par[1]][par[2]] = shml
                        tmem_shm_name = session_name + "net.tmem." + str(tmem) \
                                        + "." + i + ".updates." \
                                        + par[0] + "." + par[1] + "." + par[2]
                        if is_scalar_shape(shml.shape):
                            SharedArray.create(tmem_shm_name, 1, dtype=shml.dtype)
                        else:
                            SharedArray.create(tmem_shm_name, shml.shape, dtype=shml.dtype)
        else:
            # Set session name for shm.
            session_name = "statestream." + str(session_id) + "."
            # Check if shared memory for this session_id was already created.
            for i in range(len(shm_list)):
                if shm_list_name[i].find(session_name) != -1:
                    self.session_id = session_id
                    break
            assert (self.session_id != None), \
                "Error: SharedMemory() Given session_id was not found: " \
                + str(session_id) + " " + session_name
        # Attach all shared memory.
        # ---------------------------------------------------------------------
        self.proc_id = {}
        for t in ["np", "sp", "plast", "if"]:
            for i,I in self.net[S2L(t)].items():
                self.dat[i] = {}
                # Begin with empty list of dicts for temporal memory.
                self.dat[i]["tmem"] = [{} for tmem in range(len(param["core"]["temporal_memory"]))]
                # Process relevant memory.
                shm_name = session_name + "core.proc_id." + i
                self.proc_id[i] = SharedArray.attach(shm_name)
                # Network data shared memory for neuron pool states.
                if t == "np":
                    shm_name = session_name + "net." + i + ".state"
                    self.dat[i]["state"] = SharedArray.attach(shm_name)
                    self.log_lines += [str(i) + ".state"]
                    self.log_bytes += [self.dat[i]["state"].nbytes]
                    # Attach also tmem for np states.
                    for tmem in range(len(param["core"]["temporal_memory"])):
                        tmem_dat_name = session_name + "net.tmem." + str(tmem) + "." + i + ".state"
                        self.dat[i]["tmem"][tmem]["state"] = SharedArray.attach(tmem_dat_name)
                        self.log_lines += [str(i) + ".tmem." + str(tmem) + ".state"]
                        self.log_bytes += [self.dat[i]["tmem"][tmem]["state"].nbytes]
                # Network data shared memory for plasticity updates.
                if t == "plast":
                    # Begin with empty dict also for temporal memory.
                    self.dat[i]["updates"] = {}
                    for tmem in range(len(param["core"]["temporal_memory"])):
                        self.dat[i]["tmem"][tmem]["updates"] = {}
                    shm_name = session_name + "net." + i + ".updates."
                    for par in I["parameter"]:
                        # First time add parameter for specific item (incl. tmem).
                        if par[1] not in self.dat[i]["updates"]:
                            self.dat[i]["updates"][par[1]] = {}
                            for tmem in range(len(param["core"]["temporal_memory"])):
                                self.dat[i]["tmem"][tmem]["updates"][par[1]] = {}
                        # Specify shm update id.
                        dat_name = shm_name + par[0] + "." + par[1] + "." + par[2]
                        # Attach shm.
                        self.dat[i]["updates"][par[1]][par[2]] = SharedArray.attach(dat_name)
                        self.log_lines += [str(i) + ".updates." + str(par[1]) + "." + str(par[2])]
                        self.log_bytes += [self.dat[i]["updates"][par[1]][par[2]].nbytes]
                        # Attach also tmem for updates.
                        for tmem in range(len(param["core"]["temporal_memory"])):
                            tmem_shm_name = session_name + "net.tmem." + str(tmem) \
                                            + "." + i + ".updates." \
                                            + par[0] + "." + par[1] + "." + par[2]
                            self.dat[i]["tmem"][tmem]["updates"][par[1]][par[2]] \
                                = SharedArray.attach(tmem_shm_name)
                            self.log_lines += [str(i) + ".tmem." + str(tmem) \
                                               + ".updates." + str(par[1]) + "." + str(par[2])]
                            self.log_bytes += [self.dat[i]["tmem"][tmem]["updates"][par[1]][par[2]].nbytes]
                # Network data shared memory for variables and parameter.
                for T in ["parameter", "variables"]:
                    # Begin with empty dict also for temporal memory.
                    self.dat[i][T] = {}
                    for tmem in range(len(param["core"]["temporal_memory"])):
                        self.dat[i]["tmem"][tmem][T] = {}
                    # Determine shm id item "prefix".
                    shm_name = session_name + "net." + i + "." + T
                    # Loop over all vars/pars of this item.
                    for d,d_l in self.layout[i][T].items():
                        dat_name = shm_name + "." + d
                        self.dat[i][T][d] = SharedArray.attach(dat_name)
                        self.log_lines += [str(i) + "." + str(T) + "." + str(d)]
                        self.log_bytes += [self.dat[i][T][d].nbytes]
                        # Attach also tmem for parameter / variables.
                        for tmem in range(len(param["core"]["temporal_memory"])):
                            tmem_shm_name = session_name + "net.tmem." + str(tmem) \
                                            + "." + i + "." + T + "." + d
                            self.dat[i]["tmem"][tmem][T][d] = SharedArray.attach(tmem_shm_name)
                            self.log_lines += [str(i) + ".tmem." + str(tmem) + "." \
                                               + str(T) + "." + str(d)]
                            self.log_bytes += [self.dat[i]["tmem"][tmem][T][d].nbytes]
def delete(self):
"""Method to free statestream shared memory of the particular session.
"""
if self.session_id != None:
shm_list = SharedArray.list()
shm_list_name = []
for i in range(len(shm_list)):
if sys.version[0] == "2":
shm_list_name.append(shm_list[i].name)
elif sys.version[0] == "3":
shm_list_name.append(shm_list[i].name.decode("utf-8"))
for i in range(len(shm_list)):
if shm_list_name[i].find("statestream." + str(self.session_id) + ".") != -1:
SharedArray.delete(shm_list_name[i])
def add_sys_client(self, client_param):
"""Create shared memory for a single system client.
"""
client_shm_name = 'statestream.' \
+ str(self.session_id) + '.' \
+ 'sys_clients.' \
+ str(client_param['name']) + '.'
# Create and attach client specific shared memory.
for T in ['parameter', 'variables']:
if T in client_param:
for pv,PV in client_param[T].items():
shm_name = client_shm_name + T + '.' + pv
try:
SharedArray.create(shm_name, PV['shape'], dtype=np.float32)
except:
dat = SharedArray.attach(shm_name)
if dat.shape != PV['shape']:
print('\nError: Shared memory: Tried to create already existing memory: ' + shm_name)
    def update_sys_client(self):
        """Update this instance of shared memory to existing clients.

        Re-scans the system shared-memory list for client segments named
        'statestream.<session>.sys_clients.<client>.<T>.<d>', attaches any
        new ones into self.dat / self.layout, and drops entries for
        clients whose segments have disappeared.
        """
        # Determine all clients, currently in shared memory.
        clients = {}
        shm_list = SharedArray.list()
        client_shm_name = 'statestream.' \
                          + str(self.session_id) + '.' \
                          + 'sys_clients.'
        for shm_name_raw in shm_list:
            # SharedArray returns bytes names under Python 3.
            if sys.version[0] == "2":
                shm_name = shm_name_raw.name
            elif sys.version[0] == "3":
                shm_name = shm_name_raw.name.decode("utf-8")
            if shm_name.startswith(client_shm_name):
                # Name layout: statestream.<sid>.sys_clients.<client>.<T>.<d>
                shm_name_split = shm_name.split('.')
                client_name = shm_name_split[3]
                if client_name not in clients:
                    clients[client_name] = {
                        'parameter': {},
                        'variables': {}
                    }
                clients[client_name][shm_name_split[4]][shm_name_split[5]] \
                    = shm_name
        # Update client shared memory dat and layout.
        for c,C in clients.items():
            if c not in self.dat:
                self.dat[c] = {
                    'parameter': {},
                    'variables': {}
                }
                self.layout[c] = {
                    'parameter': {},
                    'variables': {}
                }
            for t,T in C.items():
                for d,D in T.items():
                    self.dat[c][t][d] = SharedArray.attach(D)
                    # Client data is always treated as float32 "np"-style
                    # layout with default 0.0.
                    self.layout[c][t][d] = SharedMemoryLayout('np',
                                                              self.dat[c][t][d].shape,
                                                              self.dat[c][t][d].dtype,
                                                              0.0)
        # Determine all items in dat / layout which are not in shared memory.
        # Remove deprecated shared memory from layout and dat.
        remove_items = []
        for i,I in self.layout.items():
            # Keep everything that is either a live client or a network item.
            if i not in clients and i not in self.net['neuron_pools'] \
                    and i not in self.net['synapse_pools'] \
                    and i not in self.net['plasticities'] \
                    and i not in self.net['interfaces']:
                remove_items.append(i)
        for i in remove_items:
            self.dat.pop(i)
            self.layout.pop(i)
def remove_sys_client(self, client_name):
"""Remove shared memory for system client.
"""
client_shm_name = 'statestream.' \
+ str(self.session_id) + '.' \
+ 'sys_clients.' \
+ str(client_name) + '.'
# Delete shared memory.
for T in ['parameter', 'variables']:
for d,d_l in self.layout[client_name][T].items():
shm_name = client_shm_name + T + '.' + str(d)
try:
SharedArray.delete(shm_name)
except:
print("\nERROR: Unable to delete non-existing shared memory: " + str(shm_name) + "\n")
    def pprint_list(self, what=""):
        """Return a list of lines containing shm info about what.

        ``what`` is a dot-separated selector typed by the user, e.g.
        "shm", "shm.n", "shm.n.<item>", "shm.n.<item>.v.<var>[...]".
        Returns the lines to display (possibly empty).

        NOTE(review): this method eval()s index expressions taken directly
        from the user-supplied selector — acceptable for an interactive
        debug shell, but never expose it to untrusted input.
        """
        lines = []
        w = what.split(".")
        # Map the one-letter item-class selector to the net section name.
        if len(w) > 1:
            if len(w[1]) == 1:
                if w[1] == "n":
                    i_type = "neuron_pools"
                elif w[1] == "s":
                    i_type = "synapse_pools"
                elif w[1] == "p":
                    i_type = "plasticities"
                elif w[1] == "i":
                    i_type = "interfaces"
                else:
                    return []
        # NOTE(review): if w[1] is longer than one character, i_type stays
        # unbound and the self.net[i_type] lookups below raise NameError.
        if what in ["shm", "shm."]:
            lines.append("[n]euron pools")
            lines.append("[s]ynapse pools")
            lines.append("[p]lasticities")
            lines.append("[i]nterfaces")
        if len(w) == 2:
            # shm.i_type: list all items of the class, four per line.
            if w[1] != "":
                cntr = 0
                for i in self.net[i_type]:
                    if cntr == 0:
                        # Append new line.
                        lines.append("  " + i.ljust(18))
                    else:
                        # Append to existing line.
                        lines[-1] = lines[-1] + i.ljust(18)
                    if cntr < 3:
                        cntr += 1
                    else:
                        cntr = 0
        elif len(w) == 3:
            # shm.i_type.item_name: list items matching the typed prefix.
            if w[1] != "":
                cntr = 0
                for i in self.net[i_type]:
                    if i.startswith(w[2]):
                        if cntr == 0:
                            # Append new line.
                            lines.append("  " + i.ljust(18))
                        else:
                            # Append to existing line.
                            lines[-1] = lines[-1] + i.ljust(18)
                        if cntr < 3:
                            cntr += 1
                        else:
                            cntr = 0
        elif len(w) == 4:
            # shm.i_type.item_name.data_type
            if w[1] != "":
                if w[2] in self.net[i_type]:
                    # Assuming all classes of data begin
                    # with a different letter.
                    if w[3] == "":
                        for e in self.dat[w[2]]:
                            lines.append("  [" + e[0] + "]" + e[1:])
                    else:
                        dat_type = "x"
                        if w[3][0] in ["v", "p"]:
                            if w[3] == "v":
                                dat_type = "variables"
                            else:
                                dat_type = "parameter"
                            for vp in self.dat[w[2]][dat_type]:
                                lines.append("    " + vp)
                        elif w[3].startswith("s"):
                            # "s" = the np state array; "s[...]" indexes it.
                            if w[3] == "s":
                                lines.append("    shape: " + str(self.layout[w[2]]["state"].shape))
                                lines.append("    type:  " + str(self.layout[w[2]]["state"].dtype))
                                nbytes = self.dat[w[2]]["state"].nbytes
                                lines.append("    memory: " + str(nbytes) + " B")
                            if w[3].startswith("s[") and w[3][-1] == "]":
                                # Get data (index expression comes from the user).
                                s = eval("self.dat[w[2]]['state']" + w[3][1:])
                                if len(s.shape) == 0:
                                    lines.append("    value: " + str(s))
                                if len(s.shape) == 1:
                                    for i in range(min(s.shape[0], 16)):
                                        lines.append(str(i).ljust(4) + "  " + str(s[i]))
                                    if s.shape[0] >= 16:
                                        lines.append("...")
        elif len(w) == 5:
            # shm.i_type.item_name.data_type.data_name[optional index]
            if w[1] != "":
                if w[2] in self.net[i_type]:
                    dat_type = "x"
                    if w[3][0] in ["v", "p"]:
                        if w[3] == "v":
                            dat_type = "variables"
                        else:
                            dat_type = "parameter"
                        for vp in self.dat[w[2]][dat_type]:
                            # Prefix match: suggest completions.
                            if vp.startswith(w[4]) and len(w[4]) < len(vp):
                                lines.append("    " + vp)
                            # Exact match: show shape / dtype / size.
                            if vp == w[4]:
                                lines.append("    shape: " + str(self.layout[w[2]][dat_type][vp].shape))
                                lines.append("    type:  " + str(self.layout[w[2]][dat_type][vp].dtype))
                                nbytes = self.dat[w[2]][dat_type][vp].nbytes
                                lines.append("    memory: " + str(nbytes) + " B")
                            # Indexed access, e.g. name[0,:].
                            if w[4].startswith(vp) and w[4][-1] == "]" and "[" in w[4]:
                                # Get data (index expression comes from the user).
                                s = eval("self.dat[w[2]][dat_type][vp]" + w[4][len(vp):])
                                if len(s.shape) == 0:
                                    lines.append("    value: " + str(s))
                                if len(s.shape) == 1:
                                    for i in range(min(s.shape[0], 16)):
                                        lines.append(str(i).ljust(4) + "  " + str(s[i]))
                                    if s.shape[0] >= 16:
                                        lines.append("...")
        return lines
    def init(self, what=[], mode=None):
        """Method to recusively initialize a subset of the network.

        what:
            []             Initialize everything.
            ["state"]      Initialize all states.
            ["parameter"]  Initialize all parameter.
            ["variables"]  Initialize all variables.
            ["updates"]    Initialize all updates.
            [np_id, "state"]    Initialize state of neuron pool np_id.
            [item_id,           Initialize parameter par_id of item item_id.
             "parameter",
             par_id]
            [item_id,           Initialize variable var_id of item item_id.
             "variables",
             var_id]
            [plast_id,          Initialize updates [tar_id, par_id] of plasticity plast_id.
             "updates",
             tar_id,
             par_id]

        mode: None for defaults, "zero"/"one" (or 0.0/1.0) for constant
        fills; forwarded to the type-specific initializers.
        NOTE(review): the mutable default ``what=[]`` is only read, never
        mutated, so it is harmless here.
        """
        # Do not initialize meta-variables.
        if len(what) >= 1:
            if what[0] in self.dat \
                    and what[0] not in self.net['neuron_pools'] \
                    and what[0] not in self.net['synapse_pools'] \
                    and what[0] not in self.net['plasticities'] \
                    and what[0] not in self.net['interfaces']:
                return
        # Adjust mode in some cases.
        if isinstance(mode, list):
            if "external_models" in self.net:
                # In case of external model init, set mode here to none.
                if mode[0] in self.net["external_models"]:
                    mode = None
        # Determine item to be set and its type.
        item_id = None
        item_type = None
        if len(what) >= 1:
            item_id = what[0]
            if item_id == "state":
                # Initialize all states.
                for n in self.net["neuron_pools"]:
                    self.init([n, "state"], mode=mode)
                # Done with initialization.
                return None
            elif item_id in ["parameter", "variables"]:
                # Initialize all parameters or variables.
                for i in self.dat:
                    for d, d_l in self.layout[i][item_id].items():
                        self.init([i, item_id, d], mode=mode)
                # Done with initialization.
                return None
            elif item_id == "updates":
                # Initialize all updates.
                for i in self.net["plasticities"]:
                    for target_i in self.dat[i]["updates"]:
                        for target_p in self.dat[i]["updates"][target_i]:
                            self.init([i, "updates", target_i, target_p], mode=mode)
                # Done with initialization.
                return None
            else:
                # Assume what[0] is an item.
                # Determine item type.
                item_type = get_item_type(self.net, item_id)
        else:
            # len ought to be zero, so everthing should be set.
            self.init(["state"], mode=0.0)
            self.init(["parameter"], mode=mode)
            self.init(["variables"], mode=0.0)
            self.init(["updates"], mode=0.0)
            # Done with initialization.
            return None
        # Dependent on len of what, determine what is to be set.
        set_flag = False
        if len(what) == 1:
            # Re-init a single item.
            if item_type == "np":
                pass
            elif item_type == "sp":
                pass
            elif item_type == "plast":
                pass
            elif item_type == "if":
                pass
            # TODO
        elif len(what) == 2:
            if what[1] in ["state"]:
                # "__state__" is the sentinel name the np initializer expects.
                dat_name = "__state__"
                dat_layout = self.layout[item_id]["state"]
                set_flag = True
            else:
                raise NameError("SharedMemory.init() inconsistent what parameter for what of length " \
                                + str(len(what)) + ".")
        elif len(what) == 3:
            if what[1] in ["parameter", "variables"]:
                dat_name = what[2]
                dat_layout = self.layout[item_id][what[1]][what[2]]
                set_flag = True
            else:
                raise NameError("SharedMemory.init() inconsistent what parameter for what of length " \
                                + str(len(what)) + ".")
        elif len(what) == 4:
            if what[1] == "updates":
                dat_name = [what[2], [what[3]]]
                dat_layout = self.layout[item_id]["updates"][what[2]][what[3]]
                set_flag = True
            else:
                raise NameError("SharedMemory.init() inconsistent what parameter for what of length " \
                                + str(len(what)) + ".")
        else:
            raise NameError("SharedMemory.init() Unexpected what of length " + str(len(what)) + ".")
        # Set if something is to be set.
        if set_flag:
            # NOTE(review): if item_type matched none of the cases below,
            # 'value' would be unbound at the fallback check — confirm
            # get_item_type() always returns one of np/sp/plast/if here.
            if item_type == "np":
                value = np_init(self.net, item_id, dat_name, dat_layout, mode=mode)
            elif item_type == "sp":
                value = sp_init(self.net, item_id, dat_name, dat_layout, mode=mode)
            elif item_type == "plast":
                # Determine plasticity type.
                plast_type = self.net["plasticities"][item_id]["type"]
                # Get correct plasticity initializer.
                # NOTE(review): the bare except also hides genuine import /
                # initializer errors, silently falling back to defaults.
                try:
                    plast_init \
                        = getattr(importlib.import_module("statestream.meta.plasticities." + plast_type),
                                  "plast_init")
                    value = plast_init(self.net, item_id, dat_name, dat_layout, mode=mode)
                except:
                    value = None
            elif item_type == "if":
                # Determine interface type.
                if_type = self.net["interfaces"][item_id]["type"]
                # Get correct plasticity initializer.
                # NOTE(review): same bare-except caveat as above.
                try:
                    if_init = getattr(importlib.import_module("statestream.interfaces." + if_type),
                                      "if_init")
                    value = if_init(self.net, item_id, dat_name, dat_layout, mode=mode)
                except:
                    value = None
            # Fallback if invalid value.
            if value is None:
                value = self.init_fallback(item_id, dat_name, dat_layout, mode=mode)
            # Finally set value.
            self.set_shm(what, value)
def init_fallback(self, item_id, dat_name, dat_layout, mode=None):
"""Fallback to default initialization.
"""
# Get local dictionary.
if item_id in self.net["neuron_pools"]:
p = self.net["neuron_pools"][item_id]
elif item_id in self.net["synapse_pools"]:
p = self.net["synapse_pools"][item_id]
elif item_id in self.net["plasticities"]:
p = self.net["plasticities"][item_id]
elif item_id in self.net["interfaces"]:
p = self.net["interfaces"][item_id]
# Dependent on scalar or not, try to initialize.
if is_scalar_shape(dat_layout.shape):
# Scalar values.
if mode is None:
dat_value = np.array(p.get(dat_name, dat_layout.default),
dtype=dat_layout.dtype)
else:
dat_value = np.array(mode, dtype=dat_layout.dtype)
if mode in ["one", 1.0]:
dat_value = np.array(1.0, dtype=dat_layout.dtype)
else:
dat_value = np.array(0.0, dtype=dat_layout.dtype)
else:
# If mode is None, set to default.
if mode is None:
dat_value = np.ones(dat_layout.shape, dtype=dat_layout.dtype)
try:
dat_value *= dat_layout.default
except:
dat_value *= 0
print("Warning: No valid initialization for " + str(dat_name) \
+ " of item " + str(item_id) + ". Set to zero.")
else:
# Dependent on specified mode set value.
if mode in ["zero", 0.0]:
dat_value = np.zeros(dat_layout.shape, dtype=dat_layout.dtype)
elif mode in ["one", 1.0]:
dat_value = np.ones(dat_layout.shape, dtype=dat_layout.dtype)
# Return initialized value.
return dat_value
def set_shm(self, which, value):
"""Method to set a specific array in shared memory to value.
"""
if len(which) == 2:
if self.layout[which[0]][which[1]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]].min)
if self.layout[which[0]][which[1]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]].max)
shape = self.layout[which[0]][which[1]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][0] = value
elif value.shape == self.dat[which[0]][which[1]].shape:
self.dat[which[0]][which[1]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]].shape) \
+ " for " + str(which))
elif len(which) == 3:
if self.layout[which[0]][which[1]][which[2]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]][which[2]].min)
if self.layout[which[0]][which[1]][which[2]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]][which[2]].max)
shape = self.layout[which[0]][which[1]][which[2]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][which[2]][0] = value
elif value.shape == self.dat[which[0]][which[1]][which[2]].shape:
self.dat[which[0]][which[1]][which[2]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]][which[2]].shape) \
+ " for " + str(which))
elif len(which) == 4:
if self.layout[which[0]][which[1]][which[2]][which[3]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]][which[2]][which[3]].min)
if self.layout[which[0]][which[1]][which[2]][which[3]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]][which[2]][which[3]].max)
shape = self.layout[which[0]][which[1]][which[2]][which[3]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][which[2]][which[3]][0] = value
elif value.shape == self.dat[which[0]][which[1]][which[2]][which[3]].shape:
self.dat[which[0]][which[1]][which[2]][which[3]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]][which[2]][which[3]].shape) \
+ " for " + str(which))
elif len(which) == 5:
if self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].min is not None:
value = np.maximum(value, self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].min)
if self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].max is not None:
value = np.minimum(value, self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].max)
shape = self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].shape
if is_scalar_shape(shape):
self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][0] = value
elif value.shape == self.dat[which[0]][which[1]][which[2]][which[3]][which[4]].shape:
self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][:] = value
else:
print("\nError set_shm: incompatible shapes: " \
+ str(value.shape) + " " \
+ str(self.dat[which[0]][which[1]][which[2]][which[3]][which[4]].shape) \
+ " for " + str(which))
else:
raise NameError("SharedMemory.set_shm() expected item \
specification of length 2-5, got " + str(len(which)))
def get_shm(self, which):
"""Method to get a specific array in shared memory.
"""
if len(which) == 2:
shape = self.layout[which[0]][which[1]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][0]
else:
return self.dat[which[0]][which[1]][:]
elif len(which) == 3:
shape = self.layout[which[0]][which[1]][which[2]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][which[2]][0]
else:
return self.dat[which[0]][which[1]][which[2]][:]
elif len(which) == 4:
shape = self.layout[which[0]][which[1]][which[2]][which[3]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][which[2]][which[3]][0]
else:
return self.dat[which[0]][which[1]][which[2]][which[3]][:]
elif len(which) == 5:
shape = self.layout[which[0]][which[1]][which[2]][which[3]][which[4]].shape
if is_scalar_shape(shape):
return self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][0]
else:
return self.dat[which[0]][which[1]][which[2]][which[3]][which[4]][:]
else:
raise NameError("SharedMemory.get_shm() expected item \
specification of length 2-5, got " + str(len(which)))
def update_net(self, net):
"""Update the given net (its parameters, etc.) from shared memory.
"""
# Search all parameters of the network in shared memory.
for i in self.dat:
# Determine item type.
i_type = get_item_type(self.net, i)
if i_type is not None:
for p in self.dat[i]['parameter']:
# TODO: For now update of metas is not done.
try:
if p in net[S2L(i_type)][i] and is_scalar_shape(self.layout[i]['parameter'][p].shape):
net[S2L(i_type)][i][p] = float(self.dat[i]['parameter'][p][0])
except:
pass
| 40,198 | 11,671 |
from distutils.core import setup

# Read the long description up front so the file handle is closed promptly.
# FIX: the original open('README.txt').read() inside the setup() call
# leaked the file handle.
with open('README.txt') as readme:
    long_description = readme.read()

setup(
    name='Geometry2D',
    version='0.7.1',
    author='Luis Da Costa',
    author_email='dacosta.le@gmail.com',
    packages=['geometry'],
    scripts=[],
    url='https://github.com/ldacosta/geometry2D/',
    license='LICENSE.txt',
    long_description=long_description,
    install_requires=[
        "numpy >= 1.13.1",
    ],
)
| 382 | 139 |
from unittest import TestCase
import numpy as np
import matplotlib.pyplot as plt
from .plot import rainbowplot
class Fixtures:
    """Namespace collecting the fixtures shared by all tests."""

    nt = 2**10
    nx = 2**10
    t = np.linspace(0, 10, nt)
    x = np.linspace(-10, +10, nx)

    @staticmethod
    def monochromatic_gaussian():
        """A simple monochromatic Gaussian pulse with fixed carrier frequency ω=10."""
        grid = Fixtures.x
        envelope = np.exp(-grid**2)
        carrier = np.exp(-1j * 10 * grid)
        return np.tile(envelope * carrier, (Fixtures.nt, 1))

    @staticmethod
    def two_monochromatic_gaussians():
        """A sum of two monochromatic pulses."""
        grid = Fixtures.x
        right_pulse = np.exp(-(grid - 5)**2) * np.exp(-1j * +10 * grid)
        left_pulse = np.exp(-(grid + 5)**2) * np.exp(-1j * -10 * grid)
        return np.tile(right_pulse + left_pulse, (Fixtures.nt, 1))

    @staticmethod
    def chirped_pulse():
        """A chirped Gaussian pulse."""
        grid = Fixtures.x
        wave = np.exp(-grid**2 / 5**2) * np.exp(-1j * grid**2)
        return np.tile(wave, (Fixtures.nt, 1))
class SmokeTestCase(TestCase):
    def test_smoke(self):
        """Smoke test: rendering a chirped pulse must not raise."""
        signal = Fixtures.chirped_pulse()
        rainbowplot(Fixtures.x, Fixtures.t, signal, win=0.5, ssx=4, ssy=4)
        plt.show()
| 1,341 | 542 |
from django.db.models import Count, Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from .models import NewsItem, SignUp, Artist, Athlete,Category
# Create your views here.
def search(request):
    """Full-text search over news item titles and stories (?q=...)."""
    results = NewsItem.objects.all()
    query = request.GET.get('q')
    if query:
        match = Q(title__icontains=query) | Q(news_story__icontains=query)
        results = results.filter(match).distinct()
    return render(request, 'search_results.html', {'queryset': results})
def get_category_count():
    """Return category titles annotated with their news item counts."""
    return NewsItem.objects.values('categories__title').annotate(
        Count('categories__title'))
def get_category():
    """Return the category titles associated with athletes."""
    return Athlete.objects.values('categories__title')
def home(request):
    """Render the home page; a POST registers a newsletter sign-up.

    FIX: the original indexed request.POST["email"] directly, which raised
    KeyError (HTTP 500) whenever the field was missing; missing or empty
    e-mails are now ignored.
    """
    queryset = NewsItem.objects.filter(featured=True)
    latest = NewsItem.objects.order_by('-date')[0:3]
    if request.method == "POST":
        email = request.POST.get("email")
        if email:
            SignUp.objects.create(email=email)
    context = {
        'object_list': queryset,
        'latest': latest
    }
    return render(request, "home_page.html", context)
def news(request):
    """Paginated listing of all news items, six per page."""
    page_request_var = 'page'
    paginator = Paginator(NewsItem.objects.all(), 6)
    requested_page = request.GET.get(page_request_var)
    try:
        page_items = paginator.page(requested_page)
    except PageNotAnInteger:
        # Fall back to the first page on a malformed page number.
        page_items = paginator.page(1)
    except EmptyPage:
        # Clamp out-of-range requests to the last page.
        page_items = paginator.page(paginator.num_pages)
    context = {
        'queryset': page_items,
        'most_recent': NewsItem.objects.order_by('-date')[0:6],
        'page_request_var': page_request_var,
        'category_count': get_category_count(),
    }
    return render(request, "news.html", context)
def post(request, id):
    """Detail page for a single news item; 404 when it does not exist."""
    item = get_object_or_404(NewsItem, id=id)
    return render(request, 'post.html', {'news': item})
def news_letter(request):
    """Render the static newsletter page."""
    return render(request, 'news_letter.html')
def get_artist(request):
    """List every artist."""
    return render(request, 'artists.html', {'artists': Artist.objects.all()})
def artist_profile(request, id):
    """Artist profile page plus news items that mention the artist's name."""
    artist = get_object_or_404(Artist, id=id)
    mentions = (
        Q(title__icontains=artist.name) |
        Q(news_story__icontains=artist.name))
    related_news = NewsItem.objects.filter(mentions).distinct()
    return render(request, 'artist_profile.html', {
        'artist': artist,
        'queryset': related_news,
    })
def get_athlete(request):
    """List every athlete together with category data for filtering."""
    context = {
        'athletes': Athlete.objects.all(),
        'category': get_category(),
        'all_categories': Category.objects.all(),
    }
    return render(request, 'athletes.html', context)
def athlete_profile(request, id):
    """Athlete profile page plus news items that mention the athlete's name."""
    athlete = get_object_or_404(Athlete, id=id)
    mentions = (
        Q(title__icontains=athlete.name) |
        Q(news_story__icontains=athlete.name))
    related_news = NewsItem.objects.filter(mentions).distinct()
    return render(request, 'athlete_profile.html', {
        'athlete': athlete,
        'queryset': related_news,
    })
def category_profile(request, id):
    """Show one category and the athletes whose categories match its title."""
    one_category = get_object_or_404(Category, id=id)
    matching_athletes = Athlete.objects.filter(
        Q(categories__title__icontains=one_category.title)).distinct()
    return render(request, 'category.html', {
        'one_category': one_category,
        'queryset': matching_athletes,
    })
| 3,957 | 1,286 |
import datetime
import glob
import sys
import requests
from defusedxml import ElementTree as ET
sys.path.append("..")
from llama.alma import Alma_API_Client
import llama.config as config
TODAY = datetime.date.today()

count_total_invoices = 0
count_invoices_updated = 0
count_invoice_errors = 0

# Update empty invoice XML file that gets posted to Alma to use today's date
tree = ET.parse("empty_invoice.xml")
root = tree.getroot()
voucher_date = root.find(".//voucher_date")
# NOTE(review): "%Y-%m-%dT12:%M:%SZ" pins the hour to 12 but keeps the
# *current* minute and second -- possibly "T12:00:00Z" was intended; confirm
# before changing.
voucher_date.text = TODAY.strftime("%Y-%m-%dT12:%M:%SZ")
tree.write("output-files/empty.xml")

# Update invoices status in Alma for all invoice IDs in
# output-files/invoice_ids_YYYYMMDDhhmmss.txt and
# output-files/invoice_special_YYYYMMDDhhmmss.txt
alma_client = Alma_API_Client(config.get_alma_api_key("ALMA_API_ACQ_READ_WRITE_KEY"))
alma_client.set_content_headers("application/xml", "application/xml")
today_string = TODAY.strftime("%Y%m%d")
invoice_files = glob.glob(f"output-files/invoice_ids_{today_string}*.txt")
special_invoice_files = glob.glob(f"output-files/invoice_special_{today_string}*.txt")

# Collect invoice IDs (one per line) from both of today's files.
with open(invoice_files[0]) as f:
    invoice_ids = f.readlines()
with open(special_invoice_files[0]) as f:
    invoice_ids.extend(f.readlines())

for item in invoice_ids:
    count_total_invoices += 1
    invoice_id = item.strip()
    print("Marking invoice as Paid in Alma")
    try:
        paid_xml = alma_client.mark_invoice_paid(
            invoice_id, "output-files/empty.xml")
        print(f"Invoice #{invoice_id} marked as Paid in Alma\n")
        # Keep the returned XML for the audit trail.
        with open(f"output-files/paid_{invoice_id}.xml", "w") as f:
            f.write(paid_xml)
        count_invoices_updated += 1
    except requests.HTTPError as e:
        # FIX: the error counter was reported in the summary below but was
        # never incremented.
        count_invoice_errors += 1
        print(f"Error marking invoice #{invoice_id} as paid in Alma")
        print(f"{e.response.text}\n")

print("'update_invoice_statuses' process complete")
print("Summary:")
print(f"  Total invoices processed: {count_total_invoices}")
print(f"  Invoices marked as paid in Alma: {count_invoices_updated}")
print(
    f"  Invoices not successfully marked as paid in Alma: {count_invoice_errors}"
)
| 2,133 | 765 |
import argparse
from pathlib import Path
import tensorflow as tf
import torch
from models.net import SPPNet
def convert_mobilenetv2(ckpt_path, num_classes):
    """Build an SPPNet (MobileNetV2 encoder, 'maspp' decoder) and load its
    weights from a TensorFlow DeepLab checkpoint.

    Parameters
    ----------
    ckpt_path : str
        Path to the TensorFlow checkpoint.
    num_classes : int
        Number of semantic classes for the logits layer.

    Returns
    -------
    SPPNet
        The PyTorch model with converted weights.
    """
    def conv_converter(pt_layer, tf_layer_name, depthwise=False, bias=False):
        # TF stores conv weights channels-last; transpose to PyTorch's
        # (out, in, kH, kW) layout (depthwise weights use a different order).
        if depthwise:
            pt_layer.weight.data = torch.Tensor(
                reader.get_tensor(f'{tf_layer_name}/depthwise_weights').transpose(2, 3, 0, 1))
        else:
            pt_layer.weight.data = torch.Tensor(
                reader.get_tensor(f'{tf_layer_name}/weights').transpose(3, 2, 0, 1))
        if bias:
            pt_layer.bias.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/biases'))

    def bn_converter(pt_layer, tf_layer_name):
        # Map TF batch-norm naming (beta/gamma/moving_*) onto PyTorch's.
        pt_layer.bias.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/beta'))
        pt_layer.weight.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/gamma'))
        pt_layer.running_mean.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/moving_mean'))
        pt_layer.running_var.data = torch.Tensor(reader.get_tensor(f'{tf_layer_name}/moving_variance'))

    def block_converter(pt_layer, tf_layer_name):
        # The first inverted-residual block has no expand stage.
        if hasattr(pt_layer, 'expand'):
            conv_converter(pt_layer.expand.conv, f'{tf_layer_name}/expand')
            bn_converter(pt_layer.expand.bn, f'{tf_layer_name}/expand/BatchNorm')
        conv_converter(pt_layer.depthwise.conv, f'{tf_layer_name}/depthwise', depthwise=True)
        bn_converter(pt_layer.depthwise.bn, f'{tf_layer_name}/depthwise/BatchNorm')
        conv_converter(pt_layer.project.conv, f'{tf_layer_name}/project')
        bn_converter(pt_layer.project.bn, f'{tf_layer_name}/project/BatchNorm')

    reader = tf.train.NewCheckpointReader(ckpt_path)
    model = SPPNet(num_classes, enc_type='mobilenetv2', dec_type='maspp')

    # MobileNetV2 stem
    conv_converter(model.encoder.conv, 'MobilenetV2/Conv')
    bn_converter(model.encoder.bn, 'MobilenetV2/Conv/BatchNorm')
    # FIX: the 17 copy-pasted block_converter calls are collapsed into a
    # loop; block 0's TF scope has no index suffix ('expanded_conv').
    for idx in range(17):
        suffix = '' if idx == 0 else f'_{idx}'
        block_converter(getattr(model.encoder, f'block{idx}'),
                        f'MobilenetV2/expanded_conv{suffix}')
    # SPP
    conv_converter(model.spp.aspp0.conv, 'aspp0')
    bn_converter(model.spp.aspp0.bn, 'aspp0/BatchNorm')
    conv_converter(model.spp.image_pooling.conv, 'image_pooling')
    bn_converter(model.spp.image_pooling.bn, 'image_pooling/BatchNorm')
    conv_converter(model.spp.conv, 'concat_projection')
    bn_converter(model.spp.bn, 'concat_projection/BatchNorm')
    # Logits
    conv_converter(model.logits, 'logits/semantic', bias=True)
    return model
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('ckpt_path')
    parser.add_argument('num_classes', type=int)
    parser.add_argument('output_path')
    args = parser.parse_args()

    output_path = Path(args.output_path)
    # FIX: a plain mkdir() raised FileExistsError when the output directory
    # already existed (and failed for missing intermediate directories).
    output_path.parent.mkdir(parents=True, exist_ok=True)

    model = convert_mobilenetv2(args.ckpt_path, args.num_classes)
    torch.save(model.state_dict(), output_path)
| 4,117 | 1,600 |
#!/usr/bin/env python
"""Create the Drupal EC2 security group unless it already exists."""
import sys

import ec2

security_group = ec2.DrupalSecurityGroup()
print("Created new Drupal Security Group Object:{}".format(security_group))

if security_group.exists():
    print("Security Group Exists")
    # FIX: use sys.exit -- the builtin exit() is injected by the `site`
    # module and is not guaranteed to be available in every interpreter.
    sys.exit(1)

security_group_id = security_group.create()
print("Created new EC2 Security Group ID:{}".format(security_group_id))
| 347 | 107 |
# script for extracting patches from video frames suitable for neural network
# training
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
from PIL import Image
import sys
import os
import glob
from PIL import Image
from os.path import basename, splitext
import numpy as np
def acceptable(a):
    """Keep a patch only if it is not almost entirely white,
    i.e. its mean pixel value does not exceed 95% of 255."""
    return bool(np.average(a) <= 0.95 * 255)
# Stride control: patch windows advance by (patch size // overlap) pixels,
# so neighbouring patches share most of their area.
overlap = 8
source_path = './reducted-conferences-videos-equations/'
destination_path = './conferences-videos-equations-samples-512/'
# Pair every source frame "<name>.jpg" with its ground truth "<name>.gt.jpg"
# and cut both into aligned patches.
for file_name in glob.glob(source_path+ "*.jpg"):
    name_without_extenstion = splitext(basename(file_name))[0]
    gt_file_path = source_path + name_without_extenstion + ".gt.jpg"
    print(file_name)
    print(gt_file_path)
    source_image = load_img(file_name, grayscale=False)
    try:
        # Ground truth is loaded as grayscale; frames without a ground-truth
        # image are skipped entirely.
        groud_img = load_img(gt_file_path, grayscale=True)
    except FileNotFoundError:
        #groud_img = Image.new('RGB', (source_image.size[0], source_image.size[1]), (255, 255, 255))
        continue
    # Only a single 512x512 size is produced; the nested loops allow adding
    # more (size_x, size_y) combinations later.
    size_list = [512]
    for size_x in size_list:
        for size_y in size_list:
            subimage_size = (size_x, size_y)
            # Number of stride steps that fit along each axis.
            num_of_subimages_horizontal = source_image.size[0] // (subimage_size[0] // overlap)
            num_of_subimages_vertical = source_image.size[1] // (subimage_size[1] // overlap)
            # NOTE(review): rest_h / rest_v are computed but never used.
            rest_h = source_image.size[0] - num_of_subimages_horizontal * (subimage_size[0] // overlap)
            rest_v = source_image.size[1] - num_of_subimages_vertical * (subimage_size[1] // overlap)
            for i in range(num_of_subimages_horizontal):
                for j in range(num_of_subimages_vertical):
                    # Top-left corner (x, y) and bottom-right corner (w, h)
                    # of the current crop window.
                    x = i * (subimage_size[0] // overlap)
                    y = j * (subimage_size[1] // overlap)
                    w = x + (subimage_size[0])
                    h = y + (subimage_size[1])
                    crop_rect = (x,y,w,h)
                    # Skip windows that would extend past the image border.
                    if w > source_image.size[0] or h > source_image.size[1]:
                        continue
                    chunk_file_name = "{dir}{name}-{sizex}-{sizey}-{i}-{j}".format(dir=destination_path, i=i, j=j, name=name_without_extenstion, sizex=size_x, sizey=size_y)
                    # Crop the ground truth first and drop patches that are
                    # almost entirely white (nothing to learn from).
                    gt_sub_image = groud_img.crop(crop_rect)
                    if not acceptable(img_to_array(gt_sub_image)):
                        continue
                    print(chunk_file_name)
                    # Save the aligned ground-truth / source patch pair.
                    gt_sub_image.save(chunk_file_name + ".gt.jpg")
                    sub_image = source_image.crop(crop_rect)
                    sub_image.save(chunk_file_name+ ".jpg")
| 2,624 | 880 |
# One face of the cube, rows listed top-to-bottom.
side = [
    [ "Y", "O", "W" ],
    [ "R", "G", "O" ],
    [ "O", "O", "B" ]
]

# Rotate the face 90 degrees counter-clockwise: transpose the grid, then
# reverse the row order (equivalent to new[2 - col][row] = side[row][col]).
new_side = [list(column) for column in zip(*side)][::-1]

print(new_side)
import os
import time
import json
from random import random
from datetime import datetime
import pandas as pd
import seaborn as sns
import requests
class SearchAndExtractData(object):
    """Scrape the CDI rate from the B3/Cetip site and plot it.

    ``create_csv`` samples the rate ten times (roughly one row every two
    seconds) into ``self.file``; ``create_graph`` renders hora vs. taxa
    into ``<graph_name>.png``.
    """

    def __init__(self, file: str, graph_name: str) -> None:
        self.file = file
        self.graph_name = graph_name

    def create_csv(self) -> None:
        """Collect ten CDI samples and append them to the CSV file."""
        ENDPOINT = "ConsultarTaxaDICetip.aspx"
        URL = f"https://www2.cetip.com.br/ConsultarTaxaDi/{ENDPOINT}"

        for _ in range(10):
            # Timestamp for this sample.
            data_e_hora = datetime.now()
            data = datetime.strftime(data_e_hora, "%Y/%m/%d")
            hora = datetime.strftime(data_e_hora, "%H:%M:%S")

            # Fetch the CDI rate from the B3 site.
            try:
                # FIX: without a timeout a stalled server hangs the scraper
                # forever; a timeout raises and aborts via the branch below.
                response = requests.get(URL, timeout=10)
                response.raise_for_status()
            except requests.HTTPError:
                # Best-effort: record the sample with a missing rate.
                print("Dado não encontrado, continuando.")
                cdi = None
            except Exception:
                print("Erro, parando a execução.")
                raise
            else:
                dado = json.loads(response.text)
                cdi = float(dado["taxa"].replace(",", "."))

            # Create the CSV with a header line on first use.
            if not os.path.exists(f"./{self.file}"):
                with open(
                    file=f"./{self.file}", mode="w", encoding="utf8"
                ) as fp:
                    fp.write("data,hora,taxa\n")

            # Append the sample; a failed fetch is written as "None".
            with open(file=f"./{self.file}", mode="a", encoding="utf8") as fp:
                fp.write(f"{data},{hora},{cdi}\n")

            # Jittered pause between requests (1.5 - 2.5 s).
            time.sleep(2 + (random() - 0.5))

        print("Sucesso")

    def create_graph(self) -> None:
        """Plot taxa over hora from the CSV and save it as a PNG."""
        df = pd.read_csv(f"./{self.file}")
        grafico = sns.lineplot(x=df["hora"], y=df["taxa"])
        grafico.set_xticklabels(labels=df["hora"], rotation=90)
        grafico.get_figure().savefig(f"{self.graph_name}.png")
| 2,113 | 676 |