hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb52f288b0e795d00e5f3bf63b427429792a24ad | 9,486 | py | Python | house_load_profiler.py | gianmarco-lorenti/RECOpt | c7916861db033f9d917d05094102194202e3bb09 | [
"MIT"
] | 8 | 2021-03-08T09:30:16.000Z | 2022-02-18T19:40:41.000Z | house_load_profiler.py | gianmarco-lorenti/RECOpt | c7916861db033f9d917d05094102194202e3bb09 | [
"MIT"
] | null | null | null | house_load_profiler.py | gianmarco-lorenti/RECOpt | c7916861db033f9d917d05094102194202e3bb09 | [
"MIT"
] | 1 | 2021-05-24T13:21:36.000Z | 2021-05-24T13:21:36.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 18:37:58 2020
@author: giamm
"""
# from tictoc import tic, toc
import numpy as np
import datareader #routine created to properly read the files needed in the following
from load_profiler import load_profiler as lp
###############################################################################
# This file contains a method that, for a given household (considering the
# availability of each appliance considered in this study), returns the electric
# load profile for the household during one day (1440 min), with a resolution
# of 1 minute.
###############################################################################
########## Routine
def house_load_profiler(time_dict, apps_availability, day, season, appliances_data, **params):
''' The method returns a load profile for a given household in a total simulation time of 1440 min, with a timestep of 1 min.
Inputs:
apps_availability - 1d-array, availability of each appliance for the household is stored (1 if present, 0 if not)
day - str, type of day (weekday: 'wd'| weekend: 'we')
season - str, season (summer: 's', winter: 'w', autumn or spring: 'ap')
appliances_data - dict, various input data related to the appliances
params - dict, simulation parameters
Outputs:
house_load_profile - 1d-array, load profile for the household (W)
energy - 1d-array, energy consumed by each appliance in one day (Wh/day)
'''
## Time
# Time discretization for the simulation
# Time-step (min)
dt = time_dict['dt']
# Total time of simulation (min)
time = time_dict['time']
# Vector of time from 00:00 to 23:59 (one day) (min)
time_sim = time_dict['time_sim']
## Parameters
# Simulation parameters that can be changed from the user
# Contractual power for each household (W)
power_max = params['power_max']*1000
## Input data for the appliances
# Appliances' attributes, energy consumptions and user's coefficients
    # apps is a 2d-array in which, for each appliance (rows), the value of each attribute is given (columns)
apps_ID = appliances_data['apps_ID']
    # apps_attr is a dictionary in which the name of each attribute (key) is linked to its column number in apps (value)
apps_attr = appliances_data['apps_attr']
## Household's load profile
# Generating the load profile for the house, considering which appliances are available
# Initializing the power vector for the load profile (W)
house_load_profile = np.zeros(np.shape(time_sim))
# Initializing the vector where to store the energy consumption from each appliance (Wh/day)
energy = np.zeros(len(apps_ID))
# Using the method load_profiler(lp) to get the load profile for each appliance
for app in apps_ID:
        # The ID number of the appliance is stored in a variable since it will be used many times
app_ID = apps_ID[app][apps_attr['id_number']]
# Skipping appliances that are not present in the household
if apps_availability[app_ID] == 0:
continue
        load_profile = lp(time_dict, app, day, season, appliances_data, **params) # load_profile is the appliance's power demand profile (W) over the day
        # In case the instantaneous power exceeds the maximum power, a few attempts
        # are made to change the moment at which the appliance is switched on
        # (since it is evaluated randomly, according to the cumulative frequency
        # of utilization for that appliance)
count = 0
maxtries = 10
duration_indices = np.size(load_profile[load_profile > 0])
while np.any(house_load_profile + load_profile > power_max) and count < maxtries:
switch_on_index = np.size((house_load_profile + load_profile)[house_load_profile + load_profile > power_max])
roll_step = int((duration_indices + switch_on_index)/2)
load_profile = np.roll(load_profile, roll_step)
count += 1
# Evaluating the energy consumption from each appliance by integrating
# the load profile over the time. Since the time is in minutes, the
# result is divided by 60 in order to obtain Wh.
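        # For example, a constant 600 W appliance running for 60 min integrates to
        # 600*60 = 36000 W*min, which divided by 60 gives 600 Wh.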
energy[app_ID] = np.trapz(load_profile,time_sim)/60
# Injecting the power demand from each appliance into the load profile of the household
house_load_profile[:] = house_load_profile[:] + load_profile
    # In case the while loop failed, the instantaneous power is saturated to the
    # maximum power anyway. This may happen because the last appliance to be considered
    # is the lighting, which is of "continuous" type, therefore its profile does
    # not depend on a cumulative frequency (it is always on)
house_load_profile[house_load_profile > power_max] = power_max
return(house_load_profile,energy)
#####################################################################################################################
# ## Uncomment the following lines to test the function
# import matplotlib.pyplot as plt
# from tictoc import tic, toc
# # Time-step, total time and vector of time from 00:00 to 23:59 (one day) (min)
# dt = 1
# time = 1440
# time_sim = np.arange(0,time,dt)
# # Creating a dictionary to be passed to the various methods, containing the time discretization
# time_dict = {
# 'time': time,
# 'dt': dt,
# 'time_sim': time_sim,
# }
# apps, apps_ID, apps_attr = datareader.read_appliances('eltdome_report.csv',';','Input')
# ec_yearly_energy, ec_levels_dict = datareader.read_enclasses('classenerg_report.csv',';','Input')
# coeff_matrix, seasons_dict = datareader.read_enclasses('coeff_matrix.csv',';','Input')
# apps_avg_lps = {}
# apps_dcs = {}
# for app in apps_ID:
# # app_nickname is a 2 or 3 characters string identifying the appliance
# app_nickname = apps_ID[app][apps_attr['nickname']]
# # app_type depends from the work cycle for the appliance: 'continuous'|'no_duty_cycle'|'duty_cycle'|
# app_type = apps_ID[app][apps_attr['type']]
# # app_wbe (weekly behavior), different usage of the appliance in each type of days: 'wde'|'we','wd'
# app_wbe = apps_ID[app][apps_attr['week_behaviour']]
# # app_sbe (seasonal behavior), different usage of the appliance in each season: 'sawp'|'s','w','ap'
# app_sbe = apps_ID[app][apps_attr['season_behaviour']]
# # Building the name of the file to be opened and read
# fname_nickname = app_nickname
# fname_type = 'avg_loadprof'
# apps_avg_lps[app] = {}
# for season in app_sbe:
# fname_season = season
# for day in app_wbe:
# fname_day = day
# filename = '{}_{}_{}_{}.csv'.format(fname_type, fname_nickname, fname_day, fname_season)
# # Reading the time and power vectors for the load profile
# data_lp = datareader.read_general(filename,';','Input')
# # Time is stored in hours and converted to minutes
# time_lp = data_lp[:, 0]
# time_lp = time_lp*60
# # Power is already stored in Watts, it corresponds to the load profile
# power_lp = data_lp[:, 1]
# load_profile = power_lp
# # Interpolating the load profile if it has a different time-resolution
# if (time_lp[-1] - time_lp[0])/(np.size(time_lp) - 1) != dt:
# load_profile = np.interp(time_sim, time_lp, power_lp, period = time)
# apps_avg_lps[app][(season, day)] = load_profile
# if app_type == 'duty_cycle':
# fname_type = 'dutycycle'
# filename = '{}_{}.csv'.format(fname_type, fname_nickname)
# # Reading the time and power vectors for the duty cycle
# data_dc = datareader.read_general(filename, ';', 'Input')
# # Time is already stored in minutes
# time_dc = data_dc[:, 0]
# # Power is already stored in Watts, it corresponds to the duty cycle
# power_dc = data_dc[:, 1]
# duty_cycle = power_dc
# # Interpolating the duty-cycle, if it has a different time resolution
# if (time_dc[-1] - time_dc[0])/(np.size(time_dc) - 1) != dt:
# time_dc = np.arange(time_dc[0], time_dc[-1] + dt, dt)
# duty_cycle = np.interp(time_dc, power_dc)
# apps_dcs[app] = {'time_dc': time_dc,
# 'duty_cycle': duty_cycle}
# appliances_data = {
# 'apps': apps,
# 'apps_ID': apps_ID,
# 'apps_attr': apps_attr,
# 'ec_yearly_energy': ec_yearly_energy,
# 'ec_levels_dict': ec_levels_dict,
# 'coeff_matrix': coeff_matrix,
# 'seasons_dict': seasons_dict,
# 'apps_avg_lps': apps_avg_lps,
# 'apps_dcs': apps_dcs,
# }
# params = {
# 'power_max': 3,
# 'en_class': 'D',
# 'toll': 15,
# 'devsta': 2,
# 'ftg_avg': 100
# }
# apps_availability = np.ones(17)
# day = 'wd'
# season = 's'
# house_load_profile, energy = house_load_profiler(time_dict, apps_availability, day, season, appliances_data, **params)
# plt.bar(time_sim,house_load_profile,width=dt,align='edge')
# plt.show()
###################################################################################################################################### | 37.2 | 134 | 0.621969 | 1,267 | 9,486 | 4.479084 | 0.240726 | 0.073656 | 0.033833 | 0.011454 | 0.221322 | 0.170573 | 0.14185 | 0.09304 | 0.065903 | 0.052863 | 0 | 0.012665 | 0.242568 | 9,486 | 255 | 134 | 37.2 | 0.777175 | 0.731499 | 0 | 0 | 0 | 0 | 0.025065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb5619127572d5c7abef2ce8289d84bfdb46675d | 6,075 | py | Python | content/model_funcs.py | nathanlyons/sensitivity_analysis_clinic_CSDMS_2019 | ca1ae2cdc5239bae755929f6a92d669a82192813 | [
"MIT"
] | null | null | null | content/model_funcs.py | nathanlyons/sensitivity_analysis_clinic_CSDMS_2019 | ca1ae2cdc5239bae755929f6a92d669a82192813 | [
"MIT"
] | null | null | null | content/model_funcs.py | nathanlyons/sensitivity_analysis_clinic_CSDMS_2019 | ca1ae2cdc5239bae755929f6a92d669a82192813 | [
"MIT"
] | null | null | null | """Model functions for the sensitivity analysis clinic at CSDMS 2019.
Written by Nathan Lyons, May 2019
"""
from os.path import join
from landlab import RasterModelGrid
from landlab.components import FastscapeEroder, FlowAccumulator, LinearDiffuser
from landlab.io import write_esri_ascii
import numpy as np
import experiment_funcs as ef
def calculate_factor_values(levels):
"""Calculate values of trial factors.
Parameters
----------
levels : dictionary
The factor levels of the trial.
Returns
-------
dictionary
Calculated factor values.
"""
# Set parameters based on factor levels.
f = {
'U': 10**levels['U_exp'],
'K': 10**levels['K_exp'],
'D': 10**levels['D_exp'],
'base_level_fall': 10**levels['base_level_fall']}
return f
def run_model(f, output_path):
"""Run a trial of the base level fall model.
Parameters
----------
f : dictionary
Model trial factors.
output_path : string
Path where outputs will be saved.
"""
# Set parameters.
nrows = 200
ncols = 100
dx = 100
dt = 1000
# Create initial topography with random elevation values.
mg = RasterModelGrid(nrows, ncols, dx)
z = mg.add_zeros('node', 'topographic__elevation')
np.random.seed(1)
z += np.random.rand(z.size)
mg.set_closed_boundaries_at_grid_edges(right_is_closed=True,
top_is_closed=False,
left_is_closed=True,
bottom_is_closed=False)
# Instantiate model components.
fa = FlowAccumulator(mg, flow_director='D8')
sp = FastscapeEroder(mg, K_sp=f['K'], m_sp=0.5, n_sp=1)
ld = LinearDiffuser(mg, linear_diffusivity=f['D'], deposit=False)
# Set variables to evaluate presence of steady state.
initial_conditions_set = False
at_steady_state = False
relief_record = []
recent_mean = []
recent_std = []
step = 0
# Set number of time steps, `steps_ss` that is the time window to evaluate
# steady state.
steps_ss = 1000
# Create a dictionary to store responses.
response = {}
# Run model until steady state is reached.
uplift_per_step = f['U'] * dt
core_mask = mg.node_is_core()
print('Running model until elevation reaches steady state.')
while not at_steady_state:
fa.run_one_step()
sp.run_one_step(dt)
ld.run_one_step(dt)
z[core_mask] += uplift_per_step
at_steady_state = check_steady_state(step * dt, z, step, steps_ss,
relief_record, recent_mean,
recent_std)
if at_steady_state and not initial_conditions_set:
initial_conditions_set = True
# Save elevation of the initial conditions.
fn = join(output_path, 'initial_conditions_elevation.asc')
write_esri_ascii(fn, mg, ['topographic__elevation'], clobber=True)
# Retain steady state relief, `relief_ss`.
z_core = z[mg.core_nodes]
relief_ss = z_core.max() - z_core.min()
response['relief_at_steady_state'] = relief_ss
# Find steady state divide position.
divide_y_coord_initial = get_divide_position(mg)
# Perturb elevation.
base_level_nodes = mg.y_of_node == 0
z[base_level_nodes] -= f['base_level_fall']
at_steady_state = False
elif at_steady_state and initial_conditions_set:
response['time_back_to_steady_state'] = step * dt
# Get divide migration distance.
divide_y_coord_final = get_divide_position(mg)
d = divide_y_coord_final - divide_y_coord_initial
response['divide_migration_distance'] = d
# Save final elevation.
fn = join(output_path, 'final_elevation.asc')
write_esri_ascii(fn, mg, ['topographic__elevation'], clobber=True)
# Advance step counter.
step += 1
# Write response to file.
path_r = join(output_path, 'response.csv')
ef.write_data(response, path_r)
def check_steady_state(time, z, step, step_win, relief_record, recent_mean,
recent_std):
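    """
    Track the total topographic relief through time and flag steady state.

    Appends the current relief (max minus min elevation) to relief_record,
    keeps running means and standard deviations of relief over a window of
    `step_win` steps, and flags steady state once both change by less than 1%
    across that window.
    """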
relief_record.append(z.max() - z.min())
if step < step_win:
# If fewer than `step_win` time steps have occurred, calculate the
        # running mean and standard deviation over the entire elapsed time period.
recent_mean.append(np.mean(relief_record))
recent_std.append(np.std(relief_record))
else:
# If more than `step_win` time steps have occurred, calculate the
# running mean and standard deviation over the most recent `step_win`
# time steps.
recent_mean.append(np.mean(relief_record[-step_win:]))
recent_std.append(np.std(relief_record[-step_win:]))
mean_pct_change = (np.abs(recent_mean[-1] - recent_mean[-step_win]) /
np.abs(recent_mean[-1] + recent_mean[-step_win]))
std_pct_change = (np.abs(recent_std[-1] - recent_std[-step_win]) /
np.abs(recent_std[-1] + recent_std[-step_win]))
# If the percent change is less than 1% for both the mean and
# standard deviation, flag the system as at steady state.
thresh = 0.01
minor_mean_change = mean_pct_change < thresh
minor_std_change = std_pct_change < thresh or recent_std[-1] == 0
at_steady_state = minor_mean_change and minor_std_change
if step % 1e3 == 0 or at_steady_state:
print('ss_check:', 'time', time, 'mean_pct_change',
mean_pct_change, 'std_pct_change', std_pct_change)
return at_steady_state
def get_divide_position(grid):
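    """Return the y coordinate of the drainage divide, i.e. of the grid row with the highest mean elevation."""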
z = grid.at_node['topographic__elevation'].reshape(grid.shape)
z_row_mean = z.mean(axis=1)
divide_y_coord = z_row_mean.argmax() * grid.dy
return divide_y_coord
| 33.196721 | 79 | 0.624691 | 790 | 6,075 | 4.53038 | 0.272152 | 0.06147 | 0.039955 | 0.018441 | 0.18888 | 0.17463 | 0.148645 | 0.111763 | 0.111763 | 0.075999 | 0 | 0.012649 | 0.28428 | 6,075 | 182 | 80 | 33.379121 | 0.810488 | 0.233745 | 0 | 0.042105 | 0 | 0 | 0.085293 | 0.042207 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042105 | false | 0 | 0.063158 | 0 | 0.136842 | 0.021053 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb565d6a848c25f10f712bae53673495afeccc03 | 4,865 | py | Python | src/preprocessing.py | jimmystique/AudioClassification | 9f9966306068cff7419f6c190752bab4d35b3870 | [
"MIT"
] | null | null | null | src/preprocessing.py | jimmystique/AudioClassification | 9f9966306068cff7419f6c190752bab4d35b3870 | [
"MIT"
] | null | null | null | src/preprocessing.py | jimmystique/AudioClassification | 9f9966306068cff7419f6c190752bab4d35b3870 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import pickle as pkl
import numpy as np
import argparse
import yaml
import librosa
import scipy
from scipy.io import wavfile
import multiprocessing
import time
import datetime
import socket
def resample_wav_data(wav_data, orig_sr, target_sr):
""" Resample wav_data from sampling rate equals to orig_sr to a new sampling rate equals to target_sr
"""
# resampled_wav_data = librosa.core.resample(y=wav_data.astype(np.float32), orig_sr=orig_sr, target_sr=target_sr)
resampled_wav_data = scipy.signal.resample(wav_data, target_sr)
# print(wav_data, resampled_wav_data, len(resampled_wav_data))
return resampled_wav_data
def pad_wav_data(wav_data, vect_size):
""" Pad wav data
"""
if (len(wav_data) > vect_size):
        raise ValueError("wav_data is longer than vect_size and cannot be padded")
elif len(wav_data) < vect_size:
padded_wav_data = np.zeros(vect_size)
starting_point = np.random.randint(low=0, high=vect_size-len(wav_data))
padded_wav_data[starting_point:starting_point+len(wav_data)] = wav_data
else:
padded_wav_data = wav_data
return padded_wav_data
def fft_preprocess_user_data_at_pth(user_data_path, preprocessed_data_path, vect_size):
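    """
    Read every .wav file in `user_data_path`, resample each recording to
    `vect_size` samples with the FFT-based scipy resampler, and pickle the
    resulting DataFrame (data, user_id, record_num, label) to
    `preprocessed_data_path`.
    """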
print(user_data_path)
user_df = pd.DataFrame(columns=['data', "user_id", "record_num", "label"])
wav_user_id = 0
for file in sorted(os.listdir(user_data_path)):
if file.endswith(".wav"):
wav_label, wav_user_id, wav_record_n = os.path.splitext(file)[0].split("_")
wav_sr, wav_data = wavfile.read(os.path.join(user_data_path, file))
resampled_wav_data = resample_wav_data(wav_data, wav_sr, vect_size)
# new_row = {"data": padded_wav_data, "user_id": wav_user_id, "record_num": wav_record_n, "label": wav_label}
new_row = {"data": resampled_wav_data, "user_id": wav_user_id, "record_num": wav_record_n, "label": wav_label}
user_df = user_df.append(new_row, ignore_index=True)
pkl.dump( user_df, open("{}_preprocessed.pkl".format(os.path.join(preprocessed_data_path, str(wav_user_id))), "wb" ) )
def downsampling_preprocess_user_data_at_pth(user_data_path, preprocessed_data_path, target_sr, vect_size):
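    """
    Read every .wav file in `user_data_path`, reload it at `target_sr` with
    librosa, zero-pad the signal to `vect_size` samples at a random offset,
    and pickle the resulting DataFrame to `preprocessed_data_path`.
    """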
print(user_data_path)
user_df = pd.DataFrame(columns=['data', "user_id", "record_num", "label"])
wav_user_id = 0
for file in sorted(os.listdir(user_data_path)):
if file.endswith(".wav"):
wav_label, wav_user_id, wav_record_n = os.path.splitext(file)[0].split("_")
wav_sr, wav_data = wavfile.read(os.path.join(user_data_path, file))
y,s = librosa.load(os.path.join(user_data_path, file), target_sr)
padded_wav_data = pad_wav_data(y, vect_size)
new_row = {"data": padded_wav_data, "user_id": wav_user_id, "record_num": wav_record_n, "label": wav_label}
user_df = user_df.append(new_row, ignore_index=True)
pkl.dump(user_df, open("{}_preprocessed.pkl".format(os.path.join(preprocessed_data_path, str(wav_user_id))), "wb" ) )
def preprocess(raw_data_path, preprocessed_data_path, resampling_method, n_process, vect_size, target_sr=8000,):
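    """
    Preprocess the raw dataset: find every user folder under `raw_data_path`
    that contains .wav files and apply the selected resampling method
    ('fft' or 'downsampling') to each folder in a multiprocessing pool,
    writing pickled DataFrames to `preprocessed_data_path`.
    """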
#Create preprocessed_data_path if the directory does not exist
if not os.path.exists(preprocessed_data_path):
os.makedirs(preprocessed_data_path)
users_data_path = sorted([folder.path for folder in os.scandir(raw_data_path) if folder.is_dir() and any(file.endswith(".wav") for file in os.listdir(folder))])
print(users_data_path)
# pool=multiprocessing.Pool(n_process)
pool=multiprocessing.Pool(1)
if resampling_method == 'fft':
print("Resampling with FFT ...")
pool.starmap(fft_preprocess_user_data_at_pth, [[folder, preprocessed_data_path, vect_size] for folder in users_data_path if os.path.isdir(folder)], chunksize=1)
elif resampling_method == 'downsampling':
print("Resampling by downsampling ...")
pool.starmap(downsampling_preprocess_user_data_at_pth, [[folder, preprocessed_data_path, target_sr, vect_size] for folder in users_data_path if os.path.isdir(folder)], chunksize=1)
else:
raise ValueError(f"method {method} does not exist")
if __name__ == "__main__":
np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--preprocessing_cfg", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
preprocessing_cfg = yaml.safe_load(open(args.preprocessing_cfg))["preprocessing"]
t1 = time.time()
preprocess(**preprocessing_cfg)
t2 = time.time()
with open("logs/logs.csv", "a") as myfile:
myfile.write("{:%Y-%m-%d %H:%M:%S},data preprocessing,{},{},{:.2f}\n".format(datetime.datetime.now(),socket.gethostname(),preprocessing_cfg['n_process'],t2-t1))
print("Time elapsed for data processing: {} seconds ".format(t2-t1))
#Preprocessing : ~113s using Parallel computing with n_processed = 10 and resampling with scipys
#Preprocessing : ~477.1873028278351 seconds using Parallel computing with n_processed = 10 and resampling with resampy (python module for efficient time-series resampling)
| 41.581197 | 182 | 0.763001 | 765 | 4,865 | 4.539869 | 0.227451 | 0.068529 | 0.057587 | 0.020155 | 0.460121 | 0.415203 | 0.406565 | 0.394472 | 0.394472 | 0.366254 | 0 | 0.010638 | 0.111202 | 4,865 | 116 | 183 | 41.939655 | 0.792553 | 0.159096 | 0 | 0.25974 | 0 | 0 | 0.115328 | 0.006856 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064935 | false | 0 | 0.168831 | 0 | 0.25974 | 0.077922 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb5b9e701abf5095d0390b5cf553b81d80794a1b | 5,113 | py | Python | calaccess_website/views/docs/ccdc.py | california-civic-data-coalition/django-calaccess-downloads | 198f3b5b7fca846d9ce7c84f3a5bfa0ff4d3d3f7 | [
"MIT"
] | 3 | 2016-09-16T16:50:31.000Z | 2021-07-30T23:58:29.000Z | calaccess_website/views/docs/ccdc.py | world-admin/django-calaccess-downloads-website | 205480959358f51f4a7f11275fb8f60bd1091025 | [
"MIT"
] | 193 | 2016-05-27T16:34:10.000Z | 2021-09-24T16:11:04.000Z | calaccess_website/views/docs/ccdc.py | world-admin/django-calaccess-downloads-website | 205480959358f51f4a7f11275fb8f60bd1091025 | [
"MIT"
] | 4 | 2016-06-14T00:41:19.000Z | 2022-01-14T00:29:45.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Views for CCDC file documentation pages.
"""
# Django tricks
from django.apps import apps
from django.http import Http404
from django.urls import reverse
from calaccess_website.templatetags.calaccess_website_tags import slugify
# Models
from calaccess_processed.models import ProcessedDataFile
# Views
from calaccess_website.views import CalAccessModelListMixin
from django.views.generic import DetailView, ListView
def get_ocd_proxy_models():
"""
Return an iterable of all OCD proxy models from the processed_data app.
"""
election_proxies = apps.get_app_config('calaccess_processed_elections').get_ocd_models_map().values()
flat_proxies = apps.get_app_config("calaccess_processed_flatfiles").get_flat_proxy_list()
return list(election_proxies) + list(flat_proxies)
def get_processed_data_files():
"""
Return a tuple of ProcessedDataFile instances for published files.
"""
file_list = [ProcessedDataFile(file_name=m().file_name) for m in get_ocd_proxy_models()]
return sorted(file_list, key=lambda f: f.file_name)
class CcdcFileList(ListView, CalAccessModelListMixin):
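    """
    A list page showing all processed (CCDC) data files, grouped by type.
    """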
template_name = 'calaccess_website/docs/ccdc/file_list.html'
def get_queryset(self):
"""
Returns the CCDC model list with grouped by type.
"""
return self.regroup_by_klass_group(get_processed_data_files())
def get_context_data(self, **kwargs):
context = super(CcdcFileList, self).get_context_data(**kwargs)
context['file_num'] = len(get_processed_data_files())
context['title'] = 'Processed files'
context['description'] = 'Definitions, record layouts and data dictionaries for the \
processed data files released by the California Civic Data Coalition. Recommended for beginners and regular use.'
return context
class BaseFileDetailView(DetailView):
"""
Base class for views providing information about a CCDC data file.
"""
def get_queryset(self):
"""
Returns a list of the ccdc data files as a key dictionary
with the URL slug as the keys.
"""
return dict((slugify(f.file_name), f) for f in get_processed_data_files())
def set_kwargs(self, obj):
self.kwargs = {
'slug': obj
}
def get_object(self):
"""
Returns the file model from the CAL-ACCESS processed data app that
matches the provided slug.
Raises a 404 error if one is not found
"""
key = self.kwargs['slug']
try:
return self.get_queryset()[key.lower()]
except KeyError:
raise Http404
def get_context_data(self, **kwargs):
"""
Add some extra bits to the template's context
"""
file_name = self.kwargs['slug'].replace("-", "")
context = super(BaseFileDetailView, self).get_context_data(**kwargs)
# Pull all previous versions of the provided file
context['version_list'] = ProcessedDataFile.objects.filter(
file_name__icontains=file_name
).order_by(
'-version__raw_version__release_datetime'
).exclude(
version__raw_version__release_datetime__lte='2016-07-27'
)
# note if the most recent version of the file is empty
try:
context['empty'] = context['version_list'][0].records_count == 0
except IndexError:
context['empty'] = True
return context
class CcdcFileDownloadsList(BaseFileDetailView):
"""
A detail page with links to all downloads for the provided CCDC data file.
"""
template_name = 'calaccess_website/docs/ccdc/download_list.html'
def get_url(self, obj):
return reverse('ccdc_file_downloads_list', kwargs=dict(slug=obj))
class CcdcFileDetail(BaseFileDetailView):
"""
A detail page with all documentation for the provided CCDC data file.
"""
template_name = 'calaccess_website/docs/ccdc/file_detail.html'
def get_url(self, obj):
return reverse('ccdc_file_detail', kwargs=dict(slug=obj))
def get_context_data(self, **kwargs):
"""
Add some extra bits to the template's context
"""
context = super(CcdcFileDetail, self).get_context_data(**kwargs)
# Add list of fields to context
context['fields'] = self.get_sorted_fields()
return context
def get_sorted_fields(self):
"""
Return a list of fields (dicts) sorted by name.
"""
field_list = []
for field in self.object.model().get_field_list():
field_data = {
'column': field.name,
'description': field.description,
'help_text': field.help_text,
}
if field.choices and len(field.choices) > 0:
field_data['choices'] = [c for c in field.choices]
else:
field_data['choices'] = None
field_list.append(field_data)
return sorted(field_list, key=lambda k: k['column'])
| 32.775641 | 113 | 0.656171 | 628 | 5,113 | 5.146497 | 0.297771 | 0.020421 | 0.02599 | 0.02599 | 0.256807 | 0.150681 | 0.142327 | 0.103342 | 0.103342 | 0.103342 | 0 | 0.005474 | 0.249756 | 5,113 | 155 | 114 | 32.987097 | 0.83707 | 0.196558 | 0 | 0.151899 | 0 | 0 | 0.108143 | 0.065612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151899 | false | 0 | 0.088608 | 0.025316 | 0.468354 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb5c8ea17ee9ecb16f8a2b9c72c06dd3eb1f5f67 | 2,536 | py | Python | comment/models/reactions.py | setayeshmbr/coronavirus_blog | 2bb94e905ac7e5401776ac81fd301a90a24f1a9f | [
"MIT"
] | null | null | null | comment/models/reactions.py | setayeshmbr/coronavirus_blog | 2bb94e905ac7e5401776ac81fd301a90a24f1a9f | [
"MIT"
] | null | null | null | comment/models/reactions.py | setayeshmbr/coronavirus_blog | 2bb94e905ac7e5401776ac81fd301a90a24f1a9f | [
"MIT"
] | null | null | null | from enum import IntEnum, unique
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils import timezone
from comment.models import Comment
from comment.managers import ReactionManager, ReactionInstanceManager
class Reaction(models.Model):
comment = models.OneToOneField(Comment, on_delete=models.CASCADE)
likes = models.PositiveIntegerField(default=0)
dislikes = models.PositiveIntegerField(default=0)
objects = ReactionManager()
def _increase_count(self, field):
self.refresh_from_db()
setattr(self, field, models.F(field) + 1)
self.save(update_fields=[field])
def _decrease_count(self, field):
self.refresh_from_db()
setattr(self, field, models.F(field) - 1)
self.save(update_fields=[field])
def increase_reaction_count(self, reaction):
if reaction == ReactionInstance.ReactionType.LIKE.value:
self._increase_count('likes')
else:
self._increase_count('dislikes')
def decrease_reaction_count(self, reaction):
if reaction == ReactionInstance.ReactionType.LIKE.value:
self._decrease_count('likes')
else:
self._decrease_count('dislikes')
class ReactionInstance(models.Model):
@unique
class ReactionType(IntEnum):
LIKE = 1
DISLIKE = 2
CHOICES = [(r.value, r.name) for r in ReactionType]
reaction = models.ForeignKey(Reaction, related_name='reactions', on_delete=models.CASCADE)
user = models.ForeignKey(get_user_model(), related_name='reactions', on_delete=models.CASCADE)
reaction_type = models.SmallIntegerField(choices=CHOICES)
date_reacted = models.DateTimeField(auto_now=timezone.now())
objects = ReactionInstanceManager()
class Meta:
unique_together = ['user', 'reaction']
@receiver(post_delete, sender=ReactionInstance)
def delete_reaction_instance(sender, instance, using, **kwargs):
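    """Decrement the like/dislike counter on the parent Reaction when a reaction instance is deleted."""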
instance.reaction.decrease_reaction_count(instance.reaction_type)
@receiver(post_save, sender=ReactionInstance)
def add_count(sender, instance, created, raw, using, update_fields, **kwargs):
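    """Increment the like/dislike counter on the parent Reaction when a new reaction instance is created."""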
if created:
instance.reaction.increase_reaction_count(instance.reaction_type)
@receiver(post_save, sender=Comment)
def add_reaction(sender, instance, created, raw, using, update_fields, **kwargs):
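    """Create an empty Reaction counters row for every newly created Comment."""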
if created:
Reaction.objects.create(comment=instance)
| 32.935065 | 98 | 0.729101 | 294 | 2,536 | 6.112245 | 0.27551 | 0.027824 | 0.023372 | 0.035058 | 0.351697 | 0.351697 | 0.351697 | 0.306066 | 0.306066 | 0.244853 | 0 | 0.002859 | 0.172319 | 2,536 | 76 | 99 | 33.368421 | 0.853263 | 0 | 0 | 0.181818 | 0 | 0 | 0.022082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.145455 | 0 | 0.527273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
246de88d2fe441aebedc83fc4f8b906aee07ae8a | 1,394 | py | Python | gorp/test/test_b_option.py | molsonkiko/gorpy | 1dceb8bb530a7c7c558d51cedbbbf1cdc9e6f914 | [
"MIT"
] | null | null | null | gorp/test/test_b_option.py | molsonkiko/gorpy | 1dceb8bb530a7c7c558d51cedbbbf1cdc9e6f914 | [
"MIT"
] | null | null | null | gorp/test/test_b_option.py | molsonkiko/gorpy | 1dceb8bb530a7c7c558d51cedbbbf1cdc9e6f914 | [
"MIT"
] | null | null | null | from gorp.readfiles import *
from gorp.test.test_ku_options import setup_tempdir
import itertools
import os
import re  # os and re are used below; imported explicitly in case the star import above does not provide them
import unittest
og_dirname = os.getcwd()
def get_combos():
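    """
    Run the base gorp query under every combination of the option flags and
    return (query_results, bad_combos), both keyed by the frozenset of option
    letters: query_results holds each query's resultset, bad_combos holds the
    repr of any exception raised for that combination.
    """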
setup_tempdir()
os.chdir(os.path.join(gorpdir, "test", "temp"))
base_query = " -b '[bp]l[uo]t' /."
gorptags = ["-r", "-l", "-h", "-i", "-c", "-o", "-n", "-v"]
tag_combos = (
c
for combos in (itertools.combinations(gorptags, ii) for ii in range(9))
for c in combos
)
query_results = {}
bad_combos = {}
session = GorpSession(print_output=False)
try:
for combo in tag_combos:
Combo = " ".join(combo)
try:
session.receive_query(Combo + base_query)
out = session.old_queries["prev"].resultset
query_results[frozenset(re.findall("[a-z]+", Combo))] = out
except Exception as ex:
bad_combos[frozenset(re.findall("[a-z]+", Combo))] = repr(ex)
finally:
session.close()
os.chdir(og_dirname)
return query_results, bad_combos
class BOptionTester(unittest.TestCase):
def test_b_option(self):
query_results, bad_combos = get_combos()
self.assertFalse(
bad_combos,
"At least one option combo failed with the 'b' option under normal conditions",
)
if __name__ == "__main__":
unittest.main()
| 29.041667 | 91 | 0.58967 | 172 | 1,394 | 4.581395 | 0.52907 | 0.057107 | 0.057107 | 0.079949 | 0.063452 | 0.063452 | 0 | 0 | 0 | 0 | 0 | 0.000996 | 0.27977 | 1,394 | 47 | 92 | 29.659574 | 0.783865 | 0 | 0 | 0.05 | 0 | 0 | 0.1033 | 0 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.2 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2471c183baaffa838c1d8f4a682290146e29d206 | 8,689 | py | Python | stancode_projects/Break out games/breakoutgraphics.py | itinghuang/iting-projects | 304d41fe8a4b3ae45a01611a8868981f31e72d68 | [
"MIT"
] | null | null | null | stancode_projects/Break out games/breakoutgraphics.py | itinghuang/iting-projects | 304d41fe8a4b3ae45a01611a8868981f31e72d68 | [
"MIT"
] | null | null | null | stancode_projects/Break out games/breakoutgraphics.py | itinghuang/iting-projects | 304d41fe8a4b3ae45a01611a8868981f31e72d68 | [
"MIT"
] | null | null | null | """
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
This module defines the BreakoutGraphics class: it creates the window, paddle,
ball and bricks for a Breakout game and handles paddle control, ball motion,
collisions and the win/lose screens.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
# constant
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40 # Height of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
# constants that cannot be changed by user
INITIAL_Y_SPEED = 7.0 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
count = 0 # the Qty of removed bricks
class BreakoutGraphics:
def __init__(self, ball_radius = BALL_RADIUS, paddle_width = PADDLE_WIDTH,
paddle_height = PADDLE_HEIGHT, paddle_offset = PADDLE_OFFSET,
brick_rows = BRICK_ROWS, brick_cols = BRICK_COLS,
brick_width = BRICK_WIDTH, brick_height = BRICK_HEIGHT,
brick_offset = BRICK_OFFSET, brick_spacing = BRICK_SPACING,
title='Breakout'):
# Create a graphical window, with some extra space.
window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=window_width, height=window_height, title=title)
# Create a paddle.
self.paddle = GRect(PADDLE_WIDTH,PADDLE_HEIGHT,x=(window_width/2-PADDLE_WIDTH/2),y=window_height-PADDLE_OFFSET)
self.paddle.filled = True
self.window.add(self.paddle)
# Center a filled ball in the graphical window.
self.ball = GOval(BALL_RADIUS*2, BALL_RADIUS*2)
self.ball.filled = True
self.window.add(self.ball,x=(window_width/2-BALL_RADIUS), y=window_height/2-PADDLE_OFFSET)
# Initialize our mouse listeners.
onmouseclicked(self.ball_start)
onmousemoved(self.paddle_move)
# Default initial velocity for the ball.
self.__dx = 0
self.__dy = 0
# Default the removed bricks
self.count = 0
# Draw bricks.
for j in range(BRICK_ROWS):
for i in range(BRICK_COLS):
self.brick = GRect(BRICK_WIDTH, BRICK_HEIGHT, x= (BRICK_WIDTH+brick_spacing)*i, y=BRICK_OFFSET+(BRICK_HEIGHT+brick_spacing)*j)
self.brick.filled = True
if j == 0 :
self.brick_color = 'red'
elif j == 2:
self.brick_color = 'orange'
elif j == 4:
self.brick_color = 'yellow'
elif j == 6:
self.brick_color = 'green'
elif j ==8:
self.brick_color = 'blue'
self.brick.fill_color = self.brick_color
self.brick.color = self.brick_color
self.window.add(self.brick)
def paddle_move(self, event):
'''
        Sets the paddle position: the paddle follows the x position of the mouse, clamped inside the window.
        :return: the new x of the paddle
'''
self.paddle.x = event.x
self.paddle.y = self.window.height-PADDLE_OFFSET
if event.x <= 0:
self.paddle.x = 0
elif event.x >= (self.window.width-self.paddle.width):
self.paddle.x = self.window.width-self.paddle.width
else:
self.paddle.x = event.x
def ball_start(self, event):
'''
        If the user clicks the mouse while the ball is still, an initial velocity (__dx, __dy) is assigned and the ball starts moving.
:return: new __dx, __dy
'''
        if self.__dx == 0 and self.__dy == 0:
self.__dy = INITIAL_Y_SPEED
self.__dx = random.randint(1, MAX_X_SPEED)
            if random.random() > 0.5:
                self.__dx = -self.__dx
            if random.random() > 0.5:
                self.__dy = -self.__dy
def window_border(self):
'''
        If the ball reaches the left, right or top edge of the window, it bounces back.
:return: new __dx, __dy
'''
if self.ball.x <= 0 or self.ball.x >= (self.window.width-BALL_RADIUS*2):
self.__dx = -self.__dx
elif self.ball.y <= 0:
self.__dy = -self.__dy
def move_to_bottom(self):
'''
If the ball touched the bottom
:return: boolean (True/False)
'''
if self.ball.y >= (self.window.height-BALL_RADIUS*2):
return True
def back_to_start(self):
'''
The ball moves back to the start point to restart the game.
:return: ball.x , ball.y , __dx, __dy
'''
self.ball.x = self.window.width/2-BALL_RADIUS
self.ball.y = self.window.height/2 - BALL_RADIUS
self.__dx = 0
self.__dy = 0
def bounce_ball(self):
'''
if the ball hits the paddle, the ball will bounce.
:return: new __dy
'''
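        # Look up the objects at the four corners of the ball's bounding box;
        # bounce vertically if any of them is the paddle.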
x = self.ball.x
y = self.ball.y
obj = self.window.get_object_at(x, y)
obj2 = self.window.get_object_at(x+2*BALL_RADIUS, y)
obj3 = self.window.get_object_at(x, y+2*BALL_RADIUS)
obj4 = self.window.get_object_at(x+2*BALL_RADIUS, y+2*BALL_RADIUS)
if obj3 == self.paddle:
self.__dy = -self.__dy
elif obj4 == self.paddle:
self.__dy = -self.__dy
elif obj == self.paddle:
self.__dy = -self.__dy
elif obj2 == self.paddle:
self.__dy = -self.__dy
def hit_brick(self):
'''
If the ball hits the brick, the ball will bounce and the brick will be removed.
:return: count, __dy, removed bricks
'''
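        # Check the four corners of the ball's bounding box; any object that is
        # not the paddle counts as a brick, is removed and bounces the ball.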
obj4 = self.window.get_object_at(self.ball.x,self.ball.y)
obj5 = self.window.get_object_at(self.ball.x+2*BALL_RADIUS, self.ball.y)
obj6 = self.window.get_object_at(self.ball.x, self.ball.y+2*BALL_RADIUS)
obj7 = self.window.get_object_at(self.ball.x+2*BALL_RADIUS, self.ball.y+2*BALL_RADIUS)
if obj4 != self.paddle and obj4 != None:
self.window.remove(obj4)
self.count += 1
self.__dy = -self.__dy
elif obj5 != self.paddle and obj5 != None:
self.__dy = -self.__dy
self.count += 1
self.window.remove(obj5)
elif obj6 != self.paddle and obj6 != None:
self.__dy = -self.__dy
self.count += 1
self.window.remove(obj6)
elif obj7 != self.paddle and obj7 != None:
self.__dy = -self.__dy
self.count += 1
self.window.remove(obj7)
def is_win_game(self):
'''
If the game removed all the bricks
:return: boolean (True/False)
'''
if self.count == BRICK_ROWS*BRICK_COLS:
return True
def win_game(self):
'''
If win, the game will over and the win-banner will appear.
:return: sign2, label2
'''
sign2 = GRect(self.window.width, self.window.height, x=0, y=0)
sign2.filled = True
sign2.color = 'yellow'
sign2.fill_color = 'yellow'
self.window.add(sign2)
label2 = GLabel('You win !', x=self.window.width/3.5, y=self.window.height/2)
label2.font = 'Helvetica-35'
label2.color = 'black'
self.window.add(label2)
def end_game(self):
'''
If lose, the game will over and the lose-banner will appear.
:return: sign, label
'''
sign = GRect(self.window.width, self.window.height, x=0, y=0)
sign.filled = True
sign.color = 'red'
sign.fill_color = 'red'
self.window.add(sign)
label = GLabel('You lose', x=self.window.width/3.5, y=self.window.height/2)
label.font = 'Helvetica-35'
label.color = 'black'
self.window.add(label)
# Getter
def get_dx(self):
return self.__dx
# Getter
def get_dy(self):
return self.__dy
# Getter
def get_ball(self):
return self.ball
| 33.941406 | 142 | 0.589711 | 1,176 | 8,689 | 4.167517 | 0.162415 | 0.071414 | 0.026525 | 0.024485 | 0.332177 | 0.289125 | 0.166293 | 0.117323 | 0.106917 | 0.106917 | 0 | 0.019003 | 0.309587 | 8,689 | 255 | 143 | 34.07451 | 0.797966 | 0.214524 | 0 | 0.173333 | 0 | 0 | 0.015601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093333 | false | 0 | 0.026667 | 0.02 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24740d66448ac9fd6d0157a46216f7339a2b5d04 | 6,035 | py | Python | cosine_similarity.py | caikehe/YELP_DS | c9baa8626a22d6aca2a786e221bff5e184e521d7 | [
"Apache-2.0"
] | null | null | null | cosine_similarity.py | caikehe/YELP_DS | c9baa8626a22d6aca2a786e221bff5e184e521d7 | [
"Apache-2.0"
] | null | null | null | cosine_similarity.py | caikehe/YELP_DS | c9baa8626a22d6aca2a786e221bff5e184e521d7 | [
"Apache-2.0"
] | null | null | null | import io, json, random
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import pairwise_distances
from matplotlib.backends.backend_pdf import PdfPages
def drawCOSFigure(features, output):
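    """
    Load up to about nSamples histograms per star rating from the JSON files,
    keep only the bins listed in `features`, compute pairwise cosine similarity
    within each rating, and save a jittered scatter plot of the similarities
    (one column per star rating) to `output` as a PDF.
    """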
histogram1Star = []
histogram2Star = []
histogram3Star = []
histogram4Star = []
histogram5Star = []
nSamples = 100
metricType = 'cosine'
with open("data/output/cosine_similarity/histogram_1star.json") as oneStarFile:
data = json.load(oneStarFile)
for i, item in enumerate(data):
if i > nSamples:
break
histogram1Star.append(list(item["histogram"][i] for i in features))
with open("data/output/cosine_similarity/histogram_2star.json") as twoStarFile:
data = json.load(twoStarFile)
for i, item in enumerate(data):
if i > nSamples:
break
histogram2Star.append(list(item["histogram"][i] for i in features))
with open("data/output/cosine_similarity/histogram_3star.json") as threeStarFile:
data = json.load(threeStarFile)
for i, item in enumerate(data):
if i > nSamples:
break
histogram3Star.append(list(item["histogram"][i] for i in features))
with open("data/output/cosine_similarity/histogram_4star.json") as fourStarFile:
data = json.load(fourStarFile)
for i, item in enumerate(data):
if i > nSamples:
break
histogram4Star.append(list(item["histogram"][i] for i in features))
with open("data/output/cosine_similarity/histogram_5star.json") as fiveStarFile:
data = json.load(fiveStarFile)
for i, item in enumerate(data):
if i > nSamples:
break
histogram5Star.append(list(item["histogram"][i] for i in features))
matrix1Star = pairwise_distances(histogram1Star, Y=None, metric=metricType, n_jobs=1)
matrix2Star = pairwise_distances(histogram2Star, Y=None, metric=metricType, n_jobs=1)
matrix3Star = pairwise_distances(histogram3Star, Y=None, metric=metricType, n_jobs=1)
matrix4Star = pairwise_distances(histogram4Star, Y=None, metric=metricType, n_jobs=1)
matrix5Star = pairwise_distances(histogram5Star, Y=None, metric=metricType, n_jobs=1)
length = nSamples
x_array1Star = []
y_array1Star = []
x_array2Star = []
y_array2Star = []
x_array3Star = []
y_array3Star = []
x_array4Star = []
y_array4Star = []
x_array5Star = []
y_array5Star = []
left = -0.49
right = 0.49
for i in xrange(0, length):
for j in xrange(i, length):
x = 0.5 + random.uniform(left, right)
y = 1- matrix1Star[i][j]
x_array1Star.append(x)
y_array1Star.append(y)
for i in xrange(0, length):
for j in xrange(i, length):
x = 1.5 + random.uniform(left, right)
y = 1- matrix2Star[i][j]
x_array2Star.append(x)
y_array2Star.append(y)
for i in xrange(0, length):
for j in xrange(i, length):
x = 2.5 + random.uniform(left, right)
y = 1- matrix3Star[i][j]
x_array3Star.append(x)
y_array3Star.append(y)
for i in xrange(0, length):
for j in xrange(i, length):
x = 3.5 + random.uniform(left, right)
y = 1- matrix4Star[i][j]
x_array4Star.append(x)
y_array4Star.append(y)
for i in xrange(0, length):
for j in xrange(i, length):
x = 4.5 + random.uniform(left, right)
y = 1- matrix5Star[i][j]
x_array5Star.append(x)
y_array5Star.append(y)
pp = PdfPages(output)
plt.figure()
plt.plot(x_array1Star, y_array1Star, 'ro', markersize=2.0)
plt.plot(x_array2Star, y_array2Star, 'ro', markersize=2.0)
plt.plot(x_array3Star, y_array3Star, 'ro', markersize=2.0)
plt.plot(x_array4Star, y_array4Star, 'ro', markersize=2.0)
plt.plot(x_array5Star, y_array5Star, 'ro', markersize=2.0)
plt.axis([0, 5, 0, 1])
plt.axvline(x=1, ymin=0, ymax = 1, linewidth=1, color='r')
plt.axvline(x=2, ymin=0, ymax = 1, linewidth=1, color='r')
plt.axvline(x=3, ymin=0, ymax = 1, linewidth=1, color='r')
plt.axvline(x=4, ymin=0, ymax = 1, linewidth=1, color='r')
plt.axvline(x=5, ymin=0, ymax = 1, linewidth=1, color='r')
#plt.show()
plt.savefig(pp, format='pdf')
pp.close()
def main():
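    # Each feature list below selects a different subset of histogram bins;
    # one similarity scatter plot (PDF) is produced per subset.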
features = []
features1 = [0, 1, 2, 3, 4, 5, 8]
features2 = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11]
features3 = [0, 1, 2, 3, 4, 5, 8, 14, 15, 16, 17, 18, 19]
features4 = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19]
features5 = [6, 7, 8]
features6 = [6, 7, 8, 9, 10, 11]
features7 = [6, 7, 8, 14, 15, 16, 17, 18, 19]
features8 = [6, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19]
features9 = [12, 13, 8]
features10 = [12, 13, 8, 9, 10, 11]
features11 = [12, 13, 8, 14, 15, 16, 17, 18, 19]
features12 = [12, 13, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19]
features13 = [20, 21, 22, 23, 24]
features14 = [20, 21, 22, 23, 24, 9, 10, 11]
features15 = [20, 21, 22, 23, 24, 14, 15, 16, 17, 18, 19]
features.append(features1)
features.append(features2)
features.append(features3)
features.append(features4)
features.append(features5)
features.append(features6)
features.append(features7)
features.append(features8)
features.append(features9)
features.append(features10)
features.append(features11)
features.append(features12)
features.append(features13)
features.append(features14)
features.append(features15)
print(features)
for i in xrange(1, 16):
print(i)
output = 'report/refs/COS/multipage' + str(i) + '.pdf'
drawCOSFigure(features[i-1], output)
if __name__ == "__main__":
main()
| 34.485714 | 89 | 0.594532 | 804 | 6,035 | 4.38806 | 0.176617 | 0.018141 | 0.018707 | 0.015873 | 0.44076 | 0.427438 | 0.417234 | 0.291667 | 0.291667 | 0.272676 | 0 | 0.087503 | 0.274731 | 6,035 | 174 | 90 | 34.683908 | 0.718529 | 0.001657 | 0 | 0.173611 | 0 | 0 | 0.059097 | 0.045651 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0 | 0.027778 | 0 | 0.041667 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
247640d80add47e8ea829bcb77b7fb6e623ffeff | 579 | py | Python | cobu/constants.py | tlancaster6/cobu | 444f249bdec5b009858011002fa0782520891e2b | [
"MIT"
] | null | null | null | cobu/constants.py | tlancaster6/cobu | 444f249bdec5b009858011002fa0782520891e2b | [
"MIT"
] | null | null | null | cobu/constants.py | tlancaster6/cobu | 444f249bdec5b009858011002fa0782520891e2b | [
"MIT"
] | null | null | null | import os
try:
from importlib.metadata import metadata # Python 3.8
except ImportError:
from importlib_metadata import metadata # Python < 3.8
COBU_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(COBU_DIR)
DATA_DIR = os.path.join(BASE_DIR, 'data')
# package metadata
_META = metadata("cobu")
NAME = _META["name"]
VERSION = _META["version"]
DESCRIPTION = _META["summary"]
AUTHOR = _META["author"]
AUTHOR_EMAIL = _META["author-email"]
URL = _META["home-page"]
LICENSE = _META["license"]
VERSION_LONG = "FiftyOne v%s, %s" % (VERSION, AUTHOR)
| 27.571429 | 59 | 0.723661 | 82 | 579 | 4.865854 | 0.439024 | 0.06015 | 0.067669 | 0.135338 | 0.215539 | 0.215539 | 0.215539 | 0.215539 | 0 | 0 | 0 | 0.007968 | 0.132988 | 579 | 20 | 60 | 28.95 | 0.786853 | 0.069085 | 0 | 0 | 0 | 0 | 0.142056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2479d7af5d28d64ede7ccc8bebd1054c83190763 | 885 | py | Python | params_flow/utils/learn_scheduler.py | kpe/params-flow | 5857dfd67cf15e89803e5987316c81fe6c4cc54d | [
"MIT"
] | 23 | 2019-06-08T17:24:05.000Z | 2022-01-16T17:53:15.000Z | params_flow/utils/learn_scheduler.py | kpe/params-flow | 5857dfd67cf15e89803e5987316c81fe6c4cc54d | [
"MIT"
] | null | null | null | params_flow/utils/learn_scheduler.py | kpe/params-flow | 5857dfd67cf15e89803e5987316c81fe6c4cc54d | [
"MIT"
] | 4 | 2019-09-29T07:25:24.000Z | 2020-11-25T15:02:15.000Z | # coding=utf-8
#
# created by kpe on 10.08.2019 at 12:39 AM
#
from __future__ import division, absolute_import, print_function
import math
from tensorflow import keras
def create_one_cycle_lr_scheduler(max_learn_rate=5e-5,
end_learn_rate=1e-7,
warmup_epoch_count=10,
total_epoch_count=90):
def lr_scheduler(epoch):
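        # Linear warm-up to max_learn_rate over the first warmup_epoch_count
        # epochs, then exponential decay towards end_learn_rate.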
if epoch < warmup_epoch_count:
res = (max_learn_rate / warmup_epoch_count) * (epoch + 1)
else:
k = math.log(end_learn_rate / max_learn_rate) / (total_epoch_count - warmup_epoch_count)
res = max_learn_rate * math.exp(k * (epoch - warmup_epoch_count))
return float(res)
learning_rate_scheduler = keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
return learning_rate_scheduler
| 30.517241 | 100 | 0.640678 | 114 | 885 | 4.605263 | 0.491228 | 0.133333 | 0.152381 | 0.08 | 0.118095 | 0.118095 | 0.118095 | 0 | 0 | 0 | 0 | 0.03645 | 0.287006 | 885 | 28 | 101 | 31.607143 | 0.795563 | 0.059887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.4375 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
247aaaa9afe8af3944f0215ec29353a612a9ac89 | 445 | py | Python | cegid/apps/main/urls.py | vsoch/tcell | 3c4a19e5c78f1a36a2fe5e0f3229d1d70ef3a2c6 | [
"MIT"
] | null | null | null | cegid/apps/main/urls.py | vsoch/tcell | 3c4a19e5c78f1a36a2fe5e0f3229d1d70ef3a2c6 | [
"MIT"
] | 9 | 2016-08-04T22:56:21.000Z | 2016-08-17T20:55:57.000Z | cegid/apps/main/urls.py | vsoch/tcell | 3c4a19e5c78f1a36a2fe5e0f3229d1d70ef3a2c6 | [
"MIT"
] | 1 | 2016-10-11T04:22:52.000Z | 2016-10-11T04:22:52.000Z | from django.views.generic.base import TemplateView
from django.conf.urls import patterns, url
from .views import index_view, signup_view, about_view, search_view, \
home_view, contact_view
urlpatterns = [
url(r'^$', index_view, name="index"),
url(r'^search$', search_view, name="search"),
url(r'^contact$', contact_view, name="contact"),
url(r'^about$', about_view, name="about"),
url(r'^home$', home_view, name="home")
]
| 34.230769 | 70 | 0.694382 | 64 | 445 | 4.65625 | 0.328125 | 0.067114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137079 | 445 | 12 | 71 | 37.083333 | 0.776042 | 0 | 0 | 0 | 0 | 0 | 0.132584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
247b0f2580310cad3e1f381e524fdce487e2bc44 | 212 | py | Python | students/K33422/Larionova Anastasia/lab 1/task1_client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33422/Larionova Anastasia/lab 1/task1_client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33422/Larionova Anastasia/lab 1/task1_client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | import socket
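# Simple TCP client: connect to the server on localhost:7777, send a greeting and print the reply.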
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 7777))
sock.send('Hello, server! :)'.encode('utf-8'))
d = sock.recv(1024)
print(d.decode('utf-8'))
sock.close() | 21.2 | 56 | 0.70283 | 33 | 212 | 4.454545 | 0.636364 | 0.136054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051546 | 0.084906 | 212 | 10 | 57 | 21.2 | 0.706186 | 0 | 0 | 0 | 0 | 0 | 0.169014 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
247dc70e6437fecc8408f5935f960b8ac070e582 | 5,589 | py | Python | chapter-4/gridworld.py | epignatelli/Reinforcement-Learning-An-Introduction_exercises | e260cacdb1ec8e51af46da4031054d4c6771322c | [
"MIT"
] | 6 | 2020-12-13T00:34:40.000Z | 2022-03-05T03:58:46.000Z | chapter-4/gridworld.py | epignatelli/Reinforcement-Learning-An-Introduction_exercises | e260cacdb1ec8e51af46da4031054d4c6771322c | [
"MIT"
] | null | null | null | chapter-4/gridworld.py | epignatelli/Reinforcement-Learning-An-Introduction_exercises | e260cacdb1ec8e51af46da4031054d4c6771322c | [
"MIT"
] | 4 | 2021-02-03T06:32:24.000Z | 2021-09-14T22:46:51.000Z | # MIT License
# Copyright (c) 2020 Eduardo Pignatelli
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
from matplotlib.colors import LinearSegmentedColormap
W = LinearSegmentedColormap.from_list('w', ["w", "w"], N=256)
ACTIONS = {
0: [1, 0], # north
1: [-1, 0], # south
2: [0, -1], # west
3: [0, 1], # east
}
class GridWorld:
def __init__(self, size=4):
"""
A gridworld environment with absorbing states at [0, 0] and [size - 1, size - 1].
Args:
size (int): the dimension of the grid in each direction
            cell_reward (float): the reward returned after exiting any non-absorbing state
"""
self.state_value = np.zeros((size, size))
return
    def reset(self):
        # reset the value table to zeros, keeping the same grid dimensions
        self.state_value = np.zeros_like(self.state_value)
        return
def step(self, state, action):
# is terminal state?
size = len(self.state_value) - 1
if (state == (0, 0)) or (state == (size, size)):
return state, 0
s_1 = (state[0] + action[0], state[1] + action[1])
reward = -1
# out of bounds north-south
if s_1[0] < 0 or s_1[0] >= len(self.state_value):
s_1 = state
# out of bounds east-west
elif s_1[1] < 0 or s_1[1] >= len(self.state_value):
s_1 = state
return s_1, reward
def render(self, title=None):
"""
        Displays the current value table of the mini gridworld environment
"""
size = len(self.state_value) if len(self.state_value) < 20 else 20
fig, ax = plt.subplots(figsize=(size, size))
if title is not None:
ax.set_title(title)
ax.grid(which='major', axis='both',
linestyle='-', color='k', linewidth=2)
sn.heatmap(self.state_value, annot=True, fmt=".1f", cmap=W,
linewidths=1, linecolor="black", cbar=False)
plt.show()
return fig, ax
def bellman_expectation(self, state, probs, discount):
"""
        Makes a one-step lookahead and applies the Bellman expectation equation to the state self.state_value[state]
        Args:
            state (Tuple[int, int]): the x, y indices that define the address on the value table
            probs (List[float]): transition probabilities for each action
            discount (float): discount factor for the Bellman expectation equation
        Returns:
            (numpy.ndarray): the new value for the specified state
"""
# absorbing state
value = 0
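        # expected value over actions: sum of probs[a] * (reward + discount * V(next_state))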
for c, action in ACTIONS.items():
s_1, reward = self.step(state, action)
value += probs[c] * (reward + discount * self.state_value[s_1])
return value
def policy_evaluation(env, policy=None, steps=1, discount=1., in_place=False):
"""
Args:
        policy (numpy.array): a 3-D numpy array, where the first two dimensions identify a state and the third dimension identifies the actions.
The array stores the probability of taking each action.
steps (int): the number of iterations of the algorithm
discount (float): discount factor for the bellman equations
in_place (bool): if False, the value table is updated after all the new values have been calculated.
                         if True, the state [i, j] will already use the new values computed for the states [< i, < j]
"""
if policy is None:
# uniform random policy
policy = np.ones((*env.state_value.shape, len(ACTIONS))) * 0.25
for k in range(steps):
# cache old values if not in place
values = env.state_value if in_place else np.empty_like(
env.state_value)
for i in range(len(env.state_value)):
for j in range(len(env.state_value[i])):
# apply bellman expectation equation to each state
state = (i, j)
value = env.bellman_expectation(state, policy[i, j], discount)
values[i, j] = value * discount
# set the new value table
env.state_value = values
return env.state_value
if __name__ == "__main__":
    # reproduce Figure 4.1
for k in [1, 2, 3, 10, 1000]:
env = GridWorld(4)
env.render()
value_table = policy_evaluation(env, steps=k, in_place=False)
env.render()
| 39.359155 | 150 | 0.627661 | 789 | 5,589 | 4.378961 | 0.332066 | 0.052098 | 0.040521 | 0.024602 | 0.141823 | 0.125036 | 0.111722 | 0.097829 | 0.097829 | 0.075832 | 0 | 0.01868 | 0.281625 | 5,589 | 141 | 151 | 39.638298 | 0.841843 | 0.485776 | 0 | 0.121212 | 0 | 0 | 0.011295 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.075758 | 0 | 0.287879 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
247eb44c889ef4965c1c10cc694955ebaa4b8ea6 | 2,571 | py | Python | client.py | esperantomerkato/tuner | b3998f6bcaf8acd1b541e2aa0670e0f0a0a8eb7d | [
"BSD-Source-Code"
] | null | null | null | client.py | esperantomerkato/tuner | b3998f6bcaf8acd1b541e2aa0670e0f0a0a8eb7d | [
"BSD-Source-Code"
] | null | null | null | client.py | esperantomerkato/tuner | b3998f6bcaf8acd1b541e2aa0670e0f0a0a8eb7d | [
"BSD-Source-Code"
] | null | null | null | import asyncio
import json
import websockets
from merkato.merkato_config import get_config
from merkato.parser import parse
from merkato.utils.database_utils import no_merkatos_table_exists, create_merkatos_table, drop_merkatos_table, \
no_exchanges_table_exists, create_exchanges_table, drop_exchanges_table
import logging
log = logging.getLogger("client")
"""
Merkato WebSocket CLI client.
This demonstrates the behavior that will eventually be moved to a JavaScript GUI client.
It establishes a connection to the server, gets user input, sends the config to the server,
and loops awaiting updates from the server. (In the GUI, it will also loop awaiting user actions, e.g. button clicks.)
"""
async def client(url):
async with websockets.connect(url) as ws:
log.info("Connected.")
try:
merkato_params = get_merkato_params_from_user()
log.info("Sending Merkato params {}".format(merkato_params))
await ws.send(json.dumps({'merkato_params': merkato_params}))
while True:
msg = await ws.recv()
print("Received {}".format(msg))
except websockets.ConnectionClosed:
log.error("Server unexpectedly closed connection, investigate.")
exit(1)
def get_merkato_params_from_user():
print("Merkato Alpha v0.1.1\n")
if no_merkatos_table_exists():
create_merkatos_table()
else:
should_drop_merkatos = input('Do you want to drop merkatos? y/n: ')
if should_drop_merkatos == 'y':
drop_merkatos_table()
create_merkatos_table()
if no_exchanges_table_exists():
create_exchanges_table()
else:
should_drop_exchanges = input('Do you want to drop exchanges? y/n: ')
if should_drop_exchanges == 'y':
drop_exchanges_table()
create_exchanges_table()
configuration = parse()
if not configuration:
configuration = get_config()
if not configuration:
raise Exception("Failed to get configuration.")
base = input("Base: ")
coin = input("Coin: ")
spread = input("Spread: ")
coin_reserve = input("Coin reserve: ")
base_reserve = input("Base reserve: ")
return {
'configuration': configuration,
'base': base,
'coin': coin,
'spread': float(spread),
'bid_reserved_balance': float(base_reserve),
'ask_reserved_balance': float(coin_reserve)
}
if __name__ == "__main__":
url = "ws://localhost:5678"
asyncio.get_event_loop().run_until_complete(client(url)) | 30.975904 | 112 | 0.669 | 312 | 2,571 | 5.269231 | 0.38141 | 0.055353 | 0.041363 | 0.025547 | 0.170316 | 0.124088 | 0.099757 | 0 | 0 | 0 | 0 | 0.004065 | 0.234539 | 2,571 | 83 | 113 | 30.975904 | 0.831301 | 0 | 0 | 0.135593 | 0 | 0 | 0.168802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.118644 | 0 | 0.152542 | 0.033898 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24801d3763c6b4f5d5fdf3b1bc18bbe2654427a3 | 356 | py | Python | application/web/helper.py | satan1a/poopak | 9862f09edc22c030db520e317020b54a36e070ac | [
"curl"
] | 91 | 2019-01-17T13:35:49.000Z | 2022-03-30T21:16:37.000Z | application/web/helper.py | satan1a/poopak | 9862f09edc22c030db520e317020b54a36e070ac | [
"curl"
] | 13 | 2019-01-13T14:35:51.000Z | 2021-04-26T05:13:42.000Z | application/web/helper.py | satan1a/poopak | 9862f09edc22c030db520e317020b54a36e070ac | [
"curl"
] | 33 | 2019-01-17T13:37:22.000Z | 2022-03-25T09:35:54.000Z | import re
def extract_onions(s):
result = []
print(s)
for m in re.finditer(r'(?:https?://)?(?:www)?(\S*?\.onion)\b', s, re.M | re.IGNORECASE):
url = str(m.group(0))
# url = url[len(url) - 22:]
# if "'" in url:
# url = url[url.index("'")+1:]
result.append(url)
print(result)
return result
| 20.941176 | 92 | 0.485955 | 50 | 356 | 3.44 | 0.58 | 0.139535 | 0.104651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016 | 0.297753 | 356 | 16 | 93 | 22.25 | 0.672 | 0.205056 | 0 | 0 | 0 | 0 | 0.134058 | 0.134058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
248892eac1e0a2c6110682fef0f6ec29d697f548 | 11,432 | py | Python | main_big.py | giorgosouz/HSI-classification-using-state-of-the-art-models | a925972ffe02c2cd1e5dde2b163e1faa854a4966 | [
"MIT"
] | null | null | null | main_big.py | giorgosouz/HSI-classification-using-state-of-the-art-models | a925972ffe02c2cd1e5dde2b163e1faa854a4966 | [
"MIT"
] | null | null | null | main_big.py | giorgosouz/HSI-classification-using-state-of-the-art-models | a925972ffe02c2cd1e5dde2b163e1faa854a4966 | [
"MIT"
] | null | null | null |
import torch
import torch.utils.data as data
from torchsummary import summary
from datasets import get_dataset, HyperX,sHyperX
from models import get_model, train,test
from utils import sample_gt,get_device,metrics,compute_imf_weights
import visdom
import argparse
import numpy as np
import cv2
from models_liu import train_liu
parser = argparse.ArgumentParser(description="Run deep learning experiments on"
" various hyperspectral datasets")
parser.add_argument('--dataset', type=str, default=None,
help="Dataset to use.")
parser.add_argument('--model', type=str, default=None,
help="Model to train. Available:\n"
"SVM (linear), "
"SVM_grid (grid search on linear, poly and RBF kernels), "
"baseline (fully connected NN), "
"hu (1D CNN), "
"hamida (3D CNN + 1D classifier), "
"lee (3D FCN), "
"chen (3D CNN), "
"li (3D CNN), "
"he (3D CNN), "
"luo (3D CNN), "
"sharma (2D CNN), "
"boulch (1D semi-supervised CNN), "
"liu (3D semi-supervised CNN), "
"mou (1D RNN)")
parser.add_argument('--folder', type=str, help="Folder where to store the "
"datasets (defaults to the current working directory).",
default="./Datasets/")
parser.add_argument('--cuda', type=int, default=0,
help="Specify CUDA device (defaults to -1, which learns on CPU)")
parser.add_argument('--runs', type=int, default=1, help="Number of runs (default: 1)")
parser.add_argument('--restore', type=str, default=None,
help="Weights to use for initialization, e.g. a checkpoint")
# Dataset options
group_dataset = parser.add_argument_group('Dataset')
group_dataset.add_argument('--training_sample', type=float, default=10,
help="Percentage of samples to use for training (default: 10%%)")
group_dataset.add_argument('--sampling_mode', type=str, default='random', help="Sampling mode"
" (random sampling or disjoint, default= random)"
)
group_dataset.add_argument('--train_set', type=str, default=None,
help="Path to the train ground truth (optional, this "
"supersedes the --sampling_mode option)")
group_dataset.add_argument('--test_set', type=str, default=None,
help="Path to the test set (optional, by default "
"the test_set is the entire ground truth minus the training)")
# Training options
group_train = parser.add_argument_group('Training')
group_train.add_argument('--epoch', type=int, help="Training epochs (optional, if"
" absent will be set by the model)")
group_train.add_argument('--patch_size', type=int,
help="Size of the spatial neighbourhood (optional, if "
"absent will be set by the model)")
group_train.add_argument('--lr', type=float,
help="Learning rate, set by the model if not specified.")
group_train.add_argument('--class_balancing', action='store_true',
help="Inverse median frequency class balancing (default = False)")
group_train.add_argument('--batch_size', type=int,
help="Batch size (optional, if absent will be set by the model")
group_train.add_argument('--test_stride', type=int, default=1,
help="Sliding window step stride during inference (default = 1)")
# Data augmentation parameters
group_da = parser.add_argument_group('Data augmentation')
group_da.add_argument('--flip_augmentation', action='store_true',
help="Random flips (if patch_size > 1)")
group_da.add_argument('--radiation_augmentation', action='store_true',
help="Random radiation noise (illumination)")
group_da.add_argument('--mixture_augmentation', action='store_true',
help="Random mixes between spectra")
parser.add_argument('--with_exploration', action='store_true',
help="See data exploration visualization")
parser.add_argument('--download', type=str, default=None, nargs='+',
help="Download the specified datasets and quits.")
args = parser.parse_args()
CUDA_DEVICE = get_device(args.cuda)
# % of training samples
SAMPLE_PERCENTAGE = args.training_sample
# Data augmentation ?
FLIP_AUGMENTATION = args.flip_augmentation
RADIATION_AUGMENTATION = args.radiation_augmentation
MIXTURE_AUGMENTATION = args.mixture_augmentation
# Dataset name
DATASET = args.dataset
# Model name
MODEL = args.model
# Number of runs (for cross-validation)
N_RUNS = args.runs
# Spatial context size (number of neighbours in each spatial direction)
PATCH_SIZE = args.patch_size
# Add some visualization of the spectra ?
DATAVIZ = args.with_exploration
# Target folder to store/download/load the datasets
FOLDER = args.folder
# Number of epochs to run
EPOCH = args.epoch
# Sampling mode, e.g random sampling
SAMPLING_MODE = args.sampling_mode
# Pre-computed weights to restore
CHECKPOINT = args.restore
# Learning rate for the SGD
LEARNING_RATE = args.lr
# Automated class balancing
CLASS_BALANCING = args.class_balancing
# Training ground truth file
TRAIN_GT = args.train_set
# Testing ground truth file
TEST_GT = args.test_set
TEST_STRIDE = args.test_stride
MODEL='hamida'
DATASET='train2'
vDATASET='test2'
MODE='disjoint'
ttest=True
if ttest==False:
SAMPLE_PERCENTAGE=0.2
else:
SAMPLE_PERCENTAGE=1
# CHECKPOINT="checkpoints/hamida_et_al/name of the dataset used/True2020-07-02 02:12:54.440903_epoch18_0.72.pth"
viz = visdom.Visdom(env=DATASET + ' ' + MODEL)
hyperparams = vars(args)
img, gt, LABEL_VALUES, IGNORED_LABELS, RGB_BANDS, palette = get_dataset(DATASET,
FOLDER)
# top, bottom, left, right = [30]*4
# img=cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
# gt=cv2.copyMakeBorder(gt, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
# qmin=[]
# qmax=[]
img = img.astype('float32')
qmax=np.load("maxint16.npy")
qmin=np.load("minint16.npy")
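# per-band min-max normalisation, using what appear to be pre-computed int16 minima/maxima for each band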
for i in range(img.shape[-1]):
# qmin.append(np.min(img[:,:,i]))
# qmax.append(np.max(img[:,:,i]))
img[:,:,i] = (img[:,:,i] - qmin[i]) /(qmax[i] - qmin[i])
N_CLASSES=len(LABEL_VALUES)
# N_CLASSES = 24
N_BANDS=img.shape[-1]
hyperparams = vars(args)
hyperparams.update({'n_classes': N_CLASSES, 'n_bands': N_BANDS, 'ignored_labels': IGNORED_LABELS, 'device': CUDA_DEVICE})
hyperparams = dict((k, v) for k, v in hyperparams.items() if v is not None)
if SAMPLE_PERCENTAGE!=1:
test_gt, train_gt = sample_gt(gt, SAMPLE_PERCENTAGE, mode=MODE)
#######
# test_gt, train_gt = sample_gt(test_gt, SAMPLE_PERCENTAGE, mode=MODE)
# test_gt, train_gt = sample_gt(test_gt, SAMPLE_PERCENTAGE, mode=MODE)
# test_gt, train_gt = sample_gt(test_gt, SAMPLE_PERCENTAGE, mode=MODE)
########
else:
train_gt = test_gt= gt
#######
# weights = compute_imf_weights(train_gt, N_CLASSES, IGNORED_LABELS)
# hyperparams['weights'] = torch.from_numpy(weights).float()
##########
model, optimizer, loss, hyperparams = get_model(MODEL, **hyperparams)
train_dataset = HyperX(img, train_gt, **hyperparams)
train_loader = data.DataLoader(train_dataset,
batch_size=hyperparams['batch_size'],
pin_memory=hyperparams['device'],
shuffle=True)
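# when ttest is True, validate on a separate dataset (vDATASET); otherwise validate on the held-out split of the training ground truth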
if ttest==True:
del img
vimg, vgt, vLABEL_VALUES, vIGNORED_LABELS, vRGB_BANDS, vpalette = get_dataset(vDATASET,
FOLDER)
# top, bottom, left, right = [30]*4
# vimg=cv2.copyMakeBorder(vimg, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
# vgt=cv2.copyMakeBorder(vgt, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0])
# vqmin,vqmax=[],[]
vimg = vimg.astype('float32')
for i in range(vimg.shape[-1]):
# vqmin.append(np.min(vimg[:,:,i]))
# vqmax.append(np.max(vimg[:,:,i]))
vimg[:,:,i] = (vimg[:,:,i] - qmin[i]) /(qmax[i] - qmin[i])
# vtrain_gt, vtest_gt = sample_gt(vgt, 0.1, mode='random')
val_dataset = HyperX(vimg, vgt, **hyperparams)
# del vimg
val_loader = data.DataLoader(val_dataset,
pin_memory=hyperparams['device'],
batch_size=hyperparams['batch_size'],drop_last=True)
del vimg
else:
val_dataset = HyperX(img, test_gt, **hyperparams)
val_loader = data.DataLoader(val_dataset,
pin_memory=hyperparams['device'],
batch_size=hyperparams['batch_size'],drop_last=True)
# del img
print(np.count_nonzero(train_gt))
print(np.count_nonzero(test_gt))
print(np.count_nonzero(gt))
print(hyperparams)
print("Network :")
with torch.no_grad():
for input, _ in train_loader:
break
summary(model.to(hyperparams['device']), input.size()[1:])
if CHECKPOINT is not None:
model.load_state_dict(torch.load(CHECKPOINT))
if MODEL!="liu":
try:
train(model, optimizer, loss, train_loader, hyperparams['epoch'],
scheduler=hyperparams['scheduler'], device=hyperparams['device'],
supervision=hyperparams['supervision'], val_loader=val_loader,
display=viz,klepsia=klepsia)
except KeyboardInterrupt:
# Allow the user to stop the training
pass
if MODEL=="liu":
strain_dataset = sHyperX(img, train_gt, **hyperparams)
strain_loader = data.DataLoader(strain_dataset,
batch_size=hyperparams['batch_size'],
pin_memory=hyperparams['device'],
shuffle=True,drop_last=True)
try:
train_liu(model, optimizer, loss, train_loader, hyperparams['epoch'], val_loader,klepsia,strain_loader,
scheduler=hyperparams['scheduler'], device=hyperparams['device'],
display=viz)
except KeyboardInterrupt:
# Allow the user to stop the training
pass
# probabilities = test(model, vimg, hyperparams)
# prediction = np.argmax(probabilities, axis=-1)
# run_results = metrics(prediction, vgt, ignored_labels=[0], n_classes=N_CLASSES)
# cm=run_results['Confusion matrix']
# cmr = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# cmp = cm.astype('float') / cm.sum(axis=0)[np.newaxis,:]
# np.savetxt(MODEL+'cmr.csv',cmr,delimiter=',')
# np.savetxt(MODEL+'cm.csv',cm,delimiter=',')
# np.savetxt(MODEL+'cmp.csv',cmp,delimiter=',')
# rep=run_results['report']
# np.savetxt(MODEL+'rep.csv',rep,delimiter=',')
# import cv2
# cv2.imwrite(MODEL+'pred.tif',prediction)
| 34.33033 | 121 | 0.616253 | 1,374 | 11,432 | 4.973071 | 0.235808 | 0.038636 | 0.027367 | 0.015806 | 0.240743 | 0.219377 | 0.178399 | 0.152642 | 0.152642 | 0.118689 | 0 | 0.011257 | 0.261809 | 11,432 | 332 | 122 | 34.433735 | 0.798436 | 0.200665 | 0 | 0.152542 | 0 | 0 | 0.227675 | 0.005084 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.011299 | 0.062147 | 0 | 0.062147 | 0.028249 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
248941d967a1b7d873ad7da541cea91f968f8222 | 954 | py | Python | plusy/spiders/gov_sbir_news.py | OracleGao/plusy | 756ed8fe932255ffcf7eed2afdc0b83a85a7548e | [
"Apache-2.0"
] | null | null | null | plusy/spiders/gov_sbir_news.py | OracleGao/plusy | 756ed8fe932255ffcf7eed2afdc0b83a85a7548e | [
"Apache-2.0"
] | null | null | null | plusy/spiders/gov_sbir_news.py | OracleGao/plusy | 756ed8fe932255ffcf7eed2afdc0b83a85a7548e | [
"Apache-2.0"
] | null | null | null | import scrapy
class GovSbirNewsSpider(scrapy.Spider):
name = 'gov.sbir.news'
start_urls = ["https://www.sbir.gov/news?page=0"]
def __init__(self):
for page_num in range(1, 23):
self.start_urls.append("https://www.sbir.gov/news?page=%d" % page_num)
def parse(self, response):
for div in response.css('div.news-view-rest-wrapper'):
types = []
for div_type in div.css('div.news-type > span'):
types.append(div_type.css('span::text').extract_first())
yield {
'title': div.css('div.news-title > a::text').extract_first(),
'href': 'https://www.sbir.gov' + div.css('div.news-title > a::attr(href)').extract_first(),
'date': div.css('div.news-date > strong > span::attr(content)').extract_first(),
'body': div.css('div.news-body::text').extract_first(),
'type': types
}
| 39.75 | 107 | 0.5587 | 125 | 954 | 4.144 | 0.384 | 0.069498 | 0.11583 | 0.125483 | 0.162162 | 0.162162 | 0 | 0 | 0 | 0 | 0 | 0.005698 | 0.264151 | 954 | 23 | 108 | 41.478261 | 0.732194 | 0 | 0 | 0 | 0 | 0 | 0.30608 | 0.027254 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
248b843beb808fda7506840e01537e62bc25f9dc | 4,697 | py | Python | schedule/forms.py | yourcelf/masterschedule | e585df0e9edcaff5fa4f04f77a9452e3073b5db7 | [
"Unlicense"
] | 1 | 2015-02-11T04:08:36.000Z | 2015-02-11T04:08:36.000Z | schedule/forms.py | yourcelf/masterschedule | e585df0e9edcaff5fa4f04f77a9452e3073b5db7 | [
"Unlicense"
] | null | null | null | schedule/forms.py | yourcelf/masterschedule | e585df0e9edcaff5fa4f04f77a9452e3073b5db7 | [
"Unlicense"
] | null | null | null | from django import forms
from django.forms.models import inlineformset_factory
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from schedule.models import *
class PersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ['name']
class AvailabilityForm(forms.ModelForm):
class Meta:
model = Person
exclude = ['conference']
OtherCommitmentFormset = inlineformset_factory(Person,
OtherCommitment,
extra=2,
fields=['start_date', 'end_date'])
class ProspectiveAdminField(forms.ModelMultipleChoiceField):
def clean(self, value):
if not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
models = []
id_values = []
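        # values containing "@" are treated as e-mail addresses of prospective admins to get-or-create;
        # anything else is assumed to be a primary key of an existing record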
for val in value:
if "@" in val:
try:
model, created = self.queryset.get_or_create(email=val)
except ValueError:
                    raise ValidationError(
"Invalid email address",
code='invalid_email',
params={"pk": val})
models.append(model)
else:
try:
self.queryset.filter(pk=val)
except ValueError:
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': val},
)
id_values.append(val)
qs = list(self.queryset.filter(pk__in=id_values))
pks = set(str(o.pk) for o in qs)
for val in id_values:
if str(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
self.run_validators(value)
return qs + models
class CreateConferenceForm(forms.ModelForm):
admins = forms.ModelMultipleChoiceField(User.objects.all(),
widget=forms.MultipleHiddenInput)
prospective_admins = ProspectiveAdminField(ProspectiveAdmin.objects.all(),
widget=forms.MultipleHiddenInput)
class Meta:
model = Conference
fields = ['name', 'public', 'admins', 'prospective_admins']
class UpdateConferenceForm(CreateConferenceForm):
class Meta(CreateConferenceForm.Meta):
fields = ['name', 'public', 'admins', 'prospective_admins', 'archived']
class VenueForm(forms.ModelForm):
class Meta:
model = Venue
fields = ['name']
class RoleTypeForm(forms.ModelForm):
class Meta:
model = RoleType
fields = ['role']
class EventForm(forms.ModelForm):
add_period = forms.BooleanField(required=False,
label="Add new period",
help_text='Add these start and end dates as a new "period" (e.g. a course block, track, etc)?')
period_name = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
instance = kwargs.get('instance', None)
if instance is None:
self.fields['period'].queryset = Period.objects.none()
else:
self.fields['period'].queryset = instance.conference.period_set.all()
def clean_period_name(self):
if self.cleaned_data['add_period'] and not self.cleaned_data.get('period_name'):
raise ValidationError("Period name is required to add a period.")
if Period.objects.filter(period=self.cleaned_data['period_name'],
conference=self.instance.conference).exists():
raise ValidationError("That name is already in use.")
return self.cleaned_data['period_name']
def clean(self):
data = super(EventForm, self).clean()
if not (data['start_date'] or data['period']):
raise ValidationError("One of start date or period is required.")
return data
def save(self):
event = super(EventForm, self).save(commit=False)
if self.cleaned_data['add_period']:
event.period = Period.objects.create(
conference=self.instance.conference,
period=self.cleaned_data['period_name'],
start_date=event.start_date,
end_date=event.end_date)
event.save()
return event
class Meta:
model = Event
fields = ['title', 'start_date', 'end_date', 'period', 'description']
| 36.410853 | 103 | 0.593996 | 487 | 4,697 | 5.607803 | 0.279261 | 0.023068 | 0.030758 | 0.033687 | 0.199927 | 0.127426 | 0 | 0 | 0 | 0 | 0 | 0.000305 | 0.301895 | 4,697 | 128 | 104 | 36.695313 | 0.832571 | 0 | 0 | 0.178571 | 0 | 0.008929 | 0.12178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044643 | false | 0 | 0.044643 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
248f7384cfb08b1240e64c826352c311afba0e91 | 16,088 | py | Python | LinkTap.py | simplextech/udi-linktap-poly | e51180aecf58e0453bfe444acd2b454e6d3df00d | [
"MIT"
] | 2 | 2020-05-23T18:04:04.000Z | 2020-06-22T11:29:03.000Z | LinkTap.py | simplextech/udi-linktap-poly | e51180aecf58e0453bfe444acd2b454e6d3df00d | [
"MIT"
] | 11 | 2020-02-07T02:14:01.000Z | 2021-05-03T21:35:01.000Z | LinkTap.py | simplextech/udi-linktap-poly | e51180aecf58e0453bfe444acd2b454e6d3df00d | [
"MIT"
] | 1 | 2020-08-05T01:36:56.000Z | 2020-08-05T01:36:56.000Z | #!/usr/bin/env python
import sys
import linktap
import time
import logging
try:
import polyinterface
CLOUD = False
except ImportError:
import pgc_interface as polyinterface
CLOUD = True
LOGGER = polyinterface.LOGGER
logging.getLogger('urllib3').setLevel(logging.INFO)
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
super(Controller, self).__init__(polyglot)
self.name = 'LinkTap Controller'
# self.poly.onConfig(self.process_config)
self.username = ''
self.apiKey = ''
self.data = None
self.ready = False
self.retry_count = 1
def start(self):
LOGGER.info('Started LinkTap NodeServer')
if self.check_params():
self.discover()
def get_link_tap_devices(self):
lt = linktap.LinkTap(self.username, self.apiKey)
all_devices = lt.get_all_devices()
if all_devices == 'error':
LOGGER.info("get_link_tap_devices: The minimum interval of calling this API is 5 minutes.")
self.data = None
self.ready = False
return False
elif all_devices is None:
LOGGER.info("Get all devices failed")
self.data = None
self.ready = False
return False
else:
self.data = all_devices
self.ready = True
return True
def shortPoll(self):
# Update Watering Status
if self.ready:
# LOGGER.info("Updating Watering Status")
for node in self.nodes:
if self.nodes[node].address != self.address:
for gw in self.data['devices']:
for tl in gw['taplinker']:
if tl['taplinkerId'][0:8].lower() == self.nodes[node].address:
if tl['status'] == 'Connected':
link_tap = linktap.LinkTap(self.username, self.apiKey)
watering_status = link_tap.get_watering_status(tl['taplinkerId'])
# print(watering_status)
try:
if watering_status['status'] is not None:
if watering_status['status']['onDuration']:
self.nodes[node].setDriver('GV1', 1)
self.nodes[node].setDriver('GV2', watering_status['status']['onDuration'])
if watering_status['status']['total']:
self.nodes[node].setDriver('GV3', watering_status['status']['total'])
watering_total = int(watering_status['status']['total'])
watering_duration = int(watering_status['status']['onDuration'])
watering_elapsed = watering_total - watering_duration
self.nodes[node].setDriver('GV4', watering_elapsed)
else:
self.nodes[node].setDriver('GV1', 0)
self.nodes[node].setDriver('GV2', 0)
self.nodes[node].setDriver('GV3', 0)
self.nodes[node].setDriver('GV4', 0)
except TypeError:
pass
else:
pass
def longPoll(self):
if self.ready:
if self.get_link_tap_devices():
self.update()
else:
LOGGER.info("LinkTap Devices API returned None")
else:
pass
def update(self):
if self.ready:
for node in self.nodes:
if self.nodes[node].address != self.address:
for gw in self.data['devices']:
if gw['gatewayId'][0:8].lower() == self.nodes[node].address:
if gw['status'] == 'Connected':
self.nodes[node].setDriver('ST', 1, force=False)
else:
self.nodes[node].setDriver('ST', 0, force=False)
for tl in gw['taplinker']:
if tl['taplinkerId'][0:8].lower() == self.nodes[node].address:
if tl['status'] == 'Connected':
self.nodes[node].setDriver('ST', 1, force=False)
else:
self.nodes[node].setDriver('ST', 0, force=False)
self.nodes[node].setDriver('BATLVL', tl['batteryStatus'].strip('%'), force=False)
# self.nodes[node].setDriver('GV0', tl['signal'].strip('%'), force=False)
self.nodes[node].setDriver('GV0', tl['signal'], force=False)
if tl['watering'] is not None:
self.nodes[node].setDriver('GV1', 1, force=False)
for key in tl['watering']:
if key == 'remaining':
self.nodes[node].setDriver('GV2', tl['watering'][key], force=False)
if key == 'total':
self.nodes[node].setDriver('GV3', tl['watering'][key], force=False)
else:
self.nodes[node].setDriver('GV1', 0, force=False)
self.nodes[node].setDriver('GV2', 0, force=False)
self.nodes[node].setDriver('GV3', 0, force=False)
def query(self):
if self.ready:
self.check_params()
for node in self.nodes:
self.nodes[node].reportDrivers()
def discover_retry(self):
retry_count = str(self.retry_count)
if self.retry_count <= 3000:
LOGGER.info("discover_retry: Failed to start. Retrying attempt: " + retry_count)
self.retry_count += 1
self.discover()
else:
LOGGER.info("discover_retry: Failed to start after 3000 retries. Aborting")
polyglot.stop()
def discover(self, *args, **kwargs):
if self.get_link_tap_devices():
for ctl in self.data['devices']:
gw_name = ctl['name']
gw_address = ctl['gatewayId'][0:8].lower()
self.addNode(GatewayNode(self, gw_address, gw_address, gw_name))
time.sleep(2)
for tl in ctl['taplinker']:
tl_name = tl['taplinkerName']
tl_address = tl['taplinkerId'][0:8].lower()
self.addNode(TapLinkNode(self, gw_address, tl_address, tl_name))
time.sleep(2)
self.ready = True
self.update()
else:
LOGGER.info("Failed to get devices. Will retry in 5 minutes")
self.ready = False
time.sleep(300)
self.discover_retry()
def delete(self):
LOGGER.info('LinkTap Nodeserver: Deleted')
def stop(self):
LOGGER.debug('NodeServer stopped.')
def process_config(self, config):
# this seems to get called twice for every change, why?
# What does config represent?
LOGGER.info("process_config: Enter config={}".format(config))
LOGGER.info("process_config: Exit")
def check_params(self):
default_username = "YourUserName"
default_api_key = "YourApiKey"
if 'username' in self.polyConfig['customParams']:
self.username = self.polyConfig['customParams']['username']
else:
self.username = default_username
LOGGER.error('check_params: user not defined in customParams, please add it. '
'Using {}'.format(self.username))
if 'apiKey' in self.polyConfig['customParams']:
self.apiKey = self.polyConfig['customParams']['apiKey']
else:
self.apiKey = default_api_key
LOGGER.error('check_params: apiKey not defined in customParams, please add it. '
'Using {}'.format(self.apiKey))
self.addCustomParam({'username': self.username, 'apiKey': self.apiKey})
time.sleep(2)
if self.username == default_username or self.apiKey == default_api_key:
self.addNotice({'params_notice': 'Please set proper user and apiKey in '
'configuration page, and restart this nodeserver'})
return False
else:
self.remove_notices_all()
return True
def remove_notice_test(self, command):
LOGGER.info('remove_notice_test: notices={}'.format(self.poly.config['notices']))
# Remove named notices
self.removeNotice('test')
def remove_notices_all(self):
LOGGER.info('remove_notices_all: notices={}'.format(self.poly.config['notices']))
# Remove all existing notices
self.removeNoticesAll()
def update_profile(self, command):
LOGGER.info('update_profile:')
st = self.poly.installprofile()
return st
id = 'controller'
commands = {
'QUERY': query,
'DISCOVER': discover,
'UPDATE_PROFILE': update_profile
}
drivers = [{'driver': 'ST', 'value': 1, 'uom': 2}]
class GatewayNode(polyinterface.Node):
def __init__(self, controller, primary, address, name):
super(GatewayNode, self).__init__(controller, primary, address, name)
self.data = controller.data
def start(self):
for gw in self.data['devices']:
if gw['gatewayId'][0:8].lower() == self.address:
if gw['status'] == 'Connected':
self.setDriver('ST', 1)
else:
self.setDriver('ST', 0)
def setOn(self, command):
self.setDriver('ST', 1)
def setOff(self, command):
self.setDriver('ST', 0)
def query(self):
self.reportDrivers()
def update(self):
for gw in self.data['devices']:
if gw['gatewayId'][0:8].lower() == self.address:
if gw['status'] == 'Connected':
self.setDriver('ST', 1)
else:
self.setDriver('ST', 0)
# "Hints See: https://github.com/UniversalDevicesInc/hints"
# hint = [1,2,3,4]
drivers = [{'driver': 'ST', 'value': 0, 'uom': 2}]
id = 'gateway'
commands = {
'DON': setOn, 'DOF': setOff
}
class TapLinkNode(polyinterface.Node):
def __init__(self, controller, primary, address, name):
super(TapLinkNode, self).__init__(controller, primary, address, name)
self.data = controller.data
self.primary = primary
self.dev_suffix = '004B1200'
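        # apparently a fixed suffix appended to the 8-character short address to rebuild the full LinkTap device ID for API calls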
def start(self):
for gw in self.data['devices']:
for tl in gw['taplinker']:
if tl['taplinkerId'][0:8].lower() == self.address:
if tl['status'] == 'Connected':
self.setDriver('ST', 1, force=True)
else:
self.setDriver('ST', 0, force=True)
self.setDriver('BATLVL', tl['batteryStatus'].strip('%'), force=True)
# self.setDriver('GV0', tl['signal'].strip('%'), force=True)
self.setDriver('GV0', tl['signal'], force=True)
if tl['watering'] is not None:
self.setDriver('GV1', 1, force=True)
for key in tl['watering']:
if key == 'remaining':
self.setDriver('GV2', tl['watering'][key], force=True)
if key == 'total':
self.setDriver('GV3', tl['watering'][key], force=True)
else:
self.setDriver('GV1', 0, force=True)
self.setDriver('GV2', 0, force=True)
self.setDriver('GV3', 0, force=True)
def setOn(self, command):
self.setDriver('ST', 1)
def setOff(self, command):
self.setDriver('ST', 0)
def query(self):
self.reportDrivers()
def instantOn(self, command):
val = command.get('value')
taplinker = command.get('address') + self.dev_suffix
gateway = self.primary + self.dev_suffix
duration = int(val)
# if duration == 0:
# action = False
# else:
# action = True
action = True
eco = False
lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
lt.activate_instant_mode(gateway, taplinker, action, duration, eco)
self.setDriver('GV1', 1)
self.setDriver('GV2', duration)
self.setDriver('GV3', duration)
def instantOff(self, command):
taplinker = command.get('address') + self.dev_suffix
gateway = self.primary + self.dev_suffix
duration = 0
action = False
eco = False
lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
lt.activate_instant_mode(gateway, taplinker, action, duration, eco)
self.setDriver('GV1', 0)
self.setDriver('GV2', duration)
self.setDriver('GV3', duration)
def intervalMode(self, command):
taplinker = command.get('address') + self.dev_suffix
gateway = self.primary + self.dev_suffix
lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
lt.activate_interval_mode(gateway, taplinker)
def oddEvenMode(self, command):
taplinker = command.get('address') + self.dev_suffix
gateway = self.primary + self.dev_suffix
lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
lt.activate_odd_even_mode(gateway, taplinker)
def sevenDayMode(self, command):
taplinker = command.get('address') + self.dev_suffix
gateway = self.primary + self.dev_suffix
lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
lt.activate_seven_day_mode(gateway, taplinker)
def monthMode(self, command):
taplinker = command.get('address') + self.dev_suffix
gateway = self.primary + self.dev_suffix
lt = linktap.LinkTap(self.controller.username, self.controller.apiKey)
lt.activate_month_mode(gateway, taplinker)
# "Hints See: https://github.com/UniversalDevicesInc/hints"
# hint = [1,2,3,4]
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 2},
{'driver': 'BATLVL', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 51}, # Signal
{'driver': 'GV1', 'value': 0, 'uom': 2}, # Watering
{'driver': 'GV2', 'value': 0, 'uom': 44}, # Remaining
{'driver': 'GV3', 'value': 0, 'uom': 44}, # Total
{'driver': 'GV4', 'value': 0, 'uom': 44}, # Elapsed
{'driver': 'GV5', 'value': 0, 'uom': 44}, # Instant On Minutes
]
id = 'taplinker'
commands = {
'GV5': instantOn, 'GV10': instantOff, 'GV6': intervalMode, 'GV7': oddEvenMode,
'GV8': sevenDayMode, 'GV9': monthMode
}
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('Template')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
polyglot.stop()
sys.exit(0)
"""
Catch SIGTERM or Control-C and exit cleanly.
"""
| 40.422111 | 122 | 0.518585 | 1,622 | 16,088 | 5.051171 | 0.151048 | 0.032955 | 0.042841 | 0.05639 | 0.534237 | 0.47211 | 0.407055 | 0.363237 | 0.342732 | 0.319053 | 0 | 0.014676 | 0.360455 | 16,088 | 397 | 123 | 40.523929 | 0.781611 | 0.042143 | 0 | 0.445141 | 0 | 0 | 0.11954 | 0.001371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.009404 | 0.021944 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
248f8a8798b422e37a9e5b6af2d4bad26607f584 | 908 | py | Python | src/note_passing.py | fizisist/numerical-methods-pdes | 659045a19d8285f12447bb93401062c5e3666e18 | [
"CC-BY-4.0",
"MIT"
] | 100 | 2016-01-18T21:25:55.000Z | 2022-02-03T02:57:22.000Z | src/note_passing.py | QamarQQ/numerical-methods-pdes | d714a9fd2a94110bd0d7bffefb9043e010c4b7f2 | [
"CC-BY-4.0",
"MIT"
] | 7 | 2016-01-28T04:19:31.000Z | 2022-02-06T22:37:08.000Z | src/note_passing.py | QamarQQ/numerical-methods-pdes | d714a9fd2a94110bd0d7bffefb9043e010c4b7f2 | [
"CC-BY-4.0",
"MIT"
] | 82 | 2016-01-18T23:44:21.000Z | 2022-02-06T04:59:14.000Z | #!/usr/bin/env python
# encoding: utf-8
"""Note passing from Python"""
from __future__ import absolute_import
from __future__ import print_function
from mpi4py import MPI
import numpy
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
tag = 42
N = 10
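# rank 0 creates the note and sends it; intermediate ranks receive, print and forward it; the last rank only receives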
if rank == 0:
data = numpy.arange(N, dtype=numpy.float64)
print("Process %s note = %s" % (rank, data))
# Note here that MPI datatype discovery is automatic
comm.Send(data, dest=rank + 1, tag=tag)
elif rank < size - 1:
data = numpy.empty(N, dtype=numpy.float64)
comm.Recv(data, source=rank - 1, tag=tag)
print("Process %s note = %s" % (rank, data))
comm.Send(data, dest=rank + 1, tag=tag)
elif rank == size - 1:
data = numpy.empty(N, dtype=numpy.float64)
comm.Recv(data, source=rank - 1, tag=tag)
print("Process %s note = %s" % (rank, data))
else:
raise Exception("Invalid rank.")
| 22.146341 | 56 | 0.656388 | 141 | 908 | 4.134752 | 0.368794 | 0.034305 | 0.054889 | 0.075472 | 0.480274 | 0.480274 | 0.480274 | 0.435678 | 0.435678 | 0.435678 | 0 | 0.026207 | 0.201542 | 908 | 40 | 57 | 22.7 | 0.777931 | 0.123348 | 0 | 0.375 | 0 | 0 | 0.09264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24903ea9e622fd8f43c25fe974be2913277bb0c6 | 2,287 | py | Python | metanic/settings/development.py | LimpidTech/melody | a00b99f9b697864a078e2cb886be4d75c10458a9 | [
"BSD-3-Clause"
] | null | null | null | metanic/settings/development.py | LimpidTech/melody | a00b99f9b697864a078e2cb886be4d75c10458a9 | [
"BSD-3-Clause"
] | 1 | 2020-02-11T21:34:24.000Z | 2020-02-11T21:34:24.000Z | metanic/settings/development.py | LimpidTech/melody | a00b99f9b697864a078e2cb886be4d75c10458a9 | [
"BSD-3-Clause"
] | null | null | null | from metanic.settings.defaults import INSTALLED_APPS
from metanic.settings.defaults import MIDDLEWARE
from metanic.settings.defaults import REST_FRAMEWORK
from metanic.settings.defaults import cache_url
from metanic.settings.defaults import env_value
from metanic.settings.defaults import project_path
# We specifically allow `import *` in this case to pull in expected settings
from metanic.settings.defaults import * # noqa
DEBUG = True
DEFAULT_FROM_EMAIL = 'services@metanic.local'
FRONTEND_URL = env_value('frontend_url', 'http://localhost:3030/')
MEDIA_ROOT = project_path('media')
MEDIA_URL = '/media/'
METANIC_REDIRECT_URL = 'http://localhost:3030/'
ROOT_URLCONF = 'metanic.core.urls.development'
STATIC_ROOT = project_path('static')
STATIC_URL = '/static/'
MAILGUN_API_KEY = env_value('mailgun_api_key', default='TEST')
ANYMAIL['MAILGUN_API_KEY'] = MAILGUN_API_KEY
SECRET_KEY = env_value(
'secret_key',
'diagonal stunning powder ledge employ dealer',
)
ACCESS_CONTROL_ALLOW_ORIGINS = [
'localhost:3030',
]
REST_FRAMEWORK['DEFAULT_THROTTLE_CLASSES'] = []
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'
] += ('rest_framework.authentication.SessionAuthentication',)
REST_FRAMEWORK['DEFAULT_THROTTLE_RATES'] = {
'anon': env_value('anon_throttle_rate', default='100/second'),
'sensitive': env_value('sensitive_throttle_rate', default='100/second'),
'user': env_value('user_throttle_rate', default='100/second'),
}
INSTALLED_APPS += [
'debug_toolbar',
'django_extensions',
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
CACHES = {
'default': cache_url('redis://localhost:6379/0'),
}
DATABASES = {
'default':
{
'ENGINE':
'django.db.backends.sqlite3',
'NAME':
project_path(
env_value('DATABASE_FILENAME', 'metanic.sqlite3')
),
},
}
ALLOWED_HOSTS = [
'localhost',
'metanic.local',
]
ACCESS_CONTROL_ALLOW_ORIGINS = [
'::1:',
'127.0.0.1',
'127.0.0.1:*',
'localhost',
'localhost:*',
'metanic.local',
'metanic.local:*',
]
INTERNAL_IPS = [
'127.0.0.1',
]
| 25.131868 | 76 | 0.690861 | 257 | 2,287 | 5.88716 | 0.373541 | 0.0423 | 0.087905 | 0.124917 | 0.216788 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02492 | 0.175339 | 2,287 | 90 | 77 | 25.411111 | 0.777306 | 0.034543 | 0 | 0.109589 | 0 | 0 | 0.366893 | 0.160091 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09589 | 0 | 0.09589 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249344d6dc4af56e0d60cf084fb8f52348db6021 | 1,848 | py | Python | sensordisplayProject/sensor/views.py | ylf2002/sensor_display | 660751e461b1afbf0cd6fd97baa3f8bc05698848 | [
"MIT"
] | null | null | null | sensordisplayProject/sensor/views.py | ylf2002/sensor_display | 660751e461b1afbf0cd6fd97baa3f8bc05698848 | [
"MIT"
] | null | null | null | sensordisplayProject/sensor/views.py | ylf2002/sensor_display | 660751e461b1afbf0cd6fd97baa3f8bc05698848 | [
"MIT"
] | null | null | null | from sensor.models import Sensors
from sensor.serializers import TempSerializer
from rest_framework import generics
from django.shortcuts import render
from django.http import JsonResponse
# Create your views here.
class sensors_api(generics.ListCreateAPIView):
queryset = Sensors.objects.all()
serializer_class = TempSerializer
def sensors(request):
data = Sensors.objects.all()
res = []
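    # flatten each Sensors row into [time, acc x/y/z, mag x/y/z, gyro x/y/z, longitude, latitude] for the template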
if data:
for i in data:
tt = i.captime
ax = i.accx
ay = i.accy
az = i.accz
mx = i.magx
my = i.magy
mz = i.magz
gx = i.gyrx
gy = i.gyry
gz = i.gyrz
tx = i.longitude
ty = i.latitude
res.append([tt.isoformat(), float(ax), float(ay), float(az), float(mx), float(my), float(mz), float(gx),
float(gy), float(gz), float(tx), float(ty)])
return render(request, 'sensors_index.html', locals())
# Send the GPS data needed by the HTML page
def get_sensors(request):
data = Sensors.objects.all()
res = []
if data:
for i in data:
tt = i.captime
ax = i.accx
ay = i.accy
az = i.accz
mx = i.magx
my = i.magy
mz = i.magz
gx = i.gyrx
gy = i.gyry
gz = i.gyrz
tx = i.longitude
ty = i.latitude
res.append({"tt": tt.isoformat(), "ax": float(ax), "ay": float(ay), "az": float(az),
"mx": float(mx), "my": float(my), "mz": float(mz),
"gx": float(gx), "gy": float(gy), "gz": float(gz),
"longitude": float(tx), "latitude": float(ty)})
return JsonResponse({'s1': res})
| 27.58209 | 117 | 0.487554 | 218 | 1,848 | 4.110092 | 0.311927 | 0.046875 | 0.05692 | 0.055804 | 0.359375 | 0.359375 | 0.359375 | 0.359375 | 0.359375 | 0.359375 | 0 | 0.000888 | 0.390693 | 1,848 | 66 | 118 | 28 | 0.794849 | 0.021104 | 0 | 0.64 | 0 | 0 | 0.032759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.1 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2495e93dc6a272797419348823761b57f910f2c3 | 5,798 | py | Python | engagement/clearbit_engager.py | drorgarti/SatoriLab | 6e57bad2c01d6ee8baa97f915abc001bed974785 | [
"MIT"
] | 1 | 2019-06-12T09:02:34.000Z | 2019-06-12T09:02:34.000Z | engagement/clearbit_engager.py | drorgarti/SatoriLab | 6e57bad2c01d6ee8baa97f915abc001bed974785 | [
"MIT"
] | null | null | null | engagement/clearbit_engager.py | drorgarti/SatoriLab | 6e57bad2c01d6ee8baa97f915abc001bed974785 | [
"MIT"
] | null | null | null | from engagement.engager import Engager
from engagement.engagement_exception import EngagementException
from entities.acurerate_attributes import P, C
from utils.acurerate_utils import AcureRateUtils
import clearbit
class ClearbitEngager(Engager):
ACURATE_TRIAL_KEY = "sk_2a34f937f031587cb2bf4f6ee84a3c70" # AcureRate - Trial
#ACURATE_PRODUCTION_KEY = "no production key yet" # AcureRate - Production
THE_KEY = ACURATE_TRIAL_KEY
def __init__(self):
super().__init__()
clearbit.key = ClearbitEngager.THE_KEY
def __str__(self):
return 'Clearbit Engager'
def __repr__(self):
return 'Clearbit Engager'
def get_provider_name(self):
return 'Clearbit'
def get_short_symbol(self):
return 'clb'
def get_api_key(self):
return ClearbitEngager.THE_KEY
def set_enrich_key(self):
t = self.enriched_entity.__class__.__name__
if t == 'AcureRatePerson':
email = self.get_pivot_email()
if email is None:
raise EngagementException("Clearbit - cannot engage. No email available as enrich key")
self.enrich_key = email
elif t == 'AcureRateCompany':
if C.DOMAIN not in self.enriched_entity.deduced:
raise EngagementException("Clearbit - cannot engage - no domain property to use as key")
self.enrich_key = self.enriched_entity.deduced.get(C.DOMAIN)
else:
raise EngagementException("Clearbit - cannot engage - cannot generate enrich key. Unknown entity type")
def enrich_person(self):
result_obj = self._get_person_info()
if 'pending' in result_obj and result_obj['pending']:
msg = 'Failed to get information on person %s. Pending (202)' % self.enrich_key
raise EngagementException(msg)
if 'person' not in result_obj or result_obj['person'] is None:
msg = 'Failed to get information on person %s. Not Found (404)' % self.enrich_key
raise EngagementException(msg)
enriched = False
person_data = result_obj['person']
# Get the name properties
if 'name' in person_data:
self.set_data(P.FIRST_NAME, person_data['name']['givenName'])
self.set_data(P.LAST_NAME, person_data['name']['familyName'])
self.set_data(P.FULL_NAME, person_data['name']['fullName'])
if 'email' in person_data and person_data['email'] != self.enrich_key:
self.set_data(P.EMAIL, person_data['email'])
self.add_data(P.EMAILS, person_data['email'])
if 'gender' in person_data and person_data['gender']:
#enriched = True
self.add_data(P.GENDER, person_data['gender'])
if 'bio' in person_data and person_data['bio']:
enriched = True
self.add_data(P.SHORT_DESCRIPTION, person_data['bio'])
if 'location' in person_data and person_data['location']:
enriched = True
self.add_data(P.LOCATIONS, person_data['location'])
if 'facebook' in person_data and person_data['facebook']['handle']:
enriched = True
self.add_data(P.FACEBOOK_URL, person_data['facebook']['handle'])
if 'linkedin' in person_data and person_data['linkedin']['handle']:
enriched = True
self.add_data(P.LINKEDIN_URL, result_obj['person']['linkedin'])
if 'twitter' in person_data and person_data['twitter']['handle']:
enriched = True
self.add_data(P.TWITTER_URL, result_obj['person']['twitter'])
if 'googleplus' in person_data and person_data['googleplus']['handle']:
enriched = True
self.add_data(P.GOOGLEPLUS_URL, result_obj['person']['googleplus'])
if 'employment' in person_data:
job = {}
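            # assemble a job record from whichever employment fields Clearbit returned (name, title, role)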
if person_data['employment'].get('name', None) is not None:
job[P.JOB_NAME] = person_data['employment'].get('name', [])
if person_data['employment'].get('title', None) is not None:
job[P.JOB_TITLE] = person_data['employment'].get('title', [])
if person_data['employment'].get('role', None) is not None:
job[P.JOB_ROLE] = person_data['employment'].get('role', [])
if job != {}:
enriched = True
self.add_data(P.JOBS, job)
# TODO: gravatar, aboutme, github
if not enriched:
msg = 'Failed: no information added to person %s' % self.enrich_key
raise EngagementException(msg)
return [P.JOBS]
def enrich_company(self):
result_obj = self._get_company_info()
if 'pending' in result_obj and result_obj['pending']:
msg = 'Failed to get information on person %s. Pending (202)' % self.enrich_key
raise EngagementException(msg)
if 'company' not in result_obj or result_obj['company'] is None:
msg = 'Failed to get information on company %s. Not Found (404)' % self.enrich_key
raise EngagementException(msg)
enriched = False
company_data = result_obj['company']
return [C.NAME]
def _get_person_info(self):
try:
response = clearbit.Enrichment.find(email=self.enrich_key)
except EngagementException as e:
raise e
except Exception as e:
raise EngagementException(e, True)
return response
def _get_company_info(self):
try:
response = clearbit.Company.find(domain=self.enrich_key, stream=True)
except EngagementException as e:
raise e
except Exception as e:
raise EngagementException(e, True)
return response
| 35.790123 | 115 | 0.625388 | 697 | 5,798 | 4.989957 | 0.177905 | 0.097757 | 0.037378 | 0.031052 | 0.480736 | 0.376078 | 0.253019 | 0.186889 | 0.166187 | 0.166187 | 0 | 0.007601 | 0.273888 | 5,798 | 161 | 116 | 36.012422 | 0.818527 | 0.027768 | 0 | 0.283186 | 0 | 0 | 0.174423 | 0.006217 | 0 | 0 | 0 | 0.006211 | 0 | 1 | 0.097345 | false | 0 | 0.044248 | 0.044248 | 0.247788 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249661029b750065aec2931215cce553b6efc801 | 666 | py | Python | knapsack/ratio.py | dstlmrk/knapsack | 357c6b06cca4e743c2d3977d718346b97ee4b11a | [
"MIT"
] | null | null | null | knapsack/ratio.py | dstlmrk/knapsack | 357c6b06cca4e743c2d3977d718346b97ee4b11a | [
"MIT"
] | null | null | null | knapsack/ratio.py | dstlmrk/knapsack | 357c6b06cca4e743c2d3977d718346b97ee4b11a | [
"MIT"
] | null | null | null | from knapsack import Knapsack
class Ratio(Knapsack):
"""
    Ratio sorts all items by their price-to-weight ratio and adds
    them into the bag one by one, as long as they still fit.
"""
def evaluate(self):
price = 0
capacity = self.capacity
configuration = [0 for i, val in enumerate(self.items)]
sorted_items = sorted(
self.items, key=lambda item: item.ratio, reverse=True
)
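        # greedy pass: take items in decreasing price/weight ratio while they still fit in the remaining capacity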
for item in sorted_items:
if item.weight <= capacity:
capacity -= item.weight
price += item.price
configuration[item.index] = 1
return price, configuration
| 27.75 | 65 | 0.584084 | 80 | 666 | 4.8375 | 0.525 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006834 | 0.340841 | 666 | 23 | 66 | 28.956522 | 0.874715 | 0.159159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249ccac49aa2c5391984dfd6fbfac075310032ca | 1,945 | py | Python | scrapers/unhcr.py | OCHA-DAP/hdx-scraper-ukraine-viz | 149a27c7b6dd3ae8807d063554867aea31f77a38 | [
"MIT"
] | 1 | 2022-03-20T21:00:22.000Z | 2022-03-20T21:00:22.000Z | scrapers/unhcr.py | OCHA-DAP/hdx-scraper-ukraine-viz | 149a27c7b6dd3ae8807d063554867aea31f77a38 | [
"MIT"
] | null | null | null | scrapers/unhcr.py | OCHA-DAP/hdx-scraper-ukraine-viz | 149a27c7b6dd3ae8807d063554867aea31f77a38 | [
"MIT"
] | null | null | null | import logging
from hdx.location.country import Country
from hdx.scraper.base_scraper import BaseScraper
logger = logging.getLogger(__name__)
class UNHCR(BaseScraper):
def __init__(self, datasetinfo, today, outputs, countryiso3s):
super().__init__(
"unhcr",
datasetinfo,
{
"national": (
("NoRefugees", "RefugeesDate"),
("#affected+refugees", "#affected+date+refugees"),
),
"regional": (
("NoRefugees",),
("#affected+refugees",),
),
},
)
self.today = today
self.outputs = outputs
self.countryiso3s = countryiso3s
def run(self):
url = self.datasetinfo["url"]
valuedicts = self.get_values("national")
reader = self.get_reader()
json = reader.download_json(url)
total_refugees = 0
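        # sum refugee counts across all countries and record per-country totals and dates for the configured ISO3 codes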
for data in json["data"]:
individuals = int(data["individuals"])
total_refugees += individuals
date = data["date"]
countryiso3, _ = Country.get_iso3_country_code_fuzzy(data["geomaster_name"])
if countryiso3 in self.countryiso3s:
valuedicts[0][countryiso3] = individuals
valuedicts[1][countryiso3] = date
self.get_values("regional")[0]["value"] = total_refugees
url = self.datasetinfo["url_series"]
json = reader.download_json(url)
rows = [
("RefugeesDate", "NoRefugees"),
("#affected+date+refugees", "#affected+refugees"),
]
for data in json["data"]["timeseries"]:
rows.append((data["data_date"], data["individuals"]))
tabname = "refugees_series"
for output in self.outputs.values():
output.update_tab(tabname, rows)
self.datasetinfo["source_date"] = self.today
| 34.122807 | 88 | 0.553728 | 175 | 1,945 | 5.971429 | 0.348571 | 0.057416 | 0.038278 | 0.040191 | 0.080383 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009909 | 0.32545 | 1,945 | 56 | 89 | 34.732143 | 0.786585 | 0 | 0 | 0.08 | 0 | 0 | 0.15527 | 0.02365 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.06 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249d017eb5799d5e056dbd1b6eb003f08d3ffb34 | 1,167 | py | Python | ready_model.py | vitalProjects/curse_project | 5768d7413db86b1d1054f0ff1102acfbea773fc7 | [
"Apache-2.0"
] | 1 | 2021-03-14T21:38:41.000Z | 2021-03-14T21:38:41.000Z | ready_model.py | vitalfect/columnsAgent | 5768d7413db86b1d1054f0ff1102acfbea773fc7 | [
"Apache-2.0"
] | null | null | null | ready_model.py | vitalfect/columnsAgent | 5768d7413db86b1d1054f0ff1102acfbea773fc7 | [
"Apache-2.0"
] | null | null | null | from os.path import join
import torch
import config
from models.attention_model import ZReader as ZReader_attn
from models.fnet_model import ZReader as ZReader_fnet
if __name__ == "__main__":
str_true = "thehighwayroundsacurveandtransitionstondstreetrunningeastwardthroughmorefarmlandasthetrunklineappro" \
"achesneway"
str_ = "twer hvb rye hfj idf g fdhh ghw kja ghjy r rtyo u nfgh dhjk s a cghfhf u r vfgh e a fn d t r afgh n s i " \
"tfgh i ghjo n srt t o nghj d smn t rkl e edfg t fdr u fdn n iret hn rtyg e adfsg s t wdfg a r vbd t " \
"hvbcnv r o u iopg xcvh zxm sdo qwr dfge frety a dfgr m l kla ern drt auio jks vbnt bvnh e fght r u dsfn" \
" ikk bnl gfi kbn fe ea hgp dsfp feir bnco ajkl etc dfh ehjd s dgn e dfw dfka yghp"
batch_ = [str_]
with open(file="ztext.txt", encoding="utf-8", mode="r") as file:
batch = file.readlines()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
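# Instantiate the attention-based ZReader, load its saved weights, and run decoding on the input batch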
model = ZReader_attn(*ZReader_attn.get_parameters()).to(device)
model.load_parameters(join(config.weights_path, '0529_1629_72'), device=device)
model.z_read(batch_)
| 46.68 | 119 | 0.700086 | 197 | 1,167 | 4.015228 | 0.649746 | 0.041719 | 0.045512 | 0.050569 | 0.068268 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.227078 | 1,167 | 24 | 120 | 48.625 | 0.864745 | 0 | 0 | 0 | 0 | 0.157895 | 0.461868 | 0.084833 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.263158 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249e34ad03f7c56a8da00f23fe94625ab7a19c26 | 907 | py | Python | meza/__init__.py | SteadBytes/meza | ff1f881fc2a765b383535328456b380e75181b34 | [
"MIT"
] | null | null | null | meza/__init__.py | SteadBytes/meza | ff1f881fc2a765b383535328456b380e75181b34 | [
"MIT"
] | null | null | null | meza/__init__.py | SteadBytes/meza | ff1f881fc2a765b383535328456b380e75181b34 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza
~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from datetime import datetime as dt
from os import path as p
__version__ = '0.42.5'
__title__ = 'meza'
__package_name__ = 'meza'
__author__ = 'Reuben Cummings'
__description__ = 'A Python toolkit for processing tabular data'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
BOM = '\ufeff'
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
DATA_DIR = p.join(PARENT_DIR, 'data', 'test')
| 25.194444 | 77 | 0.706725 | 124 | 907 | 4.846774 | 0.653226 | 0.074875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030144 | 0.158765 | 907 | 35 | 78 | 25.914286 | 0.754915 | 0.390298 | 0 | 0 | 0 | 0 | 0.267035 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249e4e189863ccce071f3265d50900ed737a9d1d | 3,590 | py | Python | protein_ligand_contacts/contact_extraction/ifg.py | songlab-cal/contact-geometry | 81cb7e56af7bccaf5b7f6dde52d9f2adbe89e8bd | [
"MIT"
] | 2 | 2021-11-03T21:00:21.000Z | 2021-11-06T21:27:53.000Z | protein_ligand_contacts/contact_extraction/ifg.py | songlab-cal/contact-geometry | 81cb7e56af7bccaf5b7f6dde52d9f2adbe89e8bd | [
"MIT"
] | null | null | null | protein_ligand_contacts/contact_extraction/ifg.py | songlab-cal/contact-geometry | 81cb7e56af7bccaf5b7f6dde52d9f2adbe89e8bd | [
"MIT"
] | null | null | null | from functional_groups import SMARTS_ATOM_MAP
from typing import List, Tuple
import numpy as np
#typedef
xyzcoord = Tuple[float, float, float]
class iFG():
'''
iFG class to store information about ligand atoms that make up an iFG, mapping to amino acid residues,
as well as the protein residues that interact with the iFG
'''
def __init__(self, name:str, cfg_entry:dict):
self.canonical_name = name.split('-')[0]
self.name = name
atom_list = []
coord_list = []
for atm in cfg_entry:
atom_list.append(atm)
coord_list.append(cfg_entry[atm])
self.lig_atoms = atom_list
self.heavy_atom_coords = np.vstack(coord_list)
self.aa_atoms = None
self.contacts = dict()
self.ala_ref = np.array([[ 0.11800566, -2.45811057, 0. ], \
[-0.77976233, -1.30825949, 0. ], \
[ 0. , 0. , 0. ]])
def __repr__(self):
return self.name
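# Build a per-residue-type mapping from ligand atom names to amino-acid atom names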
def get_aa_atom_names(self, SMARTS_ATOM_MAP):
lig_aa_mapping = {}
for restype in SMARTS_ATOM_MAP[self.canonical_name]:
atoms = SMARTS_ATOM_MAP[self.canonical_name][restype]
lig_aa_mapping[restype] = dict(zip(self.lig_atoms, atoms))
self.aa_atoms = lig_aa_mapping
def add_interacting_residue(self, interaction_type:str, resid:str, name2:str, name1:str, idx_coord_map:dict):
'''
if interaction_type == 'cc':
if resid not in self.ccs:
self.ccs.update({resid: {'atoms': idx_coord_map[resid]['names'], 'coords': idx_coord_map[resid]['coords'], 'interaction': [(name2 ,name1)]}})
else:
self.ccs[resid]['interaction'].append((name2, name1))
elif interaction_type == 'hb':
if resid not in self.hbs:
self.hbs.update({resid: {'atoms': idx_coord_map[resid]['names'], 'coords': idx_coord_map[resid]['coords'], 'interaction': [(name2 ,name1)]}})
else:
self.hbs[resid]['interaction'].append((name2, name1))
'''
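# Register the residue once with its atom names/coordinates, then accumulate the interacting atom-name pairs per interaction type ('hb' or 'cc')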
if resid not in self.contacts:
self.contacts.update({resid: {'residue_atoms': idx_coord_map[resid]['names'], \
'residue_coords': np.array(idx_coord_map[resid]['coords']),
'interactions': {'hb': set(), 'cc': set()}}})
self.contacts[resid]['interactions'][interaction_type].add((name2,name1))
class VDM():
def __init__(
self,
name: str,
atom_names: List[str],
atom_coords: List[xyzcoord],
):
self.name = name
self.canonical_name = name.split('_')[0]
self.atoms = atom_names
self.coords = np.array(atom_coords)
self.aa_atoms = None
self.contacts = dict()
def get_aa_atoms(self):
return self.aa_atoms
def set_aa_atoms(self):
global SMARTS_ATOM_MAP
lig_aa_mapping = dict()
for restype in SMARTS_ATOM_MAP[self.canonical_name]:
atoms = SMARTS_ATOM_MAP[self.canonical_name][restype]
lig_aa_mapping[restype] = dict(zip(self.atoms, atoms))
self.aa_atoms = lig_aa_mapping
def __repr__(self):
return self.name
def update_contacts(self, new_contact: dict):
self.contacts.update(new_contact) | 31.769912 | 157 | 0.558496 | 423 | 3,590 | 4.491726 | 0.234043 | 0.036842 | 0.047895 | 0.050526 | 0.479474 | 0.39 | 0.35 | 0.256842 | 0.256842 | 0.218947 | 0 | 0.022699 | 0.32507 | 3,590 | 113 | 158 | 31.769912 | 0.761453 | 0.206128 | 0 | 0.266667 | 0 | 0 | 0.025074 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.05 | 0.05 | 0.283333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
249f953c98713e6e5275f3787508693400d14f46 | 1,806 | py | Python | Other code/Rasterization Script/Python_RasterCalculator.py | Ange-lina/BalticOffshorePotential | fd8af87bfe0bc069fe1ec6fd1c9e175ff1a5a1d3 | [
"MIT"
] | 2 | 2020-01-16T17:35:27.000Z | 2020-01-16T17:45:54.000Z | Other code/Rasterization Script/Python_RasterCalculator.py | Ange-lina/BalticOffshorePotential | fd8af87bfe0bc069fe1ec6fd1c9e175ff1a5a1d3 | [
"MIT"
] | null | null | null | Other code/Rasterization Script/Python_RasterCalculator.py | Ange-lina/BalticOffshorePotential | fd8af87bfe0bc069fe1ec6fd1c9e175ff1a5a1d3 | [
"MIT"
] | 2 | 2020-01-17T09:15:26.000Z | 2020-01-17T09:16:47.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 13:35:09 2019
@author: Petronium
"""
# Import system modules
import rasterio
from rasterio.plot import show
fp1 = r'Bathymetry_Clip.tif'
fp2 = r'Shipping_Clip.tif'
fp3 = r'WindMap_Clip.tif'
bathymetry = rasterio.open(fp1)
shipping = rasterio.open(fp2)
windspeed = rasterio.open(fp3)
bathymetry_b = bathymetry.read(1)
shipping_b = shipping.read(1)
windspeed_b = windspeed.read(1)
def raster_calculation(raster_list, weight_list):
"""
Function to calculate the weighted sum of the rasters
Args:
raster_list (list): input rasters
weight_list (list): input weight of the rasters
Returns:
result raster
"""
assert len(raster_list) == len(weight_list), "Both list should have the same length!"
result_map = 0
for r, w in zip(raster_list, weight_list):
result_map += r * w
return result_map
def saving_to_file(ras_name):
"""
Function to save the raster
Args:
ras_name (ndarray): raster array to write to file
Returns:
None
"""
with rasterio.Env():
# Write an array as a raster band to a new 8-bit file. For
# the new file's profile, we start with the profile of the source
profile = bathymetry.profile
# And then change the band count to 1, set the
# dtype to uint8, and specify LZW compression.
profile.update(
dtype=rasterio.float32,
count=1,
compress='lzw')
with rasterio.open('ResultMap.tif', 'w', **profile) as dst:
dst.write(ras_name.astype(rasterio.float32), 1)
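# Combine the bathymetry, shipping and wind-speed bands with weights 0.35/0.2/0.45 and export the result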
result_map = raster_calculation((bathymetry_b, shipping_b, windspeed_b), (0.35,0.2,0.45))
saving_to_file(result_map)
| 22.860759 | 89 | 0.630676 | 248 | 1,806 | 4.471774 | 0.427419 | 0.040577 | 0.028855 | 0.036069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030465 | 0.272979 | 1,806 | 78 | 90 | 23.153846 | 0.814166 | 0.318937 | 0 | 0 | 0 | 0 | 0.094523 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24a456166df9f6ee81fe216b4721ba7faa8166b8 | 917 | py | Python | setup.py | M3TIOR/pyside2-style-test | 1000a00c44de90b1d9c2f2ae121ab127d1499ecd | [
"MIT"
] | 1 | 2020-08-14T16:21:22.000Z | 2020-08-14T16:21:22.000Z | setup.py | M3TIOR/pyside2-style-test | 1000a00c44de90b1d9c2f2ae121ab127d1499ecd | [
"MIT"
] | 1 | 2020-03-27T00:29:04.000Z | 2020-03-27T05:59:18.000Z | setup.py | M3TIOR/pyside2-style-test | 1000a00c44de90b1d9c2f2ae121ab127d1499ecd | [
"MIT"
] | null | null | null | import setuptools
import pyside2_style_test as testkit
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as req:
dependencies = req.read()
setuptools.setup(
name="m3-pyside2-style-test",
version=testkit.__version__,
author=testkit.__author__,
author_email="cplusplusook@gmail.com",
license="MIT",
description="A Qt-5 interactive stylesheet preview script",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/m3tior/pyside2-style-test",
packages=setuptools.find_packages(),
install_requires=dependencies,
entry_points={
"console_scripts": "pyside2-style-test=pyside2_style_test.cli:_main"
},
classifiers=[
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 29.580645 | 70 | 0.708833 | 109 | 917 | 5.743119 | 0.614679 | 0.095847 | 0.127796 | 0.095847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01297 | 0.159215 | 917 | 30 | 71 | 30.566667 | 0.798962 | 0 | 0 | 0 | 0 | 0 | 0.376227 | 0.098146 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24a66bf1696cb9f471526fbe804dd1d34ee16f6b | 1,446 | py | Python | presidio-anonymizer/tests/test_anonymizer_result.py | kubistika/presidio | 4777d1759e9bddc45317d9b2689e6df9f75eec05 | [
"MIT"
] | null | null | null | presidio-anonymizer/tests/test_anonymizer_result.py | kubistika/presidio | 4777d1759e9bddc45317d9b2689e6df9f75eec05 | [
"MIT"
] | null | null | null | presidio-anonymizer/tests/test_anonymizer_result.py | kubistika/presidio | 4777d1759e9bddc45317d9b2689e6df9f75eec05 | [
"MIT"
] | null | null | null | from presidio_anonymizer.entities import AnonymizerResult
from presidio_anonymizer.entities.anonymized_entity import AnonymizedEntity
def test_when_no_params_then_object_initialised_correctly():
res = AnonymizerResult()
assert res.text is None
assert res.items == []
def test_when_correct_params_then_object_initialised_correctly():
ari = AnonymizedEntity("an", "b", 1, 2, "c")
res = AnonymizerResult("a", [ari])
assert res.text == "a"
assert res.items[0] == ari
def test_when_normalized_items_called_then_indexes_are_normalized():
ari = AnonymizedEntity("a", "b", 1, 2, "cd")
res = AnonymizerResult("*****", [ari])
res.normalize_item_indexes()
assert res.items[0].start == 3
assert res.items[0].end == 5
def test_when_set_text_then_text_is_set():
res = AnonymizerResult()
res.set_text("a")
assert res.text == "a"
def test_when_add_item_the_item_added():
res = AnonymizerResult()
ari = AnonymizedEntity("a", "b", 1, 2, "cd")
res.add_item(ari)
assert res.items[0] == ari
def test_when_eq_called_then_instances_are_equal():
res = AnonymizerResult()
res.set_text("a")
res2 = AnonymizerResult()
res2.set_text("a")
assert res.__eq__(res2)
def test_when_not_eq_called_then_instances_are_not_equal():
res = AnonymizerResult()
res.set_text("a")
res2 = AnonymizerResult()
res2.set_text("b")
assert res.__eq__(res2) is False
| 26.290909 | 75 | 0.70332 | 197 | 1,446 | 4.807107 | 0.279188 | 0.095037 | 0.081309 | 0.063358 | 0.438226 | 0.291447 | 0.259768 | 0.259768 | 0.139388 | 0.139388 | 0 | 0.015038 | 0.172199 | 1,446 | 54 | 76 | 26.777778 | 0.776107 | 0 | 0 | 0.421053 | 0 | 0 | 0.017289 | 0 | 0 | 0 | 0 | 0 | 0.263158 | 1 | 0.184211 | false | 0 | 0.052632 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24a6ceaa6ad3a63b821c739a4e853b4bfbbe8d5a | 3,637 | py | Python | train.py | rickgroen/cov-weighting | 64c296679cd37e724a03c6dc107606f7048aec96 | [
"MIT"
] | 26 | 2021-01-05T07:10:31.000Z | 2022-03-23T06:31:00.000Z | train.py | rickgroen/cov-weighting | 64c296679cd37e724a03c6dc107606f7048aec96 | [
"MIT"
] | 6 | 2021-04-12T16:27:11.000Z | 2022-02-09T07:00:15.000Z | train.py | rickgroen/cov-weighting | 64c296679cd37e724a03c6dc107606f7048aec96 | [
"MIT"
] | 7 | 2021-03-08T09:28:05.000Z | 2022-02-23T07:39:29.000Z | import time
import torch
# Project imports
from utils import *
from options import TrainOptions
from data_loaders import create_dataloader
from methods import create_method
class Trainer:
"""
Trains any of the model methods in methods/ using any of the losses
in losses/.
Initialize the trainer using a set of parsed arguments from options/.
"""
def __init__(self, args):
self.args = args
# Retrieve train and validation data loaders.
self.loader = create_dataloader(self.args, 'train')
self.val_loader = create_dataloader(self.args, 'val')
num_iterations_per_epoch = len(self.loader)
setattr(args, 'num_iterations', num_iterations_per_epoch)
# Initialize a model.
self.model = create_method(args, self.loader)
# We keep track of the aggregated losses per epoch in a dict. For now the pre-training
# train loss is set to zero.
self.best_val_loss = float('Inf')
self.validate(-1)
pre_validation_update(self.model.losses[-1]['val'])
def train(self):
""" Main function for training any of the methods, given an input parse.
"""
for epoch in range(self.args.epochs):
self.model.update_learning_rate(epoch, self.args.learning_rate)
c_time = time.time()
self.model.to_train()
# Run a single training epoch.
self.model.run_epoch(epoch)
# Perform a validation pass each epoch.
self.validate(epoch)
# Print an update of training, val losses.
print_epoch_update(epoch, time.time() - c_time, self.model.losses)
# Make a checkpoint, so training can be resumed.
running_val_loss = self.model.losses[epoch]['val']
is_best = running_val_loss < self.best_val_loss
if is_best:
self.best_val_loss = running_val_loss
self.model.save_network('best')
print('Finished Training. Best validation loss:\t{:.4f}'.format(self.best_val_loss))
# Save the model of the final epoch. If another model was better, also save it separately as best.
self.model.save_network('final')
self.model.save_losses()
def validate(self, epoch):
self.model.to_eval()
val_loss = 0.0
for data in self.val_loader:
# Get the losses for the model for this epoch.
self.model.set_input(data)
self.model.forward()
iter_loss = self.model.get_untrained_loss()
val_loss += iter_loss
# Compute the loss over this validation set.
val_loss /= len(self.val_loader)
# Store the running loss for the validation images.
self.model.store_val_loss(val_loss, epoch)
def verify_data(self):
""" Verifies whether all data has been downloaded and correctly put in the data directory.
"""
check_if_all_images_are_present('eigen', self.args.data_dir)
check_if_all_images_are_present('cityscapes', self.args.data_dir)
def main():
parser = TrainOptions()
args = parser.parse()
args.mode = 'train'
# Print CUDA version.
print("Running code using CUDA {}".format(torch.version.cuda))
gpu_id = int(args.device[-1])
torch.cuda.set_device(gpu_id)
print('Training on device cuda:{}'.format(gpu_id))
trainer = Trainer(args)
if args.mode == 'train':
trainer.train()
elif args.mode == 'verify-data':
trainer.verify_data()
if __name__ == '__main__':
main()
print("YOU ARE TERMINATED!")
| 32.765766 | 106 | 0.637613 | 484 | 3,637 | 4.609504 | 0.299587 | 0.060511 | 0.019722 | 0.026894 | 0.07082 | 0.023308 | 0 | 0 | 0 | 0 | 0 | 0.002257 | 0.269178 | 3,637 | 110 | 107 | 33.063636 | 0.837096 | 0.254605 | 0 | 0 | 0 | 0 | 0.076604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.096774 | 0 | 0.193548 | 0.080645 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24a6f16182dda0c0b9e74d9f007b1abeaf6547e8 | 1,272 | py | Python | Versuch2/task3.4.py | Tobias-Schoch/SSS | f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | [
"MIT"
] | null | null | null | Versuch2/task3.4.py | Tobias-Schoch/SSS | f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | [
"MIT"
] | null | null | null | Versuch2/task3.4.py | Tobias-Schoch/SSS | f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | [
"MIT"
] | 1 | 2022-01-06T12:47:53.000Z | 2022-01-06T12:47:53.000Z | import numpy as np
import cv2
# -------- Task 3.4 -------- #
# Array to store the boundary and width values
vec = np.zeros((5, 2))
crop = ["crop1", "crop2", "crop3", "crop4", "crop5"]
# Read the image and convert it to grayscale
image = cv2.imread('data/korrigiertes_bild2.png', cv2.IMREAD_GRAYSCALE)
# Boundary and width values for image 1
vec[0, 0] = 0
vec[0, 1] = 105
# Boundary and width values for image 2
vec[1, 0] = 111
vec[1, 1] = 135
# Boundary and width values for image 3
vec[2, 0] = 249
vec[2, 1] = 137
# Boundary and width values for image 4
vec[3, 0] = 389
vec[3, 1] = 132
# Boundary and width values for image 5
vec[4, 0] = 529
vec[4, 1] = 111
# Process all 5 crops
for z in range(1, 6):
# Starting pixel in the vertical (y) direction
y = 0
# Starting pixel in the horizontal (x) direction
x = int(vec[z-1, 0])
# How far down the crop should extend (height h)
h = 480
# How wide the crop should extend (width w)
w = int(vec[z-1, 1])
# Crop the image using the variables above
crop[z - 1] = image[y:y + h, x:x + w]
# Display the cropped images
cv2.imshow("Crop" + str(z), crop[z - 1])
# Export the cropped images
cv2.imwrite("korrigiert" + str(z) + ".png", crop[z-1])
cv2.waitKey(0)
| 25.959184 | 71 | 0.63522 | 220 | 1,272 | 3.663636 | 0.427273 | 0.059553 | 0.124069 | 0.14268 | 0.261787 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085279 | 0.225629 | 1,272 | 48 | 72 | 26.5 | 0.732995 | 0.480346 | 0 | 0 | 0 | 0 | 0.108865 | 0.041991 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24a9de62a34b58dfc19d30bea2ba3cdb517e09ee | 944 | py | Python | data_structures/Stack/stack/Python/stack.py | CarbonDDR/al-go-rithms | 8e65affbe812931b7dde0e2933eb06c0f44b4130 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | data_structures/Stack/stack/Python/stack.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | data_structures/Stack/stack/Python/stack.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | '''This code was written by MTH Junaidi (github: Miranjunaidi) on 26th Oct 2019 at 7:15 PM IST'''
#POP function removes the last element of the list
def pop(stack):
del stack[-1]
return stack
#push function adds a new number num to the list at the end.
def push(stack,num):
stack.append(num)
return stack
#print function prints all the elements of the stack sequentially
def prints(stack):
for i in stack:
print(i,end = " ")
print("")
#This is where the main function starts
print("\n welcome, a new empty stack is created. \n press index numbers to do the opertations \n")
stack = []
a=0
while a!=4:
print(" 1.push \n 2.pop \n 3.see the stack \n 4.quit")
a = int(input())
if a == 1:
num = int(input("Enter the number you want to push"))
push(stack, num)
elif a == 2:
pop(stack)
elif a==3:
prints(stack)
elif a>4 or a<0:
print("enter a valid operation") | 29.5 | 98 | 0.635593 | 162 | 944 | 3.703704 | 0.469136 | 0.025 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029915 | 0.256356 | 944 | 32 | 99 | 29.5 | 0.824786 | 0.3125 | 0 | 0.08 | 0 | 0.04 | 0.297972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0 | 0 | 0.2 | 0.28 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24aa16e48945bcc9cda497360276a9d7973aafa9 | 5,165 | py | Python | server.py | Protozzzer/works | 2e4ee1443ff8289eabde3c85024037cefba47ea2 | [
"Unlicense"
] | null | null | null | server.py | Protozzzer/works | 2e4ee1443ff8289eabde3c85024037cefba47ea2 | [
"Unlicense"
] | null | null | null | server.py | Protozzzer/works | 2e4ee1443ff8289eabde3c85024037cefba47ea2 | [
"Unlicense"
] | null | null | null | # -*- coding: cp1251 -*-
import socket
import time
import sys
from PyQt5 import QtWidgets
from threading import Thread
from PyQt5 import QtCore
from PyQt5.QtWidgets import (QVBoxLayout, QSplitter, QTextEdit, QWidget)
from PyQt5.QtGui import QTextCursor
import MySQLdb
class Settings_server:
host = socket.gethostbyname(socket.gethostname())
port = 6046
clients = []
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("",port))
class ServerThread(Thread):
def __init__(self, window):
Thread.__init__(self)
self.window = window
def run(self):
db = MySQLdb.connect("localhost", "root", "protozerg", "datachat1", charset='cp1251')
db.autocommit(True)
cursor = db.cursor()
window.chat.append("Server started")
while True:
data, adr = Settings_server.sock.recvfrom(1024)
if adr not in Settings_server.clients:
Settings_server.clients.append(adr)
times = time.strftime("%Y-%m-%d-%H.-%M.-%S", time.localtime())
window.chat.append("[" + adr[0] + "]=[" + str(adr[1]) + "]=[" + times + "]\n")
for client in Settings_server.clients:
if adr != client:
Settings_server.sock.sendto(data, client)
window.chat.append(data.decode("cp1251") + "\n")
data_dec = str(data.decode("cp1251"))
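# First character of the payload is the opcode: A = authenticate, R = register, M = store a chat message, P = replay the stored message history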
if data_dec[0] == "A":
data1 = data_dec[1:]
res = [element for element in data1.split(",")]
cursor.execute("Select 'name', `password` from `Users` Where `name` = %s and `password` = %s;", res)
rows = cursor.fetchone()
if rows is None:
data_to_client = "A" + "False"
Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
else:
data_to_client = "A" + "True"
Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
elif data_dec[0] == "R":
data1 = data_dec[1:]
res = [element for element in data1.split(",")]
cursor.execute("Select 'name' from `Users` Where `name` = %s;", [res[0]])
rows = cursor.fetchone()
if rows is None:
cursor.execute("INSERT Users(name, password) VALUES (%s, %s);", res)
data_to_client = "R" + "True"
Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
elif rows is not None:
data_to_client = "R" + "False"
Settings_server.sock.sendto(data_to_client.encode("cp1251"), client)
elif data_dec[0] == "M":
data1 = data_dec[1:]
res = [element for element in data1.split(",")]
cursor.execute("Select `id`, `name` from `Users` Where `name` = %s", [res[0]])
row = cursor.fetchone()
values = []
values.append(res[1])
values.append(int(row[0]))
cursor.execute("INSERT INTO Messages(text, User_id, date) VALUES (%s, %s, NOW());", values)
elif data_dec[0] == "P":
cursor.execute(
"Select M.text, M.date, U.name from `Messages` M INNER JOIN `Users` U ON U.id = M.User_id ORDER BY M.date;")
row = cursor.fetchall()
l = []
for rall in row:
new = []
new.append(rall[2])
new.append(str(rall[1]))
new.append(rall[0])
l.append(new)
for d in l:
Settings_server.sock.sendto(("P" + str(d)).encode("cp1251"), client)
db.close()
class Window(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setObjectName("Server")
self.resize(371, 500)
self.setMaximumSize(300, 700)
self.setMinimumSize(400, 700)
self.setStyleSheet("\n""background-color: rgb(167, 198, 255);")
self.chat = QTextEdit()
self.chat.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"font: 75 italic 15pt \"MS Shell Dlg 2\";")
self.chat.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.chat.setReadOnly(True)
self.chatBody = QVBoxLayout(self)
splitter = QSplitter(QtCore.Qt.Vertical)
splitter.addWidget(self.chat)
splitter.setSizes([400, 100])
splitter2 = QSplitter(QtCore.Qt.Vertical)
splitter2.addWidget(splitter)
splitter2.setSizes([200, 10])
self.chatBody.addWidget(splitter2)
self.setWindowTitle("Chat Application")
self.resize(500, 500)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Window()
ServerThread = ServerThread(window)
ServerThread.start()
window.show()
window.hide()
app.exec_()
ServerThread.join()
Settings_server.sock.close()
| 38.544776 | 128 | 0.548112 | 575 | 5,165 | 4.810435 | 0.29913 | 0.060738 | 0.052061 | 0.052061 | 0.223789 | 0.206797 | 0.206797 | 0.185105 | 0.167028 | 0.167028 | 0 | 0.037838 | 0.319458 | 5,165 | 133 | 129 | 38.834586 | 0.749075 | 0.004066 | 0 | 0.12069 | 0 | 0.017241 | 0.130688 | 0 | 0.034483 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0.017241 | 0.077586 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24aa7d31a941bbe6faa5d7383cec1739d90b532d | 2,147 | py | Python | lib/ansibledocgen/cli.py | Ymil/ansible-docgen | a69dc8b067861b5a365d6086526d49f3cebfcfc0 | [
"MIT"
] | null | null | null | lib/ansibledocgen/cli.py | Ymil/ansible-docgen | a69dc8b067861b5a365d6086526d49f3cebfcfc0 | [
"MIT"
] | null | null | null | lib/ansibledocgen/cli.py | Ymil/ansible-docgen | a69dc8b067861b5a365d6086526d49f3cebfcfc0 | [
"MIT"
] | null | null | null | """ Command Line Interface Module """
import optparse
import sys
import os
from ansibledocgen.parser.dir import DirParser
from ansibledocgen.formatter.markup import MarkupFormatter
class Cli(object):
""" Command Line Interface for ansible-docgen """
def __init__(self):
""" Setup Arguments and Options for CLI """
# Parse CLI Arguments
parser = optparse.OptionParser()
parser.add_option("-p", "--project", dest="project",
help="Path to Ansible project",
metavar="PROJECT",
default="./")
parser.add_option("-s", "--style", dest="style",
help="Choose the format for the documentation.\
Default is markup. Example: --style=[markup]",
metavar="STYLE",
default="markup")
parser.add_option("-n", "--no-tags", dest="show_tags",
action='store_false',
help="This option disables show tags in the documentation",
metavar="TAGS",
default=True)
(options, args) = parser.parse_args()
# Make sure there is a trailing /
self.project = os.path.join(options.project, "")
self.style = options.style
self.params = {}
self.params['show_tags'] = options.show_tags
# Used to Parse Roles and Playbooks
self.dirparser = None
self.formatter = None
def run(self):
""" EntryPoint Of Application """
# Parse Project for Roles and Playbooks
self.dirparser = DirParser(self.project)
# Based on chosen style, use the associated formatter
if self.style == "markup":
self.formatter = MarkupFormatter(
self.dirparser.get_parserdata(), self.project, self.params)
self.formatter.parse_data()
self.formatter.write_files()
else:
print("Error: Use of an unsupported style.\
The supported styles are: markup")
sys.exit(1)
| 37.666667 | 85 | 0.552399 | 217 | 2,147 | 5.396313 | 0.447005 | 0.027327 | 0.038429 | 0.035867 | 0.051238 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00071 | 0.344201 | 2,147 | 56 | 86 | 38.339286 | 0.830966 | 0.146251 | 0 | 0 | 0 | 0 | 0.097561 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.125 | 0 | 0.2 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24aed837d0532b684b7fafa445f36bb47a201801 | 1,904 | py | Python | missing_strat/inference/decomposition.py | tritas/missing_strat | 79abbcadfc8d2074f05f7e72de4f7d454a847a9c | [
"BSD-3-Clause"
] | null | null | null | missing_strat/inference/decomposition.py | tritas/missing_strat | 79abbcadfc8d2074f05f7e72de4f7d454a847a9c | [
"BSD-3-Clause"
] | null | null | null | missing_strat/inference/decomposition.py | tritas/missing_strat | 79abbcadfc8d2074f05f7e72de4f7d454a847a9c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 15 16:20:49 2016
@author: Philippe
"""
import numpy as np
from scipy.sparse.linalg import svds
from functools import partial
def em_svd(Y, k=None, tol=1e-3, maxiter=None):
"""
Approximate SVD on data with missing values via expectation-maximization
Parameters
----------
Y: (nobs, ndim) data matrix, missing values denoted by NaN/Inf
k: number of singular values/vectors to find (default: k=ndim)
tol: convergence tolerance on change in trace norm
maxiter: maximum number of EM steps to perform (default: no limit)
Returns
-------
Y_hat: (nobs, ndim) reconstructed data matrix
mu_hat: (ndim,) estimated column means for reconstructed data
U, s, Vt: singular values and vectors (see np.linalg.svd and
scipy.sparse.linalg.svds for details)
"""
if k is None:
svdmethod = partial(np.linalg.svd, full_matrices=False)
else:
svdmethod = partial(svds, k=k)
if maxiter is None:
maxiter = np.inf
# initialize the missing values to their respective column means
mu_hat = np.nanmean(Y, axis=0, keepdims=1)
valid = np.isfinite(Y)
Y_hat = np.where(valid, Y, mu_hat)
halt = False
ii = 1
v_prev = 0
U, s, Vt = None, None, None
while not halt:
# SVD on filled-in data
U, s, Vt = svdmethod(Y_hat - mu_hat)
# impute missing values
Y_hat[~valid] = (U.dot(np.diag(s)).dot(Vt) + mu_hat)[~valid]
# update bias parameter
mu_hat = Y_hat.mean(axis=0, keepdims=1)
# test convergence using relative change in trace norm
v = s.sum()
print((v - v_prev) / v_prev)
if ii >= maxiter or ((v - v_prev) / v_prev) < tol:
halt = True
ii += 1
v_prev = v
return Y_hat, mu_hat, U, s, Vt
| 27.2 | 76 | 0.601366 | 280 | 1,904 | 4.014286 | 0.442857 | 0.031139 | 0.014235 | 0.030249 | 0.019573 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016357 | 0.293592 | 1,904 | 69 | 77 | 27.594203 | 0.819331 | 0.459034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.178571 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24b05ec839f0838d72624fea140291ea0a77d8e1 | 500 | py | Python | Resources/books/deep_learning_time_series_forecasting/code/chapter_11/02_average_forecast.py | gdepalma93/bright-athlete-academy | 54ba0cc6633637c1bd6d90120153e04b981244bf | [
"MIT"
] | null | null | null | Resources/books/deep_learning_time_series_forecasting/code/chapter_11/02_average_forecast.py | gdepalma93/bright-athlete-academy | 54ba0cc6633637c1bd6d90120153e04b981244bf | [
"MIT"
] | null | null | null | Resources/books/deep_learning_time_series_forecasting/code/chapter_11/02_average_forecast.py | gdepalma93/bright-athlete-academy | 54ba0cc6633637c1bd6d90120153e04b981244bf | [
"MIT"
] | null | null | null | # example of an average forecast
from numpy import mean
from numpy import median
# one-step average forecast
def average_forecast(history, config):
n, avg_type = config
# mean of last n values
if avg_type is 'mean':
return mean(history[-n:])
# median of last n values
return median(history[-n:])
# define dataset
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
print(data)
# test naive forecast
for i in range(1, len(data)+1):
print(average_forecast(data, (i, 'mean'))) | 26.315789 | 68 | 0.698 | 91 | 500 | 3.791209 | 0.505495 | 0.173913 | 0.086957 | 0.075362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 0.164 | 500 | 19 | 69 | 26.315789 | 0.746411 | 0.274 | 0 | 0 | 0 | 0 | 0.022409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.454545 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24b9c10a0c8dd0aa24e9fa4c47c3053ceaf8be37 | 3,610 | py | Python | sumo/tests/netedit/additionalElements/chargingstation/create/test.sikuli/test.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | null | null | null | sumo/tests/netedit/additionalElements/chargingstation/create/test.sikuli/test.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | null | null | null | sumo/tests/netedit/additionalElements/chargingstation/create/test.sikuli/test.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | 2 | 2017-12-14T16:41:59.000Z | 2020-10-16T17:51:27.000Z | #!/usr/bin/env python
"""
@file test.py
@author Pablo Alvarez Lopez
@date 2016-11-25
@version $Id$
python script used by sikulix for testing netedit
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR/TS, Germany
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
# import common functions for netedit tests
import os
import sys
testRoot = os.path.join(os.environ.get('SUMO_HOME', '.'), 'tests')
neteditTestRoot = os.path.join(
os.environ.get('TEXTTEST_HOME', testRoot), 'netedit')
sys.path.append(neteditTestRoot)
import neteditTestFunctions as netedit # noqa
# Open netedit
neteditProcess, match = netedit.setupAndStart(neteditTestRoot)
# go to additional mode
netedit.additionalMode()
# select chargingStation
netedit.changeAdditional("chargingStation")
# set name
netedit.modifyAdditionalDefaultValue(2, "chargingStation")
# set friendlyPos
netedit.modifyAdditionalDefaultBoolValue(3)
# set invalid charging power
netedit.modifyAdditionalDefaultValue(4, "-200")
# try to create chargingStation in mode "reference left"
netedit.leftClick(match, 250, 250)
# set valid charging power
netedit.modifyAdditionalDefaultValue(4, "12000")
# create chargingStation in mode "reference left"
netedit.leftClick(match, 250, 250)
# change reference to right
netedit.modifyAdditionalDefaultValue(9, "reference right")
# set invalid efficiency
netedit.modifyAdditionalDefaultValue(5, "2")
# try create chargingStation in mode "reference right"
netedit.leftClick(match, 240, 250)
# set valid efficiency
netedit.modifyAdditionalDefaultValue(5, "0.3")
# create chargingStation in mode "reference right"
netedit.leftClick(match, 240, 250)
# change reference to center
netedit.modifyAdditionalDefaultValue(9, "reference center")
# Change change in transit
netedit.modifyAdditionalDefaultBoolValue(6)
# create chargingStation in mode "reference center"
netedit.leftClick(match, 425, 250)
# Change length
netedit.modifyAdditionalDefaultValue(11, "30")
# change reference to "reference left"
netedit.modifyAdditionalDefaultValue(9, "reference left")
# set invalid charge delay
netedit.modifyAdditionalDefaultValue(7, "-5")
# try to create a chargingStation in mode "reference left" forcing poisition
netedit.leftClick(match, 500, 250)
# valid charge delay
netedit.modifyAdditionalDefaultValue(7, "7")
# create a chargingStation in mode "reference left" forcing poisition
netedit.leftClick(match, 500, 250)
# change reference to "reference right"
netedit.modifyAdditionalDefaultValue(9, "reference right")
# create a chargingStation in mode "reference right"
netedit.leftClick(match, 110, 250)
# disable friendlyPos
netedit.modifyAdditionalDefaultBoolValue(3)
# change reference to "reference left"
netedit.modifyAdditionalDefaultValue(9, "reference left")
# create a chargingStation in mode "reference left" without friendlyPos
netedit.leftClick(match, 120, 215)
# change reference to "reference right"
netedit.modifyAdditionalDefaultValue(9, "reference right")
# create a chargingStation in mode "reference right" without friendlyPos
netedit.leftClick(match, 500, 215)
# Check undo redo
netedit.undo(match, 8)
netedit.redo(match, 8)
# save additionals
netedit.saveAdditionals()
# Fix stopping places position
netedit.fixStoppingPlace("fixPositions")
# save newtork
netedit.saveNetwork()
# quit netedit
netedit.quit(neteditProcess)
| 26.940299 | 76 | 0.789474 | 439 | 3,610 | 6.487472 | 0.359909 | 0.172051 | 0.073736 | 0.105337 | 0.459621 | 0.352177 | 0.317416 | 0.30302 | 0.295646 | 0.295646 | 0 | 0.035287 | 0.120776 | 3,610 | 133 | 77 | 27.142857 | 0.862004 | 0.477008 | 0 | 0.302326 | 0 | 0 | 0.099837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.069767 | 0 | 0.069767 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24babe270e3cb016bf9a05e1484aec6a6bfc25c7 | 360 | py | Python | nlputils/subset.py | shaun-russell/nlp | 5d6a80ce146b4904dc8bc49d8353da85957b74eb | [
"MIT"
] | 1 | 2018-11-19T00:19:11.000Z | 2018-11-19T00:19:11.000Z | nlputils/subset.py | shaun-russell/nlp | 5d6a80ce146b4904dc8bc49d8353da85957b74eb | [
"MIT"
] | null | null | null | nlputils/subset.py | shaun-russell/nlp | 5d6a80ce146b4904dc8bc49d8353da85957b74eb | [
"MIT"
] | null | null | null | ''' Extract a subset of lines from a file. '''
import random
def extract_n(datalist, number, save_header=True):
start = 1 if save_header else 0
subset = []
if save_header:
subset.append(datalist[0])
sample_indices = random.sample(range(start, len(datalist)), number)
for idx in sample_indices:
subset.append(datalist[idx])
return subset
| 24 | 69 | 0.713889 | 53 | 360 | 4.735849 | 0.566038 | 0.119522 | 0.095618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010135 | 0.177778 | 360 | 14 | 70 | 25.714286 | 0.837838 | 0.105556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24bdbfaa7f1f765ba8654015611f4aa1bebb433a | 2,005 | py | Python | SLClassifier/RepLoader.py | zhangrenyuuchicago/AdvReg4Intpltn | e6ce5a4db8213f0b6e7ba30f3f82375044c8dab0 | [
"MIT"
] | null | null | null | SLClassifier/RepLoader.py | zhangrenyuuchicago/AdvReg4Intpltn | e6ce5a4db8213f0b6e7ba30f3f82375044c8dab0 | [
"MIT"
] | null | null | null | SLClassifier/RepLoader.py | zhangrenyuuchicago/AdvReg4Intpltn | e6ce5a4db8213f0b6e7ba30f3f82375044c8dab0 | [
"MIT"
] | null | null | null | import torch
import torch.utils.data
import numpy as np
import csv
import random
import scvi
import scanpy as sc
import json
from collections import Counter
import json
import os
def convert_num(labels):
if os.path.exists('label2id.json'):
with open('label2id.json', 'r') as j:
label2id = json.load(j)
else:
label2id = {}
ctr = Counter(labels)
for label in ctr:
label2id[label] = len(label2id)
with open('label2id.json', 'w') as j:
json.dump(label2id, j)
label_id_lt = []
for label in labels:
label_id = label2id[label]
label_id_lt.append(label_id)
return np.array(label_id_lt)
class RepDataset(torch.utils.data.Dataset):
def __init__(self, rep_path):
matrix = []
labels = []
with open(rep_path, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
rep = row[:-1]
rep = [float(item) for item in rep]
matrix.append(rep)
labels.append(row[-1])
line_count += 1
self.mat = np.array(matrix)
self.labels = convert_num(labels)
print(f'rep num: {len(self.labels)}')
def get_weight(self):
label_num = {}
for label in self.labels:
if label in label_num:
label_num[label] += 1
else:
label_num[label] = 1
weight = []
for i in range(len(label_num)):
weight.append(1.0/label_num[i])
weight = np.array(weight)
weight = weight/ np.sum(weight)
return weight
def get_rep_dim(self):
return self.mat.shape[1]
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
view = torch.Tensor(self.mat[idx])
label = torch.LongTensor([int(self.labels[idx])])
return (view, label)
| 26.733333 | 60 | 0.558105 | 259 | 2,005 | 4.169884 | 0.293436 | 0.044444 | 0.027778 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013473 | 0.333666 | 2,005 | 74 | 61 | 27.094595 | 0.79491 | 0 | 0 | 0.0625 | 0 | 0 | 0.03493 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.171875 | 0.03125 | 0.359375 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24bddb341b044ae3fa62fcdfa2048a65f4b08738 | 1,091 | py | Python | MatplotlibView.py | thekoc/ginkgo-sql-gui | f5f2523df42d505ffdc06b3a1b80dba4181c7b83 | [
"Apache-2.0"
] | null | null | null | MatplotlibView.py | thekoc/ginkgo-sql-gui | f5f2523df42d505ffdc06b3a1b80dba4181c7b83 | [
"Apache-2.0"
] | null | null | null | MatplotlibView.py | thekoc/ginkgo-sql-gui | f5f2523df42d505ffdc06b3a1b80dba4181c7b83 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*
import wx
import matplotlib
matplotlib.use("WxAgg")
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
class MatplotlibPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.toolbar = self.add_toolbar()
self.SetSizer(self.sizer)
self.Fit()
def add_toolbar(self):
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.Realize()
self.sizer.Add(toolbar, 0, wx.ALIGN_CENTER | wx.EXPAND)
toolbar.update()
return toolbar
if __name__ == '__main__':
app = wx.App()
frame = wx.Frame(None, title='demo app')
p = MatplotlibPanel(frame)
frame.Show(True)
app.MainLoop()
| 30.305556 | 79 | 0.670027 | 135 | 1,091 | 5.251852 | 0.422222 | 0.050776 | 0.062059 | 0.081805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01049 | 0.213566 | 1,091 | 35 | 80 | 31.171429 | 0.815851 | 0.018332 | 0 | 0 | 0 | 0 | 0.019645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.172414 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24be2797fcd66e9b0985981bbaefcaaccc754cde | 3,608 | py | Python | examples/export_create_tables.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | [
"BSD-3-Clause"
] | 85 | 2016-08-07T16:46:58.000Z | 2022-03-23T01:44:02.000Z | examples/export_create_tables.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | [
"BSD-3-Clause"
] | 109 | 2016-08-02T18:40:04.000Z | 2021-08-23T08:08:02.000Z | examples/export_create_tables.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | [
"BSD-3-Clause"
] | 29 | 2016-11-29T03:42:47.000Z | 2022-01-23T17:37:11.000Z | import argparse
import os
import re
import warnings
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
from carto.sql import SQLClient
warnings.filterwarnings('ignore')
# set input arguments
parser = argparse.ArgumentParser(
description='Exports the CREATE TABLE scripts of all the account datasets')
parser.add_argument('--organization', type=str, dest='organization',
default=os.environ['CARTO_ORG'] if 'CARTO_ORG' in os.environ else '',
help='Set the name of the organization' +
' account (defaults to env variable CARTO_ORG)')
parser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',
default=os.environ['CARTO_API_URL'] if 'CARTO_API_URL' in os.environ else '',
help='Set the base URL. For example:' +
' https://username.carto.com/ ' +
'(defaults to env variable CARTO_API_URL)')
parser.add_argument('--api_key', dest='CARTO_API_KEY',
default=os.environ['CARTO_API_KEY'] if 'CARTO_API_KEY' in os.environ else '',
help='Api key of the account' +
' (defaults to env variable CARTO_API_KEY)')
args = parser.parse_args()
# Authenticate to CARTO account
if args.CARTO_BASE_URL and args.CARTO_API_KEY and args.organization:
auth_client = APIKeyAuthClient(
args.CARTO_BASE_URL, args.CARTO_API_KEY, args.organization)
dataset_manager = DatasetManager(auth_client)
else:
logger.error('You need to provide valid credentials, run with -h parameter for details')
import sys
sys.exit(1)
# SQL wrapper
sql = SQLClient(APIKeyAuthClient(args.CARTO_BASE_URL, args.CARTO_API_KEY))
# get username from base_url
substring = re.search('https://(.+?).carto.com', args.CARTO_BASE_URL)
if substring:
username = substring.group(1)
# check all table name of account
all_tables = []
tables = sql.send(
"select pg_class.relname from pg_class, pg_roles, pg_namespace" +
" where pg_roles.oid = pg_class.relowner and " +
"pg_roles.rolname = current_user " +
"and pg_namespace.oid = pg_class.relnamespace and pg_class.relkind = 'r'")
q = "select \
'CREATE TABLE ' || relname || E'\n(\n' || \
array_to_string( \
array_agg( \
' ' || column_name || ' ' || type || ' '|| not_null \
) \
, E',\n' \
) || E'\n);\n' as create_table \
from \
( \
select \
distinct on (column_name) c.relname, a.attname AS column_name, \
pg_catalog.format_type(a.atttypid, a.atttypmod) as type, \
case \
when a.attnotnull \
then 'NOT NULL' \
else 'NULL' \
END as not_null \
FROM pg_class c, \
pg_attribute a, \
pg_type t \
WHERE c.relname = '{table_name}' \
AND a.attnum > 0 \
AND a.attrelid = c.oid \
AND a.atttypid = t.oid \
and a.attname not in ('cartodb_id', 'the_geom_webmercator') \
ORDER BY column_name, a.attnum \
) as tabledefinition \
group by relname"
with open('create_table.sql', 'w') as f:
for k, v in tables.items():
if k == 'rows':
for itr in v:
try:
dataset_name = itr['relname']
print("Found dataset: " + dataset_name)
result = sql.send(q.format(table_name=dataset_name))
create_table = result['rows'][0]['create_table']
f.write(create_table + "\n")
except:
print("Error while exporting: " + dataset_name)
continue
f.close()
print('\nScript exported')
| 33.719626 | 97 | 0.624169 | 473 | 3,608 | 4.585624 | 0.331924 | 0.036883 | 0.0355 | 0.029507 | 0.142462 | 0.111572 | 0.06639 | 0.043338 | 0.043338 | 0 | 0 | 0.001496 | 0.259146 | 3,608 | 106 | 98 | 34.037736 | 0.809951 | 0.033259 | 0 | 0 | 0 | 0 | 0.269385 | 0.006031 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.093023 | 0 | 0.093023 | 0.034884 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24be8697b54674c0e0b86a367e1a14ca3ae70fb3 | 669 | py | Python | peer.py | hmccreanor/voip | c37ae98f53700c5d17bd8af83dbece1702f0bd10 | [
"MIT"
] | null | null | null | peer.py | hmccreanor/voip | c37ae98f53700c5d17bd8af83dbece1702f0bd10 | [
"MIT"
] | null | null | null | peer.py | hmccreanor/voip | c37ae98f53700c5d17bd8af83dbece1702f0bd10 | [
"MIT"
] | null | null | null | import socket
import sys
import threading
import time
import pyaudio
recipient = sys.argv[1]
inPort = int(sys.argv[2])
outPort = int(sys.argv[3])
def poll(sock):
while True:
data, addr = sock.recvfrom(1024)
print(data.decode())
inSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
outSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = socket.gethostname()
inSock.bind((host, inPort))
polling = threading.Thread(target = poll, args = (inSock,))
polling.daemon = True
polling.start()
while True:
msg = input()
if msg == "/q":
sys.exit()
else:
outSock.sendto(str.encode(msg), (recipient, outPort))
| 19.114286 | 61 | 0.675635 | 91 | 669 | 4.923077 | 0.527473 | 0.107143 | 0.044643 | 0.089286 | 0.174107 | 0.174107 | 0.174107 | 0.174107 | 0 | 0 | 0 | 0.012844 | 0.185351 | 669 | 34 | 62 | 19.676471 | 0.809174 | 0 | 0 | 0.08 | 0 | 0 | 0.002994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.2 | 0 | 0.24 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24bf2e20956de8f312c50f5e50f0efe1c031e2db | 10,450 | py | Python | CorrNet/main.py | hcji/CorrNet | 373c425a3c17ce29ace1e9d9403d8300b3e5d93b | [
"MIT"
] | 2 | 2019-11-28T14:04:19.000Z | 2020-09-01T08:59:25.000Z | CorrNet/main.py | hcji/CorrNet | 373c425a3c17ce29ace1e9d9403d8300b3e5d93b | [
"MIT"
] | null | null | null | CorrNet/main.py | hcji/CorrNet | 373c425a3c17ce29ace1e9d9403d8300b3e5d93b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 09:14:27 2019
@author: hcji
"""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # 这一行注释掉就是使用cpu,不注释就是使用gpu
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Add, concatenate, Conv1D, MaxPooling1D, Flatten
from keras.engine.topology import Layer
from keras import optimizers
from scipy.stats import pearsonr
'''
data_l = np.load('Data/data_l.npy')
data_r = np.load('Data/data_r.npy')
label = np.load('Data/data_label.npy')
test_l = np.load('Data/test_v1.npy')
test_r = np.load('Data/test_v2.npy')
test_label = np.load('Data/test_l.npy')
'''
class ZeroPadding(Layer):
def __init__(self, **kwargs):
super(ZeroPadding, self).__init__(**kwargs)
def call(self, x, mask=None):
return K.zeros_like(x)
def get_output_shape_for(self, input_shape):
return input_shape
class CorrnetCost(Layer):
def __init__(self,lamda, **kwargs):
super(CorrnetCost, self).__init__(**kwargs)
self.lamda = lamda
def cor(self,y1, y2, lamda):
y1_mean = K.mean(y1, axis=0)
y1_centered = y1 - y1_mean
y2_mean = K.mean(y2, axis=0)
y2_centered = y2 - y2_mean
corr_nr = K.sum(y1_centered * y2_centered, axis=0)
corr_dr1 = K.sqrt(K.sum(y1_centered * y1_centered, axis=0) + 1e-8)
corr_dr2 = K.sqrt(K.sum(y2_centered * y2_centered, axis=0) + 1e-8)
corr_dr = corr_dr1 * corr_dr2
corr = corr_nr / corr_dr
return K.sum(corr) * lamda
def call(self ,x ,mask=None):
h1=x[0]
h2=x[1]
corr = self.cor(h1,h2,self.lamda)
#self.add_loss(corr,x)
#we output junk but be sure to use it for the loss to be added
return corr
def get_output_shape_for(self, input_shape):
#print input_shape[0][0]
return (input_shape[0][0],input_shape[0][1])
def corr_loss(y_true, y_pred):
#print y_true.type,y_pred.type
#return K.zeros_like(y_pred)
return y_pred
class CorrNet:
def __init__(self, data_l, data_r, Lambda=0.02, nb_epoch=10):
self.data_l = data_l
self.data_r = data_r
self.Lambda = Lambda
self.nb_epoch = nb_epoch
dimx = self.data_l.shape[1]
dimy = self.data_r.shape[1]
inpx = Input(shape=(dimx,))
inpy = Input(shape=(dimy,))
hx = Dense(256, activation='relu')(inpx)
hx = Dense(128, activation='relu')(hx)
hy = Dense(256, activation='relu')(inpy)
hy = Dense(128, activation='relu')(hy)
h = Add()([hx,hy])
recx = Dense(128, activation='relu')(h)
recx = Dense(256, activation='relu')(recx)
recx = Dense(dimx, activation='relu')(recx)
recy = Dense(128, activation='relu')(h)
recy = Dense(256, activation='relu')(recy)
recy = Dense(dimy, activation='relu')(recy)
branchModel = Model([inpx,inpy], [recx,recy,h])
[recx1,recy1,h1] = branchModel([inpx, ZeroPadding()(inpy)])
[recx2,recy2,h2] = branchModel([ZeroPadding()(inpx), inpy])
[recx3,recy3,h] = branchModel([inpx, inpy])
corr = CorrnetCost(-Lambda)([h1,h2])
opt = optimizers.Adam(lr=0.01)
model = Model([inpx,inpy],[recx1,recx2,recx3,recy1,recy2,recy3,corr])
model.compile(loss=["mse","mse","mse","mse","mse","mse",corr_loss], optimizer=opt)
self.model = model
self.branchModel = branchModel
def train(self):
data_l = self.data_l
data_r = self.data_r
nb_epoch = self.nb_epoch
self.model.fit([data_l, data_r],
[data_l,data_l,data_l,data_r,data_r,data_r,np.ones(data_l.shape)], epochs=nb_epoch)
def left_to_right(self, new_data_l):
branchModel = self.branchModel
_,new_data_r,_ = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
return new_data_r
def right_to_left(self, new_data_r):
branchModel = self.branchModel
new_data_l,_,_ = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
return new_data_l
def left_to_latent(self, new_data_l):
branchModel = self.branchModel
_,_,h = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
return h
def right_to_latent(self, new_data_r):
branchModel = self.branchModel
_,_,h = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
return h
def both_to_latent(self, new_data_l, new_data_r):
branchModel = self.branchModel
_,_,h = branchModel.predict([new_data_l, new_data_r])
return h
### Some modification based on the the original model
### Not evaluate performance
class CorrTarget:
def __init__(self, data_l, data_r, target, Lambda=0.02, nb_epoch=10):
self.data_l = data_l
self.data_r = data_r
self.target = target
self.nb_epoch = nb_epoch
self.Lambda = Lambda
dimx = self.data_l.shape[1]
dimy = self.data_r.shape[1]
inpx = Input(shape=(dimx,))
inpy = Input(shape=(dimy,))
hx = Dense(256, activation='relu')(inpx)
hx = Dense(128, activation='relu')(hx)
hy = Dense(256, activation='relu')(inpy)
hy = Dense(128, activation='relu')(hy)
h = Add()([hx,hy])
t = Dense(64, activation='relu')(h)
t = Dense(32, activation='relu')(t)
t = Dense(1, activation='relu')(t)
branchModel = Model([inpx,inpy], [t,h])
[t1,h1] = branchModel([inpx, ZeroPadding()(inpy)])
[t2,h2] = branchModel([ZeroPadding()(inpx), inpy])
[t3,h] = branchModel([inpx, inpy])
corr = CorrnetCost(-Lambda)([h1,h2])
opt = optimizers.Adam(lr=0.01)
model = Model([inpx,inpy],[t1, t2, t3, corr])
model.compile(loss=["mse","mse","mse",corr_loss], optimizer=opt)
self.model = model
self.branchModel = branchModel
def train(self):
data_l = self.data_l
data_r = self.data_r
target = self.target
nb_epoch = self.nb_epoch
self.model.fit([data_l, data_r],
[target, target, target, np.ones(data_l.shape)], epochs=nb_epoch)
def predict_by_left(self, new_data_l):
branchModel = self.branchModel
t,_ = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
return t
def predict_by_right(self, new_data_r):
branchModel = self.branchModel
t,_ = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
return t
def predict_by_both(self, new_data_l, new_data_r):
branchModel = self.branchModel
t,_ = branchModel.predict([new_data_l, new_data_r])
return t
def left_to_latent(self, new_data_l):
branchModel = self.branchModel
_,h = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
return h
def right_to_latent(self, new_data_r):
branchModel = self.branchModel
_,h = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
return h
def both_to_latent(self, new_data_l, new_data_r):
branchModel = self.branchModel
_,h = branchModel.predict([new_data_l, new_data_r])
return h
class ConvCorrTarget:
def __init__(self, data_l, data_r, target, Lambda=0.02, nb_epoch=10):
self.data_l = data_l
self.data_r = data_r
self.target = target
self.nb_epoch = nb_epoch
self.Lambda = Lambda
dimx = self.data_l.shape[1:3]
dimy = self.data_r.shape[1:3]
inpx = Input(shape=dimx)
inpy = Input(shape=dimy)
hx = Conv1D(64, 3, activation='relu', kernel_initializer='normal')(inpx)
hx = MaxPooling1D(2)(hx)
hx = Conv1D(32, 3, activation='relu', kernel_initializer='normal')(hx)
hx = MaxPooling1D(2)(hx)
hy = Conv1D(64, 3, activation='relu', kernel_initializer='normal')(inpy)
hy = MaxPooling1D(2)(hy)
hy = Conv1D(32, 3, activation='relu', kernel_initializer='normal')(hy)
hy = MaxPooling1D(2)(hy)
hx = Flatten()(hx)
hy = Flatten()(hy)
h = Add()([hx,hy])
t = Dense(16, activation='relu')(h)
t = Dense(1, activation='relu')(t)
branchModel = Model([inpx,inpy], [t,h])
[t1,h1] = branchModel([inpx, ZeroPadding()(inpy)])
[t2,h2] = branchModel([ZeroPadding()(inpx), inpy])
[t3,h] = branchModel([inpx, inpy])
corr = CorrnetCost(-Lambda)([h1,h2])
opt = optimizers.Adam(lr=0.01)
model = Model([inpx,inpy],[t1, t2, t3, corr])
model.compile(loss=["mse","mse","mse",corr_loss], optimizer=opt)
self.model = model
self.branchModel = branchModel
def train(self):
data_l = self.data_l
data_r = self.data_r
target = self.target
nb_epoch = self.nb_epoch
self.model.fit([data_l, data_r],
[target, target, target, np.ones(data_l.shape)], epochs=nb_epoch)
def predict_by_left(self, new_data_l):
branchModel = self.branchModel
t,_ = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
return t
def predict_by_right(self, new_data_r):
branchModel = self.branchModel
t,_ = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
return t
def predict_by_both(self, new_data_l, new_data_r):
branchModel = self.branchModel
t,_ = branchModel.predict([new_data_l, new_data_r])
return t
def left_to_latent(self, new_data_l):
branchModel = self.branchModel
_,h = branchModel.predict([new_data_l, np.zeros(new_data_l.shape)])
return h
def right_to_latent(self, new_data_r):
branchModel = self.branchModel
_,h = branchModel.predict([np.zeros(new_data_r.shape), new_data_r])
return h
def both_to_latent(self, new_data_l, new_data_r):
branchModel = self.branchModel
_,h = branchModel.predict([new_data_l, new_data_r])
return h | 33.928571 | 101 | 0.598852 | 1,439 | 10,450 | 4.116053 | 0.121612 | 0.07091 | 0.04052 | 0.038494 | 0.728516 | 0.68884 | 0.672126 | 0.647138 | 0.604592 | 0.592774 | 0 | 0.025846 | 0.270622 | 10,450 | 308 | 102 | 33.928571 | 0.751246 | 0.031962 | 0 | 0.662162 | 0 | 0 | 0.01764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.13964 | false | 0 | 0.036036 | 0.018018 | 0.301802 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24c8220e8ee9158d4486cc7075172e26d11d35e1 | 3,847 | py | Python | discordSuperUtils/Ban.py | adam757521/discordSuperUtils-splitted | 7221cd0461be5311e4c51e93e332bdfaa92ae2b2 | [
"MIT"
] | 2 | 2021-09-09T02:56:38.000Z | 2021-09-09T03:55:49.000Z | discordSuperUtils/Ban.py | Z1R343L/discord-super-utils | c3b8c224ebe59a5acba1b9069c52cce029fdf9c4 | [
"MIT"
] | null | null | null | discordSuperUtils/Ban.py | Z1R343L/discord-super-utils | c3b8c224ebe59a5acba1b9069c52cce029fdf9c4 | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
from datetime import datetime
from typing import (
TYPE_CHECKING,
Union,
Optional
)
import discord
from .Base import DatabaseChecker
from .Punishments import Punisher
if TYPE_CHECKING:
from .Punishments import Punishment
from discord.ext import commands
class UnbanFailure(Exception):
"""Raises an exception when the user tries to unban a discord.User without passing the guild."""
class BanManager(DatabaseChecker, Punisher):
def __init__(self, bot: commands.Bot):
super().__init__([{'guild': "snowflake", 'member': "snowflake", 'reason': "string", 'timestamp': "snowflake"}],
['bans'])
self.bot = bot
self.add_event(self.on_database_connect)
async def on_database_connect(self):
self.bot.loop.create_task(self.__check_bans())
async def get_banned_members(self):
"""
        Returns the ban records whose unban timestamp has already passed,
        i.e. members that should be unbanned but are still banned.
        :return: A list of ban records from the 'bans' table.
"""
return [x for x in await self.database.select(self.tables['bans'], [], fetchall=True)
if x["timestamp"] <= datetime.utcnow().timestamp()]
async def __check_bans(self) -> None:
await self.bot.wait_until_ready()
while not self.bot.is_closed():
for banned_member in await self.get_banned_members():
guild = self.bot.get_guild(banned_member['guild'])
if guild is None:
continue
user = await self.bot.fetch_user(banned_member['member'])
if await self.unban(user, guild):
await self.call_event("on_unban", user, banned_member['reason'])
await asyncio.sleep(300)
async def punish(self, ctx: commands.Context, member: discord.Member, punishment: Punishment) -> None:
try:
self.bot.loop.create_task(
self.ban(member, punishment.punishment_reason, punishment.punishment_time.total_seconds())
)
except discord.errors.Forbidden as e:
raise e
else:
await self.call_event("on_punishment", ctx, member, punishment)
@staticmethod
async def get_ban(member: Union[discord.Member, discord.User], guild: discord.Guild) -> Optional[discord.User]:
banned = await guild.bans()
for x in banned:
if x.user.id == member.id:
return x.user
async def unban(self, member: Union[discord.Member, discord.User], guild: discord.Guild = None) -> bool:
self._check_database()
if isinstance(member, discord.User) and not guild:
raise UnbanFailure("Cannot unban a discord.User without a guild.")
guild = guild if guild is not None else member.guild
await self.database.delete(self.tables['bans'], {'guild': guild.id, 'member': member.id})
if user := await self.get_ban(member, guild):
await guild.unban(user)
return True
async def ban(self,
member: discord.Member,
reason: str = "No reason provided.",
time_of_ban: Union[int, float] = 0) -> None:
self._check_database()
await member.ban(reason=reason)
if time_of_ban <= 0:
return
await self.database.insert(self.tables['bans'], {'guild': member.guild.id,
'member': member.id,
'reason': reason,
'timestamp': datetime.utcnow().timestamp() + time_of_ban})
await asyncio.sleep(time_of_ban)
if await self.unban(member):
await self.call_event("on_unban", member, reason)
| 34.348214 | 119 | 0.597089 | 445 | 3,847 | 5.020225 | 0.276404 | 0.048344 | 0.016115 | 0.024172 | 0.140555 | 0.091316 | 0.046553 | 0.046553 | 0.046553 | 0 | 0 | 0.001857 | 0.300234 | 3,847 | 111 | 120 | 34.657658 | 0.828009 | 0.023395 | 0 | 0.026316 | 0 | 0 | 0.063483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.118421 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24c94333f1c59b0fa2a2847c041b962bdb757ab4 | 2,393 | py | Python | mogp_emulator/demos/kdr_demos.py | EXAUQ/mogp-emulator | 9d5772135498bdf5b95b44b4afb065c2c266f899 | [
"MIT"
] | 21 | 2021-01-20T07:02:12.000Z | 2022-03-30T21:09:04.000Z | mogp_emulator/demos/kdr_demos.py | EXAUQ/mogp-emulator | 9d5772135498bdf5b95b44b4afb065c2c266f899 | [
"MIT"
] | 114 | 2019-04-25T14:53:11.000Z | 2021-01-06T17:07:41.000Z | mogp_emulator/demos/kdr_demos.py | EXAUQ/mogp-emulator | 9d5772135498bdf5b95b44b4afb065c2c266f899 | [
"MIT"
] | 8 | 2021-02-02T08:56:12.000Z | 2022-02-15T10:03:15.000Z | import mogp_emulator
import numpy as np
# simple Dimension Reduction examples
# simulator function -- returns a single "important" dimension from
# at least 4 inputs
def f(x):
return (x[0]-x[1]+2.*x[3])/3.
# Experimental design -- create a design with 5 input parameters
# all uniformly distributed over [0,1].
ed = mogp_emulator.LatinHypercubeDesign(5)
# sample space
inputs = ed.sample(100)
# run simulation
targets = np.array([f(p) for p in inputs])
###################################################################################
# First example -- dimension reduction given a specified number of dimensions
# (note that in real life, we do not know that the underlying simulation only
# has a single dimension)
print("Example 1: Basic Dimension Reduction")
# create DR object with a single reduced dimension (K = 1)
dr = mogp_emulator.gKDR(inputs, targets, K=1)
# use it to create GP
gp = mogp_emulator.fit_GP_MAP(dr(inputs), targets)
# create 5 target points to predict
predict_points = ed.sample(5)
predict_actual = np.array([f(p) for p in predict_points])
means = gp(dr(predict_points))
for pp, m, a in zip(predict_points, means, predict_actual):
print("Target point: {} Predicted mean: {} Actual mean: {}".format(pp, m, a))
###################################################################################
# Second Example: Estimate dimensions from data
print("Example 2: Estimate the number of dimensions from the data")
# Use the tune_parameters method to use cross validation to create DR object
# Note this is more realistic than the above as it does not know the
# number of dimensions in advance
dr_tuned, loss = mogp_emulator.gKDR.tune_parameters(inputs, targets,
mogp_emulator.fit_GP_MAP,
cXs=[3.], cYs=[3.])
# Get number of inferred dimensions (usually gives 2)
print("Number of inferred dimensions is {}".format(dr_tuned.K))
# use object to create GP
gp_tuned = mogp_emulator.fit_GP_MAP(dr_tuned(inputs), targets)
# create 5 target points to predict
predict_points = ed.sample(5)
predict_actual = np.array([f(p) for p in predict_points])
means = gp_tuned(dr_tuned(predict_points))
for pp, m, a in zip(predict_points, means, predict_actual):
print("Target point: {} Predicted mean: {} Actual mean: {}".format(pp, m, a))
| 29.182927 | 83 | 0.654409 | 340 | 2,393 | 4.508824 | 0.352941 | 0.067841 | 0.046967 | 0.017613 | 0.307241 | 0.294194 | 0.265493 | 0.255708 | 0.255708 | 0.255708 | 0 | 0.012821 | 0.185123 | 2,393 | 81 | 84 | 29.54321 | 0.773333 | 0.361889 | 0 | 0.307692 | 0 | 0 | 0.172388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0.038462 | 0.153846 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24ce5f9fcb194a70d3a391932da2b7673b304ad3 | 3,969 | py | Python | 2_vae_denoiser_data_gen.py | Ladvien/denoising_vae | c61433a41ba69ca305ab1a7b526f8560a70bad4e | [
"MIT"
] | null | null | null | 2_vae_denoiser_data_gen.py | Ladvien/denoising_vae | c61433a41ba69ca305ab1a7b526f8560a70bad4e | [
"MIT"
] | null | null | null | 2_vae_denoiser_data_gen.py | Ladvien/denoising_vae | c61433a41ba69ca305ab1a7b526f8560a70bad4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 17:27:59 2020
@author: ladvien
"""
import sys
import os
import cv2
import numpy as np
from random import randint
import matplotlib.pyplot as plt
image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)
from image_utils import ImageUtils
iu = ImageUtils()
def noisy(noise_typ, image):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 0.1
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
noisy = image + gauss
return noisy
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
    elif noise_typ == "speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
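# A small sketch (not in the original script) of the helper above; "sample.png"
# is a placeholder path. Any of "gauss", "s&p", "poisson" or "speckle" works.
def _noisy_demo(path="sample.png", noise_typ="gauss"):
    img = cv2.imread(path).astype("float32")
    return noisy(noise_typ, img)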
#############
# Parameters
#############
input_path = "/home/ladvien/denoising_vae/data/extracted/"
output_path = "/home/ladvien/denoising_vae/data/train/"
threshold = 240
color_range = 30
shape_range = 40
size_range = 1
num_pepper = 20
specks_per_pepper = 10
group_range = 50
image_shape = (64, 64)
show = True
#############
# Extract
#############
clear_img_path = f"{output_path}clear/"
noise_img_path = f"{output_path}noise/"
if not os.path.exists(clear_img_path):
os.makedirs(clear_img_path)
if not os.path.exists(noise_img_path):
os.makedirs(noise_img_path)
file_paths = iu.get_image_files_recursively(input_path)
counter = 0
for file_path in file_paths:
file_name = file_path.split("/")[-1]
    output_file_path = output_path + file_name
clear_image = cv2.imread(file_path)
clear_image = cv2.resize(clear_image, image_shape)
_, image = cv2.threshold(clear_image, 127, 255, cv2.THRESH_BINARY)
clear_image = cv2.cvtColor(clear_image, cv2.COLOR_BGR2RGB)
noise_img = clear_image.copy()
for i in range(0, num_pepper):
# Radius of circle
radius = randint(0, shape_range)
b = randint(0, color_range)
g = randint(0, color_range)
r = randint(0, color_range)
# BGR
color = (b, g, r)
# Center coordinates
y = randint(0, image_shape[1])
x = randint(0, image_shape[1])
for j in range(0, specks_per_pepper):
group_x_offset = randint(group_range*-1, group_range)
group_y_offset = randint(group_range*-1, group_range)
# Size
radius = randint(0, size_range)
noise_img = cv2.circle(noise_img, (x + group_x_offset, y + group_y_offset), radius, color, -1)
if show and counter < 16:
plt.imshow(noise_img, cmap="gray")
plt.show()
try:
file_name = f"{counter}.png"
print(f"Writing file {file_name}")
cv2.imwrite(noise_img_path + file_name, noise_img)
cv2.imwrite(clear_img_path + file_name, clear_image)
except:
print(f"Removed: {file_path}")
counter+=1
| 26.111842 | 106 | 0.581759 | 541 | 3,969 | 4.066543 | 0.295749 | 0.040909 | 0.025455 | 0.017727 | 0.285455 | 0.151818 | 0.123636 | 0.07 | 0.07 | 0.07 | 0 | 0.031553 | 0.297304 | 3,969 | 151 | 107 | 26.284768 | 0.757261 | 0.046107 | 0 | 0.104167 | 0 | 0 | 0.061878 | 0.029056 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010417 | false | 0 | 0.072917 | 0 | 0.125 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24dd3da7f5133aac9176f92bd1db427450396bd2 | 1,509 | py | Python | sweat/io/generic.py | GoldenCheetah/sweatpy | eed6b34ff75c16fcbad878caded8ee4d18dea589 | [
"MIT"
] | 58 | 2018-03-10T08:26:10.000Z | 2022-03-20T11:23:50.000Z | sweat/io/generic.py | GoldenCheetah/sweatpy | eed6b34ff75c16fcbad878caded8ee4d18dea589 | [
"MIT"
] | 19 | 2018-03-10T13:09:49.000Z | 2022-03-18T10:31:19.000Z | sweat/io/generic.py | GoldenCheetah/sweatpy | eed6b34ff75c16fcbad878caded8ee4d18dea589 | [
"MIT"
] | 20 | 2018-03-09T19:16:15.000Z | 2022-03-08T00:21:38.000Z | from pathlib import Path
from typing import Generator, Union
import pandas as pd
from .fit import read_fit
from .gpx import read_gpx
from .tcx import read_tcx
def read_file(fpath: Union[str, Path], *args, **kwargs) -> pd.DataFrame:
"""This method tries to recognize the file type of the fpath argument by reading the file extension (suffix).
Please note that this method does not support file-like objects, in contrast to the other read_* functions of sweatpy.
Args:
fpath: str or Path object representing the path to a file.
Returns:
Returns an activity as a pandas data frames.
"""
suffix = Path(fpath).suffix.lower()
if suffix == ".tcx":
read_func = read_tcx
elif suffix == ".gpx":
read_func = read_gpx
elif suffix == ".fit":
read_func = read_fit
else:
raise ValueError(
f"Argument fpath ({fpath}) has an unsupported file extensions (suffix): {suffix}"
)
return read_func(fpath, *args, **kwargs)
def read_dir(path: Union[str, Path]) -> Generator[pd.DataFrame, None, None]:
"""Generator function that returns activities in a directory as pandas data frames.
Args:
path: str or Path object representing the path to a directory with activity files.
Yields:
Yields activities as pandas data frames.
"""
path = Path(path)
assert path.is_dir()
for f in path.iterdir():
if f.is_dir():
continue
yield read_file(f)
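# --- Usage sketch (not part of the original module) ---
# "activity.fit" and "activities/" are placeholder paths.
def _example():
    single = read_file("activity.fit")              # dispatched to read_fit via the suffix
    all_activities = list(read_dir("activities/"))  # one DataFrame per file in the directory
    return single, all_activities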
| 26.946429 | 122 | 0.656064 | 211 | 1,509 | 4.616114 | 0.388626 | 0.032854 | 0.049281 | 0.030801 | 0.075975 | 0.075975 | 0.075975 | 0.075975 | 0.075975 | 0 | 0 | 0 | 0.259112 | 1,509 | 55 | 123 | 27.436364 | 0.871199 | 0.386349 | 0 | 0 | 0 | 0 | 0.103687 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24de9ab8b2a618abe1a2446b71c946a46ce2cf44 | 5,542 | py | Python | python/import_gnomad_vep.py | mkanai/slalom-paper | 3d96b1778bdb3d91ac7bf70667460a88c2db530a | [
"MIT"
] | null | null | null | python/import_gnomad_vep.py | mkanai/slalom-paper | 3d96b1778bdb3d91ac7bf70667460a88c2db530a | [
"MIT"
] | null | null | null | python/import_gnomad_vep.py | mkanai/slalom-paper | 3d96b1778bdb3d91ac7bf70667460a88c2db530a | [
"MIT"
] | null | null | null | import argparse
import hail as hl
from gnomad.utils.vep import (
process_consequences,
filter_vep_to_canonical_transcripts,
get_most_severe_consequence_for_summary,
CSQ_CODING_HIGH_IMPACT,
CSQ_CODING_MEDIUM_IMPACT,
CSQ_CODING_LOW_IMPACT,
CSQ_NON_CODING,
)
from hail.genetics import reference_genome
from fm_insights.utils import register_log, annotate_bed
coding_high = hl.set(CSQ_CODING_HIGH_IMPACT)
coding_medium = hl.set(CSQ_CODING_MEDIUM_IMPACT)
coding_low = hl.set(CSQ_CODING_LOW_IMPACT)
non_coding = hl.set(CSQ_NON_CODING)
bed_files = {
"GRCH37": [
"gs://finemapping-insights/annotations/baselineLD_v2.2/Promoter_UCSC.bed",
"gs://finemapping-insights/annotations/Ulirsch_v1.0/DHSmerged_Ulirsch.bed",
"gs://finemapping-insights/annotations/Ulirsch_v1.0/Roadmap_H3K27ac_Ulirsch.bed",
"gs://finemapping-insights/annotations/Ulirsch_v1.0/CA_H3K27ac_Ulirsch.bed",
],
"GRCh38": [
"gs://meta-finemapping-simulation/annotations_hg38/Promoter_UCSC.bed",
"gs://meta-finemapping-simulation/annotations_hg38/DHSmerged_Ulirsch.bed",
"gs://meta-finemapping-simulation/annotations_hg38/Roadmap_H3K27ac_Ulirsch.bed",
"gs://meta-finemapping-simulation/annotations_hg38/CA_H3K27ac_Ulirsch.bed",
],
}
gnomad_latest_versions = {"GRCh37": "2.1.1", "GRCh38": "3.1.2"}
gnomad_v2_pops = ["afr", "amr", "asj", "eas", "fin", "nfe", "nfe_est", "nfe_nwe", "nfe_onf", "nfe_seu"]
gnomad_v3_pops = ["afr", "ami", "amr", "asj", "eas", "mid", "fin", "nfe", "oth", "sas"]
def annotate_consequence_category(csq_expr, annot_location="consequence_category"):
annot_expr = {
annot_location: hl.case()
.when(coding_high.contains(csq_expr), "coding_high")
.when(coding_medium.contains(csq_expr), "coding_medium")
.when(coding_low.contains(csq_expr), "coding_low")
.when(non_coding.contains(csq_expr), "non_coding")
.or_missing()
}
return annot_expr
def main(args):
reference_genome = args.reference_genome
if reference_genome == "GRCh37":
from gnomad.resources.grch37.gnomad import public_release
ht = public_release("genomes").versions[gnomad_latest_versions[reference_genome]].ht()
freq_index_dict = ht.freq_index_dict.collect()[0]
freq_expr = {pop: ht.freq[freq_index_dict[f"gnomad_{pop}"]] for pop in gnomad_v2_pops}
freq_expr.update({"all": ht.freq[freq_index_dict[f"gnomad"]]})
elif reference_genome == "GRCh38":
from gnomad.resources.grch38.gnomad import public_release
ht = public_release("genomes").versions[gnomad_latest_versions[reference_genome]].ht()
freq_index_dict = ht.freq_index_dict.collect()[0]
freq_expr = {pop: ht.freq[freq_index_dict[f"{pop}-adj"]] for pop in gnomad_v3_pops}
freq_expr.update({"all": ht.freq[freq_index_dict[f"adj"]]})
else:
raise ValueError("Invalid --reference-genome")
ht = ht.annotate(freq=hl.struct(**freq_expr))
ht = filter_vep_to_canonical_transcripts(ht)
ht = process_consequences(ht)
ht = get_most_severe_consequence_for_summary(ht)
# extract most severe
ht = ht.select(
freq=ht.freq,
most_severe=hl.if_else(hl.is_defined(ht.most_severe_csq), ht.most_severe_csq, "intergenic_variant"),
gene_most_severe=ht.vep.worst_csq_for_variant_canonical.gene_symbol,
lof=ht.vep.worst_csq_for_variant_canonical.lof,
hgnc_id=ht.vep.worst_csq_for_variant_canonical.hgnc_id,
hgvsp=ht.vep.worst_csq_for_variant_canonical.hgvsp,
transcript_id=ht.vep.worst_csq_for_variant_canonical.transcript_id,
polyphen_prediction=ht.vep.worst_csq_for_variant_canonical.polyphen_prediction,
polyphen_score=ht.vep.worst_csq_for_variant_canonical.polyphen_score,
sift_prediction=ht.vep.worst_csq_for_variant_canonical.sift_prediction,
sift_score=ht.vep.worst_csq_for_variant_canonical.sift_score,
protein_coding=ht.protein_coding,
)
ht = ht.select_globals()
ht = ht.annotate(**annotate_consequence_category(ht.most_severe))
ht = annotate_bed(ht, bed_files=bed_files[reference_genome], reference_genome=reference_genome)
ht = ht.annotate(
consequence=(
hl.case(missing_false=True)
.when(hl.is_defined(ht.lof) & (ht.lof != "LC"), "pLoF")
.when(
(ht.lof == "LC")
| (ht.consequence_category == "coding_high")
| (ht.consequence_category == "coding_medium"),
"Missense",
)
.when(ht.consequence_category == "coding_low", "Synonymous")
.when(ht.most_severe == "3_prime_UTR_variant", "UTR3")
.when(ht.most_severe == "5_prime_UTR_variant", "UTR5")
.when(ht.Promoter_UCSC == 1, "Promoter")
.when(
(ht.DHSmerged_Ulirsch == 1) & ((ht.Roadmap_H3K27ac_Ulirsch == 1) | (ht.CA_H3K27ac_Ulirsch == 1)), "CRE"
)
.default("Non-genic")
)
)
ht.describe()
ht = ht.checkpoint(
f"gs://meta-finemapping-simulation/gnomad/gnomad.genomes.r{gnomad_latest_versions[args.reference_genome]}.sites.most_severe.ht",
overwrite=args.overwrite,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--reference-genome", type=str, required=True)
parser.add_argument("--overwrite", action="store_true")
args = parser.parse_args()
register_log()
main(args)
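# Example invocation (a sketch; a Hail-enabled environment and access to the
# buckets referenced above are assumed):
#   python import_gnomad_vep.py --reference-genome GRCh38 --overwrite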
| 41.358209 | 136 | 0.690184 | 720 | 5,542 | 4.9625 | 0.227778 | 0.054576 | 0.025189 | 0.032746 | 0.366639 | 0.326336 | 0.29555 | 0.252729 | 0.141058 | 0.111951 | 0 | 0.015432 | 0.181523 | 5,542 | 133 | 137 | 41.669173 | 0.772266 | 0.003428 | 0 | 0.070796 | 0 | 0.00885 | 0.209745 | 0.127694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017699 | false | 0 | 0.061947 | 0 | 0.088496 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24dfeb32f732cda7cc1bd5475ac2eba78563c4bb | 1,196 | py | Python | tests/tools.py | tkonopka/crossmap | 237e4319a77281490c4e037918977230fea43d7e | [
"MIT"
] | 1 | 2021-08-12T11:40:10.000Z | 2021-08-12T11:40:10.000Z | tests/tools.py | tkonopka/crossmap | 237e4319a77281490c4e037918977230fea43d7e | [
"MIT"
] | null | null | null | tests/tools.py | tkonopka/crossmap | 237e4319a77281490c4e037918977230fea43d7e | [
"MIT"
] | null | null | null | """
Helper functions used within the test suite
"""
import glob
from contextlib import suppress
from os import environ, remove, rmdir
from os.path import join, exists
from pymongo import MongoClient
def remove_file(files):
"""remove a single file if it exists."""
for f in files:
if exists(f):
remove(f)
def remove_crossmap_cache(dir, name, use_subdir=True):
"""remove any crossmap cache files for a crossmap project"""
host = environ["MONGODB_HOST"] if "MONGODB_HOST" in environ else "0.0.0.0"
port = environ["MONGODB_PORT"] if "MONGODB_PORT" in environ else 8097
client = MongoClient(host=host, port=int(port),
username="crossmap", password="crossmap")
client.drop_database(name)
crossmap_data_dir = join(dir, name) if use_subdir else dir
prefix = join(crossmap_data_dir, name)
all_filenames = glob.glob(prefix+"*")
remove_file(all_filenames)
if exists(crossmap_data_dir):
with suppress(OSError):
rmdir(crossmap_data_dir)
def remove_cachefile(dir, filename):
"""remove a specific crossmap cache file"""
filepath = join(dir, filename)
remove_file([filepath])
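# --- Usage sketch (not part of the original helpers) ---
# "/tmp/crossmap" and "demo_project" are placeholder values; in the test suite
# the cache directory and project name come from the test configuration.
def _cleanup_example():
    remove_crossmap_cache("/tmp/crossmap", "demo_project")
    remove_cachefile("/tmp/crossmap", "cache.tsv")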
| 27.813953 | 78 | 0.686455 | 163 | 1,196 | 4.895706 | 0.380368 | 0.06015 | 0.075188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008502 | 0.213211 | 1,196 | 42 | 79 | 28.47619 | 0.839532 | 0.142977 | 0 | 0 | 0 | 0 | 0.071928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0.04 | 0.2 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24e2ecbea1b137afbbe27a88d7fa408842091b46 | 1,131 | py | Python | examples/scripts/flatten.py | bcdev/nc2zarr | 4ba4829d78c4bda593515685729d32baa83cabc2 | [
"MIT"
] | 5 | 2021-01-08T14:16:31.000Z | 2021-09-13T04:56:40.000Z | examples/scripts/flatten.py | bcdev/nc2zarr | 4ba4829d78c4bda593515685729d32baa83cabc2 | [
"MIT"
] | 42 | 2021-01-06T11:01:12.000Z | 2022-03-04T15:46:15.000Z | examples/scripts/flatten.py | bcdev/nc2zarr | 4ba4829d78c4bda593515685729d32baa83cabc2 | [
"MIT"
] | 2 | 2020-12-28T08:51:27.000Z | 2021-08-13T12:46:04.000Z | #!/usr/bin/env python3
"""Rechunk a Zarr with chunks of size 1 in time, full size in lat/lon.
If s3fs is installed, "s3://..." arguments can be used and credentials
will be read from standard environment variables or files (see s3fs docs).
The output dataset will have the same data as the input dataset, rechunked
so that the chunks are flat time slices. That is, the chunks will have
size 1 in the time dimension and cover the full extent of the dataset in
the lat and lon dimensions.
"""
import xarray as xr
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_zarr')
parser.add_argument('output_zarr')
args = parser.parse_args()
rechunk(args.input_zarr, args.output_zarr)
def rechunk(input_path, output_path):
ds = xr.open_dataset(input_path, engine="zarr")
for var in ds:
del ds[var].encoding['chunks']
full_lat = len(ds.lat)
full_lon = len(ds.lon)
ds_rechunked = ds.chunk({'time': 1, 'lat': full_lat, 'lon': full_lon})
print('Writing output Zarr...')
ds_rechunked.to_zarr(output_path)
if __name__ == '__main__':
main()
| 29.763158 | 74 | 0.709991 | 180 | 1,131 | 4.311111 | 0.455556 | 0.03866 | 0.018041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0076 | 0.185676 | 1,131 | 37 | 75 | 30.567568 | 0.834962 | 0.42794 | 0 | 0 | 0 | 0 | 0.110938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24e403742d04ac820900a705be239f8e41ce14fb | 4,724 | py | Python | algorythmic_heights/scc/scc.py | localmonkey/rosalind | dceab11d4938c1325075988be091abd3a5d25824 | [
"MIT"
] | null | null | null | algorythmic_heights/scc/scc.py | localmonkey/rosalind | dceab11d4938c1325075988be091abd3a5d25824 | [
"MIT"
] | null | null | null | algorythmic_heights/scc/scc.py | localmonkey/rosalind | dceab11d4938c1325075988be091abd3a5d25824 | [
"MIT"
] | null | null | null | #import numpy as np
class MyGrph:
def __init__(self, V_quant, E_quant):
self.V = V_quant
self.E = E_quant
self.edges_list = [[]] * (self.V)
self.reverse_edges_list = [[]] * (self.V)
def build_graph(self, f):
for i in range(0, self.E):
vertex1, vertex2 = map(int, f.readline().strip().split())
self.edges_list[vertex1 - 1] = self.edges_list[vertex1 - 1] + [vertex2 - 1]
self.reverse_edges_list[vertex2 - 1] = self.reverse_edges_list[vertex2 - 1] + [vertex1 - 1]
def dfs(self, vertex, visited_list):
vertex = vertex - 1
visited_list[vertex] = True
stack_l = []
stack_l.append(vertex)
while len(stack_l) > 0:
vrtx = stack_l.pop()
visited_list[vrtx] = True
for i in self.edges_list[vrtx]:
if visited_list[i] is False:
stack_l.append(i)
return visited_list
def check_aciclity(self):
visited_list = [False]*self.V
#for v_comp in self.edges_list[self.V]:
for v_comp in range(0, self.V):
if visited_list[v_comp] is True:
continue
check_list = [False]*self.V
vertex = v_comp
stack_l = []
stack_l.append(vertex)
while len(stack_l) > 0:
vrtx = stack_l[len(stack_l) - 1]
if visited_list[vrtx] is False:
visited_list[vrtx] = True
check_list[vrtx] = True
else:
check_list[vrtx] = False
stack_l.pop()
continue
for i in self.edges_list[vrtx]:
if visited_list[i] is False:
stack_l.append(i)
if check_list[i] is True:
return -1
return 1
def topologically_sort(self):
"""
        Not actually a topological sort: the graph in this task may contain cycles.
        Returns the vertices in decreasing order of DFS finishing time (the first pass of Kosaraju's algorithm).
"""
sorted_grph = []
cnt = self.V - 1
visited_list = [False]*self.V
for v_comp in range(0, self.V):
if visited_list[v_comp] is True:
continue
#check_list = [False]*self.V
vertex = v_comp
stack_l = []
stack_l.append(vertex)
while len(stack_l) > 0:
vrtx = stack_l[len(stack_l) - 1]
if visited_list[vrtx] is False:
visited_list[vrtx] = True
#check_list[vrtx] = True
else:
#check_list[vrtx] = False
stack_l.pop()
if not vrtx in sorted_grph:
sorted_grph.append(vrtx)
continue
for i in self.edges_list[vrtx]:
if visited_list[i] is False:
stack_l.append(i)
#if check_list[i] is True:
# graph is cyclic
#return -1
return sorted_grph[::-1]
def hdag(self):
sorted_grph = self.topologically_sort()
for i in range(0, self.V - 1):
if not ((sorted_grph[i + 1] - 1) in
self.edges_list[sorted_grph[i] - 1]):
return [-1]
return [1] + sorted_grph
def scc(self):
scc_cnt = 0
sorted_grph = self.topologically_sort()
visited_list = [False]*self.V
for v_comp in sorted_grph:
if visited_list[v_comp] is True:
continue
scc_cnt = scc_cnt + 1
vertex = v_comp
stack_l = []
stack_l.append(vertex)
while len(stack_l) > 0:
vrtx = stack_l.pop()
visited_list[vrtx] = True
for i in self.reverse_edges_list[vrtx]:
if visited_list[i] is False:
stack_l.append(i)
return scc_cnt
def conn_comp(self):
counter = 0
visited_list = [False] * self.V
while all(visited_list) is False:
for i in range(0, len(visited_list)):
if visited_list[i] is False:
visited_list = self.dfs(i + 1, visited_list)
counter = counter + 1
return counter
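# --- Worked sketch (not part of the original solution) ---
# The directed graph 1->2, 2->3, 3->1, 3->4 has two strongly connected
# components, {1, 2, 3} and {4}. The in-memory stream mirrors the Rosalind
# input format: "V E" on the first line, then one edge per line.
def _scc_example():
    import io
    stream = io.StringIO("4 4\n1 2\n2 3\n3 1\n3 4\n")
    v_quant, e_quant = map(int, stream.readline().strip().split())
    grph = MyGrph(v_quant, e_quant)
    grph.build_graph(stream)
    return grph.scc()  # the graph above has two strongly connected components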
if __name__ == "__main__":
with open("rosalind_scc.txt", "r") as f:
vertex_quant, edges_quant = map(int, f.readline().strip().split())
grph = MyGrph(vertex_quant, edges_quant)
grph.build_graph(f)
rslt = grph.scc()
print(rslt)
with open("scc_answer.txt", "w") as f:
f.write(str(rslt))
| 34.481752 | 103 | 0.491956 | 579 | 4,724 | 3.7962 | 0.141623 | 0.125114 | 0.059145 | 0.038217 | 0.621929 | 0.545041 | 0.49818 | 0.49818 | 0.454504 | 0.419017 | 0 | 0.01513 | 0.412362 | 4,724 | 136 | 104 | 34.735294 | 0.776657 | 0.051228 | 0 | 0.504425 | 0 | 0 | 0.009003 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070796 | false | 0 | 0 | 0 | 0.150442 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24e941de480f9fd93454090b5da0fb71dc8afdca | 1,193 | py | Python | allgemein/test_tastatur.py | kanopus1958/Python | 795d0e7c3479032a71e1c1246391772619e1e6ef | [
"MIT"
] | null | null | null | allgemein/test_tastatur.py | kanopus1958/Python | 795d0e7c3479032a71e1c1246391772619e1e6ef | [
"MIT"
] | null | null | null | allgemein/test_tastatur.py | kanopus1958/Python | 795d0e7c3479032a71e1c1246391772619e1e6ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Program      : test_tastatur.py
# Version      : 1.01
# Last changed : 17.02.2022
# Author       : Kanopus1958
# Description  : Keyboard example in Python
from rwm_steuerung import color as c
from rwm_mod01 import show_header
import platform
import sys
G_OS = ('Raspbian', 'Debian')
G_HEADER_1 = '# Test Tastatur (Python-Beispi'
G_HEADER_2 = 'el) #'
if platform.system() == 'Linux':
import tty
import termios
def inkey():
fd = sys.stdin.fileno()
while True:
remember_attributes = termios.tcgetattr(fd)
tty.setraw(fd)
        character = sys.stdin.read(1)  # read single characters only
termios.tcsetattr(fd, termios.TCSADRAIN, remember_attributes)
if character == 'q':
break
if character != '\x1b' and character != '[': # x1b is ESC
sys.stdout.write(character)
sys.stdout.flush()
# print(character)
def _main():
show_header(G_HEADER_1, G_HEADER_2, __file__, G_OS)
print("\nTasteninput gestartet (Beenden mit 'q')\n")
inkey()
print()
print("\nTasteninput gestoppt\n")
if __name__ == "__main__":
_main()
| 24.854167 | 71 | 0.61777 | 147 | 1,193 | 4.802721 | 0.585034 | 0.03966 | 0.022663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028571 | 0.266555 | 1,193 | 47 | 72 | 25.382979 | 0.778286 | 0.189438 | 0 | 0 | 0 | 0 | 0.167015 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.266667 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f18b66c7006add174b6858a066253782ef852a | 941 | py | Python | service.py | DropB1t/ClickerPy | a5142e1afcc27d6e557627dbbf75d8f224213431 | [
"MIT"
] | 1 | 2020-09-14T09:35:21.000Z | 2020-09-14T09:35:21.000Z | service.py | DropB1t/ClickerPy | a5142e1afcc27d6e557627dbbf75d8f224213431 | [
"MIT"
] | null | null | null | service.py | DropB1t/ClickerPy | a5142e1afcc27d6e557627dbbf75d8f224213431 | [
"MIT"
] | null | null | null | import socket
import struct
# The send_name function asks the user, via input, for the nickname to send to the server
def send_name():
while True:
name = input('Please enter your nickname (maximum 20 characters) --> ')
if len(name) > 0 and len(name) < 21:
return name.encode('utf-8')
def connect():
ip = input("Enter the server ip--> ")
port = 1235
multicast_group = '225.1.1.1'
multicast_port = 5007
udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM )
udps.bind(('', multicast_port ))
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
udps.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
tcps = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
tcps.connect( (ip,port) )
tcps.sendall( send_name() )
except:
print("Connection cannot be established!")
quit()
return tcps, udps | 28.515152 | 87 | 0.669501 | 131 | 941 | 4.687023 | 0.572519 | 0.078176 | 0.058632 | 0.065147 | 0.110749 | 0.110749 | 0.110749 | 0 | 0 | 0 | 0 | 0.02834 | 0.21254 | 941 | 33 | 88 | 28.515152 | 0.80027 | 0.090329 | 0 | 0 | 0 | 0 | 0.149708 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.08 | 0 | 0.24 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f20d156732daf62fc69467ac140f84f352da1e | 3,586 | py | Python | backend/otpvalidation/views.py | beehyv/workforcehealthtracker | ab0f921938e1ee50158c9d2c72a66c534d435eee | [
"Apache-2.0"
] | 2 | 2020-05-10T05:20:04.000Z | 2020-05-21T15:29:55.000Z | backend/otpvalidation/views.py | beehyv/workforcehealthtracker | ab0f921938e1ee50158c9d2c72a66c534d435eee | [
"Apache-2.0"
] | null | null | null | backend/otpvalidation/views.py | beehyv/workforcehealthtracker | ab0f921938e1ee50158c9d2c72a66c534d435eee | [
"Apache-2.0"
] | 2 | 2020-05-13T18:21:37.000Z | 2020-05-21T15:29:58.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Built and managed with Open Source Love by BeeHyv Software Solutions Pvt Ltd. Hyderabad
# www.beehyv.com
import uuid
from django.http import HttpResponse, JsonResponse
from rest_framework import status
from communications.CommunicationHandler import CommunicationHandler
from otpvalidation.models import Otp
from survey.models import SurveyInstance
def send_otp(request):
try:
        # send the OTP based on the survey id
otpValue = str(uuid.uuid4()).replace('-', '')[:6]
entity = Otp()
entity.otp = otpValue
instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
worker = instance.health_worker_id
if worker.whatsapp_number and len(worker.whatsapp_number) > 0:
entity.phone_number = worker.whatsapp_number
else:
entity.phone_number = worker.phone_number
entity.email = worker.email
commHandler = CommunicationHandler()
commHandler.send_message(worker, 5, {'otp': entity.otp})
entity.save()
except Exception as e:
return HttpResponse(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
response = {}
response['name'] = instance.health_worker_id.first_name
response['consent'] = instance.health_worker_id.is_consented
response['email'] = entity.email
response['phone_number'] = entity.phone_number
return JsonResponse(response)
def resend_otp(request):
try:
instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
worker = instance.health_worker_id
entity = Otp.objects.filter(phone_number=worker.phone_number).order_by('-id').first()
if entity:
commHandler = CommunicationHandler()
commHandler.send_message(worker, 5, {'otp': entity.otp})
except Exception as e:
return HttpResponse(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
response = {}
response['name'] = worker.first_name
response['consent'] = instance.health_worker_id.is_consented
response['email'] = entity.email
response['phone_number'] = entity.phone_number
return JsonResponse(response)
def verify_otp(request):
otpValue = request.GET['otp']
instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
worker = instance.health_worker_id
response = {}
entity = Otp.objects.filter(phone_number=worker.whatsapp_number, otp=otpValue).first()
if not entity:
entity = Otp.objects.filter(phone_number=worker.phone_number, otp=otpValue).first()
if entity:
worker.is_consented = True
worker.save()
entity.delete()
response['verified'] = True
else:
response['verified'] = False
return JsonResponse(response) | 37.747368 | 93 | 0.706637 | 443 | 3,586 | 5.604966 | 0.334086 | 0.053162 | 0.048329 | 0.053162 | 0.458317 | 0.414418 | 0.414418 | 0.398711 | 0.398711 | 0.358437 | 0 | 0.005236 | 0.20106 | 3,586 | 95 | 94 | 37.747368 | 0.861431 | 0.255438 | 0 | 0.52459 | 0 | 0 | 0.042296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.098361 | 0 | 0.229508 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f22fdaa93b5ce8be07b713b821e2b5f86f6a5c | 12,386 | py | Python | tagnet.py | dbxinj/ynet | 01dcf26246f673ee0168b8143596ef7994c24418 | [
"Apache-2.0"
] | null | null | null | tagnet.py | dbxinj/ynet | 01dcf26246f673ee0168b8143596ef7994c24418 | [
"Apache-2.0"
] | null | null | null | tagnet.py | dbxinj/ynet | 01dcf26246f673ee0168b8143596ef7994c24418 | [
"Apache-2.0"
] | null | null | null | import ynet
from util import *
from singa.layer import Conv2D, Activation, MaxPooling2D, AvgPooling2D, Flatten, Slice, LRN
from singa import initializer
from singa import layer
from singa import loss
from singa import tensor
import cPickle as pickle
import logging
import os
import numpy as np
from numpy.core.umath_tests import inner1d
import scipy.spatial
from tqdm import trange
import time
logger = logging.getLogger(__name__)
class L2Norm(ynet.L2Norm):
def forward(self, is_train, x):
norm = np.sqrt(np.sum(x**2, axis=1) + self.epsilon)
self.y = x / norm[:, np.newaxis]
if is_train:
self.norm = norm
return self.y
def backward(self, is_train, dy):
        # dx = (dy - y * k) / norm, where k = sum(dy * y) row-wise
k = np.sum(dy * self.y, axis=1)
dx = dy - self.y * k[:, np.newaxis]
dx /= self.norm[:, np.newaxis]
return dx, []
class Softmax(layer.Layer):
def __init__(self, name, input_sample_shape):
super(Softmax, self).__init__(name)
self.a = None
def forward(self, is_train, x):
assert len(x.shape) == 2, 'softmax input should be 2d-array'
a = x - np.max(x, axis=1)[:, np.newaxis]
a = np.exp(a)
a /= np.sum(a, axis=1)[:, np.newaxis]
if is_train:
self.a = a
return a
def backward(self, is_train, dy):
c = np.einsum('ij, ij->i', dy, self.a)
return self.a * (dy - c[:, np.newaxis]), []
class Aggregation(layer.Layer):
def __init__(self, name, input_sample_shape):
super(Aggregation, self).__init__(name)
self.c, h, w = input_sample_shape[0]
assert h * w == input_sample_shape[1][0], \
'# locations not match: %d vs %d' % (h * w, input_sample_shape[1][0])
self.x = None
def forward(self, is_train, xs):
x = xs[0].reshape((xs[0].shape[0], self.c, -1))
w = xs[1]
if is_train:
self.x = x
self.w = w
return np.einsum('ijk, ik -> ij', x, w)
def backward(self, is_train, dy):
dw = np.einsum('ij, ijk -> ik', dy, self.x)
dx = np.einsum('ij, ik -> ijk', dy, self.w)
return [dx, dw], []
class TagEmbedding(layer.Layer):
def __init__(self, name, num_output, input_sample_shape):
super(TagEmbedding, self).__init__(name)
self.W = tensor.Tensor((input_sample_shape[0], num_output))
#initializer.gaussian(self.W, input_sample_shape[0], num_output)
self.W.gaussian(0, 0.008)
def param_names(self):
return ['%s_weight' % self.name]
def param_values(self):
return [self.W]
def forward(self, is_train, x):
if is_train:
self.x = x
W = tensor.to_numpy(self.W)
# b = self.to_numpy(self.b)
return np.dot(x, W) # + b[np.newaxis, :]
def backward(self, is_train, dy):
dw = np.einsum('id, ij -> dj', self.x, dy)
# db = np.sum(dt, axis=0)
return [], [tensor.from_numpy(dw)]
class ProductAttention(layer.Layer):
def __init__(self, name, input_sample_shape):
super(ProductAttention, self).__init__(name)
self.c, self.h, self.w = input_sample_shape[0]
assert self.c == input_sample_shape[1][0], \
'# channels != tag embed dim: %d vs %d' % (self.c, input_sample_shape[1][0])
self.x = None
self.t = None
def forward(self, is_train, xs):
x = xs[0].reshape((xs[0].shape[0], self.c, -1))
t = xs[1]
if is_train:
self.x = x
self.t = xs[1]
return np.einsum('ijk, ij->ik', x, t)
def backward(self, is_train, dy):
dt = np.einsum('ik, ijk -> ij', dy, self.x)
dx = np.einsum('ij, ik -> ijk', self.t, dy)
return [dx, dt], []
class TagAttention(layer.Layer):
def __init__(self, name, input_sample_shape):
super(TagAttention, self).__init__(name)
self.c, self.h, self.w = input_sample_shape[0]
l = self.h * self.w
self.embed = TagEmbedding('%s_embed' % name, self.c, input_sample_shape[1])
self.attention = ProductAttention('%s_attention' % name, [input_sample_shape[0], (self.c,)])
self.softmax = Softmax('%s_softmax' % name, (l,))
self.agg = Aggregation('%s_agg' % name, [input_sample_shape[0], (l,)])
self.dev = None
def get_output_sample_shape(self):
return (self.c, )
def param_names(self):
return self.embed.param_names()
def param_values(self):
return self.embed.param_values()
def display(self, name, val):
if ynet.debug:
print('%30s = %2.8f' % (name, np.average(np.abs(val))))
def forward(self, is_train, x, output_weight=False):
if type(x[0]) == tensor.Tensor:
self.dev = x[0].device
img = tensor.to_numpy(x[0])
else:
img = x[0]
t = self.embed.forward(is_train, x[1])
if ynet.debug:
show_debuginfo(self.embed.name, t)
w = self.attention.forward(is_train, [img, t])
if ynet.debug:
show_debuginfo(self.attention.name, w)
w = self.softmax.forward(is_train, w)
if ynet.debug:
show_debuginfo(self.softmax.name, w)
y = self.agg.forward(is_train, [img, w])
if ynet.debug:
show_debuginfo(self.agg.name, y)
if output_weight:
return y, w
else:
return y
def backward(self, is_train, dy):
[dx1, dw], _ = self.agg.backward(is_train, dy)
if ynet.debug:
show_debuginfo(self.agg.name, dx1)
dw, _ = self.softmax.backward(is_train, dw)
if ynet.debug:
show_debuginfo(self.softmax.name, dw)
[dx2, dt], _ = self.attention.backward(is_train, dw)
if ynet.debug:
show_debuginfo(self.attention.name, dx2)
_, dW = self.embed.backward(is_train, dt)
dx = np.reshape(dx1 + dx2, (dx1.shape[0], self.c, self.h, self.w))
if self.dev is not None:
dx = tensor.from_numpy(dx)
dx.to_device(self.dev)
return dx, dW
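# --- A self-contained numpy sketch (not part of the original file) of the
# attention computed by TagAttention above: embed the multi-hot tag vector to c
# dims, score each of the h*w spatial locations by a dot product, softmax the
# scores, and pool the feature map with the resulting weights.
def _tag_attention_sketch(batch=2, c=8, h=3, w=3, ntag=5):
    x = np.random.randn(batch, c, h * w)            # conv features, locations flattened
    tags = np.random.randint(0, 2, (batch, ntag))   # multi-hot tag vectors
    W = np.random.randn(ntag, c) * 0.01             # tag embedding matrix
    t = tags.dot(W)                                 # (batch, c) tag embeddings
    scores = np.einsum('ijk, ij->ik', x, t)         # (batch, h*w) attention logits
    scores -= np.max(scores, axis=1)[:, np.newaxis]
    weights = np.exp(scores)
    weights /= np.sum(weights, axis=1)[:, np.newaxis]  # softmax over locations
    return np.einsum('ijk, ik -> ij', x, weights)   # (batch, c) pooled feature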
class TagNIN(ynet.YNIN):
def create_net(self, name, img_size, batchsize=32):
assert self.ntag > 0, 'no tags for tag nin'
shared = []
self.add_conv(shared, 'conv1', [96, 96, 96], 11, 4, sample_shape=(3, img_size, img_size))
shared.append(MaxPooling2D('p1', 3, 2, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
self.add_conv(shared, 'conv2', [256, 256, 256], 5, 1, 2)
shared.append(MaxPooling2D('p2', 3, 2, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
self.add_conv(shared, 'conv3', [384, 384, 384], 3, 1, 1)
shared.append(MaxPooling2D('p3', 3, 2, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
slice_layer = Slice('slice', 0, [batchsize*self.nuser], input_sample_shape=shared[-1].get_output_sample_shape())
shared.append(slice_layer)
user = []
self.add_conv(user, 'street-conv4', [1024, 1024, 1000] , 3, 1, 1, sample_shape=slice_layer.get_output_sample_shape()[0])
user.append(AvgPooling2D('street-p4', 6, 1, pad=0, input_sample_shape=user[-1].get_output_sample_shape()))
user.append(Flatten('street-flat', input_sample_shape=user[-1].get_output_sample_shape()))
user.append(ynet.L2Norm('street-l2', input_sample_shape=user[-1].get_output_sample_shape()))
shop = []
self.add_conv(shop, 'shop-conv4', [1024, 1024, 1000], 3, 1, 1, sample_shape=slice_layer.get_output_sample_shape()[1])
shop.append(TagAttention('shop-tag',
input_sample_shape=[shop[-1].get_output_sample_shape(), (self.ntag, )]))
shop.append(L2Norm('shop-l2', input_sample_shape=shop[-1].get_output_sample_shape()))
return shared, user, shop
def forward(self, is_train, data):
t1 = time.time()
imgs, pids = data.next()
t2 = time.time()
imgs = self.put_input_to_gpu(imgs)
a, b = self.forward_layers(is_train and (not self.freeze_shared), imgs, self.shared)
a = self.forward_layers(is_train and (not self.freeze_user), a, self.user)
b = self.forward_layers(is_train and (not self.freeze_shop), b, self.shop[0:-2])
b = self.shop[-2].forward(is_train, [b, data.tag2vec(pids[a.shape[0]:])])
b = self.forward_layers(is_train and (not self.freeze_shop), b, self.shop[-1:])
loss = self.loss.forward(is_train, a, b, pids)
return loss, t2 - t1, time.time() - t2
def extract_db_feature_on_batch(self, data):
img, pid = data.next()
img = self.put_input_to_gpu(img)
fea = self.forward_layers(False, img, self.shared[0:-1] + self.shop[0:-2])
fea = self.shop[-2].forward(False, [fea, data.tag2vec(pid)])
return fea, pid
class TagVGG(TagNIN):
def create_net(self, name, img_size, batchsize=32):
assert self.ntag > 0, 'no tags for tag nin'
shared = []
shared.append(Conv2D('conv1-3x3', 96, 7, 2, pad=1, input_sample_shape=(3, img_size, img_size)))
shared.append(Activation('conv1-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(LRN('conv1-norm', size=5, alpha=5e-4, beta=0.75, k=2, input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(MaxPooling2D('pool1', 3, 3, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(Conv2D('conv2', 256, 5, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1000, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(Activation('conv2-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(MaxPooling2D('pool2', 2, 2, pad=0, input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(Conv2D('conv3', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1000, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(Activation('conv3-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(Conv2D('conv4', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()))
shared.append(Activation('conv4-relu', input_sample_shape=shared[-1].get_output_sample_shape()))
slice_layer = Slice('slice', 0, [batchsize*self.nuser], input_sample_shape=shared[-1].get_output_sample_shape())
shared.append(slice_layer)
user = []
user.append(Conv2D('street-conv5', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()[1]))
user.append(Activation('street-conv5-relu', input_sample_shape=user[-1].get_output_sample_shape()))
user.append(Conv2D('street-conv6', 128, 3, 2, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=0, input_sample_shape=user[-1].get_output_sample_shape()))
user.append(Activation('street-conv6-relu', input_sample_shape=user[-1].get_output_sample_shape()))
user.append(AvgPooling2D('street-pool6', 8, 1, pad=0, input_sample_shape=user[-1].get_output_sample_shape()))
user.append(Flatten('street-flat', input_sample_shape=user[-1].get_output_sample_shape()))
user.append(ynet.L2Norm('street-l2', input_sample_shape=user[-1].get_output_sample_shape()))
shop = []
shop.append(Conv2D('shop-conv5', 512, 3, 1, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=1, input_sample_shape=shared[-1].get_output_sample_shape()[1]))
shop.append(Activation('shop-conv5-relu', input_sample_shape=shop[-1].get_output_sample_shape()))
shop.append(Conv2D('shop-conv6', 128, 3, 2, cudnn_prefer='limited_workspace', workspace_byte_limit=1500, pad=0, input_sample_shape=shop[-1].get_output_sample_shape()))
shop.append(Activation('shop-conv6-relu', input_sample_shape=shop[-1].get_output_sample_shape()))
shop.append(TagAttention('shop-tag',
input_sample_shape=[shop[-1].get_output_sample_shape(), (self.ntag, )]))
shop.append(L2Norm('shop-l2', input_sample_shape=shop[-1].get_output_sample_shape()))
return shared, user, shop
| 43.612676 | 182 | 0.631842 | 1,819 | 12,386 | 4.08851 | 0.120946 | 0.133118 | 0.109722 | 0.096813 | 0.638026 | 0.607503 | 0.544306 | 0.529111 | 0.498992 | 0.46188 | 0 | 0.035825 | 0.217988 | 12,386 | 283 | 183 | 43.766784 | 0.731984 | 0.013483 | 0 | 0.30131 | 0 | 0 | 0.061164 | 0 | 0 | 0 | 0 | 0 | 0.021834 | 1 | 0.117904 | false | 0 | 0.065502 | 0.021834 | 0.31441 | 0.004367 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f51f706d59ef12ae7c9778bb46f43394712322 | 1,362 | py | Python | server/api/processing/models/dim_reduction/umap.py | JBris/dolphin_segmentation | b1d22293720c15038d9c521aed8e7b258d8409aa | [
"MIT"
] | 1 | 2021-05-09T05:40:53.000Z | 2021-05-09T05:40:53.000Z | server/api/processing/models/dim_reduction/umap.py | JBris/dolphin_segmentation | b1d22293720c15038d9c521aed8e7b258d8409aa | [
"MIT"
] | null | null | null | server/api/processing/models/dim_reduction/umap.py | JBris/dolphin_segmentation | b1d22293720c15038d9c521aed8e7b258d8409aa | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from joblib import load as model_load
from api.services.file_select import FileTask
def preprocess_images(image_files, img_size):
images = []
for file in image_files:
img = cv2.imread(file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (img_size, img_size))
img = img.astype('float32')
img = cv2.bilateralFilter(img, 9, 50, 50)
# #image = cv2.normalize(image, np.zeros((img_size, img_size)), 0, 1, cv2.NORM_MINMAX)
images.append(img)
images = np.asarray(images)
images = images.reshape((images.shape[0], -1))
return images
def identify(images, module):
model = model_load(f"/app/models/{module}/umap_identify.joblib")
embeddings = model.transform(images)
return embeddings
def classify(images, module):
model = model_load(f"/app/models/{module}/umap_classify.joblib")
embeddings = model.transform(images)
return embeddings
class UMAP:
IMG_SIZE = 256
def transform(self, files, module, task):
images = preprocess_images(files, self.IMG_SIZE)
if task == FileTask.IDENTIFICATION.value: return identify(images, module)
if task == FileTask.CLASSIFICATION.value : return classify(images, module)
else: raise NotImplementedError(f"UMAP has not implemented task: {task}")
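# --- Usage sketch (not part of the original module) ---
# "img_01.png"/"img_02.png" and the module name "dolphin" are placeholders; the
# joblib models are expected under /app/models/<module>/ as loaded above.
def _umap_example():
    files = ["img_01.png", "img_02.png"]
    return UMAP().transform(files, module="dolphin", task=FileTask.IDENTIFICATION.value)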
| 33.219512 | 94 | 0.687225 | 180 | 1,362 | 5.094444 | 0.372222 | 0.053435 | 0.032715 | 0.030534 | 0.21374 | 0.21374 | 0.21374 | 0.100327 | 0.100327 | 0.100327 | 0 | 0.02112 | 0.200441 | 1,362 | 40 | 95 | 34.05 | 0.820937 | 0.06094 | 0 | 0.129032 | 0 | 0 | 0.099057 | 0.064465 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.129032 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f660253a038a05146b9855744f65f5a06fa0cb | 6,099 | py | Python | SupportVectorMachine/support_vector_machine.py | DragonYong/Sep-Dragon | 674df34a992b449669116c5322596ffc19a21030 | [
"MIT"
] | null | null | null | SupportVectorMachine/support_vector_machine.py | DragonYong/Sep-Dragon | 674df34a992b449669116c5322596ffc19a21030 | [
"MIT"
] | 1 | 2020-09-10T04:05:06.000Z | 2020-09-10T04:05:06.000Z | SupportVectorMachine/support_vector_machine.py | DragonYong/Sep-Dragon | 674df34a992b449669116c5322596ffc19a21030 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/9/9-22:50
# @Author : TuringEmmy
# @Email : yonglonggeng@163.com
# @WeChat : csy_lgy
# @File : support_vector_machine.py
# @Project : Sep-Dragon
# *************************************************
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
class SVM:
def __init__(self, max_iter=100, kernel='linear'):
self.max_iter = max_iter
self._kernel = kernel
def init_args(self, features, labels):
self.m, self.n = features.shape
self.X = features
self.Y = labels
self.b = 0.0
        # store E_i for every sample in a list
self.alpha = np.ones(self.m)
self.E = [self._E(i) for i in range(self.m)]
        # penalty parameter for the slack variables
self.C = 1.0
def _KKT(self, i):
y_g = self._g(i) * self.Y[i]
if self.alpha[i] == 0:
return y_g >= 1
elif 0 < self.alpha[i] < self.C:
return y_g == 1
else:
return y_g <= 1
    # g(x): predicted value for input x_i (X[i])
def _g(self, i):
r = self.b
for j in range(self.m):
r += self.alpha[j] * self.Y[j] * self.kernel(self.X[i],
                self.X[j])  # accumulate alpha_j * y_j * K(x_i, x_j); the total plus b is the prediction g(x_i)
return r
    # kernel function
def kernel(self, x1, x2):
if self._kernel == 'linear':
return sum([x1[k] * x2[k] for k in range(self.n)])
elif self._kernel == 'poly':
return (sum([x1[k] * x2[k] for k in range(self.n)]) + 1) ** 2
return 0
    # E(x): difference between the prediction g(x) and the label y
def _E(self, i):
return self._g(i) - self.Y[i]
def _init_alpha(self):
        # the outer loop first scans samples with 0 < alpha < C and checks the KKT conditions
index_list = [i for i in range(self.m) if 0 < self.alpha[i] < self.C]
        # otherwise scan the whole training set
non_satisfy_list = [i for i in range(self.m) if i not in index_list]
index_list.extend(non_satisfy_list)
for i in index_list:
if self._KKT(i):
continue
E1 = self.E[i]
            # if E1 is non-negative, choose j with the smallest E; otherwise with the largest
if E1 >= 0:
j = min(range(self.m), key=lambda x: self.E[x])
else:
j = max(range(self.m), key=lambda x: self.E[x])
return i, j
def _compare(self, _alpha, L, H):
if _alpha > H:
return H
elif _alpha < L:
return L
else:
return _alpha
def fit(self, features, labels):
self.init_args(features, labels)
for t in range(self.max_iter):
# train
i1, i2 = self._init_alpha()
            # clipping bounds L and H for alpha2
if self.Y[i1] == self.Y[i2]:
L = max(0, self.alpha[i1] + self.alpha[i2] - self.C)
H = min(self.C, self.alpha[i1] + self.alpha[i2])
else:
L = max(0, self.alpha[i2] - self.alpha[i1])
H = min(self.C, self.C + self.alpha[i2] - self.alpha[i1])
E1 = self.E[i1]
E2 = self.E[i2]
# eta=K11+K22-2K12
eta = self.kernel(self.X[i1], self.X[i1]) + self.kernel(
self.X[i2],
self.X[i2]) - 2 * self.kernel(self.X[i1], self.X[i2])
if eta <= 0:
# print('eta <= 0')
continue
alpha2_new_unc = self.alpha[i2] + self.Y[i2] * (
                E1 - E2) / eta  # changed here: per the book (pp. 130-131) it should be E1 - E2
alpha2_new = self._compare(alpha2_new_unc, L, H)
alpha1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (
self.alpha[i2] - alpha2_new)
b1_new = -E1 - self.Y[i1] * self.kernel(self.X[i1], self.X[i1]) * (
alpha1_new - self.alpha[i1]) - self.Y[i2] * self.kernel(
self.X[i2],
self.X[i1]) * (alpha2_new - self.alpha[i2]) + self.b
b2_new = -E2 - self.Y[i1] * self.kernel(self.X[i1], self.X[i2]) * (
alpha1_new - self.alpha[i1]) - self.Y[i2] * self.kernel(
self.X[i2],
self.X[i2]) * (alpha2_new - self.alpha[i2]) + self.b
if 0 < alpha1_new < self.C:
b_new = b1_new
elif 0 < alpha2_new < self.C:
b_new = b2_new
else:
                # choose the midpoint
b_new = (b1_new + b2_new) / 2
            # update the parameters
self.alpha[i1] = alpha1_new
self.alpha[i2] = alpha2_new
self.b = b_new
self.E[i1] = self._E(i1)
self.E[i2] = self._E(i2)
return 'train done!'
def predict(self, data):
r = self.b
for i in range(self.m):
r += self.alpha[i] * self.Y[i] * self.kernel(data, self.X[i])
return 1 if r > 0 else -1
def score(self, X_test, y_test):
right_count = 0
for i in range(len(X_test)):
result = self.predict(X_test[i])
if result == y_test[i]:
right_count += 1
return right_count / len(X_test)
def _weight(self):
# linear model
yx = self.Y.reshape(-1, 1) * self.X
self.w = np.dot(yx.T, self.alpha)
return self.w
# data
def create_data():
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
df.columns = [
'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
]
data = np.array(df.iloc[:100:10, [0, 1, -1]])
for i in range(len(data)):
if data[i, -1] == 0:
data[i, -1] = -1
# print(data)
return data[:, :2], data[:, -1]
if __name__ == '__main__':
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# plt.scatter(X[:50, 0], X[:50, 1], label='0')
# plt.scatter(X[50:, 0], X[50:, 1], label='1')
# plt.legend()
# plt.show()
svm = SVM(max_iter=200)
status = svm.fit(X_train, y_train)
print(status)
res = svm.score(X_test, y_test)
print(res)
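    # Optional visualization sketch (added; not in the original script): with the
    # linear kernel the separating line w.x + b = 0 can be recovered from the
    # trained model and drawn over the data, mirroring the commented-out scatter
    # plot above.
    # w = svm._weight()
    # xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 10)
    # ys = -(w[0] * xs + svm.b) / w[1]
    # plt.plot(xs, ys)
    # plt.scatter(X[:, 0], X[:, 1], c=y)
    # plt.show()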
| 30.959391 | 110 | 0.490572 | 881 | 6,099 | 3.271283 | 0.204313 | 0.078071 | 0.034351 | 0.041638 | 0.31263 | 0.259195 | 0.199861 | 0.152672 | 0.125607 | 0.075642 | 0 | 0.045733 | 0.358255 | 6,099 | 196 | 111 | 31.117347 | 0.690598 | 0.104279 | 0 | 0.102941 | 0 | 0 | 0.016756 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095588 | false | 0 | 0.029412 | 0.007353 | 0.257353 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f6f3452f96ca5836016613b2d46de60f6bab90 | 2,668 | py | Python | plaidml2/exec/__init__.py | winnerineast/plaidml | 7a7191507f04f0853fed34cfd471ef76aa6d489f | [
"Apache-2.0"
] | 1 | 2019-09-23T05:50:50.000Z | 2019-09-23T05:50:50.000Z | plaidml2/exec/__init__.py | kounkounsito/plaidml | 615cfce9dac7a5786a8895feadfd2aa13680d6de | [
"Apache-2.0"
] | null | null | null | plaidml2/exec/__init__.py | kounkounsito/plaidml | 615cfce9dac7a5786a8895feadfd2aa13680d6de | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Intel Corporation.
import numpy as np
import plaidml2 as plaidml
import plaidml2.settings as plaidml_settings
from plaidml2.ffi import ForeignObject, decode_str, ffi, ffi_call, lib
def __init():
ffi_call(lib.plaidml_exec_init)
ffi.init_once(__init, 'plaidml_exec_init')
def list_devices():
ndevices = ffi_call(lib.plaidml_device_list_count)
raw_devices = ffi.new('plaidml_string*[]', ndevices)
ffi_call(lib.plaidml_device_list, ndevices, raw_devices)
return [decode_str(x) for x in raw_devices]
def list_targets():
ntargets = ffi_call(lib.plaidml_target_list_count)
raw_targets = ffi.new('plaidml_string*[]', ntargets)
ffi_call(lib.plaidml_target_list, ntargets, raw_targets)
return [decode_str(x) for x in raw_targets]
class Executable(ForeignObject):
__ffi_del__ = lib.plaidml_executable_free
def __init__(self, program, inputs, device=None, target=None):
if device is None:
device = plaidml_settings.get('PLAIDML_DEVICE')
if target is None:
target = plaidml_settings.get('PLAIDML_TARGET')
def make_buffer(tensor):
# convert LogicalShape into TensorShape
return plaidml.Buffer(device, tensor.shape.into_TensorShape())
self._input_bindings = [(x, make_buffer(x)) for x in inputs]
self._output_bindings = [(x, make_buffer(x)) for x in program.outputs]
self._inputs = [x[1] for x in self._input_bindings]
self._outputs = [x[1] for x in self._output_bindings]
def wrap(x, y):
return ffi.new('plaidml_binding*', [x.as_ptr(), y.as_ptr()])
inputs = [wrap(x, y) for x, y in self._input_bindings]
outputs = [wrap(x, y) for x, y in self._output_bindings]
ffi_obj = ffi_call(
lib.plaidml_compile,
program.as_ptr(),
device.encode(),
target.encode(),
len(inputs),
inputs,
len(outputs),
outputs,
)
super(Executable, self).__init__(ffi_obj)
def __call__(self, inputs):
for buffer, ndarray in zip(self._inputs, inputs):
# Cast the input data type to match the dtype expected by the placeholder buffer
ndarray = np.array(ndarray, dtype=buffer.shape.dtype.into_numpy())
buffer.copy_from_ndarray(ndarray)
ffi_call(lib.plaidml_executable_run, self.as_ptr())
return self._outputs
def run(program, inputs, device=None, target=None):
exe = Executable(program, [x for x, y in inputs], device=device, target=target)
return [x.as_ndarray() for x in exe([y for x, y in inputs])]
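# Minimal usage sketch (added; not part of the original module): with PlaidML
# installed and configured, the helpers above can be used to inspect the
# available hardware before compiling and running a program.
if __name__ == '__main__':
    print('devices:', list_devices())
    print('targets:', list_targets())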
| 33.772152 | 92 | 0.662294 | 364 | 2,668 | 4.585165 | 0.236264 | 0.026363 | 0.047933 | 0.0713 | 0.23547 | 0.219293 | 0.165368 | 0.081486 | 0 | 0 | 0 | 0.00441 | 0.235008 | 2,668 | 78 | 93 | 34.205128 | 0.813327 | 0.056222 | 0 | 0 | 0 | 0 | 0.037788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.074074 | 0.037037 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f791043bd9796713489db2bdf00e09dbb2fd36 | 3,831 | py | Python | luna_transformer/model.py | sooftware/luna-transformer | fc06b453478dcfd9b7979b70af1b4f4507597891 | [
"MIT"
] | 26 | 2021-07-29T18:05:39.000Z | 2022-02-04T20:03:13.000Z | luna_transformer/model.py | sooftware/luna-transformer | fc06b453478dcfd9b7979b70af1b4f4507597891 | [
"MIT"
] | null | null | null | luna_transformer/model.py | sooftware/luna-transformer | fc06b453478dcfd9b7979b70af1b4f4507597891 | [
"MIT"
] | 1 | 2021-07-31T07:30:02.000Z | 2021-07-31T07:30:02.000Z | # MIT License
#
# Copyright (c) 2021 Soohwan Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
from luna_transformer.embedding import PositionalEncoding
from luna_transformer.encoder import LunaTransformerEncoderLayer
from luna_transformer.mask import get_attn_pad_mask
class LunaTransformerEncoder(nn.Module):
"""
Transformer encoder architecture applied Linear Unified Nested Attention (Luna).
Luna was proposed in the paper "Luna: Linear Unified Nested Attention" (https://arxiv.org/abs/2106.01540.pdf)
"""
def __init__(
self,
vocab_size: int,
d_model: int,
num_layers: int = 6,
num_attention_heads: int = 8,
d_ff: int = 2048,
dropout_p: float = 0.1,
project_embedding_length: int = 32,
max_length: int = 1024,
):
super(LunaTransformerEncoder, self).__init__()
self.d_model = d_model
self.projected_embedding_length = project_embedding_length
self.projected_embeddings = nn.Parameter(torch.Tensor(project_embedding_length, self.d_model))
self.projected_positions = PositionalEncoding(self.d_model, project_embedding_length)
nn.init.normal_(self.projected_embeddings, mean=0.0, std=self.d_model ** -0.5)
self.input_embedding = nn.Embedding(vocab_size, d_model)
self.dropout = nn.Dropout(p=dropout_p)
self.input_positions = PositionalEncoding(d_model, max_length)
self.input_norm = nn.LayerNorm(d_model)
self.embed_scale = math.sqrt(self.d_model)
self.layers = nn.ModuleList([
LunaTransformerEncoderLayer(
d_model=d_model,
num_attention_heads=num_attention_heads,
d_ff=d_ff,
dropout_p=dropout_p,
) for _ in range(num_layers)
])
def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor):
batch_size, seq_length = inputs.size()
attention_padding_mask = get_attn_pad_mask(inputs, input_lengths, self.projected_embedding_length)
embedded = self.input_embedding(inputs)
embedded *= self.embed_scale
projected_embedded = self.projected_embeddings * self.embed_scale
embedded += self.input_positions(embedded.size(1))
projected_embedded += self.projected_positions(self.projected_embedding_length).squeeze(0)
seq_length, dim = projected_embedded.size()
projected_embedded = projected_embedded.unsqueeze(0).expand(batch_size, seq_length, dim)
outputs = self.dropout(embedded)
p = self.dropout(projected_embedded)
for layer in self.layers:
outputs, p = layer(outputs, p, attention_padding_mask)
return outputs
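# Minimal usage sketch (added; not part of the original file). The sizes below
# are purely illustrative.
if __name__ == '__main__':
    encoder = LunaTransformerEncoder(vocab_size=100, d_model=64, num_layers=2,
                                     num_attention_heads=4, d_ff=256)
    inputs = torch.randint(0, 100, (2, 10))   # (batch_size, seq_length)
    input_lengths = torch.tensor([10, 7])     # number of valid tokens per sequence
    outputs = encoder(inputs, input_lengths)  # (batch_size, seq_length, d_model)
    print(outputs.shape)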
| 40.755319 | 113 | 0.708431 | 495 | 3,831 | 5.292929 | 0.377778 | 0.027481 | 0.019084 | 0.032061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01136 | 0.218742 | 3,831 | 93 | 114 | 41.193548 | 0.864016 | 0.327591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24f9bb21ea848732645dd95fdfaf8ef61a4a7e4b | 6,726 | py | Python | nfbackend/testboard_utils.py | neuralfoo/neuralfoo | d40388cd506d612ceee9ae10d92985c794249c65 | [
"MIT"
] | 2 | 2022-01-06T08:18:29.000Z | 2022-01-06T08:19:09.000Z | nfbackend/testboard_utils.py | neuralfoo/neuralfoo | d40388cd506d612ceee9ae10d92985c794249c65 | [
"MIT"
] | null | null | null | nfbackend/testboard_utils.py | neuralfoo/neuralfoo | d40388cd506d612ceee9ae10d92985c794249c65 | [
"MIT"
] | null | null | null | import dbops
from loguru import logger
import traceback
from bson.objectid import ObjectId
import utils
import global_vars as g
import fs_utils
def create_testboard(data,userID,organizationID):
try:
if dbops.check_if_exists("testboards","apiName",data["apiName"]):
message = "Testboard named '"+data["apiName"]+"' already exists."
logger.error(message)
return "",message
testboard_id = dbops.insert_testboard(
data["apiName"],
data["apiType"],
data["apiEnvironment"],
data["visibility"],
userID,
organizationID
)
for r in data["apiRequests"]:
request_id = dbops.insert_request(
testboard_id ,
r["apiHeader"] ,
r["apiHttpMethod"] ,
r["apiEndpoint"] ,
r["apiRequestBody"] ,
r["apiResponseBody"] ,
r["apiInputDataType"] ,
r["apiRequestBodyType"] ,
r["apiResponseBodyType"]
)
dbops.push_request_in_testboard(testboard_id,request_id)
return testboard_id,"success"
except Exception as e:
logger.error(str(e))
traceback.print_exc()
return None,str(e)
def update_testboard(data,userID):
try:
testboardID = data["testboardID"]
if dbops.check_if_exists("testboards","_id",ObjectId(testboardID)) == False:
message = "Testboard with ID '"+testboardID+"' does not exist."
logger.error(message)
return "",message
for d in data:
if d in ["apiName","apiEnvironment"]:
r = dbops.update_collection("testboards",testboardID,d,data[d])
if r == False:
return None,"Unable to update entities"
ownership,msg = utils.check_ownership("testboards",ObjectId(testboardID),userID)
if ownership:
r = dbops.update_collection("testboards",testboardID,"visibility",data["visibility"])
if r == False:
return None,"Unable to update visibility"
cleared = dbops.clear_all_requests(testboardID)
if not cleared:
return None, "Unable to remove existing requests"
r = dbops.update_collection("testboards",testboardID,"apiRequests",[])
if r == False:
return None,"Unable to update apiRequests"
for request in data["apiRequests"]:
request_id = dbops.insert_request(
testboardID ,
request["apiHeader"] ,
request["apiHttpMethod"] ,
request["apiEndpoint"] ,
request["apiRequestBody"] ,
request["apiResponseBody"] ,
request["apiInputDataType"] ,
request["apiRequestBodyType"] ,
request["apiResponseBodyType"]
)
dbops.push_request_in_testboard(testboardID,request_id)
dbops.update_collection("testboards",testboardID,"apiLastUpdatedBy",userID)
return testboardID,"success"
except Exception as e:
logger.error(str(e))
traceback.print_exc()
return None,str(e)
def get_testboard(testboard_id):
try:
testboard_details = dbops.get_testboard(testboard_id)
if testboard_details is None:
return None,"testboard not found"
testboard_details["testboardID"] = str(testboard_details["_id"])
del testboard_details["_id"]
api_requests = []
for reqID in testboard_details["apiRequests"]:
req_data = dbops.get_request(reqID)
req_data["requestID"] = str(req_data["_id"])
del req_data["_id"]
api_requests.append(req_data)
testboard_details["apiRequests"] = api_requests
return testboard_details,"success"
except Exception as e:
logger.error(str(e))
traceback.print_exc()
return None,str(e)
def list_testboard(userID,organizationID):
try:
testboard_list = dbops.list_testboards(userID,organizationID)
for i in range(len(testboard_list)):
testboard_list[i]["testboardID"] = str(testboard_list[i]["_id"])
del testboard_list[i]["_id"]
testboard_list[i]["key"] = i+1
testboard_list[i]["apiType"] = g.api_named_types[testboard_list[i]["apiType"]]
creator = dbops.fetch_user_details(testboard_list[i]["creatorID"])
if creator is not None:
testboard_list[i]["creator"] = creator["firstName"]
else:
testboard_list[i]["creator"] = "User deleted"
return testboard_list,"success"
except Exception as e:
logger.error(str(e))
traceback.print_exc()
return None,str(e)
def get_test_files(testboardID):
testboard = dbops.get_testboard(testboardID)
if testboard is None:
message = "TestboardID does not exist"
logger.error(message)
return None,message
if testboard["apiType"] == "imageclassification":
file_list,msg = get_image_classification_test_files(testboardID)
return file_list,msg
return None,"This should never be returned, probably a bug."
def get_image_classification_test_files(testboardID):
image_list = dbops.get_images_for_testboard(testboardID)
for i in range(len(image_list)):
image_list[i]["imageID"] = str(image_list[i]["_id"])
del image_list[i]["_id"]
del image_list[i]["imageUrl"]
image_list[i]["key"] = i+1
image_list[i]["imageResolution"] = str(image_list[i]["imageHeight"])+"x"+str(image_list[i]["imageWidth"])
image_list[i]["className"] = image_list[i]["annotation"]
image_list[i]["fileSize"] = str(round((image_list[i]["fileSize"])/1024,1)) + "kB"
return image_list,"success"
def delete_test_files(testboardID,imageIDs):
images_urls = dbops.get_links_for_images(testboardID,imageIDs)
for url in images_urls:
r = fs_utils.delete_from_fs(url["imageUrl"])
if r == False:
logger.error(f"unable to delete {url} from fs")
delete_count = dbops.delete_images_from_testboard(testboardID,imageIDs)
return delete_count
def update_image_visibility(testboardID,imageIDs,visible):
modified_count = dbops.update_image_visibility(imageIDs,visible)
return modified_count
def update_testfile_annotation(testboardID,imageID,annotation):
r = dbops.update_testfile_annotation(testboardID,imageID,annotation)
return r
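# Illustrative sketch (added; not part of the original module): the payload shape
# that create_testboard/update_testboard expect, using the field names accessed
# above. All values are placeholders.
EXAMPLE_TESTBOARD_PAYLOAD = {
    "apiName": "demo image classifier",
    "apiType": "imageclassification",
    "apiEnvironment": "staging",
    "visibility": "private",
    "apiRequests": [{
        "apiHeader": {"Content-Type": "application/json"},
        "apiHttpMethod": "POST",
        "apiEndpoint": "https://example.com/predict",
        "apiRequestBody": "{}",
        "apiResponseBody": "{}",
        "apiInputDataType": "image",
        "apiRequestBodyType": "json",
        "apiResponseBodyType": "json",
    }],
}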
| 26.376471 | 113 | 0.610616 | 714 | 6,726 | 5.556022 | 0.196078 | 0.026468 | 0.03025 | 0.0242 | 0.306781 | 0.274515 | 0.156289 | 0.156289 | 0.076128 | 0.076128 | 0 | 0.001449 | 0.281742 | 6,726 | 254 | 114 | 26.480315 | 0.819706 | 0 | 0 | 0.202614 | 0 | 0 | 0.158945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.045752 | 0 | 0.248366 | 0.026144 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24ffa281701166331532a6d684f818550c073997 | 4,696 | py | Python | language/serene/text_matcher.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/serene/text_matcher.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/serene/text_matcher.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A simple TFIDF text matcher and function to run it."""
import pickle
import random
from typing import Optional
from absl import logging
from language.serene import fever_pb2
from language.serene import types
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import tensorflow.compat.v2 as tf
import tqdm
class TextMatcher:
"""A simple TFIDF Text matcher."""
def __init__(
self,
ngram_range = (1, 2), min_df=2, max_df=.9):
"""Init parameters for text matcher.
For details, refer to
https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
since the parameters are used in calling that.
Args:
ngram_range: Tuple of n-grams to use (e.g., unigram and bigram)
      min_df: Minimum document frequency required to keep a term in the vocab
      max_df: Maximum document frequency allowed before a term is excluded from the vocab
"""
self._tfidf: Optional[TfidfVectorizer] = None
self._ngram_range = ngram_range
self._min_df = min_df
self._max_df = max_df
def train(self, sentences):
self._tfidf = TfidfVectorizer(
ngram_range=self._ngram_range,
min_df=self._min_df, max_df=self._max_df)
self._tfidf.fit(sentences)
def score(self,
claim,
candidates,
text_key = 'text'):
"""Return the score for each candidate, order does not change.
Args:
claim: The claim to match
candidates: The candidates to rank
text_key: Key in the candidate json that contains the text to score
Returns:
The score for each candidate
"""
if self._tfidf is None:
raise ValueError('You must train or load a model before predicting')
if not candidates:
return []
# make candidates indexable via numpy style indices
candidates = np.array(candidates, dtype=np.object)
# (1, vocab_size)
claim_repr = self._tfidf.transform([claim])
# (n_candidates, vocab_size)
candidates_repr = self._tfidf.transform([c[text_key] for c in candidates])
# (1, n_candidates)
product = candidates_repr.dot(claim_repr.T).T.toarray()
return product.reshape(-1).tolist()
def predict(
self,
claim, candidates,
text_key = 'text'):
"""Scores claim against candidates and returns ordered candidates.
Args:
claim: The claim to match
candidates: The candidates to rank
text_key: Key in the candidate json that contains the text to score
Returns:
sorted candidates and a score for each.
"""
if self._tfidf is None:
raise ValueError('You must train or load a model before predicting')
if not candidates:
return []
# make candidates indexable via numpy style indices
candidates = np.array(candidates, dtype=np.object)
# (1, vocab_size)
claim_repr = self._tfidf.transform([claim])
# (n_candidates, vocab_size)
candidates_repr = self._tfidf.transform([c[text_key] for c in candidates])
# (1, n_candidates)
product = candidates_repr.dot(claim_repr.T).T.toarray()
# Take the first row, since that is the only row and the one that
# contains the scores against the claim
preds = (-product).argsort(axis=1)[0]
scores = -np.sort(-product, axis=1)[0]
scores_and_candidates = []
for match_score, candidate in zip(scores, candidates[preds]):
scores_and_candidates.append((match_score, candidate))
return scores_and_candidates
def save(self, data_dir):
if self._tfidf is None:
raise ValueError('Attempted to save nonexistent model')
with tf.io.gfile.GFile(data_dir, 'wb') as f:
pickle.dump({
'tfidf': self._tfidf,
'ngram_range': self._ngram_range,
'min_df': self._min_df,
'max_df': self._max_df,
}, f)
def load(self, data_dir):
with tf.io.gfile.GFile(data_dir, 'rb') as f:
params = pickle.load(f)
self._tfidf = params['tfidf']
self._ngram_range = params['ngram_range']
self._min_df = params['min_df']
self._max_df = params['max_df']
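# Minimal usage sketch (added; not part of the original file): fit the matcher on
# a tiny corpus and score two candidates against a claim. Candidates follow the
# {'text': ...} convention used by `score` and `predict`; the sentences are
# placeholders.
if __name__ == '__main__':
  matcher = TextMatcher()
  matcher.train(['the sky is blue', 'grass is green', 'the sun is a star'])
  candidates = [{'text': 'the sky is blue'}, {'text': 'grass is green'}]
  print(matcher.score('what color is the sky', candidates))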
| 32.611111 | 106 | 0.688671 | 666 | 4,696 | 4.713213 | 0.318318 | 0.034406 | 0.0223 | 0.014017 | 0.447276 | 0.396305 | 0.353616 | 0.327493 | 0.327493 | 0.327493 | 0 | 0.006834 | 0.221039 | 4,696 | 143 | 107 | 32.839161 | 0.851285 | 0.409072 | 0 | 0.3 | 0 | 0 | 0.075637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.128571 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70000a0effbae8943e38a948112d24880ec73970 | 1,099 | py | Python | setup.py | hulusibaysal/EksiGundem | 6b0130a89c0c42b365dea847027ff3e13c4c4240 | [
"MIT"
] | null | null | null | setup.py | hulusibaysal/EksiGundem | 6b0130a89c0c42b365dea847027ff3e13c4c4240 | [
"MIT"
] | null | null | null | setup.py | hulusibaysal/EksiGundem | 6b0130a89c0c42b365dea847027ff3e13c4c4240 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from setuptools import setup
CURRENT_DIR = Path(__file__).parent
def get_long_description():
readme_md = CURRENT_DIR / "README.md"
with open(readme_md, encoding="utf8") as ld_file:
return ld_file.read()
setup(
name="eksi",
version="0.0.1",
description="Komut satırında Ekşisözlük!",
long_description=get_long_description(),
long_description_content_type="text/markdown",
keywords=["ekşisözlük", "ekşi", "sözlük"],
author="Furkan Önder",
author_email="furkanonder@protonmail.com",
url="https://github.com/furkanonder/EksiGundem/",
license="MIT",
python_requires=">=3.0.0",
py_modules=["eksi"],
packages=[],
zip_safe=False,
include_package_data=True,
install_requires=["beautifulsoup4", "bs4", "colorama", "lxml"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["eksi=eksi:eksi"]},
)
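# Usage note (added; not part of the original file): from a checkout of the
# repository the package can be installed locally with `pip install .`, which
# exposes the `eksi` console script declared in entry_points above.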
| 26.804878 | 67 | 0.658781 | 128 | 1,099 | 5.445313 | 0.695313 | 0.086083 | 0.05165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012277 | 0.184713 | 1,099 | 40 | 68 | 27.475 | 0.765625 | 0.038217 | 0 | 0 | 0 | 0 | 0.323223 | 0.024645 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.064516 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7001e2e5450d227861c09e67f69e32ff38986c9e | 4,596 | py | Python | asci_art.py | JonathanFromm/HackerspaceTemplatePackage | b0bd5e77cd36417901b064e82812d365c55ff421 | [
"MIT"
] | null | null | null | asci_art.py | JonathanFromm/HackerspaceTemplatePackage | b0bd5e77cd36417901b064e82812d365c55ff421 | [
"MIT"
] | null | null | null | asci_art.py | JonathanFromm/HackerspaceTemplatePackage | b0bd5e77cd36417901b064e82812d365c55ff421 | [
"MIT"
] | null | null | null | class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def happy_face():
print(' ')
print(bcolors.OKGREEN+' ^ ^ '+bcolors.ENDC)
print(bcolors.OKGREEN+' \__/ '+bcolors.ENDC)
print(' ')
def confused_face():
print(' ')
print(bcolors.WARNING+' o O '+bcolors.ENDC)
print(bcolors.WARNING+' ---- '+bcolors.ENDC)
print(' ')
def sad_face():
print(' ')
print(bcolors.FAIL+' > < '+bcolors.ENDC)
print(bcolors.FAIL+' ---- '+bcolors.ENDC)
print(' ')
def show_message(text):
    # split the string into lines of at most 27 characters
words = text.split(' ')
lines = []
while len(words) > 0:
line = ''
for word in words:
if len(line)+len(word) > 27:
break
else:
line = line+word+' '
words = words[1:]
while len(line) < 27:
line = line + ' '
lines.append(line)
print(' ')
print(' ')
print(' ')
print(bcolors.HEADER+' /-------------------------------\ '+bcolors.ENDC)
for line in lines:
print(bcolors.HEADER+' | '+bcolors.ENDC +
line+bcolors.HEADER+' | '+bcolors.ENDC)
print(bcolors.HEADER+' \ /-----------------------/ '+bcolors.ENDC)
print(bcolors.HEADER+' \ / '+bcolors.ENDC)
print(bcolors.HEADER+' \---/ '+bcolors.ENDC)
if text.startswith('WARNING:') or text.startswith('Guess later then'):
confused_face()
elif text.startswith('ERROR:'):
sad_face()
else:
happy_face()
def show_messages(list_messages):
for message in list_messages:
show_message(message)
input(bcolors.WARNING+"Press Enter to continue..."+bcolors.ENDC)
def set_secret(json_secrets, later_then_message, message, str_level_0, str_level_1=None, str_level_2=None, str_level_3=None):
if str_level_3:
show_message(message)
json_secrets[str_level_0][str_level_1][str_level_2][str_level_3] = input()
if not json_secrets[str_level_0][str_level_1][str_level_2][str_level_3]:
json_secrets[str_level_0][str_level_1][str_level_2][str_level_3] = None
show_message(later_then_message)
elif str_level_2:
show_message(message)
json_secrets[str_level_0][str_level_1][str_level_2] = input()
if not json_secrets[str_level_0][str_level_1][str_level_2]:
json_secrets[str_level_0][str_level_1][str_level_2] = None
show_message(later_then_message)
elif str_level_1:
show_message(message)
json_secrets[str_level_0][str_level_1] = input()
if not json_secrets[str_level_0][str_level_1]:
json_secrets[str_level_0][str_level_1] = None
show_message(later_then_message)
elif str_level_0:
show_message(message)
json_secrets[str_level_0] = input()
if not json_secrets[str_level_0]:
json_secrets[str_level_0] = None
show_message(later_then_message)
return json_secrets
def set_secrets(json_secrets, later_then_message, str_set_what):
location = str_set_what.upper()
for parameter in json_secrets[location]:
if json_secrets[location][parameter] == None:
show_message(
'Please enter your '+parameter+' for '+str_set_what+' (or add it later and press Enter now)')
json_secrets[location][parameter] = input()
if not json_secrets[location][parameter]:
json_secrets[location][parameter] = None
show_message(later_then_message)
break
elif json_secrets[location][parameter] != None:
for sub_paramter in json_secrets[location][parameter]:
show_message(
'Please enter your '+parameter+' '+sub_paramter+' for '+str_set_what+' (or add it later and press Enter now)')
json_secrets[location][parameter][sub_paramter] = input()
if not json_secrets[location][parameter][sub_paramter]:
json_secrets[location][parameter][sub_paramter] = None
show_message(later_then_message)
break
return json_secrets
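# Minimal usage sketch (added; not part of the original module): the message texts
# are placeholders; show_messages waits for Enter between messages.
if __name__ == '__main__':
    show_messages([
        'This is a normal message rendered with the happy face.',
        'WARNING: this one is rendered with the confused face.',
    ])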
| 35.353846 | 130 | 0.572454 | 543 | 4,596 | 4.554328 | 0.163904 | 0.122928 | 0.05095 | 0.092196 | 0.627578 | 0.561262 | 0.450061 | 0.352608 | 0.313385 | 0.261221 | 0 | 0.025865 | 0.301784 | 4,596 | 129 | 131 | 35.627907 | 0.74478 | 0.01349 | 0 | 0.27619 | 0 | 0 | 0.13526 | 0.012798 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.171429 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70031ea92c28e396deb633661ccd21348ccd84ec | 795 | py | Python | paths.py | ShuaiW/kaggle-heart | 022997f27add953c74af2b371c67d9d86cbdccc3 | [
"MIT"
] | 182 | 2016-03-15T01:51:29.000Z | 2021-04-21T09:49:05.000Z | paths.py | weidezhang/kaggle-heart | 022997f27add953c74af2b371c67d9d86cbdccc3 | [
"MIT"
] | 1 | 2018-06-22T16:46:12.000Z | 2018-06-22T21:08:09.000Z | paths.py | weidezhang/kaggle-heart | 022997f27add953c74af2b371c67d9d86cbdccc3 | [
"MIT"
] | 61 | 2016-03-15T00:58:28.000Z | 2020-03-06T22:00:41.000Z | """Module for reading the SETTINGS.json file.
"""
import json
import os
with open(os.path.dirname(os.path.realpath(__file__)) + '/SETTINGS.json') as data_file:
PATHS = json.load(data_file)
TRAIN_DATA_PATH = PATHS["TRAIN_DATA_PATH"]
TEST_DATA_PATH = PATHS["VALIDATE_DATA_PATH"]
PKL_TRAIN_DATA_PATH = PATHS["PKL_TRAIN_DATA_PATH"]
PKL_TEST_DATA_PATH = PATHS["PKL_VALIDATE_DATA_PATH"]
MODEL_PATH = PATHS["MODEL_PATH"]
SUBMISSION_PATH = PATHS["SUBMISSION_PATH"]
LOGS_PATH = PATHS["LOGS_PATH"]
INTERMEDIATE_PREDICTIONS_PATH = PATHS["INTERMEDIATE_PREDICTIONS_PATH"]
TEMP_FILES_PATH = PATHS["TEMP_FILES_PATH"]
TRAIN_PATIENT_IDS = PATHS["TRAIN_PATIENT_IDS"]
TEST_PATIENT_IDS = PATHS["TEST_PATIENT_IDS"]
SUBMISSION_NR = PATHS["SUBMISSION_NR"]
ENSEMBLE_WEIGHTS_PATH = PATHS["ENSEMBLE_WEIGHTS_PATH"]
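# Illustrative sketch (added; not part of the original module): SETTINGS.json is
# expected to provide one entry per constant read above, for example (all paths
# are placeholders):
# {
#     "TRAIN_DATA_PATH": "/data/train/",
#     "VALIDATE_DATA_PATH": "/data/validate/",
#     "MODEL_PATH": "/models/",
#     ...
# }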
| 34.565217 | 87 | 0.8 | 117 | 795 | 4.974359 | 0.290598 | 0.154639 | 0.089347 | 0.061856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083019 | 795 | 22 | 88 | 36.136364 | 0.798354 | 0.05283 | 0 | 0 | 0 | 0 | 0.312332 | 0.096515 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
700529f087345e2c10ecc73eaeb6cb41b5f20690 | 433 | py | Python | debug/debug08.py | adamlukomski/pykron | 5e4c4b840af2cf574dab8417a97b7f0fc4080878 | [
"BSD-2-Clause"
] | null | null | null | debug/debug08.py | adamlukomski/pykron | 5e4c4b840af2cf574dab8417a97b7f0fc4080878 | [
"BSD-2-Clause"
] | null | null | null | debug/debug08.py | adamlukomski/pykron | 5e4c4b840af2cf574dab8417a97b7f0fc4080878 | [
"BSD-2-Clause"
] | null | null | null |
# what not to do - use empty function
# race conditions may cause it not to finish right away
# but the timeout will catch it
# UPDATE: solved in d9c4fad
import sys
sys.path.append('..')
from pykron.core import Pykron, PykronLogger
import time
app = Pykron()
@app.AsyncRequest(timeout=0.5)
def fun4():
return 1
logger = PykronLogger.getInstance()
result = fun4().wait_for_completed()
print('result',result)
app.close()
| 16.037037 | 55 | 0.727483 | 64 | 433 | 4.890625 | 0.765625 | 0.031949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019391 | 0.166282 | 433 | 26 | 56 | 16.653846 | 0.847645 | 0.334873 | 0 | 0 | 0 | 0 | 0.028369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0.083333 | 0.416667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7005d5f528431853e55b70e8b2c13f2e752e74a6 | 8,563 | py | Python | (sandbox,tobemerged)/pythongis/vector/(old)/geom_safe.py | karimbahgat/pythongis | 1042ea14de4e2aafd55de4e01d86b7d379d55999 | [
"MIT"
] | 4 | 2015-12-05T14:31:55.000Z | 2018-02-09T05:54:36.000Z | (sandbox,tobemerged)/pythongis/vector/(old)/geom_safe.py | karimbahgat/pythongis | 1042ea14de4e2aafd55de4e01d86b7d379d55999 | [
"MIT"
] | null | null | null | (sandbox,tobemerged)/pythongis/vector/(old)/geom_safe.py | karimbahgat/pythongis | 1042ea14de4e2aafd55de4e01d86b7d379d55999 | [
"MIT"
] | 1 | 2018-10-24T01:08:11.000Z | 2018-10-24T01:08:11.000Z | import sys, os, itertools, operator
import datetime
import shapely
from shapely.geometry import asShape as geoj2geom
from shapely.geometry import mapping as geom2geoj
import rtree
from . import loader
class Feature:
def __init__(self, table, row, geometry):
"geometry must be a geojson dictionary or a shapely geometry instance"
self._table = table
self.row = list(row)
if isinstance(geometry, dict): geometry = geoj2geom(geometry)
self.geometry = geometry # maybe need to copy geometry?
self._cached_bbox = None
def __getitem__(self, i):
if isinstance(i, (str,unicode)):
i = self._table.fields.index(i)
return self.row[i]
def __setitem__(self, i, setvalue):
if isinstance(i, (str,unicode)):
i = self._table.fields.index(i)
self.row[i] = setvalue
@property
def bbox(self):
if not self._cached_bbox:
self._cached_bbox = self.geometry.bounds
return self._cached_bbox
def copy(self):
        return Feature(self._table, self.row, self.geometry)
class GeoTable:
def __init__(self, filepath=None):
if filepath:
fields,rows,geometries = loader.from_file(filepath)
else:
fields,rows,geometries = [],[],[]
self.fields = fields
self.features = [Feature(self,row,geom) for row,geom in itertools.izip(rows,geometries)]
self.create_spatial_index()
def __len__(self):
return len(self.features)
def __iter__(self):
for feat in self.features:
yield feat
def __getitem__(self, i):
"""
Get one or more Features of data.
"""
return self.features[i]
@property
def bbox(self):
xmins, xmaxs, ymins, ymaxs = itertools.izip(*(feat.bbox for feat in self))
xmin, xmax = min(xmins), max(xmaxs)
ymin, ymax = min(ymins), max(ymaxs)
bbox = (xmin, ymin, xmax, ymax)
return bbox
###### SPATIAL INDEXING #######
def create_spatial_index(self):
self.spindex = rtree.index.Index()
i = 0
for feat in self:
self.spindex.insert(i, feat.bbox)#, obj=feat)
i += 1
def intersecting(self, bbox):
results = self.spindex.intersection(bbox)
        return (self[i] for i in results)
def nearest(self, bbox):
results = self.spindex.nearest(bbox)
        return (self[i] for i in results)
###### GENERAL #######
def save(self, savepath, **kwargs):
fields = self.fields
rowgeoms = ((feat.row,feat.geometry) for feat in self)
rows, geometries = itertools.izip(*rowgeoms)
saver.to_file(fields, rows, geometries, savepath, **kwargs)
def copy(self):
new = GeoTable()
new.fields = [field for field in self.fields]
        new.features = [Feature(new, feat.row, feat.geometry) for feat in self.features]
        new.create_spatial_index()
return new
###### FIELDS #######
def addfield(self, field):
self.fields.append(field)
for row in self.rows:
row.append(MISSING)
def keepfields(self, *fields):
pass
def dropfields(self, *fields):
pass
###### SELECT #######
def iter_select(self, query):
"return a generator of True False for each row's query result"
# MAYBE ALSO ADD SUPPORT FOR SENDING A TEST FUNCTION
for row in self:
# make fields into vars
for field in self.fields:
value = row[self.fields.index(field)]
if isinstance(value, (unicode,str)):
value = '"""'+str(value).replace('"',"'")+'"""'
elif isinstance(value, (int,float)):
value = str(value)
code = "%s = %s"%(field,value)
exec(code)
# run and retrieve query value
yield eval(query)
def select(self, query):
outtable = self.copy(copyrows=False)
for row,keep in zip(self,self.iter_select(query)):
if keep:
outtable.append(row)
return outtable
def exclude(self, query):
outtable = Table()
for row,drop in zip(self,self.iter_select(query)):
if not drop:
outtable.append(row)
return outtable
###### GROUP #######
def split(self, splitfields):
"""
Sharp/distinct groupings.
"""
fieldindexes = [self.fields.index(field) for field in splitfields]
temprows = sorted(self.rows, key=operator.itemgetter(*fieldindexes))
for combi,rows in itertools.groupby(temprows, key=operator.itemgetter(*fieldindexes) ):
table = self.copy(copyrows=False)
table.rows = list(rows)
table.name = str(combi)
yield table
def aggregate(self, groupfields, fieldmapping=[]):
"""
...choose to aggregate into a summary value, OR into multiple fields (maybe not into multiple fields, for that use to_fields() afterwards...
...maybe make flexible, so aggregation can be on either unique fields, or on an expression or function that groups into membership categories (if so drop membership() method)...
"""
if fieldmapping: aggfields,aggtypes = zip(*fieldmapping)
aggfunctions = dict([("count",len),
("sum",sum),
("max",max),
("min",min),
("average",stats.average),
("median",stats.median),
("stdev",stats.stdev),
("most common",stats.most_common),
("least common",stats.least_common) ])
outtable = self.copy(copyrows=False)
fieldindexes = [self.fields.index(field) for field in groupfields]
temprows = sorted(self.rows, key=operator.itemgetter(*fieldindexes))
for combi,rows in itertools.groupby(temprows, key=operator.itemgetter(*fieldindexes) ):
if not isinstance(combi, tuple):
combi = tuple([combi])
# first the groupby values
newrow = list(combi)
# then the aggregation values
if fieldmapping:
columns = zip(*rows)
selectcolumns = [columns[self.fields.index(field)] for field in aggfields]
for aggtype,values in zip(aggtypes,selectcolumns):
aggfunc = aggfunctions[aggtype]
aggvalue = aggfunc(values)
newrow.append(aggvalue)
outtable.append(newrow)
outtable.fields = groupfields
if fieldmapping: outtable.fields.extend(aggfields)
return outtable
###### CREATE #######
def compute(self, fieldname, expression, query=None):
# NOTE: queries and expressions currently do not validate
# that value types are of the same kind, eg querying if a number
# is bigger than a string, so may lead to weird results or errors.
if not fieldname in self.fields:
self.addfield(fieldname)
expression = "result = %s" % expression
for row in self:
# make fields into vars
for field in self.fields:
value = row[self.fields.index(field)]
if isinstance(value, (unicode,str)):
value = '"""'+str(value).replace('"',"'")+'"""'
elif isinstance(value, (int,float)):
value = str(value)
code = "%s = %s"%(field,value)
exec(code)
# run and retrieve expression value
if not query or (eval(query) == True):
exec(expression)
row[self.fields.index(fieldname)] = result
return self
###### CONNECT #######
def join(self, othertable, query):
"""
...
"""
pass
def relate(self, othertable, query):
"""maybe add a .relates attribute dict to each row,
with each relate dict entry being the unique tablename of the other table,
containing another dictionary with a "query" entry for that relate,
and a "links" entry with a list of rows pointing to the matching rows in the other table.
"""
pass
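# Minimal usage sketch (added; not part of the original module): the file name is
# a placeholder and loader support for that format is assumed.
if __name__ == '__main__':
    layer = GeoTable('example_polygons.shp')
    print(len(layer), 'features loaded, extent:', layer.bbox)
    for feat in layer:
        print(feat.row, feat.geometry.geom_type)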
| 30.045614 | 185 | 0.564288 | 957 | 8,563 | 4.99373 | 0.254963 | 0.031387 | 0.012555 | 0.013601 | 0.263026 | 0.216782 | 0.207993 | 0.201716 | 0.171584 | 0.156518 | 0 | 0.000868 | 0.327105 | 8,563 | 284 | 186 | 30.151408 | 0.828532 | 0.152633 | 0 | 0.281437 | 0 | 0 | 0.031399 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155689 | false | 0.023952 | 0.041916 | 0.011976 | 0.287425 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70069527a46c915cd2978c2c7d78d1acde10b272 | 1,970 | py | Python | 2021/five/day10.py | BenningtonComputing/adventofcode | f162163862e122ac15495e14d138601379fab382 | [
"MIT"
] | null | null | null | 2021/five/day10.py | BenningtonComputing/adventofcode | f162163862e122ac15495e14d138601379fab382 | [
"MIT"
] | null | null | null | 2021/five/day10.py | BenningtonComputing/adventofcode | f162163862e122ac15495e14d138601379fab382 | [
"MIT"
] | 1 | 2021-12-27T09:31:30.000Z | 2021-12-27T09:31:30.000Z | #!/usr/bin/env python3
"""
day10-2021.py - my solution to day 10 of advent of code 2021.
the link to the problem is:
https://adventofcode.com/2021/day/10
use by running `./aoc-day10-2021.py [input]`
this code was originally posted here:
https://gist.github.com/fivegrant/8e451be44b89ddcfe63e46532bf18821
"""
# Snag Data \ stole this from my day 9 code
import sys
data_path = sys.argv[1]
with open(data_path) as f:
raw_data = f.readlines()
data = [x.strip("\n") for x in raw_data]
def inverted(mapping):
return {value: key for key, value in mapping.items()}
pairs = {
"{": "}",
"[": "]",
"(": ")",
"<": ">"
}
point_values = {
"}": 1197,
"]": 57,
")": 3,
">": 25137
}
auto_values = {
"{": 3,
"[": 2,
"(": 1,
"<": 4
}
class Stack:
def __init__(self, string):
self.pile = ""
self.string = string
self.position = 0
def step(self):
if self.position >= len(self.string): return -1
current = self.string[self.position]
if current in pairs:
self.pile += current
self.position += 1
return 0
elif pairs[self.pile[-1]] == current:
self.pile = self.pile[:-1]
self.position += 1
return 0
else: # ERROR!
return point_values[current]
def autocomplete(self):
points = 0
while points == 0:
points = self.step()
points = 0
for s in reversed(self.pile):
points *= 5
points += auto_values[s]
return points
def score(line):
stack = Stack(line)
points = 0
while points == 0:
points = stack.step()
return points if points != -1 else 0
corrupted = [score(x) for x in data]
incomplete = [data[i] for i in range(len(corrupted)) if corrupted[i] == 0]
# Part I
print(f'part i: {sum(corrupted)}')
# Part II
completed = sorted([Stack(x).autocomplete() for x in incomplete])
print(f'part ii: {completed[len(completed)//2]}')
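# Worked example (added; not in the original solution), taken from the puzzle
# statement: in "{([(<{}[<>[]}>{[]{[(<()>" the first illegal character is '}',
# which is worth 1197 points.
assert score("{([(<{}[<>[]}>{[]{[(<()>") == 1197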
| 21.648352 | 81 | 0.57868 | 268 | 1,970 | 4.208955 | 0.399254 | 0.042553 | 0.015957 | 0.033688 | 0.079787 | 0.044326 | 0 | 0 | 0 | 0 | 0 | 0.056211 | 0.268528 | 1,970 | 90 | 82 | 21.888889 | 0.726579 | 0.230457 | 0 | 0.142857 | 0 | 0 | 0.053892 | 0.01996 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0 | 0.015873 | 0.015873 | 0.206349 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7006ed71317775974197d8422ff224edfdfa01ac | 9,264 | py | Python | nxsdk_modules_ncl/epl/src/computeResults.py | biagiom/models | 79489a3c429b3027dd420840bbccfee5e8c9a879 | [
"BSD-3-Clause"
] | 54 | 2020-03-04T17:37:17.000Z | 2022-02-22T13:16:10.000Z | nxsdk_modules_ncl/epl/src/computeResults.py | biagiom/models | 79489a3c429b3027dd420840bbccfee5e8c9a879 | [
"BSD-3-Clause"
] | 9 | 2020-08-26T13:17:54.000Z | 2021-11-09T09:02:00.000Z | nxsdk_modules_ncl/epl/src/computeResults.py | biagiom/models | 79489a3c429b3027dd420840bbccfee5e8c9a879 | [
"BSD-3-Clause"
] | 26 | 2020-03-18T17:09:34.000Z | 2021-11-22T16:23:14.000Z | # Copyright(c) 2019-2020 Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable-all
import matplotlib.pyplot as plt
import math
import pickle
import numpy as np
def dot(l1=[], l2=[]):
""" computes dot product of 2 vectors"""
sum1 = 0
for i in range(0, len(l1)):
sum1 = (l1[i] * l2[i]) + sum1
return sum1
# Function to compute 2-norm of a list
def norm(l1=[]):
""" computes norm of 2 vectors"""
d = 0
for i in range(0, len(l1)):
d = d + ((l1[i]) ** 2)
return math.sqrt(d)
# Function to compute cosine similarity index of two lists
def cosine_similarity(l1=[], l2=[]):
""" computes cosine similarity"""
dot_product = dot(l1, l2)
norm_a = norm(l1)
norm_b = norm(l2)
denom = norm_a * norm_b
if (denom != 0):
out = round(float(dot_product) / (denom), 4)
else:
out = 0
return out
def hammingSimilarity(l1=[], l2=[]):
""" computes hamming similarity """
hammingD = 0
nsensors = len(l1) # using the total number of non-zero sensors per odor
nNonZero = len(l1)
# nsensors = len(l1)
for i in range(0, nsensors):
if l1[i] != l2[i]:
hammingD += 1
if l1[i] == 0 and l2[i] == 0:
nNonZero = nNonZero - 1
# ratio = float(hammingD)/nNonZero
ratio = float(hammingD) / nsensors
hammingS = round(1 - ratio, 2)
return hammingS
def jaccardSimilarity(l1=[], l2=[]):
""" computes Jaccard similarity"""
list1 = []
list2 = []
for i in range(0, len(l1)):
list1.append((i, l1[i]))
list2.append((i, l2[i]))
set1 = set(list1)
set2 = set(list2)
intersectionSize = len(set.intersection(set1, set2))
unionSize = len(set.union(set1, set2))
# print intersectionSize, unionSize;
return round(intersectionSize/float(unionSize), 4)
def computeSimilarity(l1, l2):
""" computes similarity index """
return jaccardSimilarity(l1, l2)
def findPrediction(SImatrix_gamma, nACh=1, pThreshold=0.75):
""" computes the correct classifications"""
pValues = []
pValuesNaive = []
maxSI = 0
maxSIindex = 'x'
maxSInaive = 0
maxSInaiveIndex = 'x'
k = 0
gammaIndex = 0
AChCnt = [0] * nACh # counts number of correct classifications at each ACh level
for i in range(0, len(SImatrix_gamma)):
for j in range(0, len(SImatrix_gamma[i])):
if (SImatrix_gamma[i][j] > maxSI):
maxSI = SImatrix_gamma[i][j]
maxSIindex = j
AChID = k // 10
if (gammaIndex == 0):
maxSInaive = SImatrix_gamma[i][j]
maxSInaiveIndex = j
k += 1
if (k == 10 * nACh):
if (maxSI >= pThreshold):
pValues.append(maxSIindex)
AChCnt[AChID] += 1
else:
pValues.append('x')
# if(maxSInaive>=pThreshold):
if (maxSInaive >= pThreshold):
pValuesNaive.append(maxSInaiveIndex)
else:
pValuesNaive.append('x')
maxSI = 0
maxSIindex = 'x'
maxSInaive = 0
maxSInaiveIndex = 'x'
k = 0
gammaIndex += 1
if (gammaIndex == 10):
gammaIndex = 0
return pValues, AChCnt, pValuesNaive
def computeClassification(pValues, nTestPerOdor, nodors):
""" computes the classification accuracy"""
currentOdorId = 0
k = 0
percentCorrect = 0
for i in range(0, len(pValues)):
if (pValues[i] == currentOdorId):
percentCorrect += 1
k += 1
if (k == nTestPerOdor):
currentOdorId += 1
k = 0
return percentCorrect
def computeResults(nGammaPerTraining, trainingSetSize, testSetSize,
nsensors=72, verbose=False, gammaCode=None,
similarityThreshold=0.75):
"""evaluates the performance of the EPL network"""
nodors = trainingSetSize
nNoiseLevels = 1
nTestPerOdor = testSetSize/nodors
nACh = 1
precedenceCodeLearned = []
# this stores results from 1st gamma cycle to measure performance with naive representation
precedenceCodeNaive = []
if gammaCode is None:
pickedfilename = '/.spikes.pi'
rf = open(pickedfilename, 'rb')
precedenceCodeGamma = pickle.load(rf)
rf.close()
else:
precedenceCodeGamma = gammaCode
# Find learned precedence codes
for i in range(0, nodors):
# labelGamma = 2 * i * 2 * 5 + 2 * 5 # labeling period
labelGamma = nGammaPerTraining*nodors + 5*2*i
precedenceCodeNaive.append(precedenceCodeGamma[labelGamma])
labelGamma = labelGamma + 4 # last gamma cycle of label
# -1 because first gamma missing in simulation
precedenceCodeLearned.append(precedenceCodeGamma[labelGamma])
# Compute similarity of test odors to learned odors at every gamma
# -1 because first gamma in simulation is missing
testThetaStart = nGammaPerTraining*nodors + 5*2*nodors
SImatrix_gamma = []
SImatrix_gammaNaive = []
gammaIndex = 0
# for i in range(testThetaStart, len(precedenceCodeGamma)-1):
for i in range(testThetaStart, len(precedenceCodeGamma)):
similarityIndices = []
similarityIndicesNaive = []
for k in range(0, nodors):
# SI = cosine_similarity(precedenceCodeGamma[i], precedenceCodeLearned[k])
if (gammaIndex < 5):
SI = computeSimilarity(precedenceCodeGamma[i],
precedenceCodeLearned[k])
similarityIndices.append(SI)
# SInaive = cosine_similarity(precedenceCodeGamma[i], precedenceCodeNaive[k])
SInaive = computeSimilarity(precedenceCodeGamma[i],
precedenceCodeNaive[k])
similarityIndicesNaive.append(SInaive)
else:
similarityIndices.append(0)
similarityIndicesNaive.append(0)
gammaIndex += 1
if (gammaIndex == 10):
gammaIndex = 0
SImatrix_gamma.append(similarityIndices)
SImatrix_gammaNaive.append(similarityIndicesNaive)
# Printing
for i in precedenceCodeGamma:
# print(i[0:10])
pass
if verbose:
for i in SImatrix_gamma:
print(i)
pass
# Find predictions and compute classification of EPL results
pValues, AChCnt, pValuesNaive = findPrediction(SImatrix_gamma, nACh=nACh,
pThreshold=similarityThreshold)
for i in range(0, len(pValues)):
# print(pValues[i])
pass
percentCorrect = []
percentCorrectNaive = []
for i in range(0, nNoiseLevels):
indexStart = int(nodors * nTestPerOdor * i)
indexEnd = int(indexStart + nodors * nTestPerOdor)
percentCorrect.append(
computeClassification(pValues[indexStart:indexEnd], nTestPerOdor,
nodors=nodors))
percentCorrectNaive.append(
computeClassification(pValuesNaive[indexStart:indexEnd],
nTestPerOdor, nodors=nodors))
for i in range(0, len(percentCorrect)):
percentCorrect[i] = 100 * round(
percentCorrect[i] / float(nodors * nTestPerOdor), 2)
# Printing info
print("*****Execution Report*****")
print("{} patterns presented. {} test samples for each pattern".format(
nodors, int(nTestPerOdor)))
print("""Classification performance = {}%; for similarity threshold = {}
""".format(percentCorrect[0], similarityThreshold))
return percentCorrect[0]
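# Minimal usage sketch (added; not part of the original module): run the
# evaluation on an in-memory gamma code instead of the pickled default. The
# nested list below only has a plausible shape; it is not meaningful spike data.
if __name__ == '__main__':
    dummy_gamma_code = [[0] * 72 for _ in range(400)]
    accuracy = computeResults(nGammaPerTraining=5, trainingSetSize=2,
                              testSetSize=10, gammaCode=dummy_gamma_code)
    print("classification accuracy: {}%".format(accuracy))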
| 35.358779 | 95 | 0.619603 | 1,010 | 9,264 | 5.662376 | 0.29703 | 0.009792 | 0.014688 | 0.023081 | 0.129218 | 0.099668 | 0.087952 | 0.060675 | 0.043364 | 0.043364 | 0 | 0.023196 | 0.287997 | 9,264 | 261 | 96 | 35.494253 | 0.843845 | 0.304512 | 0 | 0.201149 | 0 | 0 | 0.027922 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0.017241 | 0.022989 | 0 | 0.126437 | 0.022989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70093c19345ca7b163a2cb4f6904b25e16e97891 | 983 | py | Python | Spider.py | yxg995995/yxg | 7865cb8bd6fca4d59278365afdb96529a7757386 | [
"MIT"
] | null | null | null | Spider.py | yxg995995/yxg | 7865cb8bd6fca4d59278365afdb96529a7757386 | [
"MIT"
] | null | null | null | Spider.py | yxg995995/yxg | 7865cb8bd6fca4d59278365afdb96529a7757386 | [
"MIT"
] | null | null | null | import requests
import json
from DataBase import *
def getdaydata():  # crawl the stock's daily prices and insert them into the database
durl='''http://quotes.money.163.com/service/chddata.html?code=1000001&start=20200106&end=20210106&
fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP'''
r=requests.get(durl)
data=r.text.splitlines()
n=len(data)
for i in range(n-1):
insertdayTB(data[-i-1].split(',')[0],data[-i-1].split(',')[2],data[-i-1].split(',')[6],
data[-i-1].split(',')[3],data[-i-1].split(',')[5],data[-i-1].split(',')[4])
# crawl a stock's minute-by-minute prices for one day (09:30-15:00) and insert them into the database
# note: this only fetches today's data from 09:30 up to the current time, so make sure earlier data has already been saved
def getmindata():
url='http://pdfm.eastmoney.com/EM_UBG_PDTI_Fast/api/js?rtntype=5&id=0000012&type=r&iscr=false'
r=requests.get(url)
l=r.text.strip('(')
l1=l.strip(')')
data=json.loads(l1)
for i in data['data']:
insertminTB(i.split(',')[0], i.split(',')[1])
if __name__=='__main__':
getmindata() | 39.32 | 102 | 0.641913 | 144 | 983 | 4.305556 | 0.597222 | 0.048387 | 0.058065 | 0.106452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071934 | 0.137335 | 983 | 25 | 103 | 39.32 | 0.659198 | 0.087487 | 0 | 0 | 0 | 0.136364 | 0.332961 | 0.094972 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
700adc4ff1b09523fb554fd02562d6b99ea12187 | 3,941 | py | Python | testLogReader/testingUtils.py | simcesplatform/LogReader | eadeb7061df6fccdd0c7845284ab1a85df1bc20c | [
"MIT"
] | null | null | null | testLogReader/testingUtils.py | simcesplatform/LogReader | eadeb7061df6fccdd0c7845284ab1a85df1bc20c | [
"MIT"
] | null | null | null | testLogReader/testingUtils.py | simcesplatform/LogReader | eadeb7061df6fccdd0c7845284ab1a85df1bc20c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Otto Hylli <otto.hylli@tuni.fi> and Ville Heikkilä <ville.heikkila@tuni.fi>
'''
Helper functions used with tests.
'''
import csv
import io
import unittest
from typing import List, Dict
from falcon import testing
from LogReader.app import api
from LogReader.db.simulations import simIdAttr
class ApiTest(testing.TestCase):
'''
Super class for api tests which gets the falcon api instance.
'''
def setUp(self):
super(ApiTest, self).setUp()
self.app = api
def checkSimulations( test: unittest.TestCase, resultSims: List[dict], expectedSims: List[dict]):
'''
Check by simulation id that results and expected simulations are the same.
test: Test case which uses this so we can use its assert methods.
resultSims: List of simulations.
expectedSims: List of simulations.
'''
checkItemsById( test, simIdAttr, resultSims, expectedSims )
def checkMessages( test: unittest.TestCase, resultMsgs: List[dict], expectedMsgs: List[dict] ):
'''
Check by message id that the list of result messages matches with the list of expected messages.
'''
checkItemsById( test, 'MessageId', resultMsgs, expectedMsgs )
def checkItemsById( test: unittest.TestCase, idAttr: str, result: List[dict], expected: List[dict] ):
'''
Check by id that results and expected are the same.
test: Test case which uses this so we can use its assert methods.
idAttr: Name of attribute containing the id of the item used in comparison.
result: List of items.
expected: List of expected items.
'''
# get ids of results and expected and check they contain the same.
ids = [ item[ idAttr ] for item in result ]
expectedIds = [ item[ idAttr ] for item in expected ]
test.assertCountEqual( ids, expectedIds, 'Did not get the expected items.' )
def checkCsv( test: unittest.TestCase, result: str, expected: csv.DictReader, delimiter: str = ';' ):
'''
Check that result and expected csv contain the same data.
'''
# create a csv DictReader from result string.
result = io.StringIO( result, newline = '' )
result = csv.DictReader( result, delimiter = ';' )
# check that both have the same column titles
resultHeaders = set( result.fieldnames )
expectedHeaders = set( expected.fieldnames )
test.assertEqual( resultHeaders, expectedHeaders, 'Result and expected should have the same headers.' )
# check the csvs line by line
line = 1
for expectedRow in expected:
line += 1
try:
resultRow = next( result )
except StopIteration:
test.fail( f'No more rows in result but was expecting a row containing: {expectedRow}.' )
test.assertEqual( resultRow, expectedRow, f'Result and expected rows do not match on line {line}.' )
# result should not have more rows
with( test.assertRaises( StopIteration, msg = 'Result has more rows than expected.' )):
next( result )
def getTestDataResultFileName( testName: str, scenarioName: str, actual: bool = False, fileType: str = 'json' ) -> str:
'''
For time series test get name for a result file for given test and scenario.
Actual True gives the name of actual results file and False the expected results file.
fileType should contain the file type extension.
'''
result = 'result'
if actual:
result = 'actual_result'
# replace spaces in scenario name with underscores
scenarioName = scenarioName.replace( ' ', '_' )
return f'{testName}_{scenarioName}_{result}.{fileType}' | 41.484211 | 119 | 0.689165 | 503 | 3,941 | 5.39165 | 0.383698 | 0.020649 | 0.029499 | 0.016593 | 0.076696 | 0.044985 | 0.044985 | 0.044985 | 0.044985 | 0.044985 | 0 | 0.002297 | 0.226592 | 3,941 | 95 | 120 | 41.484211 | 0.887467 | 0.40269 | 0 | 0 | 0 | 0 | 0.145635 | 0.020353 | 0 | 0 | 0 | 0 | 0.097561 | 1 | 0.146341 | false | 0 | 0.170732 | 0 | 0.365854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
700c2f0c5f5d02e18dff51805b19cfdc82f63425 | 4,111 | py | Python | sqlparse/__init__.py | Yelp/sqlparse | a29c8c1fb827863c6b57d8811ed3b69e982a3877 | [
"BSD-3-Clause"
] | 4 | 2015-03-16T17:08:44.000Z | 2017-02-21T22:33:18.000Z | sqlparse/__init__.py | Yelp/sqlparse | a29c8c1fb827863c6b57d8811ed3b69e982a3877 | [
"BSD-3-Clause"
] | 3 | 2015-09-30T23:53:08.000Z | 2016-05-27T18:37:02.000Z | sqlparse/__init__.py | Yelp/sqlparse | a29c8c1fb827863c6b57d8811ed3b69e982a3877 | [
"BSD-3-Clause"
] | 7 | 2015-03-16T20:55:44.000Z | 2020-06-18T18:17:51.000Z | # Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.14'
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
from sqlparse import lexer
from sqlparse import tokens as T
from sqlparse.engine import grouping
from sqlparse.parsers import SQLParser
# Deprecated in 0.1.5. Will be removed in 0.2.0
from sqlparse.exceptions import SQLParseError
def build_parsers():
parsers = dict()
for cls in SQLParser.__subclasses__():
parsers[cls.dialect] = cls()
return parsers
_parsers = build_parsers()
def parse(sql, encoding=None, dialect=None):
"""Parse sql and return a list of statements.
    :param sql: A string containing one or more SQL statements.
:param encoding: The encoding of the statement (optional).
:param dialect: The sql engine dialect of the input sql statements.
It only supports "mysql" right now. If dialect is not specified,
      the input sql will be parsed using the generic sql syntax. (optional)
:returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
"""
stream = parsestream(sql, encoding, dialect)
return tuple(stream)
def parsestream(stream, encoding=None, dialect=None):
"""Parses sql statements from file-like object.
:param stream: A file-like object.
:param encoding: The encoding of the stream contents (optional).
:param dialect: The sql engine dialect of the input sql statements.
It only supports "mysql" right now. (optional)
:returns: A generator of :class:`~sqlparse.sql.Statement` instances.
"""
parser = _parsers.get(dialect)
if parser is None:
raise Exception("Unable to find parser to parse dialect ({0})."
.format(dialect))
return parser.parse(stream, encoding)
def format(sql, **options):
"""Format *sql* according to *options*.
Available options are documented in :ref:`formatting`.
In addition to the formatting options this function accepts the
keyword "encoding" which determines the encoding of the statement.
:returns: The formatted SQL statement as string.
"""
options = formatter.validate_options(options)
encoding = options.pop('encoding', None)
stream = lexer.tokenize(sql, encoding)
stream = _format_pre_process(stream, options)
stack = engine.FilterStack()
stack = formatter.build_filter_stack(stack, options)
stack.postprocess.append(filters.SerializerUnicode())
statements = split2(stream)
return ''.join(stack.run(statement) for statement in statements)
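# Illustrative usage sketch (not part of the original module); the expected
# output assumes the default filter stack and the keyword_case option handled
# in _format_pre_process() below:
#
#   >>> import sqlparse
#   >>> sqlparse.format('select * from foo', keyword_case='upper')
#   'SELECT * FROM foo'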
def _format_pre_process(stream, options):
pre_processes = []
if options.get('keyword_case', None):
pre_processes.append(
filters.KeywordCaseFilter(options['keyword_case']))
if options.get('identifier_case', None):
pre_processes.append(
filters.IdentifierCaseFilter(options['identifier_case']))
if options.get('truncate_strings', None) is not None:
pre_processes.append(filters.TruncateStringFilter(
width=options['truncate_strings'], char=options['truncate_char']))
return _pre_process(stream, pre_processes)
def _pre_process(stream, pre_processes):
if pre_processes:
for pre_process in pre_processes:
stream = pre_process.process(None, stream)
return stream
def split(sql, encoding=None):
"""Split *sql* into single statements.
    :param sql: A string containing one or more SQL statements.
:param encoding: The encoding of the statement (optional).
:returns: A list of strings.
"""
stream = lexer.tokenize(sql, encoding)
splitter = StatementFilter()
stream = splitter.process(None, stream)
return [unicode(stmt).strip() for stmt in stream]
from sqlparse.engine.filter import StatementFilter
def split2(stream):
splitter = StatementFilter()
return list(splitter.process(None, stream))
| 31.868217 | 78 | 0.711992 | 520 | 4,111 | 5.548077 | 0.3 | 0.037435 | 0.031196 | 0.022184 | 0.267938 | 0.188908 | 0.131023 | 0.131023 | 0.131023 | 0.131023 | 0 | 0.005128 | 0.193627 | 4,111 | 128 | 79 | 32.117188 | 0.865158 | 0.363658 | 0 | 0.1 | 0 | 0 | 0.063377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.15 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
700f946fb5baca6452e61d3f1e007eb88ce90d87 | 1,543 | py | Python | ghost/urls.py | Microcore/Quantic | 727234a5e9e58217eb8235b6d0f27fe1b95f5f83 | [
"MIT"
] | null | null | null | ghost/urls.py | Microcore/Quantic | 727234a5e9e58217eb8235b6d0f27fe1b95f5f83 | [
"MIT"
] | 7 | 2015-04-25T05:04:46.000Z | 2015-04-26T08:24:36.000Z | ghost/urls.py | Microcore/Quantic | 727234a5e9e58217eb8235b6d0f27fe1b95f5f83 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Login view
url(r'^$', 'ghost.views.index_view'),
url(r'^login/$', 'ghost.views.login_view'),
# Logout view
url(r'^logout/$', 'ghost.views.logout_view'),
# New post view
url(r'^newpost/$', 'ghost.views.newpost_view'),
# Save a post ( Ajax view )
url(r'^savepost/$', 'ghost.views.savepost'),
# Options view
url(r'^options/$', 'ghost.views.options_view'),
# Save option values ( Ajax view )
url(r'^saveoptions/$', 'ghost.views.saveoptions'),
# Edit a post
url(r'^editpost/(\d+)/$', 'ghost.views.editpost_view'),
# Post list view
url(r'^posts/$', 'ghost.views.posts_view'),
# Post list view - with page number
url(r'^posts/page/(\d+)/$', 'ghost.views.posts_view'),
# Delete a post
url(r'^deletepost/$', 'ghost.views.delete_post'),
# Comment management view
url(r'^comments/$', 'ghost.views.comments_view'),
# Comment management view - with page number
url(r'^comments/page/(\d+)/$', 'ghost.views.comments_view'),
# Report spam ( Ajax view )
url(r'^comments/reportspam/$', 'ghost.views.reportspam'),
# Delete a comment
url(r'^comments/delete/$', 'ghost.views.deletecomment'),
# Qiniu file upload callback
url(r'^upload/callback/$', 'ghost.views.uploadcallback'),
)
| 37.634146 | 65 | 0.62022 | 197 | 1,543 | 4.80203 | 0.314721 | 0.067653 | 0.084567 | 0.038055 | 0.046512 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0.00081 | 0.200259 | 1,543 | 40 | 66 | 38.575 | 0.765802 | 0.289047 | 0 | 0 | 0 | 0 | 0.564127 | 0.382835 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
700ff1e444ff7f1930953d197f9dc705fff91e95 | 2,094 | py | Python | train_val_split.py | chintak/face_detection | 8370977f084fd66a9ddcc549a1b4ea8b030d23ac | [
"Apache-2.0"
] | 4 | 2016-05-04T11:27:24.000Z | 2018-10-20T15:50:57.000Z | train_val_split.py | chintak/face_detection | 8370977f084fd66a9ddcc549a1b4ea8b030d23ac | [
"Apache-2.0"
] | null | null | null | train_val_split.py | chintak/face_detection | 8370977f084fd66a9ddcc549a1b4ea8b030d23ac | [
"Apache-2.0"
] | null | null | null | import os
import pandas as pd
from pandas import read_csv
import glob
from joblib import Parallel, delayed
import numpy as np
import argparse
def read_facescrub_img_list(folder, actor_label_txt, actress_label_txt, accept_pattern='*.jpg'):
full_names = glob.glob(os.path.join(folder, accept_pattern))
only_names = map(lambda f: os.path.splitext(
os.path.basename(f))[0], full_names)
pd_male = read_csv(actor_label_txt, sep='\t')
del pd_male['url'], pd_male['image_id'], pd_male['face_id']
pd_female = read_csv(actress_label_txt, sep='\t')
del pd_female['url'], pd_female['image_id'], pd_female['face_id']
pd_celeb = pd.concat([pd_male, pd_female], ignore_index=True)
pd_celeb = pd_celeb.drop_duplicates(
subset='sha256', keep='last').set_index('sha256')
bboxes = map(lambda k: pd_celeb.bbox[k], only_names)
return full_names, bboxes
def perform_split(args):
fnames, bboxes = read_facescrub_img_list(
args.train_folder, args.actor_label_path, args.actress_label_path, accept_pattern='*/*.jpg')
np_fnames = np.asarray(fnames)
np_bboxes = np.asarray(bboxes)
train_split = int(round(args.train_split * len(fnames)))
rng = np.random.RandomState(seed=1234)
idx = np.arange(0, len(fnames))
rng.shuffle(idx)
X = np_fnames[idx]
y = np_bboxes[idx]
df = pd.DataFrame({'name': X, 'bbox': y})
df.ix[:train_split].to_csv('train.csv', sep='\t', index=False)
df.ix[train_split + 1:].to_csv('val.csv', sep='\t', index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('train_folder', help='Train folder root path')
parser.add_argument('actor_label_path', help='Path to actors label list')
parser.add_argument('actress_label_path',
help='Path to actresses label list')
parser.add_argument('-s', '--train_split',
help='Train/Val split ratio', type=float)
args = parser.parse_args()
args.train_split = 0.8 if args.train_split is None else args.train_split
perform_split(args)
| 38.777778 | 100 | 0.688634 | 317 | 2,094 | 4.280757 | 0.343849 | 0.058954 | 0.041268 | 0.029477 | 0.116433 | 0.025055 | 0 | 0 | 0 | 0 | 0 | 0.008681 | 0.174785 | 2,094 | 53 | 101 | 39.509434 | 0.77662 | 0 | 0 | 0 | 0 | 0 | 0.124642 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.155556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
701176938ee12ffb5b5372fd1b648cf68178d256 | 508 | py | Python | Ignatov_Mikhail_dz_9/exercise_2.py | HellFrozenRain/GB_Homework_Python_1 | b0baea6b7efecc6bd649618d0aeba93be57a389a | [
"MIT"
] | null | null | null | Ignatov_Mikhail_dz_9/exercise_2.py | HellFrozenRain/GB_Homework_Python_1 | b0baea6b7efecc6bd649618d0aeba93be57a389a | [
"MIT"
] | null | null | null | Ignatov_Mikhail_dz_9/exercise_2.py | HellFrozenRain/GB_Homework_Python_1 | b0baea6b7efecc6bd649618d0aeba93be57a389a | [
"MIT"
] | null | null | null | class Road:
m = 25
    def __init__(self, length: int, width: int):
self._length = length
self._width = width
    def calculation(self, s: int):
print(f' road length: {self._length} m')
print(f' road width: {self._width} m')
        print(f' road thickness: {s} cm')
result = self._length * self._width * self.m * s
return f'The mass of asphalt required to cover the entire road: {result / 1000} t.'
example = Road(5000, 20)
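# Worked check of the call below (assuming m = 25 is the mass factor per m^2 per cm of thickness):
#   5000 * 20 * 25 * 5 = 12,500,000 -> 12,500,000 / 1000 = 12,500 t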
print(example.calculation(5)) | 33.866667 | 91 | 0.612205 | 73 | 508 | 4.123288 | 0.438356 | 0.13289 | 0.099668 | 0.07309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034667 | 0.261811 | 508 | 15 | 92 | 33.866667 | 0.768 | 0 | 0 | 0 | 0 | 0 | 0.300589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.384615 | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70181f30e72e0f68764f53dfd0b38a713dacd8ea | 651 | py | Python | chapter 4 - quicksort/binary-search-recursive.py | kingisaac95/grokking-algorithms | 32cca84f8b6577deb83fa77e1d9dec754b59b776 | [
"Apache-2.0"
] | null | null | null | chapter 4 - quicksort/binary-search-recursive.py | kingisaac95/grokking-algorithms | 32cca84f8b6577deb83fa77e1d9dec754b59b776 | [
"Apache-2.0"
] | null | null | null | chapter 4 - quicksort/binary-search-recursive.py | kingisaac95/grokking-algorithms | 32cca84f8b6577deb83fa77e1d9dec754b59b776 | [
"Apache-2.0"
] | null | null | null | # find position of `target` in subarray nums[left…right]
def binary_search(list, left, right, target):
# base case
if left > right:
return -1
mid = (left + right) // 2
if list[mid] == target:
return mid
elif list[mid] > target:
# move right pointer to the item before mid
return binary_search(list, left, mid - 1, target)
else:
# move left pointer to the item after mid
return binary_search(list, mid + 1, right, target)
my_list = [1, 3, 5, 7, 9]
right = len(my_list) - 1
left = 0
print(binary_search(my_list, left, right, 3))
print(binary_search(my_list, left, right, -1))
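# Expected output of the two calls above: 1 (index of the target 3) and -1 (target -1 is not in the list).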
| 26.04 | 58 | 0.623656 | 101 | 651 | 3.960396 | 0.376238 | 0.15 | 0.12 | 0.1 | 0.285 | 0.16 | 0.16 | 0 | 0 | 0 | 0 | 0.02714 | 0.264209 | 651 | 24 | 59 | 27.125 | 0.80167 | 0.22427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.333333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70190c25c52cf54777c3b3dbdf96f6a2c2775501 | 4,206 | py | Python | scripts/roberta_experiment/roberta_classification_simple.py | sarnthil/emotion-classification-roles | 1fdd3a8cbdac5ab2ad9598a101b763882df78280 | [
"MIT"
] | 1 | 2022-02-17T14:16:20.000Z | 2022-02-17T14:16:20.000Z | scripts/roberta_experiment/roberta_classification_simple.py | sarnthil/emotion-classification-roles | 1fdd3a8cbdac5ab2ad9598a101b763882df78280 | [
"MIT"
] | null | null | null | scripts/roberta_experiment/roberta_classification_simple.py | sarnthil/emotion-classification-roles | 1fdd3a8cbdac5ab2ad9598a101b763882df78280 | [
"MIT"
] | null | null | null | import logging
import random
from pathlib import Path
from datetime import datetime, timezone
import click
import datasets as nlp
import torch
import numpy as np
import pandas as pd
from simpletransformers.classification import ClassificationModel
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
from sklearn.metrics import precision_recall_fscore_support, classification_report
from transformers import DistilBertTokenizerFast
from transformers import Trainer, TrainingArguments
from transformers import DistilBertForTokenClassification
from transformers import RobertaTokenizer
def new_call(self, *args, **kwargs):
return super(type(self), self).__call__(
*args, **kwargs, is_split_into_words=True
)
RobertaTokenizer.__call__ = new_call
def f1_micro(labels, preds):
return f1_score(labels, preds, average="micro")
def f1_macro(labels, preds):
return f1_score(labels, preds, average="macro")
def recall_macro(labels, preds):
return recall_score(labels, preds, average="macro")
def recall_micro(labels, preds):
return recall_score(labels, preds, average="micro")
def precision_macro(labels, preds):
return precision_score(labels, preds, average="macro")
def precision_micro(labels, preds):
return precision_score(labels, preds, average="micro")
def read_data(dataset, split):
texts = []
labels = []
for doc in dataset[split]:
texts.append(" ".join(token for token in doc["sentence"]))
labels.append(doc["emotion"])
return texts, labels
@click.command()
@click.option("--dataset", "-d", required=True)
@click.option("--mask-type", "-m", required=True)
@click.option("--role", "-r")
def cli(dataset, mask_type, role):
if mask_type in ("all", "inbandall"):
name = f"unified_{dataset}_{mask_type}"
else:
if not role:
raise click.BadParameter("Role is missing")
name = f"unified_{dataset}_{mask_type}_{role}"
dataset = nlp.load_dataset("scripts/unified-loader.py", name=name)
train_texts, train_labels = read_data(dataset, "train")
test_texts, test_labels = read_data(dataset, "test")
val_texts, val_labels = read_data(dataset, "validation")
unique_labels = set(train_labels)
label2id = {label: id for id, label in enumerate(unique_labels)}
id2label = {id: label for label, id in label2id.items()}
train_data = []
for train_text, train_label in zip(train_texts, train_labels):
train_data.append([train_text, label2id[train_label]])
train_df = pd.DataFrame(train_data)
train_df.columns = ["text", "label"]
eval_data = []
for test_text, test_label in zip(test_texts, test_labels):
eval_data.append([test_text, label2id[test_label]])
eval_df = pd.DataFrame(eval_data)
eval_df.columns = ["text", "label"]
logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
# Create a ClassificationModel
model = ClassificationModel(
"roberta",
"roberta-base",
num_labels=len(unique_labels),
args={
"reprocess_input_data": True,
"save_eval_checkpoints": False,
"save_model_every_epoch": False,
"overwrite_output_dir": True,
"num_train_epochs": 5,
"n_gpu": 3,
"learning_rate": 5e-5,
"use_early_stopping": True,
"early_stopping_patience": 3,
"manual_seed": 4,
"no_cache": True,
},
)
# Train the model
model.train_model(train_df)
# Evaluate the model
result, model_outputs, wrong_predictions = model.eval_model(
eval_df, acc=accuracy_score, f1_micro=f1_micro, f1_macro=f1_macro, recall_macro=recall_macro, recall_micro=recall_micro, precision_macro=precision_macro, precision_micro=precision_micro)
    predictions = np.argmax(model_outputs, axis=1)  # derive class predictions from the raw model outputs
    print(id2label[predictions[0]])
now = int(datetime.now().astimezone(timezone.utc).timestamp())
source = Path("outputs/eval_results.txt")
target = Path(f"results/eval-{name}-{now}.txt")
source.rename(target)
if __name__ == "__main__":
cli()
| 31.62406 | 194 | 0.697813 | 524 | 4,206 | 5.354962 | 0.324427 | 0.047042 | 0.036351 | 0.04918 | 0.138275 | 0.138275 | 0.107627 | 0.097648 | 0 | 0 | 0 | 0.006454 | 0.189491 | 4,206 | 132 | 195 | 31.863636 | 0.816662 | 0.014979 | 0 | 0 | 0 | 0 | 0.121044 | 0.050495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.161616 | 0.070707 | 0.333333 | 0.010101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
701cc13b80e73e6036a93390352d580fb8410617 | 2,552 | py | Python | libraries/numbers.py | yoratoni/Euler-Python | 8272eed12162a9bdc33302554bb1af30e56a3e8b | [
"MIT"
] | null | null | null | libraries/numbers.py | yoratoni/Euler-Python | 8272eed12162a9bdc33302554bb1af30e56a3e8b | [
"MIT"
] | null | null | null | libraries/numbers.py | yoratoni/Euler-Python | 8272eed12162a9bdc33302554bb1af30e56a3e8b | [
"MIT"
] | null | null | null | from math import floor, ceil, sqrt, log2
from libraries import Digits
class Numbers:
@staticmethod
def is_perfect_square(n: int) -> bool:
'''Returns True if "n" is a perfect square.
Explanations:
https://www.quora.com/What-is-the-quickest-way-to-determine-if-a-number-is-a-perfect-square
Args:
n (int): The number to check.
Returns:
bool: True if the number is a perfect square.
'''
perfect_ends = [0, 1, 4, 5, 6, 9]
if n % 10 in perfect_ends:
perfect_sums = [1, 4, 7, 9]
digits_sum = Digits.sum_digits(Digits.sum_digits(n))
if digits_sum in perfect_sums:
root = sqrt(n)
if floor(root) == ceil(root):
return True
return False
@staticmethod
def average(k: list[int]) -> float:
'''Returns the average value of an integers list.
Args:
k (list[int]): List of integers.
Returns:
float: The average.
'''
return sum(k) * (1 / len(k))
@staticmethod
def variance(k: list[int]) -> float:
'''Returns the variance of an integers list.
Args:
k (list[int]): List of integers.
Returns:
float: The variance.
'''
avr = Numbers.average(k)
res = 0
for elem in k:
res += (elem - avr)**2
return res
@staticmethod
def is_k_number(quadruple: list[int]) -> bool:
'''Returns True if the quadruple is a k-number (PB-791)
which is a quadruple with these properties:
- 1 <= a <= b <= c <= d <= n
- Average * 2 == Variance
Args:
quadruple (list[int]): A list of 4 integers.
Returns:
bool: True if the quadruple is a k-number.
'''
avr = Numbers.average(quadruple)
var = 0
for elem in quadruple:
var += (elem - avr)**2
if avr * 2 == var:
return True
return False
@staticmethod
def highest_power_of_two(n: int) -> int:
        '''Given a number n, returns the exponent of the highest power of 2 that divides n.
Args:
n (int): The number n.
Returns:
            int: The exponent k such that 2**k is the highest power of 2 dividing n.
'''
return int(log2(n & (~(n - 1))))
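if __name__ == '__main__':
    # Illustrative sanity checks, not part of the original library (assumes the
    # `libraries` package providing Digits is importable when run directly):
    assert Numbers.is_perfect_square(144)          # ends in 4, digit sum reduces to 9, sqrt(144) == 12
    assert not Numbers.is_perfect_square(143)      # 3 can never be the last digit of a perfect square
    assert Numbers.average([1, 2, 3, 4]) == 2.5
    assert Numbers.highest_power_of_two(48) == 4   # 48 = 2**4 * 3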
| 23.412844 | 99 | 0.487461 | 309 | 2,552 | 3.977346 | 0.28479 | 0.014646 | 0.026037 | 0.039056 | 0.379984 | 0.234337 | 0.138324 | 0.138324 | 0.092758 | 0.092758 | 0 | 0.020027 | 0.413009 | 2,552 | 108 | 100 | 23.62963 | 0.800401 | 0.362853 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.055556 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
701f2a15136d5b62583702284b584bce2f46610e | 7,131 | py | Python | s4/clarity/utils/artifact_ancestry.py | AvroNelson/s4-clarity-lib | af6f2ec77cdb7f942f5ea3990a61950aa2a1b3a4 | [
"MIT"
] | 11 | 2019-04-11T16:29:36.000Z | 2022-01-31T18:32:27.000Z | s4/clarity/utils/artifact_ancestry.py | AvroNelson/s4-clarity-lib | af6f2ec77cdb7f942f5ea3990a61950aa2a1b3a4 | [
"MIT"
] | 9 | 2019-04-13T17:08:39.000Z | 2021-12-07T23:31:01.000Z | s4/clarity/utils/artifact_ancestry.py | AvroNelson/s4-clarity-lib | af6f2ec77cdb7f942f5ea3990a61950aa2a1b3a4 | [
"MIT"
] | 10 | 2019-04-13T14:29:43.000Z | 2021-04-01T01:35:51.000Z | # Copyright 2017 Semaphore Solutions, Inc.
# ---------------------------------------------------------------------------
from collections import defaultdict
def get_parent_artifacts(lims, artifacts):
"""
Helper method to get the parent artifacts keyed to the supplied artifacts
:param LIMS lims:
:param list[Artifact] artifacts: The artifacts to get parent artifacts for
:rtype: dict[Artifact, list[Artifact]]
"""
artifact_to_parent_artifacts = defaultdict(list)
artifacts_to_batch_fetch = []
for artifact in artifacts:
if artifact.parent_step:
# Ugly list comprehension that covers pooled inputs and replicates
artifact_to_parent_artifacts[artifact] = [input_artifact for iomap in artifact.parent_step.details.iomaps
for input_artifact in iomap.inputs
if any(output.limsid == artifact.limsid for output in iomap.outputs)]
artifacts_to_batch_fetch += artifact_to_parent_artifacts[artifact]
else:
# Without a parent_step, we've reached the end of the artifact history
artifact_to_parent_artifacts[artifact] = []
if artifact_to_parent_artifacts:
lims.artifacts.batch_fetch(set(artifacts_to_batch_fetch))
return artifact_to_parent_artifacts
def get_udfs_from_artifacts_or_ancestors(lims, artifacts_to_get_udf_from, required_udfs=None, optional_udfs=None):
"""
Walks the genealogy for each artifact in the artifacts_to_get_udf_from list and gets the value for udf_name from the
supplied artifact, or its first available ancestor that has a value for the UDF.
NOTE: The method will stop the search upon reaching any pooling step.
:param LIMS lims:
:param list[Artifact] artifacts_to_get_udf_from: the list of artifacts whose ancestors should be inspected for the udf. Passed
down recursively until all artifacts have been satisfied.
:param list[str] required_udfs: The list of UDFs that *must* be found. Exception will be raised otherwise.
:param list[str] optional_udfs: The list of UDFs that *can* be found, but do not need to be.
:rtype: dict[s4.clarity.Artifact, dict[str, str]]
:raises UserMessageException: if values can not be retrieved for all required_udfs for all of the provided artifacts
"""
if not required_udfs and not optional_udfs:
raise Exception("The get_udfs_from_artifacts_or_ancestors method must be called with at least one "
"of the required_udfs or optional_udfs parameters.")
required_udfs = required_udfs or []
optional_udfs = optional_udfs or []
# Assemble the dictionaries for the internal methods
ancestor_artifact_to_original_artifact = {}
original_artifact_to_udfs = {}
for artifact in artifacts_to_get_udf_from:
ancestor_artifact_to_original_artifact[artifact] = [artifact]
original_artifact_to_udfs[artifact] = {}
for name in (required_udfs + optional_udfs):
original_artifact_to_udfs[artifact][name] = artifact.get(name, None)
artifacts_to_udfs = _get_udfs_from_ancestors_internal(
lims, ancestor_artifact_to_original_artifact, original_artifact_to_udfs)
if required_udfs:
_validate_required_ancestor_udfs(artifacts_to_udfs, required_udfs)
return artifacts_to_udfs
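# Illustrative call sketch (the LIMS handle, artifact list and UDF names are
# hypothetical and only show the expected shapes of inputs and outputs):
#
#   ancestor_udfs = get_udfs_from_artifacts_or_ancestors(
#       lims, output_artifacts, required_udfs=['Concentration'], optional_udfs=['Volume'])
#   for artifact, udfs in ancestor_udfs.items():
#       print(artifact.name, udfs['Concentration'], udfs.get('Volume'))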
def _validate_required_ancestor_udfs(artifacts_to_udfs, required_udfs):
"""
Validates that all items in the artifacts_to_udfs dict have values for the required_udfs
:type artifacts_to_udfs: dict[s4.clarity.Artifact, dict[str, str]]
:type required_udfs: list[str]
:raises UserMessageException: if any artifact is missing any of the required_udfs
"""
artifacts_missing_udfs = set()
missing_udfs = set()
for artifact, udf_name_to_value in artifacts_to_udfs.items():
for required_udf in required_udfs:
if udf_name_to_value.get(required_udf) in ["", None]:
artifacts_missing_udfs.add(artifact.name)
missing_udfs.add(required_udf)
if artifacts_missing_udfs:
raise Exception("Could not get required values for udf(s) '%s' from ancestors of artifact(s) '%s'." %
("', '".join(missing_udfs), "', '".join(artifacts_missing_udfs)))
def _get_udfs_from_ancestors_internal(lims, current_artifacts_to_original_artifacts, original_artifacts_to_udfs):
"""
Recursive method that gets parent artifacts, and searches them for any udfs that have not yet been filled in
:type lims: s4.clarity.LIMS
:type current_artifacts_to_original_artifacts: dict[s4.clarity.Artifact: list[s4.clarity.Artifact]]
:param current_artifacts_to_original_artifacts: dict of the currently inspected artifact to the original artifact.
:type original_artifacts_to_udfs: dict[s4.clarity.Artifact, dict[str, str]]
:param original_artifacts_to_udfs: dict of the original artifacts to their ancestors' UDF values, which will
get filled in over the recursive calls of this method.
:rtype: dict[s4.clarity.Artifact, dict[str, Any]]
"""
current_artifacts = current_artifacts_to_original_artifacts.keys()
current_artifacts_to_parent_artifacts = get_parent_artifacts(lims, current_artifacts_to_original_artifacts.keys())
# Initialize the 'next to search' dict
next_search_artifacts_to_original_artifacts = defaultdict(list)
for current_artifact in current_artifacts:
if not current_artifacts_to_parent_artifacts[current_artifact]:
# The end of the genealogy has been reached for this artifact
continue
if current_artifact.parent_step.pooling is not None:
# Stop looking when we reach a step with pooled inputs, as ancestor artifacts would likely contain multiple
# values for the UDFs in question
continue
# Can now get a single parent artifact with confidence, as validated it
current_artifact_parent = current_artifacts_to_parent_artifacts[current_artifact][0]
for original_artifact in current_artifacts_to_original_artifacts[current_artifact]:
continue_searching = False
for udf_name, udf_value in original_artifacts_to_udfs[original_artifact].items():
# Don't overwrite values that have already been found
if udf_value is not None:
continue
found_value = current_artifact_parent.get(udf_name, None)
if found_value is None:
continue_searching = True
continue
original_artifacts_to_udfs[original_artifact][udf_name] = found_value
if continue_searching:
next_search_artifacts_to_original_artifacts[current_artifact_parent].append(original_artifact)
if next_search_artifacts_to_original_artifacts:
return _get_udfs_from_ancestors_internal(lims, next_search_artifacts_to_original_artifacts, original_artifacts_to_udfs)
return original_artifacts_to_udfs
| 47.225166 | 130 | 0.715468 | 924 | 7,131 | 5.22619 | 0.188312 | 0.082005 | 0.043487 | 0.057983 | 0.349762 | 0.266308 | 0.136467 | 0.086146 | 0.065024 | 0.019052 | 0 | 0.002152 | 0.218202 | 7,131 | 150 | 131 | 47.54 | 0.864036 | 0.36741 | 0 | 0.057971 | 0 | 0.014493 | 0.050322 | 0.008272 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.014493 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
701f6cd529e5e7fa4f16f7edc5345178425ab490 | 6,829 | py | Python | App/softwares_env/softwares/houdini_wizard/plugin.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | App/softwares_env/softwares/houdini_wizard/plugin.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | App/softwares_env/softwares/houdini_wizard/plugin.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | import hou
from wizard.prefs.main import prefs
from wizard.vars import defaults
from wizard.tools import log
from wizard.asset import main as asset_core
from softwares.houdini_wizard.tools import *
from wizard.tools import utility as utils
import os
import traceback
import shutil
from wizard.project import wall
from wizard.signal import send_signal
import sys
logger = log.pipe_log()
prefs = prefs()
def save():
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
asset.version = prefs.asset(asset).software.get_new_version()
hou.hipFile.save(file_name=asset.file)
string_asset = asset_core.asset_to_string(asset)
os.environ[defaults._asset_var_] = string_asset
send_signal.save_request_signal(asset.file, string_asset)
def set_f_range(preroll=0):
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
f_range = prefs.asset(asset).name.range
if preroll:
preroll = prefs.asset(asset).name.preroll
postroll = prefs.asset(asset).name.postroll
f_range[0] = f_range[0]-preroll
f_range[1] = f_range[1]+postroll
hou.playbar.setFrameRange(f_range[0], f_range[1])
def export(batch=None, frange=None):
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
if asset.extension == "hipnc":
export_hipfile()
elif asset.extension == "abc":
export_abc(batch=batch, frange=frange)
elif asset.extension == "vdb":
export_vdb(batch=batch, frange=frange)
def prepare_export():
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
if asset.extension == "abc":
export_abc(prepare = 1)
elif asset.extension == "vdb":
export_vdb(prepare = 1)
def export_hipfile():
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
export_file = asset.export("{}_{}".format(asset.name, asset.variant), from_asset=asset)
hou.hipFile.save()
current_file = hou.hipFile.path()
shutil.copyfile(current_file, export_file)
wall.wall().publish_event(asset)
def export_abc(batch=None, prepare=None, frange=None):
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
if not batch:
abc_export_null = create_export_null_on_last_node('abc')
else:
abc_export_null = look_for_export_null('abc')
if abc_export_null:
wizard_exports_node = get_wizard_export_node()
abc_object_merge_node = create_node_without_duplicate('object_merge', 'abc_exports_object_merge', wizard_exports_node)
abc_object_merge_node.parm('objpath1').set(abc_export_null.path())
gtags_node = create_node_without_duplicate('attribcreate', 'GuerillaTags', wizard_exports_node)
gtags_node.parm('name1').set('GuerillaTags')
gtags_node.parm('class1').set('detail')
gtags_node.parm('type1').set('index')
gtags_node.parm('string1').set('{}, {}, {}, {}, {}-{}-{}-{}'.format(asset.category,
asset.name,
asset.variant,
asset.stage,
asset.category,
asset.name,
asset.variant,
asset.stage))
gtags_node.setInput(0, abc_object_merge_node)
rop_alembic_node = create_node_without_duplicate('rop_alembic', 'exports_alembic', wizard_exports_node)
rop_alembic_node.setInput(0, gtags_node)
wizard_exports_node.layoutChildren()
rop_alembic_node.parm("trange").set('normal')
if frange:
hou.playbar.setFrameRange(frange[0], frange[1])
rop_alembic_node.parm("f1").setExpression('$FSTART')
rop_alembic_node.parm("f2").setExpression('$FEND')
rop_alembic_node.parm("motionBlur").set(1)
rop_alembic_node.parm("shutter1").set(-0.2)
rop_alembic_node.parm("shutter2").set(0.2)
if batch:
rop_alembic_node.parm('lpostframe').set("python")
rop_alembic_node.parm('postframe').set(by_frame_script_to_file(80))
if not prepare:
export_file = asset.export("{}_{}".format(asset.name, asset.variant), from_asset=asset)
rop_alembic_node.parm("filename").set(export_file)
rop_alembic_node.parm("execute").pressButton()
wall.wall().publish_event(asset)
else:
logger.warning("No abc out node")
def export_vdb(batch=None, prepare=None, frange=None):
asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
if not batch:
vdb_export_null = create_export_null_on_last_node('vdb')
else:
vdb_export_null = look_for_export_null('vdb')
if vdb_export_null:
wizard_exports_node = get_wizard_export_node()
vdb_object_merge_node = create_node_without_duplicate('object_merge', 'vdb_exports_object_merge', wizard_exports_node)
vdb_object_merge_node.parm('objpath1').set(vdb_export_null.path())
rop_geometry_node = create_node_without_duplicate('rop_geometry', 'exports_vdb', wizard_exports_node)
rop_geometry_node.setInput(0, vdb_object_merge_node)
wizard_exports_node.layoutChildren()
temp_dir = utils.temp_dir()
export_path = os.path.join(temp_dir, "file.$F4.vdb")
if batch:
rop_geometry_node.parm('lpostframe').set("python")
rop_geometry_node.parm('postframe').set(by_frame_script_to_file(80))
rop_geometry_node.parm('sopoutput').set(export_path)
rop_geometry_node.parm("trange").set('normal')
if frange:
hou.playbar.setFrameRange(frange[0], frange[1])
rop_geometry_node.parm("f1").setExpression('$FSTART')
rop_geometry_node.parm("f2").setExpression('$FEND')
if not prepare:
rop_geometry_node.parm("execute").pressButton()
files_list = []
for file in os.listdir(temp_dir):
files_list.append(os.path.join(temp_dir, file))
publish_files_name = asset.export_multiple('{}_{}'.format(asset.name, asset.variant), files_list)
if batch:
print("current_task:copying output files")
sys.stdout.flush()
for file in files_list:
shutil.copyfile(file, publish_files_name[files_list.index(file)])
wall.wall().publish_event(asset)
else:
logger.warning("No vdb out node")
| 37.729282 | 126 | 0.634793 | 836 | 6,829 | 4.863636 | 0.16866 | 0.045253 | 0.041318 | 0.04427 | 0.517462 | 0.44786 | 0.315298 | 0.315298 | 0.275947 | 0.206099 | 0 | 0.007463 | 0.254356 | 6,829 | 180 | 127 | 37.938889 | 0.791045 | 0 | 0 | 0.276119 | 0 | 0 | 0.075571 | 0.00703 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052239 | false | 0 | 0.097015 | 0 | 0.149254 | 0.007463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7022affd5d60868600ba08f00c4b7658c014b223 | 3,749 | py | Python | experiments/overalltests/report.py | WojciechMula/parsing-int-series | f0a45c8b1251018e52dac9ebf1d98e8dfb705755 | [
"BSD-2-Clause"
] | 19 | 2018-04-20T06:51:42.000Z | 2022-02-24T02:12:00.000Z | experiments/overalltests/report.py | WojciechMula/parsing-int-series | f0a45c8b1251018e52dac9ebf1d98e8dfb705755 | [
"BSD-2-Clause"
] | 2 | 2018-04-20T09:53:37.000Z | 2018-04-27T19:01:16.000Z | experiments/overalltests/report.py | WojciechMula/parsing-int-series | f0a45c8b1251018e52dac9ebf1d98e8dfb705755 | [
"BSD-2-Clause"
] | 3 | 2019-02-25T19:26:51.000Z | 2020-11-04T00:50:42.000Z | import sys
import os.path
if __name__ == '__main__' and __package__ is None:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from table import Table
from loader import load
from utils import groupby
from report_writer import RestWriter
from prettyprint import *
class Report(object):
def __init__(self, path):
with open(path, 'rt') as f:
self.raw_data = load(f)
# group by separators distribution
bysep = lambda item: item.sep_distribution
self.report = []
for sep, collection in groupby(self.raw_data, bysep).iteritems():
ret = self.split_by_distribution(collection)
self.report.append((
get_separator_title(sep),
ret
))
def get(self):
return self.report
def split_by_distribution(self, collection):
result = []
bynum = lambda item: (item.distribution_name)
tmp = groupby(collection, bynum)
for distribution_name, collection in tmp.iteritems():
res = self.split_by_parameters(distribution_name, collection)
result.append((
get_distribution_title(distribution_name),
res
))
return result
def split_by_parameters(self, distribution_name, collection):
byparam = lambda item: item.num_distribution
result = []
for key, collection in groupby(collection, byparam).iteritems():
table = self.prepare_table(collection)
ret = get_num_distribution_parameters(distribution_name, key)
result.append((
ret.title,
table,
ret.weight
))
result.sort(key=lambda row: row[-1])
return [item[:2] for item in result]
def prepare_table(self, procedures):
keyfun = lambda item: (item.size, item.loops)
tmp = groupby(procedures, keyfun)
data = []
for (size, loops), items in tmp.iteritems():
def get_time(procedure):
for item in items:
if item.procedure == procedure:
return item.time
raise KeyError("Procedure '%s' not found" % procedure)
data.append((
size,
loops,
get_time("scalar"),
get_time("sse"),
get_time("sse-block"),
))
data.sort(key=lambda t: t[0]) # sort by size
t = Table()
t.add_header([("input", 2), "scalar", ("SSE", 2), ("SSE block", 2)])
t.add_header(["size [B]", "loops", "time [us]", "time [us]", "speed-up", "time [us]", "speed-up"])
for item in data:
t0 = item[2]
t1 = item[3]
t2 = item[4]
if t0 < 10 and t1 < 10 and t2 < 10:
# don't fool people when all measurements are single-digit numbers
speedup_sse = '---'
speedup_sse_block = '---'
else:
speedup_sse = '%0.2f' % (float(t0)/t1)
speedup_sse_block = '%0.2f' % (float(t0)/t2)
t.add_row([
'{:,}'.format(item[0]),
'%d' % item[1],
'%d' % item[2],
'%d' % item[3],
speedup_sse,
'%d' % item[4],
speedup_sse_block,
])
return t
def main():
report = Report(sys.argv[1])
writer = RestWriter(sys.stdout, report.get())
try:
restsection = sys.argv[2]
except IndexError:
restsection = "-~#"
writer.write(restsection)
if __name__ == '__main__':
main()
| 27.566176 | 106 | 0.523873 | 410 | 3,749 | 4.614634 | 0.3 | 0.05074 | 0.029598 | 0.013742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014994 | 0.359563 | 3,749 | 135 | 107 | 27.77037 | 0.773011 | 0.029341 | 0 | 0.081633 | 0 | 0 | 0.047318 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0.010204 | 0.204082 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7026d3905231076109cce84e7fcd5feb02e48371 | 5,376 | py | Python | sr/robot3/astoria.py | srobo/sr-robot3 | af88d44d681c4f730c2196eaab8c7cbaf4e39d98 | [
"MIT"
] | 4 | 2020-08-31T15:33:09.000Z | 2022-01-22T18:50:20.000Z | sr/robot3/astoria.py | srobo/sr-robot3 | af88d44d681c4f730c2196eaab8c7cbaf4e39d98 | [
"MIT"
] | 22 | 2020-07-02T20:59:09.000Z | 2022-02-23T20:47:17.000Z | sr/robot3/astoria.py | srobo/sr-robot3 | af88d44d681c4f730c2196eaab8c7cbaf4e39d98 | [
"MIT"
] | null | null | null | """Integration with Astoria."""
import asyncio
import logging
from json import JSONDecodeError, loads
from pathlib import Path
from typing import Match, NamedTuple, Optional
from astoria.common.broadcast_event import StartButtonBroadcastEvent
from astoria.common.consumer import StateConsumer
from astoria.common.messages.astmetad import Metadata, MetadataManagerMessage
from astoria.common.messages.astprocd import ProcessManagerMessage
from astoria.common.mqtt.broadcast_helper import BroadcastHelper
LOGGER = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
class GetMetadataResult(NamedTuple):
"""Result returned from fetching metadata from astoria."""
metadata: Metadata
usb_path: Path
class GetMetadataConsumer(StateConsumer):
"""Astoria consumer to fetch metadata."""
name = "sr-robot3-metadata"
def _setup_logging(self, verbose: bool, *, welcome_message: bool = True) -> None:
"""Use the logging from sr-robot3."""
# Suppress INFO messages from gmqtt
logging.getLogger("gmqtt").setLevel(logging.WARNING)
def _init(self) -> None:
"""Initialise consumer."""
self._metadata_message: Optional[MetadataManagerMessage] = None
self._proc_message: Optional[ProcessManagerMessage] = None
self._state_lock = asyncio.Lock()
self._mqtt.subscribe("astmetad", self._handle_astmetad_message)
self._mqtt.subscribe("astprocd", self._handle_astprocd_message)
async def _handle_astmetad_message(
self,
match: Match[str],
payload: str,
) -> None:
"""Handle astmetad status messages."""
async with self._state_lock:
try:
message = MetadataManagerMessage(**loads(payload))
if message.status == MetadataManagerMessage.Status.RUNNING:
LOGGER.debug("Received metadata")
self._metadata_message = message
else:
LOGGER.warn("Cannot get metadata, astmetad is not running")
except JSONDecodeError:
LOGGER.error("Could not decode JSON metadata.")
if self._metadata_message is not None and self._proc_message is not None:
self.halt(silent=True)
async def _handle_astprocd_message(
self,
match: Match[str],
payload: str,
) -> None:
"""Handle astprocd status messages."""
async with self._state_lock:
try:
message = ProcessManagerMessage(**loads(payload))
if message.status == ProcessManagerMessage.Status.RUNNING:
LOGGER.debug("Received process info")
self._proc_message = message
else:
LOGGER.warn("Cannot get process info, astprocd is not running")
except JSONDecodeError:
LOGGER.error("Could not decode JSON metadata.")
if self._metadata_message is not None and self._proc_message is not None:
self.halt(silent=True)
async def main(self) -> None:
"""Main method of the command."""
await self.wait_loop()
@classmethod
def get_metadata(cls) -> GetMetadataResult:
"""Get metadata."""
gmc = cls(False, None)
metadata = Metadata.init(gmc.config)
path = Path("/dev/null")
try:
loop.run_until_complete(asyncio.wait_for(gmc.run(), timeout=0.1))
if gmc._metadata_message is not None:
metadata = gmc._metadata_message.metadata
if gmc._proc_message is not None and gmc._proc_message.disk_info is not None:
path = gmc._proc_message.disk_info.mount_path
except ConnectionRefusedError:
LOGGER.warning("Unable to connect to MQTT broker")
except asyncio.TimeoutError:
LOGGER.warning("Astoria took too long to respond, giving up.")
return GetMetadataResult(metadata, path)
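# Illustrative usage sketch (assumes a reachable astoria MQTT broker; otherwise
# the defaults initialised above are returned):
#
#   meta_result = GetMetadataConsumer.get_metadata()
#   print(meta_result.metadata, meta_result.usb_path)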
class WaitForStartButtonBroadcastConsumer(StateConsumer):
"""Wait for a start button broadcast."""
name = "sr-robot3-wait-start"
def __init__(
self,
verbose: bool,
config_file: Optional[str],
start_event: asyncio.Event,
) -> None:
super().__init__(verbose, config_file)
self._start_event = start_event
def _setup_logging(self, verbose: bool, *, welcome_message: bool = True) -> None:
"""Use the logging from sr-robot3."""
# Suppress INFO messages from gmqtt
logging.getLogger("gmqtt").setLevel(logging.WARNING)
def _init(self) -> None:
"""
Initialisation of the data component.
Called in the constructor of the parent class.
"""
self._trigger_event = BroadcastHelper.get_helper(
self._mqtt,
StartButtonBroadcastEvent,
)
async def main(self) -> None:
"""Wait for a trigger event."""
while not self._start_event.is_set():
# wait_broadcast waits forever until a broadcoast, so we will use a short
# timeout to ensure that the loop condition is checked.
try:
await asyncio.wait_for(self._trigger_event.wait_broadcast(), timeout=0.1)
self._start_event.set()
except asyncio.TimeoutError:
pass
| 35.84 | 89 | 0.638393 | 580 | 5,376 | 5.746552 | 0.265517 | 0.013501 | 0.018902 | 0.028803 | 0.341134 | 0.268827 | 0.268827 | 0.246625 | 0.246625 | 0.192619 | 0 | 0.002048 | 0.273438 | 5,376 | 149 | 90 | 36.080537 | 0.851254 | 0.0984 | 0 | 0.356436 | 0 | 0 | 0.073618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059406 | false | 0.009901 | 0.09901 | 0 | 0.237624 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
702871323b08cb3d0bde41f82365acd11a63fda1 | 715 | py | Python | util/FeatureExtractor.py | GaoZiqiang/Multiview-ObjectDetection | 41d28bc15622b4d3a863ba4c8b53b06f6b3b1568 | [
"MIT"
] | 370 | 2018-05-22T06:46:35.000Z | 2022-03-18T07:20:21.000Z | util/FeatureExtractor.py | GaoZiqiang/Multiview-ObjectDetection | 41d28bc15622b4d3a863ba4c8b53b06f6b3b1568 | [
"MIT"
] | 51 | 2018-06-21T02:04:07.000Z | 2022-03-22T08:25:03.000Z | util/FeatureExtractor.py | GaoZiqiang/Multiview-ObjectDetection | 41d28bc15622b4d3a863ba4c8b53b06f6b3b1568 | [
"MIT"
] | 107 | 2018-06-04T21:03:04.000Z | 2022-03-26T13:38:29.000Z | import torch.nn as nn
from IPython import embed
class FeatureExtractor(nn.Module):
def __init__(self,submodule,extracted_layers):
super(FeatureExtractor,self).__init__()
self.submodule = submodule
self.extracted_layers = extracted_layers
def forward(self, x):
outputs = []
for name, module in self.submodule._modules.items():
if name is "classfier":
x = x.view(x.size(0),-1)
if name is "base":
for block_name, cnn_block in module._modules.items():
x = cnn_block(x)
if block_name in self.extracted_layers:
outputs.append(x)
return outputs | 35.75 | 69 | 0.586014 | 84 | 715 | 4.77381 | 0.440476 | 0.149626 | 0.084788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004149 | 0.325874 | 715 | 20 | 70 | 35.75 | 0.827801 | 0 | 0 | 0 | 0 | 0 | 0.018156 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
702be8919da263b06d997a50b76d2360b8199c9d | 3,004 | py | Python | BioClients/lincs/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 10 | 2020-05-26T07:29:14.000Z | 2021-12-06T21:33:40.000Z | BioClients/lincs/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 1 | 2021-10-05T12:25:30.000Z | 2021-10-05T17:05:56.000Z | BioClients/lincs/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 2 | 2021-03-16T03:20:24.000Z | 2021-08-08T20:17:10.000Z | #!/usr/bin/env python3
"""
LINCS REST API client
New (2019) iLINCS:
http://www.ilincs.org/ilincs/APIinfo
http://www.ilincs.org/ilincs/APIdocumentation
(http://lincsportal.ccs.miami.edu/dcic/api/ DEPRECATED?)
"""
###
import sys,os,argparse,re,time,json,logging
#
from .. import lincs
#
#############################################################################
if __name__=='__main__':
epilog="""\
Examples:
NCBI Gene IDs: 207;
PerturbagenIDs: BRD-A00100033 (get_compound);
LINCS PertIDs: LSM-2121;
Perturbagen-Compound IDs: LSM-2421;
Signature IDs: LINCSCP_10260,LINCSCP_10261,LINCSCP_10262;
Dataset IDs: EDS-1013,EDS-1014;
Search Terms: cancer, vorinostat, MCF7.
"""
parser = argparse.ArgumentParser(description=f'LINCS REST API client ({lincs.Utils.API_HOST})', epilog=epilog)
ops = [
'get_gene', 'get_compound', 'get_dataset',
'list_genes', 'list_compounds', 'list_datasets',
'search_dataset', 'search_signature',
'get_signature'
]
parser.add_argument("op", choices=ops, help='OPERATION')
parser.add_argument("--i", dest="ifile", help="input file, IDs")
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("--ids", help="input IDs, comma-separated")
parser.add_argument("--searchTerm", dest="searchTerm", help="Entity searchTerm e.g. Rock1)")
parser.add_argument("--lincs_only", action="store_true", help="LINCS datasets only")
parser.add_argument("--ngene", type=int, default=50, help="top genes per signature")
parser.add_argument("--nmax", type=int, help="max results")
parser.add_argument("--skip", type=int, help="skip results")
parser.add_argument("--api_host", default=lincs.Utils.API_HOST)
parser.add_argument("--api_base_path", default=lincs.Utils.API_BASE_PATH)
parser.add_argument("-v", "--verbose", action="count", default=0)
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
base_url = 'https://'+args.api_host+args.api_base_path
fout = open(args.ofile, "w+") if args.ofile else sys.stdout
ids=[];
if args.ifile:
fin = open(args.ifile)
while True:
line = fin.readline()
if not line: break
ids.append(line.strip())
elif args.ids:
ids = re.split('[,\s]+', args.ids.strip())
if args.op == 'get_gene':
lincs.Utils.GetGene(ids, base_url, fout)
elif args.op == 'get_compound':
lincs.Utils.GetCompound(ids, base_url, fout)
elif args.op == 'list_compounds':
lincs.Utils.ListCompounds(base_url, fout)
elif args.op == 'get_dataset':
lincs.Utils.GetDataset(ids, base_url, fout)
elif args.op == 'search_dataset':
lincs.Utils.SearchDataset(args.searchTerm, args.lincs_only, base_url, fout)
elif args.op == 'search_signature':
lincs.Utils.SearchSignature(ids, args.lincs_only, base_url, fout)
elif args.op == 'get_signature':
lincs.Utils.GetSignature(ids, args.ngene, base_url, fout)
else:
parser.error(f'Invalid operation: {args.op}')
| 34.528736 | 116 | 0.689414 | 413 | 3,004 | 4.861985 | 0.375303 | 0.053785 | 0.101594 | 0.044821 | 0.11255 | 0.090637 | 0.090637 | 0.033865 | 0.033865 | 0 | 0 | 0.020068 | 0.120839 | 3,004 | 86 | 117 | 34.930233 | 0.74025 | 0.067577 | 0 | 0 | 0 | 0 | 0.322509 | 0.033579 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032787 | 0 | 0.032787 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
702ffbf7e8d1d0ad0ebb44737dc759a0391e4cba | 13,693 | py | Python | datagrid_gtk3/utils/transformations.py | nowsecure/datagrid-gtk3 | 28083a4b9b4e2e0c7fbe2755d8464d2b02163086 | [
"MIT"
] | 17 | 2015-07-02T20:02:42.000Z | 2021-05-06T06:08:13.000Z | datagrid_gtk3/utils/transformations.py | esosaja/datagrid-gtk3 | 28083a4b9b4e2e0c7fbe2755d8464d2b02163086 | [
"MIT"
] | 32 | 2015-04-27T16:49:10.000Z | 2015-08-25T16:07:29.000Z | datagrid_gtk3/utils/transformations.py | esosaja/datagrid-gtk3 | 28083a4b9b4e2e0c7fbe2755d8464d2b02163086 | [
"MIT"
] | 8 | 2015-04-28T10:45:03.000Z | 2016-10-06T08:22:39.000Z | """Data transformation utils."""
import datetime
import logging
import HTMLParser
from decimal import Decimal
import dateutil.parser
from gi.repository import Gtk
from datagrid_gtk3.utils import imageutils
from datagrid_gtk3.utils import dateutils
from datagrid_gtk3.utils import stringutils
logger = logging.getLogger(__name__)
_transformers = {}
__all__ = ('get_transformer', 'register_transformer')
def get_transformer(transformer_name):
"""Get transformation for the given name.
:param str transformer_name: the name of the registered transformer
:return: the transformer registered by transformer_name
:rtype: callable
"""
return _transformers.get(transformer_name, None)
def register_transformer(transformer_name, transformer):
"""Register a transformer.
:param str transformer_name: the name to register the transformer
:param callable transformer: the transformer to be registered
"""
assert callable(transformer)
_transformers[transformer_name] = transformer
def unregister_transformer(transformer_name):
"""Unregister a transformer.
:param str transformer_name: the name to register the transformer
:raise KeyError: if a transformer is not registered under the given name
"""
del _transformers[transformer_name]
def transformer(transformer_name):
"""A decorator to easily register a decorator.
Use this like::
@transformer('transformer_name')
def transformer_func(value):
return do_something_with_value()
:param str transformer_name: the name to register the transformer
"""
def _wrapper(f):
register_transformer(transformer_name, f)
return f
return _wrapper
###
# Default transformers
###
@transformer('string')
def string_transform(value, max_length=None, oneline=True,
decode_fallback=None):
"""String transformation.
:param object value: the value that will be converted to
a string
:param int max_length: if not `None`, will be used to
ellipsize the string if greater than that.
:param bool oneline: if we should join all the lines together
in one line
:param callable decode_fallback: a callable to use
to decode value in case it cannot be converted to unicode directly
:return: the string representation of the value
:rtype: str
"""
if value is None:
return '<NULL>'
if isinstance(value, str):
value = unicode(value, 'utf-8', 'replace')
else:
try:
value = unicode(value)
except UnicodeDecodeError:
if decode_fallback is None:
raise
value = decode_fallback(value)
# Replace non-printable characters on the string so the user will
# know that there's something there even though it is not printable.
value = stringutils.replace_non_printable(value)
if oneline:
value = u' '.join(v.strip() for v in value.splitlines() if v.strip())
# Don't show more than max_length chars in treeview. Helps with performance
if max_length is not None and len(value) > max_length:
value = u'%s [...]' % (value[:max_length], )
# At the end, if value is unicode, it needs to be converted to
# an utf-8 encoded str or it won't be rendered in the treeview.
return value.encode('utf-8')
@transformer('html')
def html_transform(value, max_length=None, oneline=True,
decode_fallback=None):
"""HTML transformation.
:param object value: the escaped html that will be unescaped
:param int max_length: if not `None`, will be used to
ellipsize the string if greater than that.
:param bool oneline: if we should join all the lines together
in one line
:param callable decode_fallback: a callable to use
to decode value in case it cannot be converted to unicode directly
:return: the html string unescaped
:rtype: str
"""
if value is None:
return '<NULL>'
html_parser = HTMLParser.HTMLParser()
unescaped = html_parser.unescape(value)
return string_transform(
unescaped, max_length=max_length, oneline=oneline,
decode_fallback=decode_fallback)
@transformer('boolean')
def boolean_transform(value):
"""Transform boolean values to a gtk stock image.
:param bool value: the value to transform
:return: a pixbuf representing the value's bool value
:rtype: :class:`GdkPixbuf.Pixbuf`
"""
img = Gtk.Image()
# NOTE: should be STOCK_NO instead of STOCK_CANCEL but it looks
# crappy in Lubuntu
return img.render_icon(
Gtk.STOCK_YES if value else Gtk.STOCK_CANCEL, Gtk.IconSize.MENU)
@transformer('bytes')
def bytes_transform(value):
"""Transform bytes into a human-readable value.
:param int value: bytes to be humanized
:returns: the humanized bytes
:rtype: str
"""
if value is None:
return ''
for suffix, factor in [
('PB', 1 << 50),
('TB', 1 << 40),
('GB', 1 << 30),
('MB', 1 << 20),
('kB', 1 << 10),
('B', 0)]:
if value >= factor:
value = '%.*f %s' % (1, float(value) / max(factor, 1), suffix)
break
else:
raise ValueError('Unexpected value: %s' % (value, ))
return value
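# Illustrative values derived from the thresholds above:
#   bytes_transform(0)        -> '0.0 B'
#   bytes_transform(1536)     -> '1.5 kB'
#   bytes_transform(5 << 20)  -> '5.0 MB'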
@transformer('datetime')
def datetime_transform(value):
"""Transform datetime to ISO 8601 date format.
:param value: the datatime object
:type value: datetime.datetime
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
if isinstance(value, basestring):
try:
# Try to parse string as a date
value = dateutil.parser.parse(value)
except (OverflowError, TypeError, ValueError):
pass
# FIXME: Fix all places using 'datetime' for timestamp
# (either as an int/long or as a convertable str)
try:
long_value = long(value)
except (TypeError, ValueError):
pass
else:
return timestamp_transform(long_value)
if not isinstance(value, datetime.datetime):
# Convert value to string even if it cannot be parsed as a datetime
logger.warning('Not a datetime: %s', value)
return str(value)
return value.isoformat(' ')
@transformer('timestamp')
@transformer('timestamp_unix')
def timestamp_transform(value, date_only=False):
"""Transform timestamp to ISO 8601 date format.
:param int value: Unix timestamp
:param bool date_only: if we should format only the date part,
ignoring the time
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
try:
dt = datetime.datetime.utcfromtimestamp(value)
except (TypeError, ValueError):
# Convert value to string even if it cannot be parsed as a timestamp
logger.warning('Not a timestamp: %s', value)
return str(value)
if date_only:
return dt.date().isoformat()
else:
return dt.isoformat(' ')
@transformer('timestamp_ms')
@transformer('timestamp_unix_ms')
def timestamp_ms_transform(value):
"""Transform timestamp in milliseconds to ISO 8601 date format.
:param int value: Unix timestamp in milliseconds
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_transform(
dateutils.normalize_timestamp(value, 'timestamp_unix_ms'))
@transformer('timestamp_Ms')
@transformer('timestamp_unix_Ms')
def timestamp_Ms_transform(value):
"""Transform timestamp in microseconds to ISO 8601 date format.
:param int value: Unix timestamp in microseconds
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_transform(
dateutils.normalize_timestamp(value, 'timestamp_unix_Ms'))
@transformer('timestamp_ios')
@transformer('timestamp_apple')
def timestamp_apple_transform(value):
"""Transform apple timestamp to ISO 8601 date format.
Apple timestamps (e.g. those used on iOS) start at 2001-01-01.
:param int value: apple timestamp
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_transform(
dateutils.normalize_timestamp(value, 'timestamp_apple'))
@transformer('timestamp_webkit')
def timestamp_webkit_transform(value):
"""Transform WebKit timestamp to ISO 8601 date format.
WebKit timestamps are expressed in microseconds and
start at 1601-01-01.
:param int value: WebKit timestamp
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_transform(
dateutils.normalize_timestamp(value, 'timestamp_webkit'))
@transformer('timestamp_julian')
def timestamp_julian_transform(value, date_only=False):
"""Transform Julian timestamp to ISO 8601 date format.
Julian timestamps are the number of days that has passed since
noon Universal Time on January 1, 4713 BCE.
:param int value: Julian timestamp in days
:param bool date_only: if we should format only the date part,
ignoring the time
:return: the datetime represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_transform(
dateutils.normalize_timestamp(value, 'timestamp_julian'),
date_only=date_only)
@transformer('timestamp_julian_date')
def timestamp_julian_date_transform(value):
"""Transform julian timestamp to ISO 8601 date format.
Julian timestamps are the number of days that has passed since
noon Universal Time on January 1, 4713 BCE.
:param int value: Julian timestamp
:return: the date represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_julian_transform(value, date_only=True)
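# Illustrative sketch (not part of the original module): the Julian-day
# normalization is assumed to reduce to the standard relation below, where
# 2440587.5 is the Julian Day of 1970-01-01T00:00 UTC.
JULIAN_DAY_UNIX_EPOCH = 2440587.5
def _julian_to_unix_sketch(julian_day):
    """Return the unix timestamp in seconds for a Julian Day number."""
    return (julian_day - JULIAN_DAY_UNIX_EPOCH) * 86400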
@transformer('timestamp_midnight')
def timestamp_midnight_transform(value):
"""Transform midnight timestamp to ISO 8601 time format.
Midnight timestamp is the number of seconds that have passed
since midnight.
:param int value: midnight timestamp in seconds
:return: the time represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
dt = datetime.datetime.min + datetime.timedelta(0, value)
return dt.time().isoformat()
@transformer('timestamp_midnight_ms')
def timestamp_midnight_ms_transform(value):
"""Transform midnight timestamp in milliseconds to ISO 8601 time format.
Midnight timestamp is the number of seconds that have passed
since midnight.
:param int value: midnight timestamp in milliseconds
:return: the time represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_midnight_transform(value / 10 ** 3)
@transformer('timestamp_midnight_Ms')
def timestamp_midnight_Ms_transform(value):
"""Transform midnight timestamp in microsecond to ISO 8601 time format.
Midnight timestamp is the count in seconds of the time that
has passed since midnight.
:param int value: midnight timestamp in microseconds
:return: the time represented in ISO 8601 format
:rtype: str
"""
if value is None:
return ''
return timestamp_midnight_transform(value / 10 ** 6)
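# Worked example for the midnight family above (assuming the transformer
# decorator returns the function unchanged): 37230 seconds past midnight is
# 10:20:30, so
#   timestamp_midnight_transform(37230)          -> '10:20:30'
#   timestamp_midnight_ms_transform(37230000)    -> '10:20:30'  (value / 10 ** 3)
#   timestamp_midnight_Ms_transform(37230000000) -> '10:20:30'  (value / 10 ** 6)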
@transformer('image')
def image_transform(path, size=24, fill_image=True, draw_border=False,
draft=False, load_on_thread=False):
"""Render path into a pixbuf.
:param str path: the image path or `None` to use a fallback image
:param int size: the size to resize the image. It will be resized
to fit a square of (size, size)
:param bool fill_image: if we should fill the image with a transparent
background so that a smaller image is padded to at least a square of
(size, size), with the real image at the center.
:param bool draw_border: if we should add a border on the image
:param bool draft: if we should load the image as a draft. This
trades a little quality for a much higher performance.
:param bool load_on_thread: if we should load the image on another
thread. This will cause a placeholder to be returned the first
time this method is called.
:returns: the resized pixbuf
:rtype: :class:`GdkPixbuf.Pixbuf`
"""
cm = imageutils.ImageCacheManager.get_default()
return cm.get_image(path, size, fill_image, draw_border,
draft, load_on_thread)
@transformer('degree_decimal_str')
def degree_decimal_str_transform(value, length=8):
"""Transform degree decimal string to a numeric value.
The string is expected to have <length> digits; if fewer digits are found,
it will be prefixed with zeroes as needed.
:param value: Degrees encoded as a string with digits
:type value: str
:param length: Maximum expected string length
:type length: int
"""
assert isinstance(value, basestring), 'String value expected'
assert value.isdigit(), 'All characters expected to be digits'
assert len(value) <= length, \
'String length expected to be {} or less'.format(length)
value = value.zfill(length)
# Add decimal point at the expected location
value = '{}.{}'.format(value[:2], value[2:])
# Remove non-significant leading zeroes
value = Decimal(value)
return str(value)
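# Worked example of the degree transformation above, as a standalone sketch
# (the input string is made up for illustration):
#   '4530000' -> zfill(8) -> '04530000' -> '04.530000' -> Decimal -> '4.530000'
from decimal import Decimal as _ExampleDecimal
def _degree_decimal_str_example(value='4530000', length=8):
    value = value.zfill(length)
    value = '{}.{}'.format(value[:2], value[2:])
    return str(_ExampleDecimal(value))  # '4.530000'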
| 30.028509 | 79 | 0.682027 | 1,780 | 13,693 | 5.152247 | 0.176404 | 0.016792 | 0.01472 | 0.022898 | 0.449133 | 0.411733 | 0.379784 | 0.37684 | 0.37008 | 0.37008 | 0 | 0.014388 | 0.238662 | 13,693 | 455 | 80 | 30.094505 | 0.865324 | 0.470459 | 0 | 0.277778 | 0 | 0 | 0.092221 | 0.009572 | 0 | 0 | 0 | 0.002198 | 0.022222 | 1 | 0.122222 | false | 0.011111 | 0.05 | 0 | 0.383333 | 0.005556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70308c9bdc16492685bb5f8a040a5a606edea463 | 3,974 | py | Python | src/models/kmeans.py | stevieg3/when-do-reading-comprehension-models-learn | 22d348bb9b340ee84ecd135670890c1b1fe0db18 | [
"MIT"
] | null | null | null | src/models/kmeans.py | stevieg3/when-do-reading-comprehension-models-learn | 22d348bb9b340ee84ecd135670890c1b1fe0db18 | [
"MIT"
] | null | null | null | src/models/kmeans.py | stevieg3/when-do-reading-comprehension-models-learn | 22d348bb9b340ee84ecd135670890c1b1fe0db18 | [
"MIT"
] | null | null | null | import argparse
import logging
import pandas as pd
import numpy as np
from tslearn.clustering import TimeSeriesKMeans
from tqdm import tqdm
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
SEEDS = [27, 28, 29]
MODELS = ['dbert', 'dbidaf', 'droberta', 'squad']
DEV_DATA_SIZES = {
'dbert': 1000,
'dbidaf': 1000,
'droberta': 1000,
'squad': 10570
}
NUM_CHECKPOINTS = 120
def load_per_example_metrics_df(seed: int) -> pd.DataFrame:
logging.info('Loading per example metrics')
example_metric_df = pd.DataFrame()
for model in MODELS:
df = pd.read_csv(f'data/processed/per_example_metrics-squadv1-adversarialall-dataset={model}-seed={seed}.csv')
example_metric_df = example_metric_df.append(df)
assert example_metric_df.shape[0] == NUM_CHECKPOINTS * np.sum(list(DEV_DATA_SIZES.values()))
logging.info(example_metric_df.shape)
return example_metric_df
def _prepare_data(per_example_metrics_df: pd.DataFrame, value: str = 'f1') -> (np.array, dict):
"""
Prepare input array for k-means. Input is of dim (n_ts, sz, d) where n_ts=number of time series; sz=length of
time series; d=dimensionality of time series
:param per_example_metrics_df:
:return:
"""
logging.info('Preparing input for k-means')
per_example_metrics_df = per_example_metrics_df.copy()
per_example_metrics_df.sort_values(['id', 'checkpoint'], inplace=True)
n_ts = per_example_metrics_df['id'].nunique()
assert n_ts == np.sum(list(DEV_DATA_SIZES.values()))
sz = NUM_CHECKPOINTS
d = 1
X = np.zeros((n_ts, sz, d))
# Store mapping for index position to corresponding ID
idx_to_id_dict = {}
for idx, _id in tqdm(
enumerate(per_example_metrics_df['id'].unique()),
total=per_example_metrics_df['id'].nunique()
):
idx_to_id_dict[idx] = _id
X[idx, :, :] = per_example_metrics_df[per_example_metrics_df['id'] == _id][value].values.reshape(-1, 1)
logging.info(X.shape)
return X, idx_to_id_dict
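# Minimal illustration of the (n_ts, sz, d) layout produced above, with made-up
# numbers: two per-example metric trajectories, three checkpoints each, one value
# per checkpoint.
def _example_input_array():
    import numpy as _np
    X = _np.zeros((2, 3, 1))          # (n_ts, sz, d)
    X[0, :, 0] = [0.0, 0.5, 1.0]      # trajectory of example A
    X[1, :, 0] = [0.0, 0.0, 0.5]      # trajectory of example B
    return X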
def get_kmeans_clusters(
per_example_metrics_df: pd.DataFrame,
n_clusters: int,
model_seed: int,
km_seed: int,
max_iter: int = 300,
value: str = 'f1'
) -> pd.DataFrame:
X, idx_to_id_dict = _prepare_data(per_example_metrics_df, value=value)
# Fit K-means
logging.info('Fitting k-means')
km = TimeSeriesKMeans(
n_clusters=n_clusters,
metric="dtw",
max_iter=max_iter,
random_state=km_seed,
verbose=0,
n_jobs=-1
)
labels = km.fit_predict(X)
logging.info('Finished k-means')
logging.info('Processing labels')
id_km_labels = []
for idx, _id in idx_to_id_dict.items():
id_km_labels.append((_id, labels[idx]))
id_km_labels_df = pd.DataFrame(id_km_labels, columns=['id', 'KM_label'])
assert id_km_labels_df.shape[0] == np.sum(list(DEV_DATA_SIZES.values()))
id_km_labels_df['km_seed'] = km_seed
id_km_labels_df['model_seed'] = model_seed
logging.info(id_km_labels_df.shape)
logging.info(id_km_labels_df.head())
return id_km_labels_df
def main(seed, km_seed, n_clusters, savepath, max_iter):
per_example_metrics_df = load_per_example_metrics_df(seed=seed)
id_km_labels_df = get_kmeans_clusters(
per_example_metrics_df=per_example_metrics_df,
n_clusters=n_clusters,
model_seed=seed,
km_seed=km_seed,
max_iter=max_iter
)
id_km_labels_df.to_csv(savepath, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int)
parser.add_argument("--km_seed", type=int)
parser.add_argument("--n_clusters", type=int)
parser.add_argument("--savepath", type=str)
parser.add_argument("--max_iter", type=int)
args = parser.parse_args()
main(args.seed, args.km_seed, args.n_clusters, args.savepath, args.max_iter)
| 28.797101 | 118 | 0.682436 | 583 | 3,974 | 4.319039 | 0.248714 | 0.075457 | 0.128276 | 0.128276 | 0.264893 | 0.21525 | 0.098491 | 0.045274 | 0 | 0 | 0 | 0.012203 | 0.195773 | 3,974 | 137 | 119 | 29.007299 | 0.775657 | 0.065425 | 0 | 0.021505 | 0 | 0 | 0.101221 | 0.024152 | 0 | 0 | 0 | 0 | 0.032258 | 1 | 0.043011 | false | 0 | 0.064516 | 0 | 0.139785 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7031785a0dfa5b63b843961bc1f7373b3543b899 | 8,301 | py | Python | scripts/analyze_leaderboard.py | VIDA-NYU/alphad3m | db40193a448300d87442c451f9da17fa5cb845fd | [
"Apache-2.0"
] | null | null | null | scripts/analyze_leaderboard.py | VIDA-NYU/alphad3m | db40193a448300d87442c451f9da17fa5cb845fd | [
"Apache-2.0"
] | null | null | null | scripts/analyze_leaderboard.py | VIDA-NYU/alphad3m | db40193a448300d87442c451f9da17fa5cb845fd | [
"Apache-2.0"
] | null | null | null | import os
import re
import logging
import pandas as pd
from bs4 import BeautifulSoup
from os.path import join, exists, dirname
from prettytable import PrettyTable
logger = logging.getLogger(__name__)
ALL_TA2S = {'NYU-TA2', 'CMU-TA2', 'UCB-TA2', 'Uncharted-TA2', 'SRI-TA2', 'Texas A&M-TA2', 'D3M ENSEMBLE-TA2', 'NEW-NYU-TA2'}
SKIP_DATASETS = {'LL1_FB15k_237', 'LL1_FB15k_237_V2'} # These datasets use unsupported metrics, so skip them
def get_task_name(task_keywords):
task_name = None
if 'clustering' in task_keywords:
task_name = 'CLUSTERING'
elif 'semi-supervised' in task_keywords:
task_name = 'SEMISUPERVISED_CLASSIFICATION'
elif 'collaborative' in task_keywords:
task_name = 'COLLABORATIVE_FILTERING'
elif 'forecasting' in task_keywords:
task_name = 'TIME_SERIES_FORECASTING'
elif 'lupi' in task_keywords:
task_name = 'LUPI'
elif 'community' in task_keywords:
task_name = 'COMMUNITY_DETECTION'
elif 'link' in task_keywords:
task_name = 'LINK_PREDICTION'
elif 'object' in task_keywords:
task_name = 'OBJECT_DETECTION'
elif 'matching' in task_keywords:
task_name = 'GRAPH_MATCHING'
elif 'series' in task_keywords:
task_name = 'TIME_SERIES_CLASSIFICATION'
elif 'vertex' in task_keywords:
task_name = 'VERTEX_CLASSIFICATION'
elif 'multipleInstanceLearning' in task_keywords:
task_name = 'TABULAR_CLASSIFICATION' # There are no primitives for multi instance classification
elif 'text' in task_keywords:
task_name = 'TEXT_CLASSIFICATION'
elif 'image' in task_keywords and 'classification' in task_keywords:
task_name = 'IMAGE_CLASSIFICATION'
elif 'image' in task_keywords and 'regression' in task_keywords:
task_name = 'IMAGE_REGRESSION'
elif 'audio' in task_keywords:
task_name = 'AUDIO_CLASSIFICATION'
elif 'video' in task_keywords:
task_name = 'VIDEO_CLASSIFICATION'
elif 'classification' in task_keywords:
task_name = 'TABULAR_CLASSIFICATION'
elif 'regression' in task_keywords:
task_name = 'TABULAR_REGRESSION'
return task_name
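# Usage sketch for get_task_name (keyword lists are made-up examples):
#   get_task_name(['image', 'classification'])  -> 'IMAGE_CLASSIFICATION'
#   get_task_name(['regression'])               -> 'TABULAR_REGRESSION'
#   get_task_name(['unknownKeyword'])           -> None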
def get_leaderboard(leaderboard_path):
leaderboard = {}
with open(leaderboard_path) as fin:
html_doc = fin.read()
soup = BeautifulSoup(html_doc, 'html.parser')
items = soup.find_all('div', {'class': 'dropdown-menu'})[1].find_all('li')
logger.info('Found %d datasets', len(items))
datasets = []
task_types = {}
for item in items:
dataset_description = item.get_text().replace('\n', ' ')
match = re.search('(.+) \((.+)\)', dataset_description)
dataset_name, task_keywords = match.group(1), match.group(2)
dataset_name = dataset_name.rstrip()
task_keywords = re.split('\s+', task_keywords.strip())
datasets.append(dataset_name)
task_types[dataset_name] = get_task_name(task_keywords)
tables = soup.find_all('table', {'class': 'dataTable no-footer'})
tables = tables[1:]
logger.info('Found %d tables', len(tables))
for index, table in enumerate(tables):
dataset_name = datasets[index]
if dataset_name in SKIP_DATASETS:
continue
rows = table.find('tbody').find_all('tr')
ranking = []
for index_row, row in enumerate(rows):
cells = row.find_all('td')
team = cells[1].get_text()
score = cells[6].get_text()
baseline = cells[7].get_text()
metric = cells[9].get_text()
if team == 'NYU-TA2' and task_types[dataset_name] not in {'TABULAR_CLASSIFICATION', 'TABULAR_REGRESSION'}:
team = None # We consider NYU-TA2 as the system that supports only classification and regression
if team in ALL_TA2S: # Remove TA1 performances
ranking.append((team, round(float(score), 3)))
new_ranking = add_rank(ranking)
leaderboard[dataset_name] = {'ranking': new_ranking, 'task': task_types[dataset_name]}
return leaderboard
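# Example of the dataset-description parsing above (the description string is
# made up for illustration):
#   re.search('(.+) \((.+)\)', '185_baseball (classification tabular)')
#   -> group(1) == '185_baseball', group(2) == 'classification tabular'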
def add_rank(ranking, worst_rank=len(ALL_TA2S)):
new_ranking = {}
previous_score = ranking[0][1]
rank = 1
for team, score in ranking:
if score != previous_score:
rank += 1
new_ranking[team] = {'rank': rank, 'score': score}
previous_score = score
for team in ALL_TA2S:
if team not in new_ranking:
new_ranking[team] = {'rank': worst_rank, 'score': None} # Add the worse rank
return new_ranking
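# Usage sketch for add_rank (scores are made up): tied scores share a rank and
# teams missing from the input fall back to the worst rank (len(ALL_TA2S) == 8).
#   add_rank([('CMU-TA2', 0.9), ('NYU-TA2', 0.9), ('SRI-TA2', 0.7)])
#   -> {'CMU-TA2': {'rank': 1, 'score': 0.9},
#       'NYU-TA2': {'rank': 1, 'score': 0.9},
#       'SRI-TA2': {'rank': 2, 'score': 0.7},
#       <every other team in ALL_TA2S>: {'rank': 8, 'score': None}}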
def collect_new_scores(folder_path):
new_scores = {}
datasets = sorted([x for x in os.listdir(folder_path) if os.path.isdir(join(folder_path, x))])
for dataset in datasets:
csv_path = join(folder_path, dataset, 'output/temp/statistics_datasets.csv')
if exists(csv_path):
data = pd.read_csv(csv_path, header=None, sep='\t')
data = data.replace({'None': None})
score = data.iloc[0][4]
metric = data.iloc[0][5]
if score is not None:
score = round(float(score), 3)
new_scores[dataset] = {'score': score, 'metric': metric}
return new_scores
def update_leaderboard(leaderboard, new_scores, new_team):
for dataset, ranking_info in leaderboard.items():
ranking = ranking_info['ranking']
ranking = [(t, s['score']) for t, s in ranking.items() if s['score'] is not None]
if dataset in new_scores:
new_score = new_scores[dataset]['score']
metric = new_scores[dataset]['metric']
ranking.append((new_team, new_score))
is_reverse = 'ERROR' not in metric
ranking = sorted(ranking, key=lambda x: x[1], reverse=is_reverse)
else:
logger.warning('No new score found for dataset %s', dataset)
new_ranking = add_rank(ranking)
leaderboard[dataset] = {'ranking': new_ranking, 'task': ranking_info['task']}
return leaderboard
def calculate_statistics(leaderboard):
team_statistics = {x: {'winner_pipelines': 0, 'avg_rank': 0} for x in ALL_TA2S}
for dataset in leaderboard:
dataset_ranking = leaderboard[dataset]['ranking']
for team in ALL_TA2S:
team_rank = dataset_ranking[team]['rank']
if team_rank == 1:
team_statistics[team]['winner_pipelines'] += 1
team_statistics[team]['avg_rank'] += team_rank
total_datasets = float(len(leaderboard))
for team in team_statistics:
team_statistics[team]['avg_rank'] = round(team_statistics[team]['avg_rank'] / total_datasets, 3)
team_statistics = sorted(team_statistics.items(), key=lambda x: x[1]['winner_pipelines'], reverse=True)
table_team = PrettyTable()
table_team.field_names = ['Team', 'Winner Pipelines', 'Avg. Rank']
for team, statistics in team_statistics:
table_team.add_row([team, statistics['winner_pipelines'], statistics['avg_rank']])
print(table_team)
task_statistics = {}
for dataset in leaderboard:
task = leaderboard[dataset]['task']
if task not in task_statistics:
task_statistics[task] = {'teams': {}, 'total': 0}
task_statistics[task]['total'] += 1
for team in ALL_TA2S:
team_score = leaderboard[dataset]['ranking'][team]['score']
if team not in task_statistics[task]['teams']:
task_statistics[task]['teams'][team] = 0
if team_score is not None:
task_statistics[task]['teams'][team] += 1
ta2s = sorted(ALL_TA2S)
table_task = PrettyTable()
table_task.field_names = ['Tasks', 'Total'] + ta2s
for task in task_statistics:
table_task.add_row([task, task_statistics[task]['total']] + [task_statistics[task]['teams'][x] for x in ta2s])
print(table_task)
logger.info('Top 1 pipeline')
leaderboard_path = join(dirname(__file__), '../../evaluations/leaderboard_december_2020_rank1.html')
leaderboard = get_leaderboard(leaderboard_path)
new_results_path = join(dirname(__file__), '../../evaluations/new_results')
new_scores = collect_new_scores(new_results_path)
leaderboard = update_leaderboard(leaderboard, new_scores, 'NEW-NYU-TA2')
calculate_statistics(leaderboard)
| 38.077982 | 124 | 0.654499 | 1,042 | 8,301 | 4.980806 | 0.200576 | 0.060116 | 0.056647 | 0.077071 | 0.228324 | 0.108863 | 0.060501 | 0 | 0 | 0 | 0 | 0.011421 | 0.229972 | 8,301 | 217 | 125 | 38.253456 | 0.800532 | 0.028551 | 0 | 0.062857 | 0 | 0 | 0.161951 | 0.040953 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034286 | false | 0 | 0.04 | 0 | 0.102857 | 0.011429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7031e7b7a07729e1593c15fa18b6a42894035c58 | 522 | py | Python | ArmstrongN.py | JHONATAN9A/Algritmo_num_narcisistas | b40cff2588b41efecf180e932cd7a104974d72c7 | [
"MIT"
] | null | null | null | ArmstrongN.py | JHONATAN9A/Algritmo_num_narcisistas | b40cff2588b41efecf180e932cd7a104974d72c7 | [
"MIT"
] | null | null | null | ArmstrongN.py | JHONATAN9A/Algritmo_num_narcisistas | b40cff2588b41efecf180e932cd7a104974d72c7 | [
"MIT"
] | null | null | null | def proceso(num, suma=0):
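# Builds the list of each digit raised to the power of the number of digits and
# returns the original number together with the sum of that list; the caller
# below compares the two to decide whether num is an Armstrong (narcissistic)
# number. The `suma` parameter is unused.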
numero = []
for i in str(num):
exp = int(i) ** len(str(num))
numero.append(exp)
if len(numero) == len(str(num)):
total = sum(numero)
return num, total
numero.clear()
entrada = input()
datos = []
for i in range(int(entrada)):
entrada2 = input()
datos.append(entrada2)
for n in datos:
resul1, resul2 = proceso(int(n))
if resul1 == resul2:
print("Armstrong")
elif resul1 != resul2:
print("Not Armstrong")
| 18 | 37 | 0.557471 | 68 | 522 | 4.279412 | 0.455882 | 0.061856 | 0.041237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024457 | 0.295019 | 522 | 28 | 38 | 18.642857 | 0.766304 | 0 | 0 | 0 | 0 | 0 | 0.044807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7032edb61921b45f1c8f0a54a32060e1a5dc7971 | 683 | py | Python | core/urls.py | mburst/burstolio | aa3c89986b600a2c444ed6a39d07e8f2013e1b7e | [
"BSD-2-Clause"
] | 2 | 2015-10-21T09:00:21.000Z | 2017-01-19T09:53:05.000Z | core/urls.py | mburst/burstolio | aa3c89986b600a2c444ed6a39d07e8f2013e1b7e | [
"BSD-2-Clause"
] | 6 | 2021-04-08T18:22:12.000Z | 2022-02-10T10:50:11.000Z | core/urls.py | mburst/burstolio | aa3c89986b600a2c444ed6a39d07e8f2013e1b7e | [
"BSD-2-Clause"
] | 1 | 2015-08-18T05:20:59.000Z | 2015-08-18T05:20:59.000Z | from django.conf.urls import url
from django.views.generic import TemplateView
from core import views
from core.feeds import rss_feed, atom_feed
app_name = 'core'
urlpatterns = [
url(r'^$', views.blog, name='home'),
url(r'^blog/$', views.blog, name='blog'),
url(r'^blog/(?P<slug>.*)/$', views.entry, name='entry'),
url(r'^subscribe/$', views.subscribe, name='subscribe'),
url(r'^unsubscribe/$', views.unsubscribe, name='unsubscribe'),
url(r'^contact/$', TemplateView.as_view(template_name="core/contact.html")),
url(r'^alertthepress/$', views.alert_the_press, name='alert-the-press'),
url(r'^rss/$', rss_feed()),
url(r'^atom/$', atom_feed()),
] | 35.947368 | 80 | 0.66325 | 96 | 683 | 4.625 | 0.354167 | 0.081081 | 0.058559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125915 | 683 | 19 | 81 | 35.947368 | 0.743719 | 0 | 0 | 0 | 0 | 0 | 0.238304 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7032f2619f5d9022162264acdbe5700a7785cd5d | 1,110 | py | Python | clib/printenv.py | 7exe/lostia-professional-dev | 148b86286abf7bd89f7ff96ba83b5b629442ebd8 | [
"MIT"
] | null | null | null | clib/printenv.py | 7exe/lostia-professional-dev | 148b86286abf7bd89f7ff96ba83b5b629442ebd8 | [
"MIT"
] | null | null | null | clib/printenv.py | 7exe/lostia-professional-dev | 148b86286abf7bd89f7ff96ba83b5b629442ebd8 | [
"MIT"
] | null | null | null | import command
import sys
count = 0
newEnvVarName = []
newEnvVarValue = []
for I in command.env_var_name:
newEnvVarName.append(I.replace("\n",""))
for I in command.env_var_value:
newEnvVarValue.append(I.replace("\n",""))
endPrint = False
for I in command.env_var_name:
if(command.new_arg_parser("-0") or command.new_arg_parser("--null")):
print(str(I).replace("\n","")+"="+str(command.env_var_value[count]).replace("\n",""),end=" ")
endPrint = True
else:
count3=0
for D in sys.argv:
if(len(sys.argv)!=1):
if(D not in newEnvVarName and count3!=0):
print(str(I).replace("\n","")+"="+str(command.env_var_value[count]).replace("\n",""))
break
else:
print(str(I).replace("\n","")+"="+str(command.env_var_value[count]).replace("\n",""))
break
count3+=1
count+=1
if(endPrint == True):
print()
if(len(sys.argv)!=1):
for I in sys.argv:
if(I in newEnvVarName):
newCount = 0
for O in newEnvVarName:
if(I == O):
print(newEnvVarValue[newCount])
newCount+=1
| 28.461538 | 99 | 0.587387 | 155 | 1,110 | 4.103226 | 0.258065 | 0.100629 | 0.122642 | 0.113208 | 0.399371 | 0.358491 | 0.328616 | 0.256289 | 0.256289 | 0.256289 | 0 | 0.015081 | 0.223423 | 1,110 | 38 | 100 | 29.210526 | 0.722738 | 0 | 0 | 0.277778 | 0 | 0 | 0.025225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7036904272e13a83b49efc934453218e894c4ca2 | 3,050 | py | Python | scripts/artifacts/textnowCallLogs.py | JamieSharpe/ALEAPP | acb06736d772d75c9dc0fd58b9f2a1726e795fb4 | [
"MIT"
] | null | null | null | scripts/artifacts/textnowCallLogs.py | JamieSharpe/ALEAPP | acb06736d772d75c9dc0fd58b9f2a1726e795fb4 | [
"MIT"
] | null | null | null | scripts/artifacts/textnowCallLogs.py | JamieSharpe/ALEAPP | acb06736d772d75c9dc0fd58b9f2a1726e795fb4 | [
"MIT"
] | null | null | null | import datetime
from scripts.ilapfuncs import timeline, open_sqlite_db_readonly
from scripts.plugin_base import ArtefactPlugin
from scripts.ilapfuncs import logfunc, tsv
from scripts import artifact_report
class TextNowCallLogsPlugin(ArtefactPlugin):
"""
"""
def __init__(self):
super().__init__()
self.author = 'Unknown'
self.author_email = ''
self.author_url = ''
self.category = 'Text Now'
self.name = 'Call Logs'
self.description = ''
self.artefact_reference = '' # Description on what the artefact is.
self.path_filters = ['**/com.enflick.android.TextNow/databases/textnow_data.db*'] # Collection of regex search filters to locate an artefact.
self.icon = 'phone' # feathricon for report.
def _processor(self) -> bool:
source_file_msg = ''
for file_found in self.files_found:
file_name = str(file_found)
if file_name.endswith('textnow_data.db'):
textnow_db = str(file_found)
source_file_msg = file_found.replace(self.seeker.directory, '')
db = open_sqlite_db_readonly(textnow_db)
cursor = db.cursor()
try:
cursor.execute('''
SELECT contact_value AS num,
case message_direction when 2 then "Outgoing" else "Incoming" end AS direction,
date/1000 + message_text AS duration,
date/1000 AS datetime
FROM messages AS M
WHERE message_type IN ( 100, 102 )
''')
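# Note on the query above (the schema interpretation is an assumption): for call
# records (message_type 100 and 102) message_text appears to hold the call
# duration in seconds, so row[3] (date/1000) is the call start and row[2]
# (date/1000 + message_text) is the call end used below.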
all_rows = cursor.fetchall()
usageentries = len(all_rows)
except:
usageentries = 0
if usageentries > 0:
data_headers = ('Start Time', 'End Time', 'From ID', 'To ID', 'Call Direction')
data_list = []
for row in all_rows:
phone_number_from = None
phone_number_to = None
if row[1] == "Outgoing":
phone_number_to = row[0]
else:
phone_number_from = row[0]
starttime = datetime.datetime.fromtimestamp(int(row[3])).strftime('%Y-%m-%d %H:%M:%S')
endtime = datetime.datetime.fromtimestamp(int(row[2])).strftime('%Y-%m-%d %H:%M:%S')
data_list.append((starttime, endtime, phone_number_from, phone_number_to, row[1]))
artifact_report.GenerateHtmlReport(self, file_found, data_headers, data_list)
tsv(self.report_folder, data_headers, data_list, self.full_name(), source_file_msg)
timeline(self.report_folder, self.full_name(), data_list, data_headers)
else:
logfunc('No Text Now Call Logs found')
db.close()
return True
| 37.195122 | 150 | 0.543934 | 324 | 3,050 | 4.898148 | 0.41358 | 0.041588 | 0.024575 | 0.032766 | 0.061752 | 0.017643 | 0.017643 | 0 | 0 | 0 | 0 | 0.011874 | 0.364918 | 3,050 | 81 | 151 | 37.654321 | 0.807434 | 0.038361 | 0 | 0.033898 | 0 | 0 | 0.232008 | 0.019534 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033898 | false | 0 | 0.084746 | 0 | 0.152542 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
703b0d817daf3c22ee84731783d61b246681b5da | 13,307 | py | Python | db_adapter/curw_obs/timeseries/timeseries.py | CUrW-SL/curw_db_adapter | 9d9ef24f42080910e0bd251bc7f001b0a4b0ab31 | [
"MIT"
] | 2 | 2019-04-26T07:50:33.000Z | 2019-09-28T20:15:33.000Z | db_adapter/curw_obs/timeseries/timeseries.py | CUrW-SL/curw_db_adapter | 9d9ef24f42080910e0bd251bc7f001b0a4b0ab31 | [
"MIT"
] | 1 | 2019-04-03T09:30:38.000Z | 2019-04-20T18:11:59.000Z | db_adapter/curw_obs/timeseries/timeseries.py | shadhini/curw_db_adapter | 4db8e1ea8794ffbd0dce29ac954a13315e83d843 | [
"MIT"
] | null | null | null | import pandas as pd
import hashlib
import json
import traceback
from pymysql import IntegrityError
from datetime import datetime, timedelta
from db_adapter.logger import logger
from db_adapter.exceptions import DatabaseAdapterError, DuplicateEntryError
from db_adapter.curw_obs.station import StationEnum
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
class Timeseries:
def __init__(self, pool):
self.pool = pool
@staticmethod
def generate_timeseries_id(meta_data):
# def generate_timeseries_id(meta_data: object) -> object:
"""
Generate the event id for given metadata
Only 'latitude', 'longitude', 'station_type', 'variable', 'unit', 'unit_type'
are used to generate the id (i.e. hash value)
:param meta_data: Dict with 'latitude', 'longitude', 'station_type', 'variable',
'unit', 'unit_type' keys
:return: str: sha256 hash value in hex format (length of 64 characters)
"""
sha256 = hashlib.sha256()
hash_data = {
'latitude' : '',
'longitude' : '',
'station_type': '',
'variable' : '',
'unit' : '',
'unit_type' : ''
}
for key in hash_data.keys():
hash_data[key] = meta_data[key]
sha256.update(json.dumps(hash_data, sort_keys=True).encode("ascii"))
event_id = sha256.hexdigest()
return event_id
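# Usage sketch (field values are made up): only the six keys listed in hash_data
# influence the id, which is a 64-character sha256 hex digest.
#   meta = {'latitude': 6.90, 'longitude': 79.87, 'station_type': 'CUrW_WeatherStation',
#           'variable': 'Precipitation', 'unit': 'mm', 'unit_type': 'Accumulative'}
#   Timeseries.generate_timeseries_id(meta)  # -> 64-character hex string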
def get_timeseries_id_if_exists(self, meta_data):
"""
Check whether a timeseries id exists in the database for a given set of meta data
:param meta_data: Dict with 'latitude', 'longitude', 'station_type', 'variable',
'unit', 'unit_type' keys
:return: timeseries id if exist else raise DatabaseAdapterError
"""
event_id = self.generate_timeseries_id(meta_data)
connection = self.pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT 1 FROM `run` WHERE `id`=%s"
is_exist = cursor.execute(sql_statement, event_id)
return event_id if is_exist > 0 else None
except Exception as exception:
error_message = "Retrieving timeseries id for metadata={} failed.".format(meta_data)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def is_id_exists(self, id_):
"""
Check whether a given timeseries id exists in the database
:param id_:
:return: True, if id is in the database, False otherwise
"""
connection = self.pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT 1 FROM `run` WHERE `id`=%s"
is_exist = cursor.execute(sql_statement, id_)
return True if is_exist > 0 else False
except Exception as exception:
error_message = "Check operation to find timeseries id {} in the run table failed.".format(id_)
logger.error(error_message)
traceback.print_exc()
raise False
finally:
if connection is not None:
connection.close()
def insert_data(self, timeseries, upsert=False):
"""
Insert timeseries to Data table in the database
:param timeseries: list of [tms_id, time, value] lists
:param boolean upsert: If True, upsert existing values ON DUPLICATE KEY. Default is False.
Ref: 1). https://stackoverflow.com/a/14383794/1461060
2). https://chartio.com/resources/tutorials/how-to-insert-if-row-does-not-exist-upsert-in-mysql/
:return: row count if insertion was successful, else raise DatabaseAdapterError
"""
row_count = 0
connection = self.pool.connection()
try:
with connection.cursor() as cursor:
if upsert:
sql_statement = "INSERT INTO `data` (`id`, `time`, `value`) VALUES (%s, %s, %s) " \
"ON DUPLICATE KEY UPDATE `value`=VALUES(`value`)"
else:
sql_statement = "INSERT INTO `data` (`id`, `time`, `value`) VALUES (%s, %s, %s)"
row_count = cursor.executemany(sql_statement, timeseries)
connection.commit()
return row_count
except Exception as exception:
connection.rollback()
error_message = "Data insertion to data table for tms id {}, upsert={} failed.".format(timeseries[0][0],
upsert)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
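# Shape of the `timeseries` argument expected above (values are illustrative):
#   timeseries = [
#       ['<64-char tms_id>', '2019-05-01 00:00:00', 0.0],
#       ['<64-char tms_id>', '2019-05-01 00:05:00', 1.5],
#   ]
# With upsert=True an existing (id, time) row gets its value overwritten instead
# of raising a duplicate-key error.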
# def insert_timeseries(self, timeseries, run_tuple):
#
# """
# Insert new timeseries into the Run table and Data table, for given timeseries id
# :param tms_id:
# :param timeseries: list of [tms_id, time, value] lists
# :param run_tuple: tuples like
# (tms_id[0], run_name[1], start_date[2], end_date[3], station_id[4], variable_id[5], unit_id[6])
# :return: timeseries id if insertion was successful, else raise DatabaseAdapterError
# """
#
# connection = self.pool.connection()
# try:
#
# with connection.cursor() as cursor:
# sql_statement = "INSERT INTO `run` (`id`, `run_name`, `start_date`, `end_date`, `station`, " \
# "`variable`, `unit`) " \
# "VALUES ( %s, %s, %s, %s, %s, %s, %s)"
# sql_values = run_tuple
# cursor.execute(sql_statement, sql_values)
#
# connection.commit()
# self.insert_data(timeseries, True)
# return run_tuple[0]
# except Exception as exception:
# connection.rollback()
# error_message = "Insertion failed for timeseries with tms_id={}, run_name={}, station_id={}, " \
# " variable_id={}, unit_id={}" \
# .format(run_tuple[0], run_tuple[1], run_tuple[4], run_tuple[5], run_tuple[6])
# logger.error(error_message)
# traceback.print_exc()
# raise exception
# finally:
# if connection is not None:
# connection.close()
# def insert_run(self, run_tuple):
# """
# Insert new run entry
# :param run_tuple: tuple like
# (tms_id[0], run_name[1], start_date[2], end_date[3], station_id[4], variable_id[5], unit_id[6])
# :return: timeseries id if insertion was successful, else raise DatabaseAdapterError
# """
#
# connection = self.pool.connection()
# try:
#
# with connection.cursor() as cursor:
# sql_statement = "INSERT INTO `run` (`id`, `run_name`, `start_date`, `end_date`, `station`, " \
# "`variable`, `unit`) " \
# "VALUES ( %s, %s, %s, %s, %s, %s, %s)"
# cursor.execute(sql_statement, run_tuple)
#
# connection.commit()
# return run_tuple[0]
# except Exception as exception:
# connection.rollback()
# error_message = "Insertion failed for run enty with tms_id={}, run_name={}, station_id={}, " \
# " variable_id={}, unit_id={}" \
# .format(run_tuple[0], run_tuple[1], run_tuple[4], run_tuple[5], run_tuple[6])
# logger.error(error_message)
# traceback.print_exc()
# raise exception
# finally:
# if connection is not None:
# connection.close()
def insert_run(self, run_meta):
"""
Insert new run entry
:param run_meta: dictionary like
{
'tms_id' : '',
'run_name' : '',
'station_id' : '',
'unit_id' : '',
'variable_id': ''
}
:return: timeseries id if insertion was successful, else raise DatabaseAdapterError
"""
connection = self.pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "INSERT INTO `run` (`id`, `station`, `variable`, `unit`) " \
"VALUES ( %s, %s, %s, %s)"
cursor.execute(sql_statement, (run_meta.get('tms_id'), run_meta.get('station_id'),
run_meta.get('variable_id'), run_meta.get('unit_id')))
connection.commit()
return run_meta.get('tms_id')
except Exception as exception:
connection.rollback()
error_message = "Insertion failed for run entry with tms_id={}, station_id={}, " \
" variable_id={}, unit_id={}" \
.format(run_meta.get('tms_id'), run_meta.get('station_id'),
run_meta.get('variable_id'), run_meta.get('unit_id'))
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def get_end_date(self, id_):
"""
Retrieve end date
:param id_: timeseries id
:return: end_date
"""
connection = self.pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `end_date` FROM `run` WHERE `id`=%s"
row_count= cursor.execute(sql_statement, id_)
if row_count > 0:
return cursor.fetchone()['end_date']
return None
except Exception as exception:
error_message = "Retrieving end_date for id={} failed.".format(id_)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def update_end_date(self, id_, end_date):
"""
Update end_date for inserted timeseries, if end_date is a later date than the existing one
:param id_: timeseries id
:return: end_date if update is successful, else raise DatabaseAdapterError
"""
connection = self.pool.connection()
if type(end_date) is str:
end_date = datetime.strptime(end_date, COMMON_DATE_TIME_FORMAT)
existing_end_date = None
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `end_date` FROM `run` WHERE `id`=%s"
row_count= cursor.execute(sql_statement, id_)
if row_count > 0:
existing_end_date = cursor.fetchone()['end_date']
if existing_end_date is None or existing_end_date < end_date:
with connection.cursor() as cursor:
sql_statement = "UPDATE `run` SET `end_date`=%s WHERE `id`=%s"
cursor.execute(sql_statement, (end_date, id_))
connection.commit()
return end_date
except Exception as exception:
connection.rollback()
error_message = "Updating end_date for id={} failed.".format(id_)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def update_start_date(self, id_, start_date):
"""
Update (very first obs date) start_date for inserted timeseries, if start_date is an earlier date than the existing one
:param id_: timeseries id
:return: start_date if update is successful, else raise DatabaseAdapterError
"""
connection = self.pool.connection()
if type(start_date) is str:
start_date = datetime.strptime(start_date, COMMON_DATE_TIME_FORMAT)
existing_start_date = None
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `start_date` FROM `run` WHERE `id`=%s"
row_count = cursor.execute(sql_statement, id_)
if row_count > 0:
existing_start_date = cursor.fetchone()['start_date']
if existing_start_date is None or existing_start_date > start_date:
with connection.cursor() as cursor:
sql_statement = "UPDATE `run` SET `start_date`=%s WHERE `id`=%s"
cursor.execute(sql_statement, (start_date, id_))
connection.commit()
return start_date
except Exception as exception:
connection.rollback()
error_message = "Updating start_date for id={} failed.".format(id_)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
| 39.841317 | 128 | 0.559931 | 1,466 | 13,307 | 4.893588 | 0.122101 | 0.028297 | 0.005854 | 0.033733 | 0.689992 | 0.658907 | 0.617508 | 0.605241 | 0.561751 | 0.543212 | 0 | 0.007956 | 0.338844 | 13,307 | 333 | 129 | 39.960961 | 0.807456 | 0.344931 | 0 | 0.511494 | 0 | 0.011494 | 0.129397 | 0.0028 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.057471 | 0 | 0.166667 | 0.04023 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
703d952afd1d46ba74ddbc33b7e1cb8c8c05aa16 | 1,148 | py | Python | apps/sales/urls.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | null | null | null | apps/sales/urls.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | 5 | 2021-03-19T10:16:00.000Z | 2022-02-10T09:16:32.000Z | apps/sales/urls.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | null | null | null | """Sales app URLs."""
# Django
from django.urls import path, include
# Views
from .views import customers as customers_views
from .views import sales as sales_views
urlpatterns = [
path('customers/', customers_views.CustomerFilterListViev.as_view(), name='customers'),
path('customers/create/', customers_views.CustomerCreateView.as_view(), name='customer-create'),
path('customers/<int:pk>/', customers_views.CustomerDetailView.as_view(), name='customer'),
path('customers/update/<int:pk>/', customers_views.CustomerUpdateView.as_view(), name='customer-update'),
path('customers/delete/<int:pk>/', customers_views.CustomerDelete.as_view(), name='customer-delete'),
path('sales/', sales_views.SaleFilterListView.as_view(), name='sales'),
path('sales/create/', sales_views.SaleCreateView.as_view(), name='sale-create'),
path('sales/<int:pk>/', sales_views.SaleDetailView.as_view(), name='sale'),
path('sales/update/<int:pk>/', sales_views.SaleUpdateView.as_view(), name='sale-update'),
path('sales/delete/<int:pk>/', sales_views.SaleDelete.as_view(), name='sale-delete'),
]
| 45.92 | 110 | 0.708188 | 140 | 1,148 | 5.65 | 0.221429 | 0.075853 | 0.126422 | 0.091024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115854 | 1,148 | 24 | 111 | 47.833333 | 0.77931 | 0.025261 | 0 | 0 | 0 | 0 | 0.257353 | 0.088235 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
703da8b7398940bbb0b6d9ff84c7efd2ef81c2cf | 671 | py | Python | python3/learn-python/Loops and Iterations - For-While Loops.py | Nahid-Hassan/code-snippets | 24bd4b81564887822a0801a696001fcbeb6a7a75 | [
"MIT"
] | 2 | 2020-09-29T04:09:41.000Z | 2020-10-18T13:33:36.000Z | python3/learn-python/Loops and Iterations - For-While Loops.py | Nahid-Hassan/code-snippets | 24bd4b81564887822a0801a696001fcbeb6a7a75 | [
"MIT"
] | null | null | null | python3/learn-python/Loops and Iterations - For-While Loops.py | Nahid-Hassan/code-snippets | 24bd4b81564887822a0801a696001fcbeb6a7a75 | [
"MIT"
] | 1 | 2021-12-26T04:55:55.000Z | 2021-12-26T04:55:55.000Z | ############### for ###################
nums = [1, 2, 3, 4, 5]
# simple loop
for num in nums:
print(num)
# break keyword
for num in nums:
if num == 3:
print("found")
break
print(num)
# continue keyword
for num in nums:
if num == 3:
print("found")
continue
print(num)
# nested loop
for num in nums:
for letter in 'abc':
print(num, letter)
# range()
for i in range(10):
print(i)
for i in range(1, 11):
print(i)
############## while #################
x = 0
while x < 10:
if x == 5:
break
print(x)
x = x + 1
### infinite loop ###
# while True:
# print(x)
# x = x + 1
| 14.586957 | 39 | 0.463487 | 96 | 671 | 3.239583 | 0.302083 | 0.07717 | 0.102894 | 0.154341 | 0.385852 | 0.22508 | 0.22508 | 0.22508 | 0.22508 | 0.22508 | 0 | 0.03956 | 0.321908 | 671 | 45 | 40 | 14.911111 | 0.643956 | 0.19076 | 0 | 0.576923 | 0 | 0 | 0.028078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.346154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
703eb8f1a89f48d0c52579466960933c96dd59e6 | 7,830 | py | Python | python_scripts/extract.py | Sessa93/continuous-auth-service | c4131521a7960ab709c0eeaf43686ddea9bd8ba2 | [
"MIT"
] | 9 | 2018-09-17T07:33:05.000Z | 2022-02-13T12:41:03.000Z | python_scripts/extract.py | mykola-nec/keystroke- | e045bbbae1205629a4cc4bd369c0a0b5dd6fd93f | [
"MIT"
] | 1 | 2018-10-24T05:32:08.000Z | 2018-10-24T05:32:08.000Z | python_scripts/extract.py | mykola-nec/keystroke- | e045bbbae1205629a4cc4bd369c0a0b5dd6fd93f | [
"MIT"
] | 3 | 2018-10-20T14:37:23.000Z | 2021-04-19T13:45:00.000Z | # pylint: disable = C0111, C0103, C0411, C0301, W0102, C0330, W0603
"""This module is used to extract the key_holds times and digraph
up_down times from the raw events of the subjects."""
import read_write
import numpy as np
import operator
import time
import general_purpose
import sys
import py_node_communication as pynocomm
# All limits are in milliseconds
KEY_HOLD_UPPER_LIMIT = 400
KEY_HOLD_LOWER_LIMIT = 0
DIGRAPH_UP_DOWN_UPPER_LIMIT = 800
DIGRAPH_UP_DOWN_LOWER_LIMIT = -400
def _my_search_event(eventlist, event, key=''):
"""Searches a list of events for specific event and the specific key if specified
\n eventlist: A list of raw events of a subject
\n event: Can be 'keystrokeDown' or 'keystrokeUp'
\n key: It specifies a spefic key (e.g. KeyE or Space)
"""
if key == '':
for i, val in enumerate(eventlist):
if val['event'] == event:
return i, val
else:
for i, val in enumerate(eventlist):
if val['event'] == event and val['key'] == key:
return i, val
pynocomm.send_to_node(
'Returning -1 from my searchevent' + str(event) + str(key))
return -1, {}
def _digraph_all(subject_events_data, ignore_space=False, sortByDigraph=True):
"""Extracts the subject's digraph timings of key_holds and up_down by the raw events.
\n subject_events_data: The raw events list
\n ignore_space: Boolean. If True it ignores space
\n sortByDigraph: Boolean. If True it sorts data by digraph
\n Returns: A list of dicts [{'digraph', 'points'}]
where points is an nx3 numpy array with x,y,z as the key_hold_1, key_hold_2 and up_down timings of the digraph
"""
ret = []
# work with a copy because the pop method changes the list of dict :/
events = subject_events_data[:]
if ignore_space is True:
events = [evt for evt in events if evt['data'] != 'Space']
while True:
if len(events) <= 2:
break
# The next keyDown event will be the first
key_1_down_event = events[0]
if key_1_down_event['event'] != 'keystrokeDown':
pynocomm.send_to_node(
'...digraph_all: Continuing, first event is not keydown ->' + str(events[0]))
events.pop(0)
continue
# Find the respective keyUp event of key_1
# DEBUGGING
# pynocomm.send_to_node(str(key_1_down_event))
# DEBUGGING
key_1_up_event_index, key_1_up_event = _my_search_event(
events[1:], 'keystrokeUp', key_1_down_event['key'])
if key_1_up_event_index == -1:
pynocomm.send_to_node(
'...digraph_all: Continuing, Couldnt find keystrokeUp event for key = ' + str(key_1_down_event['key']))
events.pop(0)
continue
else:
key_1_up_event_index += 1
# Find the following keyDown event after the keyDown of key_1
key_2_down_event_index, key_2_down_event = _my_search_event(
events[1:], 'keystrokeDown')
if key_2_down_event_index == -1:
pynocomm.send_to_node('1993: What now?')
else:
key_2_down_event_index += 1
# Find the respective keyUp event of key_2
key_2_up_event_index, key_2_up_event = _my_search_event(
events[key_2_down_event_index + 1:], 'keystrokeUp', key_2_down_event['key'])
if key_2_up_event_index == -1:
# Just pop and continue (it's noise)
events.pop(0)
events.pop(key_1_up_event_index - 1) # index has changed now
pynocomm.send_to_node('1994: Removed Noise')
continue
else:
key_2_up_event_index += key_2_down_event_index + 1
# Calculate
# Here if I want down_down: "down_down": key_2_down_event['timestamp'] - key_1_down_event['timestamp'],
digraph_obj = {
"digraph": key_1_down_event['key'] + key_2_down_event['key'],
"up_down": key_2_down_event['timestamp'] - key_1_up_event['timestamp'],
"key_holds": [key_1_up_event['timestamp'] - key_1_down_event['timestamp'], key_2_up_event['timestamp'] - key_2_down_event['timestamp']]
}
xyz = np.array([[digraph_obj['key_holds'][0],
digraph_obj['key_holds'][1], digraph_obj['up_down']]])
# Store appropriately
if (general_purpose.is_not_extreme_outlier(digraph_obj['up_down'], DIGRAPH_UP_DOWN_LOWER_LIMIT, DIGRAPH_UP_DOWN_UPPER_LIMIT)
and general_purpose.is_not_extreme_outlier(digraph_obj['key_holds'][0], KEY_HOLD_LOWER_LIMIT, KEY_HOLD_UPPER_LIMIT)
and general_purpose.is_not_extreme_outlier(digraph_obj['key_holds'][1], KEY_HOLD_LOWER_LIMIT, KEY_HOLD_UPPER_LIMIT)):
if ret == []:
ret.append({"digraph": digraph_obj['digraph'],
"points": xyz})
else:
tmpi = -1
for i, val in enumerate(ret):
if val['digraph'] == digraph_obj['digraph']:
tmpi = i
break
if tmpi != -1:
ret[tmpi]['points'] = np.append(
ret[tmpi]['points'], xyz, axis=0)
else:
ret.append({"digraph": digraph_obj['digraph'],
"points": xyz})
# Update and remove the 1st key down and up for next iteration
events.pop(0)
events.pop(key_1_up_event_index - 1) # index has changed now
# Sort by Digraph
if sortByDigraph is True:
ret = sorted(ret, key=operator.itemgetter('digraph'))
return ret
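# Shape of the value returned above (timings in milliseconds and the digraph key
# names are made up for illustration):
#   [{'digraph': 'KeyHKeyE',
#     'points': np.array([[95.0, 80.0, 120.0],    # key_hold_1, key_hold_2, up_down
#                         [90.0, 85.0, 140.0]])},
#    ...]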
def one(doc, ignore_space=False, logg=True):
"""Extracts digraph up_down and key_holds times from subject doc events
\nReturns: Object with '_id', 'subject', 'track_code' and 'data' as calculated by the digraph_all func
"""
start = time.time()
ret = {
"_id": doc['_id'],
"subject": doc['subject'], "track_code": doc['track_code'],
"data": _digraph_all(doc['sessions']['data'], ignore_space=ignore_space, sortByDigraph=True)}
if logg is True:
pynocomm.send_to_node(
'-Subject Timings of "' + doc['subject'] + '" extracted in ' + str(time.time() - start) + ' seconds.')
return ret
def all(docs, write_to_json=True, ignore_space=False, filename='./trained-projects/subjects-data'):
"""Just some wrapper that takes all docs
\nReturns: [{'_id': '', 'subject': '', 'track_code': '', 'data': [...]}]
"""
ret = []
for subject_doc in docs:
ret.append(one(subject_doc, ignore_space=ignore_space))
if write_to_json is True:
read_write.write_timings_to_local(ret, filename)
return ret
def main():
"""It is called by node.js to extract data of all subjects."""
_DATA_ = pynocomm.receive_from_node()
DOCS = _DATA_['docs']
WRITE_EXTRACTED_TO_JSON = _DATA_['writeExtractedToJson']
TIM_LIMITS = _DATA_['timing_limits']
global KEY_HOLD_LOWER_LIMIT
KEY_HOLD_LOWER_LIMIT = TIM_LIMITS['key_hold']['min']
global KEY_HOLD_UPPER_LIMIT
KEY_HOLD_UPPER_LIMIT = TIM_LIMITS['key_hold']['max']
global DIGRAPH_UP_DOWN_LOWER_LIMIT
DIGRAPH_UP_DOWN_LOWER_LIMIT = TIM_LIMITS['digraph_up_down']['min']
global DIGRAPH_UP_DOWN_UPPER_LIMIT
DIGRAPH_UP_DOWN_UPPER_LIMIT = TIM_LIMITS['digraph_up_down']['max']
# Convert numpy arr to list, to be JSON serializable
_data = all(DOCS, write_to_json=WRITE_EXTRACTED_TO_JSON,
filename='./trained-projects/' + DOCS[0]['track_code'])
for d in _data:
for p in d['data']:
p['points'] = p['points'].tolist()
pynocomm.send_to_node(_data)
if __name__ == '__main__':
main()
| 40.569948 | 147 | 0.627075 | 1,077 | 7,830 | 4.252553 | 0.196843 | 0.037336 | 0.034061 | 0.031223 | 0.351747 | 0.265721 | 0.198908 | 0.139083 | 0.069869 | 0.069869 | 0 | 0.019652 | 0.265645 | 7,830 | 192 | 148 | 40.78125 | 0.77687 | 0.241635 | 0 | 0.263566 | 0 | 0 | 0.131399 | 0.005461 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03876 | false | 0 | 0.054264 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7041318399ceaa12c9af5eb0808df75d0fc60902 | 37,836 | py | Python | src/borg/repository.py | musoke/borg | 808379921c4a679bb597db42b4412840d66f4fb1 | [
"BSD-3-Clause"
] | null | null | null | src/borg/repository.py | musoke/borg | 808379921c4a679bb597db42b4412840d66f4fb1 | [
"BSD-3-Clause"
] | null | null | null | src/borg/repository.py | musoke/borg | 808379921c4a679bb597db42b4412840d66f4fb1 | [
"BSD-3-Clause"
] | null | null | null | import errno
import os
import shutil
import struct
from binascii import unhexlify
from collections import defaultdict
from configparser import ConfigParser
from datetime import datetime
from functools import partial
from itertools import islice
from zlib import crc32
import msgpack
import logging
logger = logging.getLogger(__name__)
from .constants import * # NOQA
from .hashindex import NSIndex
from .helpers import Error, ErrorWithTraceback, IntegrityError, InternalOSError
from .helpers import Location
from .helpers import ProgressIndicatorPercent
from .helpers import bin_to_hex
from .locking import UpgradableLock, LockError, LockErrorT
from .lrucache import LRUCache
from .platform import SyncFile, sync_dir
MAX_OBJECT_SIZE = 20 * 1024 * 1024
MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
FreeSpace = partial(defaultdict, int)
class Repository:
"""
Filesystem based transactional key value store
Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
called segments. Each segment is a series of log entries. The segment number together with the offset of each
entry relative to its segment start establishes an ordering of the log entries. This is the "definition" of
time for the purposes of the log.
Log entries are either PUT, DELETE or COMMIT.
A COMMIT is always the final log entry in a segment and marks all data from the beginning of the log until the
segment ending with the COMMIT as committed and consistent. The segment number of a segment ending with a COMMIT
is called the transaction ID of that commit, and a segment ending with a COMMIT is called committed.
When reading from a repository it is first checked whether the last segment is committed. If it is not, then
all segments after the last committed segment are deleted; they contain log entries whose consistency is not
established by a COMMIT.
Note that the COMMIT can't establish consistency by itself, but only manages to do so with proper support from
the platform (including the hardware). See platform.base.SyncFile for details.
A PUT inserts a key-value pair. The value is stored in the log entry, hence the repository implements
full data logging, meaning that all data is consistent, not just metadata (which is common in file systems).
A DELETE marks a key as deleted.
For a given key only the last entry regarding the key, which is called current (all other entries are called
superseded), is relevant: If there is no entry or the last entry is a DELETE then the key does not exist.
Otherwise the last PUT defines the value of the key.
By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete. A segment containing
such obsolete entries is called sparse, while a segment containing no such entries is called compact.
Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
superseded entries where current.
On disk layout:
dir/README
dir/config
dir/data/<X // SEGMENTS_PER_DIR>/<X>
dir/index.X
dir/hints.X
"""
class DoesNotExist(Error):
"""Repository {} does not exist."""
class AlreadyExists(Error):
"""Repository {} already exists."""
class InvalidRepository(Error):
"""{} is not a valid repository. Check repo config."""
class CheckNeeded(ErrorWithTraceback):
"""Inconsistency detected. Please run "borg check {}"."""
class ObjectNotFound(ErrorWithTraceback):
"""Object with key {} not found in repository {}."""
def __init__(self, path, create=False, exclusive=False, lock_wait=None, lock=True):
self.path = os.path.abspath(path)
self._location = Location('file://%s' % self.path)
self.io = None
self.lock = None
self.index = None
self._active_txn = False
self.lock_wait = lock_wait
self.do_lock = lock
self.do_create = create
self.exclusive = exclusive
def __del__(self):
if self.lock:
self.close()
assert False, "cleanup happened in Repository.__del__"
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.path)
def __enter__(self):
if self.do_create:
self.do_create = False
self.create(self.path)
self.open(self.path, self.exclusive, lock_wait=self.lock_wait, lock=self.do_lock)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.rollback()
self.close()
@property
def id_str(self):
return bin_to_hex(self.id)
def create(self, path):
"""Create a new empty repository at `path`
"""
if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
raise self.AlreadyExists(path)
if not os.path.exists(path):
os.mkdir(path)
with open(os.path.join(path, 'README'), 'w') as fd:
fd.write('This is a Borg repository\n')
os.mkdir(os.path.join(path, 'data'))
config = ConfigParser(interpolation=None)
config.add_section('repository')
config.set('repository', 'version', '1')
config.set('repository', 'segments_per_dir', str(DEFAULT_SEGMENTS_PER_DIR))
config.set('repository', 'max_segment_size', str(DEFAULT_MAX_SEGMENT_SIZE))
config.set('repository', 'append_only', '0')
config.set('repository', 'id', bin_to_hex(os.urandom(32)))
self.save_config(path, config)
def save_config(self, path, config):
config_path = os.path.join(path, 'config')
with open(config_path, 'w') as fd:
config.write(fd)
def save_key(self, keydata):
assert self.config
keydata = keydata.decode('utf-8') # remote repo: msgpack issue #99, getting bytes
self.config.set('repository', 'key', keydata)
self.save_config(self.path, self.config)
def load_key(self):
keydata = self.config.get('repository', 'key')
return keydata.encode('utf-8') # remote repo: msgpack issue #99, returning bytes
def destroy(self):
"""Destroy the repository at `self.path`
"""
if self.append_only:
raise ValueError(self.path + " is in append-only mode")
self.close()
os.remove(os.path.join(self.path, 'config')) # kill config first
shutil.rmtree(self.path)
def get_index_transaction_id(self):
indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
if indices:
return indices[-1]
else:
return None
def check_transaction(self):
index_transaction_id = self.get_index_transaction_id()
segments_transaction_id = self.io.get_segments_transaction_id()
if index_transaction_id is not None and segments_transaction_id is None:
raise self.CheckNeeded(self.path)
# Attempt to automatically rebuild index if we crashed between commit
# tag write and index save
if index_transaction_id != segments_transaction_id:
if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
replay_from = None
else:
replay_from = index_transaction_id
self.replay_segments(replay_from, segments_transaction_id)
def get_transaction_id(self):
self.check_transaction()
return self.get_index_transaction_id()
def break_lock(self):
UpgradableLock(os.path.join(self.path, 'lock')).break_lock()
def open(self, path, exclusive, lock_wait=None, lock=True):
self.path = path
if not os.path.isdir(path):
raise self.DoesNotExist(path)
if lock:
self.lock = UpgradableLock(os.path.join(path, 'lock'), exclusive, timeout=lock_wait).acquire()
else:
self.lock = None
self.config = ConfigParser(interpolation=None)
self.config.read(os.path.join(self.path, 'config'))
if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
raise self.InvalidRepository(path)
self.max_segment_size = self.config.getint('repository', 'max_segment_size')
self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
self.append_only = self.config.getboolean('repository', 'append_only', fallback=False)
self.id = unhexlify(self.config.get('repository', 'id').strip())
self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
def close(self):
if self.lock:
if self.io:
self.io.close()
self.io = None
self.lock.release()
self.lock = None
def commit(self, save_space=False):
"""Commit transaction
"""
self.io.write_commit()
if not self.append_only:
self.compact_segments(save_space=save_space)
self.write_index()
self.rollback()
def open_index(self, transaction_id, auto_recover=True):
if transaction_id is None:
return NSIndex()
index_path = os.path.join(self.path, 'index.%d' % transaction_id).encode('utf-8')
try:
return NSIndex.read(index_path)
except RuntimeError as error:
assert str(error) == 'hashindex_read failed' # everything else means we're in *deep* trouble
logger.warning('Repository index missing or corrupted, trying to recover')
try:
os.unlink(index_path)
except OSError as e:
raise InternalOSError(e) from None
if not auto_recover:
raise
self.prepare_txn(self.get_transaction_id())
# don't leave an open transaction around
self.commit()
return self.open_index(self.get_transaction_id())
except OSError as e:
raise InternalOSError(e) from None
def prepare_txn(self, transaction_id, do_cleanup=True):
self._active_txn = True
try:
self.lock.upgrade()
except (LockError, LockErrorT):
# if upgrading the lock to exclusive fails, we do not have an
# active transaction. this is important for "serve" mode, where
# the repository instance lives on - even if exceptions happened.
self._active_txn = False
raise
if not self.index or transaction_id is None:
try:
self.index = self.open_index(transaction_id, False)
except RuntimeError:
self.check_transaction()
self.index = self.open_index(transaction_id, False)
if transaction_id is None:
self.segments = {} # XXX bad name: usage_count_of_segment_x = self.segments[x]
self.compact = FreeSpace() # XXX bad name: freeable_space_of_segment_x = self.compact[x]
else:
if do_cleanup:
self.io.cleanup(transaction_id)
hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
index_path = os.path.join(self.path, 'index.%d' % transaction_id)
try:
with open(hints_path, 'rb') as fd:
hints = msgpack.unpack(fd)
except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError) as e:
logger.warning('Repository hints file missing or corrupted, trying to recover')
if not isinstance(e, FileNotFoundError):
os.unlink(hints_path)
# index must exist at this point
os.unlink(index_path)
self.check_transaction()
self.prepare_txn(transaction_id)
return
except OSError as os_error:
raise InternalOSError(os_error) from None
if hints[b'version'] == 1:
logger.debug('Upgrading from v1 hints.%d', transaction_id)
self.segments = hints[b'segments']
self.compact = FreeSpace()
for segment in sorted(hints[b'compact']):
logger.debug('Rebuilding sparse info for segment %d', segment)
self._rebuild_sparse(segment)
logger.debug('Upgrade to v2 hints complete')
elif hints[b'version'] != 2:
raise ValueError('Unknown hints file version: %d' % hints[b'version'])
else:
self.segments = hints[b'segments']
self.compact = FreeSpace(hints[b'compact'])
def write_index(self):
hints = {b'version': 2,
b'segments': self.segments,
b'compact': self.compact}
transaction_id = self.io.get_segments_transaction_id()
hints_file = os.path.join(self.path, 'hints.%d' % transaction_id)
with open(hints_file + '.tmp', 'wb') as fd:
msgpack.pack(hints, fd)
fd.flush()
os.fsync(fd.fileno())
os.rename(hints_file + '.tmp', hints_file)
self.index.write(os.path.join(self.path, 'index.tmp'))
os.rename(os.path.join(self.path, 'index.tmp'),
os.path.join(self.path, 'index.%d' % transaction_id))
if self.append_only:
with open(os.path.join(self.path, 'transactions'), 'a') as log:
print('transaction %d, UTC time %s' % (transaction_id, datetime.utcnow().isoformat()), file=log)
# Remove old auxiliary files
current = '.%d' % transaction_id
for name in os.listdir(self.path):
if not name.startswith(('index.', 'hints.')):
continue
if name.endswith(current):
continue
os.unlink(os.path.join(self.path, name))
self.index = None
def compact_segments(self, save_space=False):
"""Compact sparse segments by copying data into new segments
"""
if not self.compact:
return
index_transaction_id = self.get_index_transaction_id()
segments = self.segments
        unused = []  # segments that are no longer used and will be deleted by complete_xfer()
def complete_xfer():
# complete the transfer (usually exactly when some target segment
# is full, or at the very end when everything is processed)
nonlocal unused
# commit the new, compact, used segments
self.io.write_commit()
# get rid of the old, sparse, unused segments. free space.
for segment in unused:
assert self.segments.pop(segment) == 0
self.io.delete_segment(segment)
del self.compact[segment]
unused = []
for segment, freeable_space in sorted(self.compact.items()):
if not self.io.segment_exists(segment):
del self.compact[segment]
continue
segment_size = self.io.segment_size(segment)
if segment_size > 0.2 * self.max_segment_size and freeable_space < 0.15 * segment_size:
logger.debug('not compacting segment %d for later (only %d bytes are sparse)',
segment, freeable_space)
continue
segments.setdefault(segment, 0)
for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
try:
new_segment, offset = self.io.write_put(key, data, raise_full=save_space)
except LoggedIO.SegmentFull:
complete_xfer()
new_segment, offset = self.io.write_put(key, data)
self.index[key] = new_segment, offset
segments.setdefault(new_segment, 0)
segments[new_segment] += 1
segments[segment] -= 1
elif tag == TAG_DELETE:
if index_transaction_id is None or segment > index_transaction_id:
try:
self.io.write_delete(key, raise_full=save_space)
except LoggedIO.SegmentFull:
complete_xfer()
self.io.write_delete(key)
assert segments[segment] == 0
unused.append(segment)
complete_xfer()
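    # Editor's note (illustrative, not part of the original source): the skip heuristic
    # in compact_segments above is easiest to see with numbers. Assuming
    # max_segment_size = 5 MiB, a 2 MiB segment (larger than 0.2 * 5 MiB = 1 MiB) with
    # only 100 KiB of freeable space (less than 0.15 * 2 MiB ~= 307 KiB) is postponed,
    # while the same segment with 1 MiB of freeable space would be compacted right away.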
def replay_segments(self, index_transaction_id, segments_transaction_id):
self.prepare_txn(index_transaction_id, do_cleanup=False)
try:
segment_count = sum(1 for _ in self.io.segment_iterator())
pi = ProgressIndicatorPercent(total=segment_count, msg="Replaying segments %3.0f%%", same_line=True)
for i, (segment, filename) in enumerate(self.io.segment_iterator()):
pi.show(i)
if index_transaction_id is not None and segment <= index_transaction_id:
continue
if segment > segments_transaction_id:
break
objects = self.io.iter_objects(segment)
self._update_index(segment, objects)
pi.finish()
self.write_index()
finally:
self.rollback()
def _update_index(self, segment, objects, report=None):
"""some code shared between replay_segments and check"""
self.segments[segment] = 0
for tag, key, offset, size in objects:
if tag == TAG_PUT:
try:
# If this PUT supersedes an older PUT, mark the old segment for compaction and count the free space
s, _ = self.index[key]
self.compact[s] += size
self.segments[s] -= 1
except KeyError:
pass
self.index[key] = segment, offset
self.segments[segment] += 1
elif tag == TAG_DELETE:
try:
# if the deleted PUT is not in the index, there is nothing to clean up
s, offset = self.index.pop(key)
except KeyError:
pass
else:
if self.io.segment_exists(s):
# the old index is not necessarily valid for this transaction (e.g. compaction); if the segment
# is already gone, then it was already compacted.
self.segments[s] -= 1
size = self.io.read(s, offset, key, read_data=False)
self.compact[s] += size
elif tag == TAG_COMMIT:
continue
else:
msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
if report is None:
raise self.CheckNeeded(msg)
else:
report(msg)
if self.segments[segment] == 0:
self.compact[segment] += self.io.segment_size(segment)
def _rebuild_sparse(self, segment):
"""Rebuild sparse bytes count for a single segment relative to the current index."""
self.compact[segment] = 0
if self.segments[segment] == 0:
self.compact[segment] += self.io.segment_size(segment)
return
for tag, key, offset, size in self.io.iter_objects(segment, read_data=False):
if tag == TAG_PUT:
if self.index.get(key, (-1, -1)) != (segment, offset):
# This PUT is superseded later
self.compact[segment] += size
elif tag == TAG_DELETE:
# The outcome of the DELETE has been recorded in the PUT branch already
self.compact[segment] += size
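    # Editor's note (illustrative, not part of the original source): the bookkeeping used
    # above can be read as follows: self.segments[s] counts live PUT entries in segment s,
    # while self.compact[s] accumulates the bytes that compaction could free there. For
    # example, a segment holding three live PUTs plus two superseded 1 KiB PUTs would have
    # segments[s] == 3 and compact[s] == 2 * (41 + 1024) bytes, counting the 41-byte put
    # header of each superseded entry.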
def check(self, repair=False, save_space=False):
"""Check repository consistency
This method verifies all segment checksums and makes sure
the index is consistent with the data stored in the segments.
"""
if self.append_only and repair:
raise ValueError(self.path + " is in append-only mode")
error_found = False
def report_error(msg):
nonlocal error_found
error_found = True
logger.error(msg)
logger.info('Starting repository check')
assert not self._active_txn
try:
transaction_id = self.get_transaction_id()
current_index = self.open_index(transaction_id)
except Exception:
transaction_id = self.io.get_segments_transaction_id()
current_index = None
if transaction_id is None:
transaction_id = self.get_index_transaction_id()
if transaction_id is None:
transaction_id = self.io.get_latest_segment()
if repair:
self.io.cleanup(transaction_id)
segments_transaction_id = self.io.get_segments_transaction_id()
self.prepare_txn(None) # self.index, self.compact, self.segments all empty now!
segment_count = sum(1 for _ in self.io.segment_iterator())
pi = ProgressIndicatorPercent(total=segment_count, msg="Checking segments %3.1f%%", step=0.1, same_line=True)
for i, (segment, filename) in enumerate(self.io.segment_iterator()):
pi.show(i)
if segment > transaction_id:
continue
try:
objects = list(self.io.iter_objects(segment))
except IntegrityError as err:
report_error(str(err))
objects = []
if repair:
self.io.recover_segment(segment, filename)
objects = list(self.io.iter_objects(segment))
self._update_index(segment, objects, report_error)
pi.finish()
# self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>
# We might need to add a commit tag if no committed segment is found
if repair and segments_transaction_id is None:
report_error('Adding commit tag to segment {}'.format(transaction_id))
self.io.segment = transaction_id + 1
self.io.write_commit()
if current_index and not repair:
# current_index = "as found on disk"
# self.index = "as rebuilt in-memory from segments"
if len(current_index) != len(self.index):
report_error('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)))
elif current_index:
for key, value in self.index.iteritems():
if current_index.get(key, (-1, -1)) != value:
report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
if repair:
self.compact_segments(save_space=save_space)
self.write_index()
self.rollback()
if error_found:
if repair:
logger.info('Completed repository check, errors found and repaired.')
else:
logger.error('Completed repository check, errors found.')
else:
logger.info('Completed repository check, no problems found.')
return not error_found or repair
    def rollback(self):
        """Roll back the current transaction by discarding the in-memory index state.
        """
self.index = None
self._active_txn = False
def __len__(self):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return len(self.index)
def __contains__(self, id):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return id in self.index
def list(self, limit=None, marker=None):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
def get(self, id_):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
try:
segment, offset = self.index[id_]
return self.io.read(segment, offset, id_)
except KeyError:
raise self.ObjectNotFound(id_, self.path) from None
def get_many(self, ids, is_preloaded=False):
for id_ in ids:
yield self.get(id_)
def put(self, id, data, wait=True):
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
try:
segment, offset = self.index[id]
except KeyError:
pass
else:
self.segments[segment] -= 1
size = self.io.read(segment, offset, id, read_data=False)
self.compact[segment] += size
segment, size = self.io.write_delete(id)
self.compact[segment] += size
self.segments.setdefault(segment, 0)
segment, offset = self.io.write_put(id, data)
self.segments.setdefault(segment, 0)
self.segments[segment] += 1
self.index[id] = segment, offset
def delete(self, id, wait=True):
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
try:
segment, offset = self.index.pop(id)
except KeyError:
raise self.ObjectNotFound(id, self.path) from None
self.segments[segment] -= 1
size = self.io.read(segment, offset, id, read_data=False)
self.compact[segment] += size
segment, size = self.io.write_delete(id)
self.compact[segment] += size
self.segments.setdefault(segment, 0)
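    # Editor's sketch (hedged, not part of the original source): putting the methods above
    # together, a minimal local round trip could look like this, assuming an already opened
    # repository object `repo` and a 32-byte object id `key`:
    #
    #     repo.put(key, b'some chunk data')
    #     repo.commit()
    #     assert repo.get(key) == b'some chunk data'
    #     repo.delete(key)
    #     repo.commit()
    #     repo.close()
    #
    # The names repo and key are placeholders; how the Repository instance is constructed
    # and opened is outside this excerpt.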
def preload(self, ids):
"""Preload objects (only applies to remote repositories)
"""
class LoggedIO:
    class SegmentFull(Exception):
        """raised when a segment is full and a new one would have to be opened"""
header_fmt = struct.Struct('<IIB')
assert header_fmt.size == 9
put_header_fmt = struct.Struct('<IIB32s')
assert put_header_fmt.size == 41
header_no_crc_fmt = struct.Struct('<IB')
assert header_no_crc_fmt.size == 5
crc_fmt = struct.Struct('<I')
assert crc_fmt.size == 4
_commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
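    # Editor's note (illustrative, not part of the original source): on disk every segment
    # entry starts with crc32 (4 bytes) + size (4 bytes) + tag (1 byte), i.e. the 9-byte
    # header_fmt; PUT and DELETE entries additionally carry the 32-byte object key, giving
    # the 41-byte put_header_fmt. A PUT holding 100 bytes of data therefore occupies
    # 41 + 100 = 141 bytes, and 141 is the value written into the header's size field.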
def __init__(self, path, limit, segments_per_dir, capacity=90):
self.path = path
self.fds = LRUCache(capacity,
dispose=self.close_fd)
self.segment = 0
self.limit = limit
self.segments_per_dir = segments_per_dir
self.offset = 0
self._write_fd = None
def close(self):
self.close_segment()
self.fds.clear()
self.fds = None # Just to make sure we're disabled
def close_fd(self, fd):
if hasattr(os, 'posix_fadvise'): # only on UNIX
os.posix_fadvise(fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
fd.close()
def segment_iterator(self, reverse=False):
data_path = os.path.join(self.path, 'data')
dirs = sorted((dir for dir in os.listdir(data_path) if dir.isdigit()), key=int, reverse=reverse)
for dir in dirs:
filenames = os.listdir(os.path.join(data_path, dir))
sorted_filenames = sorted((filename for filename in filenames
if filename.isdigit()), key=int, reverse=reverse)
for filename in sorted_filenames:
yield int(filename), os.path.join(data_path, dir, filename)
def get_latest_segment(self):
for segment, filename in self.segment_iterator(reverse=True):
return segment
return None
def get_segments_transaction_id(self):
"""Return the last committed segment.
"""
for segment, filename in self.segment_iterator(reverse=True):
if self.is_committed_segment(segment):
return segment
return None
def cleanup(self, transaction_id):
"""Delete segment files left by aborted transactions
"""
self.segment = transaction_id + 1
for segment, filename in self.segment_iterator(reverse=True):
if segment > transaction_id:
os.unlink(filename)
else:
break
    def is_committed_segment(self, segment):
        """Check if the segment ends with a TAG_COMMIT entry
"""
try:
iterator = self.iter_objects(segment)
except IntegrityError:
return False
with open(self.segment_filename(segment), 'rb') as fd:
try:
fd.seek(-self.header_fmt.size, os.SEEK_END)
except OSError as e:
# return False if segment file is empty or too small
if e.errno == errno.EINVAL:
return False
raise e
if fd.read(self.header_fmt.size) != self.COMMIT:
return False
seen_commit = False
while True:
try:
tag, key, offset, _ = next(iterator)
except IntegrityError:
return False
except StopIteration:
break
if tag == TAG_COMMIT:
seen_commit = True
continue
if seen_commit:
return False
return seen_commit
def segment_filename(self, segment):
return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
def get_write_fd(self, no_new=False, raise_full=False):
if not no_new and self.offset and self.offset > self.limit:
if raise_full:
raise self.SegmentFull
self.close_segment()
if not self._write_fd:
if self.segment % self.segments_per_dir == 0:
dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
if not os.path.exists(dirname):
os.mkdir(dirname)
sync_dir(os.path.join(self.path, 'data'))
self._write_fd = SyncFile(self.segment_filename(self.segment))
self._write_fd.write(MAGIC)
self.offset = MAGIC_LEN
return self._write_fd
def get_fd(self, segment):
try:
return self.fds[segment]
except KeyError:
fd = open(self.segment_filename(segment), 'rb')
self.fds[segment] = fd
return fd
def close_segment(self):
if self._write_fd:
self.segment += 1
self.offset = 0
self._write_fd.close()
self._write_fd = None
def delete_segment(self, segment):
if segment in self.fds:
del self.fds[segment]
try:
os.unlink(self.segment_filename(segment))
except FileNotFoundError:
pass
def segment_exists(self, segment):
return os.path.exists(self.segment_filename(segment))
def segment_size(self, segment):
return os.path.getsize(self.segment_filename(segment))
def iter_objects(self, segment, include_data=False, read_data=True):
"""
Return object iterator for *segment*.
If read_data is False then include_data must be False as well.
Integrity checks are skipped: all data obtained from the iterator must be considered informational.
The iterator returns four-tuples of (tag, key, offset, data|size).
"""
fd = self.get_fd(segment)
fd.seek(0)
if fd.read(MAGIC_LEN) != MAGIC:
raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
offset = MAGIC_LEN
header = fd.read(self.header_fmt.size)
while header:
size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
(TAG_PUT, TAG_DELETE, TAG_COMMIT),
read_data=read_data)
if include_data:
yield tag, key, offset, data
else:
yield tag, key, offset, size
offset += size
header = fd.read(self.header_fmt.size)
def recover_segment(self, segment, filename):
if segment in self.fds:
del self.fds[segment]
with open(filename, 'rb') as fd:
data = memoryview(fd.read())
os.rename(filename, filename + '.beforerecover')
logger.info('attempting to recover ' + filename)
with open(filename, 'wb') as fd:
fd.write(MAGIC)
while len(data) >= self.header_fmt.size:
crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
if size < self.header_fmt.size or size > len(data):
data = data[1:]
continue
if crc32(data[4:size]) & 0xffffffff != crc:
data = data[1:]
continue
fd.write(data[:size])
data = data[size:]
def read(self, segment, offset, id, read_data=True):
"""
Read entry from *segment* at *offset* with *id*.
If read_data is False the size of the entry is returned instead and integrity checks are skipped.
The return value should thus be considered informational.
"""
if segment == self.segment and self._write_fd:
self._write_fd.sync()
fd = self.get_fd(segment)
fd.seek(offset)
header = fd.read(self.put_header_fmt.size)
size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ), read_data)
if id != key:
raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
segment, offset))
return data if read_data else size
def _read(self, fd, fmt, header, segment, offset, acceptable_tags, read_data=True):
# some code shared by read() and iter_objects()
try:
hdr_tuple = fmt.unpack(header)
except struct.error as err:
raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
segment, offset, err)) from None
if fmt is self.put_header_fmt:
crc, size, tag, key = hdr_tuple
elif fmt is self.header_fmt:
crc, size, tag = hdr_tuple
key = None
else:
raise TypeError("_read called with unsupported format")
if size > MAX_OBJECT_SIZE or size < fmt.size:
raise IntegrityError('Invalid segment entry size [segment {}, offset {}]'.format(
segment, offset))
length = size - fmt.size
if read_data:
data = fd.read(length)
if len(data) != length:
raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, length, len(data)))
if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
segment, offset))
if key is None and tag in (TAG_PUT, TAG_DELETE):
key, data = data[:32], data[32:]
else:
if key is None and tag in (TAG_PUT, TAG_DELETE):
key = fd.read(32)
length -= 32
if len(key) != 32:
raise IntegrityError('Segment entry key short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, 32, len(key)))
oldpos = fd.tell()
seeked = fd.seek(length, os.SEEK_CUR) - oldpos
data = None
if seeked != length:
raise IntegrityError('Segment entry data short seek [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, length, seeked))
if tag not in acceptable_tags:
raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
segment, offset))
return size, tag, key, data
def write_put(self, id, data, raise_full=False):
fd = self.get_write_fd(raise_full=raise_full)
size = len(data) + self.put_header_fmt.size
offset = self.offset
header = self.header_no_crc_fmt.pack(size, TAG_PUT)
crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
fd.write(b''.join((crc, header, id, data)))
self.offset += size
return self.segment, offset
def write_delete(self, id, raise_full=False):
fd = self.get_write_fd(raise_full=raise_full)
header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
fd.write(b''.join((crc, header, id)))
self.offset += self.put_header_fmt.size
return self.segment, self.put_header_fmt.size
def write_commit(self):
self.close_segment()
fd = self.get_write_fd()
header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
fd.write(b''.join((crc, header)))
self.close_segment()
| 42.086763 | 130 | 0.597341 | 4,644 | 37,836 | 4.727821 | 0.124677 | 0.044999 | 0.01002 | 0.010202 | 0.33262 | 0.267672 | 0.213427 | 0.176171 | 0.143378 | 0.108308 | 0 | 0.005245 | 0.309599 | 37,836 | 898 | 131 | 42.13363 | 0.835273 | 0.143012 | 0 | 0.335219 | 0 | 0 | 0.063641 | 0 | 0 | 0 | 0.00156 | 0 | 0.014144 | 1 | 0.083451 | false | 0.005658 | 0.031117 | 0.007072 | 0.188119 | 0.001414 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
704239be8a576b9afe64abe3d128ac6481c4c3c9 | 848 | py | Python | care/facility/api/serializers/uccbedrequest.py | tncwr/care | 97731bfa8371332d898d82152e9cd85bbedea8c5 | ["MIT"] | null | null | null | care/facility/api/serializers/uccbedrequest.py | tncwr/care | 97731bfa8371332d898d82152e9cd85bbedea8c5 | ["MIT"] | null | null | null | care/facility/api/serializers/uccbedrequest.py | tncwr/care | 97731bfa8371332d898d82152e9cd85bbedea8c5 | ["MIT"] | null | null | null | from django.db.models import F
from rest_framework import serializers
from care.facility.models.uccbedrequest import UCCBedRequest
from care.users.api.serializers.user import UserBaseMinimumSerializer
from config.serializers import ChoiceField
from care.facility.api.serializers import TIMESTAMP_FIELDS
class UCCBedRequestSerializer(serializers.ModelSerializer):
#id = serializers.UUIDField(source="external_id", read_only=True)
class Meta:
model = UCCBedRequest
exclude = (
"deleted",
"modified_date",
"external_id"
)
        # TIMESTAMP_FIELDS is itself a tuple of field names, so concatenate it
        # instead of nesting it inside another tuple.
        read_only_fields = TIMESTAMP_FIELDS + (
            "created_date",
        )
    def create(self, validated_data):
        return super().create(validated_data)
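    # Editor's sketch (hedged, not part of the original file): in a DRF view or viewset
    # this serializer would typically be driven like
    #
    #     serializer = UCCBedRequestSerializer(data=request.data)
    #     serializer.is_valid(raise_exception=True)
    #     serializer.save()
    #
    # where request is the incoming DRF request; the exact wiring used in this project
    # is an assumption here.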
| 26.5 | 85 | 0.693396 | 85 | 848 | 6.752941 | 0.541176 | 0.041812 | 0.055749 | 0.062718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238208 | 848 | 31 | 86 | 27.354839 | 0.888545 | 0.075472 | 0 | 0 | 0 | 0 | 0.054917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.285714 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70465e9867bb2b388f94f65d7c4303d55318d2bb | 1,126 | py | Python | mtorch/core/data/data_loaders/vision/cifar10.py | NullConvergence/torch_temp | 29a0d7190f0be6124f51bd85b8320cd8b3cef29a | ["MIT"] | 3 | 2019-08-08T13:23:50.000Z | 2019-08-15T15:29:36.000Z | mtorch/core/data/data_loaders/vision/cifar10.py | NullConvergence/torch-template | 29a0d7190f0be6124f51bd85b8320cd8b3cef29a | ["MIT"] | 10 | 2019-09-20T21:25:22.000Z | 2019-10-16T10:52:04.000Z | mtorch/core/data/data_loaders/vision/cifar10.py | NullConvergence/mtorch | 29a0d7190f0be6124f51bd85b8320cd8b3cef29a | ["MIT"] | 2 | 2019-08-08T13:23:52.000Z | 2019-08-08T19:46:55.000Z | from torchvision import datasets, transforms
from core.data.data_loaders.base import BaseDataLoader
class CIFAR10Loader(BaseDataLoader):
""" CIFAR10 data loading + transformations """
def __init__(self, data_dir,
batch_size=128,
training=True,
validation_split=0.0,
shuffle=False,
transformations="DefaultTransformations",
**kwargs):
print("[INFO][DATA] \t Preparing Cifar10 dataset ...")
_transf = BaseDataLoader.get_transformations(
self, name=transformations)
self.trans = _transf.get_train_trans() if training is True \
else _transf.get_test_trans()
self.data_dir = data_dir
self.dataset = datasets.CIFAR10(
self.data_dir, train=training, download=True, transform=self.trans)
super().__init__(self.dataset, batch_size, shuffle,
validation_split, **kwargs)
def get_class_names(self):
return('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
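# Editor's sketch (hedged, not part of the original file): assuming the loader is also
# constructed directly rather than via the project's config machinery, usage might look like
#
#     loader = CIFAR10Loader('data/', batch_size=64, training=True,
#                            validation_split=0.1, shuffle=True)
#
# which downloads CIFAR10 into data/, applies the "DefaultTransformations" train
# transforms, and yields shuffled batches of 64.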
| 36.322581 | 79 | 0.601243 | 113 | 1,126 | 5.752212 | 0.530973 | 0.043077 | 0.050769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01627 | 0.290409 | 1,126 | 30 | 80 | 37.533333 | 0.797247 | 0.033748 | 0 | 0 | 0 | 0 | 0.099074 | 0.02037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0.043478 | 0.217391 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7046602c016348dec7c023f79d562d05998a1063 | 1,751 | py | Python | rtutil/content_data.py | yaakiyu/rt-bot | f68bca95c516e08c31ecc846524dcea4c8ba1503 | ["BSD-4-Clause"] | null | null | null | rtutil/content_data.py | yaakiyu/rt-bot | f68bca95c516e08c31ecc846524dcea4c8ba1503 | ["BSD-4-Clause"] | null | null | null | rtutil/content_data.py | yaakiyu/rt-bot | f68bca95c516e08c31ecc846524dcea4c8ba1503 | ["BSD-4-Clause"] | null | null | null | # RT Util - Content Data
from typing import TypedDict, Any
from textwrap import shorten
from discord import Embed
from orjson import loads
from .utils import is_json
__all__ = (
"ContentData", "disable_content_json", "enable_content_json",
"convert_content_json", "to_text"
)
class ContentData(TypedDict):
    "Type of the JSON-style data that can be passed to `send`."
content: dict[str, Any]
author: int
json: bool
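# Editor's note (hedged example, not part of the original file): a ContentData value as
# handled by the helpers below might look like
#
#     {"content": {"content": "hello", "embeds": []},
#      "author": 123456789012345678,  # placeholder user id
#      "json": True}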
_acj_check_embeds = lambda data, type_: \
"embeds" in data["content"] and data["content"]["embeds"] \
and isinstance(data["content"]["embeds"][0], type_)
def disable_content_json(data: ContentData) -> ContentData:
    "Convert ContentData into a form that `send` and similar calls can use."
if data["json"] and _acj_check_embeds(data, dict):
for index, embed in enumerate(data["content"]["embeds"]):
data["content"]["embeds"][index] = Embed.from_dict(embed)
data["json"] = False
return data
def enable_content_json(data: ContentData) -> ContentData:
    "Make ContentData JSON-serializable again."
if not data["json"] and _acj_check_embeds(data, Embed):
for index, embed in enumerate(data["content"]["embeds"]):
data["content"]["embeds"][index] = embed.to_dict()
data["json"] = True
return data
def convert_content_json(content: str, author: int, force_author: bool = False) -> ContentData:
    "Convert the passed string into ContentData."
    data = loads(content) if is_json(content) else ContentData(
        # author is typed as int, so pass the author id itself rather than the force_author flag
        content={"content": content}, author=author, json=True
    )
if force_author:
data["author"] = author
return data
def to_text(data: ContentData) -> str:
    "Render the ContentData as a short string."
return shorten("".join(data['content'].get(key, "") for key in ("content", "embeds")), 35) | 29.677966 | 95 | 0.683038 | 210 | 1,751 | 5.52381 | 0.295238 | 0.075862 | 0.087931 | 0.044828 | 0.231034 | 0.167241 | 0.167241 | 0.117241 | 0.117241 | 0.117241 | 0 | 0.002095 | 0.182182 | 1,751 | 59 | 96 | 29.677966 | 0.807961 | 0.100514 | 0 | 0.119048 | 0 | 0 | 0.211227 | 0.085648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.119048 | 0 | 0.404762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |