| content (stringlengths 0–1.05M) | origin (stringclasses 2 values) | type (stringclasses 2 values) |
|---|---|---|
from django.apps import AppConfig
class SessionConfig(AppConfig):
name = "ticketflix.session"
verbose_name = "Session"
|
nilq/baby-python
|
python
|
try:
x = 3
print(x[1,2:3,4])
except:
print('it was supposed to fail')
|
nilq/baby-python
|
python
|
"""
By Dr Jie Zheng -Q, NAOC
v1 2019-04-27
"""
import numpy as np
from ..util import *
def date_conv():
pass
#function date_conv,date,type, BAD_DATE = bad_date
#;+
#; NAME:
#; DATE_CONV
#; PURPOSE:
#; Procedure to perform conversion of dates to one of three possible formats.
#;
#; EXPLANATION:
#; The following date formats are allowed
#;
#; format 1: real*8 scalar encoded as:
#; year*1000 + day + hour/24. + min/24./60 + sec/24./60/60
#; where day is the day of year (1 to 366)
#; format 2: Vector encoded as:
#; date[0] = year (eg. 2005)
#; date[1] = day of year (1 to 366)
#; date[2] = hour
#; date[3] = minute
#; date[4] = second
#; To indicate a date only, set a negative hour.
#; format 3: string (ascii text) encoded as
#; DD-MON-YEAR HH:MM:SS.SS
#; (eg. 14-JUL-2005 15:25:44.23)
#; OR
#; YYYY-MM-DD HH:MM:SS.SS (ISO standard)
#; (eg. 1987-07-14 15:25:44.23 or 1987-07-14T15:25:44.23)
#;
#; OR
#; DD/MM/YY (pre-2000 option for FITS DATE keywords)
#; Time of day segment is optional in all of these.
#;
#; format 4: three element vector giving spacecraft time words
#; from a Hubble Space Telescope (HST) telemetry packet. Based on
#; total number of secs since midnight, JAN. 1, 1979
#;
#; format 5: Julian day. As this is also a scalar, like format 1,
#; the distinction between the two on input is made based on their
#; value. Numbers > 2300000 are interpreted as Julian days.
#;
#; CALLING SEQUENCE
#; results = DATE_CONV( DATE, TYPE )
#;
#; INPUTS:
#; DATE - input date in one of the possible formats. Must be scalar.
#; TYPE - type of output format desired. If not supplied then
#; format 1 (real*8 scalar) is used.
#; valid values:
#; 'REAL' - format 1
#; 'VECTOR' - format 2
#; 'STRING' - format 3
#; 'FITS' - 'YYYY-MM-DDTHH:MM:SS.SS'
#; 'JULIAN' - Julian date
#; 'MODIFIED' - Modified Julian date (JD-2400000.5)
#; TYPE can be abbreviated to the single character strings 'R',
#; 'V', 'S', 'F', 'J', and 'M'.
#; Nobody wants to convert TO spacecraft time (I hope!)
#; OUTPUTS:
#; The converted date is returned as the function value.
#; Output is -1 if date is unrecognisable.
#;
#; If the time of day is omitted from the input, it will also
#; be omitted from any output string (format STRING or FITS).
#; Note that date-only strings are allowed by the FITS standard.
#; For other output formats any missing time of day is set to
#; 00:00:00.0
#;
#; KEYWORD OUTPUTS
#;
#; BAD_DATE set to 1B if date is unrecognisable
#;
#; EXAMPLES:
#; IDL> print,date_conv('2006-03-13 19:58:00.00'),f='(f15.5)'
#; 2006072.83194
#; IDL> print,date_conv( 2006072.8319444d,'F')
#; 2006-03-13T19:58:00.00
#; IDL> print,date_conv( 2006072.8319444d,'V')
#; 2006.00 72.0000 19.0000 57.0000 59.9962
#; IDL> print,date_conv( 2006072.8319444d,'J'), f='(f15.5)'
#; 2453808.33194
#;
#;
#; HISTORY:
#; version 1 D. Lindler July, 1987
#; adapted for IDL version 2 J. Isensee May, 1990
#; Made year 2000 compliant; allow ISO format input jls/acc Oct 1998
#; DJL/ACC Jan 1998, Modified to work with dates such as 6-JAN-1996 where
#; day of month has only one digit.
#; DJL, Nov. 2000, Added input/output format YYYY-MM-DDTHH:MM:SS.SS
#; Replace spaces with '0' in output FITS format W.Landsman April 2006
#; Added Julian date capabilities on input and output. M.Perrin, July 2007
#; Removed spurious /WARN keyword to MESSAGE W.L. Feb 2012
#; ...and another /WARN; added BAD_DATE, drop spurious time-of-day
#; output from strings. J. P. Leahy July 2013
#; changed all /CONTINUE warning messages to /INFO: can be suppressed
#; by setting !QUIET = 1. J. P. Leahy July 2013
#;-
#;-------------------------------------------------------------
#;
#compile_opt idl2
#; data declaration
#;
#days = [0,31,28,31,30,31,30,31,31,30,31,30,31]
#months = [' ','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT',$
# 'NOV','DEC']
#;
#; set default type if not supplied
#;
#if N_params() lt 2 then type = 'REAL'
#;
#; Determine type of input supplied
#;
#s = size(date) & ndim = s[0] & datatype = s[ndim+1]
#if ndim gt 0 then begin ;vector?
# if ndim gt 1 then goto,notvalid
# if (s[1] ne 5) && (s[1] ne 3) then goto,notvalid
# if (s[1] eq 5) then form = 2 else form = 4
# end else begin ;scalar input
# if datatype eq 0 then goto,notvalid
# if datatype eq 7 then form = 3 $ ;string
# else form = 1 ;numeric scalar
#end
#;
#; -----------------------------------
#;
#;*** convert input to year,day,hour,minute,second
#;
#; -----------------------------------
#case form of
#
# 1: begin ;real scalar
# ; The 'real' input format may be interpreted EITHER
# ; a) if < 2300000
# ; as the traditional 'real*8 encoded' format used by date_conv
# ; b) if > 2300000
# ; as a Julian Day Number
# idate = long(date)
# year = long(idate/1000)
#
# if year lt 2300 then begin
#
# ; if year is only 2 digits, assume 1900
# if year lt 100 then begin
# message,/INF, $
# 'Warning: Year specified is only 2 digits, assuming 19xx'
# year=1900+year
# idate=1900000+idate
# date=1900000.+date
# end
# day = idate - year*1000
# fdate = date-idate
# fdate = fdate*24.
# hour = fix(fdate)
# fdate = (fdate-hour)*60.0
# minute = fix(fdate)
# sec = float((fdate-minute)*60.0)
#
# endif else begin
# daycnv, date, year, mn, mndy, hr
# ; convert from month/day to day of year
# ; how many days PRECEDE the start of each month?
# YDAYS = [0,31,59,90,120,151,181,212,243,273,304,334,366]
# LEAP = (((YeaR MOD 4) EQ 0) AND ((YeaR MOD 100) NE 0)) OR $
# ((YeaR MOD 400) EQ 0)
# IF LEAP THEN YDAYS[2:*] = YDAYS[2:*] + 1
# day = ydays[mn-1]+mndy
#
# hour = fix(hr)
# fmin = (hr-hour)*60
# minute = fix(fmin)
# sec = float((fmin-minute)*60)
# endelse
# end
#
# 2: begin ;vector
# year = fix(date[0])
#;
#; if year is only 2 digits, assume 1900
#;
# if year lt 100 then begin
# message,/INF, $
# 'Warning: Year specified is only 2 digits, assuming 19xx'
# year=1900+year
# end
#;
# day = fix(date[1])
# hour = fix(date[2])
# minute = fix(date[3])
# sec = float(date[4])
# end
#
# 3: begin ;string
# temp = date
#;
#; check for old type of date, DD-MMM-YYYY
#;
# test = STRPOS(temp,'-')
# if test ge 0 && test le 2 then begin
# day_of_month = fix(gettok(temp,'-'))
# month_name = gettok(temp,'-')
# year = fix(gettok(temp,' '))
#;
#; determine month number from month name
#;
# month_name = strupcase(month_name)
# for mon = 1,12 do begin
# if month_name eq months[mon] then goto,found
# end
# message,/INFORMATIONAL, 'Invalid month name specified'
# goto, notvalid
#;
#; check for new type of date, ISO: YYYY-MM-DD
#;
# end else if strpos(temp,'-') eq 4 then begin
# year = fix(gettok(temp,'-'))
# month_name = gettok(temp,'-')
# mon= FIX(month_name)
# day_of_month=gettok(temp,' ')
# if strlen(temp) eq 0 then begin
# dtmp=gettok(day_of_month,'T')
# temp=day_of_month
# day_of_month=dtmp
# end
# day_of_month=fix(day_of_month)
#;
#; check for DD/MM/YY
#;
# end else if STRPOS(temp,'/') eq 2 then begin
# day_of_month = FIX(gettok(temp,'/'))
# mon = FIX(gettok(temp,'/'))
# year = 1900 + FIX(STRMID(temp,0,2))
# end else goto, notvalid
#
# found:
# hour = gettok(temp,':')
# hour = hour NE '' ? FIX(hour) : -1
# minute = fix(gettok(temp,':'))
# sec = float(strtrim(strmid(temp,0,5)))
#
# IF (mon LT 1 || mon GT 12) THEN BEGIN
# MESSAGE, /INFORMATIONAL, 'Invalid month specified'
# goto, notvalid
# ENDIF
#;
#; if year is only 2 digits, assume 1900
#;
# if year lt 100 then begin
# message,/INFORMATIONAL, $
# 'Warning: Year specified is only 2 digits, assuming 19xx'
# year=1900+year
# end
#;
#;
#; convert to day of year from month/day_of_month
#;
#; correction for leap years
#;
#; if (fix(year) mod 4) eq 0 then days(2) = 29 ;add one to february
# lpyr = ((year mod 4) eq 0) and ((year mod 100) ne 0) $
# or ((year mod 400) eq 0)
# if lpyr eq 1 then days[2] = 29 ; if leap year, add day to Feb.
#;
#;
#; compute day of year
#;
# day = fix(total(days[0:mon-1])+day_of_month)
# end
#
# 4 : begin ;spacecraft time
# SC = DOUBLE(date)
# SC = SC + (SC LT 0.0)*65536. ;Get rid of neg. numbers
#;
#; Determine total number of secs since midnight, JAN. 1, 1979
#;
# SECS = SC[2]/64 + SC[1]*1024 + SC[0]*1024*65536.
# SECS = SECS/8192.0D0 ;Convert from spacecraft units
#;
#; Determine number of years
#;
# MINS = SECS/60.
# HOURS = MINS/60.
# TOTDAYS = HOURS/24.
# YEARS = TOTDAYS/365.
# YEARS = FIX(YEARS)
#;
#; Compute number of leap years past
#;
# LEAPYEARS = (YEARS+2)/4
#;
#; Compute day of year
#;
# DAY = FIX(TOTDAYS-YEARS*365.-LEAPYEARS)
#;
#; Correct for case of being right at end of leapyear
#;
# IF DAY LT 0 THEN BEGIN
# DAY = DAY+366
# LEAPYEARS = LEAPYEARS-1
# YEARS = YEARS-1
# END
#;
#; COMPUTE HOUR OF DAY
#;
# TOTDAYS = YEARS*365.+DAY+LEAPYEARS
# HOUR = FIX(HOURS - 24*TOTDAYS)
# TOTHOURS = TOTDAYS*24+HOUR
#;
#; COMPUTE MINUTE
#;
# MINUTE = FIX(MINS-TOTHOURS*60)
# TOTMIN = TOTHOURS*60+MINUTE
#;
#; COMPUTE SEC
#;
# SEC = SECS-TOTMIN*60
#;
#; COMPUTE ACTUAL YEAR
#;
# YEAR = YEARS+79
#;
#; if year is only 2 digits, assume 1900
#;
# if year lt 100 then begin
# message, /INF, $
# 'Warning: Year specified is only 2 digits, assuming 19xx'
# year=1900+year
# end
#;
#;
#; START DAY AT ONE AND NOT ZERO
#;
# DAY++
# END
#ENDCASE
#;
#; correction for leap years
#;
# if form ne 3 then begin ;Was it already done?
# lpyr = ((year mod 4) eq 0) && ((year mod 100) ne 0) $
# || ((year mod 400) eq 0)
# if lpyr eq 1 then days[2] = 29 ; if leap year, add day to Feb.
# end
#;
#; check for valid day
#;
# if (day lt 1) || (day gt total(days)) then begin
# message, /INFORMATIONAL, $
# 'ERROR -- There are only ' + strtrim(fix(total(days)),2) + $
# ' days in year '+strtrim(year,2)
# goto, notvalid
# endif
#;
#; find month which day occurs
#;
# day_of_month = day
# month_num = 1
# while day_of_month gt days[month_num] do begin
# day_of_month = day_of_month - days[month_num]
# month_num = month_num+1
# end
#; ---------------------------------------
#;
#; ***** Now convert to output format
#;
#; ---------------------------------------
#;
#; is type a string
#;
#s = size(type)
#if (s[0] ne 0) or (s[1] ne 7) then $
# message,'ERROR - Output type specification must be a string'
#;
#outcode = STRMID(STRUPCASE(type),0,1)
#IF (outcode EQ 'S' || outcode EQ 'F') && hour GE 0 THEN BEGIN
# xsec = strmid(string(sec+100,'(f6.2)'),1,5)
# if xsec EQ '60.00' then begin
# minute = minute+1
# xsec = '00.00'
# endif
# xminute = string(minute,'(i2.2)')
# if xminute EQ '60' then begin
# hour = hour+1
# xminute = '00'
# endif
# tod = string(hour,'(i2.2)') + ':' +xminute + ':'+ xsec
#ENDIF
#
#case outcode of
#
# 'V' : begin ;vector output
# out = fltarr(5)
# out[0] = year
# out[1] = day
# out[2] = hour > 0
# out[3] = minute
# out[4] = sec
# end
#
# 'R' : begin ;floating point scalar
#; if year gt 1900 then year = year-1900
# out = sec/24.0d0/60./60. + minute/24.0d0/60. $
# + (hour > 0)/24.0d0 + day + year*1000d0
# end
#
# 'S' : begin ;string output
#
# month_name = months[month_num]
#;
#; encode into ascii_date
#;
# out = string(day_of_month,'(i2)') +'-'+ month_name +'-' + $
# string(year,'(i4)')
#
# ; Omit time of day from output string if not specified on input
# IF hour GE 0 THEN out += ' '+tod
# end
# 'F' : begin
# out = string(year,'(i4)')+'-'+string(month_num,'(I2.2)') $
# + '-' + string(day_of_month,'(i2.2)')
# IF hour GE 0 THEN out += 'T' + tod
# end
#
# 'J' : begin ; Julian Date
# ydn2md, year, day, mn, dy
# juldate, [year, mn, dy, hour, minute, sec], rjd
# out = rjd+2400000 ; convert from reduced to regular JD
# end
# 'M' : begin ; Modified Julian Date = JD - 2400000.5
# ydn2md, year, day, mn, dy
# juldate, [year, mn, dy, hour, minute, sec], rjd
# out = rjd-0.5 ; convert from reduced to modified JD
# end
#
# else: begin ;invalid type specified
# print,'DATE_CONV-- Invalid output type specified'
# print,' It must be ''REAL'', ''STRING'', ''VECTOR'', ''JULIAN'', ''MODIFIED'', or ''FITS''.'
# return,-1
# end
#endcase
#
#bad_date = 0B
#return,out
#;
#; invalid input date error section
#;
#NOTVALID:
#bad_date = 1B
#message, 'Invalid input date specified', /INFORMATIONAL
#return, -1
#end
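# --- Illustrative addition (not part of the original IDL port): a minimal Python
# sketch of the "format 1" encoding documented above, i.e.
#     year*1000 + day_of_year + hour/24. + min/24./60 + sec/24./60/60
# The helper name `real_to_ymdhms` is hypothetical.
def real_to_ymdhms(real_date):
    """Decode a format-1 real scalar into (year, day_of_year, hour, minute, second)."""
    idate = int(real_date)
    year = idate // 1000
    day = idate - year * 1000          # day of year (1 to 366)
    frac = (real_date - idate) * 24.0  # fractional day -> hours
    hour = int(frac)
    frac = (frac - hour) * 60.0        # remaining fraction -> minutes
    minute = int(frac)
    second = (frac - minute) * 60.0
    return year, day, hour, minute, second
# Example: real_to_ymdhms(2006072.8319444) ~ (2006, 72, 19, 57, 59.996), matching the doc above.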
|
nilq/baby-python
|
python
|
from commandlib import Command, CommandError
from path import Path
import patoolib
import shutil
import os
def log(message):
print(message)
def extract_archive(filename, directory):
patoolib.extract_archive(filename, outdir=directory)
class DownloadError(Exception):
pass
def download_file(downloaded_file_path, url, max_connections=2, max_concurrent=5):
"""Download file to specified location."""
file_path = Path(downloaded_file_path)
assert file_path.isabs(), "download file path must be absolute, not relative"
if file_path.exists():
log("{} already downloaded".format(file_path))
return
log("Downloading: {}\n".format(url))
aria2c = Command("aria2c")
aria2c = aria2c("--max-connection-per-server={}".format(max_connections))
aria2c = aria2c("--max-concurrent-downloads={}".format(max_concurrent))
try:
aria2c(
"--dir={}".format(file_path.dirname()),
"--out={}.part".format(file_path.basename()),
url
).run()
except CommandError:
raise DownloadError(
"Failed to download {}. Re-running may fix the problem.".format(url)
)
shutil.move(file_path + ".part", file_path)
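# Illustrative usage sketch (hypothetical paths/URL; assumes the aria2c binary is installed and on PATH):
#
#     download_file("/tmp/data.tar.gz", "https://example.com/data.tar.gz")
#     extract_archive("/tmp/data.tar.gz", "/tmp/data")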
|
nilq/baby-python
|
python
|
from dataclasses import dataclass, field
from typing import Optional, List
@dataclass
class MessageEvent(object):
username: str
channel_name: str
text: Optional[str]
command: str = ""
args: List[str] = field(default_factory=list)
@dataclass
class ReactionEvent(object):
emoji: str
username: str
added: bool
message: MessageEvent
|
nilq/baby-python
|
python
|
"""
To get the mdp parameters from sepsis simulator
@author: kingsleychang
"""
import numpy as np
import pandas as pd
import torch
from .sepsisSimDiabetes.DataGenerator import DataGenerator
from .sepsisSimDiabetes.MDP import MDP_DICT
from .sepsisSimDiabetes.State import State
from sklearn.model_selection import train_test_split
import platform
from os.path import join as pjoin, exists as pexists
import os
import pickle
def run_policy(policy, N, mdp='linear', return_trajectories=False, seed=None,
obs_sigmas=0., gamma=0.9, max_num_steps=20):
## First, run the optimal policy to get rewards
if seed is None:
seed = np.random.randint(0, 1000)
dg = DataGenerator(seed=seed, mdp=mdp)
### first sim data under optimal policy to get range of what is best
(states, actions, seq_lens, rewards,
_, init_observs, observs, init_observs_mask,
observs_mask, action_probs) = dg.simulate(
policy, N, max_num_steps=max_num_steps,
policy_idx_type='full', p_diabetes=0.2,
output_state_idx_type='full', obs_sigmas=obs_sigmas)
rewards[np.isinf(rewards)] = 0
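# Discounted return per trajectory: G = sum_t gamma^t * r_t (rewards has shape [N, max_num_steps])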
gam_t = np.power(gamma, np.arange(max_num_steps))
returns = np.sum(rewards * gam_t, axis=1)
avg_returns = np.mean(returns)
if not return_trajectories:
return avg_returns
observs[np.isinf(observs)] = 0 # The val after end time is -inf
mu = 0.0
for t in range(observs.shape[1]):
mu += observs[:, t, :] * (gamma ** t)
mu_mean = np.mean(mu, axis=0)
D = {'o_init': init_observs, 'o': observs, 's': states,
'a': actions, 'len': seq_lens, 'mu': mu_mean, 'r': rewards,
'seed': seed, 'N': N, 'reward': avg_returns, 'gamma': gamma,
'max_num_steps': max_num_steps}
return avg_returns, D
def run_policy_to_get_exp(
num_exp, policy, mdp='linear', seed=None, obs_sigmas=0.,
max_num_steps=20):
the_mdp = MDP_DICT[mdp](
init_state_idx=None, # Random initial state
policy_array=policy, policy_idx_type='full',
p_diabetes=0.2, seed=seed)
# Set the default values of states / actions to -1
iter_obs = np.ones((num_exp, State.PHI_DIM), dtype=np.float32) * (-1)
iter_actions = np.ones(num_exp, dtype=int) * (-1)
iter_obs_next = np.ones((num_exp, State.PHI_DIM), dtype=np.float32) * (-1)
iter_s = np.ones((num_exp), dtype=np.int64) * (-1)
iter_s_next = np.ones((num_exp), dtype=np.int64) * (-1)
# Start
the_mdp.state = the_mdp.get_new_state()
t = 0
for i in range(num_exp):
iter_obs[i] = the_mdp.state.get_phi_vector()
iter_s[i] = the_mdp.state.get_state_idx(idx_type='full')
# this_obs = o_init + obs_sigmas * self.rng.normal(0, 1, NUM_OBS)
step_action = the_mdp.select_actions() # policy takes action & returns Action object
iter_actions[i] = step_action.get_action_idx().astype(int)
# t+1
step_reward = the_mdp.transition(step_action)
iter_obs_next[i] = the_mdp.state.get_phi_vector()
iter_s_next[i] = the_mdp.state.get_state_idx(idx_type='full')
t += 1
if t == max_num_steps:
the_mdp.state = the_mdp.get_new_state()
t = 0
return {
'o': iter_obs,
'o_next': iter_obs_next,
'a': iter_actions,
's': iter_s,
's_next': iter_s_next,
}
def train_test_split_D(D, val_ratio=0.2, seed=321):
'''
Split the sepsis database into train and val
'''
if val_ratio > 0:
train_D, val_D = {}, {}
train_D['s'], val_D['s'], \
train_D['o_init'], val_D['o_init'], \
train_D['o'], val_D['o'], \
train_D['r'], val_D['r'], \
train_D['a'], val_D['a'], \
= train_test_split(
D['s'], D['o_init'], D['o'], D['r'], D['a'],
test_size=val_ratio, random_state=seed, shuffle=True,
)
train_D['max_num_steps'] = val_D['max_num_steps'] = D['max_num_steps']
train_D['gamma'] = val_D['gamma'] = D['gamma']
val_D['N'] = int(val_ratio * D['N'])
train_D['N'] = D['N'] - val_D['N']
return train_D, val_D
def load_mma_model(name):
''' Follow the stored location in run_mma.py. Load the model based on val perf '''
best_path = pjoin('logs', name, 'mma.pkl')
# Setup-specific helper: fetch the logs from the remote server if they are missing locally
is_in_q_server = (platform.node().startswith('vws') or platform.node().startswith('q'))
if not pexists(best_path) and is_in_q_server:
cmd = f'rsync -avzL v:/h/kingsley/irl_nodegam/logs/{name} ./logs/'
print(cmd)
os.system(cmd)
assert pexists(best_path), f'No {best_path} exists!'
with open(best_path, 'rb') as fp:
params = pickle.load(fp)
W = params['weight'][np.argmax(params['val_a'])]
def model(x):
if isinstance(x, torch.Tensor):
x = x.cpu().numpy()
elif isinstance(x, pd.DataFrame):
x = x.values
return x @ W
return model
|
nilq/baby-python
|
python
|
SAMPLE_MAP = load_samples('examples/sample_list.xlsx')
print(f'SAMPLE_MAP:\n{SAMPLE_MAP}')
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
Given a word, you need to judge whether the usage of capitals in it is right or
not.
We define the usage of capitals in a word to be right when one of the following
cases holds:
All letters in this word are capitals, like "USA".
All letters in this word are not capitals, like "leetcode".
Only the first letter in this word is capital if it has more than one letter,
like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
Example 1:
Input: "USA"
Output: True
Example 2:
Input: "FlaG"
Output: False
Note: The input will be a non-empty word consisting of uppercase and lowercase
latin letters.
"""
class Solution:
def detectCapitalUse(self, word: str) -> bool:
"""
Two passes is easy
How to do it in one pass
"""
if not word:
return True
head_upper = word[0].isupper()
# except for the head
has_lower = False
has_upper = False
for w in word[1:]:
if w.isupper():
has_upper = True
if has_lower or not head_upper:
return False
else:
has_lower = True
if has_upper:
return False
return True
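# Alternative sketch (illustrative, not part of the original solution): the three allowed
# patterns map directly onto Python's built-in string predicates.
class SolutionBuiltins:
    def detectCapitalUse(self, word: str) -> bool:
        # "USA" (all caps), "leetcode" (all lower), or "Google" (capitalized first letter only)
        return word.isupper() or word.islower() or word.istitle()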
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# coding: utf-8
# MedSal Database
# Connection & Data query
#
# University of Applied Sciences of Lübeck
#
# Anna Androvitsanea
# anna.androvitsanea@th-luebeck.de
# This script includes the code for connecting to and querying the data that have been uploaded to the MedSal project's [database](https://www.uhydro.de/medsaldb/index.php).
from __future__ import print_function
# Import libraries
from datetime import date, datetime, timedelta
import mysql.connector
from mysql.connector import Error
import sqlalchemy as db
from sqlalchemy import create_engine, MetaData, Table, Column, String
from sqlalchemy.ext.automap import automap_base
import pandas as pd
# Connection
# Engine
# Create an engine to access the database as guest
print("\n")
print('**************************')
print('Connecting to the database')
print('**************************')
engine = db.create_engine('mysql+mysqlconnector://uhydro_16_r:MiRcTD69aRAYn2Ji@sql628.your-server.de:3306/uhydro_db16') # connect to server
# Entities
# Print the names of the available tables
Base = automap_base()
Base.prepare(engine, reflect=True)
print("The entities of the database are the following: ")
print("\n")
print(Base.classes.keys())
# Attributes
# Choose one entity to see its attributes
print("\n")
entity = raw_input("Please type the name of the entity whose attributes you want to see, as presented in the list above, e.g. Stage_data: ")
print("\n")
print("You typed: ")
print(entity)
print("\n")
# Function to enumerate and print the attributes of a table
def find_attributes(entity, engine):
# search the attributes of the entity
meta = MetaData(bind = engine)
table = Table(entity, meta, autoload = True, autoload_with = engine)
columns = [c for c in table.columns]
for i in range(len(columns)):
column = columns[i]
print("%d. Table %s: Attribute %s." % (i + 1, entity, column.name))
# Check attributes for the chosen table
print("The entity has the following attributes: \n")
find_attributes(entity, engine)
print("\n")
# make connection as guest
connection = mysql.connector.connect(user='uhydro_16_r',
password='MiRcTD69aRAYn2Ji',
host='sql628.your-server.de',
database='uhydro_db16')
# construct cursor to store the data
cursor = connection.cursor()
# state query in raw sql and save it in the variable query
query = raw_input("Please type your SQL query, e.g. 'SELECT * FROM Gauging_characteristics': ")
print("\n")
# execute query
print('***************')
print('Executing query')
print('***************')
cursor.execute(query)
print("\n")
# print the output of the query
print('******************')
print('Print query output')
print('******************')
print("\n")
for i in cursor:
print(i)
# save all data into a dataframe for further processing
data = pd.read_sql(query, connection)
cursor.close()
connection.close()
print("\n")
# Export the results of the query to a csv file
print('*******************************')
print('Export query output to csv file')
data.to_csv('data.csv', sep =';', index = False, header = True, encoding = 'utf-8')
#with open('data.csv', mode='w') as data:
# csv.writer(data, delimiter=';', header = True)
print('*******************************')
print("\n")
print('*************')
print('End of script')
print('*************')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Filters module tests."""
from __future__ import absolute_import, print_function
import uuid
from invenio_admin.filters import FilterConverter, UUIDEqualFilter
def test_uuid_filter(app, testmodelcls):
"""Test UUID."""
with app.app_context():
f = UUIDEqualFilter(testmodelcls.uuidcol, 'uuidcol')
q = testmodelcls.query
assert q.whereclause is None
q_applied = f.apply(testmodelcls.query, str(uuid.uuid4()), None)
assert q_applied.whereclause is not None
q_applied = f.apply(testmodelcls.query, "", None)
assert q_applied.whereclause is None
q_applied = f.apply(testmodelcls.query, "test", None)
assert q_applied.whereclause is None
def test_filter_converter_uuid(testmodelcls):
"""Test filter converter."""
c = FilterConverter()
f = c.convert('uuidtype', testmodelcls.uuidcol, 'uuidcol')
assert len(f) == 1
assert isinstance(f[0], UUIDEqualFilter)
def test_filter_converter_variant(testmodelcls):
"""Test filter converter."""
c = FilterConverter()
f = c.convert('variant', testmodelcls.dt, 'dt')
assert len(f) == 7
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import binascii
import unittest
from bip_utils import Base58ChecksumError, Bip38PubKeyModes, Bip38Decrypter, Bip38Encrypter
from tests.ecc.test_ecc import (
TEST_VECT_SECP256K1_PRIV_KEY_INVALID,
TEST_ED25519_PRIV_KEY, TEST_ED25519_BLAKE2B_PRIV_KEY, TEST_ED25519_MONERO_PRIV_KEY,
TEST_NIST256P1_PRIV_KEY, TEST_SR25519_PRIV_KEY
)
# Tests from BIP38 page (without EC multiplication)
# https://github.com/bitcoin/bips/blob/master/bip-0038.mediawiki
TEST_VECT = [
{
"pub_key_mode": Bip38PubKeyModes.UNCOMPRESSED,
"passphrase": "TestingOneTwoThree",
"priv_key_bytes": b"cbf4b9f70470856bb4f40f80b87edb90865997ffee6df315ab166d713af433a5",
"encrypted": "6PRVWUbkzzsbcVac2qwfssoUJAN1Xhrg6bNk8J7Nzm5H7kxEbn2Nh2ZoGg",
},
{
"pub_key_mode": Bip38PubKeyModes.UNCOMPRESSED,
"passphrase": "Satoshi",
"priv_key_bytes": b"09c2686880095b1a4c249ee3ac4eea8a014f11e6f986d0b5025ac1f39afbd9ae",
"encrypted": "6PRNFFkZc2NZ6dJqFfhRoFNMR9Lnyj7dYGrzdgXXVMXcxoKTePPX1dWByq",
},
{
"pub_key_mode": Bip38PubKeyModes.COMPRESSED,
"passphrase": "TestingOneTwoThree",
"priv_key_bytes": b"cbf4b9f70470856bb4f40f80b87edb90865997ffee6df315ab166d713af433a5",
"encrypted": "6PYNKZ1EAgYgmQfmNVamxyXVWHzK5s6DGhwP4J5o44cvXdoY7sRzhtpUeo",
},
{
"pub_key_mode": Bip38PubKeyModes.COMPRESSED,
"passphrase": "Satoshi",
"priv_key_bytes": b"09c2686880095b1a4c249ee3ac4eea8a014f11e6f986d0b5025ac1f39afbd9ae",
"encrypted": "6PYLtMnXvfG3oJde97zRyLYFZCYizPU5T3LwgdYJz1fRhh16bU7u6PPmY7",
},
]
# Tests for invalid encrypted strings
TEST_VECT_DEC_INVALID = {
Base58ChecksumError: [
"6PYRZqGd3ecBNWQhrkyJmJGcTnUv7pmiDRxQ3ipJjenAHBNiokh2HTV1BU",
"6PYV1dQkF66uex9TVxW9JQhjsr4bHkwu1zfjHtvZD7VcJssY4awDjGgc26",
],
ValueError: [
# Invalid base58 encoding
"6PYNKZ1EAgYgmQfmNVamxyXVWHzK5s6DGhwP4J5o44cvXdoY7sRzhtpUeO",
"6PYltMnXvfG3oJde97zRyLYFZCYizPU5T3LwgdYJz1fRhh16bU7u6PPmY7",
# Invalid length
"H3VYWSrgqLzqdXreTTfkL83ZJASYVFvy78q7j69nnt5WAcgMfq3eX2i",
"cGAd8AVkr5wZEQpJ7wzyc4BKerkEwiyGVPUnJ2cV6wgLhpVuXPr71eh1G1Hm7Gu",
# Invalid prefix
"6SSstNWVoV33gBrLYEbxUDj7xdnWcX6SNZvCedM3812j7vLysouLGzeFz9",
# Invalid flagbyte
"6PJQrGM5jUZ2mSug3ZKcy6W72T54dbu1wZSD8Q2TWRJ3q9qHiQPEBkafwL",
# Invalid address hash
"6PYTRmk5E6ddFqtiPZZu6BpZ1LXAVazbvkmUys9R2qz6o3eSsW9GDknHNu",
],
}
#
# Tests
#
class Bip38NoEcTests(unittest.TestCase):
# Run all tests in test vector
def test_vector(self):
for test in TEST_VECT:
# Test encryption
enc = Bip38Encrypter.EncryptNoEc(binascii.unhexlify(test["priv_key_bytes"]), test["passphrase"], test["pub_key_mode"])
self.assertEqual(test["encrypted"], enc)
# Test decryption
dec, pub_key_mode = Bip38Decrypter.DecryptNoEc(test["encrypted"], test["passphrase"])
self.assertEqual(test["priv_key_bytes"], binascii.hexlify(dec))
self.assertEqual(test["pub_key_mode"], pub_key_mode)
# Test invalid for decoding
def test_dec_invalid(self):
for ex, tests in TEST_VECT_DEC_INVALID.items():
for test in tests:
# "with" is needed because some exceptions are raised by Base58 module
with self.assertRaises(ex):
Bip38Decrypter.DecryptNoEc(test, "")
# Tests invalid keys for encrypting
def test_enc_invalid_keys(self):
self.assertRaises(TypeError, Bip38Encrypter.EncryptNoEc, TEST_ED25519_PRIV_KEY, "")
self.assertRaises(TypeError, Bip38Encrypter.EncryptNoEc, TEST_ED25519_BLAKE2B_PRIV_KEY, "")
self.assertRaises(TypeError, Bip38Encrypter.EncryptNoEc, TEST_ED25519_MONERO_PRIV_KEY, "")
self.assertRaises(TypeError, Bip38Encrypter.EncryptNoEc, TEST_NIST256P1_PRIV_KEY, "")
self.assertRaises(TypeError, Bip38Encrypter.EncryptNoEc, TEST_SR25519_PRIV_KEY, "")
for test in TEST_VECT_SECP256K1_PRIV_KEY_INVALID:
self.assertRaises(ValueError, Bip38Encrypter.EncryptNoEc, binascii.unhexlify(test), b"\x00")
|
nilq/baby-python
|
python
|
from unittest.mock import patch
from django.test import TestCase
from store.models import product_image_file_path
class ModelTests(TestCase):
@patch('uuid.uuid4')
def test_product_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = product_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/product/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
|
nilq/baby-python
|
python
|
"""Tests in the tutorial."""
from fractions import Fraction
from dice_stats import Dice
def test_basic_dice_operations_ga():
"""Test basic dice operations."""
d12 = Dice.from_dice(12)
assert d12 + 3 == Dice.from_full(
{
4: Fraction(1, 12),
5: Fraction(1, 12),
6: Fraction(1, 12),
7: Fraction(1, 12),
8: Fraction(1, 12),
9: Fraction(1, 12),
10: Fraction(1, 12),
11: Fraction(1, 12),
12: Fraction(1, 12),
13: Fraction(1, 12),
14: Fraction(1, 12),
15: Fraction(1, 12),
}
)
def test_basic_dice_operations_gs():
"""Test basic dice operations."""
d6 = Dice.from_dice(6)
gsw = Dice.from_full(
{
5: Fraction(1, 36),
6: Fraction(2, 36),
7: Fraction(3, 36),
8: Fraction(4, 36),
9: Fraction(5, 36),
10: Fraction(6, 36),
11: Fraction(5, 36),
12: Fraction(4, 36),
13: Fraction(3, 36),
14: Fraction(2, 36),
15: Fraction(1, 36),
}
)
assert 2 * d6 + 3 == gsw
assert d6 + d6 + 3 == gsw
def test_rerolling_reroll():
"""Test reroll."""
d6 = Dice.from_dice(6)
assert 2 * d6.reroll([1, 2]) + 3 == Dice.from_full(
{
5: Fraction(1, 324),
6: Fraction(1, 162),
7: Fraction(1, 36),
8: Fraction(4, 81),
9: Fraction(8, 81),
10: Fraction(12, 81),
11: Fraction(14, 81),
12: Fraction(16, 81),
13: Fraction(12, 81),
14: Fraction(8, 81),
15: Fraction(4, 81),
}
)
|
nilq/baby-python
|
python
|
import propar
import time
import random
dut = propar.instrument('com1')
print()
print("Testing using propar @", propar.__file__)
print()
n = 10
all_parameters = dut.db.get_all_parameters()
bt = time.perf_counter()
for i in range(n):
for p in all_parameters:
dut.read_parameters([p])
et = time.perf_counter()
print("{:<20}{:>8}".format("read all parameters", (et - bt) / n))
print("{:<20}{:>8}".format("read one parameter ", (et - bt) / len(all_parameters) / n))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
from exoscale.api.compute import *
class TestComputeSSHKey:
def test_delete(self, exo, sshkey):
ssh_key = SSHKey._from_cs(exo.compute, sshkey(teardown=False))
ssh_key_name = ssh_key.name
ssh_key.delete()
assert ssh_key.name is None
res = exo.compute.cs.listSSHKeyPairs(name=ssh_key_name, fetch_list=True)
assert len(res) == 0
|
nilq/baby-python
|
python
|
from jobmine.jobmine import JobMine # yes, I do find this quite funny
|
nilq/baby-python
|
python
|
import requests
bad = []
good = []
proxy_file = open("proxies.txt", "r")
proxies = proxy_file.read()
proxies = proxies.splitlines()
for proxy in proxies:
try:
print("Checking: " + proxy)
requests.get("http://discord.com", proxies={"http": proxy, "https": proxy}, timeout=2)
good.append(proxy)
except (requests.exceptions.ProxyError, requests.exceptions.ConnectionError):
bad.append(proxy)
print("\nBad:")
print('\n'.join(bad))
print("\nGood:")
print('\n'.join(good))
|
nilq/baby-python
|
python
|
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dacodesjobs',
'USER': 'django',
'PASSWORD': 'holamundo',
'HOST': 'localhost',
'PORT': '',
}
}
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
|
nilq/baby-python
|
python
|
import numpy as np
import plotly
import plotly.graph_objs as go
from HypeNet.Networks.FCNN_SoftmaxCE import FCNN_SoftmaxCE
from HypeNet.Core.loadData import loadFashionMnist
from HypeNet.Core.Trainer import Trainer
from HypeNet.Core.utils import *
import os
DIR = os.path.dirname(os.path.abspath(__file__)) + '/SavedNetwork/FashionMnist/'
X_train, Y_train, X_val, Y_val, Y_train_label, Y_val_label = loadFashionMnist()
num_epoch = 10
minibatch_size = 256
save_network = True
learning_rate = 0.001
optimizer_type = 'adam'
network = FCNN_SoftmaxCE(784, [256, 256, 256, 256, 256], 10, ['Relu', 'Relu', 'Relu', 'Relu', 'Relu'], weight_init_std = 'he', use_dropout = True, use_batchnorm = True, keep_probs = [0.9, 0.9, 0.9, 0.9, 0.9])
trainer = Trainer(network, X_train, Y_train, X_val, Y_val, num_epoch, minibatch_size, optimizer_type, {'lr' : learning_rate}, verbose = True, LossAccInterval = 20)
train_loss_list, val_loss_list, train_acc_list, val_acc_list, x_axis, lrs = trainer.train()
if(save_network == True):
networkSaver(network, DIR)
trainLoss = go.Scatter(x = x_axis, y = train_loss_list, mode = 'lines', name = 'training loss')
valLoss = go.Scatter(x = x_axis, y = val_loss_list, mode = 'lines', name = 'validation loss')
trainAcc = go.Scatter(x = x_axis, y = train_acc_list, mode = 'lines', name = 'training acc')
valAcc = go.Scatter(x = x_axis, y = val_acc_list, mode = 'lines', name = 'validation acc')
loss_data = [trainLoss, valLoss]
acc_data = [trainAcc, valAcc]
plotly.offline.plot({'data' : loss_data, 'layout' : go.Layout(title = 'Loss')}, filename = 'FashionMnist_Loss.html')
plotly.offline.plot({'data' : acc_data, 'layout' : go.Layout(title = 'Accuracy')}, filename = 'FashionMnist_Acc.html')
|
nilq/baby-python
|
python
|
'''
Python Exercise 73: Create a tuple filled with the top 20 teams of the Brazilian Football Championship (Campeonato Brasileiro) table,
in order of placement. Then show:
a) The first 5 teams.
b) The last 4 teams in the table.
c) The teams in alphabetical order.
d) The position of the Bragantino team.
Note: the 2020 Campeonato Brasileiro table is used.
'''
times = ('Flamengo', 'Internacional', 'Atlético-MG', 'São Paulo', 'Fluminense',
'Grêmio', 'Palmeiras', 'Santos', 'Athletico-PR', 'Bragantino',
'Ceará', 'Corinthians', 'Atlético-GO', 'Bahia', 'Sport',
'Fortaleza', 'Vasco da Gama', 'Goiás', 'Coritiba', 'Botafogo')
print('=-'*30)
print(f'Lista de times do Brasileirão: {times}')
print('=-'*30)
print(f'Os 5 primeiros times são: {times[0:5]}')
print('=-'*30)
print(f'Os 4 ultimos colocados são: {times[-4:]}')
print('=-'*30)
print(f'Times em ordem alfabética: {sorted(times)}')
print('=-'*30)
print(f'O Bragantino está na {times.index("Bragantino") + 1}ª posição.')
|
nilq/baby-python
|
python
|
import os
import sys
import json
import numpy as np
import torch
import pdb
from torch.autograd import Variable
from PIL import Image
import time
from opts import parse_opts
from model import generate_model
from mean import get_mean
def main(video_root,output_root):
start_time = time.time()
for class_name in os.listdir(video_root):
if 'Split' in class_name:
continue
print(class_name)
class_path = os.path.join(video_root, class_name)
if not os.path.isdir(class_path):
continue
dst_class_path = os.path.join(output_root, class_name)
if not os.path.exists(dst_class_path):
os.makedirs(dst_class_path)
for jpg_folder in os.listdir(class_path):
vid_matrix = []
jpg_path = os.path.join(class_path,jpg_folder)
if len(os.listdir(jpg_path))>0:
for img in os.listdir(jpg_path):
if img.endswith('.jpg'):
with Image.open(os.path.join(jpg_path, img)) as tmp:
# tmp = tmp.convert('RGB')
tmp = np.asarray(tmp)
vid_matrix.append(tmp)
vid_matrix = np.stack(vid_matrix, axis=0)
dst_matrix = os.path.join(dst_class_path, jpg_folder + '.npy')
np.save(dst_matrix, vid_matrix)
exc_time = time.time() - start_time
print("--- %s seconds ---" % exc_time)
if __name__ == "__main__":
video_root = sys.argv[1]
output_root = sys.argv[2]
main(video_root,output_root)
|
nilq/baby-python
|
python
|
from multipledispatch import dispatch
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from .colour import PAL, gen_PAL
sns.set()
# stheno is removed from this temporarily because it has too many dependencies and is not maintained; it depends on lab and wbml, which are not easy to install.
a = (list, np.ndarray)
@dispatch(np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray)
def viz(x, y, mean, lower, upper):
pal = gen_PAL()
plt.figure(figsize=(12, 6))
plt.scatter(x[:, 0], y, label='Observations', c=pal[0], alpha=0.8)
plt.plot(x[:, 0], mean, label='Prediction', c=pal[1])
plt.fill_between(x[:, 0], lower, upper, color=pal[2], alpha=0.3)
plt.legend()
plt.show()
return
# @dispatch(a, a, stheno.graph.GP)
# def viz(x, y, p):
# # Now condition on the observations to make predictions.
# mean, lower, upper = p(x).marginals()
# # Plot result.
# plt.scatter(x, y, label='Observations', c=PAL[1])
# plt.plot(x, mean, label='Prediction', c=PAL[2])
# plt.plot(x, lower, ls='--', c=PAL[2])
# plt.plot(x, upper, ls='--', c=PAL[2])
# plt.show()
# return
# @dispatch(a, stheno.graph.GP)
# def viz(x, p):
# mean, lower, upper = p(x).marginals()
# plt.plot(x, mean, label='Prediction', c=PAL[2])
# plt.plot(x, lower, ls='--', c=PAL[2])
# plt.plot(x, upper, ls='--', c=PAL[2])
# plt.show()
# return
|
nilq/baby-python
|
python
|
class Pessoa:
def __init__(self, nome,idade):
self.nome = nome
self.idade = idade
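# Create an instance without calling __init__, then populate its attributes from a dict via setattr().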
p = Pessoa.__new__(Pessoa)
dados = {'nome':'Fábio','idade':25}
for k,y in dados.items():
setattr(p,k,y)
print(p.nome, p.idade)
|
nilq/baby-python
|
python
|
"""
TransformDF2Numpy is a simple tool for quick transformation from pandas.DataFrame to numpy.array dataset,
containing some utilities such as re-transformation of new data,
minimal pre-processing, and access to variable information.
##################
### Overview ###
##################
+ Transform a training set of the pandas.DataFrame to a numpy.array dataset, and fit a transformer instance.
The numpy.array containing the factorized categorical variables (first half)
and the numerical variables (second half).
+ Utilities of a fitted transformer instance.
+ Transforming New DataFrame samely as DataFrame used for fitting.
+ Access to variable information.
+ linking variable index and name
+ variable names (all, categorical, numerical)
+ linking factorized value and category name
+ unique categories of categorical variables
+ Minimal pre-processing (optional).
+ Scaling numerical variables.
+ robustness control by a parameter
+ Thresholding categorical variables by minimum count of each variable.
+ Filling missing values.
+ new category (or the most frequent category) for categorical variables.
+ mean value for numerical variables
+ robustness control by a parameter
(Note: A categorical variable which has only two unique categories is treated as a numerical variable)
(*) Factorization: The process of converting each element of a categorical variable into a corresponding positive index.
####################
### Parameters ###
####################
objective_col : str (optional, default None)
The column name of objective variable.
If you specify this, the instance automatically find the column
and the output numpy array will be splitted into
x (explanatory variables) and y (objective variables).
objective_scaling : bool (optional, default False)
The flag for scaling objective variable.
numerical_scaling : bool (optional, default False)
The flag for scaling numerical variables.
scaling_robustness_factor : float in range [0, 1] (optional, default 0.)
The parameter to control robustness of scaling operation.
Specifying a larger value will make it more robust against outliers.
fillnan : bool (optional, default True)
The flag to fill missing values (nan, NaN).
If True, the numerical nan will be filled with the mean,
and the categorical nan will be filled as new category (or most frequent category).
If False, the numerical nan will not be filled,
and the categorical nan will be filled with -1.
fillnan_robustness_factor : float in range [0, 1] (optional, default 0.)
The parameter to control robustness of calculating the filling value to nan.
Specifying a larger value will make it more robust against outliers.
min_category_count : integer (optional, default 0)
The minimum number of appearance of each category, in each categorical variable.
The categories with a number of appearance below this parameter will be thresholded,
and treated as a new single category.
copy : bool (optional, default True)
Set to False to perform inplace the input DataFrame and avoid a copy.
#################
### Methods ###
#################
fit_transform(df)
Inputs: training set of DataFrame
Returns: x, (y)
x : The numpy.array containing factorized categorical variables (first half)
and numerical variables (second half).
The variables which have only two unique categories are treated as numerical variables.
y : numpy array of objective variable (returned only when objective column exists)
transform(df)
Inputs: testing set of DataFrame
Returns: x, (y)
x : numpy array of explanatory variables same as fit_transform()
y : numpy array of objective variable (only when objective column exists)
variables()
Returns: the list of the name of all variables in order of the output numpy array
categoricals()
Returns: the list of the name of categorical variables in order of the output numpy array
numericals()
Returns: the list of the name of numerical variables in order of the output numpy array
name_to_index(colname)
Inputs: column name of DataFrame
Returns: the corresponding column index of numpy array
index_to_name(index)
Inputs: column index of numpy array
Returns: the corresponding column name of DataFrame
is_numerical(index_or_colname)
Inputs: column index of numpy array
Returns: the bool indicating whether the variable is treated as a numerical variable or not
categories(index_or_colname)
Inputs: column name of DataFrame, or column index of numpy array
Return: the list of unique categories in the variable which index correspond to the factorized values
category_to_factorized(index_or_colname, category_name):
Inputs: index_or_colname : column name of DataFrame, or column index of numpy array
category_name : name of the single category
Returns: the factorized value
factorized_to_category(index_or_colname, factorized_value):
Inputs: index_or_colname : column name of DataFrame, or column index of numpy array
factorized_value : factorized value of the single category
Returns: the name of the single category
nuniques()
Returns: the list of the number of unique categories of the categorical variables
nunique(index_or_colname)
Inputs: column name of DataFrame, or column index of numpy array
Returns: the number of unique categories of the categorical variable
####################
### Attributes ###
####################
self.y_mean : the mean of the objective variable before scaling
self.y_std : the standard deviation of the objective variable before scaling
self.num_categoricals : the number of the categorical variables
self.num_numericals : the number of the numerical variables
"""
import pandas as pd
import numpy as np
import warnings
from .errors import *
# global parameters
logging = True
# global constants
DROPPED_CATEGORY = "TransformDF2Numpy_dropped_category"
NAN_CATEGORY = "TransformDF2Numpy_NaN_category"
class TransformDF2Numpy:
def __init__(self,
objective_col=None,
objective_scaling=False,
numerical_scaling=False,
scaling_robustness_factor=0.,
fillnan=True,
fillnan_robustness_factor=0.,
min_category_count=0,
copy=True):
# param for objective variable
if objective_col is not None:
if type(objective_col) == str:
self.objective_col = objective_col
else:
raise InvalidInputForSpecifyingObjectiveColumnError
else:
self.objective_col = None
# params for scaling values
self.objective_scaling = objective_scaling
self.numerical_scaling = numerical_scaling
self.scaling_robustness_factor = scaling_robustness_factor
# params for filling missing values
# If fillnan == False, missing categorical and numerical variables will be -1 and nan, respectively.
self.fillnan = fillnan
self.fillnan_robustness_factor = fillnan_robustness_factor
# param for category-threshold by minimum appearance of each category in each categorical variable
self.min_category_count = min_category_count
# param for internal copy.
# set to False to perform inplace the input DataFrame and avoid a copy.
self.copy = copy
# internal flags
self._fitted = False
def fit_transform(self, df):
if self._fitted:
raise TransformerAlreadyFittedError
if self.copy:
df = df.copy()
if logging:
_start_message_fit_transform()
if self.objective_col:
y_is_numeric = pd.api.types.is_numeric_dtype(df[self.objective_col])
y = df[self.objective_col].values.copy()
if self.objective_scaling:
if y_is_numeric:
self.y_mean, self.y_std = _mean_std_for_scaling(y, self.scaling_robustness_factor,
self.objective_col)
y = (y - self.y_mean) / self.y_std
else:
message = "Because the objective variable is categorical, " +\
"no scaling was performed to objective variable despite objective_scaling=True "
warnings.warn(message)
self.y_mean, self.y_std = None, None
else:
self.y_mean, self.y_std = None, None
# information of variables
self.variable_information = {
"variables": None,
"transform_index": None,
"categorical_variables": [],
"numerical_variables": [],
"categorical_uniques": []
}
self.transforms = []
categorical_transform_index = []
numerical_transform_index = []
num_rows = len(df)
for i, col in enumerate(df.columns):
num_uniques = df[col].nunique()
is_numeric = pd.api.types.is_numeric_dtype(df[col])
if (col == self.objective_col) or (num_uniques == 1) or \
(not is_numeric and num_uniques == num_rows):
trans = Dropper()
trans.fit_transform(col, self.objective_col)
self.transforms.append(trans)
elif (num_uniques > 2) and (not is_numeric):
trans = Factorizer(self.min_category_count, self.fillnan)
trans.fit_transform(df, col, self.variable_information)
self.transforms.append(trans)
if not trans.ct.all_thresholded:
categorical_transform_index.append(i)
elif (num_uniques == 2) and (not is_numeric):
trans = BinaryFactorizer(self.numerical_scaling, self.scaling_robustness_factor,
self.fillnan, self.fillnan_robustness_factor)
trans.fit_transform(df, col, self.variable_information)
self.transforms.append(trans)
numerical_transform_index.append(i)
elif is_numeric:
trans = NumericalHandler(self.numerical_scaling, self.scaling_robustness_factor,
self.fillnan, self.fillnan_robustness_factor)
trans.fit_transform(df, col, self.variable_information)
self.transforms.append(trans)
numerical_transform_index.append(i)
else:
message = "debug: something wrong with column: " + col
raise Exception(message)
self.variable_information["variables"] = self.variable_information["categorical_variables"]\
+ self.variable_information["numerical_variables"]
self.variable_information["transform_index"] = categorical_transform_index + numerical_transform_index
self.num_categoricals = len(self.variable_information["categorical_variables"])
self.num_numericals = len(self.variable_information["numerical_variables"])
x = self._df_to_numpy(df)
if logging:
_end_message_fit_transform(self.variable_information)
self._fitted = True
return (x, y) if self.objective_col else x
def transform(self, df):
if not self._fitted:
raise TransformerNotFittedError
if self.copy:
df = df.copy()
if self.objective_col in df.columns:
y_exist = True
y = df[self.objective_col].values.copy()
if self.objective_scaling:
y = (y - self.y_mean) / self.y_std
else:
y_exist = False
idx_transform = 0
for col in df.columns:
if not y_exist and self.transforms[idx_transform].col_name == self.objective_col:
idx_transform += 1
self.transforms[idx_transform].transform(df, col)
idx_transform += 1
x = self._df_to_numpy(df)
return (x, y) if y_exist else x
def variables(self):
var_names = self.variable_information["variables"]
out = []
for name in var_names:
trans = self._get_transform(name)
if type(trans) == BinaryFactorizer:
out.append(name + "_" + self.categories(name)[-1])
else:
out.append(name)
return out
def categoricals(self):
return self.variable_information["categorical_variables"]
def numericals(self):
var_names = self.variable_information["numerical_variables"]
out = []
for name in var_names:
trans = self._get_transform(name)
if type(trans) == BinaryFactorizer:
out.append(name + "_" + self.categories(name)[-1])
else:
out.append(name)
return out
def name_to_index(self, colname):
if colname not in self.variable_information["variables"]:
raise VariableNotExistError(colname)
return self.variable_information["variables"].index(colname)
def index_to_name(self, index):
return self.variable_information["variables"][index]
def is_numerical(self, index_or_colname):
trans = self._get_transform(index_or_colname)
if type(trans) == Factorizer:
return False
else:
return True
def categories(self, index_or_colname):
trans = self._get_transform(index_or_colname)
if type(trans) == Factorizer or type(trans) == BinaryFactorizer:
return trans.categories
else:
raise HasNoDictionaryError
def category_to_factorized(self, index_or_colname, category_name):
trans = self._get_transform(index_or_colname)
categories = self.categories(index_or_colname)
if category_name not in categories:
raise CategoryNotExistError(category_name)
if type(trans) == Factorizer:
return float(np.where(categories == category_name)[0][0])
elif type(trans) == BinaryFactorizer:
categories = self.categories(index_or_colname)
if self.numerical_scaling:
return float((np.where(categories == category_name)[0][0] - trans.mean) / trans.std)
else:
return float(np.where(categories == category_name)[0][0])
def factorized_to_category(self, index_or_colname, factorized_value):
trans = self._get_transform(index_or_colname)
categories = self.categories(index_or_colname)
if type(trans) == Factorizer:
return _factorized_to_category(factorized_value, factorized_value, categories)
elif type(trans) == BinaryFactorizer:
if self.numerical_scaling:
fixed_factorized_value = float(factorized_value * trans.std + trans.mean)
# if not integer, raise error
if not float.is_integer(fixed_factorized_value):
raise FactorizedNotExistError(factorized_value)
return _factorized_to_category(fixed_factorized_value, factorized_value, categories)
else:
return _factorized_to_category(factorized_value, factorized_value, categories)
def nuniques(self):
return self.variable_information["categorical_uniques"]
def nunique(self, index_or_colname=None):
if index_or_colname is not None:
trans = self._get_transform(index_or_colname)
if type(trans) == Factorizer:
return trans.num_uniques
elif type(trans) == BinaryFactorizer:
return 2
elif type(trans) == NumericalHandler:
raise WronglySpecifiedNumericalVariableError
else:
return self.variable_information["categorical_uniques"]
def _df_to_numpy(self, df):
x_categorical = df[self.variable_information["categorical_variables"]].values
x_numerical = df[self.variable_information["numerical_variables"]].values
return np.concatenate([x_categorical, x_numerical], axis=1)
def _get_transform(self, index_or_colname):
if type(index_or_colname) in [int, np.int8, np.int16, np.int32, np.int64]:
return self.transforms[self.variable_information["transform_index"][index_or_colname]]
elif type(index_or_colname) == str:
if index_or_colname not in self.variable_information["variables"]:
raise VariableNotExistError(index_or_colname)
index = self.variable_information["variables"].index(index_or_colname)
return self.transforms[self.variable_information["transform_index"][index]]
else:
raise InvalidInputForSpecifyingVariableError
############################
### Internal Functions ###
############################
def _start_message_fit_transform():
print("Starting to fit a transformer of TransformDF2Numpy.")
def _end_message_fit_transform(info):
print()
print("Transformer fitted.")
print("Number of the categorical variables:", len(info["categorical_variables"]))
print("Number of the numerical variables:", len(info["numerical_variables"]))
print("---------------------------------------------------")
def _message_variable_dropped(col_name):
print("Garbage variable Dropped: (column: '%s')" % col_name)
def _message_categories_thresholed(col_name, num_valids, num_dropped):
print("Categories thresholded: (column: '%s'), (valid categories: %d, dropped categories: %d)"
% (col_name, num_valids, num_dropped))
def _message_numerical_nans_filled(col_name, nan_count, nan_value):
print("Numerical NaNs filled with alternative value: (column: '%s'), (filled rows: %d, value: %f)"
% (col_name, nan_count, nan_value))
def _message_categorical_nans_filled(col_name, nan_count, factorized_nan_value):
message = "Categorical NaNs filled with alternative value: (column: '%s'), " % col_name +\
"(filled rows: %d, factorized value: %f, category: '%s')" %\
(nan_count, factorized_nan_value, NAN_CATEGORY)
print(message)
def _factorized_to_category(fixed_factorized, factorized, categories):
if fixed_factorized < len(categories):
return categories[fixed_factorized]
else:
raise FactorizedNotExistError(factorized)
def _fit_factorize_fillnan_true(df, col_name):
nan_count = df[col_name].isnull().sum()
if nan_count:
nan_value = NAN_CATEGORY # nan will be replaced by new category
df[col_name].fillna(nan_value, inplace=True)
df[col_name], categories = df[col_name].factorize()
factorized_nan_value = np.where(categories == NAN_CATEGORY)[0][0]
if logging:
_message_categorical_nans_filled(col_name, nan_count, factorized_nan_value)
else:
nan_value = df[col_name].mode()[0] # future nan will be replaced by most frequently appeared category
df[col_name], categories = df[col_name].factorize()
return categories, nan_value
def _fit_factorize_fillnan_false(df, col_name):
df[col_name], categories = df[col_name].factorize()
return categories
def _numerical_nan_value(values, fillnan_robustness_factor):
values = values[~np.isnan(values)]
values = np.sort(values)
start_index = int(len(values) / 2 * fillnan_robustness_factor)  # robustness_factor is one-sided (per tail)
gorl_index = int(len(values) - start_index)
if start_index == gorl_index:
gorl_index += 1
nan_value = values[start_index:gorl_index].mean()
return nan_value
def _mean_std_for_scaling(values, scaling_robustness_factor, col_name):
values = values[~np.isnan(values)]
values = np.sort(values)
start_index = int(len(values) / 2 * scaling_robustness_factor)  # robustness_factor is one-sided (per tail)
gorl_index = int(len(values) - start_index)
if start_index == gorl_index:
gorl_index += 1
std = values[start_index:gorl_index].std() + 0.000001
if std == 0.000001:
if logging:
message = "Robust scaling of the variable:'%s' was failed due to infinite std appeared." % col_name\
+ " The mean and std will be calculated by all values instead."
warnings.warn(message)
std = values.std() + 0.000001
mean = values.mean()
return mean, std
else:
mean = values[start_index:gorl_index].mean()
return mean, std
##########################
### Internal Classes ###
##########################
class CategoryThreshold:
def __init__(self):
self.all_thresholded = False
def fit_transform(self, df, col_name, min_count):
val_cnt = df[col_name].value_counts()
valid_categories_series = val_cnt >= min_count
self.valid_categories = valid_categories_series[valid_categories_series].index
drop_targets = list(set(df[col_name].values) - set(self.valid_categories) - set([np.nan]))
df[col_name] = df[col_name].map(lambda x: DROPPED_CATEGORY if x in drop_targets else x)
if len(drop_targets) != 0 and logging:
_message_categories_thresholed(col_name, len(self.valid_categories), len(drop_targets))
if len(self.valid_categories) == 0:
self.all_thresholded = True
if logging:
message = "All categories in column '%s' were thresholded. This column will be dropped." % col_name
warnings.warn(message)
def transform(self, df, col_name):
drop_targets = list(set(df[col_name].values) - set(self.valid_categories) - set([np.nan]))
df[col_name] = df[col_name].map(lambda x: DROPPED_CATEGORY if x in drop_targets else x)
class Dropper:
def __init__(self):
pass
def fit_transform(self, col_name, obj_col_name):
self.col_name = col_name
if logging and (col_name != obj_col_name):
_message_variable_dropped(col_name)
def transform(self, df, col_name):
if col_name != self.col_name:
raise WrongDataFrameConstructionError
class Factorizer:
def __init__(self, min_category_count, fillnan_flag):
self.min_category_count = min_category_count
self.fillnan_flag = fillnan_flag
def fit_transform(self, df, col_name, variable_info):
self.col_name = col_name
self.ct = CategoryThreshold()
self.ct.fit_transform(df, col_name, min_count=self.min_category_count)
if not self.ct.all_thresholded:
if self.fillnan_flag:
self.categories, self.nan_value = _fit_factorize_fillnan_true(df, col_name)
else:
self.categories = _fit_factorize_fillnan_false(df, col_name)
variable_info["categorical_variables"].append(col_name)
self.num_uniques = len(self.categories)
variable_info["categorical_uniques"].append(self.num_uniques)
# starting to create params used for an external one-hot-encoding function
category_counts = df[col_name].value_counts()
if -1 in category_counts.index.values:
category_counts.drop(-1, axis=0, inplace=True)
category_counts = category_counts.sort_index().values
# means of one-hot-vectors
self.categories_one_hot_means = category_counts / category_counts.sum()
# standard deviations of one-hot-vectors
self.categories_one_hot_stds = np.sqrt(
self.categories_one_hot_means * (1 - self.categories_one_hot_means) ** 2 +
(1 - self.categories_one_hot_means) * self.categories_one_hot_means ** 2
)
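        # p * (1 - p)**2 + (1 - p) * p**2 simplifies to p * (1 - p), the Bernoulli
        # variance of each one-hot column, so these stds equal sqrt(p * (1 - p)).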
def transform(self, df, col_name):
if col_name != self.col_name:
raise WrongDataFrameConstructionError
if not self.ct.all_thresholded:
self.ct.transform(df, col_name)
if self.fillnan_flag:
df[col_name].fillna(self.nan_value, inplace=True)
df[col_name] = self.categories.get_indexer(df[col_name])
class BinaryFactorizer:
def __init__(self, scaling_flag, scaling_robustness_factor,
fillnan_flag, fillnan_robustness_factor):
self.scaling_flag = scaling_flag
self.scaling_robustness_factor = scaling_robustness_factor
self.fillnan_flag = fillnan_flag
self.fillnan_robustness_factor = fillnan_robustness_factor
def fit_transform(self, df, col_name, variable_info):
self.col_name = col_name
df[col_name], self.categories = df[col_name].factorize()
variable_info["numerical_variables"].append(col_name)
# fill nan
nan_count = (df[col_name].values == -1).sum()
if self.fillnan_flag and nan_count:
df.loc[df[col_name] == -1, col_name] = np.nan
self.nan_value = _numerical_nan_value(df[col_name].values, self.fillnan_robustness_factor)
df[col_name].fillna(self.nan_value, inplace=True)
if logging:
_message_numerical_nans_filled(col_name, nan_count, self.nan_value)
elif not self.fillnan_flag and nan_count:
df.loc[df[col_name] == -1, col_name] = np.nan
# scaling
if self.scaling_flag:
self.mean, self.std = _mean_std_for_scaling(df[col_name].values,
self.scaling_robustness_factor,
col_name)
df[col_name] = (df[col_name].values - self.mean) / self.std
def transform(self, df, col_name):
if col_name != self.col_name:
raise WrongDataFrameConstructionError
df[col_name] = self.categories.get_indexer(df[col_name])
if self.fillnan_flag and (-1 in df[col_name].values):
df.loc[df[col_name] == -1, col_name] = self.nan_value
elif not self.fillnan_flag and (-1 in df[col_name].values):
df.loc[df[col_name] == -1, col_name] = np.nan
if self.scaling_flag:
df[col_name] = (df[col_name].values - self.mean) / self.std
class NumericalHandler:
def __init__(self, scaling_flag, scaling_robustness_factor,
fillnan_flag, fillnan_robustness_factor):
self.scaling_flag = scaling_flag
self.scaling_robustness_factor = scaling_robustness_factor
self.fillnan_flag = fillnan_flag
self.fillnan_robustness_factor = fillnan_robustness_factor
def fit_transform(self, df, col_name, variable_info):
self.col_name = col_name
if self.fillnan_flag:
self.nan_value = _numerical_nan_value(df[col_name].values, self.fillnan_robustness_factor)
nan_count = (df[col_name].isnull()).sum()
if nan_count:
_message_numerical_nans_filled(col_name, nan_count, self.nan_value) if logging else None
df[col_name].fillna(self.nan_value, inplace=True)
if self.scaling_flag:
self.mean, self.std = _mean_std_for_scaling(df[col_name].values, self.scaling_robustness_factor, col_name)
df[col_name] = (df[col_name].values - self.mean) / self.std
variable_info["numerical_variables"].append(col_name)
def transform(self, df, col_name):
if col_name != self.col_name:
raise WrongDataFrameConstructionError
if self.fillnan_flag:
df[col_name].fillna(self.nan_value, inplace=True)
if self.scaling_flag:
df[col_name] = (df[col_name].values - self.mean) / self.std
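# Illustrative usage sketch (not part of the original module): fits a
# NumericalHandler on a toy pandas column and reapplies it to new data.
# The column name and values below are made-up placeholders.
if __name__ == "__main__":
    import pandas as pd
    train = pd.DataFrame({"age": [20.0, 25.0, np.nan, 40.0]})
    test = pd.DataFrame({"age": [np.nan, 30.0]})
    info = {"numerical_variables": [], "categorical_variables": [], "categorical_uniques": []}
    handler = NumericalHandler(scaling_flag=True, scaling_robustness_factor=0.0,
                               fillnan_flag=True, fillnan_robustness_factor=0.0)
    handler.fit_transform(train, "age", info)   # learns nan_value, mean and std
    handler.transform(test, "age")              # reapplies them to unseen data
    print(train["age"].tolist(), test["age"].tolist())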
|
nilq/baby-python
|
python
|
import numpy as np
def gtd_bias(z, growth, alpha, b0, c):
b = c + (b0 - c) / growth**alpha
return b
def q_bias(k, Q, A):
return (1 + Q * k**2) / (1 + A * k)
def make_grids(k, z):
K = np.tile(k[:, None], z.size)
Z = np.tile(z[:, None], k.size).T
return K, Z
def q_model(k, z, Q, A):
# Make 2D versions of k,z arrays for convenience
K, Z = make_grids(k, z)
bias = q_bias(K, Q, A)
return bias
def gtd_model(k, z, z_growth, growth, alpha, b0, c):
K, Z = make_grids(k, z)
D = np.interp(z, z_growth, growth)
D = np.tile(D[:, None], k.size).T
bias = gtd_bias(Z, D, alpha, b0, c)
return bias
def gtd_q_model(k, z, z_growth, growth, alpha, b0, c, Q, A):
    K, Z = make_grids(k, z)
    # interpolate the growth factor onto the requested redshifts, as in gtd_model
    D = np.interp(z, z_growth, growth)
    D = np.tile(D[:, None], k.size).T
    bias_k = q_bias(K, Q, A)
    bias_z = gtd_bias(Z, D, alpha, b0, c)
    bias = bias_k * bias_z
    return bias
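# Illustrative usage sketch (not part of the original module): evaluates the
# Q-model and GTD-model bias on a small (k, z) grid. The k, z and growth
# values below are made-up placeholders, not fitted parameters.
if __name__ == "__main__":
    k = np.linspace(0.01, 1.0, 5)
    z = np.array([0.0, 0.5, 1.0])
    z_growth = np.linspace(0.0, 2.0, 21)
    growth = 1.0 / (1.0 + z_growth)  # rough stand-in for a growth factor D(z)
    print(q_model(k, z, Q=4.6, A=1.4).shape)                                   # (5, 3)
    print(gtd_model(k, z, z_growth, growth, alpha=1.0, b0=1.2, c=0.8).shape)   # (5, 3)
    print(gtd_q_model(k, z, z_growth, growth, 1.0, 1.2, 0.8, 4.6, 1.4).shape)  # (5, 3)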
|
nilq/baby-python
|
python
|
import os.path
from datetime import datetime
import click
from spoty import settings
from typing import List
import dateutil.parser
import numpy as np
from multiprocessing import Process, Lock, Queue, Value, Array
import sys
import time
from time import strftime
from time import gmtime
import string
THREADS_COUNT = 12
tag_allies = [
['YEAR', 'DATE'],
['TRACK', 'TRACKNUMBER'],
['DISK', 'DISKNUMBER']
]
spoty_tags = \
[
'SPOTY_DUP_GROUP',
'SPOTY_DEF_DUP_TAGS',
'SPOTY_PROB_DUP_TAGS',
'SPOTY_DUP_LIST',
'SPOTY_DUP_ID',
'SPOTY_FOUND_BY',
'SPOTY_SOURCE',
'SPOTY_PLAYLIST_NAME',
'SPOTY_PLAYLIST_ID',
'SPOTY_PLAYLIST_INDEX',
'SPOTY_FILE_NAME',
'SPOTY_TRACK_ID',
'SPOTY_TRACK_ADDED',
'SPOTY_LENGTH',
'SPOTY_TRACK_LISTENED',
]
spotify_tags = [
'SPOTIFY_TRACK_ID',
'SPOTIFY_ALBUM_ID',
]
deezer_tags = [
'DEEZER_TRACK_ID',
'DEEZER_ALBUM_ID',
'DEEZER_ARTIST_ID',
'DEEZER_LYRICS_ID',
]
main_tags = \
[
'ISRC',
'ARTIST',
'ALBUMARTIST',
'TITLE',
'ALBUM',
'GENRE',
'MOOD',
'OCCASION',
'RATING',
    'COMMENT',
    'SOURCE',
'BPM',
'QUALITY',
'TEMPO',
'YEAR',
]
additional_tags = \
[
'1T_TAGGEDDATE', # auto tagger
'AUTHOR',
'COMPILATION',
'COMPOSER',
'COPYRIGHT',
'DISC',
'ENCODER',
'EXPLICIT',
'FILEOWNER',
'GAIN',
'INITIAL KEY',
'INITIALKEY',
'ENGINEER',
'INVOLVEDPEOPLE',
'ITUNESADVISORY',
'LABEL',
'LOVE RATING',
'LYRICS',
'MIXER',
'PRODUCER',
'PUBLISHER',
'REPLAYGAIN_TRACK_GAIN',
'RELEASE DATE',
'STYLE',
'TOTALDISCS',
'TOTALTRACKS',
'TRACK',
'UPC',
'WRITER',
]
class DuplicatesGroup:
source_tags: dict
def_duplicates: list
prob_duplicates: list
def_found_tags: list
prob_found_tags: list
def __init__(self):
self.source_tags = {}
self.def_duplicates = []
self.prob_duplicates = []
self.def_found_tags = []
self.prob_found_tags = []
def get_duplicates_count(self):
return len(self.def_duplicates) + len(self.prob_duplicates)
def has_duplicates(self):
return self.get_duplicates_count() > 0
class SpotyContext:
tags_lists: list
summary: list
duplicates_groups: List[DuplicatesGroup]
unique_first_tracks: list
unique_second_tracks: list
def __init__(self):
self.tags_lists = []
self.summary = []
self.duplicates_groups = []
self.unique_first_tracks = []
self.unique_second_tracks = []
mutex = Lock()
def tuple_to_list(some_tuple: tuple):
l = []
l.extend(some_tuple)
return l
def dict_to_list(some_dics: dict):
l = []
for key, value in some_dics.items():
l.append(value)
return l
def is_valid_path(path: str):
return os.path.isdir(path)
def is_valid_file(path: str):
return os.path.isfile(path)
def slugify_file_pah(text: str):
valid_chars = "ЯЧСМИТЬБЮФЫВАПРОЛДЖЭЙЦУКЕНГШЩЗХЪячсмитьбюфывапролджэйцукенгшщзхъ!@#$%%^&()_-=+.,[]{}`№ %s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in text if c in valid_chars).strip()
# invalid_chars = '<>:"/\|?*'
# for char in invalid_chars:
# text = text.replace(char, '')
# return text
def filter_duplicates(src_arr: list, dest_arr: list):
return list(filter(lambda id: id not in src_arr, dest_arr))
def remove_duplicates(arr: list):
good = []
duplicates = []
for item in arr:
if item in good:
duplicates.append(item)
else:
good.append(item)
return good, duplicates
def remove_exist(exist_arr: list, new_arr: list):
new = []
exist = []
for item in new_arr:
if item in exist_arr:
exist.append(item)
else:
new.append(item)
return new, exist
def remove_duplicated_tags(tags_list: list, tags_to_compare: list, allow_missing=False, show_progressbar=False):
good = []
duplicates = []
if show_progressbar:
bar = click.progressbar(length=len(tags_list), label=f'Finding duplicates in {len(tags_list)} tracks')
for new_tags in tags_list:
if show_progressbar:
bar.update(1)
found = False
for exist_tags in good:
if compare_tags(exist_tags, new_tags, tags_to_compare, allow_missing):
duplicates.append(new_tags)
found = True
break
if not found:
good.append(new_tags)
if show_progressbar:
bar.finish()
click.echo()
return good, duplicates
def remove_exist_tags(exist_tags_list: list, new_tags_list: list, tags_to_compare: list, allow_missing=False,
show_progressbar=False):
new = []
exist = []
if show_progressbar:
bar = click.progressbar(new_tags_list,
label=f'Searching for tags matching in {len(exist_tags_list)} and {len(new_tags_list)} tracks')
for new_tags in new_tags_list:
if show_progressbar:
bar.update(1)
found = False
for exist_tags in exist_tags_list:
if compare_tags(exist_tags, new_tags, tags_to_compare, allow_missing):
exist.append(new_tags)
found = True
break
if not found:
new.append(new_tags)
if show_progressbar:
bar.finish()
click.echo()
return new, exist
def remove_exist_tags_by_isrc_and_length(exist_tags_list: list, new_tags_list: list, show_progressbar=False):
exist_tags_dict = tags_list_to_dict_by_isrc_and_length(exist_tags_list)
return remove_exist_tags_by_isrc_and_length_dict(exist_tags_dict,new_tags_list, show_progressbar)
def tags_list_to_dict_by_isrc_and_length(exist_tags_list: list):
exist_tags_dict = {}
for tags in exist_tags_list:
if 'ISRC' in tags and 'SPOTY_LENGTH' in tags:
if tags['ISRC'] not in exist_tags_dict:
exist_tags_dict[tags['ISRC']] = []
exist_tags_dict[tags['ISRC']].append(tags['SPOTY_LENGTH'])
return exist_tags_dict
def remove_exist_tags_by_isrc_and_length_dict(exist_tags_dict: dict, new_tags_list: list, show_progressbar=False):
new = []
exist = []
if show_progressbar:
        bar = click.progressbar(new_tags_list,
                                label=f'Searching for tags matching in {len(exist_tags_dict)} and {len(new_tags_list)} tracks')
COMPARE_LENGTH_TOLERANCE_SEC = int(settings.SPOTY.COMPARE_LENGTH_TOLERANCE_SEC)
for new_tags in new_tags_list:
if show_progressbar:
bar.update(1)
found = False
if 'ISRC' in new_tags and 'SPOTY_LENGTH' in new_tags:
if new_tags['ISRC'] in exist_tags_dict:
for exist_length in exist_tags_dict[new_tags['ISRC']]:
                    if abs(int(new_tags['SPOTY_LENGTH']) - int(exist_length)) < COMPARE_LENGTH_TOLERANCE_SEC:
found = True
break
if found:
exist.append(new_tags)
else:
new.append(new_tags)
if show_progressbar:
bar.finish()
click.echo()
return new, exist
def compare_tags(tags1: dict, tags2: dict, tags_to_compare: list, allow_missing=False):
for tag in tags_to_compare:
if not tag in tags1 or not tag in tags2:
if allow_missing:
continue
else:
return False
if tag == 'SPOTY_LENGTH':
if abs(int(tags1['SPOTY_LENGTH']) - int(tags2['SPOTY_LENGTH'])) \
> settings.SPOTY.COMPARE_LENGTH_TOLERANCE_SEC:
return False
else:
continue
if tag == "ARTIST":
artist1 = tags1[tag].replace(',', ';').upper()
artist1 = artist1.split(';')
artist2 = tags2[tag].replace(',', ';').upper()
artist2 = artist2.split(';')
found = False
for art in artist1:
if art in artist2:
found = True
if not found:
return False
else:
continue
if tag == "TITLE":
title1 = tags1[tag].upper()
title1 = ''.join(char for char in title1 if char.isalnum())
title2 = tags2[tag].upper()
title2 = ''.join(char for char in title2 if char.isalnum())
if not title2.startswith(title1) and not title1.startswith(title2):
return False
else:
continue
if tag == "ALBUM":
album1 = tags1[tag].upper()
album2 = tags2[tag].upper()
if not album2.startswith(album1) and not album1.startswith(album2):
return False
else:
continue
if tag == "ISRC":
isrc1 = tags1[tag].upper().replace('-', '')
isrc2 = tags2[tag].upper().replace('-', '')
if isrc1 != isrc2:
return False
else:
continue
if tags1[tag] != tags2[tag]:
return False
return True
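# Illustrative example (not part of the original module): with
# tags_to_compare=['ARTIST', 'TITLE'], two tag dicts match when they share at
# least one artist (artist strings split on ',' / ';') and one title is a
# prefix of the other after stripping non-alphanumeric characters, e.g.
#   compare_tags({'ARTIST': 'A;B', 'TITLE': 'Song'},
#                {'ARTIST': 'B', 'TITLE': 'Song (Remix)'},
#                ['ARTIST', 'TITLE'])  # -> True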
def find_duplicates_in_tags(tags_list: list, compare_tags: list):
if len(compare_tags) == 0:
return
duplicates = {}
pattern = ""
for tag in compare_tags:
pattern += "%" + tag + "%,"
pattern = pattern[:-1]
groupped_tags = group_tags_by_pattern(tags_list, pattern, "Unknown")
for group, tags in groupped_tags.items():
if group == "Unknown":
continue
if len(tags) > 1:
if not group in duplicates:
duplicates[group] = []
duplicates[group].extend(tags)
skipped_tags = groupped_tags['Unknown'] if 'Unknown' in groupped_tags else []
return duplicates, skipped_tags
def print_main_tags(tags: dict):
if 'ISRC' in tags: print(f'ISRC: {tags["ISRC"]}')
if 'ARTIST' in tags: print(f'ARTIST: {tags["ARTIST"]}')
if 'TITLE' in tags: print(f'TITLE: {tags["TITLE"]}')
if 'ALBUM' in tags: print(f'ALBUM: {tags["ALBUM"]}')
if 'GENRE' in tags: print(f'GENRE: {tags["GENRE"]}')
if 'MOOD' in tags: print(f'MOOD: {tags["MOOD"]}')
if 'OCCASION' in tags: print(f'OCCASION: {tags["OCCASION"]}')
if 'RATING' in tags: print(f'RATING: {tags["RATING"]}')
if 'COMMENT' in tags: print(f'COMMENT: {tags["COMMENT"]}')
if 'BARCODE' in tags: print(f'BARCODE: {tags["BARCODE"]}')
if 'SPOTY_LENGTH' in tags:
seconds = int(tags["SPOTY_LENGTH"])
m, s = divmod(seconds, 60)
time = '{:02d}:{:02d}'.format(m, s)
print(f'SPOTY_LENGTH: {tags["SPOTY_LENGTH"]} ({time})')
if 'SPOTIFY_TRACK_ID' in tags: print(f'SPOTIFY_TRACK_ID: {tags["SPOTIFY_TRACK_ID"]}')
if 'DEEZER_TRACK_ID' in tags: print(f'DEEZER_TRACK_ID: {tags["DEEZER_TRACK_ID"]}')
if 'SOURCE' in tags: print(f'SOURCE: {tags["SOURCE"]}')
if 'SOURCEID' in tags: print(f'SOURCEID: {tags["SOURCEID"]}')
if 'YEAR' in tags: print(f'YEAR: {tags["YEAR"]}')
def print_tags_list_grouped(tags_list: list, print_pattern: str, grouping_pattern: str):
if len(tags_list) == 0:
return
grouped_tags = group_tags_by_pattern(tags_list, grouping_pattern)
for group, tags_l in grouped_tags.items():
print(f'\n------------------------- {group}:')
print_tags_list(tags_l, print_pattern)
def print_tags_list(tags_list: list, print_pattern: str):
if len(tags_list) == 0:
return
for tags in tags_list:
txt = parse_pattern(tags, print_pattern)
print(" " + txt)
def print_duplicates_tags_list(tags_list: list, print_pattern: str = None):
if len(tags_list) == 0:
return
for tags in tags_list:
if print_pattern is None:
print_pattern = settings.DUPLICATE_PRINT_PATTERN[tags['SPOTY_SOURCE']]
txt = parse_pattern(tags, print_pattern)
print(" " + txt)
def check_tag_has_allies(tag: str):
for allies in tag_allies:
if tag in allies:
return True
return False
def get_tag_allies(tag: str, include_source_tag=True):
res = []
for allies in tag_allies:
if tag in allies:
res = allies.copy()
if tag in res:
res.remove(tag)
if include_source_tag:
res.append(tag)
return res
def print_tags(tags: dict, tags_to_print: list):
for tag in tags_to_print:
allies = get_tag_allies(tag, True)
for a in allies:
if a.upper() in tags:
print(f'{a}: {tags[a]}')
def add_playlist_index_from_playlist_names(tags_list: list):
res = []
groups = group_tags_by_pattern(tags_list, "%SPOTY_PLAYLIST_NAME%")
for group, g_tags_list in groups.items():
for i, tags in enumerate(g_tags_list):
tags['SPOTY_PLAYLIST_INDEX'] = str(i + 1)
res.append(tags)
return res
def filter_tags_list_have_tags(tags_list: list, filter_tags: list):
filtered = []
for tags in tags_list:
if check_all_tags_exist(tags, filter_tags):
filtered.append(tags)
return filtered
def filter_tags_list_have_no_tags(tags_list: list, filter_tags: list):
filtered = []
for tags in tags_list:
if not check_all_tags_exist(tags, filter_tags):
filtered.append(tags)
return filtered
def filter_added_after_date(tags_list: list, date: str, add_if_date_tag_missing=False):
filtered = []
for tags in tags_list:
if 'SPOTY_TRACK_ADDED' in tags:
track_added = datetime.strptime(tags['SPOTY_TRACK_ADDED'], "%Y-%m-%d %H:%M:%S")
# specified_date = datetime.strptime(added_after_time, "%Y-%m-%d %H:%M:%S")
try:
specified_date = dateutil.parser.parse(date)
except:
                click.echo(f'Cannot parse date: "{date}". Use this format: "2018-06-29 08:15:27"', err=True)
exit()
if track_added > specified_date:
filtered.append(tags)
else:
if add_if_date_tag_missing:
filtered.append(tags)
return filtered
def filter_added_before_date(tags_list: list, date: str, add_if_date_tag_missing=False):
filtered = []
for tags in tags_list:
if 'SPOTY_TRACK_ADDED' in tags:
track_added = datetime.strptime(tags['SPOTY_TRACK_ADDED'], "%Y-%m-%d %H:%M:%S")
# specified_date = datetime.strptime(added_after_time, "%Y-%m-%d %H:%M:%S")
try:
specified_date = dateutil.parser.parse(date)
except:
                click.echo(f'Cannot parse date: "{date}". Use this format: "2018-06-29 08:15:27"', err=True)
exit()
if track_added < specified_date:
filtered.append(tags)
else:
if add_if_date_tag_missing:
filtered.append(tags)
return filtered
def check_all_tags_exist(tags: dict, tags_to_check: list):
for tag in tags_to_check:
if not tag.upper() in tags:
return False
return True
def group_tags_by_pattern(tags_list: list, pattern: str, not_found_tag_name="Unknown"):
groups = {}
for tags in tags_list:
group_name = parse_pattern(tags, pattern)
if not group_name in groups:
groups[group_name] = []
groups[group_name].append(tags)
return groups
def parse_pattern(tags: dict, pattern: str):
result = ""
tag_name = ""
building_tag = False
for c in pattern:
if c == "%":
building_tag = not building_tag
if not building_tag:
allies = get_tag_allies(tag_name, True)
for a in allies:
if a in tags:
tag = tags[a]
result += str(tag)
tag_name = ""
else:
if building_tag:
tag_name += c
tag_name = tag_name.upper()
else:
result += c
return result
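# Illustrative example (not part of the original module): parse_pattern fills
# %TAG% placeholders from a tags dict, honouring the aliases in tag_allies.
# The tag values below are made-up placeholders.
#   parse_pattern({'ARTIST': 'Some Artist', 'TITLE': 'Some Song'}, "%ARTIST% - %TITLE%")
#   -> 'Some Artist - Some Song'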
def reorder_tag_keys_main_first(keys: list):
res = []
# reorder spoty tags first
for key in spoty_tags:
if key in keys:
res.append(key)
for key in spotify_tags:
if key in keys:
res.append(key)
for key in deezer_tags:
if key in keys:
res.append(key)
# reorder main tags first
for key in main_tags:
if key in keys:
res.append(key)
# add other tags
for key in keys:
if not key in res:
res.append(key)
return res
def get_missing_tags(exist_tags: dict, new_tags: dict, compare_tags: list = None, ignore_tags: list = None):
if compare_tags is None:
compare_tags = []
if ignore_tags is None:
ignore_tags = []
missing_tags = {}
for key, value in new_tags.items():
if len(compare_tags) > 0:
if key not in compare_tags:
continue
if len(ignore_tags) > 0:
if key in ignore_tags:
continue
if key == 'LENGTH':
continue
if key in spoty_tags:
continue
if key in exist_tags:
continue
found = False
for aliases in tag_allies:
if key in aliases:
for al in aliases:
if al in exist_tags:
found = True
if found:
continue
missing_tags[key] = value
return missing_tags
def find_empty_file_name(exist_file_name: str):
exist_file_name = os.path.abspath(exist_file_name)
if not os.path.isfile(exist_file_name):
return exist_file_name
base_name = os.path.basename(exist_file_name)
ext = os.path.splitext(base_name)[1]
base_name = os.path.splitext(base_name)[0]
dir_name = os.path.dirname(exist_file_name)
i = 1
while True:
i += 1
new_file_name = os.path.join(dir_name, base_name + f' {i}' + ext)
if not os.path.isfile(new_file_name):
return new_file_name
def clean_tags_list_before_write(tags_list):
for tags in tags_list:
if 'SPOTY_PLAYLIST_INDEX' in tags:
del tags['SPOTY_PLAYLIST_INDEX']
if 'LENGTH' in tags:
del tags['LENGTH']
return tags_list
def clean_tags_list_after_read(tags_list):
for i, tags in enumerate(tags_list):
tags_list[i] = clean_tags_after_read(tags)
def clean_tags_after_read(tags):
# local files from deemix
if 'ISRC' in tags:
tags['ISRC'] = tags['ISRC'].upper().replace('-', '')
if 'SOURCEID' in tags and 'DEEZER_TRACK_ID' not in tags \
and 'SOURCE' in tags and tags['SOURCE'].upper() == "DEEZER":
tags['DEEZER_TRACK_ID'] = tags['SOURCEID']
# missing deezer track id
if 'SPOTY_SOURCE' in tags and tags['SPOTY_SOURCE'].upper() == "DEEZER":
if 'SPOTY_TRACK_ID' not in tags and 'DEEZER_TRACK_ID' in tags:
tags['SPOTY_TRACK_ID'] = tags['DEEZER_TRACK_ID']
if 'DEEZER_TRACK_ID' not in tags and 'SPOTY_TRACK_ID' in tags:
tags['DEEZER_TRACK_ID'] = tags['SPOTY_TRACK_ID']
# missing spotify track id
if 'SPOTY_SOURCE' in tags and tags['SPOTY_SOURCE'].upper() == "SPOTIFY":
if 'SPOTY_TRACK_ID' not in tags and 'SPOTIFY_TRACK_ID' in tags:
tags['SPOTY_TRACK_ID'] = tags['SPOTIFY_TRACK_ID']
if 'SPOTIFY_TRACK_ID' not in tags and 'SPOTY_TRACK_ID' in tags:
tags['SPOTIFY_TRACK_ID'] = tags['SPOTY_TRACK_ID']
return tags
def find_duplicates_in_groups(check_tags: dict, groups: List[DuplicatesGroup], compare_tags_list: list,
compare_with_def_duplicates=False, compare_with_prob_duplicates=False) -> (
DuplicatesGroup, list):
if len(compare_tags_list) == 0:
return None, None
for tags_to_compare in compare_tags_list:
for group in groups:
if len(group.source_tags.items()) > 0:
if compare_tags(check_tags, group.source_tags, tags_to_compare, False):
return group, tags_to_compare
if compare_with_def_duplicates:
for tags_to_compare in compare_tags_list:
for group in groups:
for tags in group.def_duplicates:
if compare_tags(check_tags, tags, tags_to_compare, False):
return group, tags_to_compare
if compare_with_prob_duplicates:
for tags_to_compare in compare_tags_list:
for group in groups:
for tags in group.prob_duplicates:
if compare_tags(check_tags, tags, tags_to_compare, False):
return group, tags_to_compare
return None, None
def find_duplicates_in_tag_list2(tags_list: list, compare_tags_def_list: list, compare_tags_prob_list: list,
add_dup_tags=False):
# get tags to compare from config
for i, tags in enumerate(compare_tags_def_list):
compare_tags_def_list[i] = tags.split(',')
for i, tags in enumerate(compare_tags_prob_list):
compare_tags_prob_list[i] = tags.split(',')
groups: List[DuplicatesGroup] = []
# find duplicates
with click.progressbar(tags_list, label=f'Finding duplicates in {len(tags_list)} tracks') as bar:
for tags in bar:
group, found_tags = find_duplicates_in_groups(tags, groups, compare_tags_def_list, True, True)
if group is not None:
group.def_duplicates.append(tags)
group.def_found_tags.append(found_tags)
else:
group, found_tags = find_duplicates_in_groups(tags, groups, compare_tags_prob_list, True, True)
if group is not None:
group.prob_duplicates.append(tags)
group.prob_found_tags.append(found_tags)
else:
d = DuplicatesGroup()
d.source_tags = tags
groups.append(d)
# remove unique
unique_tracks = []
duplicates_groups: List[DuplicatesGroup] = []
for group in groups:
if group.has_duplicates():
duplicates_groups.append(group)
else:
unique_tracks.append(group.source_tags)
if add_dup_tags:
for i, group in enumerate(duplicates_groups):
if len(group.source_tags.items()) > 0:
group.source_tags['SPOTY_DUP_GROUP'] = i + 1
for y, tags in enumerate(group.def_duplicates):
tags['SPOTY_DUP_GROUP'] = i + 1
tags['SPOTY_DEF_DUP_TAGS'] = ','.join(group.def_found_tags[y])
for y, tags in enumerate(group.prob_duplicates):
tags['SPOTY_DUP_GROUP'] = i + 1
tags['SPOTY_PROB_DUP_TAGS'] = ','.join(group.prob_found_tags[y])
return duplicates_groups, unique_tracks
def find_duplicates_in_tag_lists(source_list: list, dest_list: list, compare_tags_def_list: list,
compare_tags_prob_list: list,
add_dup_tags=False, remove_duplicates_in_source=True):
# get tags to compare from config
for i, tags in enumerate(compare_tags_def_list):
compare_tags_def_list[i] = tags.split(',')
for i, tags in enumerate(compare_tags_prob_list):
compare_tags_prob_list[i] = tags.split(',')
# find duplicates in dest
groups: List[DuplicatesGroup] = []
unique_dest_tracks = []
for source_tags in source_list:
d = DuplicatesGroup()
d.source_tags = source_tags
groups.append(d)
if len(source_list) + len(dest_list) < 2000: # single thread
with click.progressbar(dest_list,
label=f'Finding duplicates in {len(source_list) + len(dest_list)} tracks') as bar:
for dest_tags in bar:
group, found_tags = find_duplicates_in_groups(dest_tags, groups, compare_tags_def_list)
if group is not None:
group.def_duplicates.append(dest_tags)
group.def_found_tags.append(found_tags)
else:
group, found_tags = find_duplicates_in_groups(dest_tags, groups, compare_tags_prob_list)
if group is not None:
group.prob_duplicates.append(dest_tags)
group.prob_found_tags.append(found_tags)
else:
unique_dest_tracks.append(dest_tags)
else: # multi thread
try:
parts = np.array_split(dest_list, THREADS_COUNT)
threads = []
counters = []
results = Queue()
with click.progressbar(length=len(dest_list),
label=f'Finding duplicates in {len(source_list) + len(dest_list)} tracks') as bar:
# start threads
for i, part in enumerate(parts):
counter = Value('i', 0)
counters.append(counter)
dest_list_part = list(part)
thread = Process(target=find_duplicates_in_groups_thread, args=(
dest_list_part, groups, compare_tags_def_list, compare_tags_prob_list, counter, results))
threads.append(thread)
thread.daemon = True # This thread dies when main thread exits
thread.start()
# update bar
total = sum([x.value for x in counters])
added = total - bar.pos
if added > 0:
bar.update(added)
# waiting for complete
while not bar.finished:
time.sleep(0.1)
total = sum([x.value for x in counters])
added = total - bar.pos
if added > 0:
bar.update(added)
# combine results
for i in range(len(parts)):
res = results.get()
unique_dest_tracks.extend(res['unique_dest_tracks'])
for i, group in enumerate(res['groups']):
if len(group.def_duplicates) > 0:
groups[i].def_duplicates.extend(group.def_duplicates)
groups[i].def_found_tags.extend(group.def_found_tags)
if len(group.prob_duplicates) > 0:
groups[i].prob_duplicates.extend(group.prob_duplicates)
groups[i].prob_found_tags.extend(group.prob_found_tags)
except (KeyboardInterrupt, SystemExit): # aborted by user
click.echo()
click.echo('Aborted.')
sys.exit()
# remove unique source
unique_source_tracks = []
temp_groups: List[DuplicatesGroup] = []
for group in groups:
if group.has_duplicates():
temp_groups.append(group)
else:
unique_source_tracks.append(group.source_tags)
groups = temp_groups
# remove duplicates in unique source tracks
sources_def_dups = []
sources_prob_dups = []
if remove_duplicates_in_source:
unique_sources = []
with click.progressbar(unique_source_tracks,
label=f'Finding duplicates in {len(unique_source_tracks)} source tracks') as bar:
for dest_tags in bar:
group, found_tags = find_duplicates_in_groups(dest_tags, groups, compare_tags_def_list)
if group is not None:
sources_def_dups.append(dest_tags)
else:
group, found_tags = find_duplicates_in_groups(dest_tags, groups, compare_tags_prob_list)
if group is not None:
sources_prob_dups.append(dest_tags)
else:
unique_sources.append(dest_tags)
unique_source_tracks = unique_sources
if add_dup_tags:
for i, group in enumerate(groups):
group.source_tags['SPOTY_DUP_GROUP'] = i + 1
for y, tags in enumerate(group.def_duplicates):
tags['SPOTY_DUP_GROUP'] = i + 1
tags['SPOTY_DEF_DUP_TAGS'] = ','.join(group.def_found_tags[y])
for y, tags in enumerate(group.prob_duplicates):
tags['SPOTY_DUP_GROUP'] = i + 1
tags['SPOTY_PROB_DUP_TAGS'] = ','.join(group.prob_found_tags[y])
return groups, unique_source_tracks, unique_dest_tracks, sources_def_dups, sources_prob_dups
def find_duplicates_in_groups_thread(dest_list, groups, compare_tags_def_list, compare_tags_prob_list, counter, result):
unique_dest_tracks = []
for i, dest_tags in enumerate(dest_list):
group, found_tags = find_duplicates_in_groups(dest_tags, groups, compare_tags_def_list)
if group is not None:
group.def_duplicates.append(dest_tags)
group.def_found_tags.append(found_tags)
else:
group, found_tags = find_duplicates_in_groups(dest_tags, groups, compare_tags_prob_list)
if group is not None:
group.prob_duplicates.append(dest_tags)
group.prob_found_tags.append(found_tags)
else:
unique_dest_tracks.append(dest_tags)
if (i + 1) % 10 == 0:
counter.value += 10
if i + 1 == len(dest_list):
counter.value += (i % 10) + 1
res = {}
res['unique_dest_tracks'] = unique_dest_tracks
res['groups'] = groups
result.put(res)
def compare_by_tags(source_list: list, dest_list: list, tags_to_compare: list, dest_unique: dict, dest_dups: dict,
dup_tag: str, add_dup_tags=False):
unique = []
dups = []
for dest_tags in dest_list:
found = False
for source_tags in source_list:
if compare_tags(source_tags, dest_tags, tags_to_compare, False):
found = True
if add_dup_tags:
if dup_tag not in dest_tags:
dest_tags[dup_tag] = ""
dest_tags[dup_tag] += f'{source_tags["SPOTY_DUP_ID"]} : {",".join(tags_to_compare)}\n'
if found:
dups.append(dest_tags)
else:
unique.append(dest_tags)
# move duplicates from unique to dups
for item in dups:
id = item['SPOTY_DUP_ID']
if id in dest_unique:
dest_dups[id] = item
del dest_unique[id]
def move_audio_files_to_path(tags_list, path):
moved_files = []
for tags in tags_list:
if 'SPOTY_FILE_NAME' in tags:
old_file_name = tags['SPOTY_FILE_NAME']
base_name = os.path.basename(old_file_name)
new_file_name = os.path.join(path, base_name)
if os.path.isfile(new_file_name):
new_file_name = find_empty_file_name(new_file_name)
os.rename(old_file_name, new_file_name)
moved_files.append(new_file_name)
return moved_files
def sort_tracks_by_source(tags_list):
spotify_playlists = {}
deezer_playlists = {}
local_audio_files = []
csv_playlists = {}
for tags in tags_list:
if tags['SPOTY_SOURCE'] == 'SPOTIFY':
playlist_id = tags['SPOTY_PLAYLIST_ID']
if playlist_id not in spotify_playlists:
spotify_playlists[playlist_id] = []
spotify_playlists[playlist_id].append(tags['SPOTIFY_TRACK_ID'])
if tags['SPOTY_SOURCE'] == 'DEEZER':
playlist_id = tags['SPOTY_PLAYLIST_ID']
if playlist_id not in deezer_playlists:
deezer_playlists[playlist_id] = []
deezer_playlists[playlist_id].append(tags['DEEZER_TRACK_ID'])
if tags['SPOTY_SOURCE'] == 'LOCAL':
local_audio_files.append(tags['SPOTY_FILE_NAME'])
if tags['SPOTY_SOURCE'] == 'CSV':
playlist_name = tags['SPOTY_PLAYLIST_NAME']
if playlist_name not in csv_playlists:
csv_playlists[playlist_name] = []
csv_playlists[playlist_name].append(tags)
return spotify_playlists, deezer_playlists, local_audio_files, csv_playlists
|
nilq/baby-python
|
python
|
from datetime import datetime
from django.test import TestCase
from django.utils import timezone
# from .models import Patient
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from patientStuff.models import PatientDailyForm, PatientStatusHistory
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from users.models import Doctor, Patient, UserInfo
# Create your tests here.
class PatientDailyFormTestCase(APITestCase):
patient_daily_form = reverse('patient_daily_form')
def setUp(self):
# self.client = APIClient(enforce_csrf_checks=True)
self.user = User.objects.create_superuser(
username="test123",
first_name="Tester",
last_name="Tester",
email="Tester@gmail.com",
password="test123"
)
self.user_info = UserInfo.objects.create(
user=self.user
)
self.patient = Patient.objects.create(
user_info=self.user_info
)
# settings.MEDIA_ROOT = tempfile.mkdtemp()
# self.token = Token.objects.create(user=self.user)
self.api_authentication()
def api_authentication(self):
self.client.force_authenticate(user=self.user)
def test_create_form(self):
data = {
"sex": 0,
"age_range": 0,
"test_status": True,
"recent_test_date": None,
"test_result": True,
"body_temp": 120.5,
"weight": 123.5,
"self_assessment": 0,
"symptoms": 2,
"vaxination_count": 3
}
response = self.client.post(
self.patient_daily_form,
data=data,
format='json',
)
# Get back the form stored in the table
form = PatientDailyForm.objects.get(pk=1)
# Check if the data response stored the form correctly
self.assertEqual(response.data['sex'], form.sex)
self.assertEqual(response.data['age_range'], form.age_range)
self.assertEqual(response.data['test_status'], form.test_status)
self.assertEqual(response.data['recent_test_date'], str(form.recent_test_date))
self.assertEqual(
response.data['test_result'], form.test_result)
self.assertEqual(
response.data['body_temp'], form.body_temp)
self.assertEqual(response.data['weight'], form.weight)
self.assertEqual(response.data['self_assessment'], form.self_assessment)
self.assertEqual(response.data['symptoms'], form.symptoms)
self.assertEqual(response.data['vaxination_count'], form.vaxination_count)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class PatientStatusHistoryTestCase(APITestCase):
patient_status_history = reverse('patient_status_history')
def setUp(self):
# self.client = APIClient(enforce_csrf_checks=True)
self.user = User.objects.create_superuser(
username="test123",
first_name="Tester",
last_name="Tester",
email="Tester@gmail.com",
password="test123"
)
self.user_info = UserInfo.objects.create(
user=self.user
)
self.patient = Patient.objects.create(
user_info=self.user_info
)
self.form = PatientDailyForm.objects.create(
sex=0,
age_range=0,
test_status=True,
recent_test_date=None,
test_result=True,
body_temp=120.5,
weight=123.5,
self_assessment=0,
symptoms=2,
vaxination_count=3
)
# settings.MEDIA_ROOT = tempfile.mkdtemp()
# self.token = Token.objects.create(user=self.user)
self.api_authentication()
def api_authentication(self):
self.client.force_authenticate(user=self.user)
def test_create_history(self):
data = {
"patient": self.patient.id,
"patient_form": self.form.id,
}
response = self.client.post(
self.patient_status_history,
data=data,
format='json',
)
# Get back the status history stored in the table
status_history = PatientStatusHistory.objects.get(pk=1)
self.assertEqual(response.data['patient'], status_history.patient.id)
self.assertEqual(
response.data['patient_form'], status_history.patient_form.id)
# Check if the data response stored the history correctly
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
nilq/baby-python
|
python
|
#!/home/miranda9/miniconda3/envs/automl-meta-learning/bin/python
from argparse import Namespace
import torch
import torch.nn as nn
import torch.optim as optim
# from transformers import Adafactor
# from transformers.optimization import AdafactorSchedule
import uutils
from uutils.torch_uu import get_layer_names_to_do_sim_analysis_fc
from meta_learning.training.meta_training import meta_eval, meta_train_fixed_iterations_full_epoch_possible
from meta_learning.meta_learners.maml_meta_learner import MAMLMetaLearner
from meta_learning.meta_learners.pretrain_convergence import FitFinalLayer
from meta_learning.base_models.resnet_rfs import resnet12, resnet18
from meta_learning.base_models.learner_from_opt_as_few_shot_paper import Learner
from meta_learning.base_models.kcnn import Kcnn
from meta_learning.datasets.rand_fc_nn_vec_mu_ls_gen import get_backbone
import pathlib
from pathlib import Path
from uutils.torch_uu.dataloaders import get_torchmeta_sinusoid_dataloaders, get_torchmeta_rand_fnn_dataloaders, \
get_miniimagenet_dataloaders_torchmeta
from uutils.torch_uu.distributed import is_lead_worker
def manual_args_load() -> Namespace:
"""
Manually load args.
Divided into three parts (due to legacy code)
1. parse args from terminal
2. manually load args in this script
3. add remaining common setup args to experiment
:param args:
:return:
"""
# -- parse args from terminal
args: Namespace = uutils.parse_basic_meta_learning_args_from_terminal()
# -- manual args load
# Config for few-shot learning
args.k_shots = 5
# args.k_eval = 15
args.k_eval = 100
args.n_classes = 5
# - training its/epochs
# args.num_its = 30
# args.num_its = 4
# args.meta_batch_size_train = 8
args.meta_batch_size_train = 32
args.log_train_freq = 100 if not args.debug else 1
args.eval_iters = 1
# args.meta_batch_size_eval = 8
args.meta_batch_size_eval = 32
args.log_val_freq = 100 if not args.debug else 1 # for hyperparam tuning. note: lower the quicker the code.
# - maml
args.meta_learner_name = 'maml_fixed_inner_lr'
args.inner_lr = 1e-1
args.nb_inner_train_steps = 5
args.track_higher_grads = True # set to false only during meta-testing, but code sets it automatically only for meta-test
args.copy_initial_weights = False # DONT PUT TRUE. details: set to True only if you do NOT want to train base model's initialization https://stackoverflow.com/questions/60311183/what-does-the-copy-initial-weights-documentation-mean-in-the-higher-library-for
    args.fo = True  # True disallows flow of higher-order grads while still letting params track gradients.
# args.fo = True
# - outer trainer params
args.outer_lr = 1e-5
# args.grad_clip_rate = None # does no gradient clipping if None
# args.grad_clip_mode = None # more specific setting of the crad clipping split
args.grad_clip_rate = 0.25 # does no gradient clipping if None, meta-lstm used 0.25
args.grad_clip_mode = 'clip_all_together' # clip all params together/the same way
# - pff
# args.meta_learner_name = 'FitFinalLayer'
# -- Data-set options
args.split = "train"
# args.split = 'val'
# args.split = "test"
# - with BN really small to really large --
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_1e-16_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_1e-08_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_0.0001_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_0.01_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_0.1_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_0.25_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_0.5_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_1.0_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_2.0_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_4.0_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_8.0_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_16.0_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_with_BN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_with_BN_std1_32.0_std2_1.0_noise_std0.1nb_h_layes3_out1_H15/').expanduser()
# -- NO BN --
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_std1_0.0001_std2_1.0_noise_std0.1nb_h_layes3_out1_H15').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_std1_0.1_std2_1.0_noise_std0.1nb_h_layes3_out1_H15').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_std1_4_std2_1.0_noise_std0.1nb_h_layes3_out1_H15').expanduser()
# args.data_path = Path('~/data/dataset_LS_fully_connected_NN_nb_tasks200_data_per_task1000_l_4_nb_h_layes3_out1_H15/meta_set_fully_connected_NN_std1_16_std2_1.0_noise_std0.1nb_h_layes3_out1_H15').expanduser()
# mini-imagenet
# args.data_path = 'torchmeta_mini_imagenet'
# args.data_path = 'sinusoid'
# Data loader options
# Base model
# args.base_model_mode = 'cnn'
# args.base_model_mode = 'child_mdl_from_opt_as_a_mdl_for_few_shot_learning_paper' # & MAML
# args.base_model_mode = 'resnet12_rfs'
# args.base_model_mode = 'resnet18_rfs'
# args.base_model_mode = 'resnet18'
# args.base_model_mode = 'resnet50'
# args.base_model_mode = 'resnet101'
# args.base_model_mode = 'resnet152'
# args.base_model_mode = 'rand_init_true_arch'
# args.base_model_mode = 'f_avg'
# args.base_model_mode = 'f_avg_add_noise'
# args.base_model_mode = 'custom_synthetic_backbone_NO_BN'
# args.base_model_mode = 'custom_synthetic_backbone_YES_BN'
args.base_model_mode = 'custom_synthetic_backbone_YES_BN' if '_BN' in str(args.data_path) else 'custom_synthetic_backbone_NO_BN'
# args.base_model_mode = 'cbfinn_sinusoid'
# args.base_model_mode = Path('~/data/logs/logs_Sep29_13-05-52_jobid_383794.iam-pbs/ckpt_file.pt').expanduser()
# args.base_model_mode = '/home/miranda9/data/logs/logs_Nov06_16-45-35_jobid_669/ckpt_file.pt'
# args.base_model_mode = '/home/miranda9/data/logs/logs_Nov11_13-32-07_jobid_866/ckpt_file.pt'
# args.base_model_mode = '/home/miranda9/data/logs/logs_Nov05_15-44-03_jobid_668/ckpt_file.pt'
# args.base_model_mode = '/home/miranda9/data/logs/logs_Nov11_13-03-40_jobid_858/ckpt_file.pt'
# args.base_model_mode = '/home/miranda9/data/logs/logs_Nov12_09-33-21_jobid_934/ckpt_file.pt'
# args.base_model_mode = '/home/miranda9/data/logs/logs_Nov11_15-10-28_jobid_851/ckpt_file.pt'
# args.base_model_mode = Path(args.base_model_mode).expanduser()
# -- Setup up remaining stuff for experiment
args: Namespace = uutils.setup_args_for_experiment(args)
args.num_workers = 4
args.pin_memory = False # it is generally not recommended to return CUDA tensors in multi-process loading because of many subtleties in using CUDA and sharing CUDA tensors in multiprocessing (see CUDA in multiprocessing). Instead, we recommend using automatic memory pinning (i.e., setting pin_memory=True), which enables fast data transfer to CUDA-enabled GPUs. https://pytorch.org/docs/stable/data.html
# load_cluster_jobids_to(args)
return args
def main(args):
print('-------> Inside Main <--------')
# Set up the learner/base model
    print(f'--> args.base_model_mode: {args.base_model_mode}')
if args.base_model_mode == 'cnn':
args.bn_momentum = 0.95
args.bn_eps = 1e-3
args.grad_clip_mode = 'clip_all_together'
args.image_size = 84
args.act_type = 'sigmoid'
args.base_model = Kcnn(args.image_size, args.bn_eps, args.bn_momentum, args.n_classes,
filter_size=args.n_classes,
nb_feature_layers=6,
act_type=args.act_type)
elif args.base_model_mode == 'child_mdl_from_opt_as_a_mdl_for_few_shot_learning_paper':
args.k_eval = 150
args.bn_momentum = 0.95
args.bn_eps = 1e-3
args.grad_clip_mode = 'clip_all_together'
args.image_size = 84
args.base_model = Learner(image_size=args.image_size, bn_eps=args.bn_eps, bn_momentum=args.bn_momentum, n_classes=args.n_classes).to(args.device)
elif args.base_model_mode == 'resnet12_rfs':
args.k_eval = 30
args.base_model = resnet12(avg_pool=True, drop_rate=0.1, dropblock_size=5, num_classes=args.n_classes).to(args.device)
elif args.base_model_mode == 'resnet18_rfs':
args.k_eval = 30
args.base_model = resnet18(avg_pool=True, drop_rate=0.1, dropblock_size=5, num_classes=args.n_classes).to(
args.device)
elif args.base_model_mode == 'resnet18':
args.base_model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=False)
# replace_bn(args.base_model, 'model')
args.base_model.fc = torch.nn.Linear(in_features=512, out_features=args.n_classes, bias=True)
elif args.base_model_mode == 'resnet50':
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=False)
# replace_bn(model, 'model')
model.fc = torch.nn.Linear(in_features=2048, out_features=args.n_classes, bias=True)
args.base_model = model
elif args.base_model_mode == 'resnet101':
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet101', pretrained=False)
# replace_bn(model, 'model')
model.fc = torch.nn.Linear(in_features=2048, out_features=args.n_classes, bias=True)
args.base_model = model
elif args.base_model_mode == 'resnet152':
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet152', pretrained=False)
# replace_bn(model, 'model')
model.fc = torch.nn.Linear(in_features=2048, out_features=args.n_classes, bias=True)
args.base_model = model
elif args.base_model_mode == 'rand_init_true_arch':
db = torch.load(str(args.data_path / args.split / 'f_avg.pt'))
args.base_model = db['f'].to(args.device)
# re-initialize model: https://discuss.pytorch.org/t/reinitializing-the-weights-after-each-cross-validation-fold/11034
[layer.reset_parameters() for layer in args.base_model.children() if hasattr(layer, 'reset_parameters')]
elif args.base_model_mode == 'f_avg':
db = torch.load(str(args.data_path / args.split / 'f_avg.pt'))
args.base_model = db['f'].to(args.device)
elif args.base_model_mode == 'f_avg_add_noise':
db = torch.load(str(args.data_path / args.split / 'f_avg.pt'))
args.base_model = db['f'].to(args.device)
# add small noise to initial weight to break symmetry
print()
with torch.no_grad():
for i, w in enumerate(args.base_model.parameters()):
mu = torch.zeros(w.size())
std = w * 1.25e-2 # two decimal places and a little more
noise = torch.distributions.normal.Normal(loc=mu, scale=std).sample()
w += noise
print('>>> f_avg_add_noise')
elif 'custom_synthetic_backbone' in args.base_model_mode:
# - hps for backbone
Din, Dout = 1, 1
# H = 15*20 # 15 is the number of features of the target function
H = 15*4
# 10 layers, 9 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, Dout)]
# 9 layers, 8 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, Dout)]
# 8 layers, 7 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, Dout)]
# 7 layers, 6 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, H), (H, H), (H, H), (H, H), (H, Dout)]
# 6 layers, 5 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, H), (H, H), (H, H), (H, Dout)]
# 5 layers, 4 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, H), (H, H), (H, Dout)]
# 4 layers, 3 hidden layers
hidden_dim = [(Din, H), (H, H), (H, H), (H, Dout)]
# 3 layers, 2 hidden layers
# hidden_dim = [(Din, H), (H, H), (H, Dout)]
print(f'# of hidden layers = {len(hidden_dim) - 1}')
print(f'total layers = {len(hidden_dim)}')
section_label = [1] * (len(hidden_dim) - 1) + [2]
# - hps for model
target_f_name = 'fully_connected_NN_with_BN' if 'YES_BN' in args.base_model_mode else 'fully_connected_NN'
task_gen_params = {
'metaset_path': None,
'target_f_name': target_f_name,
'hidden_dim': hidden_dim,
'section_label': section_label,
'Din': Din, 'Dout': Dout, 'H': H
}
# - CUSTOM
args.base_model = get_backbone(task_gen_params)
# args.base_model = get_backbone(task_gen_params, act='sigmoid')
# - save params for generating bb
args.task_gen_params = task_gen_params
elif args.base_model_mode == 'cbfinn_sinusoid':
target_f_name = 'fully_connected_NN'
# params for backbone
Din, Dout = 1, 1
H = 40 # original cbfinn
# 3 layers, 2 hidden layers (origal cbfinn)
hidden_dim = [(Din, H), (H, H), (H, Dout)]
print(f'# of hidden layers = {len(hidden_dim) - 1}')
print(f'total layers = {len(hidden_dim)}')
section_label = [1] * (len(hidden_dim) - 1) + [2]
task_gen_params = {
'metaset_path': None,
'target_f_name': target_f_name,
'hidden_dim': hidden_dim,
'section_label': section_label,
'Din': Din, 'Dout': Dout, 'H': H
}
# CBFINN SINUSOID
args.base_model = get_backbone(task_gen_params)
# args.base_model = get_backbone(task_gen_params, act='sigmoid')
# save params for generating bb
args.task_gen_params = task_gen_params
elif type(args.base_model_mode) is pathlib.PosixPath:
# db = torch_uu.load(str(args.resume_ckpt_path))
db = torch.load(str(args.base_model_mode))
# meta_learner = db['meta_learner']
args.base_model = db['f']
# in case loading directly doesn't work
# modules = eval(db['f_modules_str'])
# args.base_model = torch_uu.nn.Sequential(modules)
# f_state_dict = db['f_state_dict']
# args.base_model.load_state_dict(f_state_dict)
print('RUNNING FROM CHECKPOINT')
args.logger.loginfo('RUNNING FROM CHECKPOINT')
else:
raise ValueError(f'Not Implemented: args.base_model_mode = {args.base_model_mode}')
# GPU safety check
args.base_model.to(args.device) # make sure it is on GPU
if torch.cuda.is_available():
args.base_model.cuda()
print(f'{args.base_model=}')
# Set up Meta-Learner
args.scheduler = None
if args.meta_learner_name == 'maml_fixed_inner_lr':
args.grad_clip_rate = None
args.meta_learner = MAMLMetaLearner(args, args.base_model, fo=args.fo, lr_inner=args.inner_lr)
args.outer_opt = optim.Adam(args.meta_learner.parameters(), args.outer_lr)
# args.outer_opt = Adafactor(args.meta_learner.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
# args.scheduler = AdafactorSchedule(args.outer_opt)
elif args.meta_learner_name == "FitFinalLayer":
args.meta_learner = FitFinalLayer(args, args.base_model)
args.inner_opt_name = 'PFF'
args.outer_opt = 'None'
else:
raise ValueError(f"Invalid trainable opt: {args.meta_learner_name}")
# Get Meta-Sets for few shot learning
if 'torchmeta_mini_imagenet' in str(args.data_path):
args.meta_learner.classification()
args.training_mode = 'iterations'
meta_train_dataloader, meta_val_dataloader, meta_test_dataloader = get_miniimagenet_dataloaders_torchmeta(args)
elif 'sinusoid' in str(args.data_path):
args.training_mode = 'iterations'
args.criterion = nn.MSELoss()
args.meta_learner.regression()
meta_train_dataloader, meta_val_dataloader, meta_test_dataloader = get_torchmeta_sinusoid_dataloaders(args)
elif 'fully_connected' in str(args.data_path.name):
args.training_mode = 'iterations'
args.criterion = nn.MSELoss()
args.meta_learner.regression()
meta_train_dataloader, meta_val_dataloader, meta_test_dataloader = get_torchmeta_rand_fnn_dataloaders(args)
else:
raise ValueError(f'Not such task: {args.data_path}')
args.dataloaders = {'train': meta_train_dataloader, 'val': meta_val_dataloader, 'test': meta_test_dataloader}
# -- load layers to do sim analysis
args.include_final_layer_in_lst = True
args.layer_names = get_layer_names_to_do_sim_analysis_fc(args, include_final_layer_in_lst=args.include_final_layer_in_lst)
# args.layer_names = get_layer_names_to_do_sim_analysis_bn(args, include_final_layer_in_lst=args.include_final_layer_in_lst)
# -- Choose experiment split
assert 'meta' not in args.split
if args.split == 'train':
print('--------------------- META-TRAIN ------------------------')
# if not args.trainin_with_epochs:
meta_train_fixed_iterations_full_epoch_possible(args)
# else:
# meta_train_epochs(args, meta_learner, args.outer_opt, meta_train_dataloader, meta_val_dataloader)
elif args.split == 'val':
print('--------------------- META-Eval Val ------------------------')
        # args.track_higher_grads = False  # so as not to track intermediate tensors for the backward pass when no backward pass will be done
        acc_mean, acc_std, loss_mean, loss_std = meta_eval(args, meta_val_dataloader)
        args.logger.loginfo(f"val loss: {loss_mean} +- {loss_std}, val acc: {acc_mean} +- {acc_std}")
elif args.split == 'test':
print('--------------------- META-Eval Test ------------------------')
        # args.track_higher_grads = False  # so as not to track intermediate tensors for the backward pass when no backward pass will be done
        acc_mean, acc_std, loss_mean, loss_std = meta_eval(args, meta_test_dataloader)
        args.logger.loginfo(f"test loss: {loss_mean} +- {loss_std}, test acc: {acc_mean} +- {acc_std}")
else:
raise ValueError(f'Value error: args.split = {args.split}, is not a valid split.')
# - wandb
if is_lead_worker(args.rank) and args.log_to_wandb:
import wandb
print('---> about to call wandb.finish()')
wandb.finish()
print('---> done calling wandb.finish()')
if __name__ == "__main__":
import time
start = time.time()
# - run experiment
args = manual_args_load()
main(args)
# - print success
duration_secs = time.time() - start
print(f"\nSuccess, time passed: hours:{duration_secs / (60 ** 2)}, minutes={duration_secs / 60}, seconds={duration_secs}")
print('--> Success Done! (python print) \a')
|
nilq/baby-python
|
python
|
from typing import List
import logging
import orjson
from instauto.api.actions.structs.feed import FeedGet
from instauto.api.client import ApiClient
logging.basicConfig()
logger = logging.getLogger(__name__)
def get_feed(client: ApiClient, limit: int) -> List[dict]:
ret = []
obj = FeedGet()
while len(ret) < limit:
obj, resp = client.feed_get(obj)
data = orjson.loads(resp.text)
items = [i['media_or_ad'] for i in data['feed_items'] if 'media_or_ad' in i]
logger.info("Retrieved {} posts, {} more to go.".format(len(ret), limit - len(ret)))
if len(items) == 0:
break
ret.extend(items)
return ret
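# Illustrative usage sketch (not part of the original module). How the
# ApiClient is constructed depends on your instauto setup; the construction
# shown below is an assumption, not the library's documented API.
#   client = ApiClient(...)  # authenticate however your instauto setup requires
#   posts = get_feed(client, limit=50)
#   for post in posts:
#       print(post.get('pk'))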
|
nilq/baby-python
|
python
|
from django.urls import path
from boards.views import home, board_topics, new_topic, topic_posts, reply_topic
app_name = "boards"
urlpatterns = [
path("", home, name="home"),
path("boards/<int:pk>/", board_topics, name="board_topics"),
path("boards/<int:pk>/new/", new_topic, name="new_topics"),
path("boards/<int:pk>/topics/<int:topic_pk>/", topic_posts, name="topic_posts"),
path(
"boards/<int:pk>/topics/<int:topic_pk>/reply/", reply_topic, name="reply_topic"
),
]
|
nilq/baby-python
|
python
|
"""Used for tidying up any changes made during testing"""
import shutil
def test_tidy_up(): # pragma: no cover
"""Delete all files and folders created during testing"""
try:
shutil.rmtree('config')
except (FileNotFoundError, PermissionError):
pass
assert True
|
nilq/baby-python
|
python
|
import cherrypy
def serve(app, port=5000, config={}) -> None:
"""
Serve Flask app with production settings
:param app: Flask application object
:param port: on which port to run
:param config: additional config dictionary
:return:
"""
cherrypy.tree.graft(app, '/')
# Set the configuration of the web server to production mode
cherrypy.config.update({**{
'environment': 'production',
'engine.autoreload_on': False,
'log.screen': True,
'server.socket_port': port,
'server.socket_host': '0.0.0.0'
}, **config})
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
|
nilq/baby-python
|
python
|
import pytest
from cowdict import CowDict
base_dict = {
"foo1": "bar1",
"foo2": "bar2",
"foo3": "bar3",
"foo4": "bar4",
"foo5": "bar5",
}
base_dict_items = tuple(base_dict.items())
keys = ("foo1", "foo2", "foo3", "foo4", "foo5")
def test_same_unchanged():
cd = CowDict(base_dict)
for key in keys:
assert cd[key] == base_dict[key]
assert set(base_dict_items) == set(cd.items())
assert base_dict_items == tuple(base_dict.items())
def test_same_changed():
cd = CowDict(base_dict)
cd["foo2"] = "baz2"
cd["foo5"] = "baz5"
for key in keys:
if key in ("foo2", "foo5"):
assert cd[key] == key.replace("foo", "baz")
else:
assert cd[key] == base_dict[key]
assert set(cd.items()) == {
('foo1', 'bar1'),
('foo2', 'baz2'),
('foo3', 'bar3'),
('foo4', 'bar4'),
('foo5', 'baz5'),
}
assert base_dict_items == tuple(base_dict.items())
def test_new_keys_added():
cd = CowDict(base_dict)
cd["foo6"] = "bar6"
cd["foo7"] = "bar7"
for key in keys:
assert cd[key] == base_dict[key]
assert cd["foo6"] == "bar6"
assert cd["foo7"] == "bar7"
assert set(cd.items()) == {
('foo1', 'bar1'),
('foo2', 'bar2'),
('foo3', 'bar3'),
('foo4', 'bar4'),
('foo5', 'bar5'),
('foo6', 'bar6'),
('foo7', 'bar7'),
}
assert base_dict_items == tuple(base_dict.items())
def test_base_keys_deleted():
cd = CowDict(base_dict)
del cd["foo1"]
del cd["foo5"]
assert cd["foo2"] == "bar2"
assert cd["foo3"] == "bar3"
assert cd["foo4"] == "bar4"
assert set(cd.items()) == {
('foo2', 'bar2'),
('foo3', 'bar3'),
('foo4', 'bar4'),
}
with pytest.raises(KeyError):
cd["foo1"]
with pytest.raises(KeyError):
cd["foo5"]
assert base_dict_items == tuple(base_dict.items())
def test_new_keys_deleted():
cd = CowDict(base_dict)
cd["foo6"] = "bar6"
cd["foo7"] = "bar7"
del cd["foo6"]
del cd["foo7"]
for key in keys:
assert cd[key] == base_dict[key]
assert set(base_dict_items) == set(cd.items())
assert base_dict_items == tuple(base_dict.items())
def test_missing_keys_deleted():
cd = CowDict(base_dict)
with pytest.raises(KeyError):
del cd["foo6"]
assert base_dict_items == tuple(base_dict.items())
def test_multiple_operations():
cd = CowDict(base_dict)
del cd["foo1"]
del cd["foo3"]
cd["new_key1"] = "new_value1"
cd["new_key2"] = "new_value2"
cd["foo4"] = "changed_value"
with pytest.raises(KeyError):
del cd["non_existing_key"]
assert set(cd.keys()) == {"foo2", "foo4", "foo5", "new_key1", "new_key2"}
assert set(cd.items()) == {
("foo2", "bar2"),
("foo4", "changed_value"),
("foo5", "bar5"),
("new_key1", "new_value1"),
("new_key2", "new_value2"),
}
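# Illustrative sketch (not one of the tests above): the copy-on-write behaviour in one place.
# CowDict keeps changes in an overlay, so the wrapped base dict is never mutated.
def _demo_cow_behaviour():
    base = {"a": 1, "b": 2}
    cd = CowDict(base)
    cd["a"] = 10   # shadow an existing key
    cd["c"] = 3    # add a new key
    del cd["b"]    # hide a base key
    assert base == {"a": 1, "b": 2}              # the base dict is untouched
    assert dict(cd.items()) == {"a": 10, "c": 3}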
|
nilq/baby-python
|
python
|
"""Pythonic toolkit for web development."""
|
nilq/baby-python
|
python
|
from enum import Enum
from ElevatorComponent import ElevatorComponent
from Messages import *
from time import sleep
class STATE(Enum):
"""
States used exclusively by Car Door
"""
OPENED = "opened"
OPENING = "opening"
CLOSED = "closed"
CLOSING = "closing"
class CarDoor(ElevatorComponent):
def __init__(self, CarCtrl, ElevatorCar):
super().__init__()
# input
self.IN = None # Received from Car Controller
# output
self.OUT = None # Recipient is Car Controller and Elevator Car
# Coupled Input/Output: Sends and receives from Car Controller and sends to Elevator Car, so an instance of the both is needed
self.ctrl = CarCtrl
self.car = ElevatorCar
# component vars
self.state = STATE.CLOSED # initialize in CLOSED state
self.processing_time = 5.0
self.motion_time = 3.0
def setIN(self, IN):
# in ? job && cmdDoor == OPEN
# Above Met: MoveTo STATE.OPENING
self.IN = IN
if(self.IN):
if(self.IN.contents["value"] == CommandDoor.DOOR_CAR_OPEN):
self.state = STATE.OPENING
# Generate IN Log
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Ctrl","Car Door","R","in",self.IN)
# in ? job && cmdDoor == CLOSE
# Above Met: MoveTo STATE.CLOSING
elif(self.IN.contents["value"] == CommandDoor.DOOR_CAR_CLOSE):
self.state = STATE.CLOSING
# Generate IN Log
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Ctrl","Car Door","R","in",self.IN)
def state_processor(self):
while True:
if self.state == STATE.CLOSED:
pass
# Generate IN Status Log
# TODO: if(self.IN):
# TODO: self.write_log(self.get_sim_time(), self.get_real_time(),"Car Ctrl","","C",self.IN.contents)
elif self.state == STATE.OPENING:
# Send message MsgDoor -> OUT
self.OUT = MsgDoor("out", StatusDoor.DOOR_CAR_OPENED, 100, False)
# MoveTo STATE.OPENED
self.state = STATE.OPENED
elif self.state == STATE.OPENED:
# Do some timeout logic, MoveTo STATE.CLOSING
# Generate OUT Log
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Door","Car Ctrl","S","out",self.OUT)
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Door","Elevator Car","S","out",self.OUT)
self.ctrl.setiDoor(self.OUT)
self.car.setoStDoorMsg(self.OUT)
sleep(self.processing_time)
sleep(self.motion_time)
self.state = STATE.CLOSING
elif self.state == STATE.CLOSING:
# Send message MsgDoor -> OUT
self.OUT = MsgDoor("out", StatusDoor.DOOR_CAR_CLOSED, 100, False)
# MoveTo STATE.CLOSED
self.state = STATE.CLOSED
# Generate OUT Log
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Door","Car Ctrl","S","out",self.OUT)
self.write_log(self.get_sim_time(), self.get_real_time(),"Car Door","Elevator Car","S","out",self.OUT)
self.ctrl.setiDoor(self.OUT)
self.car.setoStDoorMsg(self.OUT)
def main(self):
self.state_processor()
if __name__ == '__main__':
ctrl = None
car = None
door = CarDoor(ctrl, car)
door.main()
|
nilq/baby-python
|
python
|
from flask import Flask
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import InputRequired
app = Flask(__name__)
app.secret_key = "asdfdf"
# Configure the database
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://root:mysql@127.0.0.1:3306/booktest"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class AddBookForm(FlaskForm):
    """Custom form for adding a book"""
    author = StringField('作者:', validators=[InputRequired('请输入作者')])
    book = StringField('书名:', validators=[InputRequired('请输入书名')])
    submit = SubmitField('添加')
class Author(db.Model):
    """Author model: the "one" side of the relationship"""
    __tablename__ = "authors"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Relationship attribute so an author can directly access its related books (the "many" side)
    # backref also adds an `author` attribute to Book, so book.author returns the book's author
    books = db.relationship('Book', backref='author')
class Book(db.Model):
    """Book model: the "many" side of the relationship"""
    __tablename__ = "books"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Store the "one" side's id as a foreign key
    author_id = db.Column(db.Integer, db.ForeignKey(Author.id))
@app.route('/delete_author/<author_id>')
def delete_author(author_id):
"""删除作者以及作者所有的书籍"""
try:
author = Author.query.get(author_id)
except Exception as e:
print(e)
return "查询错误"
if not author:
return "作者不存在"
    # Delete the author and all of their books
    try:
        # Delete the books first
        Book.query.filter(Book.author_id == author_id).delete()
        # Then delete the specified author
db.session.delete(author)
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
return "删除失败"
return redirect(url_for('index'))
@app.route('/delete_book/<book_id>')
def delete_book(book_id):
"""删除书籍"""
try:
book = Book.query.get(book_id)
except Exception as e:
print(e)
return "查询错误"
if not book:
return "书籍不存在"
try:
db.session.delete(book)
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
return '删除失败'
return redirect(url_for('index'))
@app.route('/', methods=['get', 'post'])
def index():
"""返回首页"""
book_form = AddBookForm()
# 如果book_form可以被提交
if book_form.validate_on_submit():
# 1. 取出表单中数据
author_name = book_form.author.data
book_name = book_form.book.data
# 2. 做具体业务逻辑代码实现
# 2.1 查询指定名字的作者
author = Author.query.filter(Author.name == author_name).first()
# if 指定名字的作者不存在:
if not author:
try:
                # Add the author to the database
                # Initialize the author model object
                author = Author(name=author_name)
                db.session.add(author)
                db.session.commit()
                # Add the book to the database (with its author)
book = Book(name=book_name, author_id=author.id)
db.session.add(book)
db.session.commit()
except Exception as e:
db.session.rollback()
print(e)
flash("添加失败")
else:
book = Book.query.filter(Book.name == book_name).first()
if not book:
try:
                    # Add the book to the database (with its author)
book = Book(name=book_name, author_id=author.id)
db.session.add(book)
db.session.commit()
except Exception as e:
print(e)
flash("添加失败")
else:
flash("已存在")
else:
if request.method == "POST":
flash('参数错误')
    # 1. Query the data
    authors = Author.query.all()
    # 2. Pass the data into the template, render and return it
return render_template('demo1_bookDemo.html', authors=authors, form=book_form)
if __name__ == '__main__':
    # Drop all tables
    db.drop_all()
    # Create all tables
    db.create_all()
au1 = Author(name='老王')
au2 = Author(name='老尹')
au3 = Author(name='老刘')
    # Add the author records to the database session
    db.session.add_all([au1, au2, au3])
    # Commit the session
db.session.commit()
bk1 = Book(name='老王回忆录', author_id=au1.id)
bk2 = Book(name='我读书少,你别骗我', author_id=au1.id)
bk3 = Book(name='如何才能让自己更骚', author_id=au2.id)
bk4 = Book(name='怎样征服美丽少女', author_id=au3.id)
bk5 = Book(name='如何征服英俊少男', author_id=au3.id)
    # Add the book records to the database session
    db.session.add_all([bk1, bk2, bk3, bk4, bk5])
    # Commit the session
db.session.commit()
app.run(debug=True)
|
nilq/baby-python
|
python
|
"""*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
from math import ceil, floor
###################################################################################################
#################################### Global Variables #############################################
###################################################################################################
global interruptVector
global interruptHandler
global interruptHandlerLock
RegionDescList = []
###################################################################################################
######################################### Functions ###############################################
###################################################################################################
def interruptControl(NVIC, event):
global interruptVector
global interruptHandler
global interruptHandlerLock
Database.clearSymbolValue("core", interruptVector)
Database.clearSymbolValue("core", interruptHandler)
Database.clearSymbolValue("core", interruptHandlerLock)
if (event["value"] == True):
Database.setSymbolValue("core", interruptVector, True, 2)
Database.setSymbolValue("core", interruptHandler, icmInstanceName.getValue() + "_InterruptHandler", 2)
Database.setSymbolValue("core", interruptHandlerLock, True, 2)
else :
Database.setSymbolValue("core", interruptVector, False, 2)
Database.setSymbolValue("core", interruptHandler, "ICM_Handler", 2)
Database.setSymbolValue("core", interruptHandlerLock, False, 2)
def icmCreateRegionDesc(component, menu, RegionNumber):
regionDescriptor = component.createMenuSymbol(icmInstanceName.getValue() + "_REGION_DESC"+ str(RegionNumber), menu)
regionDescriptor.setLabel("Region descriptor " + str(RegionNumber))
icmRegionDescStartAddr = component.createHexSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_TYPE", regionDescriptor)
icmRegionDescStartAddr.setLabel("Start Address :")
icmRegionDescAlgo = component.createKeyValueSetSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_ALGO", regionDescriptor)
icmRegionDescAlgo.setLabel("SHA Algorithm")
icmRegionDescAlgo.setDisplayMode("Description")
icmRegionDescAlgo.setOutputMode("Value")
icmRegionDescAlgo.addKey("SHA1", "0", "SHA1 algorithm")
icmRegionDescAlgo.addKey("SHA256", "1", "SHA256 algorithm")
icmRegionDescAlgo.addKey("SHA224", "4", "SHA224 algorithm")
icmRegionDescAlgo.setSelectedKey("SHA1")
icmRegionDescPROCDLY = component.createKeyValueSetSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_PROCDLY", regionDescriptor)
icmRegionDescPROCDLY.setLabel("SHA Processing Delay")
icmRegionDescPROCDLY.setOutputMode("Value")
icmRegionDescPROCDLY.addKey("SHORTEST", "0", "SHA processing runtime shortest")
icmRegionDescPROCDLY.addKey("LONGEST", "1", "SHA processing runtime longest")
icmRegionDescPROCDLY.setDefaultValue(0)
icmRegionDescPROCDLY.setSelectedKey("SHORTEST")
icmRegionDescDisableInt = component.createMenuSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_DISABLE_INT", regionDescriptor)
icmRegionDescDisableInt.setLabel("Disable interrupt events")
icmRegionDescDisIntSUIEN = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_SUIEN", icmRegionDescDisableInt)
icmRegionDescDisIntSUIEN.setLabel("Disable Status Updated Condition")
icmRegionDescDisIntSUIEN.setDescription("If disabled, the Region Status Updated Condition interrupt flag remains cleared")
icmRegionDescDisIntSUIEN.setDefaultValue(False)
icmRegionDescDisIntECIEN = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_ECIEN", icmRegionDescDisableInt)
icmRegionDescDisIntECIEN.setLabel("Disable End Bit Condition")
icmRegionDescDisIntECIEN.setDescription("If disabled, the End Bit Condition interrupt flag remains cleared")
icmRegionDescDisIntECIEN.setDefaultValue(False)
icmRegionDescDisIntWCIEN = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_WCIEN", icmRegionDescDisableInt)
icmRegionDescDisIntWCIEN.setLabel("Disable Wrap Condition")
icmRegionDescDisIntWCIEN.setDescription("If disabled, the Wrap Condition interrupt flag remains cleared")
icmRegionDescDisIntWCIEN.setDefaultValue(False)
icmRegionDescDisIntBEIEN = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_BEIEN", icmRegionDescDisableInt)
icmRegionDescDisIntBEIEN.setLabel("Disable Bus Error Interrupt")
icmRegionDescDisIntBEIEN.setDescription("If disabled, the Bus Error Interrupt flag remains cleared")
icmRegionDescDisIntBEIEN.setDefaultValue(False)
icmRegionDescDisIntDMIEN = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_DMIEN", icmRegionDescDisableInt)
icmRegionDescDisIntDMIEN.setLabel("Disable Digest Mismatch Interrupt")
icmRegionDescDisIntDMIEN.setDescription("If disabled, the Digest Mismatch Interrupt flag remains cleared")
icmRegionDescDisIntDMIEN.setDefaultValue(False)
icmRegionDescDisIntRHIEN = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_RHIEN", icmRegionDescDisableInt)
    icmRegionDescDisIntRHIEN.setLabel("Disable Region Hash Completed Interrupt")
    icmRegionDescDisIntRHIEN.setDescription("If disabled, the Region Hash Completed Interrupt flag remains cleared")
icmRegionDescDisIntRHIEN.setDefaultValue(False)
icmRegionDescEOM = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_EOM", regionDescriptor)
icmRegionDescEOM.setLabel("Enable End of Monitoring")
icmRegionDescEOM.setDescription("The current descriptor terminates the Main List. WRAP value has no effect.")
icmRegionDescEOM.setDefaultValue(False)
icmRegionDescWRAP = component.createBooleanSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_WRAP", regionDescriptor)
icmRegionDescWRAP.setLabel("Wrap command")
icmRegionDescWRAP.setDescription("The next region descriptor address loaded is the descriptor list base address.")
icmRegionDescWRAP.setDefaultValue(False)
icmRegionDescCDWBN = component.createKeyValueSetSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_CDWBN", regionDescriptor)
icmRegionDescCDWBN.setLabel("Digest process")
icmRegionDescCDWBN.setOutputMode("Value")
icmRegionDescCDWBN.addKey("Write Back", "0", "The digest is written to the Hash area.")
icmRegionDescCDWBN.addKey("Compare", "1", "The digest value is compared to the digest stored in the Hash area.")
icmRegionDescCDWBN.setSelectedKey("Write Back")
icmRegionDescSize = component.createIntegerSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_SIZE", regionDescriptor)
icmRegionDescSize.setLabel("Size in byte (multiple of 64):")
icmRegionDescSize.setMin(64)
icmRegionDescSize.setMax(64*65536)
icmRegionDescSize.setDefaultValue(64)
icmRegionDescSizeRounded = component.createIntegerSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_SIZE_REG", regionDescriptor)
icmRegionDescSizeRounded.setDependencies(adjustRegionDescriptorSize, [icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_SIZE"])
icmRegionDescSizeRounded.setVisible(False)
# Region size rounded display
icmRegionDescSizeComment = component.createCommentSymbol(icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_SIZE_COMMENT", regionDescriptor)
icmRegionDescSizeComment.setLabel("****Region size will be rounded to n bytes****")
icmRegionDescSizeComment.setVisible(False)
icmRegionDescSizeComment.setDependencies(checkRegionDescriptorSizeComment, [icmInstanceName.getValue() + "_REGION_DESC" + str(RegionNumber) + "_SIZE"])
regionDescriptor.setVisible(False)
regionDescriptor.setEnabled(False)
return regionDescriptor
###################################################################################################
########################################## Callbacks #############################################
###################################################################################################
# Round entered value to multiple of 64 byte
def adjustRegionDescriptorSize(symbol, event):
value = event["value"]
if (value != 64):
symbol.setValue(int(floor(value/64)))
else:
symbol.setValue(0)
# Display comment if value is rounded
def checkRegionDescriptorSizeComment(symbol, event):
value = event["value"]
if ((value % 64) != 0):
symbol.setLabel("****Region size will be rounded to " + str(int((floor(value/64)+1)*64)) +" bytes****")
symbol.setVisible(True)
else:
symbol.setVisible(False)
# adjust how many region descriptors are shown based on number entered
def adjustRegionDescriptor(list, event):
for region in RegionDescList[:event["value"]]:
if region.getVisible() != True:
region.setVisible(True)
region.setEnabled(True)
for region in RegionDescList[event["value"]:]:
if region.getVisible() != False:
region.setVisible(False)
region.setEnabled(False)
def icmClockWarningStatus(symbol, event):
symbol.setVisible(not event["value"])
def InterruptStatusWarning(symbol, event):
if (Database.getSymbolValue(icmInstanceName.getValue().lower(), "INTERRUPT_MODE") == True):
symbol.setVisible(event["value"])
###################################################################################################
########################################## Component #############################################
###################################################################################################
def instantiateComponent(icmComponent):
global icmInstanceName
    global interruptVectorUpdate
global interruptVector
global interruptHandler
global interruptHandlerLock
icmInstanceName = icmComponent.createStringSymbol("ICM_INSTANCE_NAME", None)
icmInstanceName.setVisible(False)
icmInstanceName.setDefaultValue(icmComponent.getID().upper())
print("Running " + icmInstanceName.getValue())
# Initialize peripheral clock
Database.setSymbolValue("core", icmInstanceName.getValue() + "_CLOCK_ENABLE", True, 1)
################################################################################
#### Menu ####
################################################################################
icmInterruptMode = icmComponent.createBooleanSymbol("INTERRUPT_MODE", None)
icmInterruptMode.setLabel("Interrupt Mode")
icmInterruptMode.setDefaultValue(False)
icmDualBuff = icmComponent.createBooleanSymbol("DUALBUFF", None)
icmDualBuff.setLabel("Enable dual input buffer")
icmDualBuff.setDefaultValue(False)
icmASCD = icmComponent.createBooleanSymbol("ASCD", None)
icmASCD.setLabel("Automatic switch to compare digest")
icmASCD.setDefaultValue(False)
icmBusBurdenControl = icmComponent.createIntegerSymbol("BUS_BURDEN_CONTROL", None)
icmBusBurdenControl.setLabel("Bus Burden Control:")
icmBusBurdenControl.setDefaultValue(0)
icmBusBurdenControl.setMin(0)
icmBusBurdenControl.setMax(15)
icmDisableSecList = icmComponent.createBooleanSymbol("SLBDIS", None)
icmDisableSecList.setLabel("Disable Secondary list branch")
icmDisableSecList.setDefaultValue(False)
icmDisableEndMonitoring = icmComponent.createBooleanSymbol("EOMDIS", None)
icmDisableEndMonitoring.setLabel("Disable End of Monitoring")
icmDisableEndMonitoring.setDefaultValue(False)
icmDisableWriteBack = icmComponent.createBooleanSymbol("WBDIS", None)
icmDisableWriteBack.setLabel("Disable Write Back")
icmDisableWriteBack.setDefaultValue(False)
# up to 4 region descriptor
icmRegionDescriptorMenu = icmComponent.createMenuSymbol("regionDescriptor", None)
icmRegionDescriptorMenu.setLabel("Region Descriptor (up to 4)")
icmRegionDescriptorMenu.setDependencies(adjustRegionDescriptor, ["REGION_DESC_NUM"])
icmRegionDescriptorNumber = icmComponent.createIntegerSymbol("REGION_DESC_NUM", icmRegionDescriptorMenu)
icmRegionDescriptorNumber.setLabel("Number of Region Descriptor:")
icmRegionDescriptorNumber.setDefaultValue(0)
icmRegionDescriptorNumber.setMin(0)
icmRegionDescriptorNumber.setMax(4)
#Create all of the standard filters in a disabled state
for filter in range (4):
RegionDescList.append(icmCreateRegionDesc(icmComponent, icmRegionDescriptorMenu, filter))
############################################################################
#### Dependency ####
############################################################################
# Clock dependency Warning status
icmClkEnComment = icmComponent.createCommentSymbol("ICM_CLOCK_ENABLE_COMMENT", None)
icmClkEnComment.setLabel("Warning!!! " + icmInstanceName.getValue() + " Peripheral Clock is Disabled in Clock Manager")
icmClkEnComment.setVisible(False)
icmClkEnComment.setDependencies(icmClockWarningStatus, ["core." + icmInstanceName.getValue() + "_CLOCK_ENABLE"])
interruptVector = icmInstanceName.getValue() + "_INTERRUPT_ENABLE"
interruptHandler = icmInstanceName.getValue() + "_INTERRUPT_HANDLER"
interruptHandlerLock = icmInstanceName.getValue() + "_INTERRUPT_HANDLER_LOCK"
interruptVectorUpdate = icmInstanceName.getValue() + "_INTERRUPT_ENABLE_UPDATE"
# NVIC Dynamic settings
icminterruptControl = icmComponent.createBooleanSymbol("NVIC_ICM_ENABLE", None)
icminterruptControl.setDependencies(interruptControl, ["INTERRUPT_MODE"])
icminterruptControl.setVisible(False)
# Dependency Status for interrupt
icmIntEnComment = icmComponent.createCommentSymbol("ICM_INTERRUPT_ENABLE_COMMENT", None)
icmIntEnComment.setVisible(False)
icmIntEnComment.setLabel("Warning!!! " + icmInstanceName.getValue() + " Interrupt is Disabled in Interrupt Manager")
icmIntEnComment.setDependencies(InterruptStatusWarning, ["core." + interruptVectorUpdate])
###################################################################################################
####################################### Code Generation ##########################################
###################################################################################################
configName = Variables.get("__CONFIGURATION_NAME")
icmHeaderFile = icmComponent.createFileSymbol("ICM_HEADER", None)
icmHeaderFile.setSourcePath("/peripheral/icm_11105/templates/plib_icm.h.ftl")
icmHeaderFile.setOutputName("plib_" + icmInstanceName.getValue().lower() + ".h")
icmHeaderFile.setDestPath("peripheral/icm/")
icmHeaderFile.setProjectPath("config/" + configName +"/peripheral/icm/")
icmHeaderFile.setType("HEADER")
icmHeaderFile.setMarkup(True)
icmSource1File = icmComponent.createFileSymbol("ICM_SOURCE", None)
icmSource1File.setSourcePath("/peripheral/icm_11105/templates/plib_icm.c.ftl")
icmSource1File.setOutputName("plib_" + icmInstanceName.getValue().lower() + ".c")
icmSource1File.setDestPath("peripheral/icm/")
icmSource1File.setProjectPath("config/" + configName +"/peripheral/icm/")
icmSource1File.setType("SOURCE")
icmSource1File.setMarkup(True)
icmSystemInitFile = icmComponent.createFileSymbol("ICM_INIT", None)
icmSystemInitFile.setType("STRING")
icmSystemInitFile.setOutputName("core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS")
icmSystemInitFile.setSourcePath("/peripheral/icm_11105/templates/system/initialization.c.ftl")
icmSystemInitFile.setMarkup(True)
icmSystemDefFile = icmComponent.createFileSymbol("ICM_DEF", None)
icmSystemDefFile.setType("STRING")
icmSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
icmSystemDefFile.setSourcePath("/peripheral/icm_11105/templates/system/definitions.h.ftl")
icmSystemDefFile.setMarkup(True)
|
nilq/baby-python
|
python
|
"""
Created on 30/9/2015
@author: victor
"""
import sys
from trajectory_comparison.T_Disp_super_batch_analysis import get_folders_for_analysis
import os
import glob
import numpy
def get_num_models(merged_pdb):
models = 0
handler = open(merged_pdb,"r")
for line in handler:
if "MODEL" == line[0:5]:
models += 1
handler.close()
return models
if __name__ == '__main__':
folders = get_folders_for_analysis(sys.argv[1])
base_path = sys.argv[2]
results = {}
expected_data = ["rgyr.jsd",
"sasa.jsd",
"rms_rmsfs",
"acc",
"models_per_h_node"]
ordered_data = ["T","disp","it"]
ordered_data.extend(expected_data)
num_processors = int(sys.argv[3])
num_hours = int(sys.argv[4])
for folder, data in folders:
path = os.path.join(sys.argv[2], folder)
print "Summarizing folder: ", path
key = (int(data[0]), data[1], data[2])
results[key] = {"T":data[0],"disp":data[1],"it":data[2]}
for ext in expected_data:
files = glob.glob(os.path.join(path, "*.%s"%ext))
if len(files) != 1:
print "PROBLEM in %s finding files with extension %s. Num files: %d"%(path, ext, len(files))
else:
results[key][ext] = "%.3f"%numpy.loadtxt(files[0])
try:
merged_pdb = glob.glob(os.path.join(path, "*.pdb"))[0]
acc_steps = get_num_models(merged_pdb)
total_steps = acc_steps / float(results[key]["acc"])
results[key]["models_per_h_node"] = "%.3f"%(total_steps / (num_processors*num_hours))
except:
pass
all_ordered_keys = sorted(results.keys())
for key in all_ordered_keys:
for data_type in ordered_data:
try:
print "%6s "%results[key][data_type],
except KeyError:
print "%6s "%"---",
print
|
nilq/baby-python
|
python
|
# Generated by Django 3.2 on 2021-04-28 04:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0001_initial'),
('lead', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='lead',
name='team',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='leads', to='team.team'),
preserve_default=False,
),
]
|
nilq/baby-python
|
python
|
"""606 · Kth Largest Element II"""
class Solution:
"""
@param nums: an integer unsorted array
@param k: an integer from 1 to n
@return: the kth largest element
"""
def kthLargestElement2(self, nums, k):
# write your code here
import heapq
heap = []
for num in nums:
heapq.heappush(heap, num)
if len(heap) > k:
heapq.heappop(heap)
return heapq.heappop(heap)
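# Quick usage sketch (added for illustration): the size-k min-heap keeps the k largest
# values seen so far, so its root is the kth largest element once all numbers are pushed.
if __name__ == "__main__":
    print(Solution().kthLargestElement2([9, 3, 2, 4, 8], 3))  # expected output: 4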
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import re
import sys
print('please set min_sentence_len: ')
min_sentence_len = int(input())
outfile='namu_extracted_deleted.txt'
max_sentence_len = 9999
if len(sys.argv) > 2:
    max_sentence_len = int(sys.argv[2])
outfile = outfile.rsplit('.')[0] + '_' + str(min_sentence_len) + '.txt'
#not korean.
regex0 = r'[^가-힣\s\.]'
#word with decimals.
regex1 = r'\w*\d\w*'
#word with english.
regex2 = r'\w*[A-Za-z]\w*'
reg2 = r'\.+'
reg_mw = r'\s+'
reg_mn = r'\n+'
epch=100000
total_length=45038943
DMODE = False
line_cnt = 0
print('output file: %s' % outfile)
if os.path.isfile(outfile):
print('output file exists')
sys.exit()
f2= open(outfile, 'w')
with open('namu_extracted.json', 'r') as f:
for i, line in enumerate(f):
if DMODE:
print('=======================')
print('original: ' + line)
r1 = re.sub(regex1, '', line)
if DMODE:
print('r1: ' + r1)
r2 = re.sub(regex2, '', r1)
if DMODE:
print('r2: ' + r2)
r3 = re.sub(regex0, '', r2)
if DMODE:
print('r3: ' + r3)
t= re.sub(r'\n', '', r3)
if DMODE:
print('remove newline: ' + t)
        t = re.sub(r'\.+', '\n', t)  # operate on the newline-stripped text, not the raw r3
if DMODE:
print('remove multiple dots to new line: ' + t)
#t= t.replace('.','\n')
t= re.sub(r'\ +', ' ', t)
if DMODE:
            print('remove multiple whitespace: ' + t)
#t= re.sub(reg_mn, '', t)
t= re.sub(r'\ *\n+\ *', '\n', t)
if DMODE:
print('remove starting space: ' + t)
#t= re.search(r'\n*(.*)\n*', t).group(1)
t= re.search(r'\s*(.*)\s*', t).group(1)
if len(t) >= min_sentence_len and len(t) < max_sentence_len:
f2.write(t + '\n')
line_cnt += 1
#print(str(len(x)),x+'\n', end='')
if DMODE:
            print('\nfinal: ' + t)
break
if i%epch==0:
print('epch '+str(i) + '/' + str(total_length) + ':' + t + ' - ' + str(len(t)))
print('line count: %d' % line_cnt)
f2.close()
print('done: sentence count: ' + str(line_cnt))
|
nilq/baby-python
|
python
|
""" Test brainspace.utils.parcellation """
import pytest
import numpy as np
from brainspace.utils import parcellation as parc
parametrize = pytest.mark.parametrize
testdata_consecutive = [
# default start_from = 0 and dtype
(np.array([1, 3, 3, 2, 2, 2], dtype=np.int),
{},
np.array([0, 2, 2, 1, 1, 1], dtype=np.int)),
# default start_from = 0 and dtype
(np.array([1, 3, 3, 2, 2, 2], dtype=np.uint8),
{'start_from': 0},
np.array([0, 2, 2, 1, 1, 1], dtype=np.uint8)),
# default start_from = 1 and dtype
(np.array([1, 3, 3, 2, 2, 2], dtype=np.float),
{'start_from': 1},
np.array([1, 3, 3, 2, 2, 2], dtype=np.float)),
]
testdata_relabel = [
# default new_labels = None => consecutive
(np.array([1, 3, 3, 2, 2, 2], dtype=np.int),
{},
np.array([0, 2, 2, 1, 1, 1], dtype=np.int)),
# with new_labels as array
(np.array([1, 3, 3, 2, 2, 2], dtype=np.uint8),
{'new_labels': np.array([2, 2, 3])},
np.array([2, 3, 3, 2, 2, 2], dtype=np.uint8)),
# without some labels
(np.array([1, 3, 3, 2, 2, 2], dtype=np.uint8),
{'new_labels': np.array([2, 3])},
np.array([2, 3, 3, 3, 3, 3], dtype=np.uint8)),
# with new_labels as dict
(np.array([1, 3, 3, 2, 2, 2], dtype=np.float),
{'new_labels': {1: 0, 2: 4, 3: 1}},
np.array([0, 1, 1, 4, 4, 4], dtype=np.float)),
# without some labels
(np.array([1, 3, 3, 2, 2, 2], dtype=np.float),
{'new_labels': {1: 0, 3: 1}},
np.array([0, 1, 1, 2, 2, 2], dtype=np.float)),
]
testdata_correspondence = [
# dict correspondence
(np.array([1, 3, 3, 2, 2, 2], dtype=np.int),
np.array([0, 2, 2, 1, 1, 1], dtype=np.int),
{1: 0, 3: 2, 2: 1}),
# dict correspondence with more input labels
(np.array([3, 1, 1, 2, 2, 2], dtype=np.uint8),
np.array([2, 3, 3, 2, 2, 2], dtype=np.uint8),
{1: 3, 2: 2}),
# dict correspondence with more ref labels
(np.array([3, 1, 1, 2, 2, 2], dtype=np.float),
np.array([4, 3, 3, 6, 1, 1], dtype=np.float),
{1: 3, 2: 1, 3: 4}),
]
testdata_overlap = [
# overlap
(np.array([1, 3, 3, 2, 2, 2], dtype=np.int),
np.array([0, 2, 2, 1, 1, 1], dtype=np.int),
np.array([0, 2, 2, 1, 1, 1], dtype=np.int)),
# overlap with more input labels -> remaining with consecutive
(np.array([3, 1, 1, 2, 2, 2], dtype=np.uint8),
np.array([2, 3, 3, 2, 2, 2], dtype=np.uint8),
np.array([4, 3, 3, 2, 2, 2], dtype=np.uint8)),
# overlap with more ref labels
(np.array([3, 1, 1, 2, 2, 2], dtype=np.float),
np.array([4, 3, 3, 6, 1, 1], dtype=np.float),
np.array([4, 3, 3, 1, 1, 1], dtype=np.float))
]
testdata_map_mask = [
# with default fill=0
(np.array([1, 3, 3, 2], dtype=np.int),
np.array([0, 0, 1, 1, 1, 1], dtype=np.bool),
{},
np.array([0, 0, 1, 3, 3, 2], dtype=np.int),
None),
# raises ValueError is integer and fill=nan
(np.array([1, 3, 3, 2], dtype=np.int),
np.array([0, 0, 1, 1, 1, 1], dtype=np.bool),
{'fill': np.nan},
np.array([0, 0, 1, 3, 3, 2], dtype=np.int),
ValueError),
# test default axis=0
(np.array([[1, 3, 3, 2], [3, 4, 4, 0]], dtype=np.float),
np.array([1, 0, 0, 1, 1, 1], dtype=np.bool),
{'fill': np.nan},
np.array([[1, np.nan, np.nan, 3, 3, 2],
[3, np.nan, np.nan, 4, 4, 0]], dtype=np.float),
None),
# test axis=1
(np.array([[1, 3, 3, 2], [3, 4, 4, 0]], dtype=np.float),
np.array([1, 0, 1], dtype=np.bool),
{'fill': np.nan, 'axis': 1},
np.array([[1, 3, 3, 2],
[np.nan, np.nan, np.nan, np.nan],
[3, 4, 4, 0]], dtype=np.float),
None),
]
testdata_map_labels = [
# test defaults
(np.array([1, 2, 3], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{},
np.array([2, 2, 3, 3, 1, 1], dtype=np.float),
None),
# test defaults small labels
(np.array([1, 2, 3], dtype=np.float),
np.array([5, 6], dtype=np.int),
{},
np.array([1, 2], dtype=np.float),
None),
# test default fill=0
(np.array([2, 1, 3], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'mask': np.array([1, 1, 1, 0, 0, 1], dtype=np.bool)},
np.array([1, 1, 3, 0, 0, 2], dtype=np.float),
None),
# test default fill=np.nan with int
(np.array([2, 1, 3], dtype=np.int),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'mask': np.array([1, 1, 1, 0, 0, 1], dtype=np.bool), 'fill': np.nan},
np.array([1, 1, 3, 0, 0, 2], dtype=np.int),
ValueError),
# test source_lab
(np.array([2, 1, 3], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'mask': np.array([1, 1, 1, 0, 0, 1], dtype=np.bool), 'fill': np.nan,
'source_lab': np.array([2, 1, 0])},
np.array([1, 1, 2, np.nan, np.nan, 3], dtype=np.float),
None),
# test source_lab.size != source_val.size
(np.array([2, 1, 3], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'mask': np.array([1, 1, 1, 0, 0, 1], dtype=np.bool), 'fill': np.nan,
'source_lab': np.array([2, 1])},
np.array([1, 1, 2, np.nan, np.nan, 3], dtype=np.float),
ValueError),
# test (unique source_lab).size != source_val.size
(np.array([2, 1, 3], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'mask': np.array([1, 1, 1, 0, 0, 1], dtype=np.bool), 'fill': np.nan,
'source_lab': np.array([2, 1, 2])},
np.array([1, 1, 2, np.nan, np.nan, 3], dtype=np.float),
ValueError),
# test (unique source_lab).size != source_val.size
pytest.param(np.array([2, 1, 3], dtype=np.float),
np.array([1, 1, 2, 2, 1, 0], dtype=np.int),
{'mask': np.array([1, 1, 1, 0, 0, 1], dtype=np.bool),
'fill': np.nan,
'source_lab': np.array([2, 1, 0])},
np.array([1, 1, 2, np.nan, np.nan, 1], dtype=np.float),
None,
marks=pytest.mark.xfail),
]
testdata_reduce = [
# test defaults
(np.array([1, 2, 3, 4, 5, 6], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{},
np.array([5.5, 1.5, 3.5], dtype=np.float),
None),
# test weights
(np.array([1, 2, 3, 4, 5, 6], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'weights': np.array([1, 1, 2, 1, 1, 2])},
np.array([17/3, 1.5, 10/3], dtype=np.float),
None),
# Test target labels
(np.array([1, 2, 3, 4, 5, 6], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'target_labels': np.array([2, 1, 0])},
np.array([3.5, 1.5, 5.5], dtype=np.float),
None),
# Test target labels small
(np.array([1, 2, 3, 4, 5, 6], dtype=np.float),
np.array([1, 1, 2, 2, 0, 0], dtype=np.int),
{'target_labels': np.array([2, 1])},
np.array([3.5, 1.5], dtype=np.float),
None),
# Test red_op
(np.array([1, 2, 2, 5, 5, 6], dtype=np.int),
np.array([1, 1, 1, 0, 0, 0], dtype=np.int),
{'red_op': 'mode', 'dtype': np.int},
np.array([5, 2], dtype=np.int),
None),
# Test default axis=0
(np.array([[1, 2, 2, 5], [6, 6, 7, 8]], dtype=np.int),
np.array([1, 1, 1, 0], dtype=np.int),
{'red_op': 'mode', 'dtype': np.int},
np.array([[5, 2], [8, 6]], dtype=np.int),
None),
# Test default axis=1
(np.array([[1, 2, 2, 5], [6, 4, 7, 8], [6, 4, 7, 5]], dtype=np.int),
np.array([0, 0, 0], dtype=np.int),
{'red_op': 'mode', 'dtype': np.int, 'axis': 1},
np.array([[6, 4, 7, 5]], dtype=np.int),
None),
# Test red_op callable
(np.array([[1, 2, 2, 5], [6, 4, 7, 8], [6, 4, 7, 5]], dtype=np.int),
np.array([0, 0, 0], dtype=np.int),
{'red_op': lambda x, w: np.mean(x), 'axis': 1},
np.array([[13/3, 10/3, 16/3, 18/3]], dtype=np.float),
None),
]
@parametrize('lab, kwds, out', testdata_consecutive)
def test_consecutive(lab, kwds, out):
res = parc.relabel_consecutive(lab, **kwds)
assert np.all(res == out)
assert res.dtype == out.dtype
@parametrize('lab, kwds, out', testdata_relabel)
def test_relabel(lab, kwds, out):
res = parc.relabel(lab, **kwds)
assert np.all(res == out)
assert res.dtype == out.dtype
@parametrize('lab1, lab2, out', testdata_correspondence)
def test_label_correspondence(lab1, lab2, out):
res = parc.find_label_correspondence(lab1, lab2)
assert res == out
@parametrize('lab, ref_lab, out', testdata_overlap)
def test_overlap(lab, ref_lab, out):
res = parc.relabel_by_overlap(lab, ref_lab)
assert np.all(res == out)
assert res.dtype == out.dtype
@parametrize('lab, mask, kwds, out, expects', testdata_map_mask)
def test_map_to_mask(lab, mask, kwds, out, expects):
if expects:
with pytest.raises(expects):
parc.map_to_mask(lab, mask, **kwds)
else:
res = parc.map_to_mask(lab, mask, **kwds)
        assert np.all((res == out) | (np.isnan(res) & np.isnan(out)))
assert res.dtype == out.dtype
assert res.shape == out.shape
@parametrize('source_lab, target_lab, kwds, out, expects', testdata_map_labels)
def test_map_to_labels(source_lab, target_lab, kwds, out, expects):
if expects:
with pytest.raises(expects):
parc.map_to_labels(source_lab, target_lab, **kwds)
else:
res = parc.map_to_labels(source_lab, target_lab, **kwds)
        assert np.all((res == out) | (np.isnan(res) & np.isnan(out)))
assert res.dtype == out.dtype
@parametrize('values, labels, kwds, out, expects', testdata_reduce)
def test_reduce(values, labels, kwds, out, expects):
if expects:
with pytest.raises(expects):
parc.reduce_by_labels(values, labels, **kwds)
else:
res = parc.reduce_by_labels(values, labels, **kwds)
assert np.allclose(res, out)
assert res.dtype == out.dtype
assert res.shape == out.shape
|
nilq/baby-python
|
python
|
from dataset import RailData
import torch
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from multiprocessing import cpu_count
import pathlib
from tqdm import tqdm
from wcid import NetSeq
import sys
from validation.metrics import calculate_metrics
import os
import colorama
from colorama import Fore, Back, Style
from p_logging import val_logging
from torchsummary import summary
from torchvision import datasets
import datetime
def train(
train_img,
train_msk,
val_img,
val_msk,
res_scale=0.1,
epochs=5,
bs=1,
lr=1e-3,
weights_pth=None,
):
"""
:param train_img: Path to training images.
:param train_msk: Path to training masks.
:param val_img: Path to validation images.
:param val_msk: Path to validation masks.
:param res_scale: Scale height and width of image.
:param epochs: Training epochs.
:param bs: Batch size.
:param lr: Learning rate
:param weights_pth: Path to weights from previous training.
:return: None.
"""
# Training start time
start_datetime = datetime.datetime.now()
# Computing device
# dev = "cuda" if torch.cuda.is_available() else "cpu"
dev = "cpu"
# Instance of neural network
net = NetSeq()
net = net.to(dev)
# Prepare data parallel
# net = nn.DataParallel(net)
# Load weights
if weights_pth is not None:
net.load_state_dict(torch.load(weights_pth, map_location=dev))
weight_file_name = os.path.basename(weights_pth)
weight_file_name = os.path.splitext(weight_file_name)[-2]
start_epoch = int(weight_file_name.replace("CP_epoch", ""))
print(f"Continue training in epoch {start_epoch + 1}")
else:
start_epoch = 0
# Training and validation Dataset
train_dataset = RailData(train_img, train_msk, res_scale, transform=True)
val_dataset = RailData(val_img, val_msk, res_scale)
# Length of training and validation Dataset
n_train = len(train_dataset)
n_val = len(val_dataset)
# Create data loader
cpus = cpu_count()
train_loader = DataLoader(
train_dataset,
batch_size=bs,
shuffle=True,
num_workers=cpus,
pin_memory=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=bs,
shuffle=False,
num_workers=cpus,
pin_memory=True,
drop_last=True,
)
# Optimizer and learning rate scheduler
# optimizer = optim.RMSprop(net.parameters(), lr=lr, momentum=0.9) # weight_decay=1e-8
optimizer = optim.Adam(net.parameters(), lr=0.00001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, "max", patience=100, verbose=True
)
# Loss function (binary cross entropy)
criterion = nn.BCEWithLogitsLoss()
overall_batches = 0
last_val_loss = float("inf")
# Training loop
for epoch in range(start_epoch, epochs + start_epoch):
net.to(dev)
net.train()
epoch_loss = 0
desc = f"Epoch {epoch + 1}/{epochs}"
# Epoch progress bar
with tqdm(total=n_train, desc=desc, leave=False, position=0) as bar:
# Training batches
for batch in train_loader:
# Increment bar by batch size
bar.update(bs)
# Get images from batch
images = batch["image"]
masks = batch["mask"]
# Load images and masks to computing device
images = images.to(device=dev, dtype=torch.float32)
masks = masks.to(device=dev, dtype=torch.float32)
# print(f"{images.device=}")
# print(f"{masks.device=}")
# print(f"{next(net.parameters()).device=}")
# Predict masks from images
prediction = net(images)
# Calculate loss
loss = criterion(prediction, masks)
# Accumulate batch loss to epoch loss
epoch_loss += loss.item()
# Clear old gradients and loss backpropagation
optimizer.zero_grad()
loss.backward()
# nn.utils.clip_grad_value_(net.parameters(), 0.1) # Why???
optimizer.step()
# Increase batches counter
overall_batches += 1
# Validate 10 times per epoch with validation set
if False: # overall_batches % (n_train // (10 * bs)) == 0:
val_loss = 0
iou, f1, acc, pre, rec = 0, 0, 0, 0, 0
# Set neural net to evaluation state
net.eval()
for val_batch in val_loader:
# Get images from batch
images = val_batch["image"]
masks = val_batch["mask"]
# Load images and masks to computing device
images = images.to(device=dev, dtype=torch.float32)
masks = masks.to(device=dev, dtype=torch.float32)
# Predict validation batch (no gradients needed)
with torch.no_grad():
prediction = net(images)
# Calculate validation loss
criterion = nn.BCEWithLogitsLoss()
# Validation loss
loss = criterion(prediction, masks)
val_loss += loss
# Force prediction between 0 and 1
# prediction = torch.sigmoid(prediction)
# Threshold at 0.5 between 0 and 1
prediction = prediction > 0.5
# TODO: Validation metrics
metrics = calculate_metrics(prediction, masks)
iou += metrics["iou"]
f1 += metrics["f1"]
acc += metrics["acc"]
pre += metrics["pre"]
rec += metrics["rec"]
# Normalize Validation metrics
val_loss /= n_val
iou /= n_val
f1 /= n_val
acc /= n_val
pre /= n_val
rec /= n_val
# Validation message
sys.stdout.write("\r\033[K")
val_msg = f" Validated with "
val_msg += f"IoU: {iou:.1f} F1: {f1:.2f} ACC: {acc:.2f}"
val_msg += f" Pre: {pre:.2f} Rec: {rec:.2f}"
val_msg += f" Lss: {val_loss:.3e} ✓"
val_msg += f" {(Fore.RED + '↑') if val_loss > last_val_loss else (Fore.GREEN +'↓')}"
last_val_loss = val_loss
print(val_msg)
# Validation logg
logg_file_pth = os.path.join(
"loggs/", f"{start_datetime.isoformat()}.csv"
)
val_logging.val_metrics_logger(metrics, logg_file_pth)
scheduler.step(epoch_loss / n_train)
epoch_msg = (
f"Trained epoch {epoch + 1:02d} with loss {epoch_loss / n_train:.3e} "
)
epoch_msg += f"at learning rate {optimizer.param_groups[0]['lr']:.3e} ✓"
print(epoch_msg)
# Save weights every epoch
weight_pth = "weight/"
pathlib.Path(weight_pth).mkdir(parents=True, exist_ok=True)
net.to("cpu")
torch.save(net.state_dict(), weight_pth + f"CP_epoch{epoch + 1}.pth")
net.to(dev)
def main():
colorama.init(autoreset=True)
# """
train_img = "/media/flo/External/files_not_unter_backup/nlb/smr/nlb_summer/img_h/trn_0/"
train_msk = "/media/flo/External/files_not_unter_backup/nlb/smr/nlb_summer/msk_track_bin/png_uint8_h/trn_0/"
val_img = "/media/flo/External/files_not_unter_backup/nlb/smr/nlb_summer/img_h/val_0/"
val_msk = "/media/flo/External/files_not_unter_backup/nlb/smr/nlb_summer/msk_track_bin/png_uint8_h/val_0/"
weights_pth = None # "weight/CP_epoch26.pth"
train(
train_img,
train_msk,
val_img,
val_msk,
res_scale=0.2,
epochs=80000,
bs=1,
lr=1e-0,
weights_pth=weights_pth,
)
"""
model = NetSeq()
summary(model, (3, 160, 320), device="cpu", col_names=["input_size", "output_size", "num_params"])
"""
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import sys, os, traceback, itertools, tempfile
from os import walk
import json
import subprocess32 as subprocess
from pyparsing import *
from common import *
import problems
class InconsistentPredicateException(Exception):
pass
"""
check_solution receives json of that form
{
"task_id" : 8xyz_uuid,
"problem_id" : 15asfba_uuid,
"preds": [
{
"assignment": "v1 == v0 % 2",
"args": [
"v0",
"v1"
],
"name": "IsOdd"
}
]
}
in the form of a dictionary and the path where all the
task and problem files are.
First it checks if any of the assignments is inconsistent. If so,
it throws an InconsistentPredicateException.
Then it checks if the clauses are valid under the assignment and
returns a list of integers with one entry per clause where 1 means
the clause is valid, and 0 means it is not or couldn't be solved.
"""
def check_solution(solution, sol_dir):
task = load_task(sol_dir, solution[task_id_key])
# check for each clause individually if the assignment makes it valid
valid_clauses = []
create_princess_tautology_check(solution)
for clause in task[clauses_key]:
output = dict()
with tempfile.NamedTemporaryFile(mode='w', suffix='.pri') as pri_file:
create_princess_file(sol_dir, solution, [clause], pri_file)
pri_file.flush()
output = run_cmd([princess_command, "-timeout=1000", "-clausifier=simple", pri_file.name])
log.info("Output of princess: %s", str(output))
valid_clauses += [0]
if parse_princess_output(output) == True:
valid_clauses[-1] = 1
# print("{}/{} clauses valid".format(valid_clauses, len(task[clauses_key])))
return valid_clauses
# =========== helper methods for check_solution =============
def parse_princess_output(output):
if output and 'output' in output:
for line in output['output'].splitlines():
if line.rstrip() == "VALID":
return True
elif line.rstrip().startswith("ERROR"):
raise SyntaxError, line
return False
def create_princess_tautology_check(solution):
res = []
for pred in solution[predicate_key]:
lines = list()
lines.append("\\predicates {")
#conj with &
type_sig=""
comma = ""
for arg in pred["args"]:
type_sig+=comma
comma = ", "
type_sig+="int "+arg
lines.append(" {}({});".format(pred["name"], type_sig))
lines.append("}")
lines.append("\\functions {")
#conj with &
type_sig="int "
comma = ""
for arg in pred["args"]:
type_sig+=comma
comma = ", "
type_sig+=arg
lines.append("{};".format(type_sig))
lines.append("}")
lines.append("\\problem {")
lines.append(pred["assignment"])
lines.append("-> false ")
lines.append("}")
output = None
with tempfile.NamedTemporaryFile(mode='w', suffix='.pri') as pri_file:
pri_file.write("\n".join(lines))
pri_file.flush()
output = run_cmd([princess_command, "-timeout=1000", "-clausifier=simple", pri_file.name])
if parse_princess_output(output):
raise InconsistentPredicateException, pred["name"]
"""
creates a pri file to check with princess if the user provided
predicates make all clauses valid.
"""
def create_princess_file(sol_dir, solution, list_of_clauses, out_file):
lines = list()
lines.append("\\predicates {")
#TODO IsOdd(int, int);
for pred in solution[predicate_key]:
#conj with &
type_sig=""
comma = ""
for arg in pred["args"]:
type_sig+=comma
comma = ", "
type_sig+="int "+arg
lines.append(" {}({}) {{ {} }};".format(pred["name"], type_sig, pred["assignment"]))
lines.append("}")
lines.append("\\problem {")
conj = ""
for clause in list_of_clauses:
lines.append(conj + clause)
conj = "& "
# \forall int v0; \forall int v1; (v1 >= 2 | -1 >= v1 | 0 >= v0 | IsOdd(1 + v0, v1))
lines.append("}")
text = "\n".join(lines)
#print text
out_file.write(text)
#======== check solution against SMT file ========
"""
Takes a user-provided solution and re-runs the Horn solver
with this solution as a hint.
It call the same method problems.check_smt_file that we use
to generate problems.
"""
def check_solution_against_smt_file(sol, problem_dir, base_dir, generate=True):
probl = load_problem(problem_dir, sol[problem_id_key])
hint_file_name = create_tuple_file_from_solution(sol)
smt_file_name = os.path.join(base_dir, probl["smt_file"])
return problems.check_smt_file(smt_file_name, problem_dir, timeout=10, hint_file=hint_file_name, problem=probl, generate=generate)
"""
ONLY UTILITY METHODS BELOW THIS POINT
"""
# returns the name of the tuple file.
def create_tuple_file_from_solution(sol):
cegar_list = []
for pred in sol[predicate_key]:
pri_string = "\\functions {\n"
pri_string += "int "
comma = ""
for arg in pred["args"]:
pri_string+=comma + arg
comma = ", "
pri_string +=";\n}\n"
pri_string += "\\problem { !(\n" + pred["assignment"] +"\n)}\n"
with tempfile.NamedTemporaryFile(mode='w', suffix='.pri') as pri_file:
pri_file.write(pri_string)
pri_file.flush()
smt_file = tempfile.NamedTemporaryFile(delete=False, suffix=".smt2")
output = run_cmd([princess_command, "-timeout=0", pri_file.name, "-printSMT={}".format(smt_file.name)])
cegar_string = "(initial-predicates "
cegar_string += pred["name"]+"("
for arg in pred["args"]:
cegar_string +="(" + arg +" Int)"
cegar_string += ")"
cegar_string += get_assertion_line_from_file(smt_file.name)
cegar_string += ")"
cegar_list += [cegar_string]
os.unlink(smt_file.name)
print ("\n".join(cegar_list))
tpl_file = tempfile.NamedTemporaryFile(delete=False, suffix=".tpl")
tpl_file.write("\n".join(cegar_list))
tpl_file.close()
return tpl_file.name
## only boiler plate below this point ##
def get_assertion_line_from_file(smt_file_name):
with open(smt_file_name, "r") as f:
data = "({})".format(f.read())
for outer in nestedExpr(opener='(', closer=')').parseString(data):
for el in outer:
if el[0]=="assert":
return print_ptree(el[1])
def print_ptree(ptree):
if isinstance(ptree, basestring):
return ptree
ret = "("
space = ""
for el in ptree:
ret += space + print_ptree(el)
space = " "
ret+=")"
return ret
def make_test_solution():
solution = dict()
solution[task_id_key] = "97e5ee774a4c66c579276d0644a3d6b5172afd9b069c4809f0e4041b"
solution[problem_id_key] = "c4178476de99aae26ccf3ffcd85dfcffcfbe5cb0610c29b4a046ed80"
solution[predicate_key] = list()
pred = dict()
pred["assignment"] = "3>v0"
pred["args"] = ["v0", "v1"]
pred["name"] = "IsOdd"
solution["preds"].append(pred)
return solution
if __name__ == "__main__":
if len(sys.argv)<2:
print("Requires json file dir")
sys.exit()
if not os.path.isdir(sys.argv[1]):
print("Json dir not a directory: {}".format(sys.argv[1]))
sys.exit()
print check_solution(make_test_solution(), sys.argv[1])
|
nilq/baby-python
|
python
|
# Time: O(n * 2^n)
# Space: O(n), longest possible path in tree, which is if all numbers are increasing.
# Given an integer array, your task is
# to find all the different possible increasing
# subsequences of the given array,
# and the length of an increasing subsequence should be at least 2 .
#
# Example:
# Input: [4, 6, 7, 7]
# Output: [[4, 6], [4, 7], [4, 6, 7], [4, 6, 7, 7], [6, 7], [6, 7, 7], [7,7], [4,7,7]]
# Note:
# The length of the given array will not exceed 15.
# The range of integer in the given array is [-100,100].
# The given array may contain duplicates,
# and two equal integers should also be considered as a special case of increasing sequence.
class Solution(object):
def findSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def findSubsequencesHelper(nums, pos, seq, result):
if len(seq) >= 2:
result.append(list(seq))
lookup = set()
for i in xrange(pos, len(nums)):
if (not seq or nums[i] >= seq[-1]) and \
nums[i] not in lookup:
lookup.add(nums[i])
seq.append(nums[i])
findSubsequencesHelper(nums, i+1, seq, result)
seq.pop()
result, seq = [], []
findSubsequencesHelper(nums, 0, seq, result)
return result
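# Usage sketch (added for illustration), matching the example in the header comment above.
# The ordering of the returned subsequences depends on the DFS traversal, not on the listing.
if __name__ == "__main__":
    print(Solution().findSubsequences([4, 6, 7, 7]))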
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
from typing import List
from csw.Parameter import Parameter
@dataclass
class CommandResponse:
"""
Type of a response to a command (submit, oneway or validate).
Note that oneway and validate responses are limited to Accepted, Invalid or Locked.
"""
runId: str
def _asDict(self):
"""
        Returns: dict
            a dictionary corresponding to this object
"""
return {
"_type": self.__class__.__name__,
'runId': self.runId,
}
@dataclass
class Cancelled(CommandResponse):
"""Represents a negative response that describes the cancellation of command"""
pass
@dataclass
class Accepted(CommandResponse):
"""Represents a final response stating acceptance of a command received"""
pass
@dataclass
class Error(CommandResponse):
"""Represents a negative response that describes an error in executing the command"""
message: str
def _asDict(self):
"""
Returns: dict
a dictionary corresponding to this object
"""
return {
"_type": self.__class__.__name__,
'runId': self.runId,
'message': self.message
}
@dataclass
class Locked(CommandResponse):
"""Represents a negative response stating that a component is Locked and command was not validated or executed"""
message: str
def _asDict(self):
"""
Returns: dict
a dictionary corresponding to this object
"""
return {
"_type": self.__class__.__name__,
'runId': self.runId,
'message': self.message
}
@dataclass
class Started(CommandResponse):
"""Represents an intermediate response stating a long running command has been started"""
message: str
def _asDict(self):
"""
Returns: dict
a dictionary corresponding to this object
"""
return {
"_type": self.__class__.__name__,
'runId': self.runId,
'message': self.message
}
@dataclass
class Result:
"""A result containing parameters for command response"""
paramSet: List[Parameter]
# noinspection PyProtectedMember
def _asDict(self):
"""
Returns: dict
a dictionary corresponding to this object
"""
return {
'paramSet': list(map(lambda p: p._asDict(), self.paramSet))
}
@dataclass
class Completed(CommandResponse):
"""Represents a positive response stating completion of command"""
result: Result = Result([])
# noinspection PyProtectedMember
def _asDict(self):
"""
Returns: dict
a dictionary corresponding to this object
"""
return {
"_type": self.__class__.__name__,
'runId': self.runId,
'result': self.result._asDict()
}
# --- Invalid ---
@dataclass
class CommandIssue:
"""Describes a command issue with appropriate reason for validation failure"""
reason: str
class IdNotAvailableIssue(CommandIssue):
"""Returned when a CommandResponse associated with runId is not available"""
class HCDBusyIssue(CommandIssue):
"""Returned when the HCD is busy and can't process a command"""
class WrongCommandTypeIssue(CommandIssue):
"""Returned when some given command type is not expected"""
class MissingKeyIssue(CommandIssue):
"""Returned when a command is missing a required key/parameter"""
class WrongPrefixIssue(CommandIssue):
"""Returned when an Assembly receives a configuration with a prefix that it doesn't support"""
class WrongParameterTypeIssue(CommandIssue):
"""Returned when the parameter for a key is not the correct type (i.e. int vs double, etc.)"""
class WrongUnitsIssue(CommandIssue):
"""Returned when a parameter value does not have the correct units"""
class WrongNumberOfParametersIssue(CommandIssue):
"""Returned when a command does not have the correct number of parameters"""
class AssemblyBusyIssue(CommandIssue):
"""Returned when an Assembly receives a command and one is already executing"""
class UnresolvedLocationsIssue(CommandIssue):
"""Returned when some required location is not available"""
class ParameterValueOutOfRangeIssue(CommandIssue):
"""Parameter of a command is out of range"""
class WrongInternalStateIssue(CommandIssue):
"""The component is in the wrong internal state to handle a command"""
class UnsupportedCommandInStateIssue(CommandIssue):
"""A command is unsupported in the current state"""
class UnsupportedCommandIssue(CommandIssue):
"""A command is unsupported by component"""
class RequiredServiceUnavailableIssue(CommandIssue):
"""A required service is not available"""
class RequiredHCDUnavailableIssue(CommandIssue):
"""A required HCD is not available"""
class RequiredAssemblyUnavailableIssue(CommandIssue):
"""A required Assembly is not available"""
class RequiredSequencerUnavailableIssue(CommandIssue):
    """A required Sequencer is not available"""
class OtherIssue(CommandIssue):
    """Returned when some other issue occurred apart from those already defined"""
@dataclass
class Invalid(CommandResponse):
issue: CommandIssue
def _asDict(self):
"""
Returns: dict
a dictionary for this object
"""
return {
"_type": self.__class__.__name__,
'runId': self.runId,
'issue': {
"_type": self.issue.__class__.__name__,
"reason": self.issue.reason
}
}
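# Illustrative sketch (not part of the CSW API itself): build two responses and print the
# wire-format dictionaries produced by _asDict(). Parameters are omitted, so Completed
# falls back to its default empty Result.
if __name__ == "__main__":
    print(Completed("run-1")._asDict())
    print(Invalid("run-2", MissingKeyIssue("missing key 'filter'"))._asDict())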
|
nilq/baby-python
|
python
|
import datetime
import json
import os
import time
import requests
STIX_TAXII_URL = 'http://54.244.134.70/api'
DOMAINS_URL = STIX_TAXII_URL + '/domains'
IPS_URL = STIX_TAXII_URL + '/ips'
class api():
def getInfo(self, firstrun=True):
"""
Get a list of bad domains and IPs.
@param firstrun: If true, fetch all data, otherwise only go back the last ten days.
"""
domainsurl = DOMAINS_URL
ipsurl = IPS_URL
if not firstrun:
tendaysago = '/' + datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(days=10), '%Y%m%d')
domainsurl += tendaysago
ipsurl += tendaysago
try:
            domains = requests.get(domainsurl, timeout=10)
            ips = requests.get(ipsurl, timeout=10)
return domains.json() + ips.json()
except requests.exceptions.Timeout:
print('ERROR: TIMEOUT! Check If You Are Whitelisted with the MS-ISAC. Please Contact indicator.sharing@cisecurity.org')
if __name__ == '__main__':
info = api().getInfo(False)
for i in info:
print(i)
|
nilq/baby-python
|
python
|
from srcs.parser.tokens.abstract_token import AbstractToken
class OpenBracketToken(AbstractToken):
pass
|
nilq/baby-python
|
python
|
# coding=utf-8
from django import forms
class QueueSearchForm(forms.Form):
key = forms.CharField(label=u'KEY', required=False)
sender = forms.CharField(label=u'发件人', required=False)
recipients = forms.CharField(label=u'收件人', required=False)
senderip = forms.CharField(label=u'发件IP', required=False)
|
nilq/baby-python
|
python
|
from .colors import Colors
import contextlib
import functools
import subprocess
TERMINAL_ENVIRONMENT_VAR = '_NC_TERMINAL_COLOR_COUNT'
SIZES = 256, 16, 8
def context(fg=None, bg=None, print=print, count=None):
return Context(count)(fg, bg, print)
@functools.lru_cache()
def color_count():
cmd = 'tput', 'colors'
try:
count = int(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
except subprocess.CalledProcessError: # pragma: no cover
return 0
return next((s for s in SIZES if count >= s), 0)
class _Context:
def __init__(self, count=None):
count = color_count() if count is None else count
if count:
self.colors = Colors('terminal%s' % count)
palette = self.colors._palettes[0]
codes = palette['CODES']
self.CODES = {self.colors[k]: v for k, v in codes.items()}
self.fg = palette['fg']
self.bg = palette['bg']
else:
self.colors = None
def __bool__(self):
return bool(self.colors)
def __len__(self):
return self.colors and len(self.colors) or 0
def print_codes(self, *codes, print=print):
result = '\x1b[%sm' % ';'.join(str(c) for c in codes)
print(result, end='')
@contextlib.contextmanager
def __call__(self, fg=None, bg=None, print=print):
def color_codes(color, coder):
if not color:
return ()
closest = self.colors.closest(color)
return coder(self.CODES[closest])
if self and (fg or bg):
codes = color_codes(fg, self.fg) + color_codes(bg, self.bg)
self.print_codes(*codes, print=print)
try:
yield
finally:
self.print_codes(print=print)
else:
yield
Context = functools.lru_cache()(_Context)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import ctypes
import gc
import logging
import multiprocessing
import os
import queue
import threading
import time
import unittest
import ringbuffer
class SlotArrayTest(unittest.TestCase):
def setUp(self):
self.array = ringbuffer.SlotArray(slot_bytes=20, slot_count=10)
def test_read_empty(self):
for data in self.array:
self.assertEqual(b'', data)
def test_read_write(self):
self.array[0] = b'hello'
self.array[1] = b''
self.array[5] = b'how are'
self.array[9] = b'you doing today?'
self.assertEqual(b'hello', self.array[0])
self.assertEqual(b'', self.array[1])
self.assertEqual(b'how are', self.array[5])
self.assertEqual(b'you doing today?', self.array[9])
def test_write_too_big(self):
try:
self.array[3] = b'asdfkljasdlfkajsflkjasdfasdfkljasdf'
self.fail()
except ringbuffer.DataTooLargeError:
pass
class TestException(Exception):
pass
class ReadersWriterLockTest(unittest.TestCase):
def setUp(self):
self.lock = ringbuffer.ReadersWriterLock()
self.assert_unlocked()
self.result_queues = {}
def assert_unlocked(self):
self.assertEqual(0, self.lock.readers.value)
self.assertFalse(self.lock.writer.value)
def assert_readers(self, count):
self.assertEqual(count, self.lock.readers.value)
self.assertFalse(self.lock.writer.value)
def assert_writer(self):
self.assertEqual(0, self.lock.readers.value)
self.assertTrue(self.lock.writer.value)
def reader_count(self):
return self.lock.readers.value
    def run_async(self, func):  # renamed from `async`, which is a reserved word since Python 3.7
def wrapper(result_queue):
result = func()
result_queue.put(result)
result_queue = multiprocessing.Queue()
process = multiprocessing.Process(
target=wrapper,
args=(result_queue,))
self.result_queues[process] = result_queue
process.start()
return process
def get_result(self, process):
process.join()
return self.result_queues[process].get()
def test_read_then_write(self):
with self.lock.for_read():
self.assert_readers(1)
self.assert_unlocked()
with self.lock.for_write():
self.assert_writer()
self.assert_unlocked()
def test_reentrant_readers(self):
with self.lock.for_read():
self.assert_readers(1)
with self.lock.for_read():
self.assert_readers(2)
with self.lock.for_read():
self.assert_readers(3)
self.assert_readers(2)
self.assert_readers(1)
self.assert_unlocked()
def test_writer_blocks_reader(self):
with self.lock.for_write():
event = multiprocessing.Event()
def test():
self.assert_writer()
# Caller will block until this event is released.
event.set()
with self.lock.for_read():
self.assert_readers(1)
return 'read'
            r = self.run_async(test)
# Wait until we can confirm that the reader is locked out.
event.wait()
self.assert_writer()
self.assertEqual('read', self.get_result(r))
self.assert_unlocked()
def test_writer_blocks_multiple_readers(self):
with self.lock.for_write():
before_read = multiprocessing.Barrier(3)
during_read = multiprocessing.Barrier(2)
after_read = multiprocessing.Barrier(2)
def test():
self.assert_writer()
before_read.wait()
with self.lock.for_read():
during_read.wait()
value = self.reader_count()
after_read.wait()
return value
            r1 = self.run_async(test)
            r2 = self.run_async(test)
# Wait until we can confirm that all readers are locked out
before_read.wait()
self.assert_writer()
self.assertEqual(2, self.get_result(r1))
self.assertEqual(2, self.get_result(r2))
self.assert_unlocked()
def test_reader_blocks_writer(self):
with self.lock.for_read():
before_write = multiprocessing.Barrier(2)
during_write = multiprocessing.Barrier(2)
after_write = multiprocessing.Barrier(2)
after_unlock = multiprocessing.Barrier(2)
def test():
self.assert_readers(1)
before_write.wait()
with self.lock.for_write():
self.assert_writer()
return 'written'
            writer = self.run_async(test)
# Wait until we can confirm that all writers are locked out.
before_write.wait()
self.assert_readers(1)
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
def test_multiple_readers_block_writer(self):
with self.lock.for_read():
before_read = multiprocessing.Barrier(3)
after_read = multiprocessing.Barrier(2)
def test_reader():
self.assert_readers(1)
with self.lock.for_read():
before_read.wait()
value = self.reader_count()
after_read.wait()
return value
def test_writer():
before_read.wait()
with self.lock.for_write():
self.assert_writer()
return 'written'
            reader = self.run_async(test_reader)
            writer = self.run_async(test_writer)
# Wait for the write to be blocked by multiple readers.
before_read.wait()
self.assert_readers(2)
after_read.wait()
self.assertEqual(2, self.get_result(reader))
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
def test_multiple_writers_block_each_other(self):
with self.lock.for_write():
before_write = multiprocessing.Barrier(2)
def test():
before_write.wait()
with self.lock.for_write():
self.assert_writer()
return 'written'
            writer = self.run_async(test)
before_write.wait()
self.assert_writer()
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
def test_wait_for_write(self):
event = multiprocessing.Event()
wait_count = 0
with self.lock.for_read():
def test():
with self.lock.for_write():
self.assert_writer()
event.set()
return 'written'
            writer = self.run_async(test)
while not event.is_set():
self.assert_readers(1)
wait_count += 1
self.lock.wait_for_write()
self.assert_readers(1)
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
self.assertLessEqual(wait_count, 2)
def test_wait_for_write__writer_already_waiting_for_reader(self):
event = multiprocessing.Event()
with self.lock.for_read():
def test():
event.set()
with self.lock.for_write():
self.assert_writer()
event.set()
return 'written'
            writer = self.run_async(test)
event.wait()
# Force a context switch so the writer is waiting
time.sleep(0.1)
self.lock.wait_for_write()
self.assert_readers(1)
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
def test_wait_for_write_without_lock(self):
self.assert_unlocked()
self.assertRaises(
ringbuffer.InternalLockingError,
self.lock.wait_for_write)
def test_unlock_readers_on_exception(self):
try:
with self.lock.for_read():
self.assert_readers(1)
raise TestException
except TestException:
self.assert_unlocked()
else:
self.fail()
def test_unlock_writer_on_exception(self):
try:
with self.lock.for_write():
self.assert_writer()
raise TestException
except TestException:
self.assert_unlocked()
else:
self.fail()
class Expecter:
def __init__(self, ring, pointer, testcase):
self.ring = ring
self.pointer = pointer
self.testcase = testcase
def expect_index(self, i):
self.testcase.assertEqual(i, self.pointer.get().index)
def write(self, data):
self.ring.try_write(data)
def write_memory_view(self, data):
view = memoryview(data)
self.ring.try_write(view)
def write_ctype(self, data):
data_type = ctypes.c_double * len(data)
cdata = data_type()
cdata[:] = data
self.ring.try_write(cdata)
def _get_read_func(self, blocking):
if blocking:
return self.ring.blocking_read
else:
return self.ring.try_read
def expect_read(self, expected_data, blocking=False):
read = self._get_read_func(blocking)
data = read(self.pointer)
self.testcase.assertEqual(expected_data, data, 'Data was: %r' % data)
def expect_waiting_for_writer(self):
# There's no blocking version of this because the WaitingForWriterError
# is what's used to determine when to block on the condition variable.
self.testcase.assertRaises(
ringbuffer.WaitingForWriterError,
self.ring.try_read,
self.pointer)
def expect_waiting_for_reader(self):
self.testcase.assertRaises(
ringbuffer.WaitingForReaderError,
self.ring.try_write,
b'should not work')
def writer_done(self):
self.ring.writer_done()
def expect_writer_finished(self, blocking=False):
read = self._get_read_func(blocking)
self.testcase.assertRaises(
ringbuffer.WriterFinishedError,
read,
self.pointer)
def expect_already_closed(self):
self.testcase.assertRaises(
ringbuffer.AlreadyClosedError,
self.ring.try_write,
b'should not work')
def force_reader_sync(self):
self.ring.force_reader_sync()
def expect_try_read_type(self, type_or_class):
data = self.ring.try_read(self.pointer)
self.testcase.assertTrue(isinstance(data, type_or_class))
class AsyncProxy:
def __init__(self, expecter, in_queue, error_queue):
self.expecter = expecter
self.in_queue = in_queue
self.error_queue = error_queue
self.runner = None
def run(self):
while True:
item = self.in_queue.get()
try:
if item == 'done':
logging.debug('Exiting %r', self.runner)
return
name, args, kwargs = item
logging.debug('Running %s(%r, %r)', name, args, kwargs)
try:
result = getattr(self.expecter, name)(*args, **kwargs)
except Exception as e:
logging.exception(
'Problem running %s(*%r, **%r)', name, args, kwargs)
self.error_queue.put(e)
finally:
self.in_queue.task_done()
def shutdown(self):
self.in_queue.put('done')
def __getattr__(self, name):
func = getattr(self.expecter, name)
def proxy(*args, **kwargs):
self.expecter.testcase.assertTrue(
self.runner,
'Must call start_proxies() before setting test expectations')
# This queue is used to sequence operations between functions
# that are running asynchronously (threads or processes).
self.in_queue.put((name, args, kwargs))
# If this test function is running in blocking mode, that means
# the locking and sequencing is built into the semantics of the
# function call itself. That means we can skip waiting for the
# asynchronous function to consume the queue before letting
# subsequent test methods run.
if kwargs.get('blocking'):
# Allow a context switch so the asynchronous function has
# a chance to actually start the function call.
time.sleep(0.1)
else:
self.in_queue.join()
return proxy
class RingBufferTestBase:
def setUp(self):
self.ring = ringbuffer.RingBuffer(slot_bytes=100, slot_count=10)
self.proxies = []
self.error_queue = self.new_queue()
def tearDown(self):
for proxy in self.proxies:
if proxy.runner:
proxy.shutdown()
for proxy in self.proxies:
if proxy.runner:
proxy.in_queue.join()
if not self.error_queue.empty():
raise self.error_queue.get()
# Force child processes and pipes to be garbage collected, otherwise
# we'll run out of file descriptors.
gc.collect()
def new_queue(self):
raise NotImplementedError
def run_proxy(self, proxy):
raise NotImplementedError
def start_proxies(self):
for proxy in self.proxies:
self.run_proxy(proxy)
def new_reader(self):
expecter = Expecter(self.ring, self.ring.new_reader(), self)
proxy = AsyncProxy(expecter, self.new_queue(), self.error_queue)
self.proxies.append(proxy)
return proxy
def new_writer(self):
self.ring.new_writer()
expecter = Expecter(self.ring, self.ring.writer, self)
proxy = AsyncProxy(expecter, self.new_queue(), self.error_queue)
self.proxies.append(proxy)
return proxy
def test_write_bytes(self):
writer = self.new_writer()
self.start_proxies()
writer.write(b'this works')
def test_write_string(self):
writer = self.new_writer()
self.start_proxies()
self.assertTrue(self.error_queue.empty())
writer.write('this does not work')
error = self.error_queue.get()
self.assertTrue(isinstance(error, TypeError))
def test_write_bytearray(self):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
byte_list = [124, 129, 92, 3, 97]
data = bytearray(byte_list)
writer.write(data)
expected_bytes = b'|\x81\\\x03a'
self.assertListEqual(list(expected_bytes), byte_list)
reader.expect_read(expected_bytes)
def test_write_memoryview(self):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
data = b'|\x81\\\x03a'
writer.write_memory_view(data)
reader.expect_read(data)
def test_write_ctype_array(self):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
data = [
0.10547615602385774,
0.7852261064650733,
0.9641224591137485,
0.7119325400788387,
0.0351822948099656,
0.7533559074003938,
0.40285734175834087,
0.9567564883196842,
0.38539673218346415,
0.2682555751644704,
]
writer.write_ctype(data)
expected_bytes = (
b'\xe0X\xa1@|\x00\xbb?\xf3s\xe7\x7f\x92 \xe9?\xd8q\xe7W\x17\xda'
b'\xee?)\x19\x13\xc0&\xc8\xe6?\x00\xcd6\xebi\x03\xa2?\x1f\x0f'
b'\x11\xd9}\x1b\xe8?r\x8e\xf3(j\xc8\xd9?\x044r\xc8\xbf\x9d\xee?'
b'\xe0\xa5-\x0eW\xaa\xd8?\xbcD\x93n\x19+\xd1?')
reader.expect_read(expected_bytes)
data_type = ctypes.c_double * len(data)
expected = data_type.from_buffer_copy(expected_bytes)
self.assertEqual(list(expected), data)
def _do_read_single_write(self, blocking):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
writer.expect_index(0)
writer.write(b'first write')
writer.expect_index(1)
reader.expect_index(0)
reader.expect_read(b'first write', blocking=blocking)
reader.expect_index(1)
def test_read_is_bytes(self):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
writer.write(b'this works')
reader.expect_try_read_type(bytearray)
def test_read_single_write_blocking(self):
self._do_read_single_write(True)
def test_read_single_write_non_blocking(self):
self._do_read_single_write(False)
def _do_read_ahead_of_writes(self, blocking):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
reader.expect_waiting_for_writer()
writer.write(b'first write')
reader.expect_read(b'first write', blocking=blocking)
def test_read_ahead_of_writes_blocking(self):
self._do_read_ahead_of_writes(True)
def test_read_ahead_of_writes_non_blocking(self):
self._do_read_ahead_of_writes(False)
def _do_two_reads_one_behind_one_ahead(self, blocking):
r1 = self.new_reader()
r2 = self.new_reader()
writer = self.new_writer()
self.start_proxies()
writer.write(b'first write')
r1.expect_read(b'first write', blocking=blocking)
r1.expect_waiting_for_writer()
r2.expect_read(b'first write', blocking=blocking)
r2.expect_waiting_for_writer()
def test_two_reads_one_behind_one_ahead_blocking(self):
self._do_two_reads_one_behind_one_ahead(True)
def test_two_reads_one_behind_one_ahead_non_blocking(self):
self._do_two_reads_one_behind_one_ahead(False)
def test_write_conflict_first_slot(self):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
for i in range(self.ring.slot_count):
writer.write(b'write %d' % i)
# The writer has wrapped around and is now waiting for the reader
# to free up a slot. They have the same index, but are different
# generations.
reader.expect_index(0)
writer.expect_index(0)
writer.expect_waiting_for_reader()
reader.expect_read(b'write 0')
writer.write(b'now it works')
for i in range(1, self.ring.slot_count):
reader.expect_read(b'write %d' % i)
reader.expect_index(0)
reader.expect_read(b'now it works')
def test_write_conflict_last_slot(self):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
last_slot = self.ring.slot_count - 1
self.assertGreater(last_slot, 0)
for i in range(last_slot):
data = b'write %d' % i
writer.write(data)
reader.expect_read(data)
writer.expect_index(last_slot)
reader.expect_index(last_slot)
# The reader's pointed at the last slot, now wrap around the writer
# to catch up. They'll have the same index, but different generation
# numbers.
for i in range(self.ring.slot_count):
data = b'write %d' % (self.ring.slot_count + i)
writer.write(data)
reader.expect_index(last_slot)
writer.expect_index(last_slot)
writer.expect_waiting_for_reader()
reader.expect_read(b'write 10')
writer.write(b'now it works')
writer.expect_index(0)
reader.expect_index(0)
def test_create_reader_after_writing(self):
writer = self.new_writer()
self.start_proxies()
self.new_reader() # No error because no writes happened yet.
writer.write(b'hello')
self.assertRaises(
ringbuffer.MustCreatedReadersBeforeWritingError,
self.new_reader)
def _do_read_after_close_beginning(self, blocking):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
writer.writer_done()
reader.expect_writer_finished(blocking=blocking)
def test_read_after_close_beginning_blocking(self):
self._do_read_after_close_beginning(True)
def test_read_after_close_beginning_non_blocking(self):
self._do_read_after_close_beginning(False)
def _do_close_before_read(self, blocking):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
writer.write(b'fill the buffer')
writer.writer_done()
writer.expect_index(1)
reader.expect_read(b'fill the buffer')
reader.expect_writer_finished(blocking=blocking)
reader.expect_index(1)
def test_close_before_read_blocking(self):
self._do_close_before_read(True)
def test_close_before_read_non_blocking(self):
self._do_close_before_read(False)
def _do_close_after_read(self, blocking):
reader = self.new_reader()
writer = self.new_writer()
self.start_proxies()
writer.write(b'fill the buffer')
reader.expect_read(b'fill the buffer')
reader.expect_waiting_for_writer()
reader.expect_index(1)
writer.writer_done()
writer.expect_index(1)
reader.expect_writer_finished(blocking=blocking)
def test_close_after_read_blocking(self):
self._do_close_after_read(True)
def test_close_after_read_non_blocking(self):
self._do_close_after_read(False)
def test_close_then_write(self):
writer = self.new_writer()
self.start_proxies()
writer.write(b'one')
writer.writer_done()
writer.expect_already_closed()
def test_blocking_readers_wake_up_after_write(self):
writer = self.new_writer()
r1 = self.new_reader()
r2 = self.new_reader()
self.start_proxies()
r1.expect_read(b'write after read', blocking=True)
r2.expect_read(b'write after read', blocking=True)
writer.write(b'write after read')
def test_blocking_readers_wake_up_after_close(self):
writer = self.new_writer()
r1 = self.new_reader()
r2 = self.new_reader()
self.start_proxies()
r1.expect_writer_finished(blocking=True)
r2.expect_writer_finished(blocking=True)
writer.writer_done()
def test_force_reader_sync(self):
writer = self.new_writer()
r1 = self.new_reader()
r2 = self.new_reader()
self.start_proxies()
writer.write(b'one')
writer.write(b'two')
writer.write(b'three')
writer.expect_index(3)
r1.expect_index(0)
r2.expect_index(0)
writer.force_reader_sync()
r1.expect_index(3)
r2.expect_index(3)
def _do_multiple_writers(self, blocking):
w1 = self.new_writer()
w2 = self.new_writer()
reader = self.new_reader()
self.start_proxies()
w1.write(b'aaa')
w1.expect_index(1)
w2.expect_index(1)
w2.write(b'bbb')
w1.expect_index(2)
w2.expect_index(2)
w2.write(b'ccc')
w1.expect_index(3)
w2.expect_index(3)
w1.write(b'ddd')
w1.expect_index(4)
w2.expect_index(4)
reader.expect_read(b'aaa', blocking=blocking)
reader.expect_read(b'bbb', blocking=blocking)
reader.expect_read(b'ccc', blocking=blocking)
reader.expect_read(b'ddd', blocking=blocking)
def test_multiple_writers_blocking(self):
self._do_multiple_writers(True)
def test_multiple_writers_non_blocking(self):
self._do_multiple_writers(False)
def _do_test_multiple_writers_close(self, blocking):
w1 = self.new_writer()
w2 = self.new_writer()
reader = self.new_reader()
self.start_proxies()
w1.write(b'aaa')
w1.writer_done()
w2.write(b'bbb')
w2.writer_done()
reader.expect_read(b'aaa', blocking=blocking)
reader.expect_read(b'bbb', blocking=blocking)
reader.expect_writer_finished(blocking=blocking)
def test_multiple_writers_close_blocking(self):
self._do_test_multiple_writers_close(True)
def test_multiple_writers_close_non_blocking(self):
self._do_test_multiple_writers_close(False)
def _do_start_read_before_writer_setup(self, blocking):
reader = self.new_reader()
self.start_proxies()
reader.expect_writer_finished(blocking=blocking)
def test_start_read_before_writer_setup_blocking(self):
self._do_start_read_before_writer_setup(True)
def test_start_read_before_writer_setup_non_blocking(self):
self._do_start_read_before_writer_setup(False)
class ThreadingTest(RingBufferTestBase, unittest.TestCase):
def new_queue(self):
return queue.Queue()
def run_proxy(self, proxy):
thread = threading.Thread(target=proxy.run)
proxy.runner = thread
thread.daemon = True
thread.start()
class MultiprocessingTest(RingBufferTestBase, unittest.TestCase):
def new_queue(self):
return multiprocessing.JoinableQueue()
def run_proxy(self, proxy):
process = multiprocessing.Process(target=proxy.run)
proxy.runner = process
process.daemon = True
process.start()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
|
nilq/baby-python
|
python
|
#################################################
# (c) Copyright 2014 Hyojoon Kim
# All Rights Reserved
#
# email: deepwater82@gmail.com
#################################################
import os
from optparse import OptionParser
import python_api
import plot_lib
import sys
import pickle
def plot_the_data(the_map, output_dir, saveAsFileName, plot_title):
xa = []
ymap = {}
#### Do your stuff
plot_lib.plot_multiline(xa, ymap, output_dir, saveAsFileName, plot_title)
# plot_lib.plot_distribution(xa, ymap, output_dir, saveAsFileName, plot_title)
return
def main():
desc = ( 'Plotting data' )
usage = ( '%prog [options]\n'
'(type %prog -h for details)' )
op = OptionParser( description=desc, usage=usage )
# Options
op.add_option( '--inputfile', '-i', action="store", \
dest="input_file", help = "Pickled data")
op.add_option( '--outputdir', '-o', action="store", \
dest="output_dir", help = "Directory to store plots")
# Parsing and processing args
options, args = op.parse_args()
args_check = sys.argv[1:]
if len(args_check) != 4:
        print('Something wrong with parameters. Please check.')
        op.print_help()
sys.exit(1)
# Check and add slash to directory if not there.
output_dir = python_api.check_directory_and_add_slash(options.output_dir)
# Check file, open, read
if os.path.isfile(options.input_file) is True:
        fd = open(options.input_file, 'rb')  # pickle data must be read in binary mode
data = pickle.load(fd)
fd.close()
# Plot
saveAsFileName = '' # Add file extension yourself.
plot_title = ''
plot_the_data(data, output_dir, saveAsFileName, plot_title)
######
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
'''
Application 1
factorial problem
n!=n*(n-1)!
'''
def factorial(n):
if n == 0:
return 1
elif n >=1:
        return n * factorial(n - 1)  # here the function calls itself (recursion)
#print(factorial(5))
'''
Application 2
Draw English Ruler
'''
def draw_line(tick_length,tick_label=''): # tick_length = 3 then print '---'
    '''tick_label should be a str. At each inch a labelled tick is printed, e.g. ---- 0, ---- 1'''
line = '-'*tick_length
if tick_label:
line +=' '+tick_label
print(line)
def draw_interval(center_length):
'''draw tick interval based upon a central tick length'''
if center_length>0:
draw_interval(center_length-1) # recursion
draw_line(center_length)
draw_interval(center_length-1)
def draw_ruler(num_inches,major_length):
    '''num_inches decides how many times the draw_interval function repeats'''
draw_line(major_length,'0')
for i in range(1,1+num_inches):
draw_interval(major_length)
draw_line(major_length,str(i))
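# Illustrative output (not part of the original): draw_ruler(1, 3) prints
# --- 0
# -
# --
# -
# ---
# -
# --
# -
# --- 1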
'''
Application 3
Binary Search
'''
def Binary_search(sorted_sequence,target_number,low,high):
'''
    :param sorted_sequence: for binary search, the data must be sorted
    :param low, high: on each call, data[mid] is compared with the target and one of these two bounds is updated
If the target equals data[mid], then we have found the item we are looking for,and the search terminates successfully.
• If target < data[mid], then we recur on the first half of the sequence, that is,
on the interval of indices from low to mid − 1.
• If target > data[mid], then we recur on the second half of the sequence, that is,
on the interval of indices from mid + 1 to high.
'''
if low > high:
return False
else:
mid = (low+high)//2
if sorted_sequence[mid] == target_number:
return mid
elif sorted_sequence[mid] < target_number:
low = mid +1
            '''
            low = mid + 1 is safe because data[mid] has already been compared;
            using low = mid instead can recurse forever once low == mid
            '''
return Binary_search(sorted_sequence,target_number,low,high)
else:
high = mid -1
return Binary_search(sorted_sequence,target_number,low,high)
#Test
# data = [2,4,5,7,8,9,12,14,17,19,22,25,27,28,33,37]
# a = Binary_search(data,19,0,len(data)-1)
# print(data[a]==19)
'''
Application 4
computing the total disk usage for all
files and directories nested within a particular directory.
In this application, we would use Python's os module
os.path.getsize(path) returns the immediate disk usage for the file or directory
os.path.isdir(path) return True if entry designated by string path is a directory
os.listdir(path) returns the names of all entries within a directory
os.path.join(path,filename) compose the path string and filename string using '/' for Unix/Linux
'''
import os
def Disk_Usage(path):
'''return the number of bytes used by a file/folder and any descendents'''
total = os.path.getsize(path)
if os.path.isdir(path) == True:
for filename in os.listdir(path):
childpath = os.path.join(path,filename)
total += Disk_Usage(childpath)
return total
#print(Disk_Usage('/Users/leojin/Desktop/CODE')*10e-7)
|
nilq/baby-python
|
python
|
"""
Utils module.
This module contains simple utility classes and functions.
"""
import signal
import textwrap
from datetime import timedelta
from pathlib import Path
from typing import Any, Dict, List
import pkg_resources
import toml
from appdirs import user_config_dir
from loguru import logger
from aria2p.types import PathOrStr
class SignalHandler:
"""A helper class to handle signals."""
def __init__(self, signals: List[str]) -> None:
"""
Initialize the object.
Arguments:
signals: List of signals names as found in the `signal` module (example: SIGTERM).
"""
logger.debug("Signal handler: handling signals " + ", ".join(signals))
self.triggered = False
for sig in signals:
try:
signal.signal(signal.Signals[sig], self.trigger) # noqa: E1101 (signal.Signals)
except ValueError as error:
logger.error(f"Failed to setup signal handler for {sig}: {error}")
def __bool__(self) -> bool:
"""
Return True when one of the given signal was received, False otherwise.
Returns:
True when signal received, False otherwise.
"""
return self.triggered
def trigger(self, signum, frame) -> None: # noqa: W0613 (unused frame)
"""
Mark this instance as 'triggered' (a specified signal was received).
Arguments:
signum: The signal code.
frame: The signal frame (unused).
"""
logger.debug(
f"Signal handler: caught signal {signal.Signals(signum).name} ({signum})", # noqa: E1101 (signal.Signals)
)
self.triggered = True
def human_readable_timedelta(value: timedelta, precision: int = 0) -> str:
"""
Return a human-readable time delta as a string.
Arguments:
value: The timedelta.
precision: The precision to use:
- `0` to display all units
- `1` to display the biggest unit only
- `2` to display the first two biggest units only
- `n` for the first N biggest units, etc.
Returns:
A string representing the time delta.
"""
pieces = []
if value.days:
pieces.append(f"{value.days}d")
seconds = value.seconds
if seconds >= 3600: # noqa: WPS432 (magic number)
hours = int(seconds / 3600) # noqa: WPS432
pieces.append(f"{hours}h")
seconds -= hours * 3600 # noqa: WPS432
if seconds >= 60:
minutes = int(seconds / 60)
pieces.append(f"{minutes}m")
seconds -= minutes * 60
if seconds > 0 or not pieces:
pieces.append(f"{seconds}s")
if precision == 0:
return "".join(pieces)
return "".join(pieces[:precision])
def human_readable_bytes(value: int, digits: int = 2, delim: str = "", postfix: str = "") -> str:
"""
Return a human-readable bytes value as a string.
Arguments:
value: The bytes value.
digits: How many decimal digits to use.
delim: String to add between value and unit.
postfix: String to add at the end.
Returns:
The human-readable version of the bytes.
"""
chosen_unit = "B"
for unit in ("KiB", "MiB", "GiB", "TiB"):
if value > 1000:
value /= 1024
chosen_unit = unit
else:
break
return f"{value:.{digits}f}" + delim + chosen_unit + postfix # noqa: WPS221 (not complex)
def bool_or_value(value) -> Any:
"""
Return `True` for `"true"`, `False` for `"false"`, original value otherwise.
Arguments:
value: Any kind of value.
Returns:
- `True` for `"true"`
- `False` for `"false"`
- Original value otherwise
"""
if value == "true":
return True
if value == "false":
return False
return value
def bool_to_str(value) -> Any:
"""
Return `"true"` for `True`, `"false"` for `False`, original value otherwise.
Arguments:
value: Any kind of value.
Returns:
- `"true"` for `True`
- `"false"` for `False`
- Original value otherwise
"""
if value is True:
return "true"
if value is False:
return "false"
return value
def get_version() -> str:
"""
Return the current `aria2p` version.
Returns:
The current `aria2p` version.
"""
try:
distribution = pkg_resources.get_distribution("aria2p")
except pkg_resources.DistributionNotFound:
return "0.0.0"
else:
return distribution.version
def load_configuration() -> Dict[str, Any]:
"""
Return dict from TOML formatted string or file.
Returns:
The dict configuration.
"""
default_config = """
[key_bindings]
AUTOCLEAR = "c"
CANCEL = "esc"
ENTER = "enter"
FILTER = ["F4", "\\\\"]
FOLLOW_ROW = "F"
HELP = ["F1", "?"]
MOVE_DOWN = ["down", "j"]
MOVE_DOWN_STEP = "J"
MOVE_END = "end"
MOVE_HOME = "home"
MOVE_LEFT = ["left", "h"]
MOVE_RIGHT = ["right", "l"]
MOVE_UP = ["up", "k"]
MOVE_UP_STEP = "K"
NEXT_SORT = ["p", ">"]
PREVIOUS_SORT = "<"
PRIORITY_DOWN = ["F8", "d", "]"]
PRIORITY_UP = ["F7", "u", "["]
QUIT = ["F10", "q"]
REMOVE_ASK = ["del", "F9"]
RETRY = "r"
RETRY_ALL = "R"
REVERSE_SORT = "I"
SEARCH = ["F3", "/"]
SELECT_SORT = "F6"
SETUP = "F2"
TOGGLE_EXPAND_COLLAPSE = "x"
TOGGLE_EXPAND_COLLAPSE_ALL = "X"
TOGGLE_RESUME_PAUSE = "space"
TOGGLE_RESUME_PAUSE_ALL = "P"
TOGGLE_SELECT = "s"
UN_SELECT_ALL = "U"
ADD_DOWNLOADS = "a"
[colors]
BRIGHT_HELP = "CYAN BOLD BLACK"
FOCUSED_HEADER = "BLACK NORMAL CYAN"
FOCUSED_ROW = "BLACK NORMAL CYAN"
HEADER = "BLACK NORMAL GREEN"
METADATA = "WHITE UNDERLINE BLACK"
SIDE_COLUMN_FOCUSED_ROW = "BLACK NORMAL CYAN"
SIDE_COLUMN_HEADER = "BLACK NORMAL GREEN"
SIDE_COLUMN_ROW = "WHITE NORMAL BLACK"
STATUS_ACTIVE = "CYAN NORMAL BLACK"
STATUS_COMPLETE = "GREEN NORMAL BLACK"
STATUS_ERROR = "RED BOLD BLACK"
STATUS_PAUSED = "YELLOW NORMAL BLACK"
STATUS_WAITING = "WHITE BOLD BLACK"
"""
config_dict = {}
config_dict["DEFAULT"] = toml.loads(default_config)
# Check for configuration file
config_file_path = Path(user_config_dir("aria2p")) / "config.toml"
if config_file_path.exists():
try:
config_dict["USER"] = toml.load(config_file_path)
except Exception as error: # noqa: W0703 (too broad exception)
logger.error(f"Failed to load configuration file: {error}")
else:
# Write initial configuration file if it does not exist
config_file_path.parent.mkdir(parents=True, exist_ok=True)
with config_file_path.open("w") as fd:
fd.write(textwrap.dedent(default_config).lstrip("\n"))
return config_dict
def read_lines(path: PathOrStr) -> List[str]:
"""
Read lines in a file.
Arguments:
path: The file path.
Returns:
The list of lines.
"""
return Path(path).read_text().splitlines()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
modules for universal fetcher that gives historical daily data and realtime data
for almost everything in the market
"""
import requests
import time
import datetime as dt
import pandas as pd
from bs4 import BeautifulSoup
from functools import wraps
from xalpha.info import fundinfo, mfundinfo
from xalpha.cons import connection_errors
def rget(*args, **kws):
tries = 5
for count in range(tries):
try:
r = requests.get(*args, **kws)
return r
except connection_errors as e:
if count == tries - 1:
print(*args, sep="\n")
raise e
time.sleep(1)
def rpost(*args, **kws):
tries = 5
for count in range(tries):
try:
r = requests.post(*args, **kws)
return r
except connection_errors as e:
if count == tries - 1:
print(*args, sep="\n")
raise e
time.sleep(1)
def today_obj():
now = dt.datetime.today()
return now.replace(hour=0, minute=0, second=0, microsecond=0)
def tomorrow_ts():
dto = dt.datetime.now() + dt.timedelta(1)
return dto.timestamp()
def get_token():
r = rget("https://xueqiu.com", headers={"user-agent": "Mozilla"})
return r.cookies["xq_a_token"]
def get_history(
code, prefix="SH", count=365, token="a664afb60c7036c7947578ac1a5860c4cfb6b3b5"
):
url = "https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={prefix}{code}&begin={tomorrow}&period=day&type=before&count=-{count}"
data = rget(
url.format(
code=code, prefix=prefix, tomorrow=int(tomorrow_ts() * 1000), count=count
),
cookies={"xq_a_token": token},
headers={"user-agent": "Mozilla/5.0"},
)
return data.json()
def ts2pdts(ts):
tz_bj = dt.timezone(dt.timedelta(hours=8))
dto = dt.datetime.fromtimestamp(ts / 1000, tz=tz_bj).replace(tzinfo=None)
return dto.replace(
hour=0, minute=0, second=0, microsecond=0
    )  # Xueqiu US-market timestamps fall at US midnight; after converting back to Beijing time, dropping the hour/minute/second makes the dates line up
def get_xueqiu(code, count):
r = get_history(code=code, prefix="", count=count, token=get_token())
df = pd.DataFrame(data=r["data"]["item"], columns=r["data"]["column"])
df["date"] = (df["timestamp"]).apply(ts2pdts) # reset hours to zero
return df
def get_cninvesting(curr_id, st_date, end_date):
r = rpost(
"https://cn.investing.com/instruments/HistoricalDataAjax",
data={
"curr_id": curr_id,
# "smlID": smlID, # ? but seems to be fixed with curr_id, it turns out it doesn't matter
"st_date": st_date,
"end_date": end_date,
"interval_sec": "Daily",
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
},
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\
AppleWebKit/537.36 (KHTML, like Gecko)",
"Host": "cn.investing.com",
"X-Requested-With": "XMLHttpRequest",
},
)
s = BeautifulSoup(r.text, "lxml")
dfdict = {}
cols = []
for col in s.find_all("th"):
dfdict[str(col.contents[0])] = []
cols.append(str(col.contents[0]))
num_cols = len(cols)
for i, td in enumerate(s.find_all("td")[:-5]):
if cols[i % num_cols] == "日期":
dfdict[cols[i % num_cols]].append(
dt.datetime.strptime(str(td.string), "%Y年%m月%d日")
)
else:
dfdict[cols[i % num_cols]].append(str(td.string))
return pd.DataFrame(dfdict)
def prettify(df):
_map = {
"日期": "date",
"收盘": "close",
"开盘": "open",
"高": "high",
"低": "low",
"涨跌幅": "percent",
}
df.rename(_map, axis=1, inplace=True)
if len(df) > 1 and df.iloc[1]["date"] < df.iloc[0]["date"]:
df = df[::-1]
df = df[["date", "open", "close", "high", "low", "percent"]]
for k in ["open", "close", "high", "low"]:
df[k] = df[k].apply(_float)
return df
def dstr2dobj(dstr):
if len(dstr.split("/")) > 1:
d_obj = dt.datetime.strptime(dstr, "%Y/%m/%d")
elif len(dstr.split(".")) > 1:
d_obj = dt.datetime.strptime(dstr, "%Y.%m.%d")
elif len(dstr.split("-")) > 1:
d_obj = dt.datetime.strptime(dstr, "%Y-%m-%d")
else:
d_obj = dt.datetime.strptime(dstr, "%Y%m%d")
return d_obj
def get_investing_id(suburl):
url = "https://cn.investing.com"
if not suburl.startswith("/"):
url += "/"
url += suburl
r = rget(
url,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36"
},
)
s = BeautifulSoup(r.text, "lxml")
pid = s.find("span", id="last_last")["class"][-1].split("-")[1]
return pid
def get_rmb(start=None, end=None, prev=360, currency="USD/CNY"):
"""
    Fetch the RMB central parity exchange rate from chinamoney.com.cn
:param start:
:param end:
:param prev:
:param currency:
:return: pd.DataFrame
"""
url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bk-ccpr/CcprHisNew?startDate={start_str}&endDate={end_str}¤cy={currency}&pageNum=1&pageSize=300"
if not end:
end_obj = today_obj()
else:
end_obj = dstr2dobj(end)
if not start:
start_obj = end_obj - dt.timedelta(prev)
else:
start_obj = dstr2dobj(start)
start_str = start_obj.strftime("%Y-%m-%d")
end_str = end_obj.strftime("%Y-%m-%d")
count = (end_obj - start_obj).days + 1
rl = []
if count <= 360:
r = rpost(url.format(start_str=start_str, end_str=end_str, currency=currency))
rl.extend(r.json()["records"])
else: # data more than 1 year cannot be fetched once due to API limitation
sepo_obj = end_obj
sepn_obj = sepo_obj - dt.timedelta(360)
# sep0_obj = end_obj - dt.timedelta(361)
while sepn_obj > start_obj: # [sepn sepo]
r = rpost(
url.format(
start_str=sepn_obj.strftime("%Y-%m-%d"),
end_str=sepo_obj.strftime("%Y-%m-%d"),
currency=currency,
)
)
rl.extend(r.json()["records"])
sepo_obj = sepn_obj - dt.timedelta(1)
sepn_obj = sepo_obj - dt.timedelta(360)
r = rpost(
url.format(
start_str=start_obj.strftime("%Y-%m-%d"),
end_str=sepo_obj.strftime("%Y-%m-%d"),
currency=currency,
)
)
rl.extend(r.json()["records"])
data = {"date": [], "close": []}
for d in rl:
data["date"].append(pd.Timestamp(d["date"]))
data["close"].append(d["values"][0])
df = pd.DataFrame(data)
df = df[::-1]
df["close"] = pd.to_numeric(df["close"])
return df
def get_fund(code):
if code[0] == "F":
df = fundinfo(code[1:]).price
elif code[0] == "M":
df = mfundinfo(code[1:]).price
df["close"] = df["netvalue"]
return df[["date", "close"]]
def get_daily(code, start=None, end=None, prev=365, _from=None):
"""
    Universal fetcher for daily historical data of virtually anything that has a market price.
    Data sources include Tiantian Fund (Eastmoney), Xueqiu, Investing.com (cn.investing.com),
    and the official RMB central parity site (chinamoney.com.cn).
    :param code: str.
        1. For Shanghai/Shenzhen stocks, indices, ETFs, LOF funds, convertible bonds and bonds,
           use the instrument code directly, prefixed with SH or SZ.
        2. For Hong Kong stocks and indices, use the numeric code prefixed with HK.
        3. For US stocks, indices, ETFs, etc., use the ticker symbol directly.
        4. For RMB central parity data, use the form "USD/CNY"; the available currency pairs are
           listed in the historical-data table at http://www.chinamoney.com.cn/chinese/bkccpr/.
        5. For any product listed on cn.investing.com, the code can be either the site's internal
           numeric id or the path part of its URL. For example, the DAX 30 overview page is
           https://cn.investing.com/indices/germany-30, so the corresponding code is "indices/germany-30".
           You can also inspect the page to find the internal id manually (usually unnecessary; using
           the URL path as the code is recommended): inspect the bold real-time price and take the
           pid value from the corresponding span class.
        6. For mainland mutual funds, use the fund code prefixed with F.
        7. For mainland money-market funds, use the fund code prefixed with M (all handled as net-value data).
    :param start: str. "20200101", "2020/01/01", "2020-01-01" are all legal. The starting date of daily data.
    :param end: str. format is the same as start. The ending date of daily data.
    :param prev: Optional[int], default 365. If start is not specified, start = end-prev.
    :param _from: Optional[str]. Can be one of "xueqiu", "zjj", "investing", "tiantianjijin". Only used for
        debugging to enforce a data source. For common use, _from is chosen automatically from the code at run time.
    :return: pd.DataFrame.
        Must include the columns: date [pd.Timestamp], close [float64].
"""
if not end:
end_obj = today_obj()
else:
end_obj = dstr2dobj(end)
if not start:
start_obj = end_obj - dt.timedelta(prev)
else:
start_obj = dstr2dobj(start)
if not _from:
if code.startswith("SH") or code.startswith("SZ"):
_from = "xueqiu"
elif code.endswith("/CNY") or code.startswith("CNY/"):
_from = "zjj"
elif len(code.split("/")) > 1:
_from = "cninvesting"
code = get_investing_id(code)
elif code.isdigit():
_from = "cninvesting"
elif code[0] in ["F", "M"] and code[1:].isdigit():
_from = "ttjj"
elif code.startswith("HK") and code[2:].isdigit() and len(code) == 7:
_from = "xueqiu"
code = code[2:]
else:
_from = "xueqiu"
count = (today_obj() - start_obj).days + 1
start_str = start_obj.strftime("%Y/%m/%d")
end_str = end_obj.strftime("%Y/%m/%d")
if _from in ["cninvesting", "investing", "default"]:
df = get_cninvesting(code, start_str, end_str)
return prettify(df)
elif _from in ["xueqiu", "xq", "snowball"]:
df = get_xueqiu(code, count)
df = df[df.date <= end_str]
df = df[df.date >= start_str]
return prettify(df)
elif _from in ["zhongjianjia", "zjj", "chinamoney"]:
df = get_rmb(start, end, prev, currency=code)
return df
elif _from in ["ttjj", "tiantianjijin", "xalpha", "eastmoney"]:
df = get_fund(code)
df = df[df.date <= end_str]
df = df[df.date >= start_str]
return df
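# Illustrative usage (a sketch only; the instrument codes below are examples, and live
# network access to the corresponding data sources is assumed):
# get_daily("SH510300", prev=30)                              # Shanghai-listed ETF via Xueqiu
# get_daily("indices/germany-30", prev=30)                    # DAX 30 via cn.investing.com
# get_daily("USD/CNY", start="2020-01-01", end="2020-06-30")  # RMB central parity rate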
def _float(n):
try:
n = n.replace(",", "")
except AttributeError:
pass
return float(n)
def get_xueqiu_rt(code, token="a664afb60c7036c7947578ac1a5860c4cfb6b3b5"):
url = "https://stock.xueqiu.com/v5/stock/quote.json?symbol={code}&extend=detail"
r = rget(
url.format(code=code),
cookies={"xq_a_token": token},
headers={"user-agent": "Mozilla/5.0"},
)
r = r.json()
n = r["data"]["quote"]["name"]
q = r["data"]["quote"]["current"]
q_ext = r["data"]["quote"].get("current_ext", None)
percent = r["data"]["quote"]["percent"]
currency = r["data"]["quote"]["currency"]
return {
"name": n,
"current": _float(q),
"percent": _float(percent),
"current_ext": _float(q_ext) if q_ext else None,
"currency": currency,
}
def get_cninvesting_rt(suburl):
url = "https://cn.investing.com"
if not suburl.startswith("/"):
url += "/"
url += suburl
r = rget(
url,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36"
},
)
s = BeautifulSoup(r.text, "lxml")
last_last = s.find("span", id="last_last")
q = _float(last_last.string)
name = s.find("h1").string.strip()
ind = 0
l = s.find("div", class_="lighterGrayFont").contents
for i, c in enumerate(l):
if isinstance(c, str) and c.strip() == "货币":
ind = i
break
if ind == 0:
currency = None
else:
currency = l[ind - 1].string
percent = _float(
s.find("span", attrs={"dir": "ltr", "class": "parentheses"}).string[:-1]
)
panhou = s.find("div", class_="afterHoursInfo")
if panhou:
q_ext = _float(panhou.find("span").string)
else:
q_ext = None
return {
"name": name,
"current": q,
"current_ext": q_ext,
"currency": currency,
"percent": percent,
}
def get_rt(code, _from=None):
"""
    Universal fetcher for the real-time price of virtually anything.
    :param code: str. Same code rules as :func:`get_daily`. Note that OTC funds and the RMB central
        parity rate have no real-time quotes, since they are priced only once per day. For the
        investing.com data source, only URL-style codes are supported.
    :param _from: Optional[str]. Can be one of "xueqiu", "investing". Only used for debugging to
        enforce a data source. For common use, _from is chosen automatically from the code at run time.
    :return: Dict[str, Any].
        Always contains "name", "current" and "percent"; "current_ext" (after-hours price) and
        "currency" (quote currency) may be ``None``.
"""
if not _from:
if len(code.split("/")) > 1:
_from = "investing"
elif code.startswith("HK") and code[2:].isdigit():
_from = "xueqiu"
code = code[2:]
else:
_from = "xueqiu"
if _from in ["cninvesting", "investing"]:
return get_cninvesting_rt(code)
elif _from in ["xueqiu", "xq", "snowball"]:
return get_xueqiu_rt(code, token=get_token())
get_realtime = get_rt
_cached_data = {}
def reset_cache():
"""
clear all cache of daily data
:return: None.
"""
global _cached_data
_cached_data = {}
def cached(s):
"""
Usage as follows:
.. code-block:: python
@cached("20170101")
def get_daily(*args, **kws):
return xa.get_daily(*args, **kws)
Automatically cache the result in memory and avoid refetching
:param s: str. eg. "20160101", the starting date of cached table.
:return: wrapped function.
"""
def cached_start(f):
@wraps(f)
def wrapper(*args, **kws):
if args:
code = args[0]
else:
code = kws.get("code")
start = kws.get("start", None)
end = kws.get("end", None)
prev = kws.get("prev", None)
if not prev:
prev = 365
if not end:
end_obj = today_obj()
else:
end_obj = dstr2dobj(end)
if not start:
start_obj = end_obj - dt.timedelta(prev)
else:
start_obj = dstr2dobj(start)
start_str = start_obj.strftime("%Y%m%d")
end_str = end_obj.strftime("%Y%m%d")
kws["start"] = s
kws["end"] = dt.datetime.now().strftime("%Y%m%d")
global _cached_data
_cached_data.setdefault(s, {})
if code not in _cached_data[s]:
df = f(*args, **kws)
# print("cached %s" % code)
_cached_data[s][code] = df
else:
pass
# print("directly call cache")
df = _cached_data[s][code]
df = df[df["date"] <= end_str]
df = df[df["date"] >= start_str]
return df
return wrapper
return cached_start
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
def convert_to_int(rom_num, num = 0):
if len(rom_num) == 0:
return num
else:
if rom_num[0] == 'M':
return convert_to_int(rom_num[1:], num + 1000)
elif rom_num[:2] == 'CM':
return convert_to_int(rom_num[2:], num + 900)
elif rom_num[0] == 'D':
return convert_to_int(rom_num[1:], num + 500)
elif rom_num[:2] == 'CD':
return convert_to_int(rom_num[2:], num + 400)
elif rom_num[0] == 'C':
return convert_to_int(rom_num[1:], num + 100)
elif rom_num[:2] == 'XC':
return convert_to_int(rom_num[2:], num + 90)
elif rom_num[0] == 'L':
return convert_to_int(rom_num[1:], num + 50)
elif rom_num[:2] == 'XL':
return convert_to_int(rom_num[2:], num + 40)
elif rom_num[0] == 'X':
return convert_to_int(rom_num[1:], num + 10)
elif rom_num[:2] == 'IX':
return convert_to_int(rom_num[2:], num + 9)
elif rom_num[0] == 'V':
return convert_to_int(rom_num[1:], num + 5)
elif rom_num[:2] == 'IV':
return convert_to_int(rom_num[2:], num + 4)
elif rom_num[0] == 'I':
return convert_to_int(rom_num[1:], num + 1)
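# Example (illustrative): convert_to_int('MCMXCIV') returns 1994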
print(convert_to_int(input('Enter Roman numerals to convert to integer: ')))
|
nilq/baby-python
|
python
|
INPUTS_ROOT_PATH = "./dragons_test_inputs/geminidr/gmos/longslit/"
|
nilq/baby-python
|
python
|
from BasicTypeAttr import BasicTypeAttr
class DecimalAttr(BasicTypeAttr):
# @@ 2003-01-14 ce: it would make more sense if the Float type spewed a
# SQL decimal type in response to having "precision" and "scale" attributes.
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from PIL import Image
from io import BytesIO
import numpy as np
import matplotlib.pyplot as plt
import os
import gmaps
import requests
import google_streetview.api
from src import settings
class GoogleImages(object):
"""Save pictures from google using the lat lon."""
def __init__(self, show=False):
"""Initiator.
:arg show: (bool) show or not the images
"""
self.key = settings.google_key
self.show = show
self.size = "600x300"
self.zoom = "16"
self.roadmap = "roadmap"
self.base_url = "https://maps.googleapis.com/maps/api/staticmap?"
self.url = "{base_url}center={lat}+{lng}&zoom={zoom}&size={size}&maptype={roadmap}&key={key}"
gmaps.configure(api_key=self.key)
def show_img(self, img):
"""Show the picture.
:arg img: (PIL) image
"""
if self.show:
plt.imshow(img)
plt.show()
@staticmethod
def save_image(img, lat, lng):
"""Save the picture into the directory.
:param img: (PIL) image
:param lat: (float) latitude
:param lng: (float) longitude
"""
path = os.path.join(settings.IMAGE_GPS_PATH, f"{lat}+{lng}.jpg")
img.save(path)
def image_gps(self, lat, lng):
"""Get image from google maps api.
:arg lat: (float) latitude
:arg lng: (float) longitude
"""
url = self.url.format(**{
"lat": lat, "lng": lng, "key": self.key, "size": self.size,
"zoom": self.zoom, "roadmap": self.roadmap, "base_url": self.base_url
})
response = requests.get(url)
img = Image.open(BytesIO(response.content)).convert("RGB")
self.show_img(img)
self.save_image(img, lat, lng)
return np.asarray(img)
def image_street(self, lat, lng):
"""Get image from google street api.
:arg lat: (float) latitude
:arg lng: (float) longitude
"""
directory = f"{lat}+{lng}"
for head in ["0", "090", "180", "270"]:
params = [
{
"size": "300x200", "location": f"{lat},{lng}",
"heading": head, "pitch": "0", "fov": "90", "key": self.key
}
]
response = google_streetview.api.results(params)
path = os.path.join(settings.IMAGE_STREET_PATH, directory)
response.download_links(f"{path}/{head}")
if __name__ == '__main__':
GoogleImages(show=False).image_gps(48.8584, 2.29466)
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from src.customer import forms
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.conf import settings
import firebase_admin
from firebase_admin import credentials, auth, messaging
import stripe
from src.models import *
import requests
# Firebase Configuration
cred = credentials.Certificate({
"type": settings.FIREBASE_TYPE,
"project_id": settings.FIREBASE_PROJECT_ID,
"private_key_id": settings.FIREBASE_PRIVATE_KEY_ID,
"private_key": settings.FIREBASE_PRIVATE_KEY,
"client_email": settings.FIREBASE_CLIENT_EMAIL,
"client_id": settings.FIREBASE_CLIENT_ID,
"auth_uri": settings.FIREBASE_AUTH_URI,
"token_uri": settings.FIREBASE_TOKEN_URI,
"auth_provider_x509_cert_url": settings.FIREBASE_AUTH_PROVIDER_X509_CERT_URL,
"client_x509_cert_url": settings.FIREBASE_CLIENT_X509_CERT_URL,
})
firebase_admin.initialize_app(cred)
# stripe setup
stripe.api_key = settings.STRIPE_API_SECRET_KEY
# write your views here
@login_required()
def home(request):
return redirect(reverse('customer:profile'))
@login_required(login_url='/sign-in/?next=/customer/')
def profile_page(request):
user_form = forms.BasicUserForm(instance=request.user)
customer_form = forms.BasicCustomerForm(instance=request.user.customer)
change_password_form = PasswordChangeForm(request.user)
if request.method == 'POST':
if request.POST.get('action') == 'update_profile':
user_form = forms.BasicUserForm(request.POST, instance=request.user)
customer_form = forms.BasicCustomerForm(request.POST,request.FILES, instance=request.user.customer)
if user_form.is_valid() and customer_form.is_valid():
user_form.save()
customer_form.save()
messages.success(request, 'Your Profile has been updated successfully!')
return redirect(reverse('customer:profile'))
elif request.POST.get('action') == 'update_password':
change_password_form = PasswordChangeForm(request.user, request.POST)
if change_password_form.is_valid():
user = change_password_form.save()
update_session_auth_hash(request, user)
messages.success(request, 'Your Password has been updated successfully!')
return redirect(reverse('customer:profile'))
elif request.POST.get('action') == 'update_phone':
# Get Firebase user data
firebase_user = auth.verify_id_token(request.POST.get('id_token'))
request.user.customer.phone_number = firebase_user['phone_number']
request.user.customer.save()
return redirect(reverse('customer:profile'))
context = {
'user_form': user_form,
'customer_form': customer_form,
'change_password_form': change_password_form,
# firebase configuration
'FIREBASE_API_KEY': settings.FIREBASE_API_KEY,
'FIREBASE_AUTH_DOMAIN': settings.FIREBASE_AUTH_DOMAIN,
'FIREBASE_PROJECT_ID': settings.FIREBASE_PROJECT_ID,
'FIREBASE_STORAGE_BUCKET': settings.FIREBASE_STORAGE_BUCKET,
'FIREBASE_MESSAGING_SENDER_ID': settings.FIREBASE_MESSAGING_SENDER_ID,
'FIREBASE_APP_ID': settings.FIREBASE_APP_ID,
}
return render(request, 'customer/profile.html', context)
@login_required(login_url='/sign-in/?next=/customer/')
def payment_method_page(request):
current_customer = request.user.customer
# remove existing card
if request.method == 'POST':
stripe.PaymentMethod.detach(current_customer.stripe_payment_method_id)
current_customer.stripe_payment_method_id = ""
current_customer.stripe_card_last4 = ""
current_customer.save()
return redirect(reverse('customer:payment_method'))
# save stripe customer info
if not current_customer.stripe_customer_id:
customer = stripe.Customer.create()
current_customer.stripe_customer_id = customer['id']
current_customer.save()
# Get stripe payment method of the customer
stripe_payment_methods = stripe.PaymentMethod.list(customer=current_customer.stripe_customer_id, type="card")
if stripe_payment_methods and len(stripe_payment_methods.data) > 0:
payment_method = stripe_payment_methods.data[0]
current_customer.stripe_payment_method_id = payment_method.id
current_customer.stripe_card_last4 = payment_method.card.last4
current_customer.save()
else:
current_customer.stripe_payment_method_id = ""
current_customer.stripe_card_last4 = ""
current_customer.save()
if not current_customer.stripe_payment_method_id:
intent = stripe.SetupIntent.create(customer = current_customer.stripe_customer_id)
context = {
"client_secret": intent.client_secret,
"STRIPE_API_PUBLIC_KEY": settings.STRIPE_API_PUBLIC_KEY,
}
return render(request, 'customer/payment_method.html', context)
else:
return render(request, 'customer/payment_method.html')
@login_required(login_url='/sign-in/?next=/customer/')
def create_job_page(request):
current_customer = request.user.customer
if not current_customer.stripe_payment_method_id:
return redirect(reverse('customer:payment_method'))
has_current_job = Job.objects.filter(
customer=current_customer,
status__in=[
Job.PROCESSING_STATUS,
Job.PICKING_STATUS,
Job.DELIVERING_STATUS,
]
).exists()
if has_current_job:
messages.warning(request, "You currently have an active job.")
return redirect(reverse('customer:current_jobs'))
creating_job = Job.objects.filter(customer=current_customer, status=Job.CREATING_STATUS).last()
step1_form = forms.JobCreateStep1Form(instance=creating_job)
step2_form = forms.JobCreateStep2Form(instance=creating_job)
step3_form = forms.JobCreateStep3Form(instance=creating_job)
if request.method == 'POST':
if request.POST.get('step') == '1':
step1_form = forms.JobCreateStep1Form(request.POST, request.FILES, instance=creating_job)
if step1_form.is_valid():
creating_job = step1_form.save(commit=False)
creating_job.customer = current_customer
creating_job.save()
return redirect(reverse('customer:create_job'))
elif request.POST.get('step') == '2':
step2_form = forms.JobCreateStep2Form(request.POST, instance=creating_job)
if step2_form.is_valid():
creating_job = step2_form.save()
return redirect(reverse('customer:create_job'))
elif request.POST.get('step') == '3':
step3_form = forms.JobCreateStep3Form(request.POST, instance=creating_job)
if step3_form.is_valid():
creating_job = step3_form.save()
try:
r = requests.get(f"https://maps.google.com/maps/api/distancematrix/json?origins={creating_job.pickup_address}&destinations={creating_job.delivery_address}&mode=transit&key={settings.GOOGLE_API_KEY}")
distance = r.json()['rows'][0]['elements'][0]['distance']['value']
duration = r.json()['rows'][0]['elements'][0]['duration']['value']
creating_job.distance = round(distance / 1000, 2)
creating_job.duration = round(duration / 60)
creating_job.price = round(creating_job.distance * 1, 2) # $1 per km
creating_job.save()
except Exception as e:
print(e)
messages.error(request, "Unfortunately, we do not support shipping at this distance")
return redirect(reverse('customer:create_job'))
elif request.POST.get('step') == '4':
if creating_job.price:
try:
payment_intent = stripe.PaymentIntent.create(
amount=int(creating_job.price * 100),
currency='inr',
customer=current_customer.stripe_customer_id,
payment_method=current_customer.stripe_payment_method_id,
off_session=True,
confirm=True,
)
Transaction.objects.create(
stripe_payment_intent_id = payment_intent['id'],
job = creating_job,
amount = creating_job.price,
)
creating_job.status = Job.PROCESSING_STATUS
creating_job.save()
# send the push notification to all couriers
couriers = Courier.objects.all()
registration_tokens = [i.fcm_token for i in couriers if i.fcm_token]
message = messaging.MulticastMessage(
notification=messaging.Notification(
title=creating_job.job_name,
body=creating_job.description,
),
webpush = messaging.WebpushConfig(
notification=messaging.WebpushNotification(
icon=creating_job.photo.url,
),
fcm_options=messaging.WebpushFCMOptions(
link = settings.NOTIFICATION_URL + reverse('courier:available_jobs'),
),
),
tokens = registration_tokens,
)
response = messaging.send_multicast(message)
print(response)
print(f'{response.success_count} messages were sent successfully.')
return redirect(reverse('customer:home'))
except stripe.error.CardError as e:
err = e.error
# Error code will be authentication_required if authentication is needed
print("Code is: %s" % err.code)
payment_intent_id = err.payment_intent['id']
payment_intent = stripe.PaymentIntent.retrieve(payment_intent_id)
# Determine the current step
if not creating_job:
current_step = 1
elif creating_job.delivery_name:
current_step = 4
elif creating_job.pickup_name:
current_step = 3
else:
current_step = 2
context = {
'job': creating_job,
'step' : current_step,
'GOOGLE_API_KEY': settings.GOOGLE_API_KEY,
'step1_form': step1_form,
'step2_form': step2_form,
'step3_form': step3_form,
}
return render(request, 'customer/create_job.html', context)
@login_required(login_url='/sign-in/?next=/customer/')
def current_jobs_page(request):
jobs = Job.objects.filter(
customer = request.user.customer,
status__in=[
Job.PROCESSING_STATUS,
Job.PICKING_STATUS,
Job.DELIVERING_STATUS
]
)
context = {
"jobs": jobs,
}
return render(request, 'customer/jobs.html', context)
@login_required(login_url='/sign-in/?next=/customer/')
def archived_jobs_page(request):
jobs = Job.objects.filter(
customer = request.user.customer,
status__in=[
Job.COMPLETED_STATUS,
Job.CANCELLED_STATUS,
]
)
context = {
"jobs": jobs,
}
return render(request, 'customer/jobs.html', context)
@login_required(login_url='/sign-in/?next=/customer/')
def job_details_page(request, job_id):
    job = Job.objects.get(id=job_id, customer=request.user.customer)
    if request.method == 'POST' and job.status == Job.PROCESSING_STATUS:
job.status = Job.CANCELLED_STATUS
job.save()
return redirect(reverse('customer:archived_jobs'))
context = {
'job': job,
"GOOGLE_API_KEY": settings.GOOGLE_API_KEY,
}
return render(request, 'customer/job_details.html', context)
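# A hedged sketch (not part of the original views above) of the step-3 pricing rule:
# the Distance Matrix response reports metres and seconds, which the view converts to
# kilometres and minutes and prices at a flat $1 per kilometre. The function name and
# the sample figures below are illustrative only.
def _example_job_pricing(distance_in_metres, duration_in_seconds, rate_per_km=1):
    distance_km = round(distance_in_metres / 1000, 2)   # metres -> km, 2 decimals
    duration_min = round(duration_in_seconds / 60)      # seconds -> whole minutes
    price = round(distance_km * rate_per_km, 2)
    return distance_km, duration_min, price
# _example_job_pricing(8400, 1500) -> (8.4, 25, 8.4)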
|
nilq/baby-python
|
python
|
"""
owtf.settings
~~~~~~~~~~~~~
It contains all the owtf global configs.
"""
import os
import re
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
import yaml
HOME_DIR = os.path.expanduser("~")
OWTF_CONF = os.path.join(HOME_DIR, ".owtf")
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_DIR = os.path.join(ROOT_DIR, "data", "conf")
DEBUG = True
# Used by tools like dirbuster to launch gui or cli versions
INTERACTIVE = True
# Database Server
# Change this if you deploy OWTF to a public facing server
DATABASE_PASS = "jgZKW33Q+HZk8rqylZxaPg1lbuNGHJhgzsq3gBKV32g="
DATABASE_NAME = "owtf_db"
DATABASE_USER = "owtf_db_user"
DATABASE_IP = "127.0.0.1"
DATABASE_PORT = 5432
# API and UI Server
SERVER_ADDR = "0.0.0.0"
SERVER_PORT = 8009
FILE_SERVER_PORT = 8010
# Default API version
DEFAULT_API_VERSION = "v1"
# Application secret
# Change this
APP_SECRET = "changeme"
SESSION_COOKIE_NAME = "owtf-session"
# CORS settings. Fine grained, do not override if possible.
SIMPLE_HEADERS = ["accept", "accept-language", "content-language"]
ALLOWED_ORIGINS = ["http:/localhost:8009", "http://localhost:8010"]
ALLOWED_METHODS = ["GET", "POST", "DELETE"]
SEND_CREDENTIALS = False
# ERROR reporting
USE_SENTRY = False
SENTRY_API_KEY = ""
# IMP PATHS
WEB_TEST_GROUPS = os.path.join(OWTF_CONF, "conf", "profiles", "plugin_web", "groups.cfg")
NET_TEST_GROUPS = os.path.join(OWTF_CONF, "conf", "profiles", "plugin_net", "groups.cfg")
AUX_TEST_GROUPS = os.path.join(OWTF_CONF, "conf", "profiles", "plugin_aux", "groups.cfg")
PLUGINS_DIR = os.path.join(ROOT_DIR, "plugins")
# Output Settings
OUTPUT_PATH = "owtf_review"
AUX_OUTPUT_PATH = "owtf_review/auxiliary"
NET_SCANS_PATH = "owtf_review/scans"
# The name of the directories relative to output path
TARGETS_DIR = "targets"
WORKER_LOG_DIR = "logs"
# Default profile settings
DEFAULT_GENERAL_PROFILE = os.path.join(OWTF_CONF, "conf", "general.yaml")
DEFAULT_FRAMEWORK_CONFIG = os.path.join(OWTF_CONF, "conf", "framework.yaml")
DEFAULT_RESOURCES_PROFILE = os.path.join(OWTF_CONF, "conf", "resources.cfg")
DEFAULT_WEB_PLUGIN_ORDER_PROFILE = os.path.join(OWTF_CONF, "conf", "profiles", "plugin_web", "order.cfg")
DEFAULT_NET_PLUGIN_ORDER_PROFILE = os.path.join(OWTF_CONF, "conf", "profiles", "plugin_net", "order.cfg")
# logs_dir can be both relative or absolute path ;)
LOGS_DIR = "logs"
# Used for logging in OWTF
OWTF_LOG_FILE = "/tmp/owtf.log"
# Interface static folders
TEMPLATES = os.path.join(OWTF_CONF, "build")
STATIC_ROOT = os.path.join(OWTF_CONF, "build")
# SMTP
EMAIL_FROM = "you@your_server.com"
SMTP_LOGIN = "login@your_server.com"
SMTP_PASS = "your_password"
SMTP_HOST = "your_mail_server.com"
SMTP_PORT = 25
# OUTBOUND PROXY
USE_OUTBOUND_PROXY = False
OUTBOUND_PROXY_IP = ""
OUTBOUND_PROXY_PORT = ""
OUTBOUND_PROXY_AUTH = None
# Inbound Proxy Configuration
INBOUND_PROXY_IP = "127.0.0.1"
INBOUND_PROXY_PORT = 8008
INBOUND_PROXY_PROCESSES = 0
INBOUND_PROXY_CACHE_DIR = "/tmp/owtf/proxy-cache"
CA_CERT = os.path.join(OWTF_CONF, "proxy", "certs", "ca.crt")
CA_KEY = os.path.join(OWTF_CONF, "proxy", "certs", "ca.key")
CA_PASS_FILE = os.path.join(OWTF_CONF, "proxy", "certs", "ca_pass.txt")
CERTS_FOLDER = os.path.join(OWTF_CONF, "proxy", "certs")
BLACKLIST_COOKIES = ["_ga", "__utma", "__utmb", "__utmc", "__utmz", "__utmv"]
WHITELIST_COOKIES = ""
PROXY_RESTRICTED_RESPONSE_HEADERS = [
"Content-Length",
"Content-Encoding",
"Etag",
"Transfer-Encoding",
"Connection",
"Vary",
"Accept-Ranges",
"Pragma",
]
PROXY_RESTRICTED_REQUEST_HEADERS = ["Connection", "Pragma", "Cache-Control", "If-Modified-Since"]
PROXY_LOG = "/tmp/owtf/proxy.log"
# Define regex patterns
REGEXP_FILE_URL = (
    r"^[^\?]+\.(xml|exe|pdf|cs|log|inc|dat|bak|conf|cnf|old|zip|7z|rar|tar|gz|bz2|txt|xls|xlsx|doc|docx|ppt|pptx)$"
)
# Potentially small files will be retrieved for analysis
REGEXP_SMALL_FILE_URL = r"^[^\?]+\.(xml|cs|inc|dat|bak|conf|cnf|old|txt)$"
REGEXP_IMAGE_URL = r"^[^\?]+\.(jpg|jpeg|png|gif|bmp)$"
# Any http/ftp URL is considered valid; SSI pages are identified by their extension
REGEXP_VALID_URL = r"^(http|ftp)[^ ]+$"
REGEXP_SSI_URL = r"^[^\?]+\.(shtml|shtm|stm)$"
# Compile regular expressions once at the beginning for speed purposes:
is_file_regex = re.compile(REGEXP_FILE_URL, re.IGNORECASE)
is_small_file_regex = re.compile(REGEXP_SMALL_FILE_URL, re.IGNORECASE)
is_image_regex = re.compile(REGEXP_IMAGE_URL, re.IGNORECASE)
is_url_regex = re.compile(REGEXP_VALID_URL, re.IGNORECASE)
is_ssi_regex = re.compile(REGEXP_SSI_URL, re.IGNORECASE)
# UI
SERVER_LOG = "/tmp/owtf/ui_server.log"
FILE_SERVER_LOG = "/tmp/owtf/file_server.log"
# HTTP_AUTH
HTTP_AUTH_HOST = None
HTTP_AUTH_USERNAME = None
HTTP_AUTH_PASSWORD = None
HTTP_AUTH_MODE = "basic"
# Memory
RESOURCE_MONITOR_PROFILER = 0
PROCESS_PER_CORE = 1
MIN_RAM_NEEDED = 20
# misc
DATE_TIME_FORMAT = "%d/%m/%Y-%H:%M"
REPLACEMENT_DELIMITER = "@@@"
REPLACEMENT_DELIMITER_LENGTH = len(REPLACEMENT_DELIMITER)
CONFIG_TYPES = ["string", "other"]
USER_AGENT = "Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/15.0"
PROXY_CHECK_URL = "http://www.google.ie"
# Fallback
FALLBACK_WEB_TEST_GROUPS = os.path.join(ROOT_DIR, "data", "conf", "profiles", "plugin_web", "groups.cfg")
FALLBACK_NET_TEST_GROUPS = os.path.join(ROOT_DIR, "data", "conf", "profiles", "plugin_net", "groups.cfg")
FALLBACK_AUX_TEST_GROUPS = os.path.join(ROOT_DIR, "data", "conf", "profiles", "plugin_aux", "groups.cfg")
FALLBACK_PLUGINS_DIR = os.path.join(ROOT_DIR, "data", "plugins")
FALLBACK_GENERAL_PROFILE = os.path.join(ROOT_DIR, "data", "conf", "general.yaml")
FALLBACK_FRAMEWORK_CONFIG = os.path.join(ROOT_DIR, "data", "conf", "framework.yaml")
FALLBACK_RESOURCES_PROFILE = os.path.join(ROOT_DIR, "data", "conf", "resources.cfg")
FALLBACK_WEB_PLUGIN_ORDER_PROFILE = os.path.join(ROOT_DIR, "data", "conf", "profiles", "plugin_web", "order.cfg")
FALLBACK_NET_PLUGIN_ORDER_PROFILE = os.path.join(ROOT_DIR, "data", "conf", "profiles", "plugin_net", "order.cfg")
# Override the values
local_conf = os.path.join(OWTF_CONF, "settings.py")
try:
with open(local_conf) as f:
settings = compile(f.read(), local_conf, "exec")
exec(settings, globals(), locals())
except FileNotFoundError:
pass
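# A hedged example (not shipped with OWTF) of what a local override file at
# ~/.owtf/settings.py could contain; any top-level name defined there replaces the
# corresponding default above, because the file is exec()'d into this module's globals:
#
#   SERVER_PORT = 8899
#   USE_OUTBOUND_PROXY = True
#   OUTBOUND_PROXY_IP = "127.0.0.1"
#   OUTBOUND_PROXY_PORT = "3128"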
|
nilq/baby-python
|
python
|
_base_ = [
'../_base_/models/fcn_hr18.py', '../_base_/datasets/vaihingen.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
evaluation = dict(interval=288, metric='mIoU', pre_eval=True, save_best='mIoU')
model = dict(decode_head=dict(num_classes=6))
|
nilq/baby-python
|
python
|
# Generated by Django 3.0 on 2019-12-09 16:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_auto_20191208_2110'),
]
operations = [
migrations.RenameField(
model_name='game',
old_name='name',
new_name='names',
),
]
|
nilq/baby-python
|
python
|
from xml.dom.ext.reader.Sax import FromXmlFile
from xml.dom.NodeFilter import NodeFilter
from place import Place
class PlaceXml:
def __init__(self, filename, places):
root = FromXmlFile(filename)
walker = root.createTreeWalker(root.documentElement,
NodeFilter.SHOW_ELEMENT, None, 0)
while 1:
nodeName = walker.currentNode.nodeName
attribs = walker.currentNode.attributes
if nodeName == 'game':
self.startingPlace = attribs['startingPlace'].value
elif nodeName == 'place':
placeName = attribs['name'].value
desc = attribs['description'].value
currentPlace = Place(placeName, desc)
places[placeName] = currentPlace
elif nodeName == 'object':
currentPlace.addObject(attribs['name'].value)
elif nodeName == 'connection':
currentPlace.addConnection(attribs['place'].value)
next = walker.nextNode()
if next is None: break
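# A hedged example (not shipped with this module) of the XML layout the tree walker
# above expects: a 'game' element naming the starting place, 'place' elements carrying
# name/description attributes, and 'object'/'connection' children listed after the
# place they belong to. The element values below are illustrative only.
#
#   <game startingPlace="cave">
#     <place name="cave" description="A damp cave.">
#       <object name="torch"/>
#       <connection place="forest"/>
#     </place>
#     <place name="forest" description="A quiet forest.">
#       <connection place="cave"/>
#     </place>
#   </game>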
|
nilq/baby-python
|
python
|
def marks(code):
if '.' in code:
another(code[:code.index(',') - 1] + '!')
else:
another(code + '.')
def another(code2):
call(numbers(code2 + 'haha'))
marks('start1 ')
marks('start2 ')
def alphabet(code4):
if 1:
if 2:
return code4 + 'a'
else:
return code4 + 'b'
else:
if 2:
return code4 + 'c'
else:
return code4 + 'd'
def numbers(code5):
if 2:
return alphabet(code5 + '1')
else:
return alphabet(code5 + '2')
def call(code3):
code3 = numbers(numbers('end')) + numbers(code3)
code3.partition
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import functools
import logging
from errno import ENOENT, EINVAL
from stat import S_IFDIR, S_IFLNK, S_IFREG
import _thread
from fuse import FUSE, FuseOSError, Operations
from zfs import datasets
from zfs import posix
from zfs.posix.attributes import PosixType
logger = logging.getLogger(__name__)
def locked(f):
@functools.wraps(f)
def inner(self, *a, **kw):
with self.pool_lock:
return f(self, *a, **kw)
return inner
class ZFSFuse(Operations):
def __init__(self, pool=None):
self.pool = pool
self.fd = 0
self.pool_lock = _thread.allocate_lock()
logger.critical('...')
@locked
def getattr(self, path, fh=None):
try:
obj = self.pool.open(path)
if path.endswith('etc/resolv.conf'):
                logger.debug(f'getattr on resolv.conf: {obj} {obj.attrs} {obj.dnode.index}')
if isinstance(obj, datasets.Dataset):
obj = obj.root_directory
if isinstance(obj, posix.PosixObject):
attrs = obj.attrs
mode = attrs['ZPL_MODE'].perms
logger.debug(f'{path}, {attrs.keys()}')
logger.debug(mode)
if isinstance(obj, posix.Directory):
mode |= S_IFDIR
elif 'ZPL_SYMLINK' in attrs or attrs['ZPL_MODE'].file_type == PosixType.SYMLINK:
mode |= S_IFLNK
elif isinstance(obj, posix.File):
mode |= S_IFREG
return {
'st_mode': mode,
'st_uid': attrs['ZPL_UID'],
'st_gid': attrs['ZPL_GID'],
'st_size': attrs['ZPL_SIZE'],
'st_mtime': attrs['ZPL_MTIME'].seconds,
'st_atime': attrs['ZPL_ATIME'].seconds,
'st_ctime': attrs['ZPL_CTIME'].seconds,
}
else:
return {}
except Exception as e:
logger.exception('error in getattr')
raise FuseOSError(ENOENT)
def getxattr(self, path, name, position=0):
return b''
def listxattr(self, path):
return []
def open(self, path, flags):
self.fd += 1
return self.fd
@locked
def readlink(self, path):
try:
logger.debug(f'attempted to readlink {path}')
obj = self.pool.open(path)
return obj.attrs['ZPL_SYMLINK']
except Exception as e:
logger.exception(f'readlink failed for {path}')
raise FuseOSError(ENOENT)
@locked
def read(self, path, size, offset, fh):
try:
return self.pool.read_file(path)[offset:offset+size]
except Exception as e:
logger.exception("error in read")
raise FuseOSError(EINVAL)
@locked
def readdir(self, path, fh):
try:
names = ['.', '..']
for name in self.pool.open(path).keys():
if isinstance(name, bytes):
name = name.decode('utf8')
names.append(name)
logger.info(' '.join(names))
return names
except Exception as e:
logger.exception("error in readdir")
raise FuseOSError(EINVAL)
def statfs(self, path):
return dict(f_bsize=512, f_blocks=4096, f_bavail=2048)
def mount(pool, mountpoint):
zf = ZFSFuse(pool)
fuse = FUSE(zf, mountpoint,
foreground=True,
rdonly=True,
nobrowse=True,
jail_symlinks=True,
nolocalcaches=True,
# debug=True,
)
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Python setuptools setup."""
import os
from setuptools import find_namespace_packages, setup
def get_verified_absolute_path(path):
"""Verify and return absolute path of argument.
Args:
path : Relative/absolute path
Returns:
Absolute path
"""
installed_path = os.path.abspath(path)
if not os.path.exists(installed_path):
raise RuntimeError("No valid path for requested component exists")
return installed_path
def get_installation_requirments(file_path):
"""Parse pip requirements file.
Args:
file_path : path to pip requirements file
Returns:
list of requirement strings
"""
with open(file_path, 'r') as file:
requirements_file_content = \
[line.strip() for line in file if
line.strip() and not line.lstrip().startswith('#')]
return requirements_file_content
# Get current dir (pyclaragenomics folder is copied into a temp directory
# created by pip)
current_dir = os.path.dirname(os.path.realpath(__file__))
# Classifiers for PyPI
pyaw_classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9"
]
required_packages = \
get_installation_requirments(
get_verified_absolute_path(
os.path.join(current_dir, 'requirements.txt'))
)
setup(name='geps',
description='NVIDIA GWAS Epistatic Phenotype Simulator',
author='NVIDIA Corporation',
url="https://github.com/clara-parabricks/GEPSi",
include_package_data=True,
install_requires=required_packages,
packages=find_namespace_packages(),
python_requires='>=3.6',
long_description='Python libraries and utilities for manipulating '
'genomics data',
classifiers=pyaw_classifiers,
entry_points={'console_scripts': ['gepsi = scripts.main:main']},
data_files=[
('configs', ['configs/genotype.yaml',
'configs/phenotype.yaml'])],
platforms=['any'],
)
|
nilq/baby-python
|
python
|
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")
generator_param = {}
inferrence_param = {}
steps_per_epoch = 10
generator_param["type"] = "generator"
generator_param["name"] = "FmriGenerator"
generator_param["pre_post_x"] = 3
generator_param["pre_post_y"] = 3
generator_param["pre_post_z"] = 3
generator_param["pre_post_t"] = 1
generator_param[
"train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/studyimagenet/derivatives-preproc-spm-output-sub-02-ses-perceptionTraining01-func-sub-02_ses-perceptionTraining01_task-perception_run-01_bold_preproc.nii"
generator_param["batch_size"] = 100
generator_param["start_frame"] = 0
generator_param["end_frame"] = 100
generator_param["total_nb_block"] = 10
generator_param["steps_per_epoch"] = steps_per_epoch
inferrence_param["type"] = "inferrence"
inferrence_param["name"] = "fmri_inferrence"
inferrence_param[
"model_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/trained_fmri_models/fmri_volume_dense_denoiser_mean_absolute_error_2020_08_08_01_05_2020_08_08_01_05/2020_08_08_01_05_fmri_volume_dense_denoiser_mean_absolute_error_2020_08_08_01_05-1640-0.0474.h5"
inferrence_param[
"output_file"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/studyimagenet/denoised/fmri_volume_denoiser_mean_absolute_error_task_full_7.h5"
jobdir = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/studyimagenet/denoised"
try:
os.mkdir(jobdir)
except FileExistsError:
    print("folder already exists")
path_generator = os.path.join(jobdir, "generator.json")
json_obj = JsonSaver(generator_param)
json_obj.save_json(path_generator)
path_infer = os.path.join(jobdir, "inferrence.json")
json_obj = JsonSaver(inferrence_param)
json_obj.save_json(path_infer)
generator_obj = ClassLoader(path_generator)
data_generator = generator_obj.find_and_build()(path_generator)
inferrence_obj = ClassLoader(path_infer)
inferrence_class = inferrence_obj.find_and_build()(path_infer, data_generator)
inferrence_class.run()
|
nilq/baby-python
|
python
|
# An Iterative DFS solution.
class Graph:
def __init__(self, V):
self.V = V
self.adj = [[] for i in range(V)]
def add_edge(self, v, w):
self.adj[v].append(w)
def DFS_util(self, s, visited):
stack = []
stack.append(s)
while (len(stack) != 0):
s = stack.pop()
if (not visited[s]):
print(s, end=" ")
visited[s] = True
i = 0
while i < len(self.adj[s]):
if (not visited[self.adj[s][i]]):
stack.append(self.adj[s][i])
i += 1
def DFS(self):
visited = [False] * self.V
for i in range(self.V):
if (not visited[i]):
self.DFS_util(i, visited)
if __name__ == '__main__':
g = Graph(5)
g.add_edge(1, 0)
g.add_edge(2, 1)
g.add_edge(3, 4)
g.add_edge(4, 0)
print("Following is Depth First Traversal")
g.DFS()
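    # For the edge list above, the outer loop starts a traversal from each still
    # unvisited vertex in index order, so the expected output is: 0 1 2 3 4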
|
nilq/baby-python
|
python
|
import enum
import types as _types
import typing
from importlib import import_module
from .. import exc
_DEFAULT_BACKEND = None
class Backends(enum.Enum):
"""The backends of PyFLocker."""
CRYPTOGRAPHY = "cryptography"
CRYPTODOME = "cryptodome"
def load_algorithm(
name: str, backend: typing.Optional[Backends] = None
) -> _types.ModuleType:
"""Load a specific algorithm from the given ``backend``.
Args:
name (str): The name of the algorithm.
backend (:class:`Backends`): The backend to use.
Returns:
module: Algorithm module from the required backend.
Raises:
UnsupportedAlgorithm:
This is raised if the algorithm is not found in the backend.
"""
_backend = load_backend(backend)
try:
return import_module(f".{name}", _backend.__name__)
except ImportError as e:
raise exc.UnsupportedAlgorithm(
f"{name} is not implemented by backend {backend}."
) from e
def load_backend(
backend: typing.Optional[Backends] = None,
) -> _types.ModuleType:
"""Load a backend.
Args:
backend (:class:`Backends`): An attribute from :class:`Backends` class.
Returns:
module: The backend module.
"""
# Rules:
# 1. if default is present and backend is None: return default
# 2. if backend is given:
# 2.1. don't set default
# 2.2. load that particular backend or raise
# otherwise find a backend or raise
# once the backend is found, set it as default
global _DEFAULT_BACKEND
if backend is None:
if _DEFAULT_BACKEND is None:
_DEFAULT_BACKEND = _find_backend()
return _DEFAULT_BACKEND
# backend is not None
if not isinstance(backend, Backends):
raise TypeError("argument backend must be of type Backends.")
if _DEFAULT_BACKEND is None:
_DEFAULT_BACKEND = _import_helper(backend)
return _DEFAULT_BACKEND
return _import_helper(backend)
def _import_helper(backend):
return import_module(f".{backend.name.lower()}_", __spec__.parent)
def _find_backend():
errors = 0
for i in list(Backends):
try:
return _import_helper(i)
except ImportError:
errors += 1
if errors == len(Backends):
raise ImportError("No backends found.")
|
nilq/baby-python
|
python
|
# generated by update to not change manually
from bungieapi.base import BaseClient, clean_query_value
from bungieapi.forge import forge
from bungieapi.generated.components.responses import booleanClientResponse
from bungieapi.generated.components.responses.social.friends import (
BungieFriendListClientResponse,
BungieFriendRequestListClientResponse,
PlatformFriendClientResponse,
)
from bungieapi.generated.components.schemas.social.friends import PlatformFriendType
class Client(BaseClient):
async def get_friend_list(
self,
) -> BungieFriendListClientResponse:
"""Returns your Bungie Friend list."""
query = None
result = await self.get(
path="/Social/Friends/",
query=query,
)
return forge(BungieFriendListClientResponse, result)
async def get_friend_request_list(
self,
) -> BungieFriendRequestListClientResponse:
"""Returns your friend request queue."""
query = None
result = await self.get(
path="/Social/Friends/Requests/",
query=query,
)
return forge(BungieFriendRequestListClientResponse, result)
async def issue_friend_request(
self,
membership_id: str,
) -> booleanClientResponse:
"""Requests a friend relationship with the target user.
Any of the target user's linked membership ids are valid inputs.
Parameters:
membership_id: The membership id of the user you wish to add.
"""
query = None
result = await self.post(
path=f"/Social/Friends/Add/{clean_query_value(membership_id)}/",
query=query,
)
return forge(booleanClientResponse, result)
async def accept_friend_request(
self,
membership_id: str,
) -> booleanClientResponse:
"""Accepts a friend relationship with the target user.
The user must be on your incoming friend request list, though no error will occur if they are not.
Parameters:
membership_id: The membership id of the user you wish to accept.
"""
query = None
result = await self.post(
path=f"/Social/Friends/Requests/Accept/{clean_query_value(membership_id)}/",
query=query,
)
return forge(booleanClientResponse, result)
async def decline_friend_request(
self,
membership_id: str,
) -> booleanClientResponse:
"""Declines a friend relationship with the target user.
The user must be on your incoming friend request list, though no error will occur if they are not.
Parameters:
membership_id: The membership id of the user you wish to decline.
"""
query = None
result = await self.post(
path=f"/Social/Friends/Requests/Decline/{clean_query_value(membership_id)}/",
query=query,
)
return forge(booleanClientResponse, result)
async def remove_friend(
self,
membership_id: str,
) -> booleanClientResponse:
"""Remove a friend relationship with the target user.
The user must be on your friend list, though no error will occur if they are not.
Parameters:
membership_id: The membership id of the user you wish to remove.
"""
query = None
result = await self.post(
path=f"/Social/Friends/Remove/{clean_query_value(membership_id)}/",
query=query,
)
return forge(booleanClientResponse, result)
async def remove_friend_request(
self,
membership_id: str,
) -> booleanClientResponse:
"""Remove a friend relationship with the target user.
The user must be on your outgoing request friend list, though no error will occur if they are not.
Parameters:
membership_id: The membership id of the user you wish to remove.
"""
query = None
result = await self.post(
path=f"/Social/Friends/Requests/Remove/{clean_query_value(membership_id)}/",
query=query,
)
return forge(booleanClientResponse, result)
async def get_platform_friend_list(
self,
friend_platform: "PlatformFriendType",
page: str,
) -> PlatformFriendClientResponse:
"""Gets the platform friend of the requested type, with additional
information if they have Bungie accounts.
Must have a recent login session with said platform.
Parameters:
friend_platform: The platform friend type.
page: The zero based page to return. Page size is 100.
"""
query = None
result = await self.get(
path=f"/Social/PlatformFriends/{clean_query_value(friend_platform)}/{clean_query_value(page)}/",
query=query,
)
return forge(PlatformFriendClientResponse, result)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from NwalaTextUtils import __version__
desc = """Collection of functions for processing text"""
setup(
name='NwalaTextUtils',
version=__version__,
description=desc,
long_description='See: https://github.com/oduwsdl/NwalaTextUtils/',
author='Alexander C. Nwala',
author_email='alexandernwala@gmail.com',
url='https://github.com/oduwsdl/NwalaTextUtils/',
packages=find_packages(),
license="MIT",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
install_requires=[
'beautifulsoup4',
'boilerpy3>=1.0.4',
'requests',
'tldextract'
]
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
'''Bananagrams solver.'''
import argparse
import logging
import random
from collections import Counter
from itertools import chain
from string import ascii_lowercase
DOWN, ACROSS = 'down', 'across'
BLANK_CHAR = '.'
class WordGrid:
'''Represents a grid of letters and blanks.'''
def __init__(self, grid_words=()):
self._grid_words = list(grid_words)
@property
def empty(self):
'''Whether the grid contains any letters.'''
return not self._grid_words
@property
def words(self):
'''A list of words laid out on this grid.'''
return [word for word, _, _, _ in self._grid_words]
def insert_word(self, word, x, y, direction):
'''Insert a word in the given position. Checks for conflicts.'''
# check for conflicts
for i, char in enumerate(word):
existing = self.letter(x + i if direction == ACROSS else x,
y + i if direction == DOWN else y)
if existing and existing != char:
raise ValueError(f'{word} char {i} conflicts with {existing}')
self._grid_words.append((word, x, y, direction))
def remove_word(self, x, y, direction, word):
'''Remove a word from the grid.'''
self._grid_words.remove((word, x, y, direction))
def copy(self):
'''Return a deep copy of the grid.'''
return WordGrid(self._grid_words)
def letter(self, x, y):
'''Get the letter at the given position on the grid.'''
for word, word_x, word_y, direction in self._grid_words:
if x == word_x and direction == DOWN:
word_coord = y - word_y
elif y == word_y and direction == ACROSS:
word_coord = x - word_x
else:
continue
if 0 <= word_coord < len(word):
return word[word_coord]
return None
def letters(self, x, y, length, direction):
'''Get all letters (and blanks) on the given line segment.'''
if direction == ACROSS:
for i in range(length):
yield self.letter(x + i, y)
elif direction == DOWN:
for i in range(length):
yield self.letter(x, y + i)
else:
raise ValueError(direction)
def bounding_box(self):
'''Calculate the grid's bounding box.
Returns a tuple with the top-left corner's position as the first two
elements and the width and height as the remaining two.
'''
min_x = min((x for _, x, _, _ in self._grid_words), default=0)
min_y = min((y for _, _, y, _ in self._grid_words), default=0)
max_x = max((x + len(word) if direction == ACROSS else x + 1
for word, x, _, direction in self._grid_words),
default=0)
max_y = max((y + len(word) if direction == DOWN else y + 1
for word, _, y, direction in self._grid_words),
default=0)
return min_x, min_y, max_x - min_x, max_y - min_y
def __str__(self):
'''Return a printable representation of the grid.'''
min_x, min_y, width, height = self.bounding_box()
grid = [[BLANK_CHAR] * width for _ in range(height)]
for word, x, y, direction in self._grid_words:
if direction == ACROSS:
grid[y-min_y][x-min_x:x-min_x+len(word)] = list(word)
elif direction == DOWN:
for i, char in enumerate(word):
grid[y-min_y+i][x-min_x] = char
else:
raise ValueError(direction)
return '\n'.join(map(''.join, grid))
def reachable_letters(self):
'''Generate letters not completely surrounded by others.
The grid can be extended by forming words using these letters.
'''
min_x, min_y, width, height = self.bounding_box()
for x in range(min_x, min_x + width):
for y in range(min_y, min_y + height):
letter_here = self.letter(x, y)
if not letter_here:
continue
if not self.letter(x - 1, y) or not self.letter(x + 1, y):
yield letter_here, x, y, ACROSS
if not self.letter(x, y - 1) or not self.letter(x, y + 1):
yield letter_here, x, y, DOWN
def all_words(self):
'''All words laid out on the grid, including "accidental" ones.'''
def columns(grid):
for i in range(min(map(len, grid))):
yield ''.join(line[i] for line in grid)
def words(row_or_col):
return filter(lambda w: len(w) > 1, row_or_col.split(BLANK_CHAR))
grid = str(self).split('\n')
return chain(*map(words, chain(grid, columns(grid))))
def all_words_valid(self, wordlist):
'''Check that all words laid out on the grid are in the word list.'''
return all(map(lambda w: w in wordlist, self.all_words()))
def longest_formable_words(have_letters, wordlist):
'''Return the list of words it is possible to make using the given letters.
This function returns those words sorted by length in descending order
(longest first).
'''
def is_formable(word):
return all(n <= have_letters[l] for l, n in Counter(word).items())
return sorted(filter(is_formable, wordlist), key=len, reverse=True)
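# A hedged illustration (letters and word list invented for the example): from the
# letters of 'banana' every word below is formable, and the longest comes first:
#
#   longest_formable_words(Counter('banana'), ['ban', 'nab', 'banana'])
#   -> ['banana', 'ban', 'nab']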
def solve_grid(letters, wordlist):
'''Generate grids using all the given letters.
This function returns all possible grids using all the given letters, only
generating words from the given word list.
'''
letters = Counter(letters)
# Eliminate impossible words early, so we don't check them every iteration.
wordlist = longest_formable_words(letters, wordlist)
logging.info('word list is %s words long', len(wordlist))
def solve_grid_stage(grid, letters_left):
'''Solve a partially completed grid.
This is a recursive function that takes a partially completed grid and
a Counter of letters left to use, and tries to complete the grid.
This does something like a depth-first search on possible word layouts.
'''
if not letters_left:
# We're done! No letters left, return this grid if it is valid.
logging.debug('no more letters left, grid done!')
# Check the grid contains only valid words.
if grid.all_words_valid(wordlist):
yield grid
else:
logging.debug('grid contains invalid words, discarding')
return
if grid.empty:
# Degenerate initial case.
# Start the grid off by laying out the first word.
for word in longest_formable_words(letters_left, wordlist):
this_grid = grid.copy()
this_grid.insert_word(word, 0, 0, ACROSS)
logging.debug('starting with longest remaining word %s', word)
yield from solve_grid_stage(this_grid,
letters_left - Counter(word))
return
# Loop through letters we can use to form more words, and try extending
# the grid using the letters we have left.
for letter, x, y, reachable_dir in grid.reachable_letters():
logging.debug('can reach %s (%s), trying to find useful words',
letter, reachable_dir)
usable_letters = letters_left + Counter(letter)
for word in longest_formable_words(usable_letters, wordlist):
logging.debug('can form "%s"', word)
if letter not in word:
# Need to connect it to the existing grid somewhere -- if
# we're not using the connecting letter, we can't connect
# it to the existing grid.
logging.debug("ignoring %s as it doesn't contain %s",
word, letter)
continue
this_grid = grid.copy()
indices_in_word = [word.index(letter)]
for _ in range(word.count(letter) - 1):
next_index = word.index(letter, indices_in_word[-1] + 1)
indices_in_word.append(next_index)
# If the connecting letter occurs multiple times in the word
# we've chosen, there are multiple ways to connect it to the
# existing grid. Let's try all of them.
if reachable_dir == DOWN:
possible_coords = [(x, y - i) for i in indices_in_word]
elif reachable_dir == ACROSS:
possible_coords = [(x - i, y) for i in indices_in_word]
else:
raise ValueError(reachable_dir)
for new_x, new_y in possible_coords:
# Find out which letters already exist in the right place,
# and make sure we don't take them out of the pile of
# letters left to use.
existing_letters = this_grid.letters(
new_x, new_y, len(word), reachable_dir)
overlap = [char for i, char in enumerate(existing_letters)
if char and char == word[i]]
logging.debug('%s exists in the grid, removing',
' '.join(overlap))
using_letters = Counter(word) - Counter(overlap)
logging.debug('letters actually used: %s', using_letters)
                    if not using_letters:
                        # (Part of) an existing word sits in exactly the same place:
                        # recursing would call solve_grid_stage with unchanged
                        # arguments and loop forever, so skip it.
                        logging.debug("%s already exists here on the grid", word)
                        continue
try:
# This will throw a ValueError if we pass an invalid
# reachable_dir, but we checked that just above this
# loop.
this_grid.insert_word(word, new_x, new_y, reachable_dir)
except ValueError:
logging.debug("%s conflicts with existing grid", word)
continue
logging.debug('can insert "%s"', word)
yield from solve_grid_stage(this_grid,
letters_left - using_letters)
return solve_grid_stage(WordGrid(), letters)
def parse_args():
'''Parse command-line arguments.'''
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('wordlist', metavar='WORDLIST',
type=argparse.FileType('r'),
help='file containing one lowercase word per line')
parser.add_argument('letters', metavar='LETTERS', nargs='?',
default=''.join(random.choices(ascii_lowercase, k=11)),
help='letters to lay out (default: 11 random letters)')
return parser.parse_args()
def main(args):
'''Main entry point.'''
logging.basicConfig(level=logging.INFO)
wordlist = list(map(str.strip, args.wordlist))
logging.info('using letters: %s', args.letters)
for i, grid in enumerate(solve_grid(args.letters, wordlist)):
words = ', '.join(grid.all_words())
print(f'grid #{i}: ({words})', grid, '-' * 80, sep='\n')
if __name__ == '__main__':
main(parse_args())
|
nilq/baby-python
|
python
|
from cloupy.scraping import imgw
import pytest
import urllib.request
import urllib.error
def check_if_NOT_connected_to_the_internet(host='http://google.com'):
try:
urllib.request.urlopen(host)
return False
except urllib.error.URLError:
return True
@pytest.mark.filterwarnings("ignore::pandas.errors.DtypeWarning")
@pytest.mark.skipif(check_if_NOT_connected_to_the_internet(), reason='internet connection required')
class TestDataDownloading:
@pytest.fixture
def intervals(self):
return ['monthly', 'daily', 'prompt']
@pytest.fixture
def st_kinds(self):
return ['synop', 'climat', 'fall']
def test_if_column_2_is_always_year(
self, intervals, st_kinds
):
from os import listdir
from os.path import isfile, join
import shutil
from random import shuffle
y_range = range(2018, 2019)
files_reading_dir_path = str(__file__).replace(
join('test', 'test_integration', 'test_integration_imgw.py'),
join('scraping', 'files_reading_folder')
)
for interval in intervals:
for st_kind in st_kinds:
if st_kind == 'fall' and interval == 'prompt':
continue
urls = imgw.get_urls(interval, st_kind, y_range)
imgw.download_data(urls)
downloaded_files_names = [f for f in listdir(files_reading_dir_path) if
isfile(join(files_reading_dir_path, f))]
file_formats = imgw.get_file_formats(interval, st_kind, 'all')
keywords = ['nazwa stacji', 'temperatura', 'rok', 'opad', 'wiatr']
shuffle(keywords)
for file in file_formats:
if isinstance(file_formats, str):
file = file_formats
df = imgw.concatenate_data(
downloaded_files_names=downloaded_files_names, file_formats=file, years_range=y_range,
keywords=keywords, specific_columns=None, optimize_memory_usage=False,
merge_splitted_stations=True
)
df = df[0][df[1]]
assert min(df[2]) == 2018
shutil.rmtree(files_reading_dir_path)
def test_data_downloading_for_years_before_2001(
self, intervals, st_kinds
):
years_range = range(1984, 1987)
TestDataDownloading.download_and_test_data(intervals, st_kinds, years_range)
def test_data_downloading_for_years_after_2000(
self, intervals, st_kinds
):
years_range = range(2011, 2013)
TestDataDownloading.download_and_test_data(intervals, st_kinds, years_range)
def test_data_downloading_for_years_between_2000_and_2001(
self, intervals, st_kinds
):
years_range = range(2000, 2002)
TestDataDownloading.download_and_test_data(intervals, st_kinds, years_range)
def test_adding_coordinates_to_dataframe(
self, intervals, st_kinds
):
years_range = range(2010, 2011)
for interval in intervals:
for st_kind in st_kinds:
if st_kind == 'fall' and interval == 'prompt':
continue
df = imgw.download_imgw_climatological_data(
interval, st_kind, years_range,
specific_columns=[0, 1, 2, 3],
optimize_memory_usage=True,
return_coordinates=True
)
assert 'lat' in df.columns
assert 'lon' in df.columns
assert 'elv' in df.columns
assert not df['lat'].isnull().all()
assert not df['lon'].isnull().all()
assert not df['elv'].isnull().all()
@staticmethod
def download_and_test_data(
intervals, st_kinds, years_range
):
for interval in intervals:
for st_kind in st_kinds:
if interval == 'prompt' and st_kind == 'fall':
with pytest.raises(NotADirectoryError):
imgw.download_imgw_climatological_data(
interval, st_kind, years_range
)
continue
else:
df = imgw.download_imgw_climatological_data(
interval, st_kind, years_range,
optimize_memory_usage=True,
specific_columns=[0, 1, 2, 3]
)
assert not df.empty
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import copy
import requests
import shutil
from typing import Sequence
import yaml
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("gen_meta")
from .common import *
LINGUIST_COMMIT = "10c20c7286a4b56c17253e8aab044debfe9f0dbe"
ROSETTA_CODE_DATA_COMMIT = "aac6731f2c1e30321fcfc58ac95d8203c041ee04"
def add_linguist_languages(commit: str, meta: Meta):
meta.add_dataset(name="linguist", data={"version": commit,})
norm_langs = {
"PLSQL": "PL/SQL",
"PLpgSQL": "PL/pgSQL",
"Mathematica": "Wolfram Language",
}
langs = get_linguist_languages(commit=commit)
for lang in langs:
norm_lang = norm_langs.get(lang, lang)
meta.add_language(dataset="linguist", norm_lang=norm_lang, lang=lang)
def get_linguist_languages(commit: str) -> Sequence[str]:
logger.info("loading linguist languages.yml for commit %s" % commit)
url = (
"https://raw.githubusercontent.com/github/linguist/%s/lib/linguist/languages.yml"
% commit
)
response = requests.get(url)
response.raise_for_status()
data = load_yaml_from_steam(response.content.decode("utf-8"))
return [l for l in data.keys()]
def add_rosetta_code_languages(commit: str, meta: Meta):
dataset_name = "rosetta_code"
meta.add_dataset(name=dataset_name, data={"version": commit,})
norm_langs = {
"AWK": "Awk",
"Batchfile": "Batchfile",
"Brainf***": "Brainfuck",
"C sharp": "C#",
"EC": "eC",
"F Sharp": "F#",
"Fish": "fish",
"lilypond": "LilyPond",
"Make": "Makefile",
"Mathematica": "Wolfram Language",
"MoonScript": "moonscript",
"NewLISP": "NewLisp",
"OOC": "ooc",
"Openscad": "OpenSCAD",
"POV-Ray": "POV-Ray SDL",
"Powerbuilder": "PowerBuilder",
"Q": "q",
"REBOL": "Rebol",
"Sed": "sed",
"Vim Script": "Vim script",
"XSLT 1.0": "XSLT",
"XSLT 2.0": "XSLT",
"Object Pascal": "Pascal",
"Delphi": "Pascal",
"Free Pascal": "Pascal",
"Visual Basic .NET": "Visual Basic",
"VBA": "Visual Basic",
"VBScript": "Visual Basic",
}
langs = get_rosetta_code_languages(commit=commit)
for lang in langs:
norm_lang = norm_langs.get(lang, lang)
meta.add_language(dataset=dataset_name, norm_lang=norm_lang, lang=lang)
def get_rosetta_code_languages(commit: str) -> Sequence[str]:
logger.info("loading rosetta_code languages for commit %s" % commit)
tmp_dir = clone_tmp_repo("acmeism/RosettaCodeData", commit=commit)
langs = load_yaml(os.path.join(tmp_dir, "Meta", "Lang.yaml"))
langs = {k: v["path"] for k, v in langs.items()}
def _has_rosetta_code_samples(tmp_dir, lang):
return len(os.listdir(os.path.join(tmp_dir, "Lang", lang))) > 2
langs = [l for l, p in langs.items() if _has_rosetta_code_samples(tmp_dir, p)]
shutil.rmtree(tmp_dir)
return langs
def main():
meta = Meta(load=False)
add_linguist_languages(LINGUIST_COMMIT, meta)
add_rosetta_code_languages(ROSETTA_CODE_DATA_COMMIT, meta)
meta.save()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
'''
剑指 Offer 20 (Coding Interviews, problem 20): strings that represent numeric values
Implement a function that determines whether a string represents a numeric value
(an integer or a decimal).
A numeric value consists of the following parts, in order:
    any number of spaces
    a decimal or an integer
    (optional) an 'e' or 'E' followed by an integer
    any number of spaces
A decimal consists of the following parts, in order:
    (optional) a sign character ('+' or '-')
    one of the following forms:
        at least one digit, followed by a dot '.'
        at least one digit, followed by a dot '.', followed by at least one digit
        a dot '.' followed by at least one digit
An integer consists of the following parts, in order:
    (optional) a sign character ('+' or '-')
    at least one digit
Some valid numeric values:
["+100", "5e2", "-123", "3.1416", "-1E-16", "0123"]
Some invalid values:
["12e", "1a3.14", "1.2.3", "+-5", "12e+5.4"]
Example 1:
Input: s = "0"
Output: true
Example 2:
Input: s = "e"
Output: false
Example 3:
Input: s = "."
Output: false
Example 4:
Input: s = " .1 "
Output: true
Constraints:
1 <= s.length <= 20
s contains only English letters (upper and lower case), digits (0-9),
the plus sign '+', the minus sign '-', the space ' ' and the dot '.'.
'''
'''
Approach: finite state machine
Following the definition of a numeric value, build a state-transition table. Read the
input one character at a time and check whether that character allows a transition to
a next state. The string is a valid number if, once the input is exhausted, the machine
can move to the end state (st_end).
Time complexity: O(n) -- the input string s is scanned exactly once.
Space complexity: O(1)
'''
class Solution:
def isNumber(self, s: str) -> bool:
st_start, st_sign, st_int, st_dot, st_decimal, st_e, st_expsign, st_exp, st_endspace, st_end, st_open_dot = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
ch_space, ch_num, ch_dot, ch_e, ch_sign, ch_end, ch_other = 0, 1, 2, 3, 4, 9, 99
chMap = {' ': ch_space, '.': ch_dot, 'e': ch_e, 'E': ch_e, '-': ch_sign, '+': ch_sign}
machine = {
            st_start: {  # start state
ch_space: st_start,
ch_num: st_int,
ch_dot: st_open_dot,
ch_sign: st_sign
},
            st_sign: {  # leading '+'/'-' sign
ch_dot: st_open_dot,
ch_num: st_int
},
            st_int: {  # integer digits
ch_num: st_int,
ch_dot: st_dot,
ch_e: st_e,
ch_space: st_endspace,
ch_end: st_end
},
            st_open_dot: {  # dot with no integer digits before it
ch_num: st_decimal
},
            st_dot: {  # dot following integer digits
ch_num: st_decimal,
ch_e: st_e,
ch_space: st_endspace,
ch_end: st_end
},
            st_decimal: {  # fractional digits
ch_num: st_decimal,
ch_e: st_e,
ch_space: st_endspace,
ch_end: st_end
},
            st_e: {  # 'e'/'E' exponent marker
ch_sign: st_expsign,
ch_num: st_exp
},
            st_expsign: {  # sign of the exponent
ch_num: st_exp
},
            st_exp: {  # exponent digits
ch_num: st_exp,
ch_space: st_endspace,
ch_end: st_end
},
            st_endspace: {  # trailing spaces
ch_space: st_endspace,
ch_end: st_end
}
}
        # Walk the input through the state machine
status = st_start
for i in range(len(s)):
ch = ch_other
if s[i] in chMap:
ch = chMap[s[i]]
elif s[i].isdigit():
ch = ch_num
else:
ch = ch_other
if ch in machine[status]:
                status = machine[status][ch]  # transition on the input character
else:
return False
if ch_end in machine[status] and machine[status][ch_end] == st_end:
return True
return False
s = Solution()
print(not s.isNumber('.'))
print(s.isNumber("+100"))
print(s.isNumber("5e2"))
print(s.isNumber('-123'))
print(s.isNumber('3.1416'))
print(s.isNumber('-1E-16'))
print(s.isNumber('0123'))
print(s.isNumber('12e') is False)
print(s.isNumber('1a3.14') is False)
print(s.isNumber('1.2.3') is False)
print(s.isNumber('+-5') is False)
print(s.isNumber('12e+5.4') is False)
|
nilq/baby-python
|
python
|
from typing import Tuple, Optional
from .template import Processor
class Cutadapt(Processor):
fq1: str
fq2: Optional[str]
adapter: str
trimmed_fq1: str
trimmed_fq2: Optional[str]
def main(self,
fq1: str,
fq2: Optional[str],
adapter: str) -> Tuple[str, Optional[str]]:
self.fq1 = fq1
self.fq2 = fq2
self.adapter = adapter
if self.fq2 is not None:
self.trimmed_fq1, self.trimmed_fq2 = CutadaptPairedEnd(self.settings).main(
fq1=self.fq1,
fq2=self.fq2,
adapter=self.adapter)
else:
self.trimmed_fq1 = CutadaptSingleEnd(self.settings).main(
fq=self.fq1,
adapter=self.adapter)
self.trimmed_fq2 = None
return self.trimmed_fq1, self.trimmed_fq2
class CutadaptBase(Processor):
MINIMUM_OVERLAP = '3'
MAXIMUM_ERROR_RATE = '0.1'
MINIMUM_LENGTH = '50'
class CutadaptPairedEnd(CutadaptBase):
fq1: str
fq2: str
adapter: str
trimmed_fq1: str
trimmed_fq2: str
def main(self,
fq1: str,
fq2: str,
adapter: str) -> Tuple[str, str]:
self.fq1 = fq1
self.fq2 = fq2
self.adapter = adapter
self.set_output_paths()
self.cutadapt()
return self.trimmed_fq1, self.trimmed_fq2
def set_output_paths(self):
self.trimmed_fq1 = f'{self.workdir}/trimmed_1.fq'
self.trimmed_fq2 = f'{self.workdir}/trimmed_2.fq'
def cutadapt(self):
log = f'{self.outdir}/cutadapt.log'
cmd = f'''cutadapt \\
--adapter {self.adapter} \\
-A {self.adapter} \\
--overlap {self.MINIMUM_OVERLAP} \\
--error-rate {self.MAXIMUM_ERROR_RATE} \\
--minimum-length {self.MINIMUM_LENGTH} \\
--output {self.trimmed_fq1} \\
--paired-output {self.trimmed_fq2} \\
{self.fq1} \\
{self.fq2} \\
1> {log} \\
2> {log}'''
self.call(cmd)
class CutadaptSingleEnd(CutadaptBase):
fq: str
adapter: str
trimmed_fq: str
def main(self,
fq: str,
adapter: str) -> str:
self.fq = fq
self.adapter = adapter
self.set_output_path()
self.cutadapt()
return self.trimmed_fq
def set_output_path(self):
self.trimmed_fq = f'{self.workdir}/trimmed.fq'
def cutadapt(self):
log = f'{self.outdir}/cutadapt.log'
cmd = f'''cutadapt \\
--adapter {self.adapter} \\
--overlap {self.MINIMUM_OVERLAP} \\
--error-rate {self.MAXIMUM_ERROR_RATE} \\
--minimum-length {self.MINIMUM_LENGTH} \\
--output {self.trimmed_fq} \\
{self.fq} \\
1> {log} \\
2> {log}'''
self.call(cmd)
class FastQC(Processor):
fq1: str
fq2: Optional[str]
def main(self,
fq1: str,
fq2: Optional[str]):
self.fq1 = fq1
self.fq2 = fq2
self.fastqc()
def fastqc(self):
log = f'{self.outdir}/fastqc.log'
fq2 = '' if self.fq2 is None else self.fq2
cmd = f'''fastqc \\
--outdir {self.outdir} \\
--threads {self.threads} \\
{self.fq1} {fq2} \\
1> {log} \\
2> {log}'''
self.call(cmd)
|
nilq/baby-python
|
python
|
import redis
import json
class Construct_Applications(object):
def __init__(self,bc,cd): # bc is build configuration class cd is construct data structures
bc.add_header_node("APPLICATION_SUPPORT")
bc.end_header_node("APPLICATION_SUPPORT")
|
nilq/baby-python
|
python
|
__author__ = 'Geir Istad'
from tinydb import TinyDB, where
class CanStorage:
__data_base = TinyDB
__current_sequence_table = TinyDB.table
__current_sequence = None
__max_sequence = None
__ready_to_store = False
def __init__(self, a_file_path):
"""
Opens (or creates) a data base file that that the instance of a
CanStorage interacts with.
:param a_file_path:
Path and file name. Note: path _has_ to exist, if not the program will
exit non-gracefully.
:return:
N/A
"""
self.__data_base = TinyDB(a_file_path)
# Check if we have a current sequence stored in the filemajigger
sequence_table = self.__data_base.table('sequence_counter')
sequence_check = sequence_table.search(where('sequence'))
# If a previous sequence exist we increment the max by one
if sequence_check:
            self.__max_sequence = max(entry['sequence'] for entry in sequence_check)
# If this is the first entry set current sequence to 0
else:
self.__max_sequence = 0
def print_debug_info(self):
"""
Provides debug information about contents of data base.
:return:
N/A
"""
print self.__data_base.all()
print self.__data_base.tables()
def __init_storage(self):
"""
Initialises a new storage table. Increments the sequence counter, stores
it for future use and creates a new named table for the new sequence of
data to be stored.
:return:
N/A
"""
self.__current_sequence = self.__max_sequence + 1
# Store the current sequence to db for next time the file is opened
sequence_table = self.__data_base.table('sequence_counter')
sequence_table.insert({'sequence': self.__current_sequence})
# Create new table entry for this sequence
sequence_name = 'sequence' + str(self.__current_sequence)
self.__current_sequence_table = self.__data_base.table(sequence_name)
self.__ready_to_store = True
def store(self, a_dict_or_list_entry):
"""
Stores a data entry in the currently opened data base table. If the
storage is not initialised it will call the initialising function to
create a new table for the current sequence of data to be stored.
:param a_dict_or_list_entry:
Either a list containing several dictionary entries or a single
dictionary entry containing a 'data_id' filed.
:return:
N/A
"""
if not self.__ready_to_store:
self.__init_storage()
# Check if we're storing a list or a dictionary
if type(a_dict_or_list_entry) == list:
# Cycle through all dictionaries stored in list
for list_entry in a_dict_or_list_entry:
# Get and remove the key from the dict
data_key = list_entry['data_id']
list_entry.pop('data_id', 0)
# Store the passed dictionary with its key being the data_id
# field
self.__current_sequence_table.insert({data_key: list_entry})
elif type(a_dict_or_list_entry) == dict:
# Get and remove the key from the dict
data_key = a_dict_or_list_entry['data_id']
a_dict_or_list_entry.pop('data_id', 0)
# Store the passed dictionary with its key being the data_id field
self.__current_sequence_table.insert({data_key:
a_dict_or_list_entry})
else:
exit('CanParser.store() expects list or dict entries!')
def load(self, a_sequence_number, a_key):
"""
Provides access to the data stored for the specified sequence number and
the specified key ('data_id').
:param a_sequence_number:
The sequence number of interest.
:param a_key:
A 'data_id' key containing the data we are interested in retrieving.
:return:
data_list_for_key containing a list of dictionary objects.
Will return an empty list of the sequence number is invalid.
"""
data_list_for_key = list()
if a_sequence_number <= self.__max_sequence:
sequence_name = 'sequence' + str(a_sequence_number)
selected_table = self.__data_base.table(sequence_name)
data_list_for_key = selected_table.search(where(a_key))
return data_list_for_key
def get_max_sequence(self):
"""
Give a user the number of data sequences stored in the data base.
:return:
Number of sequences currently stored.
"""
return self.__max_sequence
def get_data_types(self, a_sequence_number):
"""
Returns all the data types that are stored in a given data sequence
entry.
:param a_sequence_number:
The data sequence the user is interested in retrieving a list of
different data entries for.
:return:
key_list containing the unique 'data_id's available in the specified
sequence number.
Will return an empty list of the sequence number is invalid.
"""
key_list = list()
# Only return for valid sequence numbers!
if a_sequence_number <= self.__max_sequence:
sequence_name = 'sequence' + str(a_sequence_number)
selected_table = self.__data_base.table(sequence_name)
all_items = selected_table.all()
for item in all_items:
if item.keys()[0] not in key_list:
key_list.append(item.keys()[0])
return key_list
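# A hedged usage sketch (file name and payload are illustrative only): the first
# store() after construction opens a new sequence table, later stores in the same
# session append to it, and load() retrieves entries for one 'data_id' from a
# chosen sequence.
#
#   storage = CanStorage('can_log.json')
#   storage.store({'data_id': 'engine_rpm', 'timestamp': 12.5, 'value': 3100})
#   ...
#   # in a later session, read the newest stored sequence back:
#   storage = CanStorage('can_log.json')
#   latest = storage.get_max_sequence()
#   rpm_entries = storage.load(latest, 'engine_rpm')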
|
nilq/baby-python
|
python
|
"""Container access request backend for Openstack Swift."""
__name__ = "swift_sharing_request"
__version__ = "0.4.9"
__author__ = "CSC Developers"
__license__ = "MIT License"
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.thrift.python.apache_thrift_py_gen import ApacheThriftPyGen
from pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants_test.tasks.task_test_base import TaskTestBase
class ApacheThriftPyGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ApacheThriftPyGen
def generate_single_thrift_target(self, python_thrift_library):
context = self.context(target_roots=[python_thrift_library])
apache_thrift_gen = self.create_task(context)
apache_thrift_gen.execute()
def is_synthetic_python_library(target):
return isinstance(target, PythonLibrary) and target.is_synthetic
synthetic_targets = context.targets(predicate=is_synthetic_python_library)
self.assertEqual(1, len(synthetic_targets))
return synthetic_targets[0]
def test_single_namespace(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace py foo
const i32 THINGCONSTANT = 42
struct Thing {}
service ThingService {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=PythonThriftLibrary,
sources=['one.thrift'])
synthetic_target = self.generate_single_thrift_target(one)
self.assertEqual({'foo/__init__.py', 'foo/ThingService-remote',
'foo/ThingService.py', 'foo/ttypes.py', 'foo/constants.py'},
set(synthetic_target.sources_relative_to_source_root()))
def test_nested_namespaces(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace py foo
struct One {}
"""))
self.create_file('src/thrift/com/foo/bar/two.thrift', contents=dedent("""
namespace py foo.bar
struct Two {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=PythonThriftLibrary,
sources=['one.thrift', 'bar/two.thrift'])
synthetic_target = self.generate_single_thrift_target(one)
self.assertEqual({'foo/__init__.py', 'foo/ttypes.py', 'foo/constants.py',
'foo/bar/__init__.py', 'foo/bar/ttypes.py', 'foo/bar/constants.py'},
set(synthetic_target.sources_relative_to_source_root()))
|
nilq/baby-python
|
python
|
from django.http import HttpResponse
from django.template import loader, Context
from django.db.models import Q
from blog.views import Entry
def search(request):
query = request.GET['q']
t = loader.get_template('result.html')
results = Entry.objects.filter(Q(title__icontains=query) | Q(body__icontains=query))#.order_by('created')
c = Context({ 'query': query, 'results':results })
return HttpResponse(t.render(c))
"""
title ==> object title from { models.py }
body ==> object body from { models.py }
"""
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import itertools
import pandas as pd
from .. import models
class BattleMetricsService(object):
def __init__(self):
self._battle = models.Battle()
self.summary = pd.DataFrame()
def read_html(self, file_path):
log = models.BattleLog.from_html(file_path=file_path)
self._handle_log_records(log.records)
def read_string(self, data):
log = models.BattleLog.from_string(data=data)
self._handle_log_records(log.records)
def _handle_log_records(self, log_records):
for log_record in log_records:
# Metric computation is time-sensitive. It matters when
# the battle state is updated.
if (isinstance(log_record, models.HitPointsChangedRecord) and
not log_record.indirectly_caused_by and
self._battle.current_action.used_by_pokemon != self._battle.current_action.targeted_pokemon):
self._update_damage_dealt(log_record=log_record)
self._battle.apply_log_record(log_record)
# While there is a pd.Index.any method, pd.MultiIndex
# objects do not support truth testing. You must instead
# rely on the isinstance or type functions.
summary_has_index = isinstance(self.summary.index, pd.MultiIndex)
if not summary_has_index and self._battle.pokemon_are_loaded:
self._create_index()
self._create_metrics_placeholders()
self._update_index_labels()
def _create_index(self):
tuples = list()
for player in self._battle.get_all_players():
pokemon_sids = (pokemon.pokemon_sid for pokemon in player.pokemon)
tuples.extend(itertools.product([player.player_sid], pokemon_sids))
names = ('player_sid', 'pokemon_sid')
index = pd.MultiIndex.from_tuples(tuples, names=names)
summary = pd.DataFrame(index=index)
self.summary = summary
def _create_metrics_placeholders(self):
summary = self.summary.copy()
summary.loc[:, 'damage_dealt'] = 0
self.summary = summary
def _update_damage_dealt(self, log_record):
summary = self.summary.copy()
current_action = self._battle.current_action
hit_points_before = current_action.targeted_pokemon.remaining_hit_points
hit_points_after = log_record.remaining_hit_points
hit_points_delta = hit_points_before - hit_points_after
index = (current_action.used_by_player.player_sid,
current_action.used_by_pokemon.pokemon_sid)
summary.loc[index, 'damage_dealt'] += hit_points_delta
self.summary = summary
def _update_index_labels(self):
summary = self.summary.copy()
fields = ['player_name', 'pokemon_name']
summary.loc[:, fields[0]], summary.loc[:, fields[1]] = ('', '')
for player in self._battle.get_all_players():
for pokemon in player.pokemon:
index = (player.player_sid, pokemon.pokemon_sid)
summary.loc[index, fields] = (player.name, pokemon.name)
summary = summary.reset_index()
summary = summary.set_index(keys=fields)
self.summary = summary
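# A hedged usage sketch (the log file name is illustrative): feed a saved battle-log
# HTML export to the service and read per-Pokemon damage out of the summary DataFrame,
# which is indexed by (player_name, pokemon_name).
#
#   service = BattleMetricsService()
#   service.read_html(file_path='battle_log.html')
#   print(service.summary['damage_dealt'])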
|
nilq/baby-python
|
python
|
import json
from .Reducer import Reducer
class EAVReducer(Reducer):
def setTimestamp(self, timestamp):
self.set("timestamp", timestamp)
def setEntity(self, entity):
self.set("entity", entity)
def getEntity(self):
return self.get("entity")
def setAttribute(self, attribute):
self.set("attribute", attribute)
def setValue(self, value, typ):
self.set("value", value)
self.updateMeta("type", typ)
def setMeta(self, meta):
self.set("meta", meta)
def getMeta(self):
ret = self.get("meta")
if ret is None:
return {}
else:
return self.get("meta")
def updateMeta(self, key, value):
meta = self.getMeta()
meta[key] = value
self.setMeta(meta)
def mergeMeta(self, meta):
oldmeta = self.getMeta()
newmeta = {**oldmeta, **meta}
self.setMeta(newmeta)
|
nilq/baby-python
|
python
|
#
# This file is part of pyasn1-alt-modules software.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_alt_modules import pem
from pyasn1_alt_modules import rfc2560
from pyasn1_alt_modules import rfc5940
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import rfc5280
class CRLandOCSPResponseTestCase(unittest.TestCase):
pem_text = """\
MIIHWQYJKoZIhvcNAQcCoIIHSjCCB0YCAQExDTALBglghkgBZQMEAgEwUwYJKoZI
hvcNAQcBoEYERENvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91Lg0KoIIBaDCCAWQwggEKoAMCAQIC
CQClWUKCJkwnGTAKBggqhkjOPQQDAjAkMRQwEgYDVQQKDAtleGFtcGxlLm9yZzEM
MAoGA1UEAwwDQm9iMB4XDTE3MTIyMDIzMDc0OVoXDTE4MTIyMDIzMDc0OVowJDEU
MBIGA1UECgwLZXhhbXBsZS5vcmcxDDAKBgNVBAMMA0JvYjBZMBMGByqGSM49AgEG
CCqGSM49AwEHA0IABIZP//xT8ah2ymmxfidIegeccVKuGxN+OTuvGq69EnQ8fUFD
ov2KNw8Cup0DtzAfHaZOMFWUu2+Vy3H6SLbQo4OjJTAjMCEGA1UdEQEB/wQXMBWG
E3NpcDpib2JAZXhhbXBsZS5vcmcwCgYIKoZIzj0EAwIDSAAwRQIhALIkjJJAKCI4
nsklf2TM/RBvuguWwRkHMDTVGxAvczlsAiAVjrFR8IW5vS4EzyePDVIua7b+Tzb3
THcQsVpPR53kDaGCBGQwggIbMIIBAwIBATANBgkqhkiG9w0BAQsFADBsMQswCQYD
VQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGln
aWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBS
b290IENBFw0xOTA1MDIyMjE1NTRaFw0xOTA1MjMyMjE1NTRaMDEwLwIQDPWCOBgZ
nlb4K9ZS7Sft6RcNMTgxMDI1MTYxMTM4WjAMMAoGA1UdFQQDCgEAoDAwLjAfBgNV
HSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzALBgNVHRQEBAICAcQwDQYJKoZI
hvcNAQELBQADggEBABPO3OA0OkQZ+RLVxz/cNx5uNVEO416oOePkN0A4DxFztf33
7caS4OyfS9Wyu1j5yUdWJVpAKXSQeN95MqHkpSpYDssuqbuYjv8ViJfseGBgtXTc
zUzzNeNdY2uxMbCxuhmPkgacAo1lx9LkK2ScYHWVbfFRF1UQ/dcmavaZsEOBNuLW
OxQYA9MqfVNAymHe7vPqwm/8IY2FbHe9HsiJZfGxNWMDP5lmJiXmpntTeDQ2Ujdi
yXwGGKjyiSTFk2jVRutrGINufaoA/f7eCmIb4UDPbpMjVfD215dW8eBKouypCVoE
vmCSSTacdiBI2yOluvMN0PzvPve0ECAE+D4em9ahggJBBggrBgEFBQcQAjCCAjMK
AQCgggIsMIICKAYJKwYBBQUHMAEBBIICGTCCAhUwZqEgMB4xHDAJBgNVBAYTAlJV
MA8GA1UEAx4IAFQAZQBzAHQYEzIwMTkwNTA5MTU1MDQ4LjI1OVowLTArMBIwBwYF
Kw4DAhoEAQEEAQECAQGAABgTMjAxOTA1MDkxNTUwNDguMjYxWjAKBggqhkjOPQQD
AgNJADBGAiEAujFVH+NvuTLYa8RW3pvWSUwZfjOW5H5171JI+/50BjcCIQDhwige
wl+ts6TIvhU+CFoOipQBNKyKXKh7ngJkUtpZ86CCAVIwggFOMIIBSjCB8aADAgEC
AgEBMAoGCCqGSM49BAMCMB4xHDAJBgNVBAYTAlJVMA8GA1UEAx4IAFQAZQBzAHQw
HhcNMTkwMjAxMDUwMDAwWhcNMjIwMjAxMDUwMDAwWjAeMRwwCQYDVQQGEwJSVTAP
BgNVBAMeCABUAGUAcwB0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEM0jxEYgg
RxC/r87uV/h6iZ8BAdHT/6fxRuzG0PRMIlFBy38skFUXJJulKV9JW16YJqOkVsqv
xwMM61z7p1vQ/qMgMB4wDwYDVR0TBAgwBgEB/wIBAzALBgNVHQ8EBAMCAAYwCgYI
KoZIzj0EAwIDSAAwRQIhAIdpCt5g89ofSADXmBD3KXQGnTghwbAMeWrKXqTGww+x
AiAl8NQgfUk4xMymZ3VtCLJ2MdczDps4Zh2KPOqAR5fZAjGCAQcwggEDAgEBMDEw
JDEUMBIGA1UECgwLZXhhbXBsZS5vcmcxDDAKBgNVBAMMA0JvYgIJAKVZQoImTCcZ
MAsGCWCGSAFlAwQCAaBpMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZI
hvcNAQkFMQ8XDTE5MDEyNDIzNTI1NlowLwYJKoZIhvcNAQkEMSIEIO93j8lA1ebc
JXb0elmbMSYZWp8aInra81+iLAUNjRlaMAoGCCqGSM49BAMCBEcwRQIhAPeI7URq
tw//LB/6TAN0/Qh3/WHukXwxRbOJpnYVx0b6AiB3lK3FfwBhx4S5YSPMblS7goJl
ttTMEpl2prH8bbwo1g==
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
sd, rest = der_decoder(
asn1Object['content'], asn1Spec=rfc5652.SignedData())
self.assertTrue(sd.prettyPrint())
self.assertEqual(
rfc5652.id_data, sd['encapContentInfo']['eContentType'])
self.assertTrue(sd['encapContentInfo']['eContent'])
v2 = rfc5280.Version(value='v2')
self.assertEqual(v2, sd['crls'][0]['crl']['tbsCertList']['version'])
ocspr_oid = rfc5940.id_ri_ocsp_response
self.assertEqual(ocspr_oid, sd['crls'][1]['other']['otherRevInfoFormat'])
ocspr, rest = der_decoder(
sd['crls'][1]['other']['otherRevInfo'],
asn1Spec=rfc5940.OCSPResponse())
self.assertTrue(ocspr.prettyPrint())
success = rfc2560.OCSPResponseStatus(value='successful')
self.assertEqual(success, ocspr['responseStatus'])
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
sd_eci = asn1Object['content']['encapContentInfo']
self.assertEqual(rfc5652.id_data, sd_eci['eContentType'])
self.assertTrue(sd_eci['eContent'].hasValue())
for ri in asn1Object['content']['crls']:
if ri.getName() == 'crl':
v2 = rfc5280.Version(value='v2')
self.assertEqual(v2, ri['crl']['tbsCertList']['version'])
if ri.getName() == 'other':
ori = ri['other']
ocspr_oid = rfc5940.id_ri_ocsp_response
self.assertEqual(ocspr_oid, ori['otherRevInfoFormat'])
ocspr_status = ori['otherRevInfo']['responseStatus']
success = rfc2560.OCSPResponseStatus(value='successful')
self.assertEqual(success, ocspr_status)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
nilq/baby-python
|
python
|
from typing import Dict, Tuple, Optional, Any
from datetime import datetime
import base64
import urllib3
import requests
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from CommonServerPython import *
# Disable insecure warnings
urllib3.disable_warnings()
INTEGRATION_CONTEXT_NAME = 'MSGraphGroups'
NO_OUTPUTS: dict = {}
APP_NAME = 'ms-graph-groups'
def camel_case_to_readable(text: str) -> str:
"""'camelCase' -> 'Camel Case'
Args:
text: the text to transform
Returns:
A Camel Cased string.
"""
if text == 'id':
return 'ID'
return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()
def parse_outputs(groups_data: Dict[str, str]) -> Tuple[dict, dict]:
"""Parse group data as received from Microsoft Graph API into Demisto's conventions
Args:
groups_data: a dictionary containing the group data
Returns:
A Camel Cased dictionary with the relevant fields.
groups_readable: for the human readable
groups_outputs: for the entry context
"""
# Unnecessary fields, dropping as to not load the incident context.
fields_to_drop = ['@odata.context', '@odata.nextLink', '@odata.deltaLink', '@odata.type', '@removed',
'resourceProvisioningOptions', 'securityIdentifier', 'onPremisesSecurityIdentifier',
'onPremisesNetBiosName', 'onPremisesProvisioningErrors', 'onPremisesSamAccountName',
'resourceBehaviorOptions', 'creationOptions', 'preferredDataLocation']
if isinstance(groups_data, list):
groups_readable, groups_outputs = [], []
for group_data in groups_data:
group_readable = {camel_case_to_readable(i): j for i, j in group_data.items() if i not in fields_to_drop}
if '@removed' in group_data:
group_readable['Status'] = 'deleted'
groups_readable.append(group_readable)
groups_outputs.append({k.replace(' ', ''): v for k, v in group_readable.copy().items()})
return groups_readable, groups_outputs
group_readable = {camel_case_to_readable(i): j for i, j in groups_data.items() if i not in fields_to_drop}
if '@removed' in groups_data:
group_readable['Status'] = 'deleted'
group_outputs = {k.replace(' ', ''): v for k, v in group_readable.copy().items()}
return group_readable, group_outputs
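# Worked illustration (input is an assumed minimal group record, not real API output):
#   parse_outputs({'id': '1a2b', 'displayName': 'Team A'})
#   -> ({'ID': '1a2b', 'Display Name': 'Team A'},
#       {'ID': '1a2b', 'DisplayName': 'Team A'})
# i.e. readable keys for the human readable table and space-free keys for the entry context.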
def epoch_seconds() -> int:
"""
    Return the number of seconds since the Unix epoch for the current UTC time.
"""
return int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds())
def get_encrypted(content: str, key: str) -> str:
"""
Args:
content (str): content to encrypt. For a request to Demistobot for a new access token, content should be
the tenant id
key (str): encryption key from Demistobot
Returns:
encrypted timestamp:content
"""
def create_nonce() -> bytes:
return os.urandom(12)
def encrypt(string: str, enc_key: str) -> bytes:
"""
        Encrypt `string` using AES-GCM with the base64-encoded `enc_key`.
        Returns:
            base64-encoded bytes containing the 12-byte nonce followed by the ciphertext.
        """
# String to bytes
enc_key = base64.b64decode(enc_key)
# Create key
aes_gcm = AESGCM(enc_key)
# Create nonce
nonce = create_nonce()
# Create ciphered data
data = string.encode()
ct_ = aes_gcm.encrypt(nonce, data, None)
return base64.b64encode(nonce + ct_)
now = epoch_seconds()
encrypted = encrypt(f'{now}:{content}', key).decode('utf-8')
return encrypted
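# --- Illustrative counterpart (assumption, not part of this integration) ---
# get_encrypted returns base64(nonce || ciphertext) with a 12-byte nonce, so a holder
# of the same key (e.g. the token service) could recover the "timestamp:content"
# string roughly as sketched below; the helper name is hypothetical.
def _decrypt_example(encrypted: str, key: str) -> str:
    raw = base64.b64decode(encrypted)
    nonce, ciphertext = raw[:12], raw[12:]
    return AESGCM(base64.b64decode(key)).decrypt(nonce, ciphertext, None).decode('utf-8')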
class Client(BaseClient):
"""
Client to use in the MS Graph Groups integration. Overrides BaseClient
"""
def __init__(self, base_url: str, tenant: str, auth_and_token_url: str, auth_id: str, token_retrieval_url: str,
enc_key: str, verify: bool, proxy: dict):
super().__init__(base_url, verify, proxy)
self.tenant = tenant
self.auth_and_token_url = auth_and_token_url
self.auth_id = auth_id
self.token_retrieval_url = token_retrieval_url
self.enc_key = enc_key
def get_access_token(self):
"""Get the Microsoft Graph Access token from the instance token or generates a new one if needed.
Returns:
The access token.
"""
integration_context = demisto.getIntegrationContext()
access_token = integration_context.get('access_token')
valid_until = integration_context.get('valid_until')
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
try:
dbot_response = requests.post(
self.token_retrieval_url,
headers={'Accept': 'application/json'},
data=json.dumps({
'app_name': APP_NAME,
'registration_id': self.auth_id,
'encrypted_token': get_encrypted(self.tenant, self.enc_key)
}),
verify=self._verify
)
except requests.exceptions.SSLError as err:
demisto.debug(str(err))
raise Exception(f'Connection error in the API call to Microsoft Graph.\n'
f'Check your not secure parameter.\n\n{err}')
except requests.ConnectionError as err:
demisto.debug(str(err))
raise Exception(f'Connection error in the API call to Microsoft Graph.\n'
f'Check your Server URL parameter.\n\n{err}')
if dbot_response.status_code not in {200, 201}:
msg = 'Error in authentication. Try checking the credentials you entered.'
try:
demisto.info(f'Authentication failure from server: {dbot_response.status_code}'
f' {dbot_response.reason} {dbot_response.text}')
err_response = dbot_response.json()
server_msg = err_response.get('message')
if not server_msg:
title = err_response.get('title')
detail = err_response.get('detail')
if title:
server_msg = f'{title}. {detail}'
if server_msg:
msg += f' Server message: {server_msg}'
except Exception as err:
demisto.error(f'Failed parsing error response - Exception: {err}')
raise Exception(msg)
try:
gcloud_function_exec_id = dbot_response.headers.get('Function-Execution-Id')
demisto.info(f'Google Cloud Function Execution ID: {gcloud_function_exec_id}')
parsed_response = dbot_response.json()
except ValueError:
raise Exception(
'There was a problem in retrieving an updated access token.\n'
'The response from the Demistobot server did not contain the expected content.'
)
access_token = parsed_response.get('access_token')
expires_in = parsed_response.get('expires_in', 3595)
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
# err on the side of caution with a slightly shorter access token validity period
expires_in = expires_in - time_buffer
demisto.setIntegrationContext({
'access_token': access_token,
'valid_until': epoch_seconds() + expires_in
})
return access_token
def http_request(self, method: str, url_suffix: str = None, params: Dict = None, body: Optional[str] = None,
next_link: str = None):
"""
Generic request to Microsoft Graph
"""
token = self.get_access_token()
if next_link:
url = next_link
else:
url = f'{self._base_url}{url_suffix}'
try:
response = requests.request(
method,
url,
headers={
'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json',
'Accept': 'application/json'
},
params=params,
data=body,
verify=self._verify,
)
except requests.exceptions.SSLError as err:
demisto.debug(str(err))
raise Exception(f'Connection error in the API call to Microsoft Graph.\n'
f'Check your not secure parameter.\n\n{err}')
except requests.ConnectionError as err:
demisto.debug(str(err))
raise Exception(f'Connection error in the API call to Microsoft Graph.\n'
f'Check your Server URL parameter.\n\n{err}')
try:
data = response.json() if response.text else {}
if not response.ok:
raise Exception(f'API call to MS Graph failed [{response.status_code}]'
f' - {demisto.get(data, "error.message")}')
elif response.status_code == 206: # 206 indicates Partial Content, reason will be in the warning header
demisto.debug(str(response.headers))
return data
except TypeError as exc:
demisto.debug(str(exc))
raise Exception(f'Error in API call to Microsoft Graph, could not parse result [{response.status_code}]')
def test_function(self):
"""Performs basic GET request to check if the API is reachable and authentication is successful.
Returns:
ok if successful.
"""
self.http_request('GET', 'groups', params={'$orderby': 'displayName'})
demisto.results('ok')
def list_groups(self, order_by: str = None, next_link: str = None, top: int = None, filter_: str = None) -> Dict:
"""Returns all groups by sending a GET request.
Args:
order_by: the group fields to order by the response.
next_link: the link for the next page of results, if exists. see Microsoft documentation for more details.
docs.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0
top: sets the page size of results.
filter_: filters results.
Returns:
Response from API.
"""
params = {'$orderby': order_by} if order_by else {}
if next_link: # pagination
groups = self.http_request('GET', next_link=next_link)
elif filter_:
groups = self.http_request('GET', f'groups?$filter={filter_}&$top={top}', params=params)
else:
groups = self.http_request('GET', f'groups?$top={top}', params=params)
return groups
def get_group(self, group_id: str) -> Dict:
"""Returns a single group by sending a GET request.
Args:
group_id: the group id.
Returns:
Response from API.
"""
group = self.http_request('GET', f'groups/{group_id}')
return group
def create_group(self, properties: Dict[str, Optional[Any]]) -> Dict:
"""Create a single group by sending a POST request.
Args:
properties: the group properties.
Returns:
Response from API.
"""
group = self.http_request('POST', 'groups', body=json.dumps(properties))
return group
def delete_group(self, group_id: str):
"""Delete a single group by sending a DELETE request.
Args:
group_id: the group id to delete.
"""
# If successful, this method returns 204 No Content response code.
# It does not return anything in the response body.
        self.http_request('DELETE', f'groups/{group_id}')
def list_members(self, group_id: str, next_link: str = None, top: int = None, filter_: str = None) -> Dict:
"""List all group members by sending a GET request.
Args:
group_id: the group id to list its members.
next_link: the link for the next page of results, if exists. see Microsoft documentation for more details.
docs.microsoft.com/en-us/graph/api/group-list-members?view=graph-rest-1.0
top: sets the page size of results.
filter_: filters results.
Returns:
Response from API.
"""
if next_link: # pagination
            members = self.http_request('GET', next_link=next_link)
elif filter_:
members = self.http_request('GET', f'groups/{group_id}/members?$filter={filter_}&$top={top}')
else:
members = self.http_request('GET', f'groups/{group_id}/members?$top={top}')
return members
def add_member(self, group_id: str, properties: Dict[str, str]):
"""Add a single member to a group by sending a POST request.
Args:
group_id: the group id to add the member to.
properties: the member properties.
"""
# If successful, this method returns 204 No Content response code.
# It does not return anything in the response body.
self.http_request('POST', f'groups/{group_id}/members/$ref', body=json.dumps(properties))
def remove_member(self, group_id: str, user_id: str):
"""Remove a single member to a group by sending a DELETE request.
Args:
group_id: the group id to add the member to.
user_id: the user id to remove.
"""
# If successful, this method returns 204 No Content response code.
# It does not return anything in the response body.
self.http_request('DELETE', f'groups/{group_id}/members/{user_id}/$ref')
def test_function_command(client: Client, args: Dict):
"""Performs a basic GET request to check if the API is reachable and authentication is successful.
Args:
client: Client object with request
args: Usually demisto.args()
"""
client.test_function()
def list_groups_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Lists all groups and return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
order_by = args.get('order_by')
next_link = args.get('next_link')
top = args.get('top')
filter_ = args.get('filter')
groups = client.list_groups(order_by, next_link, top, filter_)
groups_readable, groups_outputs = parse_outputs(groups['value'])
next_link_response = ''
if '@odata.nextLink' in groups:
next_link_response = groups['@odata.nextLink']
if next_link_response:
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID).NextLink': next_link_response,
f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
title = 'Groups (Note that there are more results. Please use the next_link argument to see them.):'
else:
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
title = 'Groups:'
human_readable = tableToMarkdown(name=title, t=groups_readable,
headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail'],
removeNull=True)
return human_readable, entry_context, groups
def get_group_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Get a group by group id and return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
group_id = str(args.get('group_id'))
group = client.get_group(group_id)
group_readable, group_outputs = parse_outputs(group)
human_readable = tableToMarkdown(name="Groups:", t=group_readable,
headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',
'Security Enabled', 'Visibility'],
removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(obj.ID === {group_id})': group_outputs}
return human_readable, entry_context, group
def create_group_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Create a group and return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
required_properties = {
'displayName': str(args.get('display_name')),
'mailNickname': str(args.get('mail_nickname')),
'mailEnabled': args.get('mail_enabled') == 'true',
        'securityEnabled': args.get('security_enabled') == 'true'
}
# create the group
group = client.create_group(required_properties)
# display the new group and it's properties
group_readable, group_outputs = parse_outputs(group)
human_readable = tableToMarkdown(name=f"{required_properties['displayName']} was created successfully:",
t=group_readable,
headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',
'Security Enabled', 'Mail Enabled'],
removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}
return human_readable, entry_context, group
def delete_group_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Delete a group by group id and return outputs in Demisto's format
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
group_id = str(args.get('group_id'))
client.delete_group(group_id)
# get the group data from the context
group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === "{group_id}")')
if isinstance(group_data, list):
group_data = group_data[0]
# add a field that indicates that the group was deleted
    group_data['Deleted'] = True
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}
human_readable = f'Group: "{group_id}" was deleted successfully.'
return human_readable, entry_context, NO_OUTPUTS
def list_members_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""List a group members by group id. return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
group_id = str(args.get('group_id'))
next_link = args.get('next_link')
top = args.get('top')
filter_ = args.get('filter')
members = client.list_members(group_id, next_link, top, filter_)
if not members['value']:
human_readable = f'The group {group_id} has no members.'
return human_readable, NO_OUTPUTS, NO_OUTPUTS
members_readable, members_outputs = parse_outputs(members['value'])
# get the group data from the context
group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === "{group_id}")')
if isinstance(group_data, list):
group_data = group_data[0]
if '@odata.nextLink' in members:
next_link_response = members['@odata.nextLink']
group_data['Members'] = members_outputs # add a field with the members to the group
group_data['Members']['NextLink'] = next_link_response
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}
title = f'Group {group_id} members ' \
f'(Note that there are more results. Please use the next_link argument to see them.):'
else:
group_data['Members'] = members_outputs # add a field with the members to the group
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}
title = f'Group {group_id} members:'
human_readable = tableToMarkdown(name=title, t=members_readable,
headers=['ID', 'Display Name', 'Job Title', 'Mail'],
removeNull=True)
return human_readable, entry_context, members
def add_member_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Add a member to a group by group id and user id. return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
group_id = str(args.get('group_id'))
user_id = str(args.get('user_id'))
required_properties = {
"@odata.id": f'https://graph.microsoft.com/v1.0/users/{user_id}'}
client.add_member(group_id, required_properties)
human_readable = f'User {user_id} was added to the Group {group_id} successfully.'
return human_readable, NO_OUTPUTS, NO_OUTPUTS
def remove_member_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Remove a member from a group by group id and user id. return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
group_id = str(args.get('group_id'))
user_id = str(args.get('user_id'))
client.remove_member(group_id, user_id)
human_readable = f'User {user_id} was removed from the Group "{group_id}" successfully.'
return human_readable, NO_OUTPUTS, NO_OUTPUTS
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
base_url = demisto.params().get('url').rstrip('/') + '/v1.0/'
tenant = demisto.params().get('tenant_id')
auth_and_token_url = demisto.params().get('auth_id').split('@')
auth_id = auth_and_token_url[0]
enc_key = demisto.params().get('enc_key')
verify = not demisto.params().get('insecure', False)
proxy = handle_proxy()
if len(auth_and_token_url) != 2:
token_retrieval_url = 'https://oproxy.demisto.ninja/obtain-token' # guardrails-disable-line
else:
token_retrieval_url = auth_and_token_url[1]
commands = {
'test-module': test_function_command,
'msgraph-groups-list-groups': list_groups_command,
'msgraph-groups-get-group': get_group_command,
'msgraph-groups-create-group': create_group_command,
'msgraph-groups-delete-group': delete_group_command,
'msgraph-groups-list-members': list_members_command,
'msgraph-groups-add-member': add_member_command,
'msgraph-groups-remove-member': remove_member_command
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(base_url, tenant, auth_and_token_url, auth_id, token_retrieval_url, enc_key, verify, proxy)
# Run the command
human_readable, entry_context, raw_response = commands[command](client, demisto.args())
# create a war room entry
return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=raw_response)
except Exception as err:
return_error(str(err))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Top-level package for ProtoBuf Schematics."""
__author__ = """Almog Cohen"""
__version__ = '0.4.1'
|
nilq/baby-python
|
python
|
from config import UPLOAD_FOLDER, COMCORHD_FOLDER, JULGAMENTO_FOLDER, REPOSITORIES, VALIDATE_UD, VALIDATE_LANG, GOOGLE_LOGIN, VALIDAR_UD
from flask import render_template, request
import pandas as pd
import os, estrutura_ud, estrutura_dados, confusao, re, time, datetime, validar_UD
import models, pickle
from app import db, app, executor, allCorpora, modificacoesCorpora
from localtime import localtime
import sys, shutil
MAX_FILE_SIZE = 50
INTERROGATORIO = os.path.isdir(os.path.abspath(os.path.join(JULGAMENTO_FOLDER, "..", "Interrogat-rio")))
def checkRepo(repositorio="", branch=""):
if not os.path.isdir(UPLOAD_FOLDER + "/" + 'repositories'):
os.mkdir(UPLOAD_FOLDER + "/" + 'repositories')
for repo in REPOSITORIES:
if '/' in repo:
if not os.path.isdir(UPLOAD_FOLDER + '/repositories/' + repo.rsplit("/", 1)[1].split(".git")[0]):
if os.system(f'cd {UPLOAD_FOLDER}/repositories; git clone {repo}'):
pass
listRepo = []
for item in os.listdir(UPLOAD_FOLDER + "/" + 'repositories'):
if os.path.isdir(UPLOAD_FOLDER + "/" + 'repositories' + "/" + item):
listRepo.append(item)
branches = []
microBranches = []
if repositorio:
if os.system(f"cd {UPLOAD_FOLDER}/repositories/{repositorio}; git stash; git pull; git ls-remote > branches.txt"):
pass
with open(f"{UPLOAD_FOLDER}/repositories/{repositorio}/branches.txt", 'r') as f:
texto = f.read().splitlines()
for branchFor in texto:
if branchFor and '/heads/' in branchFor:
microBranches.append("<option>" + branchFor.split('/heads/')[1].strip() + "</option>")
branches = ['<select name="branch" id="branch" class="form-control selectpicker branch" data-live-search="true" required>'] + ['<option class="translateHtml" disabled selected value> -- escolha um ramo -- </option>'] + sorted(microBranches) + ["</select>"]
commits = []
if repositorio and branch:
if os.system(f"cd {UPLOAD_FOLDER}/repositories/{repositorio}; git stash; git pull; git checkout {branch}; git pull; git log > commits.txt"):
pass
with open(f"{UPLOAD_FOLDER}/repositories/{repositorio}/commits.txt", 'r') as f:
texto = re.split(r"(^|\n\n)commit ", f.read())
commits.append('<select name="repoCommit" id="repoCommit" class="form-control selectpicker repoCommit" data-live-search="true" required>')
for commitFor in texto:
if commitFor != "\n\n" and commitFor:
commits.append("<option>" + commitFor.split(" ", 1)[1].split("\n")[0] + " | commit " + commitFor.split("\n")[0] + "</option>")
commits.append("</select>")
return {
'repositories': listRepo,
'commits': "\n".join(commits),
'branches': "\n".join(branches),
}
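# Note on the parsing above (illustration): `git ls-remote` prints one "<sha>\trefs/heads/<branch>"
# line per branch, so splitting each line on '/heads/' yields the branch name (e.g. "master")
# that is wrapped in an <option> element for the branch selector; `git log` output is split on
# "commit " headers in the same spirit to build the commit selector.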
def renderErrors(c, texto="", exc=[], fromZero=False):
if not os.path.isfile(conllu(c).findErrors() + "_html") or fromZero:
if fromZero or not texto:
#if not os.path.isfile(conllu(c).findErrors()):
if not 'win' in sys.platform:
if os.system(JULGAMENTO_FOLDER + f'/.julgamento/bin/python3 {os.path.abspath(os.path.dirname(__file__))}/tools/validate.py {conllu(c).findGolden()} --max-err=0 --lang={VALIDATE_LANG} 2>&1 | tee {conllu(c).findErrors()}'):
pass
else:
raise Exception("Only available on Linux.")
with open(conllu(c).findErrors()) as f:
texto = f.read()
if conllu(c).golden() in allCorpora.corpora and allCorpora.corpora.get(conllu(c).golden()):
corpus = allCorpora.corpora.get(conllu(c).golden())
else:
corpus = estrutura_ud.Corpus(recursivo=True)
corpus.load(conllu(c).findGolden())
with open(conllu(c).findGolden(), 'r') as f:
arquivo = f.read()
arquivoSplit = arquivo.splitlines()
sent_ids = {}
exceptions = [
'Exception caught',
'for 9',
'Non-tree',
'HEAD == ID',
'cycle',
'Skipping'
]
exceptions += exc
for linha in texto.splitlines():
if linha and any(x.lower().strip() in linha.lower() for x in exceptions) and ' Node ' in linha and 'Sent ' in linha and ("Line " in linha or ' line ' in linha):
t = int(linha.split("Line ", 1)[1].split(" ")[0]) if "Line " in linha else int(linha.split(" line ", 1)[1].split(" ")[0])
if "\t" in arquivoSplit[t-1]:
if not linha.split(":", 1)[1] in sent_ids:
sent_ids[linha.split(":", 1)[1]] = []
bold = {'word': arquivoSplit[t-1].split("\t")[1], 'color': 'black', 'id': arquivo.splitlines()[t-1].split("\t")[0]}# if '\t' in arquivo.splitlines()[t-1] else ""
t = allCorpora.corpora[conllu(c).golden()].sentences[linha.split(" Node ")[0].split("Sent ", 1)[1]].map_token_id[arquivo.splitlines()[t-1].split("\t")[0]]
sent_ids[linha.split(":", 1)[1]].append({'id': linha.split(" Node ")[0].split("Sent ", 1)[1], 't': t, 'bold': bold})
html = ""
for k, problem in enumerate(sorted(sent_ids)):
html += f"<div class='alert alert-warning' role='alert'>{k+1} / {len(sent_ids)} - {problem}</div>"
for i, sent_id in enumerate(sent_ids[problem]):
if sent_id['id'] in corpus.sentences:
if sent_id['bold']['word'] and sent_id['bold']['color'] and sent_id['t']:
html += f'<div class="panel panel-default"><div class="panel-body">{ i+1 } / { len(sent_ids[problem]) }</div>' + \
render_template(
'sentence.html',
golden=corpus.sentences[sent_id['id']],
c=c,
t=sent_id['t'],
bold=sent_id['bold'],
goldenAndSystem=True if conllu(c).system() in allCorpora.corpora else False,
) + "</div></div>"
else:
html += f'<div class="panel panel-default"><div class="panel-body">{ i+1 } / { len(sent_ids[problem]) }: {sent_id["id"]}</div>'
with open(conllu(c).findErrors() + "_html", "w") as f:
f.write(html)
else:
with open(conllu(c).findErrors() + "_html") as f:
html = f.read()
return html
def findCorpora(filtro, tipo):
lista = []
if tipo == 'available':
corpora = checkCorpora()['available']
elif tipo == 'training':
corpora = checkCorpora()['inProgress']
elif tipo == 'success':
corpora = checkCorpora()['success']
elif tipo == 'delete':
corpora = checkCorpora()['available']
elif tipo == 'onlyGolden':
corpora = checkCorpora()['missingSystem']
elif tipo == 'deleteGolden':
corpora = checkCorpora()['missingSystem']
elif tipo == 'features':
corpora = checkCorpora()['withFeatures']
filtro = filtro.split()
for corpus in corpora:
if tipo not in ["deleteGolden", "onlyGolden", 'features']:
sobre = corpus['sobre'] if 'sobre' in corpus else ""
corpusNom = corpus['nome']
corpusDate = corpus['data']
else:
sobre = ""
corpusNom = corpus
corpusDate = ""
if not filtro or all(x.lower() in (corpusNom+sobre+corpusDate).lower() for x in filtro):
if tipo == 'available':
lista.append(f'<a href="/corpus?c={ corpus["nome"] }" class="list-group-item"><strong>{ corpus["nome"] }</strong> <span class="badge">{ corpus["sentences"] if corpus["sentences"] else "" } <span class="translateHtml">{"sentenças" if corpus["sentences"] else "clique para carregar"}</span></span><br>{ corpus["sobre"] }<br><small>{ prettyDate(corpus["data"]).prettyDateDMAH() }</small></a>')
elif tipo == 'training':
terminated = ""
if prettyDate(corpus["data"]).hora +3 < prettyDate(str(datetime.datetime.now())).hora:
terminated = "&terminated=True"
lista.append(f'<a href="/log?c={ corpus["nome"] }{terminated}" class="list-group-item"><strong>{ corpus["nome"] }</strong><br><span class="translateHtml">Última modificação:</span> { prettyDate(corpus["data"]).prettyDateDMAH() }</a>')
elif tipo == 'success':
lista.append(f'<a href="/log?c={ corpus["nome"] }" class="list-group-item"><strong>{ corpus["nome"] }</strong><br><span class="translateHtml">Conclusão:</span> { prettyDate(corpus["data"]).prettyDateDMAH() }</a>')
elif tipo == 'delete':
lista.append(f'<a style="cursor:pointer" onclick="apagarCorpus(\'{corpus["nome"]}\')" class="list-group-item"><strong>{ corpus["nome"] }</strong> <span class="badge">{ corpus["sentences"] } <span class="translateHtml">sentenças</span></span><br>{ corpus["sobre"] }<br><small>{ prettyDate(corpus["data"]).prettyDateDMAH() }</small></a>')
elif tipo == 'deleteGolden':
lista.append(f'<a style="cursor:pointer" onclick="apagarCorpusGolden(\'{corpus}\')" class="list-group-item"><strong>{ corpus }</strong></a>')
elif tipo == 'onlyGolden':
if os.path.isfile(conllu(corpus).findOriginal()):
lista.append(f'<a href="/corpus?c={ corpus }" class="list-group-item"><strong>{ corpus }</strong></a>')
elif tipo == 'features':
lista.append(f'<a style="cursor:pointer" href="/static/uploads/{conllu(corpus).features()}" class="list-group-item"><strong>{ corpus }</strong></a>')
return "\n".join(lista)
def removerAcento(s):
return re.sub(r'[^A-Za-z0-9_\.\-]', '', s)
def formDB():
return '''
<div class="form-horizontal">
<div class="form-group">
<label for="about" class="col-sm-4 control-label"><span class="translateHtml">Sobre o corpus</span> <span class='glyphicon glyphicon-info-sign translateTitle' title='Informação extra para ajudar a identificar os diferentes corpora disponíveis'></span></label>
<div class="col-sm-8">
<input class="form-control" id="about" name="about" >
</div>
</div>
<div class="form-group">
<label for="partitions" class="col-sm-4 control-label"><span class="translateHtml">Partições</span> <span class='glyphicon glyphicon-info-sign translateTitle' title='A separação entre as partições train/test/dev deve ser feita por meio de arquivos .txt, contendo um ID de sentença por linha, na pasta /static/uploads'></span></label>
<div class="col-sm-8">
<select class="form-control selectpicker" data-live-search="true" id="partitions" name="partitions" required>
''' + "\n".join(\
["<option>" + x.rsplit("-", 1)[0] + "</option>" \
for x in os.listdir(UPLOAD_FOLDER) \
if '.txt' in x \
and "-train" in x \
and all(os.path.isfile(UPLOAD_FOLDER + "/" + x.rsplit("-", 1)[0] + "-" + y + ".txt") \
for y in ['test', 'train', 'dev'])]) + '''
</select>
</div>
</div>
<div class="form-group">
<div class="col-sm-offset-4 col-sm-8">
<div class="checkbox">
<label>
<input name="crossvalidation" type="checkbox"> <span class="translateHtml">Treinar todo o corpus (crossvalidation)</span>
<span class='glyphicon glyphicon-info-sign translateTitle' title='Treinar um corpus inteiro (crossvalidation) significa que vários modelos serão treinados, um para cada pedaço do corpus, de modo a garantir que o treino será realizado em todo o corpus e não haverá enviesamento. Pode demorar alguns dias para concluir o processo.'></span>
</label>
</div>
</div>
</div>
</div>
'''
class conllu:
def __init__(self, corpus):
if '/' in corpus: corpus = corpus.rsplit('/', 1)[1]
self.naked = corpus.split("_inProgress")[0].split("_meta")[0].split('_sistema')[0].split(".conllu")[0].split('_success')[0].split('_original')[0].split('_features.html')[0]
def golden(self):
return self.naked + ".conllu"
def original(self):
return self.naked + "_original.conllu"
def system(self):
return self.naked + "_sistema.conllu"
def inProgress(self):
return self.naked + "_inProgress"
def success(self):
return self.naked + "_success"
def errors(self):
return self.naked + "_errors"
def features(self):
return self.naked + "_features.html"
def findGolden(self):
if INTERROGATORIO and os.path.isfile(f'{COMCORHD_FOLDER}/{self.naked}.conllu'):
return f'{COMCORHD_FOLDER}/{self.naked}.conllu'
elif os.path.isfile(UPLOAD_FOLDER + "/" + self.naked + ".conllu"):
return UPLOAD_FOLDER + "/" + self.naked + ".conllu"
elif INTERROGATORIO:
return f'{COMCORHD_FOLDER}/{self.naked}.conllu'
else:
return UPLOAD_FOLDER + "/" + self.naked + ".conllu"
def findOriginal(self):
return UPLOAD_FOLDER + "/" + self.naked + "_original.conllu"
def findFeatures(self):
return UPLOAD_FOLDER + "/" + self.naked + "_features.html"
def findSystem(self):
return UPLOAD_FOLDER + "/" + self.naked + "_sistema.conllu"
def findInProgress(self):
return UPLOAD_FOLDER + "/" + self.naked + "_inProgress"
def findSuccess(self):
return UPLOAD_FOLDER + "/" + self.naked + "_success"
def findErrors(self):
return UPLOAD_FOLDER + "/" + self.naked + "_errors"
def findErrorsValidarUD(self):
return UPLOAD_FOLDER + "/" + self.naked + "_errorsValidarUD"
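# Illustration of the naming scheme handled by conllu (assert-style example only; the
# find*() variants additionally resolve UPLOAD_FOLDER/COMCORHD_FOLDER on disk):
def _conllu_naming_example():
    c = conllu("meu_corpus_sistema.conllu")
    assert c.naked == "meu_corpus"
    assert c.golden() == "meu_corpus.conllu"
    assert c.system() == "meu_corpus_sistema.conllu"
    assert c.inProgress() == "meu_corpus_inProgress"
    return c.naked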
class prettyDate:
def __init__(self, date):
date = str(date)
calendario_raw = "janeiro,fevereiro,março,abril,maio,junho,julho,agosto,setembro,outubro,novembro,dezembro"
calendario = {i+1: mes for i, mes in enumerate(calendario_raw.split(","))}
data = date.split(" ")[0].split("-")
self.dia = int(data[2])
self.mes = int(data[1])
self.mesExtenso = calendario[self.mes]
self.mesExtenso_3 = "".join(calendario[self.mes][:3])
self.ano = int(data[0])
horabruta = date.split(" ")[1].rsplit(":", 1)[0]
self.hora = int(horabruta.split(":")[0]) - localtime
if self.hora < 0: self.hora = 24 + self.hora
self.tempo = str(self.hora) + ":" + horabruta.split(":")[1]
def prettyDateDMAH(self):
return f"{self.dia} de {self.mesExtenso_3}. {self.ano} {self.tempo}"
def prettyDateDMH(self):
return f"{self.dia} de {self.mesExtenso_3}. às {self.tempo}"
def prettyDateDMA(self):
return f"{self.dia} de {self.mesExtenso} de {self.ano}"
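# Illustration only (hypothetical timestamp): prettyDate parses "YYYY-MM-DD HH:MM:SS"
# strings; prettyDateDMA is date-only, while prettyDateDMAH/prettyDateDMH also show the
# hour shifted by the imported `localtime` offset, so those depend on the deployment.
def _pretty_date_example():
    d = prettyDate("2021-03-05 14:30:00")
    return d.prettyDateDMA()  # -> "5 de março de 2021"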
dicionarioColunas = {
'0': 'id',
'1': 'word',
'2': 'lemma',
'3': 'upos',
'4': 'xpos',
'5': 'feats',
'6': 'dephead',
'7': 'deprel',
'8': 'deps',
'9': 'misc',
}
def getMatrixSentences(c, golden, system, coluna):
listaSentences = []
ud1 = allCorpora.corpora.get(conllu(c).golden())
ud2 = allCorpora.corpora.get(conllu(c).system())
for sent_id, sentence in ud1.sentences.items():
if sent_id in ud2.sentences and len(sentence.tokens) == len(ud2.sentences[sent_id].tokens):
for t, token in enumerate(sentence.tokens):
if token.__dict__[coluna.lower()] == golden and ud2.sentences[sent_id].tokens[t].__dict__[coluna.lower()] == system:
listaSentences.append({
'sent_id': sent_id,
'golden': sentence,
'system': ud2.sentences[sent_id],
'divergence': {
'system': {'category': system, 'head': {'id': ud2.sentences[sent_id].tokens[t].head_token.id, 'word': ud2.sentences[sent_id].tokens[t].head_token.word}},
'golden': {'category': golden, 'head': {'id': token.head_token.id, 'word': token.head_token.word}}
},
'col': coluna.lower(),
'bold': {'word': token.word, 'color': 'black', 'id': token.id},
'boldCol': f'{coluna.lower()}<coluna>{t}',
'secBold': {'word': token.head_token.word, 'color': 'green', 'id': token.head_token.id} if coluna.lower() in ["deprel"] else "",
'thirdBold': {'word': ud2.sentences[sent_id].tokens[t].head_token.word, 'color': 'red', 'id': ud2.sentences[sent_id].tokens[t].head_token.id} if coluna.lower() in ["deprel"] else "",
't': t
})
return listaSentences
def sortLambda(dicionario, lambdaattr, reverse=True):
return sorted(dicionario, key=lambda x: dicionario[x][lambdaattr], reverse=reverse)
def categoryAccuracy(ud1, ud2, c, coluna="DEPREL"):
tables = ""
golden = allCorpora.corpora.get(conllu(ud1).golden())
system = allCorpora.corpora.get(conllu(ud2).system())
dicionario = {}
UAS = dict()
for sentid, sentence in golden.sentences.items():
if sentid in system.sentences and len(golden.sentences[sentid].tokens) == len(system.sentences[sentid].tokens):
for t, token in enumerate(sentence.tokens):
if not token.__dict__[coluna.lower()] in dicionario:
dicionario[token.__dict__[coluna.lower()]] = [0, 0, 0]
if not token.__dict__[coluna.lower()] in UAS:
UAS[token.__dict__[coluna.lower()]] = dict()
dicionario[token.__dict__[coluna.lower()]][0] += 1
if coluna == "DEPREL" and system.sentences[sentid].tokens[t].__dict__[coluna.lower()] == token.__dict__[coluna.lower()]:
dicionario[token.__dict__[coluna.lower()]][2] += 1
if ((coluna == "DEPREL" and system.sentences[sentid].tokens[t].__dict__['dephead'] == token.__dict__['dephead']) or (coluna == "UPOS")) and system.sentences[sentid].tokens[t].__dict__[coluna.lower()] == token.__dict__[coluna.lower()]:
dicionario[token.__dict__[coluna.lower()]][1] += 1
elif system.sentences[sentid].tokens[t].__dict__[coluna.lower()] == token.__dict__[coluna.lower()]:
tok_golden = token.head_token.upos
tok_system = system.sentences[sentid].tokens[t].head_token.upos
tok_golden += "_L" if int(token.head_token.id) < int(token.id) else "_R"
tok_system += "_L" if int(system.sentences[sentid].tokens[t].head_token.id) < int(system.sentences[sentid].tokens[t].id) else "_R"
if tok_golden + "/" + tok_system in UAS[token.__dict__[coluna.lower()]]:
UAS[token.__dict__[coluna.lower()]][tok_golden + "/" + tok_system][0] += 1
else:
UAS[token.__dict__[coluna.lower()]][tok_golden + "/" + tok_system] = [1, []]
UAS[token.__dict__[coluna.lower()]][tok_golden + "/" + tok_system][1].append([sentid, t])
coluna1 = ""
coluna2 = ""
coluna3 = ""
if coluna == "DEPREL":
conteudo = "".join([f"<tr><td>{x}</td><td>{dicionario[x][0]}</td><td>{(dicionario[x][2] / dicionario[x][0])*100}%</td><td>{(dicionario[x][1] / dicionario[x][0])*100}%</td><td class='matrixTd'><a href='/corpus?c={c}&{coluna}={x}'>{(sum([len(UAS[x][y][1]) for y in UAS[x]]) / dicionario[x][0])*100}%</a></td></tr>" for x in sorted(dicionario, key=lambda x: x)])
coluna2 = "<a style='text-decoration:underline; color:white; cursor:text;' class='translateTitle translateHtml' title='LAS é quando o deprel e o dephead estão corretos'>LAS</a>"
coluna3 = "<a style='text-decoration:underline; color:white; cursor:text;' class='translateTitle translateHtml' title='Os erros de dephead são contabilizados apenas quando a etiqueta deprel está correta. Para ver divergências de deprel, verificar matriz de confusão'>Erros de dephead</a>"
coluna1 = "<a style='text-decoration:underline; color:white; cursor:text;' class='translateTitle translateHtml' title='Acertos de deprel sem contabilizar dephead. Para ver divergências de deprel, verificar matriz de confusão'>Acertos</a>"
elif coluna == "UPOS":
conteudo = "".join([f"<tr><td>{x}</td><td>{dicionario[x][0]}</td><td>{(dicionario[x][1] / dicionario[x][0])*100}%</td></tr>" for x in sorted(dicionario, key=lambda x: x)])
coluna1 = "<span class='translateHtml'>Acertos</span>"
tables += f"<table id='t01' style='margin:auto; max-height:70vh; display:block; overflow-x: auto; overflow-y:auto;'><thead><tr style='text-align:center;'><th>{coluna}</th><th>Total</th>{'<th>' + coluna1 + '</th>' if coluna1 else ''}{'<th>' + coluna2 + '</th>' if coluna2 else ''}{'<th>' + coluna3 + '</th>' if coluna3 else ''}</tr></thead>\
{conteudo}\
</table>"
return {'tables': tables, 'UAS': UAS}
def caracteristicasCorpus(ud1, ud2=""):
golden = allCorpora.corpora.get(conllu(ud1).golden())
if not golden:
return None
system = "" if not ud2 else allCorpora.corpora.get(conllu(ud2).system())
n_Tokens = 0
n_Sentences = len(golden.sentences)
dicionario_Lemas = {}
documentos_golden = {}
documentos_sistema = {}
for sentence in golden.sentences.values():
documento = sentence.sent_id.rsplit("-", 1)[0]
if not documento in documentos_golden:
documentos_golden[documento] = [0, 0]
documentos_golden[documento][0] += 1
for token in sentence.tokens:
if not '-' in token.id:
if not token.lemma in dicionario_Lemas:
dicionario_Lemas[token.lemma] = 0
dicionario_Lemas[token.lemma] += 1
n_Tokens += 1
documentos_golden[documento][1] += 1
if system:
n_Tokens_s = 0
n_Sentences_s = len(system.sentences)
dicionario_Lemas_s = {}
for sentence in system.sentences.values():
documento = sentence.sent_id.rsplit("-", 1)[0]
if not documento in documentos_sistema:
documentos_sistema[documento] = [0, 0]
documentos_sistema[documento][0] += 1
for token in sentence.tokens:
if not '-' in token.id:
if not token.lemma in dicionario_Lemas_s:
dicionario_Lemas_s[token.lemma] = 0
dicionario_Lemas_s[token.lemma] += 1
n_Tokens_s += 1
documentos_sistema[documento][1] += 1
tabela_Geral = "<h3 class='translateHtml'>Características do corpus</h3><br>"
if system:
tabela_Geral += "<table style='max-height:70vh; margin:auto; display:block; overflow-x: auto; overflow-y: auto; overflow:scroll;'>"
tabela_Geral += "<tr><td></td><th class='translateHtml'>Sentenças</th><th class='translateHtml'>Tokens</th><th class='translateHtml'>Lemas diferentes</th></tr>"
tabela_Geral += f"<tr><th class='translateHtml'>Golden</th><td>{n_Sentences}</td><td>{n_Tokens}</td><td>{len(dicionario_Lemas)}</td></tr>"
tabela_Geral += f"<tr><th class='translateHtml'>Sistema</th><td>{n_Sentences_s}</td><td>{n_Tokens_s}</td><td>{len(dicionario_Lemas_s)}</td></tr>"
else:
tabela_Geral += "<table style='max-height:70vh; margin:auto; display:block; overflow-x: auto; overflow-y: auto; overflow:scroll;'>"
tabela_Geral += "<tr><td></td><th class='translateHtml'>Sentenças</th><th class='translateHtml'>Tokens</th><th class='translateHtml'>Lemas diferentes</th></tr>"
tabela_Geral += f"<tr><th class='translateHtml'>Golden</th><td>{n_Sentences}</td><td>{n_Tokens}</td><td>{len(dicionario_Lemas)}</td></tr>"
tabela_Geral += "</table>"
if documentos_golden:
tabela_Geral += "<br><table style='max-height:70vh; margin:auto; display:block; overflow-x: auto; overflow-y: auto; overflow:scroll;'>"
tabela_Geral += "<tr><th class='translateHtml'>GOLDEN</th><th class='translateHtml'>Sentenças</th><th class='translateHtml'>Tokens</th></tr>"
for documento in sorted(documentos_golden):
tabela_Geral += f"<tr><td>{documento}</td><td>{documentos_golden[documento][0]}</td><td>{documentos_golden[documento][1]}</td></tr>"
tabela_Geral += "</table>"
if system:
tabela_Geral += "<br><table style='max-height:70vh; margin:auto; display:block; overflow-x: auto; overflow-y: auto; overflow:scroll;'>"
tabela_Geral += "<tr><th class='translateHtml'>SISTEMA</th><th class='translateHtml'>Sentenças</th><th class='translateHtml'>Tokens</th></tr>"
for documento in sorted(documentos_sistema):
tabela_Geral += f"<tr><td>{documento}</td><td>{documentos_sistema[documento][0]}</td><td>{documentos_sistema[documento][1]}</td></tr>"
tabela_Geral += "</table>"
c = conllu(ud1).naked
depois = allCorpora.corpora[conllu(c).golden()]
antes = allCorpora.corpora[conllu(c).original()]
lemas_diferentes = {}
upos_diferentes = {}
deprel_diferentes = {}
sentences_diferentes = []
text_diferentes = []
comparable_sentences = []
not_comparable_sentences = []
removed_sentences = []
modified_tokens = []
for sentid, sentence in antes.sentences.items():
if not sentid in depois.sentences:
removed_sentences.append(sentid)
continue
if sentence.tokens_to_str() != depois.sentences[sentid].tokens_to_str():
sentences_diferentes.append(sentid)
if sentence.text != depois.sentences[sentid].text:
text_diferentes.append(sentid + "<br>" + sentence.text + "<depois>" + depois.sentences[sentid].text)
if len(sentence.tokens) != len(depois.sentences[sentid].tokens):
not_comparable_sentences.append(sentid)
else:
comparable_sentences.append(sentid)
for t, token in enumerate(sentence.tokens):
if token.to_str() != depois.sentences[sentid].tokens[t].to_str():
modified_tokens.append(1)
if token.lemma != depois.sentences[sentid].tokens[t].lemma:
if not token.lemma + "<depois>" + depois.sentences[sentid].tokens[t].lemma in lemas_diferentes:
lemas_diferentes[token.lemma + "<depois>" + depois.sentences[sentid].tokens[t].lemma] = []
lemas_diferentes[token.lemma + "<depois>" + depois.sentences[sentid].tokens[t].lemma].append({'sent_id': sentid, 'golden': sentence, 't': t, 'bold': {'word': token.word, 'color': 'red', 'id': token.id}})
if token.upos != depois.sentences[sentid].tokens[t].upos:
if not token.upos + "<depois>" + depois.sentences[sentid].tokens[t].upos in upos_diferentes:
upos_diferentes[token.upos + "<depois>" + depois.sentences[sentid].tokens[t].upos] = []
upos_diferentes[token.upos + "<depois>" + depois.sentences[sentid].tokens[t].upos].append({'sent_id': sentid, 'golden': sentence, 't': t, 'bold': {'word': token.word, 'color': 'red', 'id': token.id}})
if token.deprel != depois.sentences[sentid].tokens[t].deprel:
if not token.deprel + "<depois>" + depois.sentences[sentid].tokens[t].deprel in deprel_diferentes:
deprel_diferentes[token.deprel + "<depois>" + depois.sentences[sentid].tokens[t].deprel] = []
deprel_diferentes[token.deprel + "<depois>" + depois.sentences[sentid].tokens[t].deprel].append({'sent_id': sentid, 'golden': sentence, 't': t, 'bold': {'word': token.word, 'color': 'red', 'id': token.id}})
modificacoesCorpora.modificacoes[c] = {'lemma': lemas_diferentes, 'upos': upos_diferentes, 'deprel': deprel_diferentes}
sentences_iguais = [x for x in depois.sentences if x not in sentences_diferentes]
tabela_Geral += f"<br><h4><span class='translateHtml' style='cursor:pointer;' onclick='$(\".modified_sentences\").slideToggle();'>Sentenças modificadas</span>: {len(sentences_diferentes)} / {round((len(sentences_diferentes)/n_Sentences)*100, 2)}%</h4><pre class='modified_sentences' style='display:none;'>{'; '.join(sentences_diferentes)}</pre>"
tabela_Geral += f"<br><h4><span class='translateHtml' style='cursor:pointer;' onclick='$(\".unmodified_sentences\").slideToggle();'>Sentenças não modificadas</span>: {len(sentences_iguais)} / {round((len(sentences_iguais)/n_Sentences)*100, 2)}%</h4><pre class='unmodified_sentences' style='display:none'>{'; '.join(sentences_iguais)}</pre>"
tabela_Geral += f"<br><h4><span class='translateHtml' style='cursor:pointer;' onclick='$(\".removed_sentences\").slideToggle();'>Sentenças removidas</span>: {len(removed_sentences)}</h4><pre class='removed_sentences' style='display:none'>{'; '.join(removed_sentences)}</pre>"
tabela_Geral += f"<br><h4><span class='translateHtml' style='cursor:pointer;' onclick='$(\".different_tokenization\").slideToggle();'>Sentenças com tokenização diferente</span>: {len(not_comparable_sentences)}</h4><pre class='different_tokenization' style='display:none'>{'; '.join(not_comparable_sentences)}</pre>"
tabela_Geral += f"<br><h4 style='cursor:pointer;' onclick='$(\".different_text\").slideToggle();'><span class='translateHtml'>\"# text\" modificados</span>: {len(text_diferentes)}</h4>"
tabela_Geral += "<table class='different_text' style='display:none;'>"
for entrada in text_diferentes:
tabela_Geral += "<tr><th></th><th>{}</th></tr>".format(entrada.split("<br>")[0])
tabela_Geral += "<tr><th class='translateHtml'>ANTES</th><td>{}</td></tr>".format(entrada.split("<depois>")[0].split("<br>")[1])
tabela_Geral += "<tr><th class='translateHtml'>DEPOIS</th><td>{}</td></tr>".format(entrada.split("<depois>")[1])
tabela_Geral += "</table>"
tabela_Geral += f"<br><h4><span class='translateHtml'>Tokens modificados</span>: {len(modified_tokens)} / {round((len(modified_tokens)/n_Tokens)*100, 2)}%</h4>"
tabela_Geral += f"<br><h4><span class='translateHtml'>Tokens modificados por sentença modificada</span>: {len(modified_tokens)/len(sentences_diferentes) if len(sentences_diferentes) else '0'}</h4>"
tabela_Geral += f"<br><h4 style='cursor:pointer;' onclick='$(\".dist_lemas\").slideToggle();'><span class='translateHtml'>Distribuição de lemas</span>: {len(dicionario_Lemas)}</h4>"
total_lemas = sum([dicionario_Lemas[y] for y in dicionario_Lemas])
tabela_Geral += "<div style='margin-top:10px; display:none' class='dist_lemas'>"
tabela_Geral += "<div class='col-lg-6'><table>"
tabela_Geral += "<tr><th class='translateHtml'>Lemas em Golden</th><th>#</th><th>%</th></tr>"
tabela_Geral += "".join([f"<tr><td>{x}</td><td>{dicionario_Lemas[x]}</td><td>{str((dicionario_Lemas[x]/total_lemas)*100)[:5]}%</td></tr>" for x in sorted(dicionario_Lemas, reverse=False, key=lambda y: (-dicionario_Lemas[y], y))])
tabela_Geral += "</table></div>"
if system:
total_lemas = sum([dicionario_Lemas_s[y] for y in dicionario_Lemas_s])
tabela_Geral += "<div class='col-lg-6'><table>"
tabela_Geral += "<tr><th class='translateHtml'>Lemas em Sistema</th><th>#</th><th>%</th></tr>"
tabela_Geral += "".join([f"<tr><td>{x}</td><td>{dicionario_Lemas_s[x]}</td><td>{str((dicionario_Lemas_s[x]/total_lemas)*100)[:5]}%</td></tr>" for x in sorted(dicionario_Lemas_s, reverse=False, key=lambda y: (-dicionario_Lemas_s[y], y))])
tabela_Geral += "</table></div>"
tabela_Geral += "</div>"
tabela_Geral += f"<br><h4 style='cursor:pointer;' onclick='$(\".different_lemma\").slideToggle();'><span class='translateHtml'>Lemas modificados</span>: {sum([len(lemas_diferentes[x]) for x in lemas_diferentes])}</h4>"
tabela_Geral += "<table class='different_lemma' style='display:none'>"
tabela_Geral += "<tr><th class='translateHtml'>ANTES</th><th class='translateHtml'>DEPOIS</th><th>#</th></tr>"
tabela_Geral += "".join(["<tr><td>" + x.split("<depois>")[0] + "</td><td>" + x.split("<depois>")[1] + f"</td><td class='matrixTd'><a href='/corpus?c={c}&antes={x.split('<depois>')[0]}&depois={x.split('<depois>')[1]}&mod=lemma'>" + str(len(lemas_diferentes[x])) + "</a></td></tr>" for x in sorted(lemas_diferentes, reverse=False, key=lambda y: (-len(lemas_diferentes[y]), y))])
tabela_Geral += "</table>"
tabela_Geral += f"<br><h4 style='cursor:pointer;' onclick='$(\".different_upos\").slideToggle();'><span class='translateHtml'>UPOS modificados</span>: {sum([len(upos_diferentes[x]) for x in upos_diferentes])}</h4>"
tabela_Geral += "<table style='display:none;' class='different_upos'>"
tabela_Geral += "<tr><th class='translateHtml'>ANTES</th><th class='translateHtml'>DEPOIS</th><th>#</th></tr>"
tabela_Geral += "".join(["<tr><td>" + x.split("<depois>")[0] + "</td><td>" + x.split("<depois>")[1] + f"</td><td class='matrixTd'><a href='/corpus?c={c}&antes={x.split('<depois>')[0]}&depois={x.split('<depois>')[1]}&mod=upos'>" + str(len(upos_diferentes[x])) + "</a></td></tr>" for x in sorted(upos_diferentes, reverse=False, key=lambda y: (-len(upos_diferentes[y]), y))])
tabela_Geral += "</table>"
tabela_Geral += f"<br><h4 style='cursor:pointer;' onclick='$(\".different_deprel\").slideToggle();'><span class='translateHtml'>DEPREL modificados</span>: {sum([len(deprel_diferentes[x]) for x in deprel_diferentes])}</h4>"
tabela_Geral += "<table class='different_deprel' style='display:none'>"
tabela_Geral += "<tr><th class='translateHtml'>ANTES</th><th class='translateHtml'>DEPOIS</th><th>#</th></tr>"
tabela_Geral += "".join(["<tr><td>" + x.split("<depois>")[0] + "</td><td>" + x.split("<depois>")[1] + f"</td><td class='matrixTd'><a href='/corpus?c={c}&antes={x.split('<depois>')[0]}&depois={x.split('<depois>')[1]}&mod=deprel'>" + str(len(deprel_diferentes[x])) + "</a></td></tr>" for x in sorted(deprel_diferentes, reverse=False, key=lambda y: (-len(deprel_diferentes[y]), y))])
tabela_Geral += "</table>"
with open(conllu(ud1).findFeatures(), "w") as f:
f.write(render_template('caracteristicas.html',
tabela_Geral=tabela_Geral,
corpus=conllu(ud1).naked,
user="")
)
return tabela_Geral
def sentAccuracy(ud1, ud2):
golden = allCorpora.corpora.get(conllu(ud1).golden())
system = allCorpora.corpora.get(conllu(ud2).system())
sent_accuracy = [0, 0]
for sentid, sentence in golden.sentences.items():
if sentid in system.sentences and len(sentence.tokens) == len(system.sentences[sentid].tokens):
sent_accuracy[0] += 1
acertos = 0
for t, token in enumerate(sentence.tokens):
if system.sentences[sentid].tokens[t].upos == token.upos and system.sentences[sentid].tokens[t].dephead == token.dephead and system.sentences[sentid].tokens[t].deprel == token.deprel:
acertos += 1
if acertos == len(sentence.tokens):
sent_accuracy[1] += 1
return "<table style='max-height:70vh; margin:auto; display:block; overflow-x: auto; overflow-y: auto; overflow:scroll;'><tr><th></th><th>#</th><th>%</th></tr><tr><th class='translateHtml'>Sentenças comparáveis</th><td>{comparableSentences}</td><td>{percentSentences}</td></tr>\
<tr><th class='translateHtml'>Sentenças corretas</th><td>{correctSentences}</td><td>{percentCorrect}</td></tr>\
</table>".format(
comparableSentences=sent_accuracy[0],
percentSentences=f"{(sent_accuracy[0] / len(golden.sentences)) * 100}%",
correctSentences=sent_accuracy[1],
percentCorrect=f"{(sent_accuracy[1] / sent_accuracy[0]) * 100}%",
)
def metrics(ud1, ud2):
html = ""
if os.system(f"python3 {JULGAMENTO_FOLDER}/conll18_ud_eval.py {ud1} {ud2} -v > {UPLOAD_FOLDER}/{conllu(ud1).naked}_metrics"):
pass
with open(f"{UPLOAD_FOLDER}/{conllu(ud1).naked}_metrics", 'r') as f:
html += f"<pre>{f.read()}</pre>"
return html
def matrix(table, c, kind="UPOS"):
html = ""
colunas = [x for x in table.splitlines()[0].split()]
for i, linha in enumerate(table.splitlines()):
ud1 = linha.split()[0]
if i == 0:
html += "<thead>"
html += "<tr>"
for k, coluna in enumerate(linha.split()):
ud2 = colunas[k] if len(colunas) > k else ""
html += "<t{dorh}>{0}{2}{1}</t{dorh}>".format(f"<a href='/corpus?c={c}&ud1={ud1}&ud2={ud2}&col={kind}'>" if k != 0 and i != 0 and k + 1 < len(linha.split()) and i + 1 < len(table.splitlines()) else "", "</a>" if k != 0 and i != 0 and k + 1 < len(linha.split()) and i + 1 < len(table.splitlines()) else "", coluna, dorh="h" if k == 0 or i == 0 else "d class='matrixTd'")
html += '</tr>'
if i == 0:
html += "</thead>"
return "<table id='t01' style='margin:auto; max-height:85vh; display:block; overflow-x: auto; overflow-y:auto;'>" + html + "</table>"
def resub(s, a, b):
return re.sub(r'\b' + a + r'\b', b, s)
def paint_text(sentence, id1, color1, id2="", color2="", id3="", color3=""):
text = []
for token in sentence.tokens:
if not '-' in token.id and not '.' in token.id:
word = token.word
if id3 and token.id == id3:
word = "<span style='color:{}'>{}</span>".format(color3 if id2 != id3 else "purple", word)
elif id2 and token.id == id2:
word = "<span style='color:{}'>{}</span>".format(color2, word)
elif id1 and token.id == id1:
word = "<b><span style='color:{}'>{}</span></b>".format(color1, word)
text.append(word)
return " ".join(text)
#@executor.job
def loadCorpus(x):
if os.path.isfile(conllu(x).findGolden()) and not os.path.isfile(conllu(x).findOriginal()):
shutil.copyfile(conllu(x).findGolden(), conllu(x).findOriginal())
if os.path.isfile(conllu(x).findSystem()) and not conllu(x).system() in allCorpora.corpora:
allCorpora.corpora[conllu(x).system()] = estrutura_ud.Corpus(recursivo=True)
if not conllu(x).golden() in allCorpora.corpora:
allCorpora.corpora[conllu(x).golden()] = estrutura_ud.Corpus(recursivo=True)
if not conllu(x).original() in allCorpora.corpora:
allCorpora.corpora[conllu(x).original()] = estrutura_ud.Corpus(recursivo=True)
if conllu(x).system() in allCorpora.corpora and not allCorpora.corpora[conllu(x).system()].sentences:
sys.stderr.write("\n>>>>>>>>>>>>>> loading system {}...".format(x))
corpus = estrutura_ud.Corpus(recursivo=True)
corpus.load(conllu(x).findSystem())
allCorpora.corpora[conllu(x).system()].sentences = dict(corpus.sentences.items())
sys.stderr.write(" system ok <<<<<<<<")
if conllu(x).original() in allCorpora.corpora and not allCorpora.corpora[conllu(x).original()].sentences:
corpus = estrutura_ud.Corpus(recursivo=True)
corpus.load(conllu(x).findOriginal())
allCorpora.corpora[conllu(x).original()].sentences = dict(corpus.sentences.items())
if conllu(x).golden() in allCorpora.corpora and not allCorpora.corpora[conllu(x).golden()].sentences:
sys.stderr.write("\n>>>>>>>>>>>>>> loading {}...".format(x))
corpus = estrutura_ud.Corpus(recursivo=True)
corpus.load(conllu(x).findGolden())
allCorpora.corpora[conllu(x).golden()].sentences = dict(corpus.sentences.items())
sys.stderr.write(" ok <<<<<<<<")
corpus = ""
def addDatabase(golden):
corpusdb = db.session.query(models.Corpus).get(conllu(golden).naked)
if corpusdb:
db.session.delete(corpusdb)  # session.remove() only detaches the scoped session; delete() removes the row
db.session.commit()
novoCorpus = models.Corpus(
name=conllu(golden).naked,
date=str(datetime.datetime.now()),
sentences=0,
about=request.values.get('sysAbout') if request.values.get('sysAbout') else ">",
partitions="",
author=google.get('/oauth2/v2/userinfo').json()['email'] if GOOGLE_LOGIN else "",
goldenAlias='Golden',
systemAlias='Sistema'
)
db.session.add(novoCorpus)
db.session.commit()
def checkCorpora():
availableCorpora = []
missingSystem = []
for corpus in list(allCorpora.corpora.keys()):
if not os.path.isfile(conllu(corpus).findGolden()) and conllu(corpus).golden() in allCorpora.corpora:
allCorpora.corpora.pop(conllu(corpus).golden())
if conllu(corpus).system() in allCorpora.corpora:
allCorpora.corpora.pop(conllu(corpus).system())
corpusdb = db.session.query(models.Corpus).get(conllu(corpus).naked)
if corpusdb:
db.session.delete(corpusdb)
db.session.commit()
if os.path.isfile(conllu(corpus).findSystem()):
os.remove(conllu(corpus).findSystem())
if os.path.isfile(conllu(corpus).findOriginal()):
os.remove(conllu(corpus).findOriginal())
if not os.path.isfile(conllu(corpus).findOriginal()) and conllu(corpus).original() in allCorpora.corpora:
allCorpora.corpora.pop(conllu(corpus).original())
if INTERROGATORIO:
for x in os.listdir(COMCORHD_FOLDER):
if os.path.getsize("{}/{}".format(COMCORHD_FOLDER, x))/1024/1000 < MAX_FILE_SIZE:
if x.endswith('.conllu') and os.path.isfile(f'{UPLOAD_FOLDER}/{conllu(x).system()}'):
if not db.session.query(models.Corpus).get(conllu(x).naked):
addDatabase(x)
availableCorpora += [{'nome': conllu(x).naked, 'data': db.session.query(models.Corpus).get(conllu(x).naked).date, 'sobre': db.session.query(models.Corpus).get(conllu(x).naked).about, 'sentences': len(allCorpora.corpora[conllu(x).golden()].sentences) if conllu(x).golden() in allCorpora.corpora and not isinstance(allCorpora.corpora[conllu(x).golden()], str) else 0}]
for x in os.listdir(UPLOAD_FOLDER):
if os.path.getsize("{}/{}".format(UPLOAD_FOLDER, x))/1024/1000 < MAX_FILE_SIZE:
if x.endswith('.conllu') and not x.endswith("_sistema.conllu") and not x.endswith("_original.conllu") and os.path.isfile(f"{UPLOAD_FOLDER}/{conllu(x).system()}") and not any(conllu(x).naked == k['nome'] for k in availableCorpora):
if not db.session.query(models.Corpus).get(conllu(x).naked):
addDatabase(x)
availableCorpora += [{'nome': conllu(x).naked, 'data': db.session.query(models.Corpus).get(conllu(x).naked).date, 'sobre': db.session.query(models.Corpus).get(conllu(x).naked).about, 'sentences': len(allCorpora.corpora[conllu(x).golden()].sentences) if conllu(x).golden() in allCorpora.corpora and not isinstance(allCorpora.corpora[conllu(x).golden()], str) else 0}]  # check the golden corpus (the key actually indexed) to avoid a KeyError
if INTERROGATORIO:
for x in os.listdir(COMCORHD_FOLDER):
if os.path.getsize("{}/{}".format(COMCORHD_FOLDER, x))/1024/1000 < MAX_FILE_SIZE:
if x.endswith('.conllu') and not any(x.endswith(y) for y in ['_sistema.conllu', '_original.conllu']) and not os.path.isfile(f"{UPLOAD_FOLDER}/{conllu(x).system()}") and not os.path.isfile(f"{UPLOAD_FOLDER}/{conllu(x).inProgress()}"):
missingSystem += [conllu(x).naked]
for x in os.listdir(UPLOAD_FOLDER):
if os.path.getsize("{}/{}".format(UPLOAD_FOLDER, x))/1024/1000 < MAX_FILE_SIZE:
if x.endswith('.conllu') and not os.path.isfile(f"{UPLOAD_FOLDER}/{conllu(x).system()}") and not any(x.endswith(y) for y in ['_sistema.conllu', '_original.conllu']) and not os.path.isfile(f"{UPLOAD_FOLDER}/{conllu(x).inProgress()}") and not conllu(x).naked in missingSystem:
missingSystem += [conllu(x).naked]
inProgress = [{'nome': conllu(x).naked, 'data': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(conllu(x).findInProgress())))} for x in os.listdir(UPLOAD_FOLDER) if x.endswith('_inProgress')]
success = [{'nome': conllu(x).naked, 'data': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(conllu(x).findSuccess())))} for x in os.listdir(UPLOAD_FOLDER) if x.endswith('_success')]
features = []
for arquivo in os.listdir(UPLOAD_FOLDER):
if arquivo == conllu(arquivo).features():
if conllu(arquivo).naked not in features and conllu(arquivo).naked not in [conllu(x).naked for x in allCorpora.corpora]:
features.append(arquivo.split("_features.html")[0])
return {
'available': sorted(availableCorpora, key=lambda x: x['data'], reverse=True),
'missingSystem': sorted(missingSystem),
'onlyGolden': sorted(missingSystem),
'inProgress': sorted(inProgress, key=lambda x: x['data'], reverse=True),
'success': sorted(success, key=lambda x: x['data'], reverse=True),
'withFeatures': sorted(features),
}
|
nilq/baby-python
|
python
|
'''
Author: Siyun WANG
'''
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import pandas as pd
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from ExploreData import ExploreData
class BasicStatisticPlots(object):
'''
Make basic statistic plots for data visualisation
==========
Parameters
==========
expData: ExploreData object
'''
def __init__(self, expData):
self.explorer = expData
self.explorer()
self.data = expData.data
self.numerical_vars = expData.numerical_vars
self.categorical_vars = expData.categorical_vars
self.datetime_vars = expData.datetime_vars
self.nb_rows = self.data.shape[0]
self.nb_cols = self.data.shape[1]
# tested
def corrMatPlot(self, data=None, annot=True, threshold=None):
'''
plot correlation matrix
=====
INPUT
=====
data: pandas dataframe, optional, default = None
data to be plot. If None, then the class attribute data will be used.
annot: boolean, optional, default = True
whether to print the exact value of each element in the correlation matrix
threshold: float between 0 and 1, optional, default = None
if given, all cells whose absolute correlation is below the value will be masked
'''
if data is None:
data = self.data
corr = data.loc[:, self.numerical_vars].corr()
mask = np.triu(np.ones_like(corr, dtype=bool))  # np.bool is deprecated; use the built-in bool
if threshold is not None:
mask[np.where(np.abs(corr) < threshold)] = True
plt.figure(figsize=(16,12))
sns.heatmap(data=corr, vmin=-1, vmax=1, cmap='RdBu_r',
annot=annot, cbar=True, square=True, mask=mask)
plt.title("correlation matrix")
plt.show()
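# Hypothetical usage sketch (variable names are illustrative; assumes ExploreData wraps a DataFrame):
#   plots = BasicStatisticPlots(ExploreData(df))
#   plots.corrMatPlot(annot=False, threshold=0.3)  # hide the upper triangle and cells with |corr| < 0.3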
# tested
def distPlot(self, col, drop_outliers=True, bins=None, data=None, lab=None):
'''
plot histogram of given variable
======
INPUTS
======
col: string
variable's column name.
drop_outliers: bool, default = True
whether to drop data points that fall 3 standard deviations away from the mean.
bins: int or list, default = None
seaborn distplot's bin parameter.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
lab: string, optional, default = None
axis label. If None, the column name will be used.
'''
if data is None:
data = self.data
if lab is None:
lab = col
plt.figure(figsize=(16,8))
if drop_outliers:
sns.distplot(a=data.loc[(abs((data.loc[:,col]-data.loc[:,col].mean())/data.loc[:,col].std())<3), col], bins=bins, kde=False, norm_hist=True)  # pass bins here too so the parameter is not ignored when outliers are dropped
else:
sns.distplot(a=data.loc[:, col].dropna(), bins=bins, kde=False, norm_hist=True)
plt.grid()
plt.title('distribution of %s' % lab)
plt.xlabel(lab)
plt.ylabel('frequency')
plt.xticks(rotation=-60)
plt.show()
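# Hypothetical call (column name is illustrative):
#   plots.distPlot("price", drop_outliers=True, bins=30)  # histogram of "price" without extreme values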
def checkCorrelation(self, threshold, drop_outliers=True, data=None):
'''
plot scatter plots of highly correlated features
=====
INPUT
=====
threshold: float between 0 and 1
feature pairs whose absolute correlation is at or above this value will be plotted
drop_outliers: bool, default = True
whether to drop data points that fall 3 standard deviations away from the mean.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
if data is None:
data = self.data
corr = data.loc[:, self.numerical_vars].corr().values
corr[np.triu_indices_from(corr)] = 0 # mask upper triangle
mask = np.where((np.abs(corr) >= threshold) & (np.abs(corr) < 1))
for c1, c2 in zip(mask[0], mask[1]):
col1 = self.numerical_vars[c1]
col2 = self.numerical_vars[c2]
print("==================")
print("correlation between %s and %s: %.4f" % (col1, col2, corr[c1,c2]))
self.scatterPlot(col1, col2,
drop_outliers=drop_outliers, data=data)
print("\n\n")
# tested
def scatterPlot(self, col1, col2, col3=None, drop_outliers=True, data=None, lab1=None, lab2=None):
'''
plot scatter plot for given variables
======
INPUTS
======
col1: string
x variable's column name.
col2: string
y variable's column name.
col3: string, optional, default = None
hue variable's column name. If a third variable is provided, the points will be distinguished by this variable, otherwise scatter plot with histograms of each x,y variable is plotted. Note that the hue variable should be categorical.
drop_outliers: bool, default = True
whether to drop data points that fall 3 standard deviations away from the mean.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
lab1, lab2: strings, optional, default = None
axis labels. if None, column names will be used
'''
if data is None:
data = self.data
if lab1 is None:
lab1 = col1
if lab2 is None:
lab2 = col2
if col3 is not None:
if data.loc[:, col3].nunique() > 10:
raise ValueError("Too many labels in %s, please flag or re-group them." % col3)
plt.figure(figsize=(16,8))
if drop_outliers:
sns.scatterplot(x=col1, y=col2, data=data.loc[(abs((data.loc[:,col1]-data.loc[:,col1].mean())/data.loc[:,col1].std())<3)],
hue=col3,
#style=col3
)
else:
sns.scatterplot(x=col1, y=col2, data=data,
hue=col3,
#style=col3
)
plt.xlabel(lab1)
plt.ylabel(lab2)
plt.xticks(rotation=-60)
plt.title('scatter plot of %s vs %s' % (lab1, lab2))
plt.grid()
plt.show()
else:
if drop_outliers:
sns.jointplot(x=col1, y=col2,
data=data.loc[(abs((data.loc[:,col1]-data.loc[:,col1].mean())/data.loc[:,col1].std())<3)],
height=10)
else:
sns.jointplot(x=col1, y=col2, data=data,
height=10)
plt.show()
# tested
def scatterPlot_1vsRest(self, col, variables, hue=None, drop_outliers=False, asX=True, data=None):
'''
plot scatter plots for given variables
======
INPUTS
======
col: string
variable's column name.
variables: array-like object
contains the variables to be plotted as an other axis
hue: string, optional, default = None
hue variable's column name. If provided, the points will be distinguished by this variable, otherwise scatter plot with histograms of each x,y variable is plotted. Note that the hue variable should be categorical.
drop_outliers: bool, default = True
whether to drop data points that fall 3 standard deviations away from the mean.
asX: bool, default = True
True if "col" should be used as the x variable and the variables in "variables" as the y variables, False vice-versa.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
variables = list(variables)
if col in variables:
variables.remove(col)
if asX:
for var in variables:
self.scatterPlot(col, var, hue, drop_outliers=drop_outliers, data=data)
else:
for var in variables:
self.scatterPlot(var, col, hue, drop_outliers=drop_outliers, data=data)
# tested
def piePlot(self, cols, agg, col_y=None, data=None):
'''
create a grouped dataframe by the given categorical variable and plot a pie
======
INPUTS
======
cols: list of strings
variable names by which the dataframe is to be grouped.
agg: ExploreData.createGroupedDf's agg parameter
col_y: string, optional, default = None
the target column name to be plotted. If not given, the first one in cols is taken.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
grouped = self.explorer.createGroupedDf(cols, agg, data=data)
if grouped.index.nlevels > 2:
raise ValueError("Too many levels of index. Allowed: 2; Recieved: %d" % grouped.index.nlevels)
# if the grouped dataframe has 2 levels of index
elif grouped.index.nlevels == 2:
# for e.g., a grouped dataframe obtained by grouping variables [v1, v2] and aggregated by summation
# over the variable v3
# the grouped dataframe may look like this:
# v1 v2 v3_agg
# ------------------
# A a 10
# -------------
# b 5
# -------------
# d 5
# ------------------
# B b 10
# -------------
# c 15
# we want to plot 2 plots for A and B, a pie in such a plot is anything in {a, b, c, d} (values of v2),
# the size of a pie is defined by the corresponding value.
# Precisely, for the pie plot A, the pie a occupies 50% of the chart, the pie b and the pie d take each
# one of both 25% of the chart
for ind in grouped.index.get_level_values(cols[0]).unique():
print(cols[0] + ': ' + str(ind))
plt.figure()
tmp = grouped.loc[ind]
tmp.plot(y=col_y, subplots=True, kind='pie', figsize=(10,10), legend=False)
plt.show()
# if the grouped dataframe has single level index, plot simple pie plot by index
elif grouped.index.nlevels == 1:
plt.figure()
grouped.plot(y=col_y, subplots=True, kind='pie', figsize=(10,10), legend=False)
plt.show()
else:
raise ValueError("Invalid indexing")
#
def boxPlot(self, col1, col2, col3=None, drop_outliers=True, plotEach=False, data=None):
'''
plot box plot for given variables
======
INPUTS
======
col1: string
x variable's column name. Should be categorical.
col2: string
y variable's column name.
col3: string, optional, default = None
hue variable's column name. If a third variable is provided, the points will be distinguished by this variable.
drop_outliers: bool, default = True
whether to drop data points that fall 3 standard deviations away from the mean.
plotEach: bool, default = False
whether to plot each individual point (if set to True, this can be slow for large datasets)
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
if data is None:
data = self.data
data.reset_index(inplace=True, drop=True)
if col1 not in self.categorical_vars:
raise ValueError("col1 should be a categorical variable.")
plt.figure(figsize=(16,8))
if drop_outliers:
sns.boxplot(x=col1, y=col2, hue=col3, data=data.loc[(abs((data.loc[:,col2]-data.loc[:,col2].mean())/data.loc[:,col2].std())<3)])
if plotEach:
sns.stripplot(x=col1, y=col2, hue=col3, data=data.loc[(abs((data.loc[:,col2]-data.loc[:,col2].mean())/data.loc[:,col2].std())<3)],
dodge=True, alpha=0.5)
else:
sns.boxplot(x=col1, y=col2, hue=col3, data=data)
if plotEach:
sns.stripplot(x=col1, y=col2, hue=col3, data=data,
dodge=True, alpha=0.5)
plt.grid()
plt.title('box plot of %s with respect to %s' % (col2, col1))
plt.xlabel(col1)
plt.ylabel(col2)
plt.xticks(rotation=-60)
plt.show()
#
def boxPlot_1vsRest(self, col, variables, hue=None, drop_outliers=True, plotEach=False, data=None):
'''
plot box plots for given variables
======
INPUTS
======
col: string
y variable's column name.
variables: array-like object
contains the variables to be plotted as x. Variables should be in the categorical variables.
hue: string, default = None
hue variable's column name. If provided, the boxes will be further split by this variable.
drop_outliers: bool, default = True
whether to drop data points that fall 3 standard deviations away from the mean.
plotEach: bool, default = False
whether to plot each individual point (if set to True, this can be slow for large datasets)
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
if data is None:
data = self.data
for var in variables:
if data.loc[:,var].nunique() > 10:
print("Number of unique values of %s is greater than 10, pleas flag or regroup them for better visualisation." % var)
else:
self.boxPlot(var, col, hue, drop_outliers=drop_outliers, plotEach=plotEach, data=data)  # pass hue through so the parameter is not silently ignored
# tested
def timeSeriesPlot(self, datetimeCol, cols, freq=None, agg=None, data=None):
'''
plot time series curves
======
INPUTS
======
datetimeCol: string
datetime variable's name
cols: list of strings
variable to be plotted over datetimeCol.
freq: string, optional, default = None
frequency value for resampling data. "S" for second, "T" for minute, "H" for hour, "D" for day, "W" for week, "M" for month, "Y" for year etc..
agg: string or function, optional, default = None
aggregation method for resampling data. If a function is given, it is up to the user to take care of NaN values. If None, no resampling will be performed.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
if data is None:
data = self.data
if datetimeCol not in self.datetime_vars:
raise ValueError("datetimeCol should be a datetime variable.")
plt.figure(figsize=(16,8))
if agg is None:
for col in cols:
plt.plot(data.loc[:, datetimeCol], data.loc[:,col], alpha=0.5, label=col)
else:
data.reset_index(inplace=True, drop=True)
df_plot = self.explorer.createResampledDf(freq, datetimeCol, agg, data=data)
for col in cols:
plt.plot(df_plot.index, df_plot.loc[:,col], alpha=0.5, label=col)
plt.grid()
plt.title('evolution of variable(s) over time, frequency %s' % freq)
plt.xlabel('time')
plt.ylabel('quantity')
plt.legend(loc=0)
plt.xticks(rotation=-60)
plt.show()
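# Hypothetical call (column names are illustrative; assumes createResampledDf accepts a pandas-style
# aggregation dict): monthly totals of two series drawn on the same figure:
#   plots.timeSeriesPlot("date", ["sales", "returns"], freq="M", agg={"sales": "sum", "returns": "sum"})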
# tested
def timeSeriesPlot_twinX(self, datetimeCol, cols1, cols2, freq=None, agg=None, data=None):
'''
plot two sets of time series curves sharing the x axis, with a separate y axis for each set
======
INPUTS
======
datetimeCol: string
datetime variable's name
cols1,2: lists
variables to be plotted over datetimeCol.
freq: string, optional, default = None
frequency value for resampling data. "S" for second, "T" for minute, "H" for hour, "D" for day, "W" for week, "M" for month, "Y" for year etc..
agg: string or function, optional, default = None
aggregation method for resampling data. If a function is given, it is up to the user to take care of NaN values. If None, no resampling will be performed.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
'''
if data is None:
data = self.data
if datetimeCol not in self.datetime_vars:
raise ValueError("datetimeCol should be a datetime variable.")
if agg is None:
df_plot = data
t = data.loc[:,datetimeCol]
else:
data.reset_index(inplace=True, drop=True)
df_plot = self.explorer.createResampledDf(freq, datetimeCol, agg, data=data)
t = df_plot.index
colours1 = sns.color_palette("PuBu_r", n_colors=len(cols1))
colours2 = sns.color_palette("YlOrRd_r", n_colors=len(cols2))
fig, ax1 = plt.subplots(figsize=(16,8))
for i,col1 in enumerate(cols1):
s1 = df_plot.loc[:, col1]
ax1.plot(t, s1, ':', color=colours1[i], alpha=0.8, linewidth=3, label=col1)
ax1.set_xlabel('time_axis')
ax1.legend(loc=2)
# Make the y-axis label, ticks and tick labels match the line colour.
ax1.set_ylabel(col1, color='steelblue')
ax1.tick_params('y', colors='steelblue')
ax1.grid(color='steelblue', alpha=0.4, axis='y', linestyle='--')
ax2 = ax1.twinx()
for i,col2 in enumerate(cols2):
s2 = df_plot.loc[:,col2]
ax2.plot(t, s2, color=colours2[i], alpha=0.7, label=col2)
ax2.set_ylabel(col2, color='orange')
ax2.tick_params('y', colors='orange')
ax2.grid(color='orange', alpha=0.4, axis='y', linestyle='-.')
ax2.legend(loc=1)
fig.tight_layout()
plt.title('Evolution of variables by time')
plt.show()
# tested
def timeSeriesDecomposition(self, datetimeCol, col, freq=None, agg=None, data=None):
'''
decompose a time series into y(t) = trend + seasonality + residual and plot each component
======
INPUTS
======
datetimeCol: string
datetime variable's name
col: string
variable to be plotted over datetimeCol.
freq: string, optional, default = None
frequency value for resampling data. "S" for second, "T" for minute, "H" for hour, "D" for day, "W" for week, "M" for month, "Y" for year etc..
agg: string or function, optional, default = None
aggregation method for resampling data. If a function is given, it is up to the user to take care of NaN values. If None, no resampling will be performed.
data: pandas dataframe, optional, default = None
data to be plotted. If None, then the class attribute data will be used.
======
OUTPUT
======
the result of the decomposition
'''
if data is None:
data = self.data
if datetimeCol not in self.datetime_vars:
raise ValueError("datetimeCol should be a datetime variable.")
if agg is None:
df = data
else:
data.reset_index(inplace=True, drop=True)
df = self.explorer.createResampledDf(freq, datetimeCol, agg, data=data)
series = df.loc[:,col]
result = seasonal_decompose(series, model='additive')
fig, (ax0,ax1,ax2,ax3) = plt.subplots(4,1, figsize=(35,20))
result.observed.plot(ax=ax0)
result.trend.plot(ax=ax1)
result.seasonal.plot(ax=ax2)
result.resid.plot(ax=ax3)
plt.show()
return result
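# Hypothetical usage sketch (column names are illustrative):
#   result = plots.timeSeriesDecomposition("date", "sales", freq="W", agg={"sales": "sum"})
#   result.seasonal.plot()  # the returned statsmodels DecomposeResult exposes observed/trend/seasonal/resid
# The additive model assumes y(t) = trend + seasonality + residual, as stated in the docstring.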
# tested
# Quite special a function, I can't see how it can be generalised to other projects of different kinds...
def timeSeriesPlot_folded(self, datetimeCol, groupbyCols, plotCol, foldFreq,
fixYLim=False, inPercentage=False, percentageOn=None, cumulateSum=False,
freq=None, agg=None, data=None):
'''
plot time series curves of one variable over a same period
======
INPUTS
======
datetimeCol: string
datetime variable's name
groupbyCols: list of strings
variables to be grouped.
plotCol: string
variable to be plotted.
foldFreq: string
the frequency that distinguishes the curves; must be longer than the resampling frequency. Available frequencies are {'W', 'M', 'Y'}. E.g., to study the average temperature of each week across years, foldFreq is "Y" (year) while the resampling freq is "W" (week).
fixYLim: bool, default = False
whether to fix y limits as the same for all figures.
inPercentage: bool, default = False
whether to convert the variable to be plotted into percentages.
percentageOn: string, default = None
Column name, only applied when inPercentage is set to True. If given, a sum of the plotCol will be calculated stratified by the given column and the resampled datetime column, otherwise the sum is calculated only on the stratified datetime.
cumulateSum: bool, default = False
whether to plot the variable as a cumulative sum. Note that if set to True, fixYLim is automatically set to False.
freq: string, optional, default = None
frequency value for resampling data. Available frequencies here are {'D', 'W', 'M'} for day, week and month respectively.
agg: dictionary or function, optional, default = None
aggregation method for resampling data. If a function is given, it is up to the user to take care of NaN values. If None, no resampling will be performed.
data: pandas dataframe, optional, default = None
data to work with. If None, then the class attribute data will be used.
'''
if data is None:
data = self.data
data.reset_index(inplace=True, drop=True)
if datetimeCol not in self.datetime_vars:
raise ValueError("datetimeCol should be a datetime variable.")
# group dataframe
df_plot = data.groupby(by=groupbyCols).resample(freq, on=datetimeCol)
# aggregate dataframe by user-defined method
if type(agg) is type(lambda x:x): # if agg is a function
df_plot = df_plot.apply(agg)
elif type(agg) is dict:
df_plot = df_plot.agg(agg)
else:
raise ValueError('agg can either be a function or an aggregation dictionary.')
if type(df_plot) is pd.Series:
df_plot = pd.DataFrame(df_plot)
df_plot.columns = [plotCol]
df_plot.reset_index(level=datetimeCol, inplace=True)
if inPercentage:
if percentageOn is None:
total = data.resample(freq, on=datetimeCol).agg({plotCol:'sum'})
else:
total = data.groupby(by=percentageOn).resample(freq, on=datetimeCol).agg({plotCol:'sum'})
total.columns = ['SumOfPlotCol']
df_plot = df_plot.join(total, on=datetimeCol)
df_plot.loc[:, plotCol] = df_plot.loc[:, plotCol].div(df_plot.SumOfPlotCol)
# define plt.ylim
bottom, top = df_plot.loc[:, plotCol].min()*0.95, df_plot.loc[:, plotCol].max()*1.05
# define the x-axis time unit column
if freq == 'W':
df_plot['unity'] = df_plot.loc[:,datetimeCol].dt.isocalendar().week  # .dt.week is deprecated in recent pandas
elif freq == 'M':
df_plot['unity'] = df_plot.loc[:,datetimeCol].dt.month
elif freq == 'D':
df_plot['unity'] = df_plot.loc[:,datetimeCol].dt.day
else:
raise ValueError("Available 'freq' frequencies are {'D','W','M'}")
# define period of the fold
if foldFreq == 'W':
df_plot['foldFreq'] = df_plot.loc[:,datetimeCol].dt.isocalendar().week  # .dt.week is deprecated in recent pandas
elif foldFreq == 'M':
df_plot['foldFreq'] = df_plot.loc[:,datetimeCol].dt.month
elif foldFreq == 'Y':
df_plot['foldFreq'] = df_plot.loc[:,datetimeCol].dt.year
else:
raise ValueError("Available 'foldFreq' frequencies are {'W','M','Y'}")
# if the user wants the curves as a cumulative sum (special case; only makes sense when the aggregation is a sum)
if cumulateSum:
fixYLim = False
# if the filter is of order 1
if len(groupbyCols) == 1:
for ind in df_plot.index.unique():
plt.figure(figsize=(18,6))
x_bottom, x_top = df_plot.unity.min(), df_plot.unity.max()
for ff in df_plot.foldFreq.unique():
tmp = df_plot.loc[ind,:]
plt.plot(tmp.loc[tmp.foldFreq == ff].unity.values, tmp.loc[tmp.foldFreq == ff, plotCol].cumsum(), '-*',
alpha=0.5, label=ff)
if fixYLim:
plt.ylim(bottom, top)
plt.xlim(x_bottom, x_top)
plt.legend(loc=0)
plt.grid()
plt.title('Evolution of %s resampled by %s [%s: %s]' % (plotCol, freq, groupbyCols[0], ind))
plt.show()
# if a second-order filter is applied
elif len(groupbyCols) == 2:
for ind0 in df_plot.index.get_level_values(groupbyCols[0]).unique():
TMP = df_plot.loc[ind0]
x_bottom, x_top = TMP.unity.min(), TMP.unity.max()
print('==========================================')
print(groupbyCols[0] + ": " + ind0)
for ind in TMP.index.unique():
plt.figure(figsize=(18,6))
for ff in TMP.foldFreq.unique():
tmp = TMP.loc[ind,:]
plt.plot(tmp.loc[tmp.foldFreq == ff].unity.values, tmp.loc[tmp.foldFreq == ff, plotCol].cumsum(),
'-*', alpha=0.5, label=ff)
if fixYLim:
plt.ylim(bottom, top)
plt.xlim(x_bottom, x_top)
plt.legend(loc=0)
plt.grid()
plt.title('Evolution of %s resampled by %s [%s: %s]' % (plotCol, freq, groupbyCols[1], ind))
plt.show()
# higher-order filters are currently not supported; raise an error
else:
raise ValueError("Too many levels of index. Allowed: 2; Received: %d" % len(groupbyCols))
# if curves are not in cumulated sum
else:
# if the filter is of order 1
if len(groupbyCols) == 1:
for ind in df_plot.index.unique():
plt.figure(figsize=(18,6))
x_bottom, x_top = df_plot.unity.min(), df_plot.unity.max()
for ff in df_plot.foldFreq.unique():
tmp = df_plot.loc[ind,:]
plt.plot(tmp.loc[tmp.foldFreq == ff].unity.values, tmp.loc[tmp.foldFreq == ff, plotCol], '-*',
alpha=0.5, label=ff)
if fixYLim:
plt.ylim(bottom, top)
plt.xlim(x_bottom, x_top)
plt.legend(loc=0)
plt.grid()
plt.title('Evolution of %s resampled by %s [%s: %s]' % (plotCol, freq, groupbyCols[0], ind))
plt.show()
# if a second-order filter is applied
elif len(groupbyCols) == 2:
for ind0 in df_plot.index.get_level_values(groupbyCols[0]).unique():
TMP = df_plot.loc[ind0]
x_bottom, x_top = TMP.unity.min(), TMP.unity.max()
print('==========================================')
print(groupbyCols[0] + ": " + ind0)
for ind in TMP.index.unique():
plt.figure(figsize=(18,6))
for ff in TMP.foldFreq.unique():
tmp = TMP.loc[ind,:]
plt.plot(tmp.loc[tmp.foldFreq == ff].unity.values, tmp.loc[tmp.foldFreq == ff, plotCol], '-*',
alpha=0.5, label=ff)
if fixYLim:
plt.ylim(bottom, top)
plt.xlim(x_bottom, x_top)
plt.legend(loc=0)
plt.grid()
plt.title('Evolution of %s resampled by %s [%s: %s]' % (plotCol, freq, groupbyCols[1], ind))
plt.show()
# higher-order filters are currently not supported; raise an error
else:
raise ValueError("Too many levels of index. Allowed: 2; Received: %d" % len(groupbyCols))
|
nilq/baby-python
|
python
|
def fun(r):
# arithmetic-series sum ((first + last) // 2) * count, which equals r * r (the sum of the first r odd numbers)
return ((2 + ((r - 1) * 2)) // 2) * r
for _ in range(int(input())):
l, r = [int(x) for x in input().split()]
# sum of the l-th through r-th odd numbers: fun(r) - fun(l - 1)
n = fun(r)
n -= fun(l - 1)
print(n)
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2017 Nutanix Inc. All rights reserved.
#
#
# pylint: disable=pointless-statement
import unittest
import uuid
import mock
from curie.curie_error_pb2 import CurieError
from curie.curie_server_state_pb2 import CurieSettings
from curie.discovery_util import DiscoveryUtil
from curie.exception import CurieException, CurieTestException
from curie.ipmi_util import IpmiUtil
from curie.proto_util import proto_patch_encryption_support
from curie.util import CurieUtil
from curie.vmm_client import VmmClient
from curie.nutanix_rest_api_client import NutanixMetadata
from curie.nutanix_rest_api_client import NutanixRestApiClient
class TestCurieDiscoveryUtil(unittest.TestCase):
def setUp(self):
self.fq_disc_util_name = "curie.discovery_util.DiscoveryUtil"
self._no_oob_node_proto = CurieSettings.ClusterNode()
oob_info = self._no_oob_node_proto.node_out_of_band_management_info
oob_info.interface_type = oob_info.kNone
self._ipmi_node_proto = CurieSettings.ClusterNode()
oob_info = self._ipmi_node_proto.node_out_of_band_management_info
oob_info.interface_type = oob_info.kIpmi
oob_info.ip_address = "1.2.3.4"
oob_info.username = "username"
oob_info.password = "password"
def test_dispatch(self):
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
software_info = cluster_pb.cluster_software_info
hyp_info = cluster_pb.cluster_hypervisor_info
mgmt_info.prism_info.SetInParent()
with self.assertRaises(CurieException):
DiscoveryUtil.update_cluster_version_info(cluster_pb)
software_info.nutanix_info.SetInParent()
with self.assertRaises(CurieException):
DiscoveryUtil.update_cluster_version_info(cluster_pb)
hyp_info.ahv_info.SetInParent()
fq_update_prism = (
"%s._update_cluster_version_info_prism" % self.fq_disc_util_name)
with mock.patch(fq_update_prism) as mock_prism:
DiscoveryUtil.update_cluster_version_info(cluster_pb)
mock_prism.assert_called_once_with(cluster_pb)
mgmt_info.Clear()
software_info.Clear()
hyp_info.Clear()
mgmt_info.vcenter_info.SetInParent()
fq_update_vcenter = (
"%s._update_cluster_version_info_vcenter" % self.fq_disc_util_name)
with mock.patch(fq_update_vcenter) as mock_vcenter:
DiscoveryUtil.update_cluster_version_info(cluster_pb)
mock_vcenter.assert_called_once_with(cluster_pb)
mgmt_info.Clear()
mgmt_info.vmm_info.SetInParent()
fq_update_vmm = (
"%s._update_cluster_version_info_vmm" % self.fq_disc_util_name)
with mock.patch(fq_update_vmm) as mock_vmm:
DiscoveryUtil.update_cluster_version_info(cluster_pb)
mock_vmm.assert_called_once_with(cluster_pb)
mgmt_info.Clear()
with self.assertRaises(CurieException):
DiscoveryUtil.update_cluster_version_info(cluster_pb)
fq_update_vip = (
"%s.update_cluster_virtual_ip" % self.fq_disc_util_name)
with mock.patch(fq_update_vip) as mock_vip:
DiscoveryUtil.update_cluster_virtual_ip(cluster_pb)
mock_vip.assert_called_once_with(cluster_pb)
@mock.patch.object(IpmiUtil, "get_chassis_status")
@mock.patch.object(CurieUtil, "ping_ip")
def test_validate_oob_config(self, mock_ping, mock_status):
proto_patch_encryption_support(CurieSettings)
cluster_pb = CurieSettings.Cluster()
for ii in xrange(4):
node_pb = cluster_pb.cluster_nodes.add()
node_pb.CopyFrom(self._no_oob_node_proto)
node_pb.id = str(ii)
DiscoveryUtil.validate_oob_config(cluster_pb)
self.assertEqual(mock_ping.call_count, 0)
self.assertEqual(mock_status.call_count, 0)
cluster_pb = CurieSettings.Cluster()
for ii in xrange(4):
node_pb = cluster_pb.cluster_nodes.add()
node_pb.CopyFrom(self._ipmi_node_proto)
node_pb.id = str(ii)
mock_ping.return_value = True
DiscoveryUtil.validate_oob_config(cluster_pb)
self.assertEqual(mock_ping.call_count, len(cluster_pb.cluster_nodes))
self.assertEqual(mock_status.call_count, len(cluster_pb.cluster_nodes))
mock_ping.reset_mock()
mock_status.reset_mock()
mock_ping.side_effect = [True, False, True, True]
with self.assertRaises(CurieException):
DiscoveryUtil.validate_oob_config(cluster_pb)
# We expect that the first ping succeeds and then the second fails. There
# should be an exception after the second ping attempt. If ping fails, the
# expectation is that the chassis status won't be called.
self.assertEqual(mock_ping.call_count, 2)
self.assertEqual(mock_status.call_count, 1)
mock_ping.reset_mock()
mock_status.reset_mock()
mock_ping.return_value = True
mock_ping.side_effect = None
mock_status.side_effect = [
{},
CurieException(CurieError.kOobAuthenticationError, "AuthError"),
{},
CurieException(CurieError.kInternalError, "SomeOtherError")
]
with self.assertRaises(CurieException):
DiscoveryUtil.validate_oob_config(cluster_pb)
self.assertEqual(mock_ping.call_count, 2)
self.assertEqual(mock_status.call_count, 2)
def test__get_hyp_version_for_host(self):
host = {"hypervisorFullName": "Nutanix 20170726.42",
DiscoveryUtil.CE_HOST_ATTR_KEY:
DiscoveryUtil.CE_HOST_ATTR_VAL
}
self.assertEqual(
DiscoveryUtil._get_hyp_version_for_host(host),
"Nutanix CE 20170726.42")
host["hypervisorFullName"] = "20170726.42"
self.assertEqual(
DiscoveryUtil._get_hyp_version_for_host(host),
"CE 20170726.42")
host["hypervisorFullName"] = "20170726.42"
host[DiscoveryUtil.CE_HOST_ATTR_KEY] = ""
self.assertEqual(
DiscoveryUtil._get_hyp_version_for_host(host),
"20170726.42")
host["hypervisorFullName"] = "Nutanix %s" % host["hypervisorFullName"]
self.assertEqual(
DiscoveryUtil._get_hyp_version_for_host(host),
"Nutanix 20170726.42")
def test__get_hyp_version_for_host_empty_host(self):
host = {"name": '1.1.1.1',
"hypervisorFullName": None}
with self.assertRaises(CurieTestException) as ar:
DiscoveryUtil._get_hyp_version_for_host(host)
self.assertIn("Cause: Cannot get hypervisor name from node: 1.1.1.1.",
str(ar.exception))
def test__get_hyp_version_for_host_empty_host_no_name(self):
host = {"hypervisorFullName": None}
with self.assertRaises(CurieTestException) as ar:
DiscoveryUtil._get_hyp_version_for_host(host)
self.assertIn("Cause: Cannot get hypervisor name from node: Unknown",
str(ar.exception))
@mock.patch("curie.discovery_util.NutanixRestApiClient")
@mock.patch("curie.discovery_util.VmmClient")
def test__update_cluster_version_info_vmm(self, m_VmmClient, n_NtnxApiCli):
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
mgmt_info.vmm_info.SetInParent()
software_info = cluster_pb.cluster_software_info
software_info.nutanix_info.SetInParent()
m_vmm_cli = m_VmmClient.return_value
m_vmm_cli.get_nodes.return_value = [
{
"ips": ["1.2.3.4"],
"fqdn": "node1.somewhere.blah",
"name": "node1.somewhere.blah",
"id": "157bbf6f-010b-41c6-938b-2a3dc3fae7ca",
"bmc_port": "623",
"bmc_address": "1.2.3.5",
"overall_state": "OK",
"state": "Responding",
"version": "10.0.14393.351"
}, {
"ips": ["2.3.4.5"],
"fqdn": "node2.somewhere.blah",
"name": "node2.somewhere.blah",
"id": "4657f9f7-4027-4fc4-bc90-04c16188438d",
"bmc_port": "623",
"bmc_address": "2.3.4.6",
"overall_state": "OK",
"state": "Responding",
"version": "10.0.14393.351"
}, {
"ips": ["3.4.5.6"],
"fqdn": "node3.somewhere.blah",
"name": "node3.somewhere.blah",
"id": "a4b928cf-2d16-43a1-9139-f98d4cbd55d6",
"bmc_port": "623",
"bmc_address": "3.4.5.7",
"overall_state": "OK",
"state": "Responding",
"version": "10.0.14393.351"
}
]
m_vmm_cli.get_vmm_version.return_value = "4.1.0.1"
m_ntnx_api = n_NtnxApiCli.return_value
cluster_inc_id = 12345
cluster_uuid = str(uuid.uuid4())
cluster_version = "el6-release-euphrates-5.0.2-stable-9d20638eb2ba1d3f84f213d5976fbcd412630c6d"
m_ntnx_api.get_nutanix_metadata.return_value = NutanixMetadata(
version=cluster_version, cluster_uuid=cluster_uuid,
cluster_incarnation_id=cluster_inc_id)
DiscoveryUtil.update_cluster_version_info(cluster_pb)
self.assertEqual(cluster_pb.cluster_software_info.nutanix_info.version,
"5.0.2")
self.assertEqual(
cluster_pb.cluster_management_server_info.vmm_info.vmm_version,
"4.1.0.1")
self.assertEqual(cluster_pb.cluster_hypervisor_info.hyperv_info.version,
["10.0.14393.351", "10.0.14393.351", "10.0.14393.351"])
@mock.patch("curie.discovery_util.NutanixRestApiClient")
def test_update_virtual_ip_prism(self, m_NutanixRestApiClient):
m_client = mock.MagicMock()
m_client.clusters_get.return_value = {
"name": "Mock-Cluster",
"clusterExternalIPAddress": "1.2.3.4",
}
m_NutanixRestApiClient.from_proto.return_value = m_client
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
mgmt_info.prism_info.SetInParent()
software_info = cluster_pb.cluster_software_info
software_info.nutanix_info.SetInParent()
self.assertEqual("",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
DiscoveryUtil.update_cluster_virtual_ip(cluster_pb)
self.assertEqual("1.2.3.4",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
@mock.patch("curie.discovery_util.NutanixRestApiClient")
@mock.patch("curie.discovery_util.VmmClient")
def test_update_virtual_ip_vmm_cvms(self, m_VmmClient,
m_NutanixRestApiClient):
m_VmmClient.is_nutanix_cvm.side_effect = [False, True]
m_VmmClient.is_powered_on.side_effect = [True]
m_vmm_client = mock.MagicMock()
m_vmm_client.get_vms.return_value = [
{"name": "FAKE-VM-A", "ips": ["1.1.1.1"]},
{"name": "FAKE-CVM", "ips": ["1.1.1.2"]},
]
m_VmmClient.return_value = m_vmm_client
m_nutanix_client = mock.MagicMock()
m_nutanix_client.clusters_get.return_value = {
"name": "Mock-Cluster",
"clusterExternalIPAddress": "1.2.3.4",
}
m_NutanixRestApiClient.return_value = m_nutanix_client
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
mgmt_info.vmm_info.SetInParent()
software_info = cluster_pb.cluster_software_info
software_info.nutanix_info.SetInParent()
software_info.nutanix_info.prism_user = "fake_prism_user"
software_info.nutanix_info.prism_password = "fake_prism_password"
self.assertEqual("",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
DiscoveryUtil.update_cluster_virtual_ip(cluster_pb)
self.assertEqual("1.2.3.4",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
m_NutanixRestApiClient.assert_has_calls([
mock.call("1.1.1.2", "fake_prism_user", "fake_prism_password"),
])
@mock.patch("curie.discovery_util.NutanixRestApiClient")
@mock.patch("curie.discovery_util.VmmClient")
def test_update_virtual_ip_vmm_no_cvms_found(
self, m_VmmClient, m_NutanixRestApiClient):
m_VmmClient.is_nutanix_cvm.side_effect = [False, False]
m_VmmClient.is_powered_on.side_effect = []
m_vmm_client = mock.MagicMock()
m_vmm_client.get_vms.return_value = [
{"name": "FAKE-VM-A", "ips": ["1.1.1.1"]},
{"name": "FAKE-ALSO-NOT-A-CVM", "ips": ["1.1.1.2"]},
]
m_VmmClient.return_value = m_vmm_client
m_nutanix_client = mock.MagicMock()
m_nutanix_client.clusters_get.return_value = {
"name": "Mock-Cluster",
"clusterExternalIPAddress": "1.2.3.4",
}
m_NutanixRestApiClient.return_value = m_nutanix_client
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
mgmt_info.vmm_info.SetInParent()
software_info = cluster_pb.cluster_software_info
software_info.nutanix_info.SetInParent()
software_info.nutanix_info.prism_user = "fake_prism_user"
software_info.nutanix_info.prism_password = "fake_prism_password"
self.assertEqual("",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
with self.assertRaises(CurieTestException) as ar:
DiscoveryUtil.update_cluster_virtual_ip(cluster_pb)
self.assertIn(
"Cause: No Nutanix CVMs found.\n\n"
"Impact: The cluster virtual IP address can not be discovered.\n\n"
"Corrective Action: Please verify that the cluster contains Nutanix "
"CVMs, and that they are powered on.\n\n"
"Traceback: None",
str(ar.exception))
@mock.patch("curie.discovery_util.NutanixRestApiClient")
@mock.patch("curie.discovery_util.VmmClient")
def test_update_virtual_ip_vmm_error_communicating_with_cvms(
self, m_VmmClient, m_NutanixRestApiClient):
m_VmmClient.is_nutanix_cvm.side_effect = [True, True]
m_VmmClient.is_powered_on.side_effect = [True, True]
m_vmm_client = mock.MagicMock()
m_vmm_client.get_vms.return_value = [
{"name": "FAKE-CVM-A", "ips": ["1.1.1.1"]},
{"name": "FAKE-CVM-B", "ips": ["1.1.1.2"]},
]
m_VmmClient.return_value = m_vmm_client
m_nutanix_client = mock.MagicMock()
m_nutanix_client.clusters_get.side_effect = IOError("Kaboom!")
m_NutanixRestApiClient.return_value = m_nutanix_client
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
mgmt_info.vmm_info.SetInParent()
software_info = cluster_pb.cluster_software_info
software_info.nutanix_info.SetInParent()
software_info.nutanix_info.prism_user = "fake_prism_user"
software_info.nutanix_info.prism_password = "fake_prism_password"
self.assertEqual("",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
with self.assertRaises(CurieTestException) as ar:
DiscoveryUtil.update_cluster_virtual_ip(cluster_pb)
self.assertIn(
"Cause: Failed to query Prism on any Nutanix CVM.\n\n"
"Impact: The cluster virtual IP address can not be discovered.\n\n"
"Corrective Action: Please verify that the Nutanix CVMs on the cluster "
"are powered on, and that the network connectivity to the CVMs is "
"correct.\n\nTraceback (most recent call last):\n",
str(ar.exception))
self.assertIn("IOError: Kaboom!", ar.exception.traceback)
@mock.patch("curie.discovery_util.NutanixRestApiClient")
def test_update_virtual_ip_prism_already_set(self, m_NutanixRestApiClient):
m_client = mock.MagicMock()
m_client.clusters_get.return_value = {
"name": "Mock-Cluster",
"clusterExternalIPAddress": "1.2.3.4",
}
m_NutanixRestApiClient.from_proto.return_value = m_client
cluster_pb = proto_patch_encryption_support(CurieSettings.Cluster)()
mgmt_info = cluster_pb.cluster_management_server_info
mgmt_info.prism_info.SetInParent()
software_info = cluster_pb.cluster_software_info
software_info.nutanix_info.SetInParent()
cluster_pb.cluster_software_info.nutanix_info.prism_host = "5.5.5.5"
self.assertEqual("5.5.5.5",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
DiscoveryUtil.update_cluster_virtual_ip(cluster_pb)
self.assertEqual("1.2.3.4",
cluster_pb.cluster_software_info.nutanix_info.prism_host)
|
nilq/baby-python
|
python
|
import numpy as np
import numbers
from manimlib.constants import *
from manimlib.mobject.functions import ParametricFunction
from manimlib.mobject.geometry import Arrow
from manimlib.mobject.geometry import Line
from manimlib.mobject.number_line import NumberLine
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.utils.config_ops import digest_config
from manimlib.utils.config_ops import merge_dicts_recursively
from manimlib.utils.simple_functions import binary_search
from manimlib.utils.space_ops import angle_of_vector
# TODO: There should be much more code reuse between Axes, NumberPlane and GraphScene
class CoordinateSystem():
"""
Abstract class for Axes and NumberPlane
"""
CONFIG = {
"dimension": 2,
"x_min": -FRAME_X_RADIUS,
"x_max": FRAME_X_RADIUS,
"y_min": -FRAME_Y_RADIUS,
"y_max": FRAME_Y_RADIUS,
}
def coords_to_point(self, *coords):
raise Exception("Not implemented")
def point_to_coords(self, point):
raise Exception("Not implemented")
def c2p(self, *coords):
"""Abbreviation for coords_to_point"""
return self.coords_to_point(*coords)
def p2c(self, point):
"""Abbreviation for point_to_coords"""
return self.point_to_coords(point)
def get_axes(self):
raise Exception("Not implemented")
def get_axis(self, index):
return self.get_axes()[index]
def get_x_axis(self):
return self.get_axis(0)
def get_y_axis(self):
return self.get_axis(1)
def get_z_axis(self):
return self.get_axis(2)
def get_x_axis_label(self, label_tex, edge=RIGHT, direction=DL, **kwargs):
return self.get_axis_label(label_tex, self.get_x_axis(), edge,
direction, **kwargs)
def get_y_axis_label(self, label_tex, edge=UP, direction=DR, **kwargs):
return self.get_axis_label(label_tex, self.get_y_axis(), edge,
direction, **kwargs)
def get_axis_label(self,
label_tex,
axis,
edge,
direction,
buff=MED_SMALL_BUFF):
label = TexMobject(label_tex)
label.next_to(axis.get_edge_center(edge), direction, buff=buff)
label.shift_onto_screen(buff=MED_SMALL_BUFF)
return label
def get_axis_labels(self, x_label_tex="x", y_label_tex="y"):
self.axis_labels = VGroup(
self.get_x_axis_label(x_label_tex),
self.get_y_axis_label(y_label_tex),
)
return self.axis_labels
def get_graph(self, function, **kwargs):
x_min = kwargs.pop("x_min", self.x_min)
x_max = kwargs.pop("x_max", self.x_max)
graph = ParametricFunction(
lambda t: self.coords_to_point(t, function(t)),
t_min=x_min,
t_max=x_max,
**kwargs)
graph.underlying_function = function
return graph
def get_parametric_curve(self, function, **kwargs):
dim = self.dimension
graph = ParametricFunction(
lambda t: self.coords_to_point(*function(t)[:dim]), **kwargs)
graph.underlying_function = function
return graph
def input_to_graph_point(self, x, graph):
if hasattr(graph, "underlying_function"):
return self.coords_to_point(x, graph.underlying_function(x))
else:
alpha = binary_search(
function=lambda a: self.point_to_coords(
graph.point_from_proportion(a))[0],
target=x,
lower_bound=self.x_min,
upper_bound=self.x_max,
)
if alpha is not None:
return graph.point_from_proportion(alpha)
else:
return None
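# Minimal usage sketch (illustrative, assumes it runs inside a Scene as usual for manim):
#   axes = Axes(x_min=-3, x_max=3, y_min=-2, y_max=2)
#   graph = axes.get_graph(lambda x: x ** 2)
#   point = axes.input_to_graph_point(1.5, graph)  # screen-space point on the parabola at x = 1.5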
class Axes(VGroup, CoordinateSystem):
CONFIG = {
"axis_config": {
"color": LIGHT_GREY,
"include_tip": True,
"exclude_zero_from_default_numbers": True,
},
"x_axis_config": {},
"y_axis_config": {
"label_direction": LEFT,
},
"center_point": ORIGIN,
}
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.x_axis = self.create_axis(self.x_min, self.x_max,
self.x_axis_config)
self.y_axis = self.create_axis(self.y_min, self.y_max,
self.y_axis_config)
self.y_axis.rotate(90 * DEGREES, about_point=ORIGIN)
# Add as a separate group in case various other
# mobjects are added to self, as for example in
# NumberPlane below
self.axes = VGroup(self.x_axis, self.y_axis)
self.add(*self.axes)
self.shift(self.center_point)
def create_axis(self, min_val, max_val, axis_config):
new_config = merge_dicts_recursively(
self.axis_config,
{
"x_min": min_val,
"x_max": max_val
},
axis_config,
)
return NumberLine(**new_config)
def coords_to_point(self, *coords):
origin = self.x_axis.number_to_point(0)
result = np.array(origin)
for axis, coord in zip(self.get_axes(), coords):
result += (axis.number_to_point(coord) - origin)
return result
def c2p(self, *coords):
return self.coords_to_point(*coords)
def point_to_coords(self, point):
return tuple([axis.point_to_number(point) for axis in self.get_axes()])
def p2c(self, point):
return self.point_to_coords(point)
def get_axes(self):
return self.axes
def get_coordinate_labels(self, x_vals=None, y_vals=None):
if x_vals is None:
x_vals = []
if y_vals is None:
y_vals = []
x_mobs = self.get_x_axis().get_number_mobjects(*x_vals)
y_mobs = self.get_y_axis().get_number_mobjects(*y_vals)
self.coordinate_labels = VGroup(x_mobs, y_mobs)
return self.coordinate_labels
def add_coordinates(self, x_vals=None, y_vals=None):
self.add(self.get_coordinate_labels(x_vals, y_vals))
return self
class ThreeDAxes(Axes):
CONFIG = {
"dimension": 3,
"x_min": -5.5,
"x_max": 5.5,
"y_min": -5.5,
"y_max": 5.5,
"z_axis_config": {},
"z_min": -3.5,
"z_max": 3.5,
"z_normal": DOWN,
"num_axis_pieces": 20,
"light_source": 9 * DOWN + 7 * LEFT + 10 * OUT,
}
def __init__(self, **kwargs):
Axes.__init__(self, **kwargs)
z_axis = self.z_axis = self.create_axis(self.z_min, self.z_max,
self.z_axis_config)
z_axis.rotate(-np.pi / 2, UP, about_point=ORIGIN)
z_axis.rotate(angle_of_vector(self.z_normal), OUT, about_point=ORIGIN)
self.axes.add(z_axis)
self.add(z_axis)
self.add_3d_pieces()
self.set_axis_shading()
def add_3d_pieces(self):
for axis in self.axes:
axis.pieces = VGroup(*axis.get_pieces(self.num_axis_pieces))
axis.add(axis.pieces)
axis.set_stroke(width=0, family=False)
axis.set_shade_in_3d(True)
def set_axis_shading(self):
def make_func(axis):
vect = self.light_source
return lambda: (
axis.get_edge_center(-vect),
axis.get_edge_center(vect),
)
for axis in self:
for submob in axis.family_members_with_points():
submob.get_gradient_start_and_end_points = make_func(axis)
submob.get_unit_normal = lambda a: np.ones(3)
submob.set_sheen(0.2)
class NumberPlane(Axes):
CONFIG = {
"axis_config": {
"stroke_color": WHITE,
"stroke_width": 2,
"include_ticks": False,
"include_tip": False,
"line_to_number_buff": SMALL_BUFF,
"label_direction": DR,
"number_scale_val": 0.5,
},
"y_axis_config": {
"label_direction": DR,
},
"background_line_style": {
"stroke_color": BLUE_D,
"stroke_width": 2,
"stroke_opacity": 1,
},
# Defaults to a faded version of line_config
"faded_line_style": None,
"x_line_frequency": 1,
"y_line_frequency": 1,
"faded_line_ratio": 1,
"make_smooth_after_applying_functions": True,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.init_background_lines()
def init_background_lines(self):
if self.faded_line_style is None:
style = dict(self.background_line_style)
# For anything numerical, like stroke_width
# and stroke_opacity, chop it in half
for key in style:
if isinstance(style[key], numbers.Number):
style[key] *= 0.5
self.faded_line_style = style
self.background_lines, self.faded_lines = self.get_lines()
self.background_lines.set_style(**self.background_line_style, )
self.faded_lines.set_style(**self.faded_line_style, )
self.add_to_back(
self.faded_lines,
self.background_lines,
)
def get_lines(self):
x_axis = self.get_x_axis()
y_axis = self.get_y_axis()
x_freq = self.x_line_frequency
y_freq = self.y_line_frequency
x_lines1, x_lines2 = self.get_lines_parallel_to_axis(
x_axis,
y_axis,
x_freq,
self.faded_line_ratio,
)
y_lines1, y_lines2 = self.get_lines_parallel_to_axis(
y_axis,
x_axis,
y_freq,
self.faded_line_ratio,
)
lines1 = VGroup(*x_lines1, *y_lines1)
lines2 = VGroup(*x_lines2, *y_lines2)
return lines1, lines2
def get_lines_parallel_to_axis(self, axis1, axis2, freq, ratio):
line = Line(axis1.get_start(), axis1.get_end())
dense_freq = (1 + ratio)
step = (1 / dense_freq) * freq
lines1 = VGroup()
lines2 = VGroup()
ranges = (
np.arange(0, axis2.x_max, step),
np.arange(0, axis2.x_min, -step),
)
for inputs in ranges:
for k, x in enumerate(inputs):
new_line = line.copy()
new_line.move_to(axis2.number_to_point(x))
if k % (1 + ratio) == 0:
lines1.add(new_line)
else:
lines2.add(new_line)
return lines1, lines2
def get_center_point(self):
return self.coords_to_point(0, 0)
def get_x_unit_size(self):
return self.get_x_axis().get_unit_size()
def get_y_unit_size(self):
return self.get_y_axis().get_unit_size()  # use the y axis here (the original mistakenly queried the x axis)
def get_axes(self):
return self.axes
def get_vector(self, coords, **kwargs):
kwargs["buff"] = 0
return Arrow(self.coords_to_point(0, 0), self.coords_to_point(*coords),
**kwargs)
def prepare_for_nonlinear_transform(self, num_inserted_curves=50):
for mob in self.family_members_with_points():
num_curves = mob.get_num_curves()
if num_inserted_curves > num_curves:
mob.insert_n_curves(num_inserted_curves - num_curves)
return self
class ComplexPlane(NumberPlane):
CONFIG = {
"color": BLUE,
"line_frequency": 1,
}
def number_to_point(self, number):
number = complex(number)
return self.coords_to_point(number.real, number.imag)
def n2p(self, number):
return self.number_to_point(number)
def point_to_number(self, point):
x, y = self.point_to_coords(point)
return complex(x, y)
def p2n(self, point):
return self.point_to_number(point)
def get_default_coordinate_values(self):
x_numbers = self.get_x_axis().default_numbers_to_display()
y_numbers = self.get_y_axis().default_numbers_to_display()
y_numbers = [complex(0, y) for y in y_numbers if y != 0]
return [*x_numbers, *y_numbers]
def get_coordinate_labels(self, *numbers, **kwargs):
if len(numbers) == 0:
numbers = self.get_default_coordinate_values()
self.coordinate_labels = VGroup()
for number in numbers:
z = complex(number)
if abs(z.imag) > abs(z.real):
axis = self.get_y_axis()
value = z.imag
kwargs = merge_dicts_recursively(
kwargs,
{"number_config": {
"unit": "i"
}},
)
else:
axis = self.get_x_axis()
value = z.real
number_mob = axis.get_number_mobject(value, **kwargs)
self.coordinate_labels.add(number_mob)
return self.coordinate_labels
def add_coordinates(self, *numbers):
self.add(self.get_coordinate_labels(*numbers))
return self
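# Brief sketch of the complex-plane helpers (illustrative values):
#   plane = ComplexPlane()
#   p = plane.n2p(2 + 1j)  # screen point for the complex number 2 + i
#   z = plane.p2n(p)       # back to (approximately) 2 + 1j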
|
nilq/baby-python
|
python
|
import os
import unittest
import numpy as np
import pygsti
import pygsti.construction as pc
from pygsti.serialization import json
from pygsti.modelpacks.legacy import std1Q_XY
from pygsti.modelpacks.legacy import std2Q_XYCNOT as std
from pygsti.objects import Label as L
from ..testutils import BaseTestCase, compare_files
class CalcMethods2QTestCase(BaseTestCase):
@classmethod
def setUpClass(cls):
"""
Handle all once-per-class (slow) computation and loading,
to avoid calling it for each test (like setUp). Store
results in class variable for use within setUp.
"""
super(CalcMethods2QTestCase, cls).setUpClass()
#Change to test_packages directory (since setUp hasn't been called yet...)
origDir = os.getcwd()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
os.chdir('..') # The test_packages directory
#Note: std is a 2Q model
cls.maxLengths = [1]
#cls.germs = std.germs_lite
cls.germs = pygsti.circuits.to_circuits([(gl,) for gl in std.target_model().operations])
cls.mdl_datagen = std.target_model().depolarize(op_noise=0.1, spam_noise=0.001)
cls.listOfExperiments = pygsti.circuits.create_lsgst_circuits(
std.target_model(), std.prepStrs, std.effectStrs, cls.germs, cls.maxLengths)
#RUN BELOW FOR DATAGEN (UNCOMMENT to regenerate)
#ds = pygsti.data.simulate_data(cls.mdl_datagen, cls.listOfExperiments,
# n_samples=1000, sample_error="multinomial", seed=1234)
#ds.save(compare_files + "/calcMethods2Q.dataset")
cls.ds = pygsti.objects.DataSet(file_to_load_from=compare_files + "/calcMethods2Q.dataset")
cls.advOpts = {'tolerance': 1e-2}
#Reduced model GST dataset
cls.nQubits = 2
cls.mdl_redmod_datagen = pc.build_nqnoise_model(cls.nQubits, geometry="line", max_idle_weight=1, maxhops=1,
extra_weight_1_hops=0, extra_gate_weight=1, sparse=False,
sim_type="matrix", verbosity=1,
gateNoise=(1234, 0.01), prepNoise=(456, 0.01),
povmNoise=(789, 0.01))
#Create a reduced set of fiducials and germs
op_labels = list(cls.mdl_redmod_datagen.operations.keys())
fids1Q = std1Q_XY.fiducials[0:2] # for speed
cls.redmod_fiducials = []
for i in range(cls.nQubits):
cls.redmod_fiducials.extend(pygsti.construction.manipulate_circuits(
fids1Q, [((L('Gx'),), (L('Gx', i),)), ((L('Gy'),), (L('Gy', i),))]))
#print(redmod_fiducials, "Fiducials")
cls.redmod_germs = pygsti.circuits.to_circuits([(gl,) for gl in op_labels])
cls.redmod_maxLs = [1]
#expList = pygsti.circuits.create_lsgst_circuits(
# cls.mdl_redmod_datagen, cls.redmod_fiducials, cls.redmod_fiducials,
# cls.redmod_germs, cls.redmod_maxLs)
#RUN BELOW FOR DATAGEN (UNCOMMENT to regenerate)
#redmod_ds = pygsti.data.simulate_data(cls.mdl_redmod_datagen, expList, 1000, "round", seed=1234)
#redmod_ds.save(compare_files + "/calcMethods2Q_redmod.dataset")
cls.redmod_ds = pygsti.objects.DataSet(file_to_load_from=compare_files + "/calcMethods2Q_redmod.dataset")
#print(len(expList)," reduced model sequences")
#Random starting points - little kick so we don't get hung up at start
np.random.seed(1234)
cls.rand_start18 = np.random.random(18) * 1e-6
cls.rand_start206 = np.random.random(206) * 1e-6
cls.rand_start228 = np.random.random(228) * 1e-6
os.chdir(origDir) # return to original directory
## GST using "full" (non-embedded/composed) gates
# All of these calcs use dense matrices; While sparse operation matrices (as Maps) could be used,
# they'd need to enter as a sparse basis to a LindbladDenseOp (maybe add this later?)
def test_stdgst_matrix(self):
# Using matrix-based calculations
target_model = std.target_model().copy()
target_model.set_all_parameterizations("CPTP")
target_model.set_simtype('matrix') # the default for 1Q, so we could remove this line
results = pygsti.run_long_sequence_gst(self.ds, target_model, std.prepStrs, std.effectStrs,
self.germs, self.maxLengths, advanced_options=self.advOpts,
verbosity=4)
#RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
#pygsti.io.write_model(results.estimates['default'].models['go0'],
# compare_files + "/test2Qcalc_std_exact.model","Saved Standard-Calc 2Q test model")
# Note: expected nSigma of 143 is so high b/c we use very high tol of 1e-2 => result isn't very good
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 143, delta=2.0)
mdl_compare = pygsti.io.load_model(compare_files + "/test2Qcalc_std_exact.model")
self.assertAlmostEqual(results.estimates['default'].models['go0'].frobeniusdist(mdl_compare), 0, places=3)
def test_stdgst_map(self):
# Using map-based calculation
target_model = std.target_model().copy()
target_model.set_all_parameterizations("CPTP")
target_model.set_simtype('map')
results = pygsti.run_long_sequence_gst(self.ds, target_model, std.prepStrs, std.effectStrs,
self.germs, self.maxLengths, advanced_options=self.advOpts,
verbosity=4)
#Note: expected nSigma of 143 is so high b/c we use very high tol of 1e-2 => result isn't very good
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 143, delta=2.0)
mdl_compare = pygsti.io.load_model(compare_files + "/test2Qcalc_std_exact.model")
self.assertAlmostEqual(results.estimates['default'].models['go0'].frobeniusdist(mdl_compare), 0, places=3)
def test_stdgst_terms(self):
# Using term-based (path integral) calculation
# This performs a map-based unitary evolution along each path.
target_model = std.target_model().copy()
target_model.set_all_parameterizations("H+S terms")
target_model.set_simtype('termorder:1') # this is the default set by set_all_parameterizations above
results = pygsti.run_long_sequence_gst(self.ds, target_model, std.prepStrs, std.effectStrs,
self.germs, self.maxLengths, verbosity=4)
#RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
#pygsti.io.json.dump(results.estimates['default'].models['go0'],
# open(compare_files + "/test2Qcalc_std_terms.model",'w'))
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 5, delta=1.0)
mdl_compare = pygsti.serialization.json.load(open(compare_files + "/test2Qcalc_std_terms.model"))
self.assertAlmostEqual(np.linalg.norm(results.estimates['default'].models['go0'].to_vector()
- mdl_compare.to_vector()), 0, places=3)
# ## GST using "reduced" models
# Reduced, meaning that we use composed and embedded gates to form a more complex error model with
# shared parameters and qubit connectivity graphs. Calculations *can* use dense matrices and matrix calcs,
# but usually will use sparse mxs and map-based calcs.
def test_reducedmod_matrix(self):
# Using dense matrices and matrix-based calcs
target_model = pc.build_nqnoise_model(self.nQubits, geometry="line", max_idle_weight=1, maxhops=1,
extra_weight_1_hops=0, extra_gate_weight=1, sparse=False,
sim_type="matrix", verbosity=1)
target_model.from_vector(self.rand_start206)
results = pygsti.run_long_sequence_gst(self.redmod_ds, target_model, self.redmod_fiducials,
self.redmod_fiducials, self.redmod_germs, self.redmod_maxLs,
verbosity=4, advanced_options={'tolerance': 1e-3})
#RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
#pygsti.io.json.dump(results.estimates['default'].models['go0'],
# open(compare_files + "/test2Qcalc_redmod_exact.model",'w'))
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 1.0, delta=1.0)
mdl_compare = pygsti.serialization.json.load(open(compare_files + "/test2Qcalc_redmod_exact.model"))
self.assertAlmostEqual(results.estimates['default'].models['go0'].frobeniusdist(mdl_compare), 0, places=3)
def test_reducedmod_map1(self):
# Using dense embedded matrices and map-based calcs (maybe not really necessary to include?)
target_model = pc.build_nqnoise_model(self.nQubits, geometry="line", max_idle_weight=1, maxhops=1,
extra_weight_1_hops=0, extra_gate_weight=1, sparse=False,
sim_type="map", verbosity=1)
target_model.from_vector(self.rand_start206)
results = pygsti.run_long_sequence_gst(self.redmod_ds, target_model, self.redmod_fiducials,
self.redmod_fiducials, self.redmod_germs, self.redmod_maxLs,
verbosity=4, advanced_options={'tolerance': 1e-3})
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 1.0, delta=1.0)
mdl_compare = pygsti.serialization.json.load(open(compare_files + "/test2Qcalc_redmod_exact.model"))
self.assertAlmostEqual(results.estimates['default'].models['go0'].frobeniusdist(mdl_compare), 0, places=1)
#Note: models aren't necessarily exactly equal given gauge freedoms that we don't know
        # how to optimize over exactly - so this is a very loose test...
def test_reducedmod_map2(self):
# Using sparse embedded matrices and map-based calcs
target_model = pc.build_nqnoise_model(self.nQubits, geometry="line", max_idle_weight=1, maxhops=1,
extra_weight_1_hops=0, extra_gate_weight=1, sparse=True,
sim_type="map", verbosity=1)
target_model.from_vector(self.rand_start206)
results = pygsti.run_long_sequence_gst(self.redmod_ds, target_model, self.redmod_fiducials,
self.redmod_fiducials, self.redmod_germs, self.redmod_maxLs,
verbosity=4, advanced_options={'tolerance': 1e-3})
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 1.0, delta=1.0)
mdl_compare = pygsti.serialization.json.load(open(compare_files + "/test2Qcalc_redmod_exact.model"))
self.assertAlmostEqual(np.linalg.norm(results.estimates['default'].models['go0'].to_vector()
- mdl_compare.to_vector()), 0, places=1)
#Note: models aren't necessarily exactly equal given gauge freedoms that we don't know
        # how to optimize over exactly - so this is a very loose test...
def test_reducedmod_svterm(self):
# Using term-based calcs using map-based state-vector propagation
target_model = pc.build_nqnoise_model(self.nQubits, geometry="line", max_idle_weight=1, maxhops=1,
extra_weight_1_hops=0, extra_gate_weight=1, sparse=False, verbosity=1,
sim_type="termorder:1", parameterization="H+S terms")
target_model.from_vector(self.rand_start228)
results = pygsti.run_long_sequence_gst(self.redmod_ds, target_model, self.redmod_fiducials,
self.redmod_fiducials, self.redmod_germs, self.redmod_maxLs,
verbosity=4, advanced_options={'tolerance': 1e-3})
#RUN BELOW LINES TO SAVE GATESET (UNCOMMENT to regenerate)
#pygsti.io.json.dump(results.estimates['default'].models['go0'],
# open(compare_files + "/test2Qcalc_redmod_terms.model",'w'))
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 3.0, delta=1.0)
mdl_compare = pygsti.serialization.json.load(open(compare_files + "/test2Qcalc_redmod_terms.model"))
self.assertAlmostEqual(np.linalg.norm(results.estimates['default'].models['go0'].to_vector()
- mdl_compare.to_vector()), 0, places=3)
def test_reducedmod_cterm(self):
# Using term-based calcs using map-based stabilizer-state propagation
target_model = pc.build_nqnoise_model(self.nQubits, geometry="line", max_idle_weight=1, maxhops=1,
extra_weight_1_hops=0, extra_gate_weight=1, sparse=False, verbosity=1,
sim_type="termorder:1", parameterization="H+S clifford terms")
target_model.from_vector(self.rand_start228)
results = pygsti.run_long_sequence_gst(self.redmod_ds, target_model, self.redmod_fiducials,
self.redmod_fiducials, self.redmod_germs, self.redmod_maxLs,
verbosity=4, advanced_options={'tolerance': 1e-3})
print("MISFIT nSigma = ", results.estimates['default'].misfit_sigma())
self.assertAlmostEqual(results.estimates['default'].misfit_sigma(), 3.0, delta=1.0)
mdl_compare = pygsti.serialization.json.load(open(compare_files + "/test2Qcalc_redmod_terms.model"))
self.assertAlmostEqual(np.linalg.norm(results.estimates['default'].models['go0'].to_vector()
- mdl_compare.to_vector()), 0, places=3)
def test_circuitsim_stabilizer_2Qcheck(self):
#Test 2Q circuits
#from pygsti.modelpacks.legacy import std2Q_XYICNOT as stdChk
from pygsti.modelpacks.legacy import std2Q_XYICPHASE as stdChk
maxLengths = [1, 2, 4]
listOfExperiments = pygsti.circuits.create_lsgst_circuits(
stdChk.target_model(), stdChk.prepStrs, stdChk.effectStrs, stdChk.germs, maxLengths)
#listOfExperiments = pygsti.circuits.to_circuits([ ('Gcnot','Gxi') ])
#listOfExperiments = pygsti.circuits.to_circuits([ ('Gxi','Gcphase','Gxi','Gix') ])
mdl_normal = stdChk.target_model().copy()
mdl_clifford = stdChk.target_model().copy()
#print(mdl_clifford['Gcnot'])
self.assertTrue(stdChk.target_model()._evotype == "densitymx")
mdl_clifford.set_all_parameterizations('static unitary') # reduces dim...
self.assertTrue(mdl_clifford._evotype == "statevec")
mdl_clifford.set_all_parameterizations('clifford')
self.assertTrue(mdl_clifford._evotype == "stabilizer")
for opstr in listOfExperiments:
#print(str(opstr))
p_normal = mdl_normal.probabilities(opstr)
p_clifford = mdl_clifford.probabilities(opstr)
#p_clifford = bprobs[opstr]
for outcm in p_normal.keys():
if abs(p_normal[outcm] - p_clifford[outcm]) > 1e-8:
print(str(opstr), " ERR: \n", p_normal, "\n", p_clifford)
self.assertTrue(False)
print("Done checking %d sequences!" % len(listOfExperiments))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
nilq/baby-python
|
python
|
"""
Compare two version numbers version1 and version2.
If version1 > version2 return 1,
if version1 < version2 return -1,
otherwise return 0.
You may assume that the version strings are non-empty
and contain only digits and the . character.
The . character does not represent a decimal point and
is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way
to version three", it is the fifth second-level revision
of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
Your runtime beats 76.42 % of python submissions.
"""
class Solution(object):
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
"""
Method 1:
Your runtime beats 76.42 % of python submissions.
Split the version numbers based on '.'
Append zero to the end, to make sure both the
version numbers are of the same length.
Compare
"""
versions1 = [int(v) for v in version1.split(".")]
versions2 = [int(v) for v in version2.split(".")]
for i in range(max(len(versions1),len(versions2))):
v1 = versions1[i] if i < len(versions1) else 0
v2 = versions2[i] if i < len(versions2) else 0
if v1 > v2:
return 1
elif v1 < v2:
                return -1
        return 0
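# A minimal usage sketch (added, not part of the original submission): it
# exercises the ordering example from the docstring above. The expected
# results follow directly from the comparison rules stated there.
if __name__ == "__main__":
    s = Solution()
    assert s.compareVersion("0.1", "1.1") == -1   # 0.1 < 1.1
    assert s.compareVersion("13.37", "1.2") == 1  # 1.2 < 13.37
    assert s.compareVersion("1.0", "1") == 0      # trailing zeros compare equal
    print("compareVersion ordering checks passed")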
|
nilq/baby-python
|
python
|
from core.views import BaseView, LoginRequiredMixin
from ..models import PokerMember, PokerRoom
class SettingsView(LoginRequiredMixin, BaseView):
template_name = 'settings.html'
def get(self, request, token):
"""Handle GET request."""
if not self.member:
return self.redirect('poker:room', args=(token,))
return super().get(request, token)
def post(self, request, token):
"""Handle POST request."""
# Exit room
if '_exit' in request.POST:
self.member.is_active = False
self.member.save()
return self.redirect('poker:index')
room_name = request.POST.get('room_name')
member_name = request.POST.get('member_name')
use_time = request.POST.get('use_time')
self.room.name = room_name
self.room.use_time = bool(int(use_time))
self.member.name = member_name
self.room.save()
self.member.save()
return self.redirect('poker:room', args=(token,))
def get_context_data(self, *args, **kwargs):
"""Get context data."""
return {
'room': self.room,
'member': self.member,
}
def dispatch(self, *args, **kwargs):
"""Dispatch request."""
self.user = (
self.request.user if self.request.user.is_authenticated else None
)
self.room = self.get_object_or_404(PokerRoom, token=kwargs['token'])
self.poker_round = self.room.get_poker_round()
self.member = PokerMember.objects.filter(
room=self.room,
user=self.user,
is_active=True,
).first()
return super().dispatch(*args, **kwargs)
|
nilq/baby-python
|
python
|
import bpy
import struct
import squish
from bStream import *
import time
def compress_block(image, imageData, tile_x, tile_y, block_x, block_y):
rgba = [0 for x in range(64)]
mask = 0
for y in range(4):
if(tile_y + block_y + y < len(imageData)):
for x in range(4):
if(tile_x + block_x + x < len(imageData[0])):
#print(f"Writing pixel in tile [{tile_x}, {tile_y}] block [{bx}, {by}] at data at {x} {y}")
index = (y * 4) + x
mask |= (1 << index)
localIndex = 4 * index
pixel = imageData[(image.size[1] - 1) - (tile_y + block_y + y)][(tile_x + block_x + x)]
if(type(pixel) != int):
rgba[localIndex + 0] = int(pixel[0] * 255)
rgba[localIndex + 1] = int(pixel[1] * 255)
rgba[localIndex + 2] = int(pixel[2] * 255)
rgba[localIndex + 3] = int(pixel[3] * 255 if len(pixel) == 4 else 0xFF) #just in case alpha is not enabled
return squish.compressMasked(bytes(rgba), mask, squish.DXT1)
def cmpr_from_blender(image):
start = time.time()
img_data = [[image.pixels[(y * image.size[0] + x)*4 : ((y * image.size[0] + x) * 4) + 4] for x in range(image.size[0])] for y in range(image.size[1])]
img_out = bStream()
    # Iterate over the image in 8x8 tiles of 4x4 blocks so that we don't emit any garbage data
    for ty in range(0, image.size[1], 8):
        for tx in range(0, image.size[0], 8):
            for by in range(0, 8, 4):
                for bx in range(0, 8, 4):
                    img_out.write(compress_block(image, img_data, tx, ty, bx, by))
img_out.seek(0)
end = time.time()
print(f"{image.name} compressed in {end-start} seconds")
return (0x0E, image.size[0], image.size[1], img_out.fhandle.read())
def rgb565_from_blender(image):
img_data = [[image.pixels[(y * image.size[0] + x)*4 : ((y * image.size[0] + x) * 4) + 4] for x in range(image.size[0])] for y in range(image.size[1])]
img_out = bStream()
for ty in range(0, image.size[1], 4):
for tx in range(0, image.size[0], 4):
for by in range(4):
for bx in range(4):
pixel = img_data[(image.size[1] - 1) - (ty + by)][(tx + bx)]
pixel = [int(p*255) for p in pixel]
img_out.writeUInt16(((pixel[0] & 0xF8) << 8) | ((pixel[1] & 0xFC) << 3) | ((pixel[2] & 0xF8) >> 3))
img_out.seek(0)
return (0x04, image.size[0], image.size[1], img_out.fhandle.read())
def rgb5A3_from_blender(image):
img_data = [[image.pixels[(y * image.size[0] + x)*4 : ((y * image.size[0] + x) * 4) + 4] for x in range(image.size[0])] for y in range(image.size[1])]
img_out = bStream()
for ty in range(0, image.size[1], 4):
for tx in range(0, image.size[0], 4):
for by in range(4):
for bx in range(4):
pixel = img_data[(image.size[1] - 1) - (ty + by)][(tx + bx)]
pixel = [int(p*255) for p in pixel]
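                    # Note (added): GX RGB5A3 stores fully opaque pixels as RGB555 with the
                    # top bit set, and translucent pixels as 3 bits of alpha plus 4 bits per
                    # colour channel with the top bit clear - that is what the two branches
                    # below write.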
if(pixel[3] == 255): # use rgb555 mode
img_out.writeUInt16(0x8000 | ((pixel[0] & 0xF8) << 7) | ((pixel[1] & 0xF8) << 2) | ((pixel[2] & 0xF8) >> 3))
else:
img_out.writeUInt16(((pixel[3] & 0xE0) << 8) | ((pixel[0] & 0xF0) << 4) | (pixel[1] & 0xF0) | (pixel[2] >> 4))
img_out.seek(0)
return (0x05, image.size[0], image.size[1], img_out.fhandle.read())
class Material():
wrap_modes = ['CLAMP','REPEAT','MIRROR']
def __init__(self, texindex, material):
self.texture_index = texindex
self.u = self.wrap_modes.index(material.bin_wrap_mode_u)
self.v = self.wrap_modes.index(material.bin_wrap_mode_v)
def write(self, stream):
stream.writeInt16(self.texture_index)
stream.writeInt16(-1)
stream.writeUInt8(self.u)
stream.writeUInt8(self.v)
stream.writeUInt16(0)
stream.pad(12)
class Shader():
def __init__(self, material, material_indices, cur_index, out_indices):
tex = None
if(material.use_nodes and len(material.node_tree.nodes.get("Principled BSDF").inputs["Base Color"].links) > 0):
print(f"Setting up Material {material.name}, uses nodes {material.use_nodes}, input type {material.node_tree.nodes[0].inputs[0].links[0].from_node.type}")
tex = material.node_tree.nodes.get("Principled BSDF").inputs[0].links[0].from_node.image
self.bump_index = -1
self.diffuse_index = -1
#force for the moment
self.tint = (int(material.bin_shader_tint[0]*255) << 24 | int(material.bin_shader_tint[1]*255) << 16 | int(material.bin_shader_tint[2]*255) << 8 | int(material.bin_shader_tint[3]*255))
self.unk1 = material.bin_shader_unk1
self.unk2 = material.bin_shader_unk2
self.unk3 = material.bin_shader_unk3
#TODO: bumpmaps?
#if(material.bump_texname):
# self.bump_index = textures.material_indices[material.bump_texname]
if(tex is not None):
self.diffuse_index = material_indices[material.name]
out_indices[material.name] = cur_index
print("Bump Map {0}, Diffuse Map {1}, Tint {2}".format(self.bump_index, self.diffuse_index, hex(self.tint)))
def write(self, stream):
stream.writeUInt8(self.unk1)
stream.writeUInt8(self.unk2)
stream.writeUInt8(self.unk3)
stream.writeUInt32(self.tint)
stream.pad(1)
stream.writeInt16(self.diffuse_index)
stream.writeInt16(self.bump_index)
#demolisher support
for x in range(6):
stream.writeInt16(-1)
stream.writeInt16(0)
stream.writeInt16(-1)
for x in range(6):
stream.writeInt16(0)
class ShaderManager():
def __init__(self, material_indices, used_materials):
self.shader_indices = {}
self.shaders = [Shader(used_materials[x], material_indices, x, self.shader_indices) for x in range(len(used_materials))]
def getShaderIndex(self, name):
print(f"Looking for shader {name} out of shaders {self.shader_indices}")
return (self.shader_indices[name] if name in self.shader_indices else -1)
def writeShaders(self, stream):
for shader in self.shaders:
shader.write(stream)
class TextureManager():
def __init__(self, materials_used):
#TODO: Massive improvements need to be made here, this system works but it seems very inefficient.
self.textures = []
self.materials = []
self.texture_indices = {}
self.material_indices = {}
matindex = 0
texindex = 0
for material in materials_used:
if(material.use_nodes):
tex = None
if(len(material.node_tree.nodes.get("Principled BSDF").inputs["Base Color"].links) > 0):
tex = material.node_tree.nodes.get("Principled BSDF").inputs[0].links[0].from_node.image
texname = tex.name.split('.')[0]
if(texname in self.texture_indices):
self.material_indices[material.name] = matindex
self.materials.append(Material(self.texture_indices[texname] , material))
matindex += 1
continue
if(material.gx_img_type == 'CMPR'):
self.textures.append(cmpr_from_blender(tex))
elif(material.gx_img_type == 'RGB565'):
self.textures.append(rgb565_from_blender(tex))
elif(material.gx_img_type == 'RGB5A3'):
self.textures.append(rgb5A3_from_blender(tex))
self.texture_indices[texname] = texindex
self.material_indices[material.name] = matindex
self.materials.append(Material(texindex, material))
texindex += 1
matindex += 1
else:
self.material_indices[material.name] = matindex
self.materials.append(Material(-1, material))
matindex += 1
#else:
# self.materials.append(Material(texindex))
# texindex += 1
#if(material.bump_texname):
# self.textures.append(ConvertTexture(material.bump_texname))
# self.material_indices[material.bump_texname] = texindex
# self.materials.append(Material(texindex))
# texindex += 1
def writeMaterials(self, stream):
for material in self.materials:
material.write(stream)
def writeTextures(self, stream):
header_section = bStream()
data_section = bStream()
header_size = bStream.padTo32Delta(0xC*len(self.textures)) + (0xC*len(self.textures))
texture_offsets = []
for texture in self.textures:
texture_offsets.append(data_section.tell())
data_section.write(texture[3])
for x in range(0, len(texture_offsets)):
header_section.write(struct.pack(">HHBBHI", self.textures[x][1], self.textures[x][2], self.textures[x][0], 0, 0, texture_offsets[x] + header_size))
header_section.padTo32(header_section.tell())
header_section.seek(0)
data_section.seek(0)
stream.write(header_section.fhandle.read())
stream.write(data_section.fhandle.read())
header_section.close()
data_section.close()
|
nilq/baby-python
|
python
|
import datafellows
def test_main():
assert datafellows # use your library here
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from pypospack.eamtools import create_r
from pypospack.potential.pair_general_lj import func_cutoff_mishin2004
r = create_r(6.,5000)
rc = 5.168
hc = 0.332
xrc = (r-rc)/hc
psirc = (xrc**4)/(1+xrc**4)
rc_ind = np.ones(r.size)
rc_ind[r > rc] = 0
psirc = psirc * rc_ind
h0 = 0.332
x0 = r/h0
psi0 = (x0**4)/(1+x0**4)
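# Note (added): both factors above share the form psi(x) = x**4 / (1 + x**4),
# evaluated once with x = (r - rc)/hc and zeroed for r > rc, and once with
# x = r/h0. Their product is plotted against func_cutoff_mishin2004 in the
# bottom panel as a consistency check; reading them as the Mishin (2004)
# cutoff factors is an assumption based on the function being compared against.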
fig, ax = plt.subplots(3, 1)
ax[0].plot(r,psirc,label=r'$\Psi_{c}$')
ax[0].set_ylabel(r'$\Psi_{c}$')
ax[1].plot(r,psi0,label=r'$\Psi_{0}$')
ax[1].set_ylabel(r'$\Psi_{0}$')
ax[2].plot(r,psirc*psi0,label=r'$\Psi_{c}*\Psi_{0}$')
ax[2].set_ylabel(r'$\Psi_{c}\Psi_{0}$')
for i in range(2):
ax[i].tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
fig.tight_layout()
fig.savefig('fig_cutoff_mishin2004.png',dpi=1300)
ax[2].plot(r,
func_cutoff_mishin2004(r,rc,hc,h0))
plt.show()
|
nilq/baby-python
|
python
|
from tir import Webapp
import unittest
from datetime import datetime
class MATA940(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
DateSystem = datetime.today()
inst.oHelper.Setup('SIGAFIS', DateSystem.strftime(
'%d/%m/%Y'), 'T1', 'X FIS16', '09')
inst.oHelper.Program('MATA940')
def test_MATA940_001(self):
'''
Test Case 001
'''
# self.oHelper.SetButton('Livros Fiscais (1)')
# self.oHelper.SetButton('Arq. Magneticos (1)')
# self.oHelper.SetButton('Sintegra')
        # Click the Parameters button
self.oHelper.SetButton('Param.')
        # Parameter definition section
self.oHelper.SetValue('Data Inicial ?', '01/05/2016')
self.oHelper.SetValue('Data Final ?', '31/05/2016')
self.oHelper.SetValue('LayOut?', 'sintmg05')
self.oHelper.SetValue('Arquivo Destino?', 'sintmg.txt')
self.oHelper.SetValue('Finalidade?', 'Normal')
self.oHelper.SetValue('UF Origem/Destino?', '')
self.oHelper.SetValue('Processa UF?', 'Exceto a UF')
self.oHelper.SetValue('Numero do Livro?', '*')
self.oHelper.SetValue('Equipamento?', '')
self.oHelper.SetValue('Gera Inventario?', 'Nao')
self.oHelper.SetValue('Notas Fiscais?', 'Entrada')
# self.oHelper.SetValue('Gera Reg.60I e 60D ?','')
self.oHelper.SetValue('Drive Destino ?', 'C:\\')
self.oHelper.SetValue('Transportadora ?','')
self.oHelper.SetValue('Data de Fechamento ?', '31052016')
self.oHelper.SetValue('Gera Registro 60R ?', 'Nao')
self.oHelper.SetValue('Gera Registro 61R ?', 'Nao')
self.oHelper.SetValue('Gera NF Produtor ?', 'Nao')
self.oHelper.SetValue('Meio magnetico ?', 'FITA')
self.oHelper.SetValue('Fator de bloco ?', '')
self.oHelper.SetValue('Natureza Operacoes ?', 'Totalidade')
self.oHelper.SetValue('Destaca PIS/COFINS ?', 'Sim')
self.oHelper.SetValue('NF De ?', '')
self.oHelper.SetValue('NF Ate ?', 'ZZZZ')
self.oHelper.SetValue('Filial de ?', '')
self.oHelper.SetValue('Filial Ate ?', 'ZZZZZZ')
self.oHelper.SetValue('Consolidação na mesma UF ?', 'Nao')
self.oHelper.SetValue('Filtro Tipo Produto ?', '')
self.oHelper.SetValue('Produto De ?', '')
self.oHelper.SetValue('Produto Ate ?', 'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')
self.oHelper.SetValue('Armazem De ?', '')
self.oHelper.SetValue('Armazem Ate ?', 'ZZ')
self.oHelper.SetValue('Prods.c/Saldo Neg. ?', 'Nao')
self.oHelper.SetValue('Prods.c/Saldo Zera. ?', 'Nao')
self.oHelper.SetValue('Prods.c/Saldo Poder 3º. ?', 'Nao')
self.oHelper.SetValue('Prods.c/Custo Zera. ?', 'Nao')
self.oHelper.SetValue('Gera 88 MG ?', 'Nao')
self.oHelper.SetValue('Data 88 ?', '')
self.oHelper.SetValue('Gera Relat. Rest. MG ?', 'Nao')
self.oHelper.SetValue('Saldo Processo ?', 'Nao')
self.oHelper.SetValue('Lista MOD Processo ?', 'Nao')
self.oHelper.SetValue('Seleciona Filiais ?', 'Sim')
self.oHelper.SetValue('Gera registro 60I ?', 'Nao')
self.oHelper.SetValue('Gera reg. Tipo 88 Det. 06 ?', 'Nao')
self.oHelper.SetValue('Gera reg. 8827 e 8828 ?', 'Nao')
self.oHelper.SetValue('Gera reg. 8830 ?', 'Nao')
self.oHelper.SetValue('Simples Nacional ?', 'Nao')
self.oHelper.SetValue('Arq. Periodo Atual ?', '')
self.oHelper.SetValue('Gera reg. 53 (Entradas) ?', 'Nao')
self.oHelper.SetValue('Gera reg. 88DV ?', 'Nao')
self.oHelper.SetValue('Aglutina seleção por CNPJ+IE ?', 'Nao')
# self.oHelper.SetValue('Rest. ST Alteração Regime ?','')
# self.oHelper.SetValue('Rest.ST Estoque/Nota Fiscal ?','')
# self.oHelper.SetValue('Gera somente Reg. Rest.ST ?','')
        # Click OK to confirm the parameters and return to the previous screen
self.oHelper.SetButton('OK')
        # Click Ok to start generating the magnetic file, layout SINTMG05 - Record 55 (GNRE ICMS Antecipado - inbound document)
self.oHelper.SetButton('Ok')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Mar 20, 2013
All-to-all perceptron layers: simple (:class:`All2All`) and with \
activation function (:class:`All2AllTanh`, :class:`All2AllRELU` and \
:class:`All2AllSoftmax`).
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from __future__ import division
import cuda4py.blas as cublas
import numpy
from zope.interface import implementer
from veles.accelerated_units import IOpenCLUnit, ICUDAUnit, INumpyUnit
import veles.error as error
from veles.memory import reshape, Array
import veles.ocl_blas as ocl_blas
from veles.znicz.nn_units import FullyConnectedOutput, NNLayerBase
@implementer(IOpenCLUnit, ICUDAUnit, INumpyUnit)
class All2All(FullyConnectedOutput, NNLayerBase):
"""All2All with linear activation f(x) = x.
Must be assigned before initialize():
input
Updates after run():
output
Creates within initialize():
weights
bias
output
Attributes:
input: input as batch of samples.
output: output as batch of samples.
weights: matrix of weights.
bias: bias.
output_sample_shape: shape of the output layer (may be Array).
output_samples_number: the number of samples in the output If it is
None (the default), it is taken from input.
output_dtype: the dtype of output. If it is None (the default),
it is taken from input.
activation_mode: activation type. It is passed as a definition directly
to OpenCL/CUDA source code.
weights_transposed: assume weights matrix as a transposed one,
NOTE: only access order will be affected,
not a shape.
weights_filling: rand weight filling
("uniform" (default) or "gaussian")
        weights_stddev: magnitude of the uniform weight distribution,
                        or StdDev of the gaussian weight distribution.
"""
__id__ = "58a5eadf-ae1e-498f-bf35-7d93939c4c86"
MAPPING = {"all2all"}
C = 10
def __init__(self, workflow, **kwargs):
super(All2All, self).__init__(workflow, **kwargs)
self.activation_mode = "ACTIVATION_LINEAR"
self.exports.append("activation_mode")
self._global_size = None
self._local_size = None
self.demand("input", "output_sample_shape")
def init_unpickled(self):
super(All2All, self).init_unpickled()
self.sources_["all2all/forward"] = {}
def get_weights_magnitude(self):
"""
Returns: weights range magnitude for initial random distribution,
such that activation function will be near maximum
if all input values are at their supposed max value.
"""
vle = numpy.sqrt(
self.C / (self.input.sample_size +
numpy.prod(self.output_sample_shape)))
if self.weights_filling == "gaussian":
vle /= 3
return vle
def fill_array(self, filling, array, stddev):
if filling == "uniform":
self.rand.fill(array, -stddev, stddev)
elif filling == "gaussian":
self.rand.fill_normal_real(array, 0, stddev)
elif filling == "constant":
array[:] = stddev
else:
raise error.BadFormatError("Invalid filling type %s" % filling)
def initialize(self, device, **kwargs):
if not self.input:
if self.output:
if self.output_samples_number is None:
self.warning(
"input is not initialized and output_samples_number "
"was not specified => unable to validate output")
return True
assert self.output.shape[1:] == self.output_shape[1:]
if not self.output or self.output.shape[0] != self.output_shape[0]:
if self.output_samples_number is None:
self.warning(
"input is not initialized and output_samples_number "
"was not specified => unable to create output")
return True
if self.output_dtype is None:
self.warning(
"input is not initialized and output_dtype was "
"not specified => unable to create output")
return True
self.output.reset(numpy.zeros(
self.output_shape, self.output_dtype))
return True
super(All2All, self).initialize(device=device, **kwargs)
if self.weights_stddev is None:
self.weights_stddev = min(self.get_weights_magnitude(), 0.5)
if self.bias_stddev is None:
self.bias_stddev = self.weights_stddev
# Check that weights vector was not assigned from the outside
self.weights_shape = (self.neurons_number, self.input.sample_size)
weights_shape_t = tuple(reversed(self.weights_shape))
if not self.weights:
self.weights.reset(numpy.zeros(self.weights_shape,
dtype=self.input.dtype))
self.fill_array(self.weights_filling, self.weights.mem,
self.weights_stddev)
if self.weights_transposed:
self.weights.shape = weights_shape_t
else:
            assert self.weights.shape == (weights_shape_t if self.weights_transposed
                                          else self.weights_shape)
if self.include_bias:
# Check that bias was not assigned from the outside
if not self.bias:
self.bias.reset(numpy.zeros(
self.neurons_number, self.input.dtype))
self.fill_array(self.bias_filling, self.bias.mem,
self.bias_stddev)
else:
assert self.bias.size == self.neurons_number
self._create_output()
self.init_vectors(self.input, self.output, self.weights, self.bias)
def _create_output(self):
if self.output and self.output.shape == self.output_shape:
return
if self.output:
assert self.output.shape[1:] == self.output_shape[1:]
if not self.output or self.output_shape[0] != self.output.shape[0]:
self.output.reset(numpy.zeros(self.output_shape, self.input.dtype))
def _gpu_init(self, blas_class):
dtype = self.input.dtype
self.gemm_ = blas_class.gemm(dtype)
self.np_one = numpy.ones(1, dtype)
self.np_zero = numpy.zeros(1, dtype)
self._transA = (cublas.CUBLAS_OP_N if self.weights_transposed
else cublas.CUBLAS_OP_T)
self._transB = cublas.CUBLAS_OP_N
self._A_ = self.weights.devmem
self._B_ = self.input.devmem
self._rowsCountA = self.weights_shape[0]
self._columnCountB = self.input.shape[0]
self._commonSideLength = self.input.sample_size
self.build_program({"BIAS_SIZE": self.output.sample_size,
"OUTPUT_SIZE": self.output.size,
self.activation_mode: 1,
"INCLUDE_BIAS": int(self.include_bias),
"Y": self.output.sample_size},
"%s_%d_%d_%d" %
(self.__class__.__name__, self.input.shape[0],
self.input.sample_size, self.output.sample_size),
dtype=dtype)
if self.include_bias or self.activation_mode != "ACTIVATION_LINEAR":
self.assign_kernel("apply_bias_with_activation")
self.set_args(self.output, self.bias)
def cuda_init(self):
self._gpu_init(cublas.CUBLAS)
if self._kernel_ is not None:
block_size = self.device.suggest_block_size(self._kernel_)
self._global_size_bias = (
int(numpy.ceil(self.output.size / block_size)), 1, 1)
self._local_size_bias = (block_size, 1, 1)
def ocl_init(self):
ocl_blas.OCLBLAS.attach_to_device(self.device)
self._gpu_init(ocl_blas.OCLBLAS)
if self._kernel_ is not None:
self._global_size_bias = (self.output.size,)
self._local_size_bias = None
def _gpu_run(self):
self.unmap_vectors(self.output, self.input, self.weights, self.bias)
self.gemm_(
self.device.blas, self._transA, self._transB,
self._rowsCountA, self._columnCountB, self._commonSideLength,
self.np_one, self._A_, self._B_,
self.np_zero, self.output.devmem)
if self.include_bias or self.activation_mode != "ACTIVATION_LINEAR":
self.execute_kernel(self._global_size_bias, self._local_size_bias)
def ocl_run(self):
if self.intel_opencl_workaround:
return self.numpy_run()
return self._gpu_run()
def cuda_run(self):
return self._gpu_run()
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
self.output.map_invalidate()
self.input.map_read()
self.weights.map_read()
self.bias.map_read()
mem = numpy.dot(self.input.matrix,
self.weights.mem if self.weights_transposed
else self.weights.mem.transpose())
if self.include_bias:
mem += self.bias.mem
reshape(self.output.mem, mem.shape)[:] = mem[:]
class All2AllTanh(All2All):
"""All2All with scaled tanh() activation f(x) = 1.7159 * tanh(0.6666 * x).
"""
__id__ = "b3a2bd5c-3c01-46ef-978a-fef22e008f31"
A = 1.7159
B = 0.6666
C = 9.0 # tanh(C) -> 1
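    # Note (added): A and B appear to match the scaled tanh recommended in
    # LeCun's "Efficient BackProp" (1.7159 * tanh(2/3 * x)); C feeds
    # get_weights_magnitude above, since tanh(x) is effectively saturated
    # once |x| reaches about C.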
MAPPING = {"all2all_tanh"}
def initialize(self, device, **kwargs):
self.activation_mode = "ACTIVATION_TANH"
retval = super(All2AllTanh, self).initialize(device=device, **kwargs)
self.output.max_supposed = All2AllTanh.A
return retval
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
super(All2AllTanh, self).numpy_run()
self.output.map_write()
mem = self.output.mem
mem *= All2AllTanh.B
numpy.tanh(mem, mem)
mem *= All2AllTanh.A
class All2AllRELU(All2All):
"""All2All with RELU activation f(x) = log(1.0 + exp(x)).
"""
__id__ = "5b7f36d8-f8c8-4eb7-8af3-75eb3cfca3fe"
MAPPING = {"all2all_relu"}
def initialize(self, device, **kwargs):
self.activation_mode = "ACTIVATION_RELU"
retval = super(All2AllRELU, self).initialize(device=device, **kwargs)
self.output.max_supposed = 10
return retval
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
super(All2AllRELU, self).numpy_run()
self.output.map_write()
mem = self.output.mem
mem[:] = numpy.where(mem > 15, mem, numpy.log(numpy.exp(mem) + 1.0))
class All2AllStrictRELU(All2All):
"""All2All with RELU activation f(x) = max(x, 0).
"""
__id__ = "fe63baf0-4fe4-4cf3-bafb-ef1215bf27a8"
MAPPING = {"all2all_str"}
def initialize(self, device, **kwargs):
self.activation_mode = "ACTIVATION_STRICT_RELU"
retval = super(All2AllStrictRELU, self).initialize(
device=device, **kwargs)
self.output.max_supposed = 10
return retval
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
super(All2AllStrictRELU, self).numpy_run()
self.output.map_write()
mem = self.output.mem
numpy.clip(mem, 0.0, 1.0e30, mem)
class All2AllSigmoid(All2All):
"""All2All with Sigmoid activation f(x) = 1 / (1 + exp(-x)).
"""
__id__ = "a27974ec-1764-4944-925d-4862de237881"
MAPPING = {"all2all_sigmoid"}
C = 1
def initialize(self, device, **kwargs):
self.activation_mode = "ACTIVATION_SIGMOID"
retval = super(All2AllSigmoid, self).initialize(
device=device, **kwargs)
        self.output.max_supposed = 1
return retval
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
super(All2AllSigmoid, self).numpy_run()
self.output.map_write()
mem = self.output.mem
# 1 / (1 + numpy.exp(-mem))
numpy.exp(-mem, mem)
numpy.reciprocal(mem + 1, mem)
class All2AllSoftmax(All2All):
"""All2All with linear activation and softmax normalization.
Must be assigned before initialize():
Updates after run():
max_idx
Creates within initialize():
max_idx
Attributes:
krn_sm_: kernel for softmax activation calculation.
max_idx: indexes of element with maximum value for each sample.
"""
__id__ = "420219fc-3e1a-45b1-87f8-aaa0c1540de4"
MAPPING = {"softmax"}
def __init__(self, workflow, **kwargs):
super(All2AllSoftmax, self).__init__(workflow, **kwargs)
self.max_idx = Array()
self.reduce_size = 256
def init_unpickled(self):
super(All2AllSoftmax, self).init_unpickled()
self.krn_sm_ = None
self._force_gpu_apply_exp = False
def initialize(self, device, **kwargs):
self.reduce_size = min(self.reduce_size,
int(numpy.prod(self.output_sample_shape)))
self.sources_["all2all/softmax"] = {
"REDUCE_SIZE": self.reduce_size
}
retval = super(All2AllSoftmax, self).initialize(
device=device, **kwargs)
if retval:
return retval
if self.output.mem.size // self.output.mem.shape[0] <= 1:
raise error.BadFormatError(
"Output sample size should be greater than 1 for SoftMax.")
if not self.max_idx:
self.max_idx.reset(numpy.zeros(self.output.shape[0],
dtype=numpy.int32))
self.max_idx.initialize(self.device)
return retval
def numpy_apply_exp(self):
self.output.map_write()
self.max_idx.map_invalidate()
out = self.output.mem
out = reshape(out, (out.shape[0], out.size // out.shape[0]))
for i, sample in enumerate(out):
im = sample.argmax()
self.max_idx[i] = im
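            # Note (added): subtracting the per-sample maximum before exp() is the
            # standard numerically stable softmax; the normalized result is
            # unchanged because softmax is invariant to a constant shift.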
m = sample[im]
sample -= m
numpy.exp(sample, sample)
smm = sample.sum()
sample /= smm
def ocl_apply_exp(self):
self.unmap_vectors(self.output, self.max_idx)
global_size = (self.output.shape[0] * self.reduce_size,)
local_size = (self.reduce_size,)
self.execute_kernel(global_size, local_size, self.krn_sm_)
def cuda_apply_exp(self):
self.unmap_vectors(self.output, self.max_idx)
global_size = (self.output.shape[0], 1, 1)
local_size = (self.reduce_size, 1, 1)
self.execute_kernel(global_size, local_size, self.krn_sm_)
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
super(All2AllSoftmax, self).numpy_run()
if not self._force_gpu_apply_exp:
self.numpy_apply_exp()
def ocl_run(self):
"""Forward propagation from batch on GPU.
"""
self._force_gpu_apply_exp = True
super(All2AllSoftmax, self).ocl_run()
self.ocl_apply_exp()
def cuda_run(self):
"""Forward propagation from batch on GPU.
"""
self._force_gpu_apply_exp = True
super(All2AllSoftmax, self).cuda_run()
self.cuda_apply_exp()
def ocl_init(self):
super(All2AllSoftmax, self).ocl_init()
self.krn_sm_ = self.get_kernel("apply_exp")
self.krn_sm_.set_args(self.output.devmem, self.max_idx.devmem)
def cuda_init(self):
super(All2AllSoftmax, self).cuda_init()
self.krn_sm_ = self.get_kernel("apply_exp")
self.krn_sm_.set_args(self.output.devmem, self.max_idx.devmem)
|
nilq/baby-python
|
python
|