| content (string, lengths 0–1.05M) | origin (string, 2 classes) | type (string, 2 classes) |
|---|---|---|
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import {{cookiecutter.project_name}}.parameters as P
class Animation:
'''
A matplotlib window containing an animation for the system. There are
no parameters for initialization, as the Animation object can directly
access the parameters in {{cookiecutter.project_name}}.parameters.
'''
def __init__(self):
pass
def update(self, u):
"""
Updates the animation, given a system state. This function does not
return any value.
Parameters
----------
u : np.ndarray
A vector of shape (n,1) containing the state vector of the system.
"""
pass
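# --- Usage sketch (illustrative; this is a cookiecutter template, so the names
# below are assumptions for illustration, not part of the template itself) ---
#
#     animation = Animation()
#     u = np.zeros((4, 1))      # hypothetical state vector of shape (n, 1)
#     animation.update(u)
#     plt.pause(0.01)           # give matplotlib a chance to redraw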
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 10 13:15:23 2018
@author: sshekhar
"""
from nseurls import *
from nsecommons import *
from nseconstants import *
from datetime import date, timedelta
from bs4 import BeautifulSoup
import pandas as pd
import six
import inspect
dd_mmm_yyyy = StrDate.default_format(format="%d-%b-%Y")
dd_mm_yyyy = StrDate.default_format(format="%d-%m-%Y")
EQUITY_SCHEMA = [str, str,
dd_mmm_yyyy,
float, float, float, float,
float, float, float, int, float,
int, int, float]
EQUITY_HEADERS = ["Symbol", "Series", "Date", "Prev Close",
"Open", "High", "Low","Last", "Close", "VWAP",
"Volume", "Turnover", "Trades", "Deliverable Volume",
"%Deliverble"]
EQUITY_SCALING = {"Turnover": 100000,
"%Deliverble": 0.01}
FUTURES_SCHEMA = [str, dd_mmm_yyyy, dd_mmm_yyyy,
float, float, float, float,
float, float, int, float,
int, int, float]
FUTURES_HEADERS = ['Symbol', 'Date', 'Expiry',
'Open', 'High', 'Low', 'Close',
'Last', 'Settle Price', 'Number of Contracts', 'Turnover',
'Open Interest', 'Change in OI', 'Underlying']
FUTURES_SCALING = {"Turnover": 100000}
OPTION_SCHEMA = [str, dd_mmm_yyyy, dd_mmm_yyyy, str, float,
float, float, float, float,
float, float, int, float,
float, int, int, float]
OPTION_HEADERS = ['Symbol', 'Date', 'Expiry', 'Option Type', 'Strike Price',
'Open', 'High', 'Low', 'Close',
'Last', 'Settle Price', 'Number of Contracts', 'Turnover',
'Premium Turnover', 'Open Interest', 'Change in OI', 'Underlying']
OPTION_SCALING = {"Turnover": 100000,
"Premium Turnover": 100000}
INDEX_SCHEMA = [dd_mmm_yyyy,
float, float, float, float,
int, float]
INDEX_HEADERS = ['Date',
'Open', 'High', 'Low', 'Close',
'Volume', 'Turnover']
INDEX_SCALING = {'Turnover': 10000000}
VIX_INDEX_SCHEMA = [dd_mmm_yyyy,
float, float, float, float,
float, float, float]
VIX_INDEX_HEADERS = ['Date',
'Open', 'High', 'Low', 'Close',
'Previous', 'Change', '%Change']
VIX_SCALING = {'%Change': 0.01}
INDEX_PE_SCHEMA = [dd_mmm_yyyy,
float, float, float]
INDEX_PE_HEADERS = ['Date', 'P/E', 'P/B', 'Div Yield']
RBI_REF_RATE_SCHEMA = [dd_mmm_yyyy, float, float, float, float]
RBI_REF_RATE_HEADERS = ['Date', '1 USD', '1 GBP', '1 EURO', '100 YEN']
"""
symbol = "SBIN" (stock name, index name and VIX)
start = date(yyyy,mm,dd)
end = date(yyyy,mm,dd)
index = True, False (True even for VIX)
---------------
futures = True, False
option_type = "CE", "PE", "CA", "PA"
strike_price = integer number
expiry_date = date(yyyy,mm,dd)
"""
def get_history(symbol, start, end, index=False, futures=False, option_type="",
expiry_date = None, strike_price="", series='EQ'):
"""This is the function to get the historical prices of any security (index,
stocks, derviatives, VIX) etc.
Args:
symbol (str): Symbol for stock, index or any security
start (datetime.date): start date
end (datetime.date): end date
        index (boolean): False by default, True if it's an index
futures (boolean): False by default, True for index and stock futures
expiry_date (datetime.date): Expiry date for derivatives, Compulsory for futures and options
option_type (str): It takes "CE", "PE", "CA", "PA" for European and American calls and puts
strike_price (int): Strike price, Compulsory for options
series (str): Defaults to "EQ", but can be "BE" etc (refer NSE website for details)
Returns:
pandas.DataFrame : A pandas dataframe object
Raises:
ValueError:
            1. strike_price argument missing or not of type int when option_type is provided
            2. If there's an invalid value in option_type; valid values are 'CE', 'PE', 'CA' or 'PA'
            3. If both futures=True and option_type='CE' or 'PE' are given
"""
frame = inspect.currentframe()
args, _, _, kwargs = inspect.getargvalues(frame)
del(kwargs['frame'])
start = kwargs['start']
end = kwargs['end']
if (end - start) > timedelta(130):
kwargs1 = dict(kwargs)
kwargs2 = dict(kwargs)
kwargs1['end'] = start + timedelta(130)
kwargs2['start'] = kwargs1['end'] + timedelta(1)
t1 = ThreadReturns(target=get_history, kwargs=kwargs1)
t2 = ThreadReturns(target=get_history, kwargs=kwargs2)
t1.start()
t2.start()
t1.join()
t2.join()
return pd.concat((t1.result, t2.result))
else:
return get_history_quanta(**kwargs)
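# Usage sketch (illustrative only; requires network access to NSE and the helper
# modules imported above; symbols and dates are placeholders):
#
#     from datetime import date
#     sbin = get_history(symbol="SBIN", start=date(2018, 1, 1), end=date(2018, 6, 30))
#     nifty = get_history(symbol="NIFTY 50", start=date(2018, 1, 1),
#                         end=date(2018, 6, 30), index=True)
#
# Ranges longer than 130 days are split in two and fetched on parallel threads,
# then concatenated, so the caller always gets back a single DataFrame.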
def get_history_quanta(**kwargs):
url, params, schema, headers, scaling = validate_params(**kwargs)
df = url_to_df(url=url,
params=params,
schema=schema,
headers=headers, scaling=scaling)
return df
def url_to_df(url, params, schema, headers, scaling={}):
resp = url(**params)
bs = BeautifulSoup(resp.text, 'lxml')
tp = ParseTables(soup=bs,
schema=schema,
headers=headers)
# tp = ParseTables(soup=bs,
# schema=schema,
# headers=headers, index="Date")
df = tp.get_df()
for key, val in six.iteritems(scaling):
df[key] = val * df[key]
return df
def validate_params(symbol, start, end, index=False, futures=False, option_type="",
expiry_date = None, strike_price="", series='EQ'):
"""
symbol = "SBIN" (stock name, index name and VIX)
start = date(yyyy,mm,dd)
end = date(yyyy,mm,dd)
index = True, False (True even for VIX)
---------------
futures = True, False
option_type = "CE", "PE", "CA", "PA"
strike_price = integer number
expiry_date = date(yyyy,mm,dd)
"""
params = {}
if start > end:
raise ValueError('Please check start and end dates')
    if (futures and not option_type) or (not futures and option_type):  # exclusive or: a derivatives request
params['symbol'] = symbol
params['dateRange'] = ''
params['optionType'] = 'select'
params['strikePrice'] = ''
params['fromDate'] = start.strftime('%d-%b-%Y')
params['toDate'] = end.strftime('%d-%b-%Y')
url = derivative_history_url
try:
params['expiryDate'] = expiry_date.strftime("%d-%m-%Y")
except AttributeError as e:
raise ValueError('Derivative contracts must have expiry_date as datetime.date')
option_type = option_type.upper()
if option_type in ("CE", "PE", "CA", "PA"):
if not isinstance(strike_price,int) and not isinstance(strike_price, float):
raise ValueError("strike_price argument missing or not of type int or float")
#option specific
if index: params['instrumentType'] = 'OPTIDX'
else: params['instrumentType'] = 'OPTSTK'
params['strikePrice'] = strike_price
params['optionType'] = option_type
schema = OPTION_SCHEMA
headers = OPTION_HEADERS
scaling = OPTION_SCALING
elif option_type:
#this means that there's an invalid value in option_type
raise ValueError("Invalid value in option_type, valid values-'CE' or 'PE' or 'CA' or 'CE'")
else:
# its a futures request
if index:
if symbol=='INDIAVIX': params['instrumentType'] = 'FUTIVX'
else: params['instrumentType'] = 'FUTIDX'
else: params['instrumentType'] = 'FUTSTK'
schema = FUTURES_SCHEMA
headers = FUTURES_HEADERS
scaling = FUTURES_SCALING
elif futures and option_type:
raise ValueError("select either futures='True' or option_type='CE' or 'PE' not both")
else: # its a normal request
if index:
if symbol=='INDIAVIX':
params['fromDate'] = start.strftime('%d-%b-%Y')
params['toDate'] = end.strftime('%d-%b-%Y')
url = index_vix_history_url
schema = VIX_INDEX_SCHEMA
headers = VIX_INDEX_HEADERS
scaling = VIX_SCALING
else:
if symbol in DERIVATIVE_TO_INDEX:
params['indexType'] = DERIVATIVE_TO_INDEX[symbol]
else:
params['indexType'] = symbol
params['fromDate'] = start.strftime('%d-%m-%Y')
params['toDate'] = end.strftime('%d-%m-%Y')
url = index_history_url
schema = INDEX_SCHEMA
headers = INDEX_HEADERS
scaling = INDEX_SCALING
else:
params['symbol'] = symbol
params['series'] = series
params['symbolCount'] = get_symbol_count(symbol)
params['fromDate'] = start.strftime('%d-%m-%Y')
params['toDate'] = end.strftime('%d-%m-%Y')
url = equity_history_url
schema = EQUITY_SCHEMA
headers = EQUITY_HEADERS
scaling = EQUITY_SCALING
return url, params, schema, headers, scaling
def get_index_pe_history(symbol, start, end):
frame = inspect.currentframe()
args, _, _, kwargs = inspect.getargvalues(frame)
del(kwargs['frame'])
start = kwargs['start']
end = kwargs['end']
if (end - start) > timedelta(130):
kwargs1 = dict(kwargs)
kwargs2 = dict(kwargs)
kwargs1['end'] = start + timedelta(130)
kwargs2['start'] = kwargs1['end'] + timedelta(1)
t1 = ThreadReturns(target=get_index_pe_history, kwargs=kwargs1)
t2 = ThreadReturns(target=get_index_pe_history, kwargs=kwargs2)
t1.start()
t2.start()
t1.join()
t2.join()
return pd.concat((t1.result, t2.result))
else:
return get_index_pe_history_quanta(**kwargs)
def get_index_pe_history_quanta(symbol, start, end):
"""This function will fetch the P/E, P/B and dividend yield for a given index
Args:
symbol (str): Symbol for stock, index or any security
start (datetime.date): start date
end (datetime.date): end date
Returns:
pandas.DataFrame : A pandas dataframe object
"""
if symbol in DERIVATIVE_TO_INDEX:
index_name = DERIVATIVE_TO_INDEX[symbol]
else:
index_name = symbol
resp = index_pe_history_url(indexName=index_name,
fromDate=start.strftime('%d-%m-%Y'),
toDate=end.strftime('%d-%m-%Y'))
bs = BeautifulSoup(resp.text, 'lxml')
tp = ParseTables(soup=bs,
schema=INDEX_PE_SCHEMA,
headers=INDEX_PE_HEADERS, index="Date")
df = tp.get_df()
return df
def get_price_list(dt, segment='EQ'):
    """
    URL parameters, in order:
    1. YYYY
    2. MMM
    3. ddMMMyyyy
    """
    MMM = dt.strftime("%b").upper()
    yyyy = dt.strftime("%Y")
res = price_list_url(yyyy, MMM, dt.strftime("%d%b%Y").upper() )
txt = unzip_str(res.content)
fp = six.StringIO(txt)
df = pd.read_csv(fp)
del df['Unnamed: 13']
return df
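# Usage sketch (illustrative; price_list_url and unzip_str come from the helper
# modules imported above, and the date is a placeholder):
#
#     from datetime import date
#     bhavcopy = get_price_list(date(2018, 9, 7))   # full price list for one trading day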
def get_rbi_ref_history(start, end):
frame = inspect.currentframe()
args, _, _, kwargs = inspect.getargvalues(frame)
del(kwargs['frame'])
start = kwargs['start']
end = kwargs['end']
if (end - start) > timedelta(130):
kwargs1 = dict(kwargs)
kwargs2 = dict(kwargs)
kwargs1['end'] = start + timedelta(130)
kwargs2['start'] = kwargs1['end'] + timedelta(1)
t1 = ThreadReturns(target=get_rbi_ref_history, kwargs=kwargs1)
t2 = ThreadReturns(target=get_rbi_ref_history, kwargs=kwargs2)
t1.start()
t2.start()
t1.join()
t2.join()
return pd.concat((t1.result, t2.result))
else:
return get_rbi_ref_history_quanta(**kwargs)
def get_rbi_ref_history_quanta(start, end):
"""
Args:
start (datetime.date): start date
end (datetime.date): end date
Returns:
pandas.DataFrame : A pandas dataframe object
"""
resp = rbi_rate_history_url(fromDate=start.strftime('%d-%m-%Y'),
toDate=end.strftime('%d-%m-%Y'))
bs = BeautifulSoup(resp.text, 'lxml')
tp = ParseTables(soup=bs,
schema=RBI_REF_RATE_SCHEMA,
headers=RBI_REF_RATE_HEADERS, index="Date")
df = tp.get_df()
return df
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: mom.security.codec.asn1.rsadsa
:synopsis: ASN.1/DER decoding and encoding for RSA and DSA private keys.
ASN.1 Syntax::
RSAPrivateKey ::= SEQUENCE {
version Version,
modulus INTEGER, -- n
publicExponent INTEGER, -- e
privateExponent INTEGER, -- d
prime1 INTEGER, -- p
prime2 INTEGER, -- q
exponent1 INTEGER, -- d mod (p-1)
exponent2 INTEGER, -- d mod (q-1)
coefficient INTEGER -- (inverse of q) mod p }
Version ::= INTEGER
"""
# Read unencrypted PKCS#1/PKIX-compliant, PEM & DER encoded private keys.
# Private keys can be generated with "openssl genrsa|gendsa" commands.
from __future__ import absolute_import
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import univ
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
MAX = 16
class DSAPrivateKey(univ.Sequence):
"""PKIX compliant DSA private key structure"""
componentType = namedtype.NamedTypes(
namedtype.NamedType("version", univ.Integer(
namedValues=namedval.NamedValues(("v1", 0)))),
namedtype.NamedType("p", univ.Integer()),
namedtype.NamedType("q", univ.Integer()),
namedtype.NamedType("g", univ.Integer()),
namedtype.NamedType("public", univ.Integer()),
namedtype.NamedType("private", univ.Integer())
)
class OtherPrimeInfo(univ.Sequence):
"""Other prime information."""
componentType = namedtype.NamedTypes(
namedtype.NamedType("prime", univ.Integer()),
namedtype.NamedType("exponent", univ.Integer()),
namedtype.NamedType("coefficient", univ.Integer())
)
class OtherPrimeInfos(univ.SequenceOf):
"""Other prime information."""
componentType = OtherPrimeInfo()
subtypeSpec = (univ.SequenceOf.subtypeSpec +
constraint.ValueSizeConstraint(1, MAX))
class RSAPrivateKey(univ.Sequence):
"""PKCS#1 compliant RSA private key structure"""
componentType = namedtype.NamedTypes(
namedtype.NamedType("version", univ.Integer(
namedValues=namedval.NamedValues(("two-prime", 0), ("multi", 1)))),
namedtype.NamedType("modulus", univ.Integer()),
namedtype.NamedType("publicExponent", univ.Integer()),
namedtype.NamedType("privateExponent", univ.Integer()),
namedtype.NamedType("prime1", univ.Integer()),
namedtype.NamedType("prime2", univ.Integer()),
namedtype.NamedType("exponent1", univ.Integer()),
namedtype.NamedType("exponent2", univ.Integer()),
namedtype.NamedType("coefficient", univ.Integer()),
namedtype.OptionalNamedType("otherPrimeInfos", OtherPrimeInfos())
)
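# Decoding sketch (an illustration, assuming pyasn1's DER decoder; the PEM
# armour and base64 layer must already be stripped to raw DER bytes):
#
#     from pyasn1.codec.der import decoder
#     key, _remainder = decoder.decode(der_bytes, asn1Spec=RSAPrivateKey())
#     modulus = int(key.getComponentByName("modulus"))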
|
nilq/baby-python
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from proton.handlers import MessagingHandler
from proton.reactor import Container
import address
import content
class Sender(MessagingHandler):
def __init__(self, url, messages):
super(Sender, self).__init__()
self.url = url
self._messages = messages
self._message_index = 0
self._sent_count = 0
self._confirmed_count = 0
def on_start(self, event):
event.container.create_sender(self.url)
def on_sendable(self, event):
while event.sender.credit and self._sent_count < len(self._messages):
message = self._messages[self._message_index]
print(message)
event.sender.send(message)
self._message_index += 1
self._sent_count += 1
def on_accepted(self, event):
self._confirmed_count += 1
if self._confirmed_count == len(self._messages):
event.connection.close()
def on_transport_error(self, event):
raise Exception(event.transport.condition)
if __name__ == "__main__":
try:
Container(Sender(address.url, content.messages)).run()
except KeyboardInterrupt:
pass
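# The sibling modules are assumed to look roughly like this (an illustration;
# they are not shown in this file):
#
#     # address.py
#     url = "localhost:5672/examples"
#
#     # content.py
#     from proton import Message
#     messages = [Message(body=u"message-%d" % i) for i in range(100)]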
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Project Euler #2: sum of the even-valued Fibonacci terms below four million.
top = 4000000
fibo1 = 2
fibo2 = 1
cumSum = 2
while fibo1 < top:
    temp = fibo1
    fibo1 = fibo1 + fibo2
    fibo2 = temp
    if fibo1 % 2 == 0 and fibo1 < top:
        print(fibo1)
        cumSum = cumSum + fibo1
print(cumSum)
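# An equivalent generator-style formulation (a sketch; computes the same
# Project Euler #2 answer without the explicit swap temporary):
#
#     def even_fib_sum(limit):
#         total, a, b = 0, 1, 2
#         while b < limit:
#             if b % 2 == 0:
#                 total += b
#             a, b = b, a + b
#         return total
#
#     print(even_fib_sum(4000000))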
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from . import regresion
import json
def fit(f_type, xvalues, yvalues):
if f_type == 'cuadratic':
print(f_type)
return
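# Hypothetical sketch of what regresion.regresionLineal might compute
# (an assumption for illustration; the real regresion module is not shown):
#
#     import numpy as np
#     def regresion_lineal_sketch(x, y):
#         b, c = np.polyfit(x, y, 1)             # least-squares slope and intercept
#         xs = np.linspace(min(x), max(x), 100).tolist()
#         ys = [b * xi + c for xi in xs]
#         return xs, ys, b, c                    # chart points plus coefficients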
# Create your views here.
def RegresionCuadratica(request):
if request.method == 'POST':
xvalues = request.POST.get("xvalues")
yvalues = request.POST.get("yvalues")
if xvalues=="" or yvalues=="":
return render(request, 'ajusteCurvas/regresionCuadratica/regresionCuadratica.html', {})
xvalues = list(map(float, xvalues.split()))
yvalues = list(map(float, yvalues.split()))
yvalues = [x for y, x in sorted(zip(xvalues, yvalues))]
xvalues.sort()
xvalues_json = json.dumps(xvalues)
yvalues_json = json.dumps(yvalues)
chartX, chartY, a, b, c = regresion.regresionCuadratica(xvalues, yvalues)
ctx={'resultado':'si', 'xvalues':xvalues_json, 'yvalues':yvalues_json, 'xchart': chartX, 'ychart':chartY, 'a':a, 'b':b, 'c':c}
else:
ctx = {}
return render(request, 'ajusteCurvas/regresionCuadratica/regresionCuadratica.html', ctx)
def RegresionLineal(request):
if request.method == 'POST':
xvalues = request.POST.get("xvalues")
yvalues = request.POST.get("yvalues")
if xvalues=="" or yvalues=="":
            return render(request, 'ajusteCurvas/regresionLineal/regresionLineal.html', {})
xvalues = list(map(float, xvalues.split()))
yvalues = list(map(float, yvalues.split()))
yvalues = [x for y, x in sorted(zip(xvalues, yvalues))]
xvalues.sort()
xvalues_json = json.dumps(xvalues)
yvalues_json = json.dumps(yvalues)
        # chartX and chartY are coordinates for the line; only the two coefficients and a linear regression routine are needed
chartX, chartY, b, c = regresion.regresionLineal(xvalues, yvalues)
ctx={'resultado':15, 'xvalues':xvalues_json, 'yvalues':yvalues_json, 'xchart': chartX, 'ychart':chartY,'b':b, 'c':c}
else:
ctx = {}
return render(request, 'ajusteCurvas/regresionLineal/regresionLineal.html', ctx)
def homeAC(request):
return render(request, 'ajusteCurvas/curva.html', {})
|
nilq/baby-python
|
python
|
import gym
from baselines import ampi
from baselines import bench
from baselines.ampi import mlp_policy
from baselines import logger
def callback(lcl, _glb):
    # stop training once the mean reward over the last 100 episodes reaches 199
is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
return is_solved
LOG_DIR="/home/gpu_user/assia/ws/tf/BPO/baselines/ampi/log/"
XP_NAME = "cost"
DO_COST = 1
def main():
# policy model
#def policy_fn(name, ob_space, ac_space):
# return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
# hid_size=64, num_hid_layers=2)
env = gym.make("CartPole-v0")
logger.configure(LOG_DIR + XP_NAME)
env = bench.Monitor(env, logger.get_dir())
# q model
model = ampi.models.mlp([64])
policy_fn = ampi.mlp_policy.mlpPolicy([64])
if DO_COST==1:
act = ampi.learn_cost(
env,
q_func=model,
policy_fn=policy_fn,
lr=1e-3,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
else:
act = ampi.learn(
env,
q_func=model,
policy_fn=policy_fn,
lr=1e-3,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
print("Saving model to cartpole_model.pkl")
#act.save(LOG_DIR + "/cartpole_model.pkl")
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""Test suite for pytorch_ranking."""
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
File name: do_color_propagation.py
Author: Varun Jampani
'''
# ---------------------------------------------------------------------------
# Video Propagation Networks
#----------------------------------------------------------------------------
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license [see LICENSE.txt for details]
# ---------------------------------------------------------------------------
import numpy as np
import scipy.io as sio
import os
import sys
from scipy import misc
import random
import copy
import gc
gc.enable()
from utils import *
from init_caffe import *
from davis_data import *
from fetch_and_transform_data import fetch_and_transform_data
from create_online_net import *
import matplotlib.pyplot as plt
max_input_points = MAX_INPUT_POINTS
total_frames = NUM_PREV_FRAMES + 1
def color_propagation(stage_id):
stage_id = int(stage_id)
out_folder = RESULT_FOLDER + '/STAGE' + str(stage_id) + '_RESULT/'
if not os.path.exists(out_folder):
os.makedirs(out_folder)
feature_scales = [0.2, 0.04, 0.04, 0.04]
# Iterate over all sequences
with open(MAIN_VAL_SEQ,'r') as f:
for seq in f:
print(seq)
seq = seq[:-1]
[inputs, num_frames] = fetch_and_transform_data(seq)
if stage_id > 0:
prev_color_file = RESULT_FOLDER + '/STAGE' + str(stage_id-1) + '_RESULT/' + seq + '/all_frame_color_result.npy'
prev_color_result = np.load(prev_color_file)
result_folder = out_folder + '/' + seq + '/'
if not os.path.exists(result_folder):
os.makedirs(result_folder)
color_result = (np.transpose(np.squeeze(inputs['input_color']), (1, 2, 0)) + 0.5) * 255.0
gray_result = np.squeeze(inputs['out_features'][:, 0, :, 0:854])[:,:,None]
full_result = np.append(gray_result, color_result, axis = 2)
rgb_result = convert_to_rgb(full_result)
misc.imsave(result_folder + '/' + str(0).zfill(5) + '.png',
rgb_result)
all_frames_color_result = inputs['input_color']
prev_frame_result = None
net_inputs = {}
net_inputs['input_color'] = inputs['input_color']
net_inputs['scales'] = np.ones((1, 4, 1, 1))
for k in range(0, 4):
net_inputs['scales'][0, k, 0, 0] = feature_scales[k]
f_value = total_frames - 1
ignore_feat_value = -1000
if stage_id == 0:
standard_net = load_bnn_deploy_net(max_input_points)
else:
caffe_model = MODEL_FOLDER + 'COLOR_STAGE1.caffemodel'
standard_net = load_bnn_cnn_deploy_net(max_input_points)
standard_net.copy_from(caffe_model)
for t in range(1, MAX_FRAMES):
print(t)
if t < f_value:
net_inputs['input_color'] = copy.copy(inputs['input_color'])
net_inputs['in_features'] = copy.copy(inputs['out_features'][:, :, :, 0: 854*t])
net_inputs['out_features'] = copy.copy(inputs['out_features'][:, :, :, 854 * t : 854 * (t+1)])
else:
net_inputs['input_color'] = copy.copy(inputs['input_color'][:, :, :, 854*(t-f_value): 854*t])
net_inputs['in_features'] = copy.copy(inputs['out_features'][:, :, :, 854*(t-f_value): 854*t])
net_inputs['out_features'] = copy.copy(inputs['out_features'][:, :, :, 854 * t : 854 * (t+1)])
height = net_inputs['in_features'].shape[2]
width = net_inputs['in_features'].shape[3]
num_input_points = height * width
# Random sampling input points
                if num_input_points > max_input_points:
                    sampled_indices = random.sample(range(num_input_points), max_input_points)
                else:
                    sampled_indices = random.sample(range(num_input_points), num_input_points)
                h_indices = (np.array(sampled_indices) // width).tolist()  # row of each sampled point
                w_indices = (np.array(sampled_indices) % width).tolist()   # column of each sampled point
net_inputs['input_color'] = net_inputs['input_color'][:, :, h_indices, w_indices]
net_inputs['input_color'] = net_inputs['input_color'][:, :, np.newaxis, :]
net_inputs['in_features'] = net_inputs['in_features'][:, :, h_indices, w_indices]
net_inputs['in_features'] = net_inputs['in_features'][:, :, np.newaxis, :]
                if num_input_points >= max_input_points:
                    prev_frame_result = standard_net.forward_all(**net_inputs)['out_color_result']
                else:
if stage_id == 0:
net = load_bnn_deploy_net(num_input_points)
else:
caffe_model = MODEL_FOLDER + 'COLOR_STAGE1.caffemodel'
net = load_bnn_cnn_deploy_net(num_input_points)
net.copy_from(caffe_model)
prev_frame_result = net.forward_all(**net_inputs)['out_color_result']
# import pdb; pdb.set_trace()
result = np.squeeze(prev_frame_result)
color_result = (np.transpose(result, (1, 2, 0)) + 0.5) * 255.0
gray_result = np.squeeze(inputs['out_features'][:, 0, :, 854 * t : 854 * (t+1)])[:,:,None]
full_result = np.append(gray_result, color_result, axis = 2)
rgb_result = convert_to_rgb(full_result)
misc.imsave(result_folder + '/' + str(t).zfill(5) + '.png',
rgb_result)
all_frames_color_result = np.append(all_frames_color_result, prev_frame_result,
axis=3)
if stage_id > 0:
prev_frame_result = prev_color_result[:, :, :, 854 * t : 854 * (t+1)]
inputs['input_color'] = np.append(inputs['input_color'],
prev_frame_result,
axis=3)
gc.collect()
# Save the all frames color result
out_file = result_folder + '/all_frame_color_result.npy'
np.save(out_file, all_frames_color_result)
return result
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: ' + sys.argv[0] + ' <stage_id>')
else:
color_propagation(int(sys.argv[1]))
|
nilq/baby-python
|
python
|
import argparse
import PIL.Image as pil_image
import numpy as np
from utils import AverageMeter, calc_psnr, calc_ssim, convert_rgb_to_y, denormalize, calc_psnr_for_eachimg, calc_ssim_for_eachimg
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr_image_file', type=str, required=False,
default="./savedimg/Set5/4/EDSR_blanced_attention_2.png")
#"D:/WFY/20200628SR/RDN-pytorch-master/对比图/对比图/img_001_SRF_4_SR CARN.png"
# "D:/WFY/20200628SR/RDN-pytorch-master/对比图/对比图/img_002_SRF_4_SR (2)CARN.png"
# "./savedimg/Set5/4/PAN_Blanced_attention_2.png"
# "./savedimg/urban100/4/PAN_Blanced_attention_66.png"
# "./savedimg/BSDS100/4/AWSRN_blanced_attention_87.png"
# "D:/WFY/20200628SR/RDN-pytorch-master/PAN/B100/69015.png"
parser.add_argument('--hr_image_file', type=str, required=False,
default="../classical_SR_datasets/Set5/Set5/butterfly.png")
#"D:/WFY/20200628SR/classical_SR_datasets/Set5/Set5/butterfly.png"
# "D:/WFY/20200628SR/classical_SR_datasets/Set14/Set14/barbara.png"
parser.add_argument('--result_image_file', type=str, required=False,
default="./result/")
opt = parser.parse_args()
image_lr = pil_image.open(opt.lr_image_file).convert('RGB')
image_hr = pil_image.open(opt.hr_image_file).convert('RGB')
if (image_lr.width != image_hr.width) or (image_lr.height != image_hr.height):
        image_lr = image_lr.resize((image_hr.width, image_hr.height), resample=pil_image.BICUBIC)
# image_lr = convert_rgb_to_y(denormalize(image_lr.squeeze(0)), dim_order='chw')
# image_hr = convert_rgb_to_y(denormalize(image_hr.squeeze(0)), dim_order='chw')
#
# psnr = calc_psnr(image_hr, image_lr)
# ssim = calc_ssim(image_hr, image_lr)
image_hr=np.array(image_hr)
image_lr = np.array(image_lr)
psnr = calc_psnr_for_eachimg(image_hr, image_lr)
ssim = calc_ssim_for_eachimg(image_hr, image_lr)
print('PSNR SSIM: {:.5f} {:.5f}'.format(psnr, ssim))
# output = pil_image.fromarray(denormalize(preds).permute(1, 2, 0).byte().cpu().numpy())
# output.save(opt.result_image_file + opt.choose_net + ('{}_x{}.png'.format(filename, opt.scale)))
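# For reference, a minimal PSNR sketch (assumes 8-bit RGB arrays; the project's
# own calc_psnr_for_eachimg in utils may differ in detail):
#
#     def psnr_sketch(hr, lr):
#         mse = np.mean((hr.astype(np.float64) - lr.astype(np.float64)) ** 2)
#         return 10 * np.log10(255.0 ** 2 / mse)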
|
nilq/baby-python
|
python
|
import warnings
from typing import Any, Callable, Dict, List, Optional, Union
from cognite.client import utils
from cognite.client._api.assets import AssetsAPI
from cognite.client._api.data_sets import DataSetsAPI
from cognite.client._api.datapoints import DatapointsAPI
from cognite.client._api.entity_matching import EntityMatchingAPI
from cognite.client._api.events import EventsAPI
from cognite.client._api.files import FilesAPI
from cognite.client._api.iam import IAMAPI
from cognite.client._api.labels import LabelsAPI
from cognite.client._api.login import LoginAPI
from cognite.client._api.raw import RawAPI
from cognite.client._api.relationships import RelationshipsAPI
from cognite.client._api.sequences import SequencesAPI
from cognite.client._api.templates import TemplatesAPI
from cognite.client._api.three_d import ThreeDAPI
from cognite.client._api.time_series import TimeSeriesAPI
from cognite.client._api_client import APIClient
from cognite.client.exceptions import CogniteAPIKeyError
from cognite.client.utils._client_config import ClientConfig
class CogniteClient:
"""Main entrypoint into Cognite Python SDK.
All services are made available through this object. See examples below.
Args:
api_key (str): API key
project (str): Project. Defaults to project of given API key.
client_name (str): A user-defined name for the client. Used to identify number of unique applications/scripts
running on top of CDF.
base_url (str): Base url to send requests to. Defaults to "https://api.cognitedata.com"
max_workers (int): Max number of workers to spawn when parallelizing data fetching. Defaults to 10.
headers (Dict): Additional headers to add to all requests.
timeout (int): Timeout on requests sent to the api. Defaults to 30 seconds.
file_transfer_timeout (int): Timeout on file upload/download requests. Defaults to 600 seconds.
proxies (Dict[str, str]): Dictionary mapping from protocol to url. e.g. {"https": "http://10.10.1.10:1080"}
token (Union[str, Callable[[], str]]): A jwt or method which takes no arguments and returns a jwt to use for authentication.
This will override any api-key set.
token_url (str): Optional url to use for token generation.
This will override the COGNITE_TOKEN_URL environment variable and only be used if both api-key and token are not set.
token_client_id (str): Optional client id to use for token generation.
This will override the COGNITE_CLIENT_ID environment variable and only be used if both api-key and token are not set.
token_client_secret (str): Optional client secret to use for token generation.
This will override the COGNITE_CLIENT_SECRET environment variable and only be used if both api-key and token are not set.
token_scopes (list): Optional list of scopes to use for token generation.
This will override the COGNITE_TOKEN_SCOPES environment variable and only be used if both api-key and token are not set.
token_custom_args (Dict): Optional additional arguments to use for token generation.
This will be passed in as optional additional kwargs to OAuth2Session fetch_token and will only be used if both api-key and token are not set.
disable_pypi_version_check (bool): Don't check for newer versions of the SDK on client creation
debug (bool): Configures logger to log extra request details to stderr.
"""
_API_VERSION = "v1"
def __init__(
self,
api_key: Optional[str] = None,
api_subversion: Optional[str] = None,
project: Optional[str] = None,
client_name: Optional[str] = None,
base_url: Optional[str] = None,
max_workers: Optional[int] = None,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[int] = None,
file_transfer_timeout: Optional[int] = None,
proxies: Optional[Dict[str, str]] = None,
token: Optional[Union[str, Callable[[], str], None]] = None,
token_url: Optional[str] = None,
token_client_id: Optional[str] = None,
token_client_secret: Optional[str] = None,
token_scopes: Optional[List[str]] = None,
token_custom_args: Optional[Dict[str, str]] = None,
disable_pypi_version_check: Optional[bool] = None,
debug: bool = False,
):
self._config = ClientConfig(
api_key=api_key,
api_subversion=api_subversion,
project=project,
client_name=client_name,
base_url=base_url,
max_workers=max_workers,
headers=headers,
timeout=timeout,
file_transfer_timeout=file_transfer_timeout,
proxies=proxies,
token=token,
token_url=token_url,
token_client_id=token_client_id,
token_client_secret=token_client_secret,
token_scopes=token_scopes,
token_custom_args=token_custom_args,
disable_pypi_version_check=disable_pypi_version_check,
debug=debug,
)
self.login = LoginAPI(self._config, cognite_client=self)
if self._config.project is None:
self._config.project = self._infer_project()
self.assets = AssetsAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.datapoints = DatapointsAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.events = EventsAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.files = FilesAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.iam = IAMAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.data_sets = DataSetsAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.sequences = SequencesAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.time_series = TimeSeriesAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.raw = RawAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.three_d = ThreeDAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.labels = LabelsAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.relationships = RelationshipsAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.entity_matching = EntityMatchingAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self.templates = TemplatesAPI(self._config, api_version=self._API_VERSION, cognite_client=self)
self._api_client = APIClient(self._config, cognite_client=self)
def get(self, url: str, params: Dict[str, Any] = None, headers: Dict[str, Any] = None):
"""Perform a GET request to an arbitrary path in the API."""
return self._api_client._get(url, params=params, headers=headers)
def post(self, url: str, json: Dict[str, Any], params: Dict[str, Any] = None, headers: Dict[str, Any] = None):
"""Perform a POST request to an arbitrary path in the API."""
return self._api_client._post(url, json=json, params=params, headers=headers)
def put(self, url: str, json: Dict[str, Any] = None, headers: Dict[str, Any] = None):
"""Perform a PUT request to an arbitrary path in the API."""
return self._api_client._put(url, json=json, headers=headers)
def delete(self, url: str, params: Dict[str, Any] = None, headers: Dict[str, Any] = None):
"""Perform a DELETE request to an arbitrary path in the API."""
return self._api_client._delete(url, params=params, headers=headers)
@property
def version(self) -> str:
"""Returns the current SDK version.
Returns:
str: The current SDK version
"""
return utils._auxiliary.get_current_sdk_version()
@property
def config(self) -> ClientConfig:
"""Returns a config object containing the configuration for the current client.
Returns:
ClientConfig: The configuration object.
"""
return self._config
def _infer_project(self):
login_status = self.login.status()
if login_status.logged_in:
warnings.warn(
"Authenticated towards inferred project '{}'. Pass project to the CogniteClient constructor or set"
" the environment variable 'COGNITE_PROJECT' to suppress this warning.".format(login_status.project),
stacklevel=3,
)
return login_status.project
else:
raise CogniteAPIKeyError("Invalid API key")
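# Usage sketch (illustrative; credentials and project are placeholders, and the
# .list() call is the conventional SDK listing method, assumed here):
#
#     c = CogniteClient(api_key="...", project="my-project", client_name="my-script")
#     ts = c.time_series.list(limit=10)
#     print(c.version)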
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import time
# select focuses on I/O multiplexing, which makes a concurrent server possible
import select
# 用于类的序列化和反序列化
import json
from master import *
import server_pool
from master2 import *
import socket
class EventLoop(object):
def __init__(self):
self._stopping = False
        # create a socket; AF_INET selects IPv4, SOCK_STREAM selects TCP
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.conn_rd = [sock]
        # holds per-port service information
        self.portdict = {}
        # imported from elsewhere; the original author was unsure what these two do
self.socketbridge = SocketBridge()
self.socketbridge.start_as_daemon()
@property
def getPortdict(self):
return self.portdict
def thread_stop(self):
self._stopping = True
exists = self.portdict.keys()
for cur in exists:
logging.info("loop dispose port {}".format(cur))
            # what is the dispose() function?
self.portdict[cur].dispose()
def run(self):
while not self._stopping:
# logging.debug('hh:{}'.format(r['hh']))
            f = None
            try:
                f = open('config.json', 'r')
configs = json.load(f)
                # read the http config
                http = configs['http']
                # read the tcp config
                tcp = configs['tcp']
check = {}
if http:
                    # extract the host address and port number
host, port = http['customer'].split(":")
if self.portdict.get(port) is None:
self.portdict[port] = http_service(http)
self.portdict[port].updateconfig(http)
check[port] = http
for c in tcp:
host, port = c['master'].split(":")
check[port] = c
exists = self.portdict.keys()
removelist = []
for cur in exists:
if not check.get(cur):
logging.info("loop dispose port {}".format(cur))
self.portdict[cur].dispose()
removelist.append(cur)
for re in removelist:
self.portdict.pop(re)
for (port,c) in check.items():
if not self.portdict.get(port):
logging.info("run init {}".format(c['master']))
self.portdict[port] = Mastar_line(self.socketbridge)
self.portdict[port].main_master(c)
except Exception as e:
logging.info("fail config.json e:{}".format(e))
finally:
if f:
f.close()
# logging.debug('using event model: 123')
logging.info("bridgeAdd:{},bridgeRemove:{}".format(server_pool.ServerPool.bridgeAdd,server_pool.ServerPool.bridgeRemove))
time.sleep(10)
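# Illustrative shape of config.json as the loop above reads it (an assumption
# inferred from the keys accessed, not a documented schema):
#
#     {
#       "http": {"customer": "0.0.0.0:8080"},
#       "tcp": [{"master": "0.0.0.0:9000"}]
#     }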
|
nilq/baby-python
|
python
|
"""Timers Tests"""
from datetime import datetime, timedelta
from itertools import starmap
from operator import sub
from time import time
import pytest
from circuits import Component, Event, Timer, sleep
from circuits.six.moves import map, zip
@pytest.fixture
def app(request, manager, watcher):
app = App().register(manager)
assert watcher.wait("registered")
def finalizer():
app.unregister()
assert watcher.wait("unregistered")
request.addfinalizer(finalizer)
return app
class single(Event):
"""single Event"""
complete = True
class persistent(Event):
"""persistent Event"""
complete = True
class App(Component):
def init(self):
self.flag = False
self.count = 0
self.timestamps = []
def single(self):
self.timestamps.append(time())
self.count += 1
self.flag = True
def persistent(self, interval):
timer = Timer(interval, single(), persist=True)
timer.register(self)
yield sleep(interval * 10)
timer.unregister()
def test_single(app, watcher):
Timer(0.1, single()).register(app)
assert watcher.wait("single_complete")
assert app.flag
def test_persistent(app, watcher):
exponent = -1
interval = 10.0 ** exponent
app.fire(persistent(interval))
assert watcher.wait("persistent_complete")
xs = list(map(abs, starmap(sub, zip(app.timestamps, app.timestamps[1:]))))
avg = sum(xs) / len(xs)
assert round(avg, abs(exponent)) == interval
def test_datetime(app, watcher):
now = datetime.now()
d = now + timedelta(seconds=0.1)
Timer(d, single()).register(app)
assert watcher.wait("single_complete")
assert app.flag
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class Lab1Config(AppConfig):
name = 'lab1'
|
nilq/baby-python
|
python
|
#######################################################################
# Name: test_semantic_action_results
# Purpose: Tests semantic action results passed to first_pass call
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
# proj
from arpeggio import *
from arpeggio import RegExMatch as _
from arpeggio.export import PTDOTExporter
def grammar(): return first, "a", second
def first(): return [fourth, third], ZeroOrMore(third)
def second(): return OneOrMore(third), "b"
def third(): return [third_str, fourth]
def third_str(): return "3"
def fourth(): return _(r'\d+')
first_sar = None
third_sar = None
class Visitor(PTNodeVisitor):
def visit_first(self, node, children):
global first_sar
first_sar = children
def visit_third(self, node, children):
global third_sar
third_sar = children
return 1
def test_semantic_action_results():
global first_sar, third_sar
peg_input = "4 3 3 3 a 3 3 b"
parser = ParserPython(grammar, reduce_tree=False)
result = parser.parse(peg_input)
PTDOTExporter().exportFile(result, 'test_semantic_action_results_pt.dot')
visit_parse_tree(result, Visitor())
assert isinstance(first_sar, SemanticActionResults)
assert len(first_sar.third) == 3
assert third_sar.third_str[0] == '3'
|
nilq/baby-python
|
python
|
from pathlib import Path
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from zelt.kubernetes import deployer
from zelt.kubernetes.manifest import Manifest
from zelt.kubernetes.manifest_set import ManifestSet
from zelt.kubernetes.storage.configmap import ConfigmapStorage
@pytest.fixture()
def manifest_set(tmp_path: Path) -> ManifestSet:
manifest_file = Path(tmp_path, "a_manifest.yaml")
with manifest_file.open("w") as f:
f.write(
"""
apiVersion: apps/v1
kind: Deployment
metadata:
name: a_controller
namespace: some_namespace
labels:
application: some_application
role: controller
spec:
replicas: 1
selector:
matchLabels:
application: some_application
role: controller"""
)
return ManifestSet(
namespace=Manifest.from_file(manifest_file),
service=Manifest.from_file(manifest_file),
ingress=Manifest.from_file(manifest_file),
controller=Manifest.from_file(manifest_file),
worker=Manifest.from_file(manifest_file),
others=[],
)
@pytest.fixture()
def locustfile(tmp_path: Path) -> Path:
locustfile = Path(tmp_path, "some.py")
with locustfile.open("w") as f:
f.write("")
return locustfile
@pytest.fixture()
def configmap_storage() -> ConfigmapStorage:
return ConfigmapStorage(namespace="a-namespace", labels={"some": "labels"})
class TestCreateResources:
@patch("zelt.kubernetes.client.config")
@patch("zelt.kubernetes.client.try_creating_custom_objects")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespace")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespaced_service")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespaced_config_map")
@patch("zelt.kubernetes.client.NetworkingV1beta1Api.create_namespaced_ingress")
@patch("zelt.kubernetes.client.AppsV1Api.create_namespaced_deployment")
@patch("zelt.kubernetes.client.wait_until_pod_ready")
def test_it_deploys_all_given_manifests_and_configmap(
self,
wait,
create_deployment,
create_ingress,
create_configmap,
create_service,
create_namespace,
create_custom_objects,
config,
configmap_storage: ConfigmapStorage,
locustfile: Path,
manifest_set: ManifestSet,
):
deployer.create_resources(
ms=manifest_set, storage=configmap_storage, locustfile=locustfile
)
create_namespace.assert_called_once()
create_service.assert_called_once()
create_configmap.assert_called_once()
create_ingress.assert_called_once()
create_custom_objects.assert_not_called()
assert create_deployment.call_count == 2
@patch("zelt.kubernetes.client.config")
@patch("zelt.kubernetes.client.try_creating_custom_objects")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespace")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespaced_service")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespaced_config_map")
@patch("zelt.kubernetes.client.NetworkingV1beta1Api.create_namespaced_ingress")
@patch("zelt.kubernetes.client.AppsV1Api.create_namespaced_deployment")
@patch("zelt.kubernetes.client.wait_until_pod_ready")
def test_it_deploys_custom_manifests(
self,
wait,
create_deployment,
create_ingress,
create_configmap,
create_service,
create_namespace,
create_custom_objects,
config,
configmap_storage: ConfigmapStorage,
locustfile: Path,
manifest_set: ManifestSet,
):
manifest_set = manifest_set._replace(others=[manifest_set.namespace] * 2)
deployer.create_resources(
ms=manifest_set, storage=configmap_storage, locustfile=locustfile
)
create_custom_objects.assert_called_once()
@patch("zelt.kubernetes.client.config")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespace")
@patch("zelt.kubernetes.client.CoreV1Api.create_namespaced_service")
@patch("zelt.kubernetes.client.NetworkingV1beta1Api.create_namespaced_ingress")
@patch("zelt.kubernetes.client.AppsV1Api.create_namespaced_deployment")
@patch("zelt.kubernetes.client.wait_until_pod_ready")
def test_it_does_not_deploy_workers_when_given_none(
self,
wait,
create_deployment,
create_ingress,
create_service,
create_namespace,
config,
manifest_set: ManifestSet,
):
manifest_set = manifest_set._replace(worker=None)
deployer.create_resources(
ms=manifest_set, storage=MagicMock(), locustfile=MagicMock()
)
create_namespace.assert_called_once()
create_service.assert_called_once()
create_ingress.assert_called_once()
assert create_deployment.call_count == 1
class TestDeleteResources:
@patch("zelt.kubernetes.client.config")
@patch("zelt.kubernetes.client.CoreV1Api.delete_namespace")
@patch("zelt.kubernetes.client.CoreV1Api.delete_namespaced_service")
@patch("zelt.kubernetes.client.CoreV1Api.delete_namespaced_config_map")
@patch("zelt.kubernetes.client.NetworkingV1beta1Api.delete_namespaced_ingress")
@patch("zelt.kubernetes.client.await_no_resources_found")
@patch("zelt.kubernetes.client.AppsV1Api.delete_collection_namespaced_deployment")
def test_it_deletes_all_given_manifests_and_configmap(
self,
delete_deployments,
wait,
delete_ingress,
delete_configmap,
delete_service,
delete_namespace,
config,
configmap_storage: ConfigmapStorage,
manifest_set: ManifestSet,
):
deployer.delete_resources(ms=manifest_set, storage=configmap_storage)
delete_namespace.assert_called_once()
delete_service.assert_called_once()
delete_configmap.assert_called_once()
delete_ingress.assert_called_once()
delete_deployments.assert_called_once()
class TestUpdateWorkerPods:
def test_it_does_nothing_if_no_worker_manifest_exists(
self, manifest_set: ManifestSet
):
manifest_set = manifest_set._replace(worker=None)
deployer.update_worker_pods(manifest_set, 2)
assert manifest_set.worker is None
def test_it_replaces_the_number_of_worker_replicas_in_place(
self, manifest_set: ManifestSet
):
original_replicas = int(manifest_set.worker.body["spec"]["replicas"])
expected_replicas = original_replicas + 1
deployer.update_worker_pods(manifest_set, expected_replicas)
assert int(manifest_set.worker.body["spec"]["replicas"]) == expected_replicas
def test_it_only_updates_worker_manifest_replicas(self, manifest_set):
controller_replicas = int(manifest_set.controller.body["spec"]["replicas"])
deployer.update_worker_pods(manifest_set, controller_replicas + 1)
assert (
int(manifest_set.controller.body["spec"]["replicas"]) == controller_replicas
)
class TestRescaleWorkerDeployment:
@patch("zelt.kubernetes.client.AppsV1Api.replace_namespaced_deployment")
def test_it_does_not_rescale_when_not_given_a_worker_manifest(
self, rescale, manifest_set: ManifestSet
):
manifest_set = manifest_set._replace(worker=None)
deployer.rescale_worker_deployment(manifest_set, 0)
rescale.assert_not_called()
@patch("zelt.kubernetes.client.config")
@patch("zelt.kubernetes.client.AppsV1Api.read_namespaced_deployment")
@patch("zelt.kubernetes.client.AppsV1Api.replace_namespaced_deployment")
def test_it_rescales_when_given_a_worker_manifest(
self, rescale, _read, _config, manifest_set: ManifestSet
):
deployer.rescale_worker_deployment(manifest_set, 0)
rescale.assert_called_once()
|
nilq/baby-python
|
python
|
from DB_Tables.Comments_Table import Comment_Table
from DB_Tables.InlineComment_Table import InlineComment_Table
from DB_Tables.PatchDetail_Table import PatchDetail_Table
from DB_Tables.Patch_Table import Patch_Table
from DB_Tables.People_Table import People_Table
from DB_Tables.RequestDetail_Table import RequestDetail_Table
from DB_Tables.Request_Table import Request_Table
from DB_Tables.Reviews_Table import Reviews_Table
from Database.DB_Queries import DB_Queries
from Gerrit_Extractor.ReviewExtractor import ReviewExtractor
class Controller:
def __init__(self, url='', username='', password=''):
        if url == '':
url='https://gerrit.iotivity.org/gerrit/'
self.url = url
self.username = username
self.password = password
self.reviewExtractor=ReviewExtractor(self.url,self.username,self.password)
self.db = DB_Queries()
print('DB Connected Successfully')
self.changeList = []
self.changeDetailList = []
self.commentCount = {}
self.inlineCommentList = []
self.rt = Request_Table(self.db)
self.rDetail_Table = RequestDetail_Table(self.db)
self.pt = People_Table(self.db)
self.reviewsTable = Reviews_Table(self.db)
self.ct = Comment_Table(self.db)
self.iCom = InlineComment_Table(self.db)
self.pDetailTable = PatchDetail_Table(self.db)
self.patchTable = Patch_Table(self.db)
def getProjectList(self):
self.projectDict,self.projectNameList=self.reviewExtractor.getProjectList()
def getAllChangeList(self):
self.changeList=self.reviewExtractor.getAllChangeList(self.projectDict)
def getChangeDetailList(self):
self.changeDetailList=self.reviewExtractor.getChangeDetailList(self.changeList)
def getTopic(self):
self.changeDetailList=self.reviewExtractor.getTopic(self.changeDetailList)
def getRevisions(self):
self.changeDetailList = self.reviewExtractor.getRevisions(self.changeDetailList)
def getReviewerList(self):
changeList = self.db.fetchChangeList()
self.reviewerList=self.reviewExtractor.getReviewerList(changeList)
def getInlineComment(self):
changeList = self.db.fetchChangeList()
self.inlineCommentList, self.commentCount = self.reviewExtractor.getInlineComment(changeList)
#print(self.inlineCommentList)
def getPatchDetail(self):
changeList = self.db.fetchChangeList()
self.patchDetailList = self.reviewExtractor.getPatchDetail(changeList)
#print(self.inlineCommentList)
def extractAllInfo(self):
self.getProjectList()
self.getAllChangeList()
self.getChangeDetailList()
self.getTopic()
self.getRevisions()
self.getPatchDetail()
self.getInlineComment()
self.getReviewerList()
def prepRequest(self):
self.rt.prepRequest(self.changeDetailList)
def prepRequestDetail(self):
self.rDetail_Table.prepRequestDetail(self.changeDetailList)
def prepComment(self):
self.ct.prepComment(self.changeDetailList)
def prepPeople(self):
# print(self.reviewerList)
self.pt.prepPeople(self.reviewerList)
def prepReviews(self):
# print(self.reviewerList)
self.reviewsTable.prepReviews(self.reviewerList)
def prepInlineComment(self):
self.iCom.prepInlineComment(self.inlineCommentList)
def prepPatchDetail(self):
self.pDetailTable.prepPatchDetail(self.patchDetailList)
def prepPatch(self):
self.patchTable.prepPatch(self.patchDetailList, self.commentCount)
def fillAllTable(self):
self.prepRequest()
self.prepRequestDetail()
self.prepComment()
self.prepInlineComment()
self.prepPatchDetail()
self.prepPatch()
self.prepPeople()
self.prepReviews()
def execute(self):
self.getProjectList()
self.getAllChangeList()
self.getChangeDetailList()
self.getTopic()
self.getRevisions()
self.prepRequest()
self.prepRequestDetail()
self.prepComment()
self.getInlineComment()
self.prepInlineComment()
self.getReviewerList()
self.prepPeople()
self.prepReviews()
self.getPatchDetail()
self.prepPatchDetail()
self.prepPatch()
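# Usage sketch (credentials are placeholders; requires network access to the
# Gerrit instance and a configured database behind DB_Queries):
#
#     controller = Controller(username="user", password="secret")
#     controller.execute()   # extract reviews from Gerrit and fill every table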
|
nilq/baby-python
|
python
|
# flake8: noqa F401
from wagtailregulations.models.django import (
EffectiveVersion,
Part,
Section,
Subpart,
sortable_label,
)
from wagtailregulations.models.pages import RegulationPage, RegulationPageMixin
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.chat,
name='chatRoom'
),
url(r'^new/$', views.new_room, name='new_room'),
url(r'^(?P<label>[\w-]{,50})/$', views.chat_room, name='chat_room'),
]
|
nilq/baby-python
|
python
|
__author__ = """Romain Picard"""
__email__ = 'romain.picard@oakbits.com'
__version__ = '0.1.0'
from .types import NamedObservable, Update, Updated
from . import pullable
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from swagger_client.api.aliens_api_api import AliensApiApi
from swagger_client.api.games_api_api import GamesApiApi
from swagger_client.api.logs_api_api import LogsApiApi
from swagger_client.api.scoreboard_api_api import ScoreboardApiApi
from swagger_client.api.submissions_api_api import SubmissionsApiApi
from swagger_client.api.teams_api_api import TeamsApiApi
from swagger_client.api.tournaments_api_api import TournamentsApiApi
|
nilq/baby-python
|
python
|
import uuid
from django.conf import settings
from django.test import TestCase
from mock import patch
from sqlalchemy import create_engine
from sqlalchemy.exc import ProgrammingError
from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration
from corehq.apps.userreports.pillow import ConfigurableIndicatorPillow
from corehq.apps.userreports.reports.factory import ReportFactory
from corehq.apps.userreports.sql.connection import get_engine_id
from corehq.apps.userreports.tests.utils import get_sample_data_source, get_sample_doc_and_indicators, \
get_sample_report_config
from corehq.apps.userreports.sql import IndicatorSqlAdapter
from corehq import db
class UCRMultiDBTest(TestCase):
@classmethod
def setUpClass(cls):
cls.db2_name = 'cchq_ucr_tests'
db_conn_parts = settings.SQL_REPORTING_DATABASE_URL.split('/')
db_conn_parts[-1] = cls.db2_name
cls.db2_url = '/'.join(db_conn_parts)
# setup patches
cls.connection_string_patch = patch('corehq.db.connection_manager.get_connection_string')
def connection_string_for_engine(engine_id):
if engine_id == 'engine-1':
return settings.SQL_REPORTING_DATABASE_URL
else:
return cls.db2_url
mock_manager = cls.connection_string_patch.start()
mock_manager.side_effect = connection_string_for_engine
# setup data sources
data_source_template = get_sample_data_source()
cls.ds_1 = DataSourceConfiguration.wrap(data_source_template.to_json())
cls.ds_1.engine_id = 'engine-1'
cls.ds_1.save()
cls.ds_2 = DataSourceConfiguration.wrap(data_source_template.to_json())
cls.ds_2.engine_id = 'engine-2'
cls.ds_2.save()
# use db1 engine to create db2 http://stackoverflow.com/a/8977109/8207
cls.root_engine = create_engine(settings.SQL_REPORTING_DATABASE_URL)
conn = cls.root_engine.connect()
conn.execute('commit')
try:
conn.execute('CREATE DATABASE {}'.format(cls.db2_name))
except ProgrammingError:
# optimistically assume it failed because was already created.
pass
conn.close()
cls.ds1_adapter = IndicatorSqlAdapter(cls.ds_1)
cls.ds2_adapter = IndicatorSqlAdapter(cls.ds_2)
def setUp(self):
# initialize the tables
self.ds1_adapter.rebuild_table()
self.ds2_adapter.rebuild_table()
self.assertEqual(0, self.ds1_adapter.get_query_object().count())
self.assertEqual(0, self.ds2_adapter.get_query_object().count())
@classmethod
def tearDownClass(cls):
# unpatch
cls.connection_string_patch.stop()
# delete data sources
cls.ds_1.delete()
cls.ds_2.delete()
# dispose secondary engine
cls.ds2_adapter.session_helper.engine.dispose()
# drop the secondary database
conn = cls.root_engine.connect()
conn.execute('rollback')
try:
conn.execute('DROP DATABASE {}'.format(cls.db2_name))
finally:
conn.close()
cls.root_engine.dispose()
def tearDown(self):
self.ds1_adapter.session_helper.Session.remove()
self.ds2_adapter.session_helper.Session.remove()
self.ds1_adapter.drop_table()
self.ds2_adapter.drop_table()
def test_patches_and_setup(self):
self.assertEqual('engine-1', get_engine_id(self.ds_1))
self.assertEqual('engine-2', get_engine_id(self.ds_2))
self.assertEqual(settings.SQL_REPORTING_DATABASE_URL,
db.connection_manager.get_connection_string('engine-1'))
self.assertEqual(self.db2_url,
db.connection_manager.get_connection_string('engine-2'))
self.assertNotEqual(str(self.ds1_adapter.engine.url), str(self.ds2_adapter.engine.url))
self.assertEqual(settings.SQL_REPORTING_DATABASE_URL, str(self.ds1_adapter.engine.url))
self.assertEqual(self.db2_url, str(self.ds2_adapter.engine.url))
def test_pillow_save_to_multiple_databases(self):
self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
pillow = ConfigurableIndicatorPillow()
pillow.bootstrap(configs=[self.ds_1, self.ds_2])
self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
sample_doc, _ = get_sample_doc_and_indicators()
pillow.change_transport(sample_doc)
self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
self.assertEqual(1, self.ds1_adapter.get_query_object().count())
self.assertEqual(1, self.ds2_adapter.get_query_object().count())
def test_pillow_save_to_one_database_at_a_time(self):
pillow = ConfigurableIndicatorPillow()
pillow.bootstrap(configs=[self.ds_1])
sample_doc, _ = get_sample_doc_and_indicators()
pillow.change_transport(sample_doc)
self.assertEqual(1, self.ds1_adapter.get_query_object().count())
self.assertEqual(0, self.ds2_adapter.get_query_object().count())
# save to the other
pillow.bootstrap(configs=[self.ds_2])
sample_doc['_id'] = uuid.uuid4().hex
pillow.change_transport(sample_doc)
self.assertEqual(1, self.ds1_adapter.get_query_object().count())
self.assertEqual(1, self.ds2_adapter.get_query_object().count())
self.assertEqual(1, self.ds1_adapter.get_query_object().filter_by(doc_id='some-doc-id').count())
self.assertEqual(1, self.ds2_adapter.get_query_object().filter_by(doc_id=sample_doc['_id']).count())
def test_report_data_source(self):
# bootstrap report data sources against indicator data sources
report_config_template = get_sample_report_config()
report_config_1 = ReportConfiguration.wrap(report_config_template.to_json())
report_config_1.config_id = self.ds_1._id
report_config_2 = ReportConfiguration.wrap(report_config_template.to_json())
report_config_2.config_id = self.ds_2._id
# save a few docs to ds 1
sample_doc, _ = get_sample_doc_and_indicators()
num_docs = 3
for i in range(num_docs):
sample_doc['_id'] = uuid.uuid4().hex
self.ds1_adapter.save(sample_doc)
# ds 1 should have data, ds2 should not
ds1_rows = ReportFactory.from_spec(report_config_1).get_data()
self.assertEqual(1, len(ds1_rows))
self.assertEqual(num_docs, ds1_rows[0]['count'])
ds2_rows = ReportFactory.from_spec(report_config_2).get_data()
self.assertEqual(0, len(ds2_rows))
# save one doc to ds 2
sample_doc['_id'] = uuid.uuid4().hex
self.ds2_adapter.save(sample_doc)
# ds 1 should still have same data, ds2 should now have one row
ds1_rows = ReportFactory.from_spec(report_config_1).get_data()
self.assertEqual(1, len(ds1_rows))
self.assertEqual(num_docs, ds1_rows[0]['count'])
ds2_rows = ReportFactory.from_spec(report_config_2).get_data()
self.assertEqual(1, len(ds2_rows))
self.assertEqual(1, ds2_rows[0]['count'])
|
nilq/baby-python
|
python
|
"""
Model class for Message.
Refer: https://developers.facebook.com/docs/graph-api/reference/message
"""
from dataclasses import dataclass
from typing import List, Optional
from dataclasses_json import config
from pyfacebook.models.base import BaseModel, field
@dataclass
class Tag(BaseModel):
"""
A class representing the tag in message.
"""
name: Optional[str] = field(repr=True)
@dataclass
class MessageTags(BaseModel):
"""
A class representing the tags in message.
"""
data: Optional[List[Tag]] = field(repr=True)
@dataclass
class MsgUser(BaseModel):
"""
A class representing the User in message.
"""
id: Optional[str] = field(repr=True, compare=True)
name: Optional[str] = field(repr=True)
email: Optional[str] = field()
@dataclass
class MessageTo(BaseModel):
"""
A class representing the to in message.
"""
data: Optional[List[MsgUser]] = field(repr=True)
@dataclass
class MessageAttachmentImageData(BaseModel):
"""
A class representing the image in attachment.
"""
width: Optional[int] = field()
height: Optional[int] = field()
max_width: Optional[int] = field()
max_height: Optional[int] = field()
url: Optional[str] = field(repr=True)
preview_url: Optional[str] = field()
raw_gif_image: Optional[str] = field()
raw_webp_image: Optional[str] = field()
animated_gif_url: Optional[str] = field()
animated_gif_preview_url: Optional[str] = field()
animated_webp_url: Optional[str] = field()
animated_webp_preview_url: Optional[str] = field()
image_type: Optional[int] = field()
render_as_sticker: Optional[bool] = field()
@dataclass
class MessageAttachmentVideoData(BaseModel):
"""
A class representing the video in attachment.
"""
width: Optional[int] = field()
height: Optional[int] = field()
length: Optional[int] = field()
video_type: Optional[int] = field()
url: Optional[str] = field(repr=True)
preview_url: Optional[str] = field()
rotation: Optional[int] = field()
@dataclass
class MessageAttachment(BaseModel):
"""
A class representing the Attachment in message.
Refer: https://developers.facebook.com/docs/graph-api/reference/v11.0/message/attachments
"""
id: Optional[str] = field(repr=True, compare=True)
mime_type: Optional[str] = field()
name: Optional[str] = field()
size: Optional[int] = field()
file_url: Optional[str] = field()
image_data: Optional[MessageAttachmentImageData] = field()
video_data: Optional[MessageAttachmentVideoData] = field()
@dataclass
class MessageAttachments(BaseModel):
"""
A class representing the attachments in message.
"""
data: Optional[List[MessageAttachment]] = field(repr=True)
@dataclass
class Message(BaseModel):
"""
    A class representing the Message.
"""
id: Optional[str] = field(repr=True, compare=True)
created_time: Optional[str] = field()
_from: Optional[dict] = field(metadata=config(field_name="from"))
message: Optional[str] = field()
tags: Optional[MessageTags] = field()
to: Optional[MessageTo] = field()
sticker: Optional[str] = field()
# common connections
attachments: Optional[MessageAttachments] = field()
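if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module. It assumes the
    # pyfacebook BaseModel field() helper defaults every field to None, so
    # keyword construction with a subset of fields works.
    msg = Message(id="m_123", message="hello")
    print(msg)  # the repr shows only fields declared with repr=True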
|
nilq/baby-python
|
python
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 Peter Williams and collaborators.
# Licensed under the MIT License.
"""Various properties of plasma oscillations.
Most equation references are to Stix (1992), "Waves in Plasmas".
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
Modes
Parameters
cutoff_frequencies
omega_plasma
omega_cyclotron
phase_velocity
resonance_frequencies
wavelength
wavenumber
'''.split()
import numpy as np
from pwkit import cgs
def omega_plasma(number_density, mass):
"""Compute the plasma frequency.
number_density
The number density of the particles, in cm^-3
mass
The mass of each particle, in g.
Returns:
The plasma frequency, in rad/s.
"""
return np.sqrt(4 * np.pi * number_density * cgs.e**2 / mass)
def omega_cyclotron(q, B, mass):
"""Compute the cyclotron frequency.
q
      The sign of the charge of the particles, in units of the elementary charge: -1 for electrons, +1 for protons.
B
The ambient magnetic field strength in Gauss.
mass
The mass of each particle, in g.
Returns:
The cyclotron frequency, in rad/s.
We follow the convention of Stix and some other authors in that this value
has a sign that depends on the sign of the charge of the species in
question.
"""
return q * cgs.e * B / (mass * cgs.c)
def phase_velocity(refractive_index):
"""Compute the phase velocity in cm/s from the refractive index."""
return cgs.c / refractive_index
def wavelength(refractive_index, omega):
"""Compute the wavelength of a wave.
refractive_index
The refractive index of the wave (dimensionless).
omega
The temporal frequency of the wave, in rad/s.
Returns:
The wavelength, in cm.
"""
return 2 * np.pi * cgs.c / (refractive_index * omega)
def wavenumber(refractive_index, omega):
"""Compute the wavenumber of a wave.
refractive_index
The refractive index of the wave (dimensionless).
omega
The temporal frequency of the wave, in rad/s.
Returns:
The wavenumber, in cm^-1.
"""
return refractive_index * omega / cgs.c
def cutoff_frequencies(n_e, B, gamma=1.):
"""Calculate the wave cutoff frequencies associated with a plasma.
n_e
The number density of electrons in cm^-3. Neutrality is
assumed so this is also the number density of protons.
B
The ambient magnetic field strength in Gauss.
gamma
The Lorentz factor of the electrons. This is used to
      adjust the effective mass of the electrons. The protons are
assumed to have an effective Lorentz factor of unity.
Returns:
A sorted, 1D array of up to three cutoff frequencies in rad/s. At low
densities the highest-frequency cutoff is pretty much the electron
cyclotron frequency. Divide by (2 * pi * 1e9) to get GHz.
Waves that propagate into a cutoff are reflected
(https://farside.ph.utexas.edu/teaching/plasma/lectures1/node48.html).
Based on setting R, L, and P = 0 in Stix equations (2-1)--(2-3). The first
two can be converted into quadratics in omega, and they differ only by the
sign of B.
"""
m_e = gamma * cgs.me
m_i = cgs.mp
n_i = n_e
om_pe = omega_plasma(n_e, m_e)
om_pi = omega_plasma(n_i, m_i)
om_ce = omega_cyclotron(-1, B, m_e)
om_ci = omega_cyclotron(+1, B, m_i)
cutoffs = [np.sqrt(om_pe**2 + om_pi**2)] # P = 0 cutoff is trivial.
A = 1.
B = -(om_ce + om_ci) # this is the L = 0 cutoff; we're destroying the magnetic field variable
C = om_ce * om_ci - om_pe**2 - om_pi**2
if 4 * A * C > B**2:
return np.array(cutoffs) # no other valid solutions.
# Between R and L and the +/- in the quadratic equations, there are four
    # possible solutions, two of which are negations of the other two, so there
# are always two nonnegative solutions. If RHS == 0 they're the same
# number, though. `B` and `RHS` as we've defined them are always
# nonnegative.
prefactor = 1. / (2 * A)
rhs = np.sqrt(B**2 - 4 * A * C)
cutoffs.append(prefactor * (B + rhs))
if rhs != 0.:
if rhs > B:
cutoffs.append(prefactor * (rhs - B))
else:
cutoffs.append(prefactor * (B - rhs))
return np.array(sorted(cutoffs))
def resonance_frequencies(n_e, B, theta, gamma=1.):
"""Calculate the wave resonance frequencies associated with propagation in a
particular plasma.
n_e
The number density of electrons in cm^-3. Neutrality is
assumed so this is also the number density of protons.
B
The ambient magnetic field strength in Gauss.
theta
The angle of wave propagation relative to the magnetic field, in
radians. The resonance condition depends on this value.
gamma
The Lorentz factor of the electrons. This is used to
      adjust the effective mass of the electrons. The protons are
assumed to have an effective Lorentz factor of unity.
Returns:
A sorted, 1D array of up to three resonance frequencies in rad/s.
(Probably the actual number of such resonances is always 1 or 0.) Divide
by (2 * pi * 1e9) to get GHz.
This usually works out to be around the electron plasma frequency as theta
=> 0., and a smaller value as theta => 90.
FIXME: more intuitive understanding of how these numbers work out in the
various parameter limits (e.g. low densities, theta => 0, theta => 90
degrees, etc.).
Waves that propagate into a resonance are absorbed, heating the plasma
(https://farside.ph.utexas.edu/teaching/plasma/lectures1/node48.html).
Based on the solution of Stix equation (1-45) with plasma parameters
determined from equations (1-19) and (2-1)--(2-3). Some naive algebra
converts the condition into a cubic in the square of omega.
"""
m_e = gamma * cgs.me
m_i = cgs.mp
n_i = n_e
om_pe = omega_plasma(n_e, m_e)
om_pi = omega_plasma(n_i, m_i)
om_ce = omega_cyclotron(-1, B, m_e)
om_ci = omega_cyclotron(+1, B, m_i)
q = np.tan(theta)**2
j2 = om_ce**2 + om_ci**2
k2 = om_pe**2 + om_pi**2
c3 = q + 1
c2 = -(q + 1) * (j2 + k2)
    c1 = (q + 1) * om_ce**2 * om_ci**2 - q * k2 * om_ce * om_ci + k2 * j2
    c0 = -k2 * om_ce**2 * om_ci**2
roots = np.roots([c3, c2, c1, c0])
z = roots[np.abs(roots.imag) / np.abs(roots) < 1e-8].real
z = z[z > 0]
z = np.sort(z)
return np.sqrt(z)
class _Modes(object):
FAST = 0
SLOW = 1
RIGHT = 0
LEFT = 1
ORDINARY = 0
EXTRAORDINARY = 1
Modes = _Modes()
class Parameters(object):
def _finish(self):
"Stix equation 1-19."
self.S = 0.5 * (self.R + self.L)
self.D = 0.5 * (self.R - self.L)
return self
@classmethod
def new_basic(cls, ghz, n_e, B, gamma=1.):
"""Set up plasma parameters for an electron-proton plasma
in the standard cold approximation.
ghz
The oscillation frequency of the modes to consider, in GHz.
(Note that ideally we'd express this in terms of the wavenumber
          `k`, but the expressions that we use depend on `omega` instead.)
n_e
The number density of electrons in cm^-3. Neutrality is
assumed so this is also the number density of protons.
B
The ambient magnetic field strength in Gauss.
gamma
The Lorentz factor of the electrons. This is used to
          adjust the effective mass of the electrons. The protons are
assumed to have an effective Lorentz factor of unity.
Returns:
A new Parameters instance.
This function implements equations 1-47 and 1-48 in Stix.
"""
m_e = gamma * cgs.me
m_i = cgs.mp
n_i = n_e
omega = 2 * np.pi * ghz * 1e9
om_pe = omega_plasma(n_e, m_e)
om_pi = omega_plasma(n_i, m_i)
om_ce = omega_cyclotron(-1, B, m_e)
om_ci = omega_cyclotron(+1, B, m_i)
alpha = om_pe**2 / omega**2
beta = om_ci / omega
#gamma = (om_pi / om_ci)**2 -- defined by Stix but not needed
mu = np.abs(om_ce / om_ci) # = m_p / m_e ~ 43^2
obj = cls()
obj.omega = omega
obj.R = 1. - alpha / (mu * beta + mu) + alpha / (mu * beta - 1)
obj.L = 1. + alpha / (mu * beta - mu) - alpha / (mu * beta + 1)
obj.P = 1. - alpha / mu - alpha
return obj._finish()
@classmethod
def new_for_cma_diagnostic(cls, x, y, mass_ratio = 2.5):
"""Set up plasma parameters to reproduce the CMA diagram shown in
Stix Figure 2-1.
x
The X coordinate of the CMA diagram in question:
`(om_pi^2 + om_pi^2) / om`.
y
The Y coordinate of the CMA diagram in question: `|Om_e| / om`.
mass_ratio
The ion-to-electron mass ratio; the Stix diagram uses 2.5 for clarity.
See Stix equations 2-1 -- 2-3.
"""
omega = 1. # arbitrary
sum_om_p = x * omega**2 # = om_pe**2 + om_pi**2
om_ce = -y * omega
om_ci = np.abs(om_ce) / mass_ratio
obj = cls()
obj.omega = omega
obj.R = 1 - sum_om_p / ((omega + om_ci) * (omega + om_ce))
obj.L = 1 - sum_om_p / ((omega - om_ci) * (omega - om_ce))
obj.P = 1 - sum_om_p / omega**2
return obj._finish()
def refractive_index(self, theta):
"""Compute the refractive indices for waves propagating in a plasma
with the specified R,L,P parameters.
theta
The angle between the magnetic field and the wave propagation direction,
in radians.
Returns:
An array of shape `(..., 2)`, where the unspecified part of the
shape comes from broadcasting `theta` and the arrays like `self.L`.
The first element of the final array axis gives the refractive
indices for the fast mode, while the second gives them for the slow
mode.
The equations depend only on the square of the sines and cosines of `theta`
so they are symmetric on the half-circle.
This implements Stix equations 1-29 -- 1-35.
"""
sin2th = np.sin(theta)**2
cos2th = np.cos(theta)**2
A = self.S * sin2th + self.P * cos2th
B = self.R * self.L * sin2th + self.P * self.S * (1 + cos2th)
F = np.sqrt(((self.R * self.L - self.P * self.S) * sin2th)**2
+ (2 * self.P * self.D)**2 * cos2th) # contents can never be negative
n_fast = np.sqrt((B - F) / (2 * A))
n_slow = np.sqrt((B + F) / (2 * A))
return np.concatenate((n_fast[...,np.newaxis], n_slow[...,np.newaxis]), axis=-1)
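if __name__ == '__main__':
    # A quick illustrative run, not part of the original module; the plasma
    # parameters below are arbitrary round numbers, not from any reference.
    n_e = 1e9  # electron number density, cm^-3
    B = 100.   # magnetic field strength, Gauss
    print('electron plasma frequency [rad/s]:', omega_plasma(n_e, cgs.me))
    print('cutoff frequencies [rad/s]:', cutoff_frequencies(n_e, B))
    params = Parameters.new_basic(ghz=5., n_e=n_e, B=B)
    print('refractive indices (fast, slow) at 45 degrees:',
          params.refractive_index(np.array([np.pi / 4])))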
|
nilq/baby-python
|
python
|
from smtplib import SMTPException
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinLengthValidator
from django.core.mail.message import EmailMultiAlternatives
from django.conf import settings
from django.template.loader import render_to_string
from cms.models.pluginmodel import CMSPlugin
from common.models import ActivableModel, TimestampedModel
class ContactFormPluginModel(CMSPlugin):
name_fname = models.CharField(
_('name of the name field'), max_length=100,
default=_('Your name')
)
name_placeholder = models.CharField(
_('placeholder of the name field'), max_length=100,
default=_('Type your name')
)
email_fname = models.CharField(
_('name of the e-mail field'), max_length=100,
default=_('Your e-mail')
)
email_placeholder = models.CharField(
_('placeholder of the e-mail field'), max_length=100,
default=_('Type your e-mail')
)
message_fname = models.CharField(
_('name of the message field'), max_length=100,
default=_('Your message')
)
message_placeholder = models.CharField(
_('placeholder of the message field'), max_length=100,
default=_('Type your message here')
)
button_text = models.CharField(
_('submit button text'), max_length=100,
default=_('SEND MESSAGE')
)
def __str__(self):
return '(Contact Form)'
def copy_relations(self, oldinstance):
self.recipients.all().delete()
for recipient in oldinstance.recipients.all():
recipient.id = None
recipient.plugin = self
recipient.save()
class Recipient(ActivableModel, TimestampedModel):
TO, CC, BCC = ('TO', 'CC', 'BCC')
R_TYPE_CHOICES = (
(TO, TO),
(CC, CC),
(BCC, BCC)
)
plugin = models.ForeignKey(
verbose_name=_('Contact Form Plugin'),
to=ContactFormPluginModel, null=True, on_delete=models.SET_NULL,
related_name='recipients'
)
name = models.CharField(_('name'), max_length=100)
email = models.EmailField(_('e-mail'))
recipient_type = models.CharField(
_('recipient type'), max_length=3, choices=R_TYPE_CHOICES, default=TO,
help_text=_(
'Type of the recipient: TO - normal, CC - copy, BCC - hidden copy.'
)
)
def __str__(self):
return (
f'{self.get_recipient_type_display()}: '
f'{self.name} <{self.email}>'
)
class ContactMessage(TimestampedModel):
STATUS_PENDING, STATUS_SUCCESS, STATUS_ERROR = range(1, 4)
STATUS_CHOICES = (
(STATUS_PENDING, _('pending')),
(STATUS_SUCCESS, _('sent')),
(STATUS_ERROR, _('error'))
)
sender_name = models.CharField(
_('sender name'), max_length=100,
null=False, blank=False
)
sender_email = models.EmailField(
_('sender e-mail'),
null=False, blank=False
)
message = models.TextField(
_('message'), null=False, blank=False,
validators=[MinLengthValidator(20)],
)
status = models.SmallIntegerField(
_('status'), choices=STATUS_CHOICES, default=STATUS_PENDING
)
status_msg = models.TextField(
_('Status message')
)
recipients = models.ManyToManyField(
to=Recipient, verbose_name=_('recipients'),
blank=False
)
class Meta:
verbose_name = _('Contact Message')
verbose_name_plural = _('Contact Messages')
def __str__(self):
return _('{status} message from {email}').format(
status=self.get_status_display(),
email=self.sender_email
)
def send(self, do_save=True):
recipients = self.recipients.all()
to = [r.email for r in recipients if r.recipient_type == Recipient.TO]
bcc = [r.email for r in recipients if r.recipient_type == Recipient.BCC]
cc = [r.email for r in recipients if r.recipient_type == Recipient.CC]
msg_body = render_to_string(
'contact/email/contact_message.txt',
{
'project_title': settings.PROJECT_TITLE,
'instance': self
}
)
mail = EmailMultiAlternatives(
subject=settings.CONTACT_MSG_SUBJECT.format(
PROJECT_TITLE=settings.PROJECT_TITLE
),
body=msg_body,
to=to, bcc=bcc, cc=cc,
reply_to=[self.sender_email]
)
try:
mail.send()
except SMTPException as e:
self.status = self.STATUS_ERROR
self.status_msg = _('Sending failed:\n{}').format(e)
if do_save:
self.save()
return False
else:
self.status = self.STATUS_SUCCESS
self.status_msg = _('Successfully sent.')
if do_save:
self.save()
return True
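# Usage sketch (illustrative only; requires a configured Django project, and
# assumes ActivableModel exposes the usual is_active flag):
#
#     msg = ContactMessage.objects.create(
#         sender_name='Jane', sender_email='jane@example.com',
#         message='A message of at least twenty characters.')
#     msg.recipients.set(Recipient.objects.filter(is_active=True))
#     ok = msg.send()  # True on success; status/status_msg are persisted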
|
nilq/baby-python
|
python
|
import logging
import sys
import pyModeS as pms
from pyModeS.extra.rtlreader import RtlReader
class RtlSdrSource(RtlReader):
def __init__(self):
super(RtlSdrSource, self).__init__()
def handle_messages(self, messages):
if self.stop_flag.value is True:
self.sdr.close()
sys.exit(0)
for msg, t in messages:
if len(msg) < 28: # only process long messages
continue
logging.debug("Raw message: {}".format(msg))
df = pms.df(msg)
if df == 17 or df == 18:
logging.debug("Send message to pipe: {}".format(msg))
self.raw_pipe_in.send(msg)
else:
continue
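# Usage sketch (illustrative; assumes pyModeS's RtlReader provides the run()
# loop that drives handle_messages, as in the pyModeS examples):
#
#     if __name__ == "__main__":
#         logging.basicConfig(level=logging.DEBUG)
#         RtlSdrSource().run()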
|
nilq/baby-python
|
python
|
import numpy
verbose = False
def ride_of_fortune(artifact, explorers):
artifact_object = Artifact(artifact)
if verbose:
print(artifact_object.artifact)
results = []
for explorer_start_row in explorers:
if verbose:
print("====")
explorer_object = Explorer(y=explorer_start_row, x=-1, dy=0, dx=1)
while not explorer_object.exited:
explorer_object.step()
explorer_object.evaluate_and_rotate(artifact_object)
results.append(explorer_object.result)
return results
class Explorer():
def __init__(self, y, x, dy, dx):
self.y = y
self.x = x
self.dy = dy
self.dx = dx
self.result = None
self.exited = False
if verbose:
print("new explorer at y", self.y, "x", self.x, "moving dy", self.dy, "dx", self.dx)
def step(self):
if verbose:
print("about to step at y", self.y, "x", self.x, "moving dy", self.dy, "dx", self.dx)
self.y += self.dy
self.x += self.dx
if verbose:
print("just stepped at y", self.y, "x", self.x, "moving dy", self.dy, "dx", self.dx)
def evaluate_and_rotate(self, artifact_object):
if verbose:
print("evaluating")
if self.x < 0:
self.exited = True
self.result = None # exits back to land
elif self.x == artifact_object.width:
self.exited = True
self.result = [self.y, self.x - 1] # exits at this location's portal
elif self.y == artifact_object.height:
self.exited = True
self.result = [self.y - 1, self.x]
elif self.y < 0:
self.exited = True
self.result = [0, self.x]
if self.exited:
if verbose:
print("exited at y", self.y, "x", self.x, "moving dy", self.dy, "dx", self.dx)
print('result', self.result)
return
# decide whether to rotate
if artifact_object.artifact[self.y][self.x] != 0:
if verbose:
print("about to rotate y", self.y, "x", self.x, "moving dy", self.dy, "dx", self.dx)
print(artifact_object.artifact)
self.rotate(artifact_object)
if verbose:
print("did rotate y", self.y, "x", self.x, "moving dy", self.dy, "dx", self.dx)
print(artifact_object.artifact)
def rotate(self, artifact_object):
switch_state = artifact_object.artifact[self.y][self.x]
# rotate explorer
if switch_state == 1:
self.apply_state_one_rules()
elif switch_state == -1:
self.apply_state_minus_one_rules()
artifact_object.artifact[self.y][self.x] *= -1
def apply_state_one_rules(self):
if verbose:
print("State one rules")
dy = self.dy
dx = self.dx
if dy == 1 and dx == 0: # south
dy, dx = 0, 1 # east
elif dy == 0 and dx == 1: # east
dy, dx = 1, 0 # south
elif dy == -1 and dx == 0: # north
dy, dx = 0, -1 # west
elif dy == 0 and dx == -1: # west
dy, dx = -1, 0 # north
self.dy = dy
self.dx = dx
def apply_state_minus_one_rules(self):
if verbose:
print("State minus one rules")
dy = self.dy
dx = self.dx
if dy == 1 and dx == 0: # south
dy, dx = 0, -1 # west
elif dy == 0 and dx == -1: # west
dy, dx = 1, 0 # south
elif dy == -1 and dx == 0: # north
dy, dx = 0, 1 # east
elif dy == 0 and dx == 1: # east
dy, dx = -1, 0 # north
self.dy = dy
self.dx = dx
class Artifact:
def __init__(self, artifact):
self.height = len(artifact)
self.width = len(artifact[0])
n = numpy.zeros((self.height, self.width))
for i, row in enumerate(artifact):
for j, char in enumerate(artifact[i]):
if char == "A":
n[i][j] = 1
elif char == "B":
n[i][j] = -1
self.artifact = n
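if __name__ == "__main__":
    # Tiny self-contained demo, not part of the original module. On a 1x1
    # artifact with no switches ('.'), the explorer entering at row 0 passes
    # straight through and exits at the far portal, cell [0, 0].
    print(ride_of_fortune(["."], [0]))  # -> [[0, 0]]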
|
nilq/baby-python
|
python
|
# Copyright 2018 eShares, Inc. dba Carta, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from typing import Optional
from .equality_operator import EqualityOperator
from .greater_than_operator import GreaterThanOperator
from .greater_than_or_equal_to_operator import GreaterThanOrEqualToOperator
from .less_than_operator import LessThanOperator
from .less_than_or_equal_to_operator import LessThanOrEqualToOperator
from .negated_set_membership_operator import NegatedSetMembershipOperator
from .negation_operator import NegationOperator
from .set_membership_operator import SetMembershipOperator
class Operator:
OPERATOR_MAP = {
EqualityOperator.SYMBOL: EqualityOperator,
GreaterThanOperator.SYMBOL: GreaterThanOperator,
GreaterThanOrEqualToOperator.SYMBOL: GreaterThanOrEqualToOperator,
LessThanOperator.SYMBOL: LessThanOperator,
LessThanOrEqualToOperator.SYMBOL: LessThanOrEqualToOperator,
NegationOperator.SYMBOL: NegationOperator,
SetMembershipOperator.SYMBOL: SetMembershipOperator,
NegatedSetMembershipOperator.SYMBOL: NegatedSetMembershipOperator,
} # Dict[Optional[str], Any]
class InvalidSymbolError(Exception):
pass
@classmethod
def factory(cls, operator_symbol: Optional[str]):
try:
return cls.OPERATOR_MAP[operator_symbol]() # type: ignore
except KeyError:
raise cls.InvalidSymbolError("Operator not supported: %s" % operator_symbol)
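# Usage sketch (illustrative; the concrete SYMBOL strings live in the operator
# classes imported above):
#
#     op = Operator.factory(EqualityOperator.SYMBOL)  # an EqualityOperator
#     Operator.factory("no-such-symbol")  # raises Operator.InvalidSymbolError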
|
nilq/baby-python
|
python
|
# SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
from main.service import AssemblyService
from main.swagger import AssemblySwagger
from .base import BaseResource, setup_resource
api_assembly = AssemblySwagger.api
@setup_resource
class AssemblyResource(BaseResource):
class Meta:
service = AssemblyService
swagger = AssemblySwagger
|
nilq/baby-python
|
python
|
import unittest
import numpy
import tempfile
from pyscf import lib, gto
class KnowValues(unittest.TestCase):
def test_save_load_mol(self):
mol = gto.M(atom=[['H', (0,0,i)] for i in range(8)],
basis='sto3g')
fchk = tempfile.NamedTemporaryFile()
lib.chkfile.save_mol(mol, fchk.name)
mol1 = lib.chkfile.load_mol(fchk.name)
self.assertTrue(numpy.all(mol1._atm == mol._atm))
self.assertTrue(numpy.all(mol1._bas == mol._bas))
self.assertTrue(numpy.all(mol1._env == mol._env))
def test_save_load_arrays(self):
fchk = tempfile.NamedTemporaryFile()
a = numpy.eye(3)
lib.chkfile.save(fchk.name, 'a', a)
self.assertTrue(numpy.all(a == lib.chkfile.load(fchk.name, 'a')))
a = [numpy.eye(3), numpy.eye(4)]
lib.chkfile.save(fchk.name, 'a', a)
dat = lib.chkfile.load(fchk.name, 'a')
self.assertTrue(isinstance(dat, list))
self.assertTrue(numpy.all(a[1] == dat[1]))
a = [[numpy.eye(4), numpy.eye(4)]]*2
lib.chkfile.save(fchk.name, 'a', a)
dat = lib.chkfile.load(fchk.name, 'a')
self.assertTrue(isinstance(dat, list))
self.assertTrue(isinstance(dat[0], list))
a = {'x':[numpy.eye(4), numpy.eye(4)],
'y':[numpy.eye(4)]}
lib.chkfile.save(fchk.name, 'a', a)
dat = lib.chkfile.load(fchk.name, 'a')
self.assertTrue('x' in dat)
self.assertTrue('y' in dat)
if __name__ == "__main__":
print("Full Tests for lib.chkfile")
unittest.main()
|
nilq/baby-python
|
python
|
'''
Created on Nov 14, 2013
@author: andyisaballa
'''
import sqlite3
from message import Message
from friend import Friend
STICKERS_ENABLED = 0
STICKERS_DISABLED = 1
STICKERS_ONLY = 2
def getMessagesByFriend(db, name, stickers=STICKERS_DISABLED):
conn = sqlite3.connect(db)
cursor = conn.cursor()
if isinstance(name, str):
nameQ = 'SELECT id FROM friend WHERE name=?;'
name = cursor.execute(nameQ, (name, )).fetchone()[0]
if stickers == STICKERS_DISABLED:
query = "SELECT * FROM message WHERE sender=? AND message != 'NULL';"
elif stickers == STICKERS_ONLY:
query = "SELECT * FROM message WHERE sender=? AND message == 'NULL';"
else:
query = 'SELECT * FROM message WHERE sender=?;'
messages = cursor.execute(query, (name, )).fetchall()
cursor.close()
conn.close()
return [Message.fromTuple(m) for m in messages]
def getFriends(db):
conn = sqlite3.connect(db)
cursor = conn.cursor()
people = cursor.execute('SELECT * FROM friend;').fetchall()
cursor.close()
conn.close()
return [Friend.fromTuple(f) for f in people]
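if __name__ == '__main__':
    # Usage sketch; 'messages.db' is a hypothetical database following the
    # friend/message schema these queries assume, and Friend is assumed to
    # expose a name attribute.
    for friend in getFriends('messages.db'):
        msgs = getMessagesByFriend('messages.db', friend.name)
        print(friend.name, len(msgs))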
|
nilq/baby-python
|
python
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Russell Barlow III
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
from __future__ import division
import os
import re
SourceDir = "C:\\Users\\guita_000\\Desktop\\Test"
buildFilesList=[]
def buildBuildFileList(SourceDir):
for root, dirs, files in os.walk(SourceDir):
for buildFile in files:
if 'Build.cs' in buildFile:
buildFilesList.append(os.path.join(root, buildFile))
return buildFilesList
def editBuildFile(file_name):
seek_target = "TargetInfo"
file_object = open(file_name, "rt+")
file_buffer = file_object.readlines()
for i in range(len(file_buffer)):
if seek_target in file_buffer[i]:
            for j in range(len(file_buffer) - i):
                if '{' in file_buffer[i+j]:
                    file_buffer.insert(i+j+1, "\t\t\tbUseAVX = true;\n")
file_object.seek(0)
file_object.writelines(file_buffer)
file_object.truncate()
file_object.close()
return file_buffer
file_object.close()
return file_buffer
def editBuildFileRegex(file_name):
#Open Source Build File
file_object = open(file_name, "rt+")
#Read The Build File Into Memory
file_buffer = ''.join(file_object.readlines())
#Remove All Occurences Of bUseAVX = xxx;
file_buffer = re.sub("[ \t]*bUseAVX[ \t]*=[ \t]*[true]*[false]*[ \t]*;[\s]*", "\t\t\t", file_buffer )
#Add bUseAVX = true;
file_buffer = re.sub("(TargetInfo[ \t]+\w*[ \t]*\)\s*){", "\\1{\n\t\t\tbUseAVX = true;\n", file_buffer)
#Write Result Back To File
file_object.seek(0)
file_object.writelines(file_buffer)
file_object.truncate()
file_object.close()
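# Illustrative before/after for editBuildFileRegex, using a hypothetical
# Build.cs snippet (not from a real project). Input:
#     public MyGame(TargetInfo Target)
#     {
#         bUseAVX = false;
# After both substitutions, roughly:
#     public MyGame(TargetInfo Target)
#     {
#             bUseAVX = true;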
def editBuildFileList(SourceDir):
buildFilesList = buildBuildFileList(SourceDir)
for file_index in range(len(buildFilesList)):
editBuildFile( buildFilesList[file_index] )
editBuildFileList(SourceDir)
|
nilq/baby-python
|
python
|
from abc import ABC, abstractmethod
from boltons.cacheutils import cachedproperty
class BaseEnv(ABC):
"""
Abstract base class used for implementing new environments.
"""
def __init__(self):
self.env = self._create_env()
def __str__(self):
return "<{}>".format(type(self).__name__)
# TODO: Follow https://github.com/mahmoud/boltons/pull/184
@cachedproperty
@abstractmethod
def s_space(self):
"""
Returns a `space` object containing information about the state space.
Example
-------
State space containing 4 continuous observations:
`return reward.utils.space.Continuous(low=0, high=1, shape=(4,))`
"""
@cachedproperty
@abstractmethod
def ac_space(self):
"""
Returns a `space` object containing information about the action space.
Example
-------
        Action space containing 4 continuous actions:
`return reward.utils.space.Continuous(low=0, high=1, shape=(4,))`
"""
@abstractmethod
def reset(self):
"""
Resets the environment to an initial state.
Returns
-------
numpy.ndarray
A numpy array with the state information.
"""
@abstractmethod
def step(self, ac):
"""
Receives an action and execute it on the environment.
Parameters
----------
        ac: int or float or numpy.ndarray
            The action to be executed in the environment; it should be an ``int``
            for discrete environments and a ``float`` for continuous ones. It is
            also possible to execute multiple actions (if the environment
            supports it), in which case it should be a ``numpy.ndarray``.
Returns
-------
next_state: numpy.ndarray
A numpy array with the state information.
reward: float
The reward.
done: bool
Flag indicating the termination of the episode.
info: dict
Dict containing additional information about the state.
"""
@abstractmethod
def _create_env(self):
"""
        Creates and returns an environment.
Returns
-------
Environment object.
"""
@property
def num_lives(self): raise NotImplementedError
@property
def unwrapped(self): return self
def sample_random_ac(self): return self.ac_space.sample()
def record(self, path): raise NotImplementedError
def close(self): raise NotImplementedError
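# A minimal concrete subclass sketch (illustrative; the space classes are the
# ones the docstrings above reference, so the import path is an assumption):
#
#     import numpy as np
#     from reward.utils import space
#
#     class DummyEnv(BaseEnv):
#         @cachedproperty
#         def s_space(self): return space.Continuous(low=0, high=1, shape=(4,))
#         @cachedproperty
#         def ac_space(self): return space.Continuous(low=-1, high=1, shape=(1,))
#         def reset(self): return np.zeros(4)
#         def step(self, ac): return np.zeros(4), 0.0, True, {}
#         def _create_env(self): return object()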
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------------------------
# (c) 2020 Siemens AG
# All Rights Reserved.
# Author: thomas.graf@siemens.com
#
# Licensed under the MIT license.
# SPDX-License-Identifier: MIT
# -------------------------------------------------------------------------------
import sys
import warnings
import unittest
import responses
sys.path.insert(1, "..")
from sw360 import SW360 # noqa: E402
class Sw360TestHealth(unittest.TestCase):
MYTOKEN = "MYTOKEN"
MYURL = "https://my.server.com/"
ERROR_MSG_NO_LOGIN = "Unable to login"
def setUp(self):
warnings.filterwarnings(
"ignore", category=ResourceWarning,
message="unclosed.*<ssl.SSLSocket.*>")
def _add_login_response(self):
"""
        Add the response for a successful login.
"""
responses.add(
method=responses.GET,
url=self.MYURL + "resource/api/",
body="{'status': 'ok'}",
status=200,
content_type="application/json",
adding_headers={"Authorization": "Token " + self.MYTOKEN},
)
@responses.activate
def test_get_health_status(self):
lib = SW360(self.MYURL, self.MYTOKEN, False)
self._add_login_response()
actual = lib.login_api()
self.assertTrue(actual)
responses.add(
method=responses.GET,
url=self.MYURL + "resource/health/",
body='{"status": "UP"}',
status=200,
content_type="application/json",
adding_headers={"Authorization": "Token " + self.MYTOKEN},
)
status = lib.get_health_status()
self.assertIsNotNone(status)
self.assertTrue("status" in status)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.5 on 2019-12-02 00:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_auto_20191202_0038'),
]
operations = [
migrations.RenameField(
model_name='boleto',
old_name='user_id',
new_name='user',
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 8 16:58:10 2020
@author: nephilim
"""
import keras
import numpy as np
import my_Im2col
from matplotlib import pyplot,cm
import skimage.transform
import T_PowerGain
import DataNormalized
import Reshape2Encoder
def GetPatch(Image,patch_size,slidingDis):
blocks,idx=my_Im2col.my_im2col(Image,patch_size,slidingDis)
return blocks,idx
def CalculationSNR(Image,Noise):
frac_up=np.sum(Image**2)
frac_down=np.sum((Image-Noise)**2)
SNR=10*np.log10(frac_up/frac_down)
return SNR
class AutoEncoder():
def __init__(self,ImageShape,filters,kernel_size,latent_dim):
self.ImageShape=ImageShape
self.filters=filters
self.kernel_size=kernel_size
self.latent_dim=latent_dim
def Encoder(self):
self.Encoder_Input=keras.Input(shape=self.ImageShape,name='Encoder_Input_2D')
x=self.Encoder_Input
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=self.kernel_size[idx],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.MaxPool2D((2,2))(x)
self.shape=keras.backend.int_shape(x)
# print(self.shape)
x=keras.layers.Flatten()(x)
Encoder_Output=keras.layers.Dense(self.latent_dim,name='Encoder_Ouput_1D')(x)
self.EncoderMode=keras.models.Model(inputs=self.Encoder_Input,outputs=Encoder_Output,name='EncoderPart')
self.EncoderMode.summary()
self.EncoderMode.compile(loss='mse',optimizer='adam')
def Decoder(self):
Decoder_Input=keras.Input(shape=(self.latent_dim,),name='Decoder_Input_1D')
x=keras.layers.Dense(self.shape[1]*self.shape[2]*self.shape[3])(Decoder_Input)
x=keras.layers.Reshape((self.shape[1],self.shape[2],self.shape[3]))(x)
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2DTranspose(filters=self.filters[len(self.filters)-idx-1],kernel_size=self.kernel_size[len(self.kernel_size)-idx-1],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.UpSampling2D((2,2))(x)
Decoder_Output=keras.layers.Conv2DTranspose(filters=1,kernel_size=5,activation='sigmoid',padding='same',name='Decoder_Output_1D')(x)
self.DecoderMode=keras.models.Model(inputs=Decoder_Input,outputs=Decoder_Output)
self.DecoderMode.summary()
self.DecoderMode.compile(loss='mse',optimizer='adam')
if __name__=='__main__':
AutoEncoder_=AutoEncoder(ImageShape=(512,512,1),filters=[16,32,64],kernel_size=[5,5,5],latent_dim=256)
AutoEncoder_.Encoder()
patch_size=(512,512)
slidingDis=64
Iteration=100
ProfileGain_train=[]
for iteration in range(Iteration):
Profile=np.load('./GPR_Modelling/Profile_TunnelLining/TunnelLining_Iter_%s.npy'%iteration)
ProfileGain=T_PowerGain.tpowGain(Profile,np.arange(7000)/4,0.9)
ProfileGain=skimage.transform.resize(ProfileGain,(512,512),mode='edge')
ProfileGain=DataNormalized.DataNormalized(ProfileGain)/255
ProfileGain_Patch,_=GetPatch(ProfileGain,patch_size,slidingDis)
ProfileGain_train.append(ProfileGain_Patch)
Iteration=100
compare=1
for iteration in range(Iteration):
Profile=np.load('./GPR_Modelling/ProfileAutoEncoder/%s_iter_record_%s_comp.npy'%(iteration,compare))
ProfileGain=T_PowerGain.tpowGain(Profile,np.arange(7000)/4,0.9)
ProfileGain=skimage.transform.resize(ProfileGain,(512,512),mode='edge')
ProfileGain=DataNormalized.DataNormalized(ProfileGain)/255
ProfileGain_Patch,_=GetPatch(ProfileGain,patch_size,slidingDis)
ProfileGain_train.append(ProfileGain_Patch)
ProfileGain_train=np.array(ProfileGain_train)
Profile_train=Reshape2Encoder.ReshapeData2Encoder(ProfileGain_train,patch_size)
del ProfileGain,ProfileGain_Patch,ProfileGain_train
layer_outputs=[layer.output for layer in AutoEncoder_.EncoderMode.layers[1:None]]
activation_model=keras.models.Model(inputs=AutoEncoder_.EncoderMode.input,outputs=layer_outputs)
activations=activation_model.predict(Profile_train[2:3,:,:])
# pyplot.figure()
# pyplot.imshow(ProfileGain)
# pyplot.figure()
# pyplot.imshow(activations[1][0,:,:,0])
# pyplot.figure()
# pyplot.imshow(activations[3][0,:,:,0])
# pyplot.figure()
# pyplot.imshow(activations[5][0,:,:,0])
# pyplot.figure()
# pyplot.imshow(ProfileGain)
# pyplot.figure()
# pyplot.imshow(display_grid[0,:,:])
# pyplot.figure()
# pyplot.imshow(display_grid[1,:,:])
# pyplot.figure()
# pyplot.imshow(display_grid[2,:,:])
# pyplot.figure()
# pyplot.imshow(Profile_train[1541,:,:,0])
# image_per_row=16
# for layer_activation in activations:
# n_features=layer_activation.shape[-1]
# size=layer_activation.shape[1]
# n_cols=n_features//image_per_row
# display_grid=np.zeros((size*n_cols,image_per_row*size))
# for col in range(n_cols):
# for row in range(image_per_row):
# channel_image=layer_activation[0,:,:,col*image_per_row+row]
# display_grid[col*size:(col+1)*size,row*size:(row+1)*size]=channel_image
# pyplot.figure()
# pyplot.imshow(display_grid[:,:])
# pyplot.axis('off')
pyplot.figure()
data=Profile_train[2:3,:,:]
data=data[0,:,:,0]
pyplot.imshow(data,cmap=cm.seismic)
pyplot.axis('off')
pyplot.savefig('OriginalInput.png',dpi=1000)
FirstConv2DLayer=activations[2]
image_per_row=4
n_features=FirstConv2DLayer.shape[-1]
size=FirstConv2DLayer.shape[1]
n_cols=n_features//image_per_row
display_grid=np.zeros((n_cols*size,image_per_row*size))
for col in range(n_cols):
for row in range(image_per_row):
channel_image=FirstConv2DLayer[0,:,:,col*image_per_row+row]
display_grid[col*size:(col+1)*size,row*size:(row+1)*size]=channel_image
pyplot.figure()
pyplot.imshow(display_grid[:,:],vmin=-0.2,vmax=0.4,cmap=cm.seismic)
pyplot.axis('off')
pyplot.savefig('FirstConv2D.png',dpi=1000)
FirstConv2DLayer=activations[5]
image_per_row=8
n_features=FirstConv2DLayer.shape[-1]
size=FirstConv2DLayer.shape[1]
n_cols=n_features//image_per_row
display_grid=np.zeros((n_cols*size,image_per_row*size))
for col in range(n_cols):
for row in range(image_per_row):
channel_image=FirstConv2DLayer[0,:,:,col*image_per_row+row]
display_grid[col*size:(col+1)*size,row*size:(row+1)*size]=channel_image
pyplot.figure()
pyplot.imshow(display_grid[:,:],vmin=-0.2,vmax=0.4,cmap=cm.seismic)
pyplot.axis('off')
pyplot.savefig('SecondConv2D.png',dpi=1000)
FirstConv2DLayer=activations[8]
image_per_row=8
n_features=FirstConv2DLayer.shape[-1]
size=FirstConv2DLayer.shape[1]
n_cols=n_features//image_per_row
display_grid=np.zeros((n_cols*size,image_per_row*size))
for col in range(n_cols):
for row in range(image_per_row):
channel_image=FirstConv2DLayer[0,:,:,col*image_per_row+row]
display_grid[col*size:(col+1)*size,row*size:(row+1)*size]=channel_image
pyplot.figure()
pyplot.imshow(display_grid[:,:],vmin=-0.2,vmax=0.4,cmap=cm.seismic)
pyplot.axis('off')
pyplot.savefig('ThirdConv2D.png',dpi=1000)
|
nilq/baby-python
|
python
|
# Copyright 2018 Telefonica
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from io import BytesIO
import json
import logging
from osmclient.common import http
from osmclient.common.exceptions import OsmHttpException, NotFound
import pycurl
class Http(http.Http):
CONNECT_TIMEOUT = 15
def __init__(self, url, user='admin', password='admin', **kwargs):
self._url = url
self._user = user
self._password = password
self._http_header = None
self._logger = logging.getLogger('osmclient')
self._default_query_admin = None
self._all_projects = None
self._public = None
if 'all_projects' in kwargs:
self._all_projects = kwargs['all_projects']
if 'public' in kwargs:
self._public = kwargs['public']
self._default_query_admin = self._complete_default_query_admin()
def _complete_default_query_admin(self):
query_string_list = []
if self._all_projects:
query_string_list.append("ADMIN")
if self._public is not None:
query_string_list.append("PUBLIC={}".format(self._public))
return "&".join(query_string_list)
def _complete_endpoint(self, endpoint):
if self._default_query_admin:
if '?' in endpoint:
endpoint = '&'.join([endpoint, self._default_query_admin])
else:
endpoint = '?'.join([endpoint, self._default_query_admin])
return endpoint
def _get_curl_cmd(self, endpoint, skip_query_admin=False):
self._logger.debug("")
curl_cmd = pycurl.Curl()
if self._logger.getEffectiveLevel() == logging.DEBUG:
curl_cmd.setopt(pycurl.VERBOSE, True)
if not skip_query_admin:
endpoint = self._complete_endpoint(endpoint)
curl_cmd.setopt(pycurl.CONNECTTIMEOUT, self.CONNECT_TIMEOUT)
curl_cmd.setopt(pycurl.URL, self._url + endpoint)
curl_cmd.setopt(pycurl.SSL_VERIFYPEER, 0)
curl_cmd.setopt(pycurl.SSL_VERIFYHOST, 0)
if self._http_header:
curl_cmd.setopt(pycurl.HTTPHEADER, self._http_header)
return curl_cmd
def delete_cmd(self, endpoint, skip_query_admin=False):
self._logger.debug("")
data = BytesIO()
curl_cmd = self._get_curl_cmd(endpoint, skip_query_admin)
curl_cmd.setopt(pycurl.CUSTOMREQUEST, "DELETE")
curl_cmd.setopt(pycurl.WRITEFUNCTION, data.write)
self._logger.info("Request METHOD: {} URL: {}".format("DELETE", self._url + endpoint))
curl_cmd.perform()
http_code = curl_cmd.getinfo(pycurl.HTTP_CODE)
self._logger.info("Response HTTPCODE: {}".format(http_code))
curl_cmd.close()
self.check_http_response(http_code, data)
# TODO 202 accepted should be returned somehow
if data.getvalue():
data_text = data.getvalue().decode()
self._logger.verbose("Response DATA: {}".format(data_text))
return http_code, data_text
else:
return http_code, None
def send_cmd(self, endpoint='', postfields_dict=None,
formfile=None, filename=None,
put_method=False, patch_method=False,
skip_query_admin=False):
self._logger.debug("")
data = BytesIO()
curl_cmd = self._get_curl_cmd(endpoint, skip_query_admin)
if put_method:
curl_cmd.setopt(pycurl.CUSTOMREQUEST, "PUT")
elif patch_method:
curl_cmd.setopt(pycurl.CUSTOMREQUEST, "PATCH")
curl_cmd.setopt(pycurl.POST, 1)
curl_cmd.setopt(pycurl.WRITEFUNCTION, data.write)
if postfields_dict is not None:
jsondata = json.dumps(postfields_dict)
if 'password' in postfields_dict:
postfields_dict_copy = copy.deepcopy(postfields_dict)
postfields_dict_copy['password'] = '******'
jsondata_log = json.dumps(postfields_dict_copy)
else:
jsondata_log = jsondata
self._logger.verbose("Request POSTFIELDS: {}".format(jsondata_log))
curl_cmd.setopt(pycurl.POSTFIELDS, jsondata)
elif formfile is not None:
curl_cmd.setopt(
pycurl.HTTPPOST,
[((formfile[0],
(pycurl.FORM_FILE,
formfile[1])))])
elif filename is not None:
with open(filename, 'rb') as stream:
postdata = stream.read()
self._logger.verbose("Request POSTFIELDS: Binary content")
curl_cmd.setopt(pycurl.POSTFIELDS, postdata)
if put_method:
self._logger.info("Request METHOD: {} URL: {}".format("PUT", self._url + endpoint))
elif patch_method:
self._logger.info("Request METHOD: {} URL: {}".format("PATCH", self._url + endpoint))
else:
self._logger.info("Request METHOD: {} URL: {}".format("POST", self._url + endpoint))
curl_cmd.perform()
http_code = curl_cmd.getinfo(pycurl.HTTP_CODE)
self._logger.info("Response HTTPCODE: {}".format(http_code))
curl_cmd.close()
self.check_http_response(http_code, data)
if data.getvalue():
data_text = data.getvalue().decode()
self._logger.verbose("Response DATA: {}".format(data_text))
return http_code, data_text
else:
return http_code, None
def post_cmd(self, endpoint='', postfields_dict=None,
formfile=None, filename=None,
skip_query_admin=False):
self._logger.debug("")
return self.send_cmd(endpoint=endpoint,
postfields_dict=postfields_dict,
formfile=formfile, filename=filename,
put_method=False, patch_method=False,
skip_query_admin=skip_query_admin)
def put_cmd(self, endpoint='', postfields_dict=None,
formfile=None, filename=None,
skip_query_admin=False):
self._logger.debug("")
return self.send_cmd(endpoint=endpoint,
postfields_dict=postfields_dict,
formfile=formfile, filename=filename,
put_method=True, patch_method=False,
skip_query_admin=skip_query_admin)
def patch_cmd(self, endpoint='', postfields_dict=None,
formfile=None, filename=None,
skip_query_admin=False):
self._logger.debug("")
return self.send_cmd(endpoint=endpoint,
postfields_dict=postfields_dict,
formfile=formfile, filename=filename,
put_method=False, patch_method=True,
skip_query_admin=skip_query_admin)
def get2_cmd(self, endpoint, skip_query_admin=False):
self._logger.debug("")
data = BytesIO()
curl_cmd = self._get_curl_cmd(endpoint, skip_query_admin)
curl_cmd.setopt(pycurl.HTTPGET, 1)
curl_cmd.setopt(pycurl.WRITEFUNCTION, data.write)
self._logger.info("Request METHOD: {} URL: {}".format("GET", self._url + endpoint))
curl_cmd.perform()
http_code = curl_cmd.getinfo(pycurl.HTTP_CODE)
self._logger.info("Response HTTPCODE: {}".format(http_code))
curl_cmd.close()
self.check_http_response(http_code, data)
if data.getvalue():
data_text = data.getvalue().decode()
self._logger.verbose("Response DATA: {}".format(data_text))
return http_code, data_text
return http_code, None
def check_http_response(self, http_code, data):
if http_code >= 300:
resp = ""
if data.getvalue():
data_text = data.getvalue().decode()
self._logger.verbose("Response {} DATA: {}".format(http_code, data_text))
resp = ": " + data_text
else:
self._logger.verbose("Response {}".format(http_code))
if http_code == 404:
raise NotFound("Error {}{}".format(http_code, resp))
raise OsmHttpException("Error {}{}".format(http_code, resp))
def set_query_admin(self, **kwargs):
if 'all_projects' in kwargs:
self._all_projects = kwargs['all_projects']
if 'public' in kwargs:
self._public = kwargs['public']
self._default_query_admin = self._complete_default_query_admin()
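# Usage sketch (illustrative; URL, credentials and endpoint are placeholders,
# and set_http_header is assumed to come from the osmclient base Http class):
#
#     http = Http('https://osm.example.com/osm/', all_projects=True)
#     http.set_http_header(['Authorization: Bearer <token>'])
#     code, resp = http.get2_cmd('admin/v1/projects')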
|
nilq/baby-python
|
python
|
r"""
Utils are utils.
"""
from sys import version_info as v
import subprocess
from datetime import datetime
def run_cmd(cmd: str) -> str:
r"""Executes and returns custom command output.
"""
return subprocess.getoutput(cmd)
def get_gpu_usage() -> str:
r"""Returns current gpu usage.
"""
cmd_used = 'nvidia-smi --query-gpu=memory.used --format=csv,nounits,noheader'
cmd_total = 'nvidia-smi --query-gpu=memory.total --format=csv,noheader'
return f'{run_cmd(cmd_used)} / {run_cmd(cmd_total)}'
def get_disk_usage() -> str:
r"""Returns disk total disk usage.
"""
cmd = 'df -h --total --output=source,size,used,avail | grep -E "Filesystem|total"'
return run_cmd(cmd)
def get_distro_descr() -> str:
r"""Returns Ubuntu distro info.
"""
cmd = 'cat /etc/*release | grep DISTRIB_DESCRIPTION | cut -d= -f2'
return run_cmd(cmd).strip('"')
def current_time() -> str:
r"""Returns current time.
"""
return f'{datetime.now():%Y-%m-%d-%H-%M}'
def get_gpu_name() -> str:
r"""Returns GPU name.
"""
cmd = 'nvidia-smi --query-gpu=name --format=csv,noheader'
return run_cmd(cmd)
def get_cuda_version() -> str:
r"""Returns CUDA version.
"""
cmd = 'cat /usr/local/cuda/version.txt'
return run_cmd(cmd)
def get_cudnn_version() -> str:
r"""Returns CUDA version.
TODO: Check this ones:
cat /usr/include/cudnn.h | grep "define CUDNN_MAJOR"
cat /usr/include/cudnn.h | grep "define CUDNN_MINOR"
cat /usr/include/cudnn.h | grep "define CUDNN_PATCHLEVEL"
"""
cmd = 'python -c "import torch; print(torch.backends.cudnn.version())"'
return run_cmd(cmd)
def get_python_version() -> str:
r"""Returns installed python version.
"""
return f'Python {v.major}.{v.minor}.{v.micro}'
def get_python_version2() -> str:
r"""Returns installed python version.
"""
cmd = 'python -V'
return run_cmd(cmd)
def get_pytorch_version() -> str:
r"""Returns installed pytorch's packages version.
"""
cmd = 'pip list | grep torch'
return run_cmd(cmd)
def print_sysinfo() -> None:
r"""Prints general system and pytorch version info.
"""
_ = list()
_.append('OS\t\t\t ' + get_distro_descr())
_.append('----')
_.append('GPU\t\t\t ' + get_gpu_name())
_.append('CUDA\t\t\t ' + get_cuda_version())
_.append('cuDNN\t\t\t ' + '.'.join([i for i in get_cudnn_version()]))
_.append('----')
_.append('\t\t\t '.join(get_python_version2().lower().split(' ')))
_.append(get_pytorch_version())
print(*_, sep='\n')
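if __name__ == '__main__':
    # Smoke test, not part of the original module: these helpers shell out to
    # nvidia-smi, df, etc., so the output depends entirely on the host.
    print_sysinfo()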
|
nilq/baby-python
|
python
|
import sys
import glob
import serial
import datetime
import traceback
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/ttyA[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
ports = serial_ports()
print(f"Using Port: {ports[0]}")
ser = serial.Serial(ports[0])
ser.flushInput()
timeout = 5000
reset_low = 800
pulse_high = -100
CMD_UNKNOWN_02 = 0x02
CMD_DISP_MODE_MAYBE = 0x03
CMD_BACKLIGHT = 0x05
CMD_VOLUME = 0x40
CMD_PLAY_MODE = 0x41
CMD_BATTERY = 0x43
CMD_TRACK = 0xA0
CMD_PLAY_STATE = 0xA1
CMD_DISP_MAYBE = 0xA2
CMD_TEXT = 0xC8
REG_BATTERY = 0x03
BATTERY_CHARGE = 0x7F
BATTERY_LOW = 0x80
BATTERY_ZERO = 0x01
REG_PLAY_MODE = 0x03
PLAY_MODE_NORMAL = 0x00
PLAY_MODE_REPEAT = 0x01
PLAY_MODE_REPEAT_ONE = 0x03
PLAY_MODE_SHUFFLE = 0x7F
REG_PLAY_STATE = 0x06
CMD_PLAY_STATE_TOGGLE = 0x03
CMD_PLAY_STATE_ON = 0x7F
CMD_PLAY_STATE_OFF = 0x00
CMD_PLAY_STATE_TRACK = 0x01
REG_BACKLIGHT = 0x03
CMD_BACKLIGHT_ON = 0x7F
CMD_BACKLIGHT_OFF = 0x00
REG_VOLUME = 0x03
REG_TEXT = 0x03
REG_TEXT_POSITION = 0x05
REG_TEXT_LEN = 0x07
CMD_TEXT_APPEND = 0x02
CMD_TEXT_END = 0x01
REG_TRACK = 0x06
class SonyMdRemote:
def __init__(self):
self.Backlight = 0
self.Text = ""
self.Track = 0
self.PlayState = 0
self.PlayMode = 0
self.Battery = 0
self.Volume = 0
self.TextBuf = ""
def process_packet(self, data):
# ignore printing NOPs
str = ""
for d in data:
str += hex(d) + ", "
print(f"[{datetime.datetime.now().strftime('%H:%M:%S.%f')}] Packet: [ {str} ]")
if len(data) <= 2:
return
# check header values 82 80
if data[1] != 0x80:
print("BAD: Invalid packet!")
cmd = data[2]
# when display mode is pressed this is triggered
        if cmd == CMD_UNKNOWN_02:
            pass
        elif cmd == CMD_DISP_MAYBE:
            pass
        elif cmd == CMD_BACKLIGHT:
            self.set_backlight_raw(data)
        elif cmd == CMD_VOLUME:
            self.set_volume_raw(data)
        elif cmd == CMD_TEXT:
            self.set_text_raw(data)
        elif cmd == CMD_TRACK:
            self.set_track_raw(data)
        elif cmd == CMD_PLAY_STATE:
            self.set_play_state_raw(data)
        elif cmd == CMD_PLAY_MODE:
            self.set_play_mode_raw(data)
        elif cmd == CMD_BATTERY:
            self.set_battery_raw(data)
else:
str = ""
for d in data:
str += hex(d) + ", "
print(f"[{datetime.datetime.now().strftime('%H:%M:%S.%f')}] Unknown Packet: [ {str} ]")
def is_backlight_on(self):
return self.Backlight == CMD_BACKLIGHT_ON
    def set_backlight(self, isOn):
        self.Backlight = CMD_BACKLIGHT_ON if isOn else CMD_BACKLIGHT_OFF
def set_backlight_raw(self, data):
bl = data[REG_BACKLIGHT]
self.Backlight = bl
print(f"Backlight {self.Backlight == CMD_BACKLIGHT_ON}")
def get_volume(self):
return self.Volume
def set_volume(self, val):
self.Volume = val
def set_volume_raw(self, data):
vol = data[REG_VOLUME]
self.Volume = vol
print (f"Volume {self.Volume}")
def set_text_raw(self, data):
reg = data[REG_TEXT]
# just keep appending text
if self.TextBuf == "":
self.Text = ""
self.Text += ''.join(chr(e) for e in data[REG_TEXT_POSITION:REG_TEXT_POSITION + REG_TEXT_LEN])
self.TextBuf = self.Text
# last chunk of text received
if reg == CMD_TEXT_END:
self.Text = self.Text.replace(chr(0xff), chr(0x00))
print(f"{self.Text}")
self.TextBuf = ""
def get_track(self):
return self.Track
def set_track(self, track):
self.Track = track
self.Text = ""
    def set_track_raw(self, data):
        reg = data[REG_TRACK]
        if self.Track != reg:
            self.set_track(reg)
        print(f"Track T {self.Track}")
def get_play_state(self):
return self.PlayState
def set_play_state_raw(self, data):
reg = data[REG_PLAY_STATE]
self.PlayState = reg
if reg == CMD_PLAY_STATE_TOGGLE:
print (f"Disc: TOGGLE")
elif reg == CMD_PLAY_STATE_ON:
print (f"Disc: ALL ON")
elif reg == CMD_PLAY_STATE_OFF:
print (f"Disc: ALL OFF")
elif reg == CMD_PLAY_STATE_TRACK:
print (f"Disc: TRACK CHANGE")
def get_play_mode_repeat(self):
return self.PlayMode == PLAY_MODE_REPEAT
def get_play_mode_repeat_one(self):
return self.PlayMode == PLAY_MODE_REPEAT_ONE
def get_play_mode_shuffle(self):
return self.PlayMode == PLAY_MODE_SHUFFLE
def set_play_mode_raw(self, data):
reg = data[REG_PLAY_MODE]
self.PlayMode = reg
if reg == PLAY_MODE_NORMAL:
print("Play Mode: No Suffle/repeat")
elif reg == PLAY_MODE_REPEAT:
print("Play Mode: Repeat")
elif reg == PLAY_MODE_REPEAT_ONE:
print("Play Mode: Repeat ONE")
elif reg == PLAY_MODE_SHUFFLE:
print("Play Mode: SHUFFLE")
def get_battery_is_charging(self):
return self.Battery == BATTERY_CHARGE
def get_battery_is_low(self):
return self.Battery == BATTERY_LOW
def get_battery_level(self):
reg = self.Battery
if reg == BATTERY_CHARGE:
return 0
elif reg == BATTERY_LOW:
return 0
elif reg == BATTERY_ZERO:
return 0
else:
chg = reg >> 5
chg = set_bit(chg, 2, 0)
return chg + 1
def set_battery_raw(self, data):
reg = data[REG_BATTERY]
print(f"BAT {reg}")
if reg == BATTERY_CHARGE:
print (f"Battery: Charge ({reg})")
elif reg == BATTERY_LOW:
print (f"Battery: Low Battery")
elif reg == BATTERY_ZERO:
print (f"Battery: 0 Bars!")
else:
chg = reg >> 5
chg = set_bit(chg, 2, 0)
print (f"Battery: Charge ({chg + 1} Bars)")
self.Battery = reg
def display(self):
disp = "".ljust(10, ' ')
if self.get_play_mode_repeat():
disp = "Repeat".ljust(10, ' ')
elif self.get_play_mode_repeat_one():
disp = "Repeat One".ljust(10, ' ')
elif self.get_play_mode_shuffle():
disp = "Shuffle".ljust(10, ' ')
x = self.get_play_state()
bat = self.get_battery_is_charging()
batT = "CHG"
if bat == 0:
bat = self.get_battery_is_low()
batT = "LOW"
if bat == 0:
bat = self.get_battery_level()
batT = ("O" * bat).ljust(4, ' ')
print(f"[{self.Track}] {self.Text.ljust(20, ' ')} {x} [Repeat: {disp}] [B:{batT}]")
def set_bit(v, index, x):
"""Set the index:th bit of v to 1 if x is truthy, else to 0, and return the new value."""
mask = 1 << index # Compute mask, an integer with just bit 'index' set.
v &= ~mask # Clear the bit indicated by the mask (if x is False)
if x:
v |= mask # If x was True, set the bit indicated by the mask.
return v # Return the result, we're done.
def flip_byte(c):
    """Reverse the bit order of a byte (MSB becomes LSB)."""
    c = ((c >> 1) & 0x55) | ((c << 1) & 0xAA)  # swap adjacent bits
    c = ((c >> 2) & 0x33) | ((c << 2) & 0xCC)  # swap bit pairs
    c = ((c >> 4) | (c << 4)) & 0xFF  # swap nibbles; mask to 8 bits since Python ints don't wrap
    return c
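# Worked examples for the bit helpers above (illustrative):
#   set_bit(0b0000, 2, 1) == 0b0100   # set bit 2
#   set_bit(0b0111, 1, 0) == 0b0101   # clear bit 1
#   flip_byte(0b10110000) == 0b00001101  # bit order reversed within the byte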
md = SonyMdRemote()
def process_packet(packet_array):
global md
i = 0
j = 0
outp = 0
addr = 0
data = []
bit_counter = 0
state = 0
skp = 0
#print(packet_array)
for p in packet_array:
# skip the sync and start bit
if i < 4:
i = i + 1
state = 1 # packet detect
continue
if state == 1: # packet detect
# if we detect writes, skip the start pulse
if len(data) > 1 and data[0] == 0x92 and bit_counter == 0 and skp < 2:
i = i + 1
skp = skp + 1
continue
# gather the bits
if not bit_counter % 2:
#print(p)
if p < 0 and p < pulse_high:
outp = set_bit(outp, int(bit_counter/2), 0)
else:
outp = set_bit(outp, int(bit_counter/2), 1)
bit_counter = bit_counter + 1
if bit_counter >= 16:
#outp = outp & 0xFE
#print(f"gfhdhgf {outp}")
data.append(outp)
bit_counter = 0
state = 1 # detect stop
skp = 0
continue
i = i + 1
md.process_packet(data)
buf = ""
num_buf = ""
packet_bits = []
def process_bytes(ser_bytes):
buf = ser_bytes.replace("!", "")
spl = buf.split(',')
ispl = []
for v in range(len(spl)):
if spl[v] == '':
continue
ispl.append(int(spl[v]))
md.process_packet(ispl)
md.display()
while True:
try:
ser_bytes = ser.readline()
#print(ser_bytes)
buf = str(ser_bytes.decode("utf-8"))
buf = buf.replace("\n", "")
buf = buf.replace("\r", "")
buf = buf.replace("--", "-")
num = 0
state = 0
packet_val = 0
if buf[0] == '!':
process_bytes(buf)
continue
if buf[0] != '#':
continue
for b in range(len(buf)):
data = buf[b]
if data == '+':
num_buf = ""
continue
if data == '-':
num_buf = ""
if data == ',':
if num_buf == "" or num_buf == "-" or num_buf == "+":
continue
num = int(num_buf)
#print(num_buf)
num_buf = ""
packet_bits.append(num)
if num > timeout:
process_packet(packet_bits)
packet_bits.clear()
#print("Start Packet")
md.display()
continue
num_buf = ""
continue
num_buf += data
except KeyboardInterrupt:
print("Bye")
ser.close()
sys.exit()
    except Exception:
        print("error")
        print(buf)
        traceback.print_exc()
        break
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import popart
import numpy as np
import torch
import onnx
from onnx import numpy_helper
import math
import pytest
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
def run_model(
tmpdir,
model_file_name,
execution_mode="normal",
enable_matmul_serialization=False,
enable_outlining=False,
enable_accum=False,
accum_factor=1,
activation_tensor_location_settings=None,
weight_tensor_location_settings=None,
optimizer_state_tensor_location_settings=None,
accumulator_tensor_location_settings=None,
num_layers=3,
dsize=37, # Choose a prime number to force padding
batch_size=1,
num_iterations=1,
num_replicas=1,
optimizer=popart.SGD({"defaultLearningRate": (0.5, False)}),
reduction=popart.ReductionType.Sum):
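    # Builds `num_layers` matmul layers with an L1 loss, trains for
    # `num_iterations` steps under the given execution mode and tensor
    # location settings, then saves the weights to `model_file_name` so the
    # tests below can compare weights across configurations.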
np.random.seed(10911)
matmul_serialization_mode = 'output_channels'
matmul_serialization_factor = 2
builder = popart.Builder()
ip = builder.addInputTensor(
popart.TensorInfo("FLOAT", [batch_size, dsize, dsize]))
def add_layer(index, in_id):
w = builder.addInitializedInputTensor(
np.random.rand(dsize, dsize).astype(np.float32), f"W{index}")
matmul_id = builder.aiOnnx.matmul([in_id, w])
if enable_matmul_serialization:
builder.setSerializeMatMul({matmul_id}, matmul_serialization_mode,
matmul_serialization_factor)
return matmul_id
out = ip
l1 = ""
for i in range(num_layers):
if execution_mode == "normal":
vgid = 0
elif execution_mode == "phased":
vgid = i % 2
elif execution_mode == "pipelined":
vgid = i
else:
raise ValueError(f"Execution mode {execution_mode} unsupported")
with builder.executionPhase(i), builder.pipelineStage(
i), builder.virtualGraph(vgid):
out = add_layer(i, out)
if i == num_layers - 1:
with builder.executionPhase(i), builder.pipelineStage(
i), builder.virtualGraph(vgid):
l1 = builder.aiGraphcore.l1loss([out], 0.1, reduction)
anchorIds = []
builder.addOutputTensor(out)
if execution_mode == "normal":
num_ipus = 1
elif execution_mode == "phased":
num_ipus = 2
elif execution_mode == "pipelined":
num_ipus = 2**math.ceil(math.log2(num_layers))
else:
raise ValueError(f"Execution mode {execution_mode} unsupported")
dfAnchors = {}
for anchorId in anchorIds:
dfAnchors.update({anchorId: popart.AnchorReturnType("All")})
opts = popart.SessionOptions()
opts.enableOutlining = enable_outlining
    opts.enableReplicatedGraphs = num_replicas > 1
opts.replicatedGraphCount = num_replicas
opts.enableGradientAccumulation = enable_accum
opts.accumulationFactor = accum_factor
opts.accumulationAndReplicationReductionType = reduction
if activation_tensor_location_settings is not None:
opts.activationTensorLocationSettings = activation_tensor_location_settings
if weight_tensor_location_settings is not None:
opts.weightTensorLocationSettings = weight_tensor_location_settings
if optimizer_state_tensor_location_settings is not None:
opts.optimizerStateTensorLocationSettings = optimizer_state_tensor_location_settings
if accumulator_tensor_location_settings is not None:
opts.accumulatorTensorLocationSettings = accumulator_tensor_location_settings
if execution_mode == "normal":
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
elif execution_mode == "phased":
opts.executionPhaseSettings.phases = num_layers
opts.autoRecomputation = popart.RecomputationType.NoRecompute
opts.virtualGraphMode = popart.VirtualGraphMode.ExecutionPhases
opts.explicitRecomputation = False
elif execution_mode == "pipelined":
opts.enablePipelining = True
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
opts.autoRecomputation = popart.RecomputationType.Standard
proto = builder.getModelProto()
patterns = popart.Patterns(popart.PatternsLevel.All)
# patterns.InPlace = False
with tu.create_test_device(num_replicas * num_ipus,
pattern=popart.SyncPattern.Full) as device:
session = popart.TrainingSession(fnModel=proto,
dataFlow=popart.DataFlow(
1, dfAnchors),
optimizer=optimizer,
loss=l1,
patterns=patterns,
userOptions=opts,
deviceInfo=device)
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
for i in range(num_iterations):
ip_data = np.random.rand(num_replicas, accum_factor, batch_size,
dsize, dsize).astype(np.float32)
stepio = popart.PyStepIO({ip: ip_data}, anchors)
session.run(stepio)
print("anchors:")
print(anchors)
session.modelToHost(str(tmpdir / model_file_name))
def check_model(lhs_model, rhs_model):
for i in range(len(lhs_model.graph.initializer)):
lhs = lhs_model.graph.initializer[i]
for j in range(len(rhs_model.graph.initializer)):
rhs = rhs_model.graph.initializer[j]
if (rhs.name == lhs.name):
print(f'Checking initializer {i} ({lhs.name} - {rhs.name})')
lhsa = numpy_helper.to_array(lhs)
rhsa = numpy_helper.to_array(rhs)
assert np.allclose(lhsa, rhsa, rtol=1.e-4, atol=1.e-5)
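# check_model compares every initializer that appears in both models by name
# and asserts element-wise agreement to rtol=1e-4 / atol=1e-5; this is the
# pass criterion for all of the weight-update tests below.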
# Standard OnChip settings
onChipLocation = popart.TensorLocationSettings(
location=popart.TensorLocation(
storage=popart.TensorStorage.OnChip,
loadTileSet=popart.TileSet.Compute,
storageTileSet=popart.TileSet.Compute,
replicatedTensorSharding=popart.ReplicatedTensorSharding.Off),
minElementsForOffChip=0,
minElementsForReplicatedTensorSharding=2)
# Standard OffChip settings
offChipLocation = popart.TensorLocationSettings(
location=popart.TensorLocation(
storage=popart.TensorStorage.OffChip,
loadTileSet=popart.TileSet.Compute,
storageTileSet=popart.TileSet.Compute,
replicatedTensorSharding=popart.ReplicatedTensorSharding.Off),
minElementsForOffChip=0,
minElementsForReplicatedTensorSharding=2)
# Replicated tensor sharding OffChip settings
offChipRtsLocation = popart.TensorLocationSettings(
location=popart.TensorLocation(
storage=popart.TensorStorage.OffChip,
loadTileSet=popart.TileSet.Compute,
storageTileSet=popart.TileSet.Compute,
replicatedTensorSharding=popart.ReplicatedTensorSharding.On),
minElementsForOffChip=0,
minElementsForReplicatedTensorSharding=2)
# Replicated tensor sharding OnChip settings
onChipRtsLocation = popart.TensorLocationSettings(
location=popart.TensorLocation(
storage=popart.TensorStorage.OnChip,
loadTileSet=popart.TileSet.Compute,
storageTileSet=popart.TileSet.Compute,
replicatedTensorSharding=popart.ReplicatedTensorSharding.On),
minElementsForOffChip=0,
minElementsForReplicatedTensorSharding=2)
@tu.requires_ipu
def test_weight_update(tmpdir):
run_model(tmpdir, 'without_phased.onnx', "normal", False, True)
run_model(tmpdir,
'with_phased.onnx',
execution_mode="phased",
enable_matmul_serialization=False,
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
without_phased = onnx.load(str(tmpdir / 'without_phased.onnx'))
with_phased = onnx.load(str(tmpdir / 'with_phased.onnx'))
check_model(without_phased, with_phased)
@tu.requires_ipu
def test_onchip_memory(tmpdir):
run_model(tmpdir, 'model_normal.onnx', execution_mode="normal")
run_model(tmpdir,
'model_onchip_act.onnx',
execution_mode="phased",
activation_tensor_location_settings=onChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=onChipLocation)
run_model(tmpdir,
'model_onchip_weights.onnx',
execution_mode="phased",
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=onChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=onChipLocation)
run_model(tmpdir,
'model_onchip_opt_state.onnx',
execution_mode="phased",
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=onChipLocation,
accumulator_tensor_location_settings=onChipLocation)
normal = onnx.load(str(tmpdir / 'model_normal.onnx'))
onchip_act = onnx.load(str(tmpdir / 'model_onchip_act.onnx'))
onchip_weights = onnx.load(str(tmpdir / 'model_onchip_weights.onnx'))
onchip_opt_state = onnx.load(str(tmpdir / 'model_onchip_opt_state.onnx'))
check_model(normal, onchip_act)
check_model(normal, onchip_weights)
check_model(normal, onchip_opt_state)
# Check that 2 batches on 1 replica or 1 batch per replica on 2 replicas
# results in the same updated weight with SGD0
@tu.requires_ipu
def test_replicated_sgd0_weight_update(tmpdir):
run_model(tmpdir,
'phased.onnx',
execution_mode="phased",
batch_size=4,
num_replicas=1,
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
run_model(tmpdir,
'phased_replicated.onnx',
execution_mode="phased",
batch_size=2,
num_replicas=2,
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
run_model(tmpdir,
'phased_replicated_rws.onnx',
execution_mode="phased",
batch_size=2,
num_replicas=2,
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=offChipRtsLocation)
run_model(tmpdir,
'phased_replicated_rws_acc.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
enable_accum=True,
accum_factor=2,
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=onChipLocation)
phased = onnx.load(str(tmpdir / 'phased.onnx'))
phased_replicated = onnx.load(str(tmpdir / 'phased_replicated.onnx'))
phased_replicated_rws = onnx.load(
str(tmpdir / 'phased_replicated_rws.onnx'))
phased_replicated_rws_acc = onnx.load(
str(tmpdir / 'phased_replicated_rws_acc.onnx'))
check_model(phased, phased_replicated)
check_model(phased, phased_replicated_rws)
check_model(phased, phased_replicated_rws_acc)
# Check that 2 batches on 1 replica or 1 batch per replica on 2 replicas
# results in the same updated weight with SGD1 and SGD2
@pytest.mark.parametrize('sgdType', ['SGD1', 'SGD2'])
@tu.requires_ipu
def test_replicated_sgd1and2_weight_update(tmpdir, sgdType):
optimizer_dict = {
"defaultLearningRate": (0.00001, False),
"defaultMomentum": (0.9, False),
"defaultDampening": (0.2, False),
"defaultVelocityScaling": (0.1, False),
"lossScaling": (1.0, True),
"defaultWeightDecay": (0.2, True)
}
    if sgdType == 'SGD1':
        sgdAccMm = popart.SGDAccumulatorAndMomentum.Combined
    elif sgdType == 'SGD2':
        sgdAccMm = popart.SGDAccumulatorAndMomentum.Separate
    else:
        raise ValueError(f'Unknown sgdType={sgdType} in test')
run_model(tmpdir,
'phased.onnx',
execution_mode="phased",
batch_size=2,
num_replicas=1,
num_iterations=5,
optimizer=popart.SGD(optimizer_dict,
accumulatorAndMomentum=sgdAccMm),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
run_model(tmpdir,
'phased_replicated.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
optimizer=popart.SGD(optimizer_dict,
accumulatorAndMomentum=sgdAccMm),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
run_model(tmpdir,
'phased_replicated_rws.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
optimizer=popart.SGD(optimizer_dict,
accumulatorAndMomentum=sgdAccMm),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=offChipRtsLocation)
# For SGD2, where accumulator and optimizer state are separate tensors, add
# another test case for when only the optimizer state is RTS.
if sgdType == 'SGD2':
run_model(tmpdir,
'phased_replicated_rts_os_only.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
optimizer=popart.SGD(optimizer_dict,
accumulatorAndMomentum=sgdAccMm),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=offChipLocation)
phased = onnx.load(str(tmpdir / 'phased.onnx'))
phased_replicated = onnx.load(str(tmpdir / 'phased_replicated.onnx'))
phased_replicated_rws = onnx.load(
str(tmpdir / 'phased_replicated_rws.onnx'))
check_model(phased, phased_replicated)
check_model(phased, phased_replicated_rws)
if sgdType == 'SGD2':
phased_replicated_rts_os_only = onnx.load(
str(tmpdir / 'phased_replicated_rts_os_only.onnx'))
check_model(phased, phased_replicated_rts_os_only)
# Check that 2 batches on 1 replica or 1 batch per replica on 2 replicas
# results in the same updated weight with Adam
@tu.requires_ipu
def test_replicated_adam_weight_update(tmpdir):
optimizer_dict = {
"defaultLearningRate": (0.005, True),
"defaultBeta1": (0.7, True),
"defaultBeta2": (0.8, True),
"defaultWeightDecay": (0.1, True),
"defaultEps": (1e-6, True),
"lossScaling": (10.0, True),
}
run_model(tmpdir,
'phased.onnx',
execution_mode="phased",
batch_size=2,
num_replicas=1,
num_iterations=5,
optimizer=popart.Adam(optimizer_dict),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
run_model(tmpdir,
'phased_replicated.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
optimizer=popart.Adam(optimizer_dict),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation)
run_model(tmpdir,
'phased_replicated_rws.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
optimizer=popart.Adam(optimizer_dict),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=offChipRtsLocation)
phased = onnx.load(str(tmpdir / 'phased.onnx'))
phased_replicated = onnx.load(str(tmpdir / 'phased_replicated.onnx'))
phased_replicated_rws = onnx.load(
str(tmpdir / 'phased_replicated_rws.onnx'))
check_model(phased, phased_replicated)
check_model(phased, phased_replicated_rws)
# Check that 2 batches on 1 replica or 1 batch per replica on 2 replicas
# results in the same updated weight with Lamb
@pytest.mark.parametrize("isConst", [False, True])
@pytest.mark.parametrize("reduction",
[popart.ReductionType.Sum, popart.ReductionType.Mean])
@tu.requires_ipu
def test_replicated_lamb_weight_update(tmpdir, isConst, reduction):
# Test both const & non-const optimizer parameters
optimizer_dict = {
"defaultLearningRate": (0.005, isConst),
"defaultBeta1": (0.7, isConst),
"defaultBeta2": (0.8, isConst),
"defaultWeightDecay": (0.1, isConst),
"defaultEps": (1e-6, isConst),
"lossScaling": (10.0, isConst),
}
# Off-chip, but no RTS (1x replica)
run_model(tmpdir,
'phased.onnx',
execution_mode="phased",
batch_size=4,
num_replicas=1,
num_iterations=5,
optimizer=popart.Adam(optimizer_dict, popart.AdamMode.Lamb),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation,
reduction=reduction)
# Off-chip, but no RTS (2x replicas)
run_model(tmpdir,
'phased_replicated.onnx',
execution_mode="phased",
batch_size=2,
num_replicas=2,
num_iterations=5,
optimizer=popart.Adam(optimizer_dict, popart.AdamMode.Lamb),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipLocation,
optimizer_state_tensor_location_settings=offChipLocation,
accumulator_tensor_location_settings=offChipLocation,
reduction=reduction)
# Weights and optimizer off-chip, RTS
run_model(tmpdir,
'phased_replicated_rws.onnx',
execution_mode="phased",
batch_size=2,
num_replicas=2,
num_iterations=5,
optimizer=popart.Adam(optimizer_dict, popart.AdamMode.Lamb),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=offChipLocation,
reduction=reduction)
# Weights and optimizer off-chip, accumulator off chip, RTS
run_model(tmpdir,
'phased_replicated_rws_acc.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
enable_accum=True,
accum_factor=2,
optimizer=popart.Adam(optimizer_dict, popart.AdamMode.Lamb),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=offChipRtsLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=offChipLocation,
reduction=reduction)
# Weights on-chip, non-RTS, optimizer state off-chip, RTS
run_model(tmpdir,
'phased_replicated_rws_acc_nw.onnx',
execution_mode="phased",
batch_size=1,
num_replicas=2,
num_iterations=5,
enable_accum=True,
accum_factor=2,
optimizer=popart.Adam(optimizer_dict, popart.AdamMode.Lamb),
activation_tensor_location_settings=offChipLocation,
weight_tensor_location_settings=onChipLocation,
optimizer_state_tensor_location_settings=offChipRtsLocation,
accumulator_tensor_location_settings=onChipLocation,
reduction=reduction)
phased = onnx.load(str(tmpdir / 'phased.onnx'))
phased_replicated = onnx.load(str(tmpdir / 'phased_replicated.onnx'))
phased_replicated_rws = onnx.load(
str(tmpdir / 'phased_replicated_rws.onnx'))
phased_replicated_rws_acc = onnx.load(
str(tmpdir / 'phased_replicated_rws_acc.onnx'))
phased_replicated_rws_acc_nw = onnx.load(
str(tmpdir / 'phased_replicated_rws_acc_nw.onnx'))
check_model(phased, phased_replicated)
check_model(phased, phased_replicated_rws)
check_model(phased, phased_replicated_rws_acc)
check_model(phased, phased_replicated_rws_acc_nw)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.5 on 2021-01-14 08:31
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations, models
def parse_description_json_to_string(description):
string = ""
blocks = description.get("blocks")
if not blocks or not isinstance(blocks, list):
return ""
for block in blocks:
block_type = block["type"]
if block_type == "list":
for item in block["data"].get("items"):
if not item:
continue
string += item
else:
text = block["data"].get("text")
if not text:
continue
string += text
return string
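# Example with a hypothetical editor.js-style payload:
#   {"blocks": [{"type": "header", "data": {"text": "Mug"}},
#               {"type": "list", "data": {"items": ["ceramic", "350ml"]}}]}
# parses to the plaintext "Mugceramic350ml".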
def migrate_description_into_description_plaintext(apps, schema):
Product = apps.get_model("product", "Product")
for product in Product.objects.iterator():
product.description_plaintext = parse_description_json_to_string(
product.description
)
product.save()
class Migration(migrations.Migration):
dependencies = [
("product", "0138_migrate_description_json_into_description"),
]
operations = [
migrations.AddField(
model_name="product",
name="description_plaintext",
field=models.TextField(blank=True, default=""),
),
migrations.AddField(
model_name="product",
name="search_vector",
field=django.contrib.postgres.search.SearchVectorField(
blank=True, null=True
),
),
migrations.AddIndex(
model_name="product",
index=django.contrib.postgres.indexes.GinIndex(
fields=["search_vector"], name="product_pro_search__e78047_gin"
),
),
migrations.RunSQL(
"""
CREATE TRIGGER title_vector_update BEFORE INSERT OR UPDATE
ON product_product FOR EACH ROW EXECUTE PROCEDURE
tsvector_update_trigger(
'search_vector', 'pg_catalog.english', 'description_plaintext'
);
"""
),
migrations.RunPython(
migrate_description_into_description_plaintext,
migrations.RunPython.noop,
),
]
|
nilq/baby-python
|
python
|
from .concat_dataset import ConcatDataset
from .dataset_wrapper import DatasetWrapper
from .image_collection_dataset import ImageCollectionDataset
from .segmentation_dataset import SegmentationDataset
from .sampler import MinForegroundSampler
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.4 on 2019-07-17 11:36
from django.core.management import call_command
from django.db import migrations
from events.helpers.permissions import (
create_organizer_group_migrations_wrapped,
remove_organizer_group
)
def update_permissions(apps, schema_editor):
    # RunPython passes (apps, schema_editor); neither is needed here.
    call_command('update_permissions')
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20190715_1440'),
]
operations = [
migrations.RunPython(update_permissions, reverse_code=migrations.RunPython.noop),
migrations.RunPython(create_organizer_group_migrations_wrapped, remove_organizer_group),
]
|
nilq/baby-python
|
python
|
# PLASMA is a software package provided by:
# University of Tennessee, US,
# University of Manchester, UK.
from __future__ import print_function
# standard python modules
import os
import sys
import re
import shlex
import subprocess
from subprocess import PIPE
# local modules
from environment import Environment
# ==============================================================================
# initialization and settings
# ------------------------------------------------------------------------------
def init():
'''
Initialize config module.
'''
global env, top_dir, auto, verbose
env = Environment()
top_dir = os.getcwd()
if (not os.path.exists( 'config-src' )):
os.mkdir( 'config-src' )
os.chdir( 'config-src' )
auto = False
verbose = 0
args = sys.argv[1:]
for arg in args:
if (arg == '--auto'):
auto = True
elif (arg == '--verbose' or arg == '-v'):
verbose += 1
else:
match = re.search( r'^(\w+)=(.*)', arg )
if (match):
env[ match.group(1) ] = match.group(2)
else:
print_comment( 'unrecognized argument: ' + arg )
#raise Error( "unrecognized command line argument: " + arg )
# end
# end
# end
# ------------------------------------------------------------------------------
def set_auto( in_auto=True ):
'''
Sets automatic, non-interactive mode on or off.
Auto always selects the first available choice.
Auto is off by default.
'''
global auto
auto = in_auto
# end
# ------------------------------------------------------------------------------
def set_verbose( in_verbose=1 ):
'''
Sets verbose level.
Verbose = 0 (off) by default.
'''
global verbose
verbose = in_verbose
# end
# ==============================================================================
# support
# ------------------------------------------------------------------------------
# All errors this code raises.
# Using this allows Python Exceptions to fall through, give tracebacks.
class Error( Exception ):
pass
# ------------------------------------------------------------------------------
# ANSI codes
esc = chr(0x1B) + '['
red = esc + '31m'
green = esc + '32m'
yellow = esc + '33m'
blue = esc + '34m'
magenta = esc + '35m'
cyan = esc + '36m'
white = esc + '37m'
font_bold = esc + '1m'
font_normal = esc + '0m'
font_header = font_bold
font_subhead = blue
font_comment = cyan
str_yes = font_bold + green + 'yes' + font_normal
str_no = red + 'no' + font_normal
dots_width = 50
subdots_width = 58
# ------------------------------------------------------------------------------
# Current programming language
lang_stack = [ None ]
lang = None
def get_lang():
global lang
return lang
def set_lang( in_lang ):
global lang, lang_stack
assert( in_lang in ('C', 'C++', 'Fortran', 'F77') )
lang_stack[-1] = in_lang
lang = in_lang
def push_lang( in_lang ):
global lang, lang_stack
assert( in_lang in ('C', 'C++', 'Fortran', 'F77') )
lang_stack.append( in_lang )
lang = in_lang
def pop_lang():
global lang, lang_stack
if (len(lang_stack) == 1):
raise Error("popping last language")
pop = lang_stack.pop()
lang = lang_stack[-1]
return pop
# ==============================================================================
# output files
output_headers_done = False
# ------------------------------------------------------------------------------
# todo: in autoconf, this changes @DEFS@ to be -DHAVE_CONFIG_H instead of
# -Dfoo1=bar1 -Dfoo2=bar2 ... for all variables foo{i}.
def output_headers( files ):
'''
Create each file in files from file.in, substituting @foo@ with
variable foo.
files can be a single header file or an iterable list of files.
If the contents of file are not modified, it does not touch it,
    to avoid unnecessary recompilation.
Ex:
cfg.output_headers( "config.h" )
cfg.output_headers( ["config.h", "meta.h"] )
'''
    global output_headers_done
    output_headers_done = True
print_header( 'Output files' )
if (isinstance( files, str )):
files = [ files ]
pwd = os.getcwd()
os.chdir( top_dir )
for fname in files:
txt = read( fname + '.in' )
out = re.sub( r'@(\w+)@', sub_env, txt )
        out = re.sub( r'#undef (\w+)', sub_define, out )
if (os.path.exists( fname ) and out == read( fname )):
print( fname, 'is unchanged' )
else:
print( 'creating', fname )
write( fname, out )
# end
env['DEFS'] = '-DHAVE_CONFIG_H'
os.chdir( pwd )
# end
# ------------------------------------------------------------------------------
def output_files( files ):
'''
Create each file in files from file.in, substituting @foo@ with
variable foo.
files can be a single header file or an iterable list of files.
Unlike output_headers(), this always recreates the file even if the
contents are the same.
Ex:
cfg.output_files( "make.inc" )
cfg.output_files( ["Makefile", "src/Makefile"] )
'''
if (not output_headers_done):
print_header( 'Output files' )
if (isinstance( files, str )):
files = [ files ]
pwd = os.getcwd()
os.chdir( top_dir )
for fname in files:
txt = read( fname + '.in' )
out = re.sub( r'@(\w+)@', sub_env, txt )
print( 'creating', fname )
write( fname, out )
# end
os.chdir( pwd )
# end
# ==============================================================================
# compilers
# ------------------------------------------------------------------------------
def prog_cc( extra=[],
default=['gcc', 'cc', 'icc', 'xlc_r', 'xlc', 'clang'],
required=True ):
'''
Detect C compilers.
extra List of additional compilers to check, before default.
default List of compilers to check, currently:
GNU gcc
generic cc
Intel icc
IBM xlc_r
IBM xlc
Clang clang
If $CC is set in environment or on command line,
uses that instead of extra & default.
If $CFLAGS, $LDFLAGS, or $LIBS are set in environment or on command line,
uses those flags when compiling.
It is recommended to override extra and not override default.
'''
print_header( 'Detecting C compilers' )
cc = env['CC']
if (cc):
print_comment( 'Test using $CC=' + cc )
compilers = [ cc ]
else:
compilers = unique( extra + default )
set_lang('C')
src = 'prog_cc.c'
choices = []
for cc in compilers:
env.push()
env['CC'] = cc
try:
try_compiler( src )
print_subhead( ' Required features:' )
prog_cc_c99()
openmp()
openmp_depend()
choices.append( env.top() ) # passed required features
print_subhead( ' Optional features:' )
openmp_priority( required=False )
#for flag in ('-O2', '-Wall', '-Wshadow', '-Wno-unused-function', '-pedantic'):
# compiler_flag( flag, required=False )
except Error, e:
if (verbose): print( e )
env.pop()
#print()
# end
val = choose( 'C compiler', 'CC', choices )
if (val is not None):
env.push( val )
elif (required):
raise Error( 'C compiler not found' )
# end
# ------------------------------------------------------------------------------
def prog_cxx( extra=[],
default=['g++', 'c++', 'CC', 'cxx',
'icpc', 'xlC_r', 'xlC', 'clang++'],
required=True ):
'''
Detect C++ compilers.
extra List of additional compilers to check, before default.
default List of compilers to check, currently:
GNU g++
generic c++
generic CC
generic cxx
Intel icpc
IBM xlC_r
IBM xlC
Clang clang++
If $CXX is set in environment or on command line,
uses that instead of extra & default.
If $CXXFLAGS, $LDFLAGS, or $LIBS are set in environment or on command line,
uses those flags when compiling.
It is recommended to override extra and not override default.
'''
print_header( 'Detecting C++ compilers' )
cxx = env['CXX']
if (cxx):
print_comment( 'Test using $CXX=' + cxx )
compilers = [ cxx ]
else:
compilers = unique( extra + default )
set_lang('C++')
src = 'prog_cxx.cxx'
choices = []
for cxx in compilers:
env.push()
env['CXX'] = cxx
try:
try_compiler( src )
print_subhead( ' Required features:' )
prog_cxx_cxx11()
openmp()
openmp_depend()
choices.append( env.top() ) # passed required features
print_subhead( ' Optional features:' )
openmp_priority( required=False )
#for flag in ('-O2', '-Wall', '-Wshadow', '-Wno-unused-function', '-pedantic'):
# compiler_flag( flag, required=False )
except Error, e:
if (verbose): print( e )
env.pop()
#print()
# end
val = choose( 'C++ compiler', 'CXX', choices )
if (val is not None):
env.push( val )
elif (required):
raise Error( 'C++ compiler not found' )
# end
# ------------------------------------------------------------------------------
# todo: autoconf also has [ifc efc lf95 epcf90 frt cf77 fort77 fl32 af77]
fortran_compilers = [
'gfortran', 'g95', 'g77',
'fort', 'f95', 'f90', 'f77',
'ftn', 'nagfor', 'ifort',
'xlf95', 'xlf90', 'xlf',
'pgfortran', 'pgf95', 'pgf90', 'pghpf', 'pgf77'
]
def prog_fortran( extra=[], default=fortran_compilers, required=True ):
'''
Detect modern Fortran compilers (Fortran 90 and newer).
extra List of additional compilers to check, before default.
default List of compilers to check, currently:
GNU gfortran
GNU g95
GNU g77
generic fort
generic f95
generic f90
generic f77
Cray ftn
NAG nagfor
Intel ifort
IBM xlf
IBM xlf95
IBM xlf90
PGI pgfortran
PGI pgf95
PGI pgf90
PGI pghpf
PGI pgf77
If $FC is set in environment or on command line,
uses that instead of extra & default.
If $FCFLAGS, $LDFLAGS, or $LIBS are set in environment or on command line,
uses those flags when compiling.
It is recommended to override extra and not override default.
'''
print_header( 'Detecting Fortran compilers' )
fc = env['FC']
if (fc):
print_comment( 'Test using $FC=' + fc )
compilers = [ fc ]
else:
compilers = unique( extra + default )
set_lang('Fortran')
src = 'prog_fortran.f90'
choices = []
for fc in compilers:
env.push()
env['FC'] = fc
try:
try_compiler( src )
print_subhead( ' Required features:' )
prog_fortran_f2008()
openmp()
choices.append( env.top() ) # passed required features
print_subhead( ' Optional features:' )
openmp_depend( required=False )
openmp_priority( required=False )
#for flag in ('-O2', '-Wall', '-Wshadow', '-Wno-unused-function', '-pedantic'):
# compiler_flag( flag, required=False )
except Error, e:
if (verbose): print( e )
env.pop()
#print()
# end
val = choose( 'Fortran compiler', 'FC', choices )
if (val is not None):
env.push( val )
elif (required):
raise Error( 'Fortran compiler not found' )
# end
# ------------------------------------------------------------------------------
def prog_f77( extra=[], default=fortran_compilers, required=True ):
'''
Detect Fortran 77 compilers.
extra List of additional compilers to check, before default.
default List of compilers to check, currently:
GNU gfortran
GNU g95
GNU g77
generic fort
generic f95
generic f90
generic f77
Cray ftn
NAG nagfor
Intel ifort
IBM xlf
IBM xlf95
IBM xlf90
PGI pgfortran
PGI pgf95
PGI pgf90
PGI pghpf
PGI pgf77
If $F77 is set in environment or on command line,
uses that instead of extra & default.
If $FFLAGS, $LDFLAGS, or $LIBS are set in environment or on command line,
uses those flags when compiling.
It is recommended to override extra and not override default.
'''
print_header( 'Detecting Fortran 77 compilers' )
f77 = env['F77']
if (f77):
print_comment( 'Test using $F77=' + f77 )
compilers = [ f77 ]
else:
compilers = unique( extra + default )
set_lang('F77')
src = 'prog_f77.f'
choices = []
for f77 in compilers:
env.push()
env['F77'] = f77
try:
try_compiler( src )
print_subhead( ' Required features:' )
openmp()
choices.append( env.top() ) # passed required features
print_subhead( ' Optional features:' )
openmp_depend( required=False )
openmp_priority( required=False )
#for flag in ('-O2', '-Wall', '-Wshadow', '-Wno-unused-function', '-pedantic'):
# compiler_flag( flag, required=False )
except Error, e:
if (verbose): print( e )
env.pop()
#print()
# end
val = choose( 'Fortran 77 compiler', 'F77', choices )
if (val is not None):
env.push( val )
elif (required):
raise Error( 'Fortran 77 compiler not found' )
# end
# ==============================================================================
# compiler features
# ------------------------------------------------------------------------------
# While many compilers support C99 by default, it's best to explicitly set
# -std=c99 if it exists, to exclude non-standard extensions.
# gcc's current default is gnu11.
def prog_cc_c99( flags=['-std=c99', ''], required=True ):
assert( lang == 'C' )
src = 'prog_cc_c99.c'
found = False
for flag in flags:
print_dots( ' C99 support: ' + flag, subdots_width )
save_cflags = env.append( 'CFLAGS', flag )
try:
try_compile_run( src )
found = True
print( str_yes )
break
except Error, e:
env['CFLAGS'] = save_cflags
print( str_no )
# end
if (not found):
print_error( ' C99 not supported' )
if (required):
raise Error( 'C99 not supported' )
# end
# ------------------------------------------------------------------------------
# While many compilers support C++11 by default, it's best to explicitly set
# -std=c++11 if it exists, to exclude non-standard extensions.
# g++'s current default is gnu++14.
def prog_cxx_cxx11( flags=['-std=c++11', ''], required=True ):
assert( lang == 'C++' )
src = 'prog_cxx_cxx11.cxx'
found = False
for flag in flags:
print_dots( ' C++11 support: ' + flag, subdots_width )
save_cxxflags = env.append( 'CXXFLAGS', flag )
try:
try_compile_run( src )
found = True
print( str_yes )
break
except Error, e:
env['CXXFLAGS'] = save_cxxflags
print( str_no )
# end
if (not found):
print_error( ' C++11 not supported' )
if (required):
raise Error( 'C++11 not supported' )
# end
# ------------------------------------------------------------------------------
# While many compilers support f2008 by default, it's best to explicitly set
# -std=f2008 if it exists, to exclude non-standard extensions.
# gfortran's current default is gnu.
# todo: ifort gives warning: ignoring unknown option '-std=f2008'
def prog_fortran_f2008( flags=['-stand=f08', '-std=f2008'], required=True ):
assert( lang == 'Fortran' )
src = 'prog_fortran_f2008.f90' # without C binding
if (env['CC']):
push_lang('C')
c_src = 'prog_fortran_f2008_foo.c'
try:
c_obj = try_compile_obj( c_src )
src = 'prog_fortran_f2008_cbind.f90' # with C binding
except Exception:
c_obj = ''
print_comment( " prog_fortran_f2008: compiling C failed; skipping C binding test" )
pop_lang()
else:
c_obj = ''
print_comment( " prog_fortran_f2008: no C compiler set; skipping C binding test" )
# end
found = False
for flag in flags:
print_dots( ' Fortran 2008 support: ' + flag, subdots_width )
save_fcflags = env.append( 'FCFLAGS', flag )
try:
try_compile_run( src, [c_obj] )
found = True
print( str_yes )
break
except Error, e:
env['FCFLAGS'] = save_fcflags
print( str_no )
# end
if (not found):
print_error( ' Fortran 2008 not supported' )
if (required):
raise Error( 'Fortran 2008 not supported' )
# end
# ------------------------------------------------------------------------------
def compiler_flag( flag, required=True ):
(cc, flagname) = lang_vars()
if (lang == 'C'):
src = 'prog_cc.c'
elif (lang == 'C++'):
src = 'prog_cxx.cxx'
elif (lang == 'Fortran'):
src = 'prog_fortran.f90'
elif (lang == 'F77'):
src = 'prog_f77.f'
else:
raise Error( "unknown language " + lang )
# end
# skip test if flag already included, e.g., by user setting $CFLAGS
save = env[ flagname ]
if (flag in save):
return
save = env.append( flagname, flag )
try:
print_dots( ' Accepts ' + flag, subdots_width )
try_compile_obj( src )
print( str_yes )
except Error, e:
env[ flagname ] = save
print( str_no )
if (required):
raise Error
# end
# ------------------------------------------------------------------------------
def openmp( flags=['-fopenmp', '-qopenmp', '-openmp', '-omp', ''], required=True ):
(cc, flagname) = lang_vars()
if (lang == 'C'):
src = 'omp_cc.c'
elif (lang == 'C++'):
src = 'omp_cc.c' # same as C
elif (lang == 'Fortran'):
src = 'omp_fortran.f90'
elif (lang == 'F77'):
src = 'omp_f77.f'
else:
raise Error( "unknown language " + lang )
# end
found = False
for flag in flags:
# typically -fopenmp must be specified both when compiling & linking
save_flags = env.append( flagname, flag )
save_ldflags = env.append( 'LDFLAGS', flag )
try:
print_dots( ' OpenMP: ' + flag, subdots_width )
try_compile_run( src )
env['OPENMP_' + flagname] = flag
found = True
print( str_yes )
break
except Error, e:
print( str_no )
finally:
env[ flagname ] = save_flags
env['LDFLAGS'] = save_ldflags
# end
if (not found):
print_error( ' OpenMP not supported' )
if (required):
raise Error( 'OpenMP not supported' )
# end
# ------------------------------------------------------------------------------
def openmp_depend( required=True ):
(cc, flagname) = lang_vars()
if (lang == 'C'):
src = 'omp_depend_cc.c'
elif (lang == 'C++'):
src = 'omp_depend_cc.c' # same as C
elif (lang == 'Fortran'):
src = 'omp_depend_fortran.f90'
elif (lang == 'F77'):
src = 'omp_depend_f77.f'
else:
raise Error( "unknown language " + lang )
# end
# temporarily add, e.g., OPENMP_CFLAGS to CFLAGS and LDFLAGS
save_flags = env.append( flagname, env['OPENMP_'+flagname] )
save_ldflags = env.append( 'LDFLAGS', env['OPENMP_'+flagname] )
try:
print_dots( ' OpenMP 4 task depend', subdots_width )
try_compile_run( src )
env.append( 'DEFS', '-DHAVE_OPENMP_DEPEND' ) # todo compiler-specific DEFS?
print( str_yes )
except Error, e:
print( str_no )
if (required):
raise e
finally:
env[ flagname ] = save_flags
env['LDFLAGS'] = save_ldflags
# end
# ------------------------------------------------------------------------------
def openmp_priority( required=True ):
(cc, flagname) = lang_vars()
if (lang == 'C'):
src = 'omp_priority_cc.c'
elif (lang == 'C++'):
src = 'omp_priority_cc.c' # same as C
elif (lang == 'Fortran'):
src = 'omp_priority_fortran.f90'
elif (lang == 'F77'):
src = 'omp_priority_f77.f'
else:
raise Error( "unknown language " + lang )
# end
# temporarily add, e.g., OPENMP_CFLAGS to CFLAGS and LDFLAGS
save_flags = env.append( flagname, env['OPENMP_'+flagname] )
save_ldflags = env.append( 'LDFLAGS', env['OPENMP_'+flagname] )
try:
print_dots( ' OpenMP 4.5 task priority', subdots_width )
try_compile_run( src )
env.append( 'DEFS', '-DHAVE_OPENMP_PRIORITY' ) # todo compiler-specific DEFS?
print( str_yes )
except Error, e:
print( str_no )
if (required):
raise e
finally:
env[ flagname ] = save_flags
env['LDFLAGS'] = save_ldflags
# end
# ==============================================================================
# (C)BLAS and LAPACK(E) libraries
# ------------------------------------------------------------------------------
def blas( ilp64=False, required=True ):
print_header( 'Detecting BLAS libraries' )
push_lang('C')
src = 'blas.c'
tests = []
# ----------
# build list of tests; each test is an environment (hash)
# of LAPACK_CFLAGS and LAPACK_LIBS.
cflags = env['LAPACK_CFLAGS']
libs = env['LAPACK_LIBS']
if (cflags or libs):
# user specified
print_comment( 'Test using $LAPACK_CFLAGS="' + cflags +
'" and $LAPACK_LIBS="' + libs + '"' )
libs = re.sub( '-L +', '-L', libs ) # keep -Ldir together, not -L dir
libs = re.split( ' +', libs )
libdir = filter( lambda x: x.startswith( '-L' ), libs )
libs = filter( lambda x: not x.startswith( '-L' ), libs )
libdir = join( *libdir )
libs = join( *libs )
        tests.append({
            'CFLAGS': cflags,
            'LDFLAGS': libdir,
            'LIBS': libs,
        })
else:
# included by default (e.g., with Cray cc compilers)
tests.append( {} )
# plain BLAS
tests.append( {'LIBS': '-lblas'} )
# MacOS Accelerate
if (sys.platform == 'darwin'):
tests.append({
'DEFS': '-DHAVE_ACCELERATE',
'LIBS': '-framework Accelerate -lm',
})
# end
# OpenBLAS
(inc, libdir) = get_inc_lib( ['OPENBLAS', 'OPENBLASDIR', 'OPENBLAS_DIR'] )
tests.append({
'DEFS': '-DHAVE_OPENBLAS',
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': '-lopenblas -lm',
})
# ATLAS
(inc, libdir) = get_inc_lib( ['ATLAS', 'ATLASDIR', 'ATLAS_DIR'] )
for libs in ('-latlas -lm',
'-lf77blas -latlas -lm',
'-lf77blas -latlas -lgfortran -lm',):
tests.append({
'DEFS': '-DHAVE_ATLAS',
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': libs,
})
# end
# ACML
# may be in a subdirectory if user sets
# $ACMLDIR = /path/to/acml-5.3.1 instead of
# $ACMLDIR = /path/to/acml-5.3.1/gfortran64
# modules on titan use ACML_BASE_DIR
for gf in ('', 'gfortran64', 'gfortran64_mp',
'gfortran32', 'gfortran32_mp'):
(inc, libdir) = get_inc_lib( ['ACML', 'ACMLDIR', 'ACML_DIR', 'ACML_BASE_DIR'], gf )
if (libdir or not gf): # if gf != '', require we found lib directory
if (re.search( '_mp', gf )):
libs = '-lacml_mp -lm'
else:
libs = '-lacml -lm'
tests.append({
'DEFS': '-DHAVE_ACML',
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': libs,
})
# end
# end
# MKL has combination of 3 libs:
# interface lib (gf for gfortran, intel for ifort,
# lp64 for 64-bit long-pointer,
# ilp64 for 64-bit int-long-pointer),
# thread lib (gnu_thread for libgomp, intel_thread for iomp5),
# core lib
# Not recommended to mix mkl_gf_*lp64 with mkl_intel_thread,
# or mkl_intel_*lp64 with mkl_gnu_thread.
# todo: if compiler is GNU, suppress intel_thread version,
# and if compiler is Intel, suppress gnu_thread versions?
libs = [
'-lmkl_gf_lp64 -lmkl_sequential',
'-lmkl_gf_lp64 -lmkl_gnu_thread',
'-lmkl_intel_lp64 -lmkl_sequential',
'-lmkl_intel_lp64 -lmkl_intel_thread',
]
if (ilp64):
libs.extend([
'-lmkl_gf_ilp64 -lmkl_sequential',
'-lmkl_gf_ilp64 -lmkl_gnu_thread',
'-lmkl_intel_ilp64 -lmkl_sequential',
'-lmkl_intel_ilp64 -lmkl_intel_thread',
])
# end
mkl_set_library_path()
(inc, libdir) = get_inc_lib( ['MKLROOT'] )
for lib in libs:
defs = '-DHAVE_MKL'
if (re.search( 'ilp64', lib )):
defs += ' -DMKL_ILP64'
tests.append({
'DEFS': defs,
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': join( lib, '-lmkl_core -lm' ),
})
# end
# end
# ----------
choices = []
for test in tests:
save = merge_env( test )
try:
print_dots( join_vars( test, 'CFLAGS', 'LDFLAGS', 'LIBS' ))
try_compile_run( src )
choices.append( test )
print( str_yes )
except Error, e:
print( str_no )
finally:
restore_env( save )
# end
pop_lang()
val = choose( 'BLAS library', 'LIBS', choices )
if (val is not None):
merge_env( val )
elif (required):
raise Error( 'BLAS not found' )
# end
# ------------------------------------------------------------------------------
def cblas( required=True ):
print_header( 'Detecting CBLAS library' )
push_lang('C')
src = 'cblas.c'
# test if (1) cblas in blas library, or (2) cblas in -lcblas
tests = [
{},
{'LIBS': '-lcblas'},
]
# add lapack directory
(inc, libdir) = get_inc_lib( ['LAPACK', 'LAPACKDIR', 'LAPACK_DIR'] )
if (inc or libdir):
tests.append({
'CFLAGS': inc + '/CBLAS/include',
'LDFLAGS': libdir,
'LIBS': '-lcblas',
})
# end
# add cblas directory
(inc, libdir) = get_inc_lib( ['CBLAS', 'CBLASDIR', 'CBLAS_DIR'] )
if (inc or libdir):
tests.append({
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': '-lcblas',
})
# end
# MacOS has cblas.h header buried in Frameworks
if (sys.platform == 'darwin'):
try:
(stdout, stderr) = run(
'find /System/Library/Frameworks/Accelerate.framework -name cblas.h')
(path, fname) = os.path.split( stdout )
tests.append( {'CFLAGS': '-I'+path} )
except Error:
pass
# end
found = False
for test in tests:
save = merge_env( test )
try:
print_dots( join_vars( test, 'CFLAGS', 'LDFLAGS', 'LIBS' ))
try_compile_run( src )
found = True
print( str_yes )
break
except Error, e:
print( str_no )
restore_env( save )
# end
pop_lang()
if (not found):
print_error( 'CBLAS not found; see http://www.netlib.org/lapack/' )
if (required):
raise Error( 'CBLAS not found' )
# end
# ------------------------------------------------------------------------------
def lapack( required=True ):
print_header( 'Detecting LAPACK library' )
push_lang('C')
src = 'lapack.c'
# test if (1) lapack in blas library, or (2) lapack in -llapack
tests = [
{},
{'LIBS': '-llapack'},
]
# add lapack directory
(inc, libdir) = get_inc_lib( ['LAPACK', 'LAPACKDIR', 'LAPACK_DIR'] )
if (inc or libdir):
tests.append({
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': '-llapack'
})
tests.append({
'CFLAGS': inc,
'LDFLAGS': libdir,
'LIBS': '-llapack -lgfortran'
})
# end
found = False
for test in tests:
save = merge_env( test )
try:
print_dots( join_vars( test, 'CFLAGS', 'LDFLAGS', 'LIBS' ))
try_compile_run( src )
found = True
print( str_yes )
break
except Error, e:
print( str_no )
restore_env( save )
# end
pop_lang()
if (not found):
print_error( 'LAPACK not found; see http://www.netlib.org/lapack/' )
if (required):
raise Error( 'LAPACK not found' )
# end
# ------------------------------------------------------------------------------
def lapacke( required=True ):
print_header( 'Detecting LAPACKE library' )
push_lang('C')
src = 'lapacke.c'
# test if (1) lapacke in blas library, or (2) lapacke in -llapacke
tests = [
{},
{'LIBS': '-llapacke'},
]
# add lapack directory
(inc, libdir) = get_inc_lib( ['LAPACK', 'LAPACKDIR', 'LAPACK_DIR'] )
if (inc or libdir):
tests.append({
'CFLAGS': inc + '/LAPACKE/include',
'LDFLAGS': libdir,
'LIBS': '-llapacke',
})
tests.append({
'CFLAGS': inc + '/LAPACKE/include',
'LDFLAGS': libdir,
'LIBS': '-llapacke -lgfortran',
})
# end
found = False
for test in tests:
save = merge_env( test )
try:
print_dots( join_vars( test, 'CFLAGS', 'LDFLAGS', 'LIBS' ))
try_compile_run( src )
found = True
print( str_yes )
break
except Error, e:
print( str_no )
restore_env( save )
# end
pop_lang()
if (not found):
print_error( 'LAPACKE not found; see http://www.netlib.org/lapack/' )
if (required):
raise Error( 'LAPACKE not found' )
# end
# ------------------------------------------------------------------------------
def blas_return_float( required=True ):
print_dots( 'BLAS return float (e.g., sdot)' )
push_lang('C')
src = 'blas_return_float.c'
try:
try_compile_run( src )
print( 'returns float (standard)' )
return
except Exception, e:
pass
finally:
pop_lang()
push_lang('C')
src = 'blas_return_float_f2c.c'
try:
try_compile_run( src )
print( 'returns double (f2c, clapack, MacOS Accelerate)' )
env.append( 'DEFS', '-DBLAS_RETURN_FLOAT_AS_DOUBLE' )
except Exception, e:
print_error( 'unknown' )
if (required):
raise e
finally:
pop_lang()
# end
# ------------------------------------------------------------------------------
def blas_return_complex( required=True ):
print_dots( 'BLAS return complex (e.g., zdotc)' )
push_lang('C')
src = 'blas_return_complex.c'
try:
try_compile_run( src )
print( 'returns complex' )
return
except Exception, e:
pass
finally:
pop_lang()
push_lang('C')
src = 'blas_return_complex_intel.c'
try:
try_compile_run( src )
print( 'complex result is first argument' )
env.append( 'DEFS', '-DBLAS_RETURN_COMPLEX_AS_ARGUMENT' )
except Exception, e:
print_error( 'unknown' )
if (required):
raise e
finally:
pop_lang()
# end
# ------------------------------------------------------------------------------
def lapacke_dlascl( required=True ):
print_dots( 'LAPACKE_dlascl exists (LAPACK >= 3.6.0)' )
push_lang('C')
src = 'lapacke_dlascl.c'
try:
try_compile_run( src )
print( str_yes )
env.append( 'DEFS', '-DHAVE_LAPACKE_DLASCL' )
except Exception, e:
print( str_no )
if (required):
raise e
finally:
pop_lang()
# end
# ------------------------------------------------------------------------------
def lapacke_dlantr( required=True ):
print_dots( 'LAPACKE_dlantr works (LAPACK >= 3.6.1)' )
push_lang('C')
src = 'lapacke_dlantr.c'
try:
try_compile_run( src )
print( str_yes )
env.append( 'DEFS', '-DHAVE_LAPACKE_DLANTR' )
except Exception, e:
print( str_no )
if (required):
raise e
finally:
pop_lang()
# end
# ------------------------------------------------------------------------------
def lapacke_dlassq( required=True ):
print_dots( 'LAPACKE_dlassq exists (LAPACK >= ???)' )
push_lang('C')
src = 'lapacke_dlassq.c'
try:
try_compile_run( src )
print( str_yes )
env.append( 'DEFS', '-DHAVE_LAPACKE_DLASSQ' )
except Exception, e:
print( str_no )
if (required):
raise e
finally:
pop_lang()
# end
# ==============================================================================
# utilities for (C)BLAS and LAPACK(E) tests
# ------------------------------------------------------------------------------
def join_vars( env2, *variables ):
'''
For variables v1, ..., vn, joins env2[v1], ..., env2[vn] with spaces.
Ignores variables that don't exist in env2.
If result is empty, returns "[default flags]".
Ex: join_vars( test, 'CFLAGS', 'LIBS' ) is similar to:
join( test.has_key('CFLAGS') and test['CFLAGS'] or '',
test.has_key('LIBS') and test['LIBS'] or '' ) or '[default]'.
'''
txt = ''
for var in variables:
if (env2.has_key( var )):
txt += ' ' + env2[ var ]
txt = txt.strip()
if (not txt):
txt = '[default flags]'
return txt
# end
# ------------------------------------------------------------------------------
def merge_env( env2 ):
'''
Appends all key-value pairs in env2 to corresponding key-value pair in env
(except prepend for $LIBS). Return original key-value pairs from env in a
map, to restore using restore_env.
'''
save = {}
for key in env2.keys():
if (key == 'LIBS'):
save[key] = env.prepend( key, env2[key] )
else:
save[key] = env.append( key, env2[key] )
return save
# end
# ------------------------------------------------------------------------------
def restore_env( save ):
'''
Restores all key-value pairs in save to env.
'''
for key in save.keys():
env[key] = save[key]
# end
# ------------------------------------------------------------------------------
def get_inc_lib( variables, subdir=None ):
'''
Determines include and library paths using environment variables.
Ex: (inc, libdir) = get_inc_lib( ['ACML', 'ACML_DIR'], 'gfortran64' )
Checks if ${ACML} or ${ACML_DIR} exists, uses that as PATH.
Then sets inc to the first of these that exists:
PATH/gfortran64/include
PATH/gfortran64
and sets libdir to the first of these that exists:
PATH/gfortran64/lib64
PATH/gfortran64/lib/intel64
PATH/gfortran64/lib
PATH/gfortran64
'''
for var in variables:
inc = ''
libdir = ''
path = env[ var ]
if (path and os.path.exists( path )):
inc = '-I${' + var + '}'
libdir = '-L${' + var + '}'
if (subdir):
path = os.path.join( path, subdir )
if (not os.path.exists( path )):
inc = ''
libdir = ''
continue
inc += '/' + subdir
libdir += '/' + subdir
# end
for lib in ('lib64', 'lib/intel64', 'lib'):
path_lib = os.path.join( path, lib )
if (os.path.exists( path_lib )):
libdir += '/' + lib
break
# end
path_inc = os.path.join( path, 'include' )
if (os.path.exists( path_inc )):
inc += '/include'
break
# end
# end
return (inc, libdir)
# end
# ------------------------------------------------------------------------------
def mkl_set_library_path():
'''
MKL needs (DY)LD_LIBRARY_PATH set or it won't run,
but (DY)LD_LIBRARY_PATH may not be passed to python or make,
so if it isn't set, try setting it ourselves.
'''
mklroot = env['MKLROOT']
LD_LIBRARY_PATH = 'LD_LIBRARY_PATH'
if (sys.platform == 'darwin'):
LD_LIBRARY_PATH = 'DYLD_LIBRARY_PATH'
if (mklroot and not os.environ.has_key( LD_LIBRARY_PATH )):
(inteldir, mkl) = os.path.split( mklroot )
intel_compiler = os.path.join( inteldir, 'compiler' )
if (os.path.exists( intel_compiler )):
inteldir = intel_compiler
paths = []
for lib in ('lib64', 'lib/intel64', 'lib'):
intel_lib = os.path.join( inteldir, lib )
if (os.path.exists( intel_lib )):
paths.append( intel_lib )
break
# end
for lib in ('lib64', 'lib/intel64', 'lib'):
mkl_lib = os.path.join( mklroot, lib )
if (os.path.exists( mkl_lib )):
paths.append( mkl_lib )
break
# end
if (paths):
paths = ':'.join( paths )
os.environ[ LD_LIBRARY_PATH ] = paths
print_comment( 'Setting '+ LD_LIBRARY_PATH +'='+ paths )
# end
# end
# end
# ==============================================================================
# lower level compilation & execution
# ------------------------------------------------------------------------------
def lang_vars():
if (lang == 'C'):
cc = env['CC']
flagname = 'CFLAGS'
elif (lang == 'C++'):
cc = env['CXX']
flagname = 'CXXFLAGS'
elif (lang == 'Fortran'):
cc = env['FC']
flagname = 'FCFLAGS'
elif (lang == 'F77'):
cc = env['F77']
flagname = 'FFLAGS'
else:
raise Error( "unknown language " + lang )
# end
return (cc, flagname)
# end
# ------------------------------------------------------------------------------
def try_compiler( src, required=True ):
(cc, flagname) = lang_vars()
try:
print_dots( cc )
(stdout, stderr) = run([ 'which', cc ])
ccpath = stdout.strip()
if (sys.platform == 'darwin'):
# due to case-insensitive filesystem, `which CC` returns
# non-existent /usr/bin/CC on MacOS; ignore it
(ccdir, exe) = os.path.split( ccpath )
if (not cc in os.listdir( ccdir )):
raise Error
# end
print( ccpath )
print_dots( ' compile and run test', subdots_width )
try_compile_run( src )
print( str_yes )
except Error, e:
print( str_no )
if (required):
raise e
# end
# ------------------------------------------------------------------------------
def try_compile_run( src, extra_objs=[] ):
exe = try_compile_exe( src, extra_objs )
run( './'+exe )
# end
# ------------------------------------------------------------------------------
def try_compile_obj( src ):
(cc, flagname) = lang_vars()
flags = env[ flagname ]
defs = env['DEFS']
write_test( src )
(exe, ext) = os.path.splitext( src )
exe += '_' + cc
obj = exe + '.o'
run([ cc, flags, defs, '-c', src, '-o', obj ])
return obj
# end
# ------------------------------------------------------------------------------
def try_compile_exe( src, extra_objs=[] ):
(cc, flagname) = lang_vars()
flags = env[ flagname ]
openmp = env['OPENMP_' + flagname]
defs = env['DEFS']
ldflags = env['LDFLAGS']
libs = env['LIBS']
write_test( src )
(exe, ext) = os.path.splitext( src )
exe += '_' + cc
obj = exe + '.o'
run([ cc, flags, openmp, defs, '-c', src, '-o', obj ])
run([ cc, ldflags, openmp, obj, extra_objs, libs, '-o', exe ])
return exe
# end
# ------------------------------------------------------------------------------
def run( cmd ):
if (not isinstance( cmd, str )):
cmd = ' '.join( flatten( cmd ))
if (verbose):
dots = ' ' * max( 0, 71 - len(cmd) )
print( font_comment + ' >>', cmd, dots, font_normal, end='' )
    # Popen propagates (DY)LD_LIBRARY_PATH, unlike os.system,
    # but it doesn't substitute variables, so do that ourselves.
    # Prefer ${xyz} syntax, which both shell and make recognize.
cmd = re.sub( r'\$\{(\w+)\}', sub_env, cmd ) # shell & make
cmd = re.sub( r'\$\((\w+)\)', sub_env, cmd ) # make only
cmd = re.sub( r'\$(\w+)', sub_env, cmd ) # shell only
cmd_list = shlex.split( cmd )
proc = subprocess.Popen( cmd_list, stdout=PIPE, stderr=PIPE )
(stdout, stderr) = proc.communicate()
if (verbose > 1):
sys.stdout.write( stdout )
sys.stderr.write( stderr )
rc = proc.wait()
if (verbose):
if (rc != 0):
print_comment( 'failed' )
else:
print_comment( 'ok' )
if (rc != 0):
raise Error
return (stdout, stderr)
# end
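# For example, run( ['gcc', '-c', 'foo.c', '-o', 'foo.o'] ) raises Error on a
# nonzero exit code and otherwise returns the captured (stdout, stderr).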
# ==============================================================================
# utilities
# ------------------------------------------------------------------------------
def choose( name, key, choices ):
'''
Asks user to choose one of choices, and returns it.
name Used for prompts, e.g.,
name='C++ compiler' generates prompt
'Which C++ compiler to use?'
choices List to choose from.
'''
#print( 'choose', name, choices )
comment = ''
num = len(choices)
if (num == 0):
print_error( name + ' not found' )
return None
elif (num == 1):
i = 0
elif (auto):
i = 0
comment = ' [auto]'
else:
print( 'Available choices for', name +':' )
for i in xrange(num):
print( '%3d) %s' % (i+1, choices[i][key]) )
# end
while (True):
print( 'Which', name, 'to use [1-%d, or quit]? ' % (num), end='' ) # no newline
reply = raw_input()
if (re.search( 'q|quit', reply, re.I )):
raise Error('cancelled')
try:
i = int(reply) - 1
assert( i >= 0 and i < num )
break
except:
print( 'invalid input' )
# end
# end
print( font_bold + green + 'Using '+ name + ': ' + choices[i][key] +
comment + font_normal )
return choices[i]
# end
# ------------------------------------------------------------------------------
def print_dots( txt, width=dots_width ):
dots = '.' * max( 3, width - len(txt) )
end = ' ' if (not verbose) else '\n'
print( txt, dots, end=end )
def print_header( txt ):
print( font_header + '\n' + '='*80 + '\n' + txt + font_normal )
def print_subhead( txt ):
print( font_subhead + txt + font_normal )
def print_comment( txt ):
print( font_comment + txt + font_normal )
def print_error( txt ):
print( red + txt + font_normal )
# ------------------------------------------------------------------------------
def sub_env_empty( match ):
'''
Replaces an environment variable.
For use in: re.sub( pattern, sub_env_empty, txt ).
Returns contents of variable match.group(1),
or '' if variable doesn't exist.
'''
    return env.get( match.group(1), '' )
# end
# ------------------------------------------------------------------------------
def sub_env( match ):
'''
Replaces an environment variable.
    For use in: re.sub( pattern, sub_env, txt ).
Returns contents of variable match.group(1),
or the original match.group(0) if variable doesn't exist.
'''
key = match.group(1)
val = env.get( key )
if (val is None):
return match.group(0) # no change
else:
return val
# end
# ------------------------------------------------------------------------------
def sub_define( match ):
'''
Called as repl in re.sub( pattern, sub_define, txt ).
Returns:
#define variable value
for variable match.group(1), or commented out:
/* variable */
if variable doesn't exist.
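    Ex: if env['DEFS'] contains '-DHAVE_MKL', a match for HAVE_MKL returns
    '#define HAVE_MKL'; otherwise the matched text is returned commented out.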
'''
key = match.group(1)
defs = env['DEFS']
m = re.search( r'-D' + key + r'(\S*)', defs )
if (m):
return join( '#define', key, m.group(1) )
else:
return join( '/*', match.group(0), '*/' ) # comment out
# end
# ------------------------------------------------------------------------------
g_written = {}
def write_test( filename ):
'''
Write g_test_files[ filename ] to file, the first time this is called with
that filename. Subsequent calls with the same filename do nothing.
'''
    if (filename not in g_written):
g_written[ filename ] = True
write( filename, g_test_files[ filename ] )
# end
# end
# ------------------------------------------------------------------------------
def write( filename, txt ):
'''
Write txt to file.
'''
f = open( filename, 'w' )
f.write( txt )
f.close()
# end
# ------------------------------------------------------------------------------
def read( filename ):
'''
Read file and return its contents.
'''
f = open( filename, 'r' )
txt = f.read()
f.close()
return txt
# end
# ------------------------------------------------------------------------------
def join( *args ):
'''
Joins its arguments with space.
Ex: join( "foo", "bar", "baz" ) returns "foo bar baz"
'''
return ' '.join( args ).strip()
# end
# ------------------------------------------------------------------------------
def unique( lst ):
'''
Returns first of each unique item from lst, without changing order.
Ex: unique( [ 1, 2, 1, 2, 3, 4, 3 ] ) returns [ 1, 2, 3, 4 ]
'''
lst2 = []
for x in lst:
if (not x in lst2):
lst2.append( x )
return lst2
# end
# ------------------------------------------------------------------------------
def flatten( l, ltypes=(list, tuple) ):
'''
Flattens nested list or tuple.
Ex: flatten( [1, 2, [3, [4, 5], 6]] ) returns [1, 2, 3, 4, 5, 6]
see http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
'''
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# end
# ==============================================================================
# common code
# ------------------------------------------------------------------------------
# utilities prepended to some tests that call Fortran
fortran_mangling = r'''
#if defined( MKL_ILP64 ) || defined( ILP64 )
typedef long long myint;
#else
typedef int myint;
#endif
#if defined( LOWERCASE )
#define FORTRAN_NAME( lower, UPPER ) lower
#elif defined( UPPERCASE )
#define FORTRAN_NAME( lower, UPPER ) UPPER
#else
#define FORTRAN_NAME( lower, UPPER ) lower ## _
#endif
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C
#endif
'''
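# FORTRAN_NAME( lower, UPPER ) picks the platform's Fortran symbol convention:
# lowercase with -DLOWERCASE, uppercase with -DUPPERCASE, and lowercase with a
# trailing underscore by default.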
# ==============================================================================
# All the test codes are saved in g_test_files map, with filenames as keys.
# These are written to disk using write_test().
# ==============================================================================
g_test_files = {
# ==============================================================================
# compilers
# ------------------------------------------------------------------------------
'prog_cc.c': r'''
#include <stdio.h>
int main( int argc, char** argv )
{
int x = 1;
printf( "hello, x=%d\n", x );
return 0;
}
''',
# ------------------------------------------------------------------------------
'prog_cc_c99.c': r'''
#include <stdio.h>
#if __STDC_VERSION__ >= 199901L
// supports C99
#else
choke function();
#endif
int main( int argc, char** argv )
{
printf( "hello, __STDC_VERSION__ = %ld\n", __STDC_VERSION__ );
for (int i = 0; i < 10; ++i) {
// pass
}
return 0;
}
''',
# ------------------------------------------------------------------------------
'prog_cxx.cxx': r'''
#include <iostream>
class Simple {
public:
Simple(): x(1) {}
int x;
};
int main( int argc, char** argv )
{
Simple s;
std::cout << "hello, x=" << s.x << "\n";
return 0;
}
''',
# ------------------------------------------------------------------------------
'prog_cxx_cxx11.cxx': r'''
#include <iostream>
#if __cplusplus >= 201103L
// supports C++11
#else
choke function();
#endif
enum class Foo { a, b };
enum Bar : char { c, d };
int main( int argc, char** argv )
{
int *y = nullptr;
y = new int[3] { 0, 1, 2 };
auto x = 1.23;
std::cout << "hello, x=" << x << ", __cplusplus = " << __cplusplus << "\n";
return 0;
}
''',
# ------------------------------------------------------------------------------
'prog_fortran.f90': r'''
program main
implicit none
print '(a)', 'hello'
end program main
''',
# ------------------------------------------------------------------------------
'prog_fortran_f2008.f90': r'''
program main
use iso_c_binding
implicit none
integer(c_int), parameter :: x = 100
print '(a,i3)', 'hello', x
end program main
''',
# ------------------------------------------------------------------------------
'prog_fortran_f2008_cbind.f90': r'''
program main
use iso_c_binding
implicit none
interface
subroutine foo( x ) &
bind( C, name="foo" )
use iso_c_binding
integer(c_int), value :: x
end subroutine foo
end interface
integer(c_int), parameter :: x = 100
print '(a,i3)', 'hello', x
call foo( x )
end program main
''',
# ------------------------------------------------------------------------------
'prog_fortran_f2008_foo.c': r'''
#include <stdio.h>
void foo( int x )
{
printf( "%s( %d )\n", __func__, x );
}
''',
# ------------------------------------------------------------------------------
'prog_f77.f': r'''
program main
implicit none
print '(a)', 'hello'
end program main
''',
# ==============================================================================
# OpenMP
# ------------------------------------------------------------------------------
'omp_cc.c': r'''
#include <stdio.h>
#include <omp.h>
int main( int argc, char** argv )
{
int x[10], nt;
#pragma omp parallel
nt = omp_get_num_threads();
#pragma omp parallel for
for (int i = 0; i < 10; ++i) {
x[i] = i;
}
printf( "openmp x[0]=%d, nt=%d\n", x[0], nt );
return 0;
}
''',
# ------------------------------------------------------------------------------
'omp_fortran.f90': r'''
program main
use omp_lib
implicit none
integer :: x(10), nt, i
!$omp parallel
nt = omp_get_num_threads()
!$omp end parallel
!$omp parallel do
do i = 1, 10
x(i) = i
end do
print '(a,i3,a,i3)', 'openmp x(1)=', x(1), 'nt=', nt
end program
''',
# ------------------------------------------------------------------------------
'omp_f77.f': r'''
program main
implicit none
integer omp_get_num_threads
external omp_get_num_threads
integer x(10), nt, i
!$omp parallel
nt = omp_get_num_threads()
!$omp end parallel
!$omp parallel do
do i = 1, 10
x(i) = i
end do
print '(a,i3,a,i3)', 'openmp x(1)=', x(1), 'nt=', nt
end program
''',
# ------------------------------------------------------------------------------
'omp_depend_cc.c': r'''
#include <stdio.h>
void task( int n, int* x, int id )
{
for (int i = 0; i < n; ++i) {
x[i] = id + i;
}
}
int main( int argc, char** argv )
{
int n = 1000, x[1000] = { 0 };
int last = 1000;
for (int iter = 0; iter < 100; ++iter) {
// inserts last/10 tasks that update x
#pragma omp parallel
{
for (int i = 0; i <= last; i += 10) {
#pragma omp task depend(inout:x[0:n])
task( n, x, i );
}
}
// verify that updates worked
for (int i = 0; i < n; ++i) {
int expect = last + i;
if (x[i] != expect) {
printf( "openmp task depend failed, x[%d] = %d, expected %d (iter %d)\n",
i, x[i], expect, iter );
return 1;
}
}
}
printf( "openmp task depend seems ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'omp_depend_fortran.f90': r'''
program main
use omp_lib
implicit none
integer, parameter :: n = 1000, last = 1000
integer :: x(n), iter, i, expect
do iter = 1, 100
!! inserts last/10 tasks that update x
!$omp parallel
do i = 0, last, 10
!$omp task depend(inout:x(1:n))
call task( n, x, i )
!$omp end task
end do
!$omp end parallel
!! verify that updates worked
do i = 1, n
expect = last + i
if (x(i) .ne. expect) then
print '(a,i4,a,i4)', 'openmp task depend failed, x(', i, ') = ', x(i)
stop 1
endif
end do
end do
print '(a)', 'openmp task depend seems ok'
end program
subroutine task( n, x, id )
integer :: n, x(n), id
integer :: i
do i = 1, n
x(i) = id + i
end do
end subroutine task
''',
# ------------------------------------------------------------------------------
'omp_depend_f77.f': r'''
program main
implicit none
integer n, last
parameter (n = 1000, last = 1000)
integer x(n), iter, i, expect
do iter = 1, 100
!! inserts last/10 tasks that update x
!$omp parallel
do i = 0, last, 10
!$omp task depend(inout:x(1:n))
call task( n, x, i )
!$omp end task
end do
!$omp end parallel
!! verify that updates worked
do i = 1, n
expect = last + i
if (x(i) .ne. expect) then
print '(a,i4,a,i4)',
c 'openmp task depend failed, x(',
c i, ') = ', x(i)
stop 1
endif
end do
end do
print '(a)', 'openmp task depend seems ok'
end program
subroutine task( n, x, id )
integer n, x(n), id
integer i
do i = 1, n
x(i) = id + i
end do
end subroutine task
''',
# ------------------------------------------------------------------------------
'omp_priority_cc.c': r'''
#include <stdio.h>
void task( int n, int* x, int id )
{
for (int i = 0; i < n; ++i) {
x[i] = id + i;
}
}
int main( int argc, char** argv )
{
int n = 1000, x[1000] = { 0 };
#pragma omp parallel
{
#pragma omp task depend(inout:x[0:n]) priority(1)
task( n, x, 0 );
#pragma omp task depend(inout:x[0:n]) priority(2)
task( n, x, 100 );
}
for (int i = 0; i < n; ++i) {
if (x[i] != 100 + i) {
printf( "openmp task priority failed, x[%d] = %d, expected %d\n",
i, x[i], 100 + i );
return 1;
}
}
printf( "openmp task priority ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'omp_priority_fortran.f90': r'''
program main
use omp_lib
implicit none
integer, parameter :: n = 1000
integer :: x(n), i, expect
!$omp parallel
!! todo: verify priority syntax
!$omp task depend(inout:x(1:n)), priority(1)
call task( n, x, 0 );
!$omp end task
!$omp task depend(inout:x(1:n)), priority(2)
call task( n, x, 100 );
!$omp end task
!$omp end parallel
do i = 1, n
expect = 100 + i
if (x(i) .ne. expect) then
print '(a,i4,a,i4,a,i4)', 'openmp task priority failed, x(', &
i, ') = ', x(i), ', expected ', expect
stop 1
endif
end do
print '(a)', 'openmp task priority ok'
end program
subroutine task( n, x, id )
integer :: n, x(n), id
integer :: i
do i = 1, n
x(i) = id + i
end do
end subroutine
''',
# ------------------------------------------------------------------------------
'omp_priority_f77.f': r'''
program main
implicit none
integer n
parameter (n = 1000)
integer x(n), i, expect
!$omp parallel
!! todo: verify priority syntax
!$omp task depend(inout:x(1:n)), priority(1)
call task( n, x, 0 );
!$omp end task
!$omp task depend(inout:x(1:n)), priority(2)
call task( n, x, 100 );
!$omp end task
!$omp end parallel
do i = 1, n
expect = 100 + i
if (x(i) .ne. expect) then
print '(a,i4,a,i4,a,i4)',
c 'openmp task priority failed, x(',
c i, ') = ', x(i), ', expected ', expect
stop 1
endif
end do
print '(a)', 'openmp task priority ok'
end program
subroutine task( n, x, id )
integer n, x(n), id
integer i
do i = 1, n
x(i) = id + i
end do
end subroutine
''',
# ==============================================================================
# library
# ------------------------------------------------------------------------------
'blas.c': fortran_mangling + r'''
#include <stdio.h>
#define sgemm FORTRAN_NAME( sgemm, SGEMM )
EXTERN_C
void sgemm( const char* transA, const char* transB,
const myint* m, const myint* n, const myint* k,
const float* alpha,
const float* A, const myint* lda,
const float* B, const myint* ldb,
const float* beta,
float* C, const myint* ldc );
int main( int argc, char** argv )
{
// A is 4x2 embedded in 4x2 array
// B is 2x3 embedded in 3x3 array
// C is 4x3 embedded in 5x3 array
// D = alpha*A*B + beta*C
myint i, j;
myint m = 4, n = 3, k = 2, lda = 4, ldb = 3, ldc = 5;
float alpha = 2, beta = -1;
float A[ 5*2 ] = { 1, 2, 3, 4, 4, 1, 2, 3 };
float B[ 3*3 ] = { 1, 3, 0, 2, 1, 0, 3, 2, 0 };
float C[ 5*3 ] = { 1, 2, 3, 4, 0, 4, 1, 2, 3, 0, 3, 4, 1, 2, 0 };
float D[ 5*3 ] = { 25, 8, 15, 22, 0, 8, 9, 14, 19, 0, 19, 12, 25, 34 };
sgemm( "no", "no", &m, &n, &k, &alpha, A, &lda, B, &ldb, &beta, C, &ldc );
// check C == D
for (i = 0; i < ldc*n; ++i) {
if (C[i] != D[i]) {
printf( "sgemm failed: C[%d] %.2f != D[%d] %.2f\n",
i, C[i], i, D[i] );
return 1;
}
}
printf( "sgemm ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'cblas.c': r'''
#include <stdio.h>
#ifdef HAVE_MKL
#include <mkl_cblas.h>
#else
#include <cblas.h>
#endif
int main( int argc, char** argv )
{
// A is 4x2 embedded in 4x2 array
// B is 2x3 embedded in 3x3 array
// C is 4x3 embedded in 5x3 array
// D = alpha*A*B + beta*C
int i, j;
int m = 4, n = 3, k = 2, lda = 4, ldb = 3, ldc = 5;
float alpha = 2, beta = -1;
float A[ 5*2 ] = { 1, 2, 3, 4, 4, 1, 2, 3 };
float B[ 3*3 ] = { 1, 3, 0, 2, 1, 0, 3, 2, 0 };
float C[ 5*3 ] = { 1, 2, 3, 4, 0, 4, 1, 2, 3, 0, 3, 4, 1, 2, 0 };
float D[ 5*3 ] = { 25, 8, 15, 22, 0, 8, 9, 14, 19, 0, 19, 12, 25, 34 };
cblas_sgemm( CblasColMajor, CblasNoTrans, CblasNoTrans,
m, n, k, alpha, A, lda, B, ldb, beta, C, ldc );
// check C == D
for (i = 0; i < ldc*n; ++i) {
if (C[i] != D[i]) {
printf( "cblas_sgemm failed: C[%d] %.2f != D[%d] %.2f\n",
i, C[i], i, D[i] );
return 1;
}
}
printf( "cblas_sgemm ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'blas_return_float.c': fortran_mangling + r'''
#include <stdio.h>
#define sdot FORTRAN_NAME( sdot, SDOT )
EXTERN_C
float sdot( const myint* n,
const float* x, const myint* incx,
const float* y, const myint* incy );
int main( int argc, char** argv )
{
myint n = 5, ione = 1;
float x[5] = { 1, 2, 3, 4, 5 };
float y[5] = { 5, 4, 3, 2, 1 };
float expect = 35;
float result = sdot( &n, x, &ione, y, &ione );
myint okay = (result == expect);
printf( "sdot result %.2f, expect %.2f, %s\n",
result, expect, (okay ? "ok" : "failed"));
return ! okay;
}
''',
# ------------------------------------------------------------------------------
# f2c has sdot return double instead of float; appears in MacOS Accelerate
'blas_return_float_f2c.c': fortran_mangling + r'''
#include <stdio.h>
#define sdot FORTRAN_NAME( sdot, SDOT )
EXTERN_C
double sdot( const myint* n,
const float* x, const myint* incx,
const float* y, const myint* incy );
int main( int argc, char** argv )
{
myint n = 5, ione = 1;
float x[5] = { 1, 2, 3, 4, 5 };
float y[5] = { 5, 4, 3, 2, 1 };
float expect = 35;
float result = sdot( &n, x, &ione, y, &ione );
myint okay = (result == expect);
printf( "sdot result %.2f, expect %.2f, %s\n",
result, expect, (okay ? "ok" : "failed"));
return ! okay;
}
''',
# ------------------------------------------------------------------------------
'blas_return_complex.c': fortran_mangling + r'''
#include <stdio.h>
#include <complex.h>
#define zdotc FORTRAN_NAME( zdotc, ZDOTC )
EXTERN_C
double _Complex zdotc( const myint* n,
const double _Complex* x, const myint* incx,
const double _Complex* y, const myint* incy );
int main( int argc, char** argv )
{
myint n = 5, ione = 1;
double _Complex x[5] = { 1, 2, 3, 4, 5 };
double _Complex y[5] = { 5, 4, 3, 2, 1 };
double _Complex expect = 35;
double _Complex result = zdotc( &n, x, &ione, y, &ione );
myint okay = (result == expect);
printf( "zdotc result %.2f, expect %.2f, %s\n",
creal(result), creal(expect), (okay ? "ok" : "failed"));
return ! okay;
}
''',
# ------------------------------------------------------------------------------
# Intel Fortran complex number return convention
# see https://software.intel.com/en-us/node/528406
# "Calling BLAS Functions that Return the Complex Values in C/C++ Code"
'blas_return_complex_intel.c': fortran_mangling + r'''
#include <stdio.h>
#include <complex.h>
#define zdotc FORTRAN_NAME( zdotc, ZDOTC )
EXTERN_C
void zdotc( double _Complex* result,
const myint* n,
const double _Complex* x, const myint* incx,
const double _Complex* y, const myint* incy );
int main( int argc, char** argv )
{
myint n = 5, ione = 1;
double _Complex x[5] = { 1, 2, 3, 4, 5 };
double _Complex y[5] = { 5, 4, 3, 2, 1 };
double _Complex expect = 35;
double _Complex result;
zdotc( &result, &n, x, &ione, y, &ione );
myint okay = (result == expect);
printf( "zdotc result %.2f, expect %.2f, %s\n",
creal(result), creal(expect), (okay ? "ok" : "failed"));
return ! okay;
}
''',
# ------------------------------------------------------------------------------
'lapack.c': fortran_mangling + r'''
#include <stdio.h>
#define dpotrf FORTRAN_NAME( dpotrf, DPOTRF )
EXTERN_C
void dpotrf( const char* uplo, const myint* n,
double* A, const myint* lda,
myint* info );
int main( int argc, char** argv )
{
myint i, n = 2, info = 0;
double A[2*2] = { 16, 4, -1, 5 };
double L[2*2] = { 4, 1, -1, 2 };
double work[1];
dpotrf( "lower", &n, A, &n, &info );
if (info != 0) {
printf( "dpotrf failed: info %d\n", info );
return 1;
}
for (i = 0; i < n*n; ++i) {
if (A[i] != L[i]) {
printf( "dpotrf failed: A[%d] %.2f != L[%d] %.2f\n",
i, A[i], i, L[i] );
return 1;
}
}
printf( "dpotrf ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'lapacke.c': r'''
#include <stdio.h>
#ifdef HAVE_MKL
#include <mkl_lapacke.h>
#else
#include <lapacke.h>
#endif
int main( int argc, char** argv )
{
int i, n = 2, info = 0;
double A[2*2] = { 16, 4, -1, 5 };
double L[2*2] = { 4, 1, -1, 2 };
double work[1];
info = LAPACKE_dpotrf_work( LAPACK_COL_MAJOR, 'L', n, A, n );
if (info != 0) {
printf( "dpotrf failed: info %d\n", info );
return 1;
}
for (i = 0; i < n*n; ++i) {
if (A[i] != L[i]) {
printf( "dpotrf failed: A[%d] %.2f != L[%d] %.2f\n",
i, A[i], i, L[i] );
return 1;
}
}
printf( "dpotrf ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'lapacke_dlascl.c': r'''
#include <stdio.h>
#ifdef HAVE_MKL
#include <mkl_lapacke.h>
#else
#include <lapacke.h>
#endif
int main( int argc, char** argv )
{
int i, m = 4, n = 3, info = 0;
double A[4*3] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
double D[4*3] = { 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 };
info = LAPACKE_dlascl_work( LAPACK_COL_MAJOR, 'g', -1, -1, 1.0, 2.0, m, n, A, m );
if (info != 0) {
printf( "dlascl failed: info %d\n", info );
return 1;
}
for (i = 0; i < m*n; ++i) {
if (A[i] != D[i]) {
printf( "dlascl failed: A[%d] %.2f != D[%d] %.2f\n",
i, A[i], i, D[i] );
return 1;
}
}
printf( "dlascl ok\n" );
return 0;
}
''',
# ------------------------------------------------------------------------------
'lapacke_dlantr.c': r'''
#include <stdio.h>
#ifdef HAVE_MKL
#include <mkl_lapacke.h>
#else
#include <lapacke.h>
#endif
int main( int argc, char** argv )
{
int n = 3;
double A[3*3] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
double work[1];
double expect = 11;
double result = LAPACKE_dlantr_work( LAPACK_COL_MAJOR, '1', 'L', 'N', n, n, A, n, work );
int okay = (result == expect);
printf( "dlantr result %.2f, expect %.2f, %s\n",
result, expect, (okay ? "ok" : "failed"));
return ! okay;
}
''',
# ------------------------------------------------------------------------------
'lapacke_dlassq.c': fortran_mangling + r'''
#include <stdio.h>
#ifdef HAVE_MKL
#include <mkl_lapacke.h>
#else
#include <lapacke.h>
#endif
#define dlassq FORTRAN_NAME( dlassq, DLASSQ )
//EXTERN_C
//void dlassq( const myint* n, const double* x, const myint* incx,
// double* scale, double* sumsq );
int main( int argc, char** argv )
{
int n = 3, incx = 1;
double x[3] = { 1, 2, 2 };
    double scale = 0, sumsq = 1;  /* dlassq accumulates onto scale^2*sumsq; start at 0 */
double expect = 9;
LAPACKE_dlassq_work( n, x, incx, &scale, &sumsq );
//dlassq( &n, x, &incx, &scale, &sumsq );
double result = sumsq*scale*scale;
int okay = (result == expect);
printf( "dlassq result %.2f, expect %.2f, %s\n",
result, expect, (okay ? "ok" : "failed"));
return ! okay;
}
''',
} # end g_test_files
|
nilq/baby-python
|
python
|
from .code import Code
from .docstring import DocString
from .value import NamedValue, UnnamedValue
from .function import Function
from .proptypes import PropTypes
from .klass import Class
from .module import Module
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
"""
Documentation can be found at https://docs.python.org/2/distutils/index.html,
but usually you only need to do the following steps to publish a new package
version to PyPI::
# Update the version tag in this file (setup.py)
python setup.py sdist --formats=gztar
python setup.py bdist_wheel
twine upload dist/*
That's already it. You should get the following output written to your
command line::
Server response (200): OK
If you get errors, check the following things:
- Are you behind a proxy? --> Try not to be behind a proxy (I don't actually
know how to configure setup.py to be proxy-aware)
- Is your command correct? --> Double-check using the reference documentation
- Do you have all the necessary libraries to generate the wanted formats? -->
Reduce the set of formats or install libs
"""
version = __import__('cleanerversion').get_version()
setup(name='CleanerVersion',
version=version,
description='A versioning solution for relational data models using the '
'Django ORM',
long_description='CleanerVersion is a solution that allows you to read '
'and write multiple versions of an entry '
                       'to and from your relational database. It allows you '
                       'to keep track of modifications on an object '
'over time, as described by the theory of **Slowly '
'Changing Dimensions** (SCD) **- Type 2**. '
''
'CleanerVersion therefore enables a Django-based '
'Datawarehouse, which was the initial idea of '
'this package.',
author='Manuel Jeckelmann, Jean-Christophe Zulian, Brian King, '
'Andrea Marcacci',
author_email='engineering.sophia@swisscom.com',
license='Apache License 2.0',
packages=find_packages(exclude=['cleanerversion.settings.*']),
url='https://github.com/swisscom/cleanerversion',
package_data={'versions': ['static/js/*.js',
'templates/versions/*.html']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Topic :: Database',
'Topic :: System :: Archiving',
])
|
nilq/baby-python
|
python
|
# Program to automatically send an alert email when something goes down
# by calling the function sendMail()
# like this --> sendMail(["example@somewhere.com","somewhereelse@other.com"], "something is down")
import smtplib  # Necessary modules
from email.mime.text import MIMEText
xhoni = "xhp1@pitt.edu"
def sendMail(receivers, content, subject="Alert"):
    sender = "Floor2TowerB@gmail.com"
    msg = MIMEText(content)
    msg['Subject'] = subject
    msg['From'] = sender
    # Accept either a single address string or a list of addresses.
    if isinstance(receivers, list):
        msg['To'] = ",".join(receivers)
    else:
        msg['To'] = receivers
    try:
        server = smtplib.SMTP('smtp.gmail.com:587')
        server.starttls()
        server.login("Floor2TowerB", "Fl00r2Rules!!/")
        server.sendmail(sender, receivers, msg.as_string())
        server.quit()
    except Exception as err:
        print("UNABLE TO SEND EMAIL:", err)
|
nilq/baby-python
|
python
|
src = Split('''
src/mbedtls_net.c
src/mbedtls_alt.c
src/mbedtls_auth.c
''')
aos_global_config.set("DEBUG", "yes")
component = aos_component('itls', src)
component.add_global_includes('include')
component.add_macros('CONFIG_PLAT_AOS')
component.add_macros('CONFIG_KEY_OTP_ENABLED')
if aos_global_config.get("DEBUG")!="no":
component.add_global_macros("CONFIG_SSL_DEBUG")
if aos_global_config.board != 'linuxhost':
component.add_macros('LWIP_ENABLED')
component.add_prebuilt_libs('lib/' + component.get_arch() + '/libitls.a')
component.add_comp_deps('security/alicrypto')
component.add_comp_deps('security/id2')
|
nilq/baby-python
|
python
|
import os
import boto3
from os import walk
from datetime import datetime
client = boto3.client('s3')
# Create an S3 bucket named after today's date, e.g. "09-10-medium-datasets".
now = datetime.now()
date_time = now.strftime("%m-%d-medium-datasets")
response = client.create_bucket(
    Bucket=date_time
)
print(response)
s3 = boto3.resource('s3')
# Upload every file under the results subdirectories to the new bucket.
mypath = '/Genex/genex/data/results/'
for (dirpath, dirnames, filenames) in walk(mypath):
    for direc in dirnames:
        for (dirpath2, _, filenames2) in walk(os.path.join(mypath, direc)):
            for files in filenames2:
                s3.meta.client.upload_file(os.path.join(dirpath2, files), date_time, files)
|
nilq/baby-python
|
python
|
from clinical_records import settings
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, Layout, Submit
from django import forms
from django.forms import DateField
from django.forms import ModelForm
from patients.models import CIE10
from patients.models import ClinicalHistory
from patients.models import MedicalFormulas
from patients.models import MedicalHistory
from patients.models import PhysicalExam
from patients.models import SystemsReview
from patients.models import Patient
class PatientForm(ModelForm):
birth_date = DateField(input_formats=settings.DATE_INPUT_FORMATS)
helper = FormHelper()
helper.form_class = 'form-inline'
helper.form_tag = False
helper.layout = Layout(
Div(
Div('names', css_class='column_two'),
Div('last_names', css_class='column_two'),
css_class='row'),
Div(
Div('document_type', css_class='column_two'),
Div('document', css_class='column_two'),
css_class='row'),
Div(
Div('birth_date', css_class='column_two'),
Div('birth_city', css_class='column_two'),
css_class='row'),
Div(
Div('address', css_class='column_two'),
Div('phone', css_class='column_two'),
css_class='row'),
Div(
Div('scholarship', css_class='column_two'),
Div('profesion', css_class='column_two'),
css_class='row'),
Div(
Div('gender', css_class='column_three'),
Div('civil_status', css_class='column_three'),
Div('origin', css_class='column_three'),
css_class='row'),
)
class Meta(object):
model = Patient
fields = '__all__'
widgets = {
'birth_date': forms.DateInput(format=('%Y-%m-%d'), attrs={'type': 'date'})
}
class MedicalFormulasForm(ModelForm):
helper = FormHelper()
helper.form_class = 'form-inline'
helper.form_tag = False
helper.layout = Layout(
Div(
Div('medicines', css_class='column_one'),
css_class='row'),
Div(
Div('paraclinicals', css_class='column_one'),
css_class='row'),
Div(
Div('remission', css_class='column_one'),
css_class='row'),
Div(
Div('conduct', css_class='column_one'),
css_class='row'),
)
class Meta(object):
model = MedicalFormulas
fields = '__all__'
class MedicalHistoryForm(ModelForm):
helper = FormHelper()
helper.form_class = 'form-inline'
helper.layout = Layout(
Div(
Div('companion', css_class='column_two'),
Div('phone_companion', css_class='column_two'),
css_class='row'),
Div(
Div('referred_by', css_class='column_two'),
Div('sgss', css_class='column_two'),
css_class='row'),
Div(
Div('created_at_date', css_class='column_two'),
Div('created_at_hour', css_class='column_two'),
css_class='row'),
Div(
Div('reason_consultation', css_class='column_one'),
css_class='row'),
Div(
Div('current_illness', css_class='column_one'),
css_class='row')
)
class Meta(object):
model = MedicalHistory
fields = ['created_at_date', 'created_at_hour', 'companion',
'referred_by', 'phone_companion',
'reason_consultation', 'current_illness', 'sgss']
class ClinicalHistoryForm(ModelForm):
helper = FormHelper()
helper.form_class = 'form-inline'
helper.form_tag = False
helper.layout = Layout(
Div(
Div('pathological', css_class='column_one'),
css_class='row'),
Div(
Div('surgical', css_class='column_one'),
css_class='row'),
Div(
Div('traumatic', css_class='column_one'),
css_class='row'),
Div(
Div('poisoning', css_class='column_one'),
css_class='row'),
Div(
Div('smoking', css_class='column_one'),
css_class='row'),
Div(
Div('liqueur', css_class='column_one'),
css_class='row'),
Div(
Div('psychoactive', css_class='column_one'),
css_class='row'),
Div(
Div('permanent_medication', css_class='column_one'),
css_class='row'),
Div(
Div('allergic', css_class='column_one'),
css_class='row'),
Div(
Div('immunological', css_class='column_one'),
css_class='row'),
Div(
Div('transfusions', css_class='column_one'),
css_class='row'),
Div(
Div('obstetric', css_class='hidden'),
Div('men', css_class='column_ten'),
Div('Cic', css_class='column_ten'),
Div('FUM', css_class='column_ten'),
Div('G', css_class='column_ten'),
Div('P', css_class='column_ten'),
Div('A', css_class='column_ten'),
Div('C', css_class='column_ten'),
Div('FUP', css_class='column_ten'),
Div('Menop', css_class='column_ten'),
css_class='row'),
Div(
Div('relatives', css_class='column_one'),
css_class='row'),
)
class Meta(object):
model = ClinicalHistory
fields = '__all__'
widgets = {
'created_at_date': forms.DateInput(format=('%Y-%m-%d'), attrs={'type': 'date'})
}
class PhysicalExamForm(ModelForm):
    diagnostics_images_aux = forms.MultipleChoiceField(label='Diagnosis')
helper = FormHelper()
helper.form_class = 'form-inline'
helper.form_tag = False
helper.layout = Layout(
Div(
Div('weight', css_class='column_ten'),
Div('tall', css_class='column_ten'),
Div('IM', css_class='column_ten'),
Div('TA', css_class='column_ten'),
Div('Fc', css_class='column_ten'),
Div('minfr', css_class='column_ten'),
Div('mint', css_class='column_ten'),
Div('so', css_class='column_ten'),
Div('pe', css_class='column_ten'),
Div('pa', css_class='column_ten'),
css_class='row'),
Div(
Div('mental_sphere', css_class='column_one'),
css_class='row'),
Div(
Div('head', css_class='column_one'),
css_class='row'),
Div(
Div('orl', css_class='column_one'),
css_class='row'),
Div(
Div('neck_thyroid', css_class='column_one'),
css_class='row'),
Div(
Div('chest', css_class='column_one'),
css_class='row'),
Div(
Div('cardiovascular', css_class='column_one'),
css_class='row'),
Div(
Div('respiratory', css_class='column_one'),
css_class='row'),
Div(
Div('digestive', css_class='column_one'),
css_class='row'),
Div(
Div('mammary_gland', css_class='column_one'),
css_class='row'),
Div(
Div('genitourinary', css_class='column_one'),
css_class='row'),
Div(
Div('snc_peripheral', css_class='column_one'),
css_class='row'),
Div(
Div('osteomuscular', css_class='column_one'),
css_class='row'),
Div(
Div('skin_faneras', css_class='column_one'),
css_class='row'),
Div(
Div('senses_organs', css_class='column_one'),
css_class='row'),
Div(
Div('diagnostics_images_aux', css_class='column_one'),
css_class='row'),
)
class Meta(object):
model = PhysicalExam
fields = ['weight', 'tall', 'IM', 'TA', 'Fc', 'minfr', 'mint', 'so',
'pe', 'pa', 'mental_sphere', 'head', 'orl', 'neck_thyroid',
'chest', 'cardiovascular', 'respiratory', 'digestive',
'mammary_gland', 'genitourinary', 'snc_peripheral',
'osteomuscular', 'skin_faneras', 'senses_organs']
    def clean_diagnostics_images_aux(self):
        # Match the declared field name so Django actually calls this hook.
        diagnostics = self.cleaned_data['diagnostics_images_aux']
        return diagnostics
class SystemsReviewForm(ModelForm):
helper = FormHelper()
helper.form_class = 'form-inline'
helper.form_tag = False
helper.layout = Layout(
Div(
Div('skin_faneras', css_class='column_one'),
css_class='row'),
Div(
Div('orl', css_class='column_one'),
css_class='row'),
Div(
Div('respiratory', css_class='column_one'),
css_class='row'),
Div(
Div('cardiovascular', css_class='column_one'),
css_class='row'),
Div(
Div('digestive', css_class='column_one'),
css_class='row'),
Div(
Div('genitourinary', css_class='column_one'),
css_class='row'),
Div(
Div('snc_peripheral', css_class='column_one'),
css_class='row'),
Div(
Div('endocrine', css_class='column_one'),
css_class='row'),
Div(
Div('locomotor', css_class='column_one'),
css_class='row'),
Div(
Div('hematic_lymphatic', css_class='column_one'),
css_class='row'),
Div(
Div('senses_organs', css_class='column_one'),
css_class='row')
)
class Meta(object):
model = SystemsReview
fields = '__all__'
|
nilq/baby-python
|
python
|
import os
import unittest
from io import StringIO
from datetime import timedelta
from urllib.parse import quote
from django.conf import settings
from django.contrib import auth
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from oauth2_provider.models import AccessToken
from common.testing import BrowserTestCase, get_or_create_user, SmokeTestCase
from data_import.models import DataType
from open_humans.models import Member
from .models import (
DataRequestProject,
DataRequestProjectMember,
OnSiteDataRequestProject,
OAuth2DataRequestProject,
ProjectDataFile,
)
from .testing import DirectSharingMixin, DirectSharingTestsMixin
UserModel = auth.get_user_model()
@override_settings(SSLIFY_DISABLE=True)
class DirectSharingOnSiteTests(DirectSharingMixin, DirectSharingTestsMixin, TestCase):
"""
Tests for private sharing on-site projects.
"""
@classmethod
def setUpClass(cls):
super(DirectSharingOnSiteTests, cls).setUpClass()
cls.join_url = "/direct-sharing/projects/on-site/join/abc-2/"
cls.authorize_url = "/direct-sharing/projects/on-site/authorize/abc-2/"
user1 = get_or_create_user("user1")
cls.member1, _ = Member.objects.get_or_create(user=user1)
email1 = cls.member1.primary_email
email1.verified = True
email1.save()
def setUp(self):
self.member1_project = DataRequestProject.objects.get(slug="abc-2")
self.member1_project.save()
def test_join_if_logged_out(self):
response = self.client.get(self.join_url)
self.assertRedirects(response, "/account/login/")
def test_join_if_logged_in(self):
login = self.client.login(username="user1", password="user1")
self.assertTrue(login)
response = self.client.get(self.join_url)
self.assertEqual(response.status_code, 200)
def test_authorize_if_logged_out(self):
response = self.client.get(self.authorize_url)
self.assertRedirects(response, "/account/login/")
def test_authorize_if_logged_in_and_not_joined(self):
login = self.client.login(username="user1", password="user1")
self.assertTrue(login)
self.update_member(joined=False, authorized=False)
response = self.client.get(self.authorize_url)
self.assertRedirects(response, self.join_url)
def test_join_if_already_joined(self):
login = self.client.login(username="user1", password="user1")
self.assertTrue(login)
self.update_member(joined=True, authorized=False)
response = self.client.get(self.join_url)
self.assertRedirects(response, self.authorize_url)
def test_authorize_if_already_joined(self):
login = self.client.login(username="user1", password="user1")
self.assertTrue(login)
self.update_member(joined=True, authorized=False)
response = self.client.get(self.authorize_url)
self.assertEqual(response.status_code, 200)
def test_join_if_already_authorized(self):
login = self.client.login(username="user1", password="user1")
self.assertTrue(login)
self.update_member(joined=True, authorized=True)
response = self.client.get(self.join_url)
self.assertRedirects(response, self.authorize_url)
def test_authorize_if_already_authorized(self):
login = self.client.login(username="user1", password="user1")
self.assertTrue(login)
self.update_member(joined=True, authorized=True)
response = self.client.get(self.authorize_url)
self.assertEqual(b"Project previously authorized." in response.content, True)
def test_message_member(self):
member = self.update_member(joined=True, authorized=True)
response = self.client.post(
"/api/direct-sharing/project/message/?access_token=def456",
data={
"project_member_ids": [member.project_member_id],
"subject": "Sending a good test email",
"message": "The content of this email\nis a test.\n",
},
)
response_json = response.json()
self.assertEqual(response_json, "success")
def test_message_deauthorized_member(self):
member = self.update_member(joined=False, authorized=False, revoked=True)
response = self.client.post(
"/api/direct-sharing/project/message/?access_token=def456",
data={
"project_member_ids": [member.project_member_id],
"subject": "Sending a bad test email",
"message": "The content of this email\nis a test.\n",
},
)
response_json = response.json()
self.assertIn("errors", response_json)
self.assertIn("project_member_ids", response_json["errors"])
self.assertIn(
"Invalid project member ID",
response_json["errors"]["project_member_ids"][0],
)
def test_remove_member(self):
projmember = self.update_member(joined=True, authorized=True)
response = self.client.post(
"/api/direct-sharing/project/remove-members/?access_token=def456",
data={"project_member_ids": [projmember.project_member_id]},
)
response_json = response.json()
self.assertEqual(response_json, "success")
# Get a fresh copy before checking.
projmember = DataRequestProjectMember.objects.get(id=projmember.id)
self.assertEqual(projmember.authorized, False)
@override_settings(SSLIFY_DISABLE=True)
class DirectSharingOAuth2Tests(DirectSharingMixin, DirectSharingTestsMixin, TestCase):
"""
Tests for private sharing OAuth2 projects.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.authorize_url = (
"/direct-sharing/projects/oauth2/authorize/"
"?client_id=test-key&response_type=code"
)
user1 = get_or_create_user("bacon")
cls.member1, _ = Member.objects.get_or_create(user=user1)
email1 = cls.member1.primary_email
email1.verified = True
email1.save()
def setUp(self):
# Hacky way of inserting requested_sources, but it seems django doesn't
# want to read this from the test fixture
self.member1_project = OAuth2DataRequestProject.objects.get(slug="abc")
project_2 = DataRequestProject.objects.get(slug="abc-2")
self.member1_project.requested_sources.add(project_2)
self.member1_project.save()
self.access_token = AccessToken(
application=self.member1_project.application,
user=self.member1.user,
token="test-token-1",
expires=timezone.now() + timedelta(days=1),
scope="read",
)
self.access_token.save()
self.access_token_expired = AccessToken(
application=self.member1_project.application,
user=self.member1.user,
token="test-token-2",
expires=timezone.now() - timedelta(days=1),
scope="read",
)
self.access_token_expired.save()
@unittest.skip("Hitting django bug #27398")
def test_authorize_if_logged_out(self):
response = self.client.get(self.authorize_url)
self.assertRedirects(
response,
"/account/login/oauth2/?connection=abc&next={}".format(
quote(self.authorize_url, safe="")
),
)
def test_authorize_if_logged_in(self):
login = self.client.login(username="bacon", password="asdfqwerty")
self.assertTrue(login)
self.update_member(joined=False, authorized=False)
response = self.client.get(self.authorize_url)
self.assertEqual(response.status_code, 200)
def test_authorize_if_already_authorized(self):
login = self.client.login(username="bacon", password="asdfqwerty")
self.assertTrue(login)
self.update_member(joined=True, authorized=True)
response = self.client.get(self.authorize_url)
self.assertTrue(b"Project previously authorized." in response.content)
@unittest.skipIf((not settings.AWS_STORAGE_BUCKET_NAME), "AWS not set up.")
def test_exchange_member(self):
self.update_member(joined=True, authorized=True)
project_member = DataRequestProjectMember.objects.get(
project=self.member1_project, member=self.member1
)
response = self.client.get(
"/api/direct-sharing/project/exchange-member/"
"?access_token={}".format(self.access_token)
)
json = response.json()
self.assertTrue(json["project_member_id"] == project_member.project_member_id)
self.assertTrue(json["username"] == "bacon")
self.assertTrue(len(json["sources_shared"]) == 2)
self.assertTrue("direct-sharing-2" in json["sources_shared"])
self.assertFalse("direct-sharing-1" in json["sources_shared"])
datafile_sources = [x["source"] for x in json["data"]]
self.assertIn("direct-sharing-2", datafile_sources)
# Project sees its own data.
self.assertIn("direct-sharing-2", datafile_sources)
# Unauthorized data not available.
self.assertNotIn("direct-sharing-3", datafile_sources)
def test_exchange_member_token_expired(self):
self.update_member(joined=True, authorized=True)
response = self.client.get(
"/api/direct-sharing/project/exchange-member/"
"?access_token={}".format(self.access_token_expired)
)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.json()["detail"], "Expired token.")
@unittest.skip("Obsolete")
def test_oauth2_authorize(self):
login = self.client.login(username="bacon", password="asdfqwerty")
self.assertTrue(login)
response = self.client.get(self.authorize_url)
data = {
"redirect_uri": "http://localhost:8001/oauth-authorize",
"scope": "read",
"client_id": "test-key",
"state": "",
"response_type": "code",
"allow": "Authorize project",
}
response = self.client.post(self.authorize_url, data=data)
self.assertIn("http://localhost:8001/oauth-authorize?code=", response.url)
code = response.url.replace(
"http://localhost:8001/oauth-authorize?code=", ""
).replace("&origin=external", "")
data = {
"client_id": "test-key",
"client_secret": "test-secret",
"code": code,
"grant_type": "authorization_code",
"redirect_uri": "http://localhost:8001/oauth-authorize",
}
response = self.client.post("/oauth2/token/", data=data)
json = response.json()
self.assertIn("access_token", json)
self.assertIn("refresh_token", json)
self.assertEqual(json["expires_in"], 36000)
self.assertEqual(json["scope"], "read")
self.assertEqual(json["token_type"], "Bearer")
@unittest.skipIf((not settings.AWS_STORAGE_BUCKET_NAME), "AWS not set up.")
def test_member_access_token(self):
member = self.update_member(joined=True, authorized=True)
datatypes = self.insert_datatypes()
self.member1_project.registered_datatypes.clear()
self.member1_project.registered_datatypes.add(
datatypes.get(name="all your base")
)
self.member1_project.registered_datatypes.add(
datatypes.get(name="are belong to us")
)
self.member1_project.save()
response = self.client.post(
"/api/direct-sharing/project/files/upload/?access_token={}".format(
self.access_token
),
data={
"project_member_id": member.project_member_id,
"datatypes": '["all your base", "are belong to us"]',
"metadata": (
'{"description": "Test description...", '
'"tags": ["tag 1", "tag 2", "tag 3"]}'
),
"data_file": StringIO("just testing..."),
},
)
response_json = response.json()
self.assertIn("id", response_json)
self.assertEqual(response.status_code, 201)
self.assertNotIn("errors", response_json)
data_file = ProjectDataFile.objects.get(
id=response_json["id"],
direct_sharing_project=self.member1_project,
user=self.member1.user,
)
self.assertEqual(data_file.metadata["description"], "Test description...")
self.assertEqual(data_file.metadata["tags"], ["tag 1", "tag 2", "tag 3"])
self.assertEqual(data_file.file.readlines(), [b"just testing..."])
def test_message_member(self):
self.update_member(joined=True, authorized=True)
response = self.client.post(
"/api/direct-sharing/project/message/?access_token={}".format(
self.access_token
),
data={
"subject": "Sending a good test email",
"message": "The content of this email\nis a test.\n",
},
)
response_json = response.json()
self.assertEqual(response_json, "success")
def test_message_expired_token(self):
self.update_member(joined=True, authorized=True)
response = self.client.post(
"/api/direct-sharing/project/message/?access_token={}".format(
self.access_token_expired
),
data={
"subject": "Sending a bad test email",
"message": "The content of this email\nis a test.\n",
},
)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.json()["detail"], "Expired token.")
def test_remove_member(self):
projmember = self.update_member(joined=True, authorized=True)
response = self.client.post(
"/api/direct-sharing/project/remove-members/?"
"access_token={}".format(self.access_token)
)
response_json = response.json()
self.assertEqual(response_json, "success")
# Get a fresh copy before checking.
projmember = DataRequestProjectMember.objects.get(id=projmember.id)
self.assertEqual(projmember.authorized, False)
@override_settings(SSLIFY_DISABLE=True)
class DirectSharingOAuth2Tests2(DirectSharingMixin, TestCase):
"""
Another OAuth2 project to test alternate setup situations.
- master token expired
- all_sources_access true
Because the master token is expired, skip DirectSharingTestsMixin as these
are expected to fail.
"""
@classmethod
def setUpClass(cls):
super(DirectSharingOAuth2Tests2, cls).setUpClass()
cls.authorize_url = (
"/direct-sharing/projects/oauth2/authorize/"
"?client_id=test-key-2&response_type=code"
)
user1 = get_or_create_user("bacon")
cls.member1, _ = Member.objects.get_or_create(user=user1)
cls.member1.save()
cls.member1_project = OAuth2DataRequestProject.objects.get(slug="abc3")
cls.member1_project.save()
email1 = cls.member1.primary_email
cls.access_token = AccessToken(
application=cls.member1_project.application,
user=user1,
token="test-token-1",
expires=timezone.now() + timedelta(days=1),
scope="read",
)
cls.access_token.save()
email1.verified = True
email1.save()
@unittest.skipIf((not settings.AWS_STORAGE_BUCKET_NAME), "AWS not set up.")
def test_exchange_member_all_sources(self):
self.update_member(joined=True, authorized=True)
project_member = DataRequestProjectMember.objects.get(
project=self.member1_project, member=self.member1
)
response = self.client.get(
"/api/direct-sharing/project/exchange-member/" "?access_token=test-token-1"
)
json = response.json()
self.assertTrue(json["project_member_id"] == project_member.project_member_id)
self.assertTrue(json["username"] == "bacon")
self.assertEqual(len(json["sources_shared"]), 0)
datafile_sources = [x["source"] for x in json["data"]]
self.assertIn("direct-sharing-1", datafile_sources)
def test_message_expired_master_token(self):
member = self.update_member(joined=True, authorized=True)
response = self.client.post(
"/api/direct-sharing/project/message/?access_token=ghi789",
data={
"project_member_ids": [member.project_member_id],
"subject": "Sending a good test email",
"message": "The content of this email\nis a test.\n",
},
)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.json()["detail"], "Expired token.")
class SmokeTests(SmokeTestCase):
"""
A simple GET test for all of the simple URLs in the site.
"""
on_site_master_token = "def456"
oauth2_master_token = "abc123"
authenticated_urls = [
"/direct-sharing/projects/manage/",
"/direct-sharing/projects/oauth2/abc/",
"/direct-sharing/projects/on-site/abc-2/",
        # The tests below appear to fail due to a missing 'css_classes'
        # attribute, probably from the as_bootstrap templatetag, which may
        # not be Django 2 compatible.
# '/direct-sharing/projects/oauth2/create/',
# '/direct-sharing/projects/oauth2/update/abc/',
# '/direct-sharing/projects/message/abc/',
# '/direct-sharing/projects/remove-members/abc/',
# '/direct-sharing/projects/on-site/update/abc-2/',
# '/direct-sharing/projects/on-site/create/',
]
authenticated_or_anonymous_urls = [
"/direct-sharing/overview/",
"/api/direct-sharing/project/?access_token={0}".format(on_site_master_token),
"/api/direct-sharing/project/?access_token={0}".format(oauth2_master_token),
"/api/direct-sharing/project/members/?access_token={0}".format(
on_site_master_token
),
"/api/direct-sharing/project/members/?access_token={0}".format(
oauth2_master_token
),
]
fixtures = SmokeTestCase.fixtures + ["private_sharing/fixtures/test-data.json"]
class BrowserTests(BrowserTestCase):
"""
Browser tests of direct sharing functionality.
"""
fixtures = BrowserTestCase.fixtures + ["private_sharing/fixtures/test-data.json"]
@unittest.skipIf(settings.NOBROWSER, "skipping browser tests")
def test_join_and_authorize(self):
driver = self.driver
self.login()
driver.get(
self.live_server_url + "/direct-sharing/projects/on-site/join/abc-2/"
)
self.assertEqual(
"Join 'abc 2'", driver.find_element_by_css_selector("h3.page-header").text
)
driver.find_element_by_id("accept").click()
self.assertEqual(
"Authorize 'abc 2'",
driver.find_element_by_css_selector("h3.page-header").text,
)
driver.find_element_by_id("authorize-project").click()
        self.assertIn(
            'You have successfully joined the project "abc 2".',
            driver.find_element_by_css_selector(".message.success").text,
        )
@unittest.skipIf(settings.NOBROWSER, "skipping browser tests")
def test_create_on_site(self):
driver = self.driver
self.login()
driver.get(self.live_server_url + "/direct-sharing/projects/manage/")
driver.find_element_by_link_text(
"Create a new on-site data request project"
).click()
driver.find_element_by_id("id_is_study_1").click()
driver.find_element_by_css_selector("div.radio > label").click()
driver.find_element_by_id("id_name").clear()
driver.find_element_by_id("id_name").send_keys("Test Study")
driver.find_element_by_id("id_leader").clear()
driver.find_element_by_id("id_leader").send_keys("Beau Gunderson")
driver.find_element_by_id("id_organization").clear()
driver.find_element_by_id("id_organization").send_keys("N/A")
driver.find_element_by_id("id_is_academic_or_nonprofit_1").click()
driver.find_element_by_id("id_contact_email").clear()
driver.find_element_by_id("id_contact_email").send_keys(
"beau@beaugunderson.com"
)
driver.find_element_by_id("id_info_url").clear()
driver.find_element_by_id("id_info_url").send_keys("https://beaugunderson.com/")
driver.find_element_by_id("id_short_description").clear()
driver.find_element_by_id("id_short_description").send_keys("Just testing!")
driver.find_element_by_id("id_long_description").clear()
driver.find_element_by_id("id_long_description").send_keys("Just testing!")
driver.find_element_by_id("id_badge_image").clear()
driver.find_element_by_id("id_badge_image").send_keys(
os.path.abspath("static/images/open_humans_logo_only.png")
)
        # The original used Java-style Selenium calls; this is the Python equivalent.
        from selenium.webdriver.common.keys import Keys
        driver.find_element_by_id("id_request_sources_access_1").send_keys(
            Keys.PAGE_DOWN
        )
driver.find_element_by_id("id_request_sources_access_1").click()
driver.find_element_by_id("id_request_sources_access_2").click()
driver.find_element_by_id("id_request_sources_access_3").click()
driver.find_element_by_id("id_request_sources_access_4").click()
driver.find_element_by_id("id_request_sources_access_5").click()
driver.find_element_by_id("id_request_sources_access_6").click()
driver.find_element_by_id("id_request_sources_access_7").click()
driver.find_element_by_id("id_request_sources_access_8").click()
driver.find_element_by_id("id_request_sources_access_9").click()
driver.find_element_by_id("id_request_sources_access_10").click()
driver.find_element_by_id("id_request_sources_access_11").click()
driver.find_element_by_id("id_request_sources_access_12").click()
driver.find_element_by_id("id_request_sources_access_13").click()
driver.find_element_by_id("id_request_username_access_1").click()
driver.find_element_by_id("id_consent_text").clear()
driver.find_element_by_id("id_consent_text").send_keys(
"## Consent form\n\n- list item 1\n- list item 2\n- list item 3"
)
driver.find_element_by_id("id_post_sharing_url").clear()
driver.find_element_by_id("id_post_sharing_url").send_keys(
"https://beaugunderson.com/?id=PROJECT_MEMBER_ID"
)
driver.find_element_by_id("create-project").click()
self.assertEqual(
"Test Study",
driver.find_element_by_xpath(
"//table[@id='on-site-projects']/tbody/tr[1]/td"
).text,
)
@unittest.skipIf(settings.NOBROWSER, "skipping browser tests")
def test_returned_data_description_activity(self):
driver = self.driver
driver.get(self.live_server_url + "/")
prefix = '//div[@id="activity-direct-sharing-1"]'
leader = driver.find_element_by_xpath(
'{}//div[@class="leader"]'.format(prefix)
).text
self.assertIn("abc", leader)
organization = driver.find_element_by_xpath(
'{}//div[@class="leader"]'.format(prefix)
).text
self.assertIn("abc", organization)
description = driver.find_element_by_xpath(
'{}//p[@class="activity-description"]'.format(prefix)
).text
self.assertIn("abc", description)
prefix = '//div[@id="activity-direct-sharing-2"]'
leader = driver.find_element_by_xpath(
'{}//div[@class="leader"]'.format(prefix)
).text
self.assertIn("xyz", leader)
organization = driver.find_element_by_xpath(
'{}//div[@class="leader"]'.format(prefix)
).text
self.assertIn("abcxyz", organization)
description = driver.find_element_by_xpath(
'{}//p[@class="activity-description"]'.format(prefix)
).text
self.assertIn("def", description)
|
nilq/baby-python
|
python
|
import pytest
from pyroll.core import Transport
class Specs:
@Transport.hookspec
def hook1(self, transport):
""""""
Transport.plugin_manager.add_hookspecs(Specs())
def test_hook_not_present():
transport = Transport(time=1)
with pytest.raises(AttributeError):
print(transport.does_not_exist)
def test_hook_result_none():
transport = Transport(time=1)
with pytest.raises(AttributeError):
print(transport.hook1)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from delivery import deli, tasks
urlpatterns = [
url(r'^$', deli.delivery_list, name='delivery'),
url(r'^add/$', deli.delivery_add, name='delivery_add'),
    url(r'^list/$', deli.delivery_list, name='delivery_list'),
url(r'^status/(?P<project_id>\d+)/$', deli.status, name='delivery_status'),
url(r'^edit/(?P<project_id>\d+)/$', deli.delivery_edit, name='delivery_edit'),
url(r'^log/(?P<project_id>\d+)/$', deli.log, name='delivery_log'),
url(r'^log2/(?P<project_id>\d+)/$', deli.log2, name='delivery_log2'),
url(r'^deploy/(?P<project_id>\d+)/$', deli.delivery_deploy, name='delivery_deploy'),
url(r'^taskstop/(?P<project_id>\d+)/$', deli.task_stop, name='delivery_taskstop'),
url(r'^delete/$', deli.delivery_del, name='delivery_del'),
]
|
nilq/baby-python
|
python
|
from distutils.core import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.md')) as f:
CHANGES = f.read()
setup(name='django-cli',
packages=['django-cli'],
version='0.0.1',
description='Command line interface for sensible Django projects.',
long_description=README + '\n\n' + CHANGES,
author='Brandon J. Schwartz',
author_email='brandon@boomajoom.com',
url='https://github.com/brandonjschwartz/django-cli',
download_url='https://github.com/brandonjschwartz/django-cli/tarball/0.0.1',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Utilities'
],
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from irc3.testing import BotTestCase, MagicMock
from irc3.plugins.autojoins import AutoJoins
class TestAutojoin(BotTestCase):
def test_autojoin_without_diese(self):
bot = self.callFTU(autojoins=['foo'])
bot.notify('connection_made')
bot.dispatch(':hobana.freenode.net 376 irc3 :End of /MOTD command.')
self.assertSent(['JOIN #foo'])
bot.dispatch(':hobana.freenode.net 376 irc3 :End of /MOTD command.')
self.assertSent([])
def test_nomotd_events_removed(self):
bot = self.callFTU(autojoins=['#foo'])
bot.notify('connection_made')
bot.dispatch(':hobana.freenode.net 422 irc3 :No MOTD.')
self.assertSent(['JOIN #foo'])
bot.dispatch(':hobana.freenode.net 422 irc3 :No MOTD.')
self.assertSent([])
def test_autojoin_nomotd(self):
bot = self.callFTU(autojoins=['#foo'])
bot.notify('connection_made')
bot.dispatch(':hobana.freenode.net 422 irc3 :No MOTD.')
self.assertSent(['JOIN #foo'])
def test_autojoin(self):
bot = self.callFTU(autojoins=['#foo'])
bot.notify('connection_made')
bot.dispatch(':hobana.freenode.net 376 irc3 :End of /MOTD command.')
self.assertSent(['JOIN #foo'])
bot.dispatch(':kicker!k@k KICK #foo irc3')
self.assertSent(['JOIN #foo'])
bot.dispatch(':kicker!k@k KICK #foo irc3 :bastard!')
self.assertSent(['JOIN #foo'])
plugin = bot.get_plugin(AutoJoins)
self.assertEqual(plugin.handles, {})
bot.dispatch(':server 473 irc3 #foo :You are banned')
self.assertSent(['JOIN #foo'])
self.assertIn('#foo', plugin.handles)
self.assertEqual(2, plugin.handles['#foo'][0])
# assume it doesn't break when a timeout is set
bot.dispatch(':server 473 irc3 #foo :You are banned')
self.assertSent(['JOIN #foo'])
self.assertEqual(8, plugin.handles['#foo'][0])
bot.dispatch(':kicker!k@k KICK #foo irc3 :bastard!')
self.assertSent(['JOIN #foo'])
self.assertNotIn('#foo', plugin.handles)
bot.notify('connection_lost')
def test_autojoin_delay(self):
bot = self.callFTU(autojoins=['#foo'], autojoin_delay=3)
bot.loop.call_later = MagicMock()
bot.notify('connection_made')
bot.dispatch(':hobana.freenode.net 422 irc3 :No MOTD.')
self.assertTrue(bot.loop.call_later.called)
def test_autojoin_reload(self):
bot = self.callFTU(autojoins=['#foo', '#bar'])
bot.notify('connection_made')
bot.dispatch(':hobana.freenode.net 422 irc3 :No MOTD.')
self.assertSent(['JOIN #foo', 'JOIN #bar'])
bot.config['autojoins'] = ['#foo', '#foo2']
bot.reload()
self.assertSent(['JOIN #foo2', 'PART #bar'])
|
nilq/baby-python
|
python
|
"""
My fun little python script to autoclick in cookie clicker
#Only functions on Windows right now
"""
from ctypes import windll, Structure, c_long, byref, wintypes
import keyboard
import time
import sys
from msvcrt import kbhit, getch
click = False
pressed = False
running = True
setx = 0
sety = 0
offstr = "OFF"
onstr = "ON"
def getCursorPos():
pt = wintypes.POINT()
windll.user32.GetCursorPos(byref(pt))
return pt
def keyboardInput(callback):
global pressed
global click
global setx
global sety
if callback.event_type == 'down':
if callback.name == 'home' and pressed == False:
pressed = True
if callback.event_type == 'up' and pressed == True:
if(callback.name == 'home'):
pressed = False
setx = getCursorPos().x
sety = getCursorPos().y
click = not click
def handleMouse():
    windll.user32.SetCursorPos(setx, sety)
    windll.user32.mouse_event(0x0002, 0, 0, 0, 0)  # MOUSEEVENTF_LEFTDOWN
    windll.user32.mouse_event(0x0004, 0, 0, 0, 0)  # MOUSEEVENTF_LEFTUP
def milliTime():
return int(round(time.time() * 1000))
def mainfun():
global click
global running
print("\nInitializing AutoClicker...")
clickIndex = 0
timer = milliTime()
keyboard.hook(keyboardInput)
doClick = 0
clickedSec = 0
clickedLast = 0
toggle = offstr
print("AutoClicker Initialized!")
print("\nPress HOME anywhere to toggle autoclicking, and press ESC in console window to exit autoclicker.\n")
while running:
if kbhit():
key = ord(getch())
if key == 27:
running = False
if click:
if(doClick >= 30):
doClick = 0
if doClick == 0:
handleMouse()
clickedSec += 1
doClick += 1
toggle = onstr
else:
toggle = offstr
if(milliTime() - timer >= 1000):
timer = milliTime()
#print("\rClicks Last Second: " + str(clickedSec))
clickedLast = clickedSec
clickedSec = 0
print("AutoClicker toggled: " + toggle + " | Clicks Last Second: " + str(clickedLast) + " ", end=' \r')
if running == False:
print("\n\nAutoClicker ending...")
if __name__ == "__main__":
mainfun()
|
nilq/baby-python
|
python
|
import asyncio
from concurrent.futures import ThreadPoolExecutor
async def run_in_executor(method, executor: ThreadPoolExecutor = None):
    """Run a blocking callable in an executor without blocking the event loop.
    When ``executor`` is None, the loop's default ThreadPoolExecutor is used.
    """
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(executor, method)
    return result
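# Hedged usage sketch (not part of the original module): offload a blocking
# call so the event loop stays responsive. `blocking_io` is an illustrative
# name, not a project function.
if __name__ == "__main__":
    import time
    def blocking_io():
        time.sleep(1)
        return "done"
    print(asyncio.get_event_loop().run_until_complete(run_in_executor(blocking_io)))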
|
nilq/baby-python
|
python
|
from time import sleep
from loguru import logger
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import Select
from selenium import webdriver
from bot.constants import TIMEOUT
from bot.driver.driver import get_chrome_driver as get_driver
class RozetkaBot:
def __init__(self, driver_type):
self.driver = get_driver(driver_type)
self.timeout = TIMEOUT
def find(self, xpath):
sleep(0.5)
return WebDriverWait(self.driver, self.timeout).until(EC.visibility_of_element_located((By.XPATH, xpath)))
def find_click(self, xpath):
return WebDriverWait(self.driver, self.timeout).until(EC.element_to_be_clickable((By.XPATH, xpath)))
def find_all(self, xpath):
sleep(0.5)
return WebDriverWait(self.driver, self.timeout).until(EC.visibility_of_all_elements_located((By.XPATH, xpath)))
def auth(self, login, password):
self.find('//button[@class="header__button ng-star-inserted"]').click()
self.find('//input[@id="auth_email"]').send_keys(login)
self.find('//input[@id="auth_pass"]').send_keys(password)
self.find('//button[contains(@class, "auth-modal__submit")]').click()
try:
self.find('//div[contains(@class,"recaptcha-checkbox")]')
except TimeoutException:
# can't check if indeed logged in
logger.debug(f"Logged in as {login}")
else:
self.find('//div[contains(@class,"recaptcha-checkbox")]').click()
self.find('//button[contains(@class, "auth-modal__submit")]').click()
# can't check if indeed logged in
logger.debug(f"Logged in as {login} after captcha")
def open_phones(self):
# self.find('//button[@id="fat-menu"]').click()
# self.find('//a[contains(@href, "c4627949")]').click()
# doesn't work :(
# self.find('//a[contains(@href, "c80003")]').click()
self.driver.get('https://rozetka.com.ua/ua/mobile-phones/c80003/preset=smartfon/')
logger.debug("Opened smartphones")
def check_boxes(self):
# brand
self.find('//a[@data-id="OnePlus"]').click() # //label[@for="OnePlus"]
self.find('//a[@data-id="Samsung"]').click()
self.find('//a[@data-id="Xiaomi"]').click()
logger.debug("Sorted by brands")
# ram
self.find('//a[@data-id="8 ГБ"]').click()
self.find('//a[@data-id="12 ГБ"]').click()
logger.debug("Sorted by RAM")
# memory
self.find('//a[@data-id="128 ГБ"]').click()
self.find('//a[@data-id="256 ГБ"]').click()
logger.debug("Sorted by memory")
# screen
self.find('//a[contains(@data-id, "6.49")]').click()
self.find('//a[contains(@data-id, "6.5")]').click()
logger.debug("Sorted by screen size")
# processor
self.find('//a[contains(@data-id, "Qualcomm")]').click()
logger.debug("Sorted by processor")
# price
self.find('//input[@formcontrolname="min"]').clear()
self.find('//input[@formcontrolname="min"]').send_keys(10000)
self.find('//input[@formcontrolname="max"]').clear()
self.find('//input[@formcontrolname="max"]').send_keys(20000)
self.find('//button[text()=" Ok "]').click()
logger.debug("Sorted by price")
def sort(self):
Select(self.find('//select')).select_by_visible_text('Новинки')
logger.debug("Sorted by 'Новинки'")
def add_to_compare_and_click(self, first_n):
        elements = self.find_all('//button[contains(@class,"compare-button")]')
for el in elements[:first_n]:
# for ElementClickInterceptedException
webdriver.ActionChains(self.driver).move_to_element(el).click(el).perform()
logger.debug("Added to compare")
self.find('//button[contains(@aria-label, "Списки")]').click()
self.find('//a[contains(@class, "comparison")]').click()
self.find('//button[contains(text(),"відмінності")]').click()
logger.debug("Chose only differences")
|
nilq/baby-python
|
python
|
# _*_ coding: utf-8 _*_
import json
import uuid
from datetime import datetime
from multiprocessing import Process
import redis
__author__ = 'Arun KR (@kra3)'
class SimpleJobQueue(object):
SCHEDULED, IN_PROGRESS, DONE, FAILED = (
'SCHEDULED', 'IN_PROGRESS', 'DONE', 'FAILED')
TIMESTAMP, PAYLOAD, STATUS, QUEUE, RESULT = (
'timestamp', 'payload', 'status', 'queue', 'result')
def __init__(self, host='localhost', port=6379):
self._redis = redis.StrictRedis(host=host, port=port)
@property
def redis(self):
return self._redis
def _get_schedule_queue(self, queue_name):
return 'SCHED::{}'.format(queue_name)
def _get_work_queue(self, queue_name):
return 'WORK::{}'.format(queue_name)
def _get_data_set(self, job_id):
return "DATA::{}".format(job_id)
def _build_unique_id(self):
return uuid.uuid4().hex
def _get_timestamp(self):
return datetime.now().timestamp()
def package_result(self, res):
return json.dumps(res)
def publish_to(self, queue_name):
def _publish(func):
def _inner(*args, **kwargs):
res = func(*args, **kwargs)
job_id = self._build_unique_id()
with self.redis.pipeline() as pipe:
try:
pipe.hmset(self._get_data_set(job_id), {
self.TIMESTAMP: self._get_timestamp(),
self.QUEUE: queue_name,
self.STATUS: self.SCHEDULED,
self.PAYLOAD: self.package_result(res)
})
pipe.lpush(
self._get_schedule_queue(queue_name), job_id)
pipe.execute()
except Exception as e:
print(e)
return job_id
return _inner
return _publish
def subscribe_to(self, queue_name):
def _subscribe(func):
def _worker():
while True:
# move a scheduled item to work queue
job_id = self.redis.brpoplpush(
self._get_schedule_queue(queue_name),
self._get_work_queue(queue_name), 10)
if not job_id:
continue
else:
job_id = job_id.decode("utf-8")
# update timestamp
self.redis.hmset(self._get_data_set(job_id), {
self.TIMESTAMP: self._get_timestamp(),
self.STATUS: self.IN_PROGRESS
})
payload = self.redis.hget(
self._get_data_set(job_id), self.PAYLOAD)
try:
res = func(json.loads(payload))
except Exception as e:
self.redis.hmset(self._get_data_set(job_id), {
self.TIMESTAMP: self._get_timestamp(),
self.STATUS: self.FAILED,
self.RESULT: self.package_result(str(e))
})
else:
self.redis.hmset(self._get_data_set(job_id), {
self.TIMESTAMP: self._get_timestamp(),
self.STATUS: self.DONE,
self.RESULT: self.package_result(res)
})
# start a worker thread
p = Process(target=_worker, args=[])
p.start()
p.join()
return _subscribe
def get_status(self, job_id):
key = self._get_data_set(job_id)
if self.redis.exists(key):
status, result = self.redis.hmget(key, self.STATUS, self.RESULT)
res = {'status': status}
if result is not None:
res['result'] = result
return res
else:
return {'status': 'error', 'reason': 'job not found'}
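# Hedged usage sketch (not in the original file). Assumes a Redis server on
# localhost:6379; the queue name "emails" and the handler body are
# illustrative. Note that @subscribe_to starts (and joins) a worker process
# at decoration time, so it belongs in a separate worker script.
if __name__ == '__main__':
    q = SimpleJobQueue()
    @q.publish_to('emails')
    def make_email(address):
        # the return value becomes the job payload
        return {'to': address, 'body': 'hello'}
    job_id = make_email('someone@example.com')
    print(q.get_status(job_id))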
|
nilq/baby-python
|
python
|
l=[]
for i in range(int(input())):
l.append(input().split())
ans=[]
for i in l:
if i[1]=='rat':
ans.append(i[0])
for i in l:
if i[1]=='woman' or i[1]=='child':
ans.append(i[0])
for i in l:
if i[1]=='man':
ans.append(i[0])
for i in l:
if i[1]=='captain':
ans.append(i[0])
for i in ans:
print(i)
|
nilq/baby-python
|
python
|
#! /usr/local/bin/stackless2.6
# by pts@fazekas.hu at Tue Feb 2 21:18:10 CET 2010
import cStringIO
import syncless.coio
import socket
import unittest
# TODO(pts): Test reverse lookup without canonical name.
# TODO(pts): Test for IPv6 addresses:
# print syncless.coio.dns_resolve_ipv6('www.ipv6.org')
#: <dnsresult code=0, t=3, ttl=2936 value=['2001:6b0:1:ea:202:a5ff:fecd:13a6'] at 0xb7d76d24>
# print syncless.coio.dns_resolve_reverse('2001:6b0:1:ea:202:a5ff:fecd:13a6')
#: <dnsresult t=2, ttl=3446 values=['igloo.stacken.kth.se'] at 0x824226c>
ERR_NODATA = set([1])
ERR_ADDRFAMILY = set([2])
ERR_HOST_NOT_FOUND = set([3])
ERR_NONAME = set([4])
RESOLVE_IPV4_RESULT = {
'152.66.84.8': ['152.66.84.8'],
'127.5.6.7': ['127.5.6.7'],
'74.125.39.106': ['74.125.39.106'],
'mail.szit.bme.hu': ['152.66.84.8'],
'fourier.szit.bme.hu': ['152.66.84.8'],
'www.google.com': ['74.125.39.106', '74.125.39.103',
'74.125.39.147', '74.125.39.104',
'74.125.39.105', '74.125.39.99'],
'www.l.google.com': ['74.125.39.106', '74.125.39.103',
'74.125.39.147', '74.125.39.104',
'74.125.39.105', '74.125.39.99'],
'foo.bar.baz': None,
'www.ipv6.org': ['130.237.234.40'],
'unknown': None,
}
RESOLVE_REVERSE_RESULT = {
'152.66.84.8': ['fourier.szit.bme.hu'],
'2001:6b0:1:ea:202:a5ff:fecd:13a6': ['igloo.stacken.kth.se'],
'74.125.39.106': ['fx-in-f106.1e100.net'],
'130.237.234.40': ['igloo.stacken.kth.se'],
'127.5.6.7': None,
}
GETHOSTBYNAME_RESULT = {
'other': '1.2.3.5',
'1.2.3.5': '1.2.3.5',
'bogus3': '1.2.3.4',
'bogus4.foo.bar': '1.2.3.4',
'1.2.3.4': '1.2.3.4',
'localhost': '127.0.0.1',
'127.0.0.1': '127.0.0.1',
'127.5.6.7': '127.5.6.7',
'152.66.84.8': '152.66.84.8',
'2001:6b0:1:ea:202:a5ff:fecd:13a6': ERR_ADDRFAMILY,
'74.125.39.106': '74.125.39.106',
'foo.bar.baz': ERR_NODATA,
'fourier.szit.bme.hu': '152.66.84.8',
'mail.szit.bme.hu': '152.66.84.8',
'www.google.com': '74.125.39.106',
'www.l.google.com': '74.125.39.106',
'www.ipv6.org': '130.237.234.40',
}
# Item 1 is usually empty except for hostnames fetched from /etc/hosts.
GETHOSTBYADDR_RESULT = {
'bogus3': ('bogus1.there',
['bogus2', 'bogus3', 'bogus4.foo.bar'], ['1.2.3.4']),
'bogus4.foo.bar': ('bogus1.there',
['bogus2', 'bogus3', 'bogus4.foo.bar'], ['1.2.3.4']),
'1.2.3.4': ('bogus1.there',
['bogus2', 'bogus3', 'bogus4.foo.bar'], ['1.2.3.4']),
'localhost': ('localhost', [], ['127.0.0.1']),
'127.0.0.1': ('localhost', [], ['127.0.0.1']),
'other': ('other', ['bogus3'], ['1.2.3.5']),
'1.2.3.5': ('other', ['bogus3'], ['1.2.3.5']),
'127.5.6.7': ERR_HOST_NOT_FOUND,
'152.66.84.8': ('fourier.szit.bme.hu', [], ['152.66.84.8']),
'2001:6b0:1:ea:202:a5ff:fecd:13a6': (
'igloo.stacken.kth.se', [], ['2001:6b0:1:ea:202:a5ff:fecd:13a6']),
'74.125.39.106': ('fx-in-f106.1e100.net', [], ['74.125.39.106']),
'foo.bar.baz': ERR_NODATA,
'fourier.szit.bme.hu': ('fourier.szit.bme.hu', [], ['152.66.84.8']),
'mail.szit.bme.hu': ('fourier.szit.bme.hu', [], ['152.66.84.8']),
'www.google.com': ('fx-in-f106.1e100.net', [], ['74.125.39.106']),
'www.l.google.com': ('fx-in-f106.1e100.net', [], ['74.125.39.106']),
'www.ipv6.org': ('igloo.stacken.kth.se', [], ['130.237.234.40']),
}
GETFQDN_RESULT = {
'1.2.3.4': 'bogus1.there',
'1.2.3.5': 'other',
'127.0.0.1': 'localhost',
'127.5.6.7': '127.5.6.7',
'152.66.84.8': 'fourier.szit.bme.hu',
'2001:6b0:1:ea:202:a5ff:fecd:13a6': 'igloo.stacken.kth.se',
'74.125.39.106': 'fx-in-f106.1e100.net',
'bogus3': 'bogus1.there',
'bogus4.foo.bar': 'bogus1.there',
'foo.bar.baz': 'foo.bar.baz',
'fourier.szit.bme.hu': 'fourier.szit.bme.hu',
'localhost': 'localhost',
'mail.szit.bme.hu': 'fourier.szit.bme.hu',
'other': 'other',
'www.google.com': 'fx-in-f106.1e100.net',
'www.ipv6.org': 'igloo.stacken.kth.se',
'www.l.google.com': 'fx-in-f106.1e100.net',
'unknown': 'unknown',
}
GETHOSTBYNAME_EX_RESULT = {
'1.2.3.4': ('1.2.3.4', [], ['1.2.3.4']),
'1.2.3.5': ('1.2.3.5', [], ['1.2.3.5']),
'127.0.0.1': ('127.0.0.1', [], ['127.0.0.1']),
'127.5.6.7': ('127.5.6.7', [], ['127.5.6.7']),
'152.66.84.8': ('152.66.84.8', [], ['152.66.84.8']),
'2001:6b0:1:ea:202:a5ff:fecd:13a6': ERR_ADDRFAMILY,
'74.125.39.106': ('74.125.39.106', [], ['74.125.39.106']),
# Incomplete emulation: socket.gethostbyname_ex would return:
# 'bogus3': ('bogus1.there',
# ['bogus2', 'bogus3', 'bogus4.foo.bar'],
# ['1.2.3.4', '1.2.3.5']), # !! for gethostbyaddr
'bogus3': ('bogus1.there',
['bogus2', 'bogus3', 'bogus4.foo.bar'],
['1.2.3.4']),
'bogus4.foo.bar': ('bogus1.there',
['bogus2', 'bogus3', 'bogus4.foo.bar'], ['1.2.3.4']),
'foo.bar.baz': ERR_NODATA,
'fourier.szit.bme.hu': ('fourier.szit.bme.hu', [], ['152.66.84.8']),
'localhost': ('localhost', [], ['127.0.0.1']),
'mail.szit.bme.hu': ('fourier.szit.bme.hu', ['mail.szit.bme.hu'],
['152.66.84.8']),
'other': ('other', ['bogus3'], ['1.2.3.5']),
# Incomplete emulation: socket.gethostbyname_ex would return:
#'www.google.com': ('www.l.google.com', ['www.google.com'],
# ['74.125.39.106', '74.125.39.103',
# '74.125.39.147', '74.125.39.104',
# '74.125.39.105', '74.125.39.99']),
'www.google.com': ('fx-in-f106.1e100.net', ['www.google.com'],
['74.125.39.106', '74.125.39.103',
'74.125.39.147', '74.125.39.104',
'74.125.39.105', '74.125.39.99']),
# Incomplete emulation: socket.gethostbyname_ex would return:
#'www.l.google.com': ('www.l.google.com', [],
# ['74.125.39.106', '74.125.39.103',
# '74.125.39.147', '74.125.39.104',
# '74.125.39.105', '74.125.39.99']),
'www.l.google.com': ('fx-in-f106.1e100.net', ['www.l.google.com'],
['74.125.39.106', '74.125.39.103',
'74.125.39.147', '74.125.39.104',
'74.125.39.105', '74.125.39.99']),
# Incomplete emulation: socket.gethostbyname_ex would return:
#'www.ipv6.org': ('shake.stacken.kth.se', ['www.ipv6.org'],
# ['130.237.234.40']),
'www.ipv6.org': ('igloo.stacken.kth.se', ['www.ipv6.org'],
['130.237.234.40']),
'unknown': ERR_NODATA,
}
def FakeDnsResolveIpv4(name):
values = RESOLVE_IPV4_RESULT[name]
if values is None:
raise syncless.coio.DnsLookupError(-3, 'fake error') # name does not exist
return syncless.coio.dnsresult(1, 1, values)
def FakeDnsResolveReverse(name):
values = RESOLVE_REVERSE_RESULT[name]
if values is None:
raise syncless.coio.DnsLookupError(-3, 'fake error')
return syncless.coio.dnsresult(2, 1, values)
def Wrap(function, *args):
try:
return function(*args)
except socket.gaierror, e:
assert type(e.args[1]) == str, repr(e.args)
if e.args[0] == socket.EAI_NODATA:
return ERR_NODATA
elif e.args[0] == socket.EAI_NONAME:
return ERR_NONAME
elif e.args[0] == socket.EAI_ADDRFAMILY:
return ERR_ADDRFAMILY
else:
assert 0, repr(e.args)
except socket.herror, e:
if e.args[0] == syncless.coio.HERROR_HOST_NOT_FOUND:
return ERR_HOST_NOT_FOUND
else:
assert 0, repr(e.args)
class DnsCompatTest(unittest.TestCase):
def setUp(self):
assert callable(getattr(syncless.coio, 'dns_resolve_ipv4', None))
syncless.coio.dns_resolve_ipv4 = FakeDnsResolveIpv4
syncless.coio.dns_resolve_reverse = FakeDnsResolveReverse
syncless.coio.names_by_ip.clear()
syncless.coio.names_by_nameip.clear()
# TODO(pts): fake 127.5.6.7 for reverse DNS lookup.
f = cStringIO.StringIO()
f.write("#127.0.0.1\t \tbad1\n")
f.write(" 127.0.0.1\t \tlocalhost\n")
f.write("127.0.0.1\t \tbad2\n")
f.write("\t \t1.2.3.4 bogus1.there\tbogus2 bogus3 bogus4.foo.bar\n")
f.write("1.2.3.5 other bogus3\n")
f.reset()
syncless.coio.read_etc_hosts(f=f)
# TODO(pts): Cleanup in tearDown.
def testEtcHostsDicts(self):
items1 = ['1.2.3.4', 'bogus1.there', 'bogus2', 'bogus3', 'bogus4.foo.bar']
items2 = ['127.0.0.1', 'localhost']
items3 = ['1.2.3.5', 'other', 'bogus3']
items4 = ['127.0.0.1', 'bad2']
self.assertEqual(
{'1.2.3.4': items1, '127.0.0.1': items2,
'1.2.3.5': items3}, syncless.coio.names_by_ip)
self.assertEqual(
{'1.2.3.4': items1, 'bogus1.there': items1, 'bogus2': items1,
'bad2': items4, '1.2.3.5': items3, 'other': items3,
'bogus3': items1, 'bogus4.foo.bar': items1, 'localhost': items2,
'127.0.0.1': items2}, syncless.coio.names_by_nameip)
def testGetHostByName(self):
for name in sorted(GETHOSTBYNAME_RESULT):
result = Wrap(syncless.coio.gethostbyname, name)
self.assertEqual({name: GETHOSTBYNAME_RESULT[name]}, {name: result})
def testGetHostByNameEx(self):
for name in sorted(GETHOSTBYNAME_EX_RESULT):
result = Wrap(syncless.coio.gethostbyname_ex, name)
self.assertEqual({name: GETHOSTBYNAME_EX_RESULT[name]}, {name: result})
def testGetHostByAddr(self):
for name in sorted(GETHOSTBYADDR_RESULT):
result = Wrap(syncless.coio.gethostbyaddr, name)
self.assertEqual({name: GETHOSTBYADDR_RESULT[name]}, {name: result})
def testGetFqdn(self):
for name in sorted(GETFQDN_RESULT):
result = syncless.coio.getfqdn(name) # Never raises an exception.
self.assertEqual({name: GETFQDN_RESULT[name]}, {name: result})
# def testZZZGen(self):
# for name in sorted(GETHOSTBYNAME_EX_RESULT) + ['unknown']:
# print repr(name), ':', Wrap(socket.gethostbyname_ex, name)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from pygraphblas import Matrix, BOOL, lib
def rpq(g, r):
kron = g.get_intersection(r)
closure_matrix = Matrix.sparse(BOOL, kron.size, kron.size)
for label in kron.matrices:
if kron.matrices[label].nrows < kron.size:
kron.matrices[label].resize(kron.size, kron.size)
closure_matrix += kron.matrices[label]
    tmp = closure_matrix.dup()
    closure_matrix += closure_matrix @ closure_matrix
    while not tmp.iseq(closure_matrix):
        tmp = closure_matrix.dup()  # copy rather than alias, so iseq can detect convergence
        closure_matrix += closure_matrix @ closure_matrix
res = Matrix.sparse(BOOL, g.size, g.size)
for i, j, _ in zip(*closure_matrix.select(lib.GxB_NONZERO).to_lists()):
i_g, i_r = i // r.size, i % r.size
j_g, j_r = j // r.size, j % r.size
res[i_g, j_g] = True
return kron, res
|
nilq/baby-python
|
python
|
'''
This file implements the Schlaufen Detection algorithm in the paper:
Testing Strategic Interaction in Networks
'''
from ugd.schlaufen_construction.di_schlaufen_construction_util import mark_edge, mark_node, cycle_found, random_draw
from ugd.help_function.util import rand_element_of_set, del_nodes_mark
def add_di_random_schlaufe(graph, schlaufen_number):
# random draw of initial node (step 1)
start_node = rand_element_of_set(range(graph.node_number))
cycle_node = None
active_cycle_node = None
working_node = start_node
is_active = True
is_schlaufe = False
while not is_schlaufe:
mark_node(graph, working_node, is_active)
found, out_node = random_draw(graph, working_node, is_active) # step 2 or 4, depending on is_active
if found:
mark_edge(graph, working_node, out_node, is_active, schlaufen_number)
working_node = out_node
if cycle_found(graph, out_node, is_active): # step 3 or 5
active_cycle_node = not is_active # out_node is one step ahead
cycle_node = out_node
is_schlaufe = True
else:
is_active = not is_active
else:
is_schlaufe = True
del_nodes_mark(graph, start_node) # but don't delete marked edges
return start_node, cycle_node, active_cycle_node
|
nilq/baby-python
|
python
|
import pytest
from asynctest import (
mock as async_mock,
TestCase as AsyncTestCase,
)
from aries_cloudagent.messaging.request_context import RequestContext
from aries_cloudagent.messaging.responder import MockResponder
from aries_cloudagent.transport.inbound.receipt import MessageReceipt
from aries_cloudagent.protocols.issue_credential.v1_1.messages.credential_request import (
CredentialRequest,
)
from aries_cloudagent.protocols.issue_credential.v1_1.handlers.credential_request import (
CredentialRequestHandler,
)
from aries_cloudagent.storage.basic import BasicStorage
from aries_cloudagent.storage.base import BaseStorage
from aries_cloudagent.protocols.issue_credential.v1_1.models.credential_exchange import (
CredentialExchangeRecord,
)
class TestCredentialOfferHandler(AsyncTestCase):
async def test_is_saving_record(self):
context = RequestContext()
storage = BasicStorage()
context.injector.bind_instance(BaseStorage, storage)
context.connection_ready = True
context.message = CredentialRequest(
credential={
"credential_type": "TEST",
"credential_values": {"test": "one", "value": "two"},
}
)
responder = MockResponder()
responder.connection_id = "1234"
handler_inst = CredentialRequestHandler()
await handler_inst.handle(context, responder)
assert len(responder.messages) == 0
assert 1 == len(responder.webhooks)
id = responder.webhooks[0][1]["credential_exchange_id"]
exchange = await CredentialExchangeRecord.retrieve_by_id(context, id)
        assert exchange is not None
assert exchange.connection_id == responder.connection_id
assert exchange.state == CredentialExchangeRecord.STATE_REQUEST_RECEIVED
assert exchange.credential_request == context.message.credential
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 07:06:38 2020
@author: daskalot
"""
#from doc_embedders import TfIdfTransformer as tfidf
from sklearn.feature_extraction.text import CountVectorizer
import string
from sklearn import preprocessing
import graphvite as gv
import numpy as np
from tqdm import tqdm
import pickle
import os
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
lemmatizer = WordNetLemmatizer()
lemma = lemmatizer.lemmatize
my_stop_words = [lemma(t) for t in stopwords.words('english')]
def remove_punctuation(text):
table = text.maketrans({key: None for key in string.punctuation})
text = text.translate(table)
return text
def tokenize(text):
no_punct = remove_punctuation(text)
stems = [lemma(t) for t in word_tokenize(no_punct) if not t in my_stop_words ]
return stems
def naive_terms(texts, n=2):
print("Term finding started.")
vectorizer = CountVectorizer(tokenizer = tokenize, strip_accents = 'ascii', ngram_range = (1,n))#, stop_words=my_stop_words)
X = vectorizer.fit_transform(texts)
terms = vectorizer.inverse_transform(X)
print("Term finding finished.")
return terms
def wikify(unique_terms, model_name="transe"):
present_terms = {}
print("Wikifying started.")
with open("kg/"+model_name+".pkl", "rb") as fin:
model = pickle.load(fin)
alias2entity = gv.dataset.wikidata5m.alias2entity
entity2id = model.graph.entity2id
entity_embeddings = model.solver.entity_embeddings
relation2id = model.graph.relation2id
relation_embeddings = model.solver.relation_embeddings
alias2relation = gv.dataset.wikidata5m.alias2relation
    for term in unique_terms:
        try:
            if term in alias2entity:
                present_terms[term] = entity_embeddings[entity2id[alias2entity[term]]]
        except Exception:
            pass
        try:
            if term in alias2relation:
                present_terms[term] = relation_embeddings[relation2id[alias2relation[term]]]
        except Exception:
            pass
print("Wikifying finished.")
return present_terms
def naive_embedd(docs, ngrams = 2, model_name = "transe"):
    term_candidates = naive_terms(docs, ngrams)  # pass the requested n-gram range through
with open("kg/wikidata_aliases.pkl", "rb") as fin:
entities = pickle.load(fin)
with open("kg/wikidata_relations.pkl", "rb") as fin:
relations = pickle.load(fin)
unique_terms = set(x for c in term_candidates for x in c)
unique_present = set(unique_terms).intersection(entities)
doc2terms = []
for doc in term_candidates:
curr_terms = []
for term in doc:
if term in unique_present:
curr_terms.append(term)
doc2terms.append(curr_terms)
present_terms = wikify(unique_present, model_name)
print("Embedding started.")
doc_kg_embs = []
final_terms = []
for doc in doc2terms:
term2vec = {}
for term in doc:
if term in present_terms:
term2vec[term] = present_terms[term]
doc_kg_emb = make_embedding(term2vec)
doc_kg_embs.append(doc_kg_emb)
final_terms.append(list(term2vec.keys()))
print("Embedding ended.")
outputs = zip(final_terms, doc_kg_embs)
return outputs
def make_embedding(term2vec):
if len(term2vec) == 0:
return np.array([0]*512)
avg = sum(list(term2vec.values())) / len(term2vec.values())
return avg
def test_env():
train_text = {"text1":"Brexit (/ˈbrɛksɪt, ˈbrɛɡzɪt/;[1] a portmanteau of British and exit) is the withdrawal of the United Kingdom (UK) from the European Union (EU). Following a referendum held on 23 June 2016 in which 51.9 per cent of those voting supported leaving the EU, the Government invoked Article 50 of the Treaty on European Union, starting a two-year process which was due to conclude with the UK's exit on 29 March 2019 – a deadline which has since been extended to 31 October 2019.[2]",
"text2":"Withdrawal from the EU has been advocated by both left-wing and right-wing Eurosceptics, while pro-Europeanists, who also span the politica#l spectrum, have advocated continued membership and maintaining the customs union and single market. The UK joined the European Communities (EC) in 1973 under the Conservative government of Edward Heath, with continued membership endorsed by a referendum in 1975. In the 1970s and 1980s, withdrawal from the EC was advocated mainly by the political left, with the Labour Party's 1983 election manifesto advocating full withdrawal. From the 1990s, opposition to further European integration came mainly from the right, and divisions within the Conservative Party led to rebellion over the Maastricht Treaty in 1992. The growth of the UK Independence Party (UKIP) in the early 2010s and the influence of the cross-party People's Pledge campaign have been described as influential in bringing about a referendum. The Conservative Prime Minister, David Cameron, pledged during the campaign for the 2015 general election to hold a new referendum—a promise which he fulfilled in 2016 following pressure from the Eurosceptic wing of his party. Cameron, who had campaigned to remain, resigned after the result and was succeeded by Theresa May, his former Home Secretary. She called a snap general election less than a year later but lost her overall majority. Her minority government is supported in key votes by the Democratic Unionist Party.",
"text3":"The broad consensus among economists is that Brexit will likely reduce the UK's real per capita income in the medium term and long term, and that the Brexit referendum itself damaged the economy.[a] Studies on effects since the referendum show a reduction in GDP, trade and investment, as well as household losses from increased inflation. Brexit is likely to reduce immigration from European Economic Area (EEA) countries to the UK, and poses challenges for UK higher education and academic research. As of May 2019, the size of the divorce bill—the UK's inheritance of existing EU trade agreements—and relations with Ireland and other EU member states remains uncertain. The precise impact on the UK depends on whether the process will be a hard or soft Brexit."}
x = naive_embedd(train_text.values())
for z,y in x:
print(z)
#test_env()
|
nilq/baby-python
|
python
|
from streamlit.components.v1 import html
try:
    from keywords import SHARE_KEYWORD
    from keywords import check_keyword
except ImportError:
    from .keywords import SHARE_KEYWORD
    from .keywords import check_keyword
def share_parser(lines):
"""Parses a list of lines into a dictionary with the parsed values.
:param lines: list of lines
:type lines: list
:return: parsed values for text and url
:rtype: dict
"""
# Dict to store the parsed values
parse_dict = {
"my_text":"",
"my_url":"",
}
for i, line in enumerate(lines):
if i==0:
if check_keyword(line, SHARE_KEYWORD):
continue
else:
break
elif i==1:
parse_dict["my_text"] = line.strip()
elif i==2:
parse_dict["my_url"] = line.strip()
return parse_dict
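# Example (illustrative; the exact first-line keyword is whatever
# SHARE_KEYWORD defines in keywords.py):
#
#   share_parser([SHARE_KEYWORD, "My text", "https://example.com"])
#   -> {"my_text": "My text", "my_url": "https://example.com"}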
def share_from_lines(lines):
"""Renders the share buttons from a list of lines.
:param lines: list of lines
:type lines: list
:return: None
"""
parse_dict = share_parser(lines)
share(my_text=parse_dict["my_text"],
my_url=parse_dict["my_url"], )
return
def share(my_text, my_url):
"""
This function takes a url and a text and displays
clickable sharing buttons in html.
:param my_text: the text to share on social media
:type my_text: str
:param my_url: the url to share on social media
:type my_url: str
"""
# Define the css style for the sharing buttons
my_css = """
.resp-sharing-button__link,
.resp-sharing-button__icon {
display: inline-block
}
.resp-sharing-button__link {
text-decoration: none;
color: #fff;
margin: 0.5em
}
.resp-sharing-button {
border-radius: 5px;
transition: 25ms ease-out;
padding: 0.5em 0.75em;
font-family: Helvetica Neue,Helvetica,Arial,sans-serif
}
.resp-sharing-button__icon svg {
width: 1em;
height: 1em;
margin-right: 0.4em;
vertical-align: top
}
.resp-sharing-button--small svg {
margin: 0;
vertical-align: middle
}
/* Non solid icons get a stroke */
.resp-sharing-button__icon {
stroke: #fff;
fill: none
}
/* Solid icons get a fill */
.resp-sharing-button__icon--solid,
.resp-sharing-button__icon--solidcircle {
fill: #fff;
stroke: none
}
.resp-sharing-button--twitter {
background-color: #55acee
}
.resp-sharing-button--twitter:hover {
background-color: #2795e9
}
.resp-sharing-button--pinterest {
background-color: #bd081c
}
.resp-sharing-button--pinterest:hover {
background-color: #8c0615
}
.resp-sharing-button--facebook {
background-color: #3b5998
}
.resp-sharing-button--facebook:hover {
background-color: #2d4373
}
.resp-sharing-button--tumblr {
background-color: #35465C
}
.resp-sharing-button--tumblr:hover {
background-color: #222d3c
}
.resp-sharing-button--reddit {
background-color: #5f99cf
}
.resp-sharing-button--reddit:hover {
background-color: #3a80c1
}
.resp-sharing-button--google {
background-color: #dd4b39
}
.resp-sharing-button--google:hover {
background-color: #c23321
}
.resp-sharing-button--linkedin {
background-color: #0077b5
}
.resp-sharing-button--linkedin:hover {
background-color: #046293
}
.resp-sharing-button--email {
background-color: #777
}
.resp-sharing-button--email:hover {
background-color: #5e5e5e
}
.resp-sharing-button--xing {
background-color: #1a7576
}
.resp-sharing-button--xing:hover {
background-color: #114c4c
}
.resp-sharing-button--whatsapp {
background-color: #25D366
}
.resp-sharing-button--whatsapp:hover {
background-color: #1da851
}
.resp-sharing-button--hackernews {
background-color: #FF6600
}
.resp-sharing-button--hackernews:hover, .resp-sharing-button--hackernews:focus { background-color: #FB6200 }
.resp-sharing-button--vk {
background-color: #507299
}
.resp-sharing-button--vk:hover {
background-color: #43648c
}
.resp-sharing-button--facebook {
background-color: #3b5998;
border-color: #3b5998;
}
.resp-sharing-button--facebook:hover,
.resp-sharing-button--facebook:active {
background-color: #2d4373;
border-color: #2d4373;
}
.resp-sharing-button--twitter {
background-color: #55acee;
border-color: #55acee;
}
.resp-sharing-button--twitter:hover,
.resp-sharing-button--twitter:active {
background-color: #2795e9;
border-color: #2795e9;
}
.resp-sharing-button--email {
background-color: #777777;
border-color: #777777;
}
.resp-sharing-button--email:hover,
.resp-sharing-button--email:active {
background-color: #5e5e5e;
border-color: #5e5e5e;
}
.resp-sharing-button--linkedin {
background-color: #0077b5;
border-color: #0077b5;
}
.resp-sharing-button--linkedin:hover,
.resp-sharing-button--linkedin:active {
background-color: #046293;
border-color: #046293;
}
.resp-sharing-button--whatsapp {
background-color: #25D366;
border-color: #25D366;
}
.resp-sharing-button--whatsapp:hover,
.resp-sharing-button--whatsapp:active {
background-color: #1DA851;
border-color: #1DA851;
}
.resp-sharing-button--telegram {
background-color: #54A9EB;
}
.resp-sharing-button--telegram:hover {
background-color: #4B97D1;}
"""
# Define the html
my_html = f"""
<!-- Sharingbutton Facebook -->
<span>
<a class="resp-sharing-button__link" href="https://facebook.com/sharer/sharer.php?u={my_url}" target="_blank" rel="noopener" aria-label="">
<div class="resp-sharing-button resp-sharing-button--facebook resp-sharing-button--small"><div aria-hidden="true" class="resp-sharing-button__icon resp-sharing-button__icon--solidcircle">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 0C5.38 0 0 5.38 0 12s5.38 12 12 12 12-5.38 12-12S18.62 0 12 0zm3.6 11.5h-2.1v7h-3v-7h-2v-2h2V8.34c0-1.1.35-2.82 2.65-2.82h2.35v2.3h-1.4c-.25 0-.6.13-.6.66V9.5h2.34l-.24 2z"/></svg>
</div>
</div>
</a>
<!-- Sharingbutton Twitter -->
<a class="resp-sharing-button__link" href="https://twitter.com/intent/tweet/?text={my_text}&url={my_url}" target="_blank" rel="noopener" aria-label="">
<div class="resp-sharing-button resp-sharing-button--twitter resp-sharing-button--small"><div aria-hidden="true" class="resp-sharing-button__icon resp-sharing-button__icon--solidcircle">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 0C5.38 0 0 5.38 0 12s5.38 12 12 12 12-5.38 12-12S18.62 0 12 0zm5.26 9.38v.34c0 3.48-2.64 7.5-7.48 7.5-1.48 0-2.87-.44-4.03-1.2 1.37.17 2.77-.2 3.9-1.08-1.16-.02-2.13-.78-2.46-1.83.38.1.8.07 1.17-.03-1.2-.24-2.1-1.3-2.1-2.58v-.05c.35.2.75.32 1.18.33-.7-.47-1.17-1.28-1.17-2.2 0-.47.13-.92.36-1.3C7.94 8.85 9.88 9.9 12.06 10c-.04-.2-.06-.4-.06-.6 0-1.46 1.18-2.63 2.63-2.63.76 0 1.44.3 1.92.82.6-.12 1.95-.27 1.95-.27-.35.53-.72 1.66-1.24 2.04z"/></svg>
</div>
</div>
</a>
<!-- Sharingbutton E-Mail -->
<a class="resp-sharing-button__link" href="mailto:?subject={my_text}&body={my_url}" target="_self" rel="noopener" aria-label="">
<div class="resp-sharing-button resp-sharing-button--email resp-sharing-button--small"><div aria-hidden="true" class="resp-sharing-button__icon resp-sharing-button__icon--solidcircle">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 0C5.38 0 0 5.38 0 12s5.38 12 12 12 12-5.38 12-12S18.62 0 12 0zm8 16c0 1.1-.9 2-2 2H6c-1.1 0-2-.9-2-2V8c0-1.1.9-2 2-2h12c1.1 0 2 .9 2 2v8z"/><path d="M17.9 8.18c-.2-.2-.5-.24-.72-.07L12 12.38 6.82 8.1c-.22-.16-.53-.13-.7.08s-.15.53.06.7l3.62 2.97-3.57 2.23c-.23.14-.3.45-.15.7.1.14.25.22.42.22.1 0 .18-.02.27-.08l3.85-2.4 1.06.87c.1.04.2.1.32.1s.23-.06.32-.1l1.06-.9 3.86 2.4c.08.06.17.1.26.1.17 0 .33-.1.42-.25.15-.24.08-.55-.15-.7l-3.57-2.22 3.62-2.96c.2-.2.24-.5.07-.72z"/></svg>
</div>
</div>
</a>
<!-- Sharingbutton LinkedIn -->
<a class="resp-sharing-button__link" href="https://www.linkedin.com/shareArticle?mini=true&url={my_url}&title={my_text}&summary={my_text}&source={my_url}" target="_blank" rel="noopener" aria-label="">
<div class="resp-sharing-button resp-sharing-button--linkedin resp-sharing-button--small"><div aria-hidden="true" class="resp-sharing-button__icon resp-sharing-button__icon--solidcircle">
<svg version="1.1" x="0px" y="0px" width="24px" height="24px" viewBox="0 0 24 24" enable-background="new 0 0 24 24" xml:space="preserve">
<path d="M12,0C5.383,0,0,5.383,0,12s5.383,12,12,12s12-5.383,12-12S18.617,0,12,0z M9.5,16.5h-2v-7h2V16.5z M8.5,7.5 c-0.553,0-1-0.448-1-1c0-0.552,0.447-1,1-1s1,0.448,1,1C9.5,7.052,9.053,7.5,8.5,7.5z M18.5,16.5h-3V13c0-0.277-0.225-0.5-0.5-0.5 c-0.276,0-0.5,0.223-0.5,0.5v3.5h-3c0,0,0.031-6.478,0-7h3v0.835c0,0,0.457-0.753,1.707-0.753c1.55,0,2.293,1.12,2.293,3.296V16.5z" />
</svg>
</div>
</div>
</a>
<!-- Sharingbutton WhatsApp -->
<a class="resp-sharing-button__link" href="https://wa.me/?text={my_text}%20{my_url}" target="_blank" rel="noopener" aria-label="">
<div class="resp-sharing-button resp-sharing-button--whatsapp resp-sharing-button--small"><div aria-hidden="true" class="resp-sharing-button__icon resp-sharing-button__icon--solidcircle">
<svg xmlns="http://www.w3.org/2000/svg" height="24" width="24" viewBox="0 0 24 24"><path d="m12 0c-6.6 0-12 5.4-12 12s5.4 12 12 12 12-5.4 12-12-5.4-12-12-12zm0 3.8c2.2 0 4.2 0.9 5.7 2.4 1.6 1.5 2.4 3.6 2.5 5.7 0 4.5-3.6 8.1-8.1 8.1-1.4 0-2.7-0.4-3.9-1l-4.4 1.1 1.2-4.2c-0.8-1.2-1.1-2.6-1.1-4 0-4.5 3.6-8.1 8.1-8.1zm0.1 1.5c-3.7 0-6.7 3-6.7 6.7 0 1.3 0.3 2.5 1 3.6l0.1 0.3-0.7 2.4 2.5-0.7 0.3 0.099c1 0.7 2.2 1 3.4 1 3.7 0 6.8-3 6.9-6.6 0-1.8-0.7-3.5-2-4.8s-3-2-4.8-2zm-3 2.9h0.4c0.2 0 0.4-0.099 0.5 0.3s0.5 1.5 0.6 1.7 0.1 0.2 0 0.3-0.1 0.2-0.2 0.3l-0.3 0.3c-0.1 0.1-0.2 0.2-0.1 0.4 0.2 0.2 0.6 0.9 1.2 1.4 0.7 0.7 1.4 0.9 1.6 1 0.2 0 0.3 0.001 0.4-0.099s0.5-0.6 0.6-0.8c0.2-0.2 0.3-0.2 0.5-0.1l1.4 0.7c0.2 0.1 0.3 0.2 0.5 0.3 0 0.1 0.1 0.5-0.099 1s-1 0.9-1.4 1c-0.3 0-0.8 0.001-1.3-0.099-0.3-0.1-0.7-0.2-1.2-0.4-2.1-0.9-3.4-3-3.5-3.1s-0.8-1.1-0.8-2.1c0-1 0.5-1.5 0.7-1.7s0.4-0.3 0.5-0.3z"/></svg>
</div>
</div>
</a>
<!-- Sharingbutton Telegram -->
<a class="resp-sharing-button__link" href="https://telegram.me/share/url?text={my_text}&url={my_url}" target="_blank" rel="noopener" aria-label="">
<div class="resp-sharing-button resp-sharing-button--telegram resp-sharing-button--small"><div aria-hidden="true" class="resp-sharing-button__icon resp-sharing-button__icon--solidcircle">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M12 23.5c6.35 0 11.5-5.15 11.5-11.5S18.35.5 12 .5.5 5.65.5 12 5.65 23.5 12 23.5zM2.505 11.053c-.31.118-.505.738-.505.738s.203.62.513.737l3.636 1.355 1.417 4.557a.787.787 0 0 0 1.25.375l2.115-1.72a.29.29 0 0 1 .353-.01L15.1 19.85a.786.786 0 0 0 .746.095.786.786 0 0 0 .487-.573l2.793-13.426a.787.787 0 0 0-1.054-.893l-15.568 6z" fill-rule="evenodd"/></svg>
</div>
</div>
</a>
</span>
"""
# Define button html
my_share_button = f"""
<style>
{my_css}
</style>
{my_html}
"""
# Render
html(my_share_button)
return
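# Usage sketch (not part of the original module): inside a Streamlit app,
# render the buttons for a hard-coded text/URL pair. Both values below are
# placeholders.
#
# share(my_text="Check out this app!", my_url="https://example.com")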
|
nilq/baby-python
|
python
|
"""
This file demonstrates writing tests using the pytest module. These will pass
when you run "./bin/pytest app/tests.py".
Replace this with more appropriate tests for your application.
"""
# -*- coding: utf-8 -*-
import pytest
class TestSuite(object):
def test_run(self):
pytest.skip('todo')
|
nilq/baby-python
|
python
|
import textwrap
import uuid
from moviepy.editor import *
TITLE_FONT_SIZE = 30
FONT_SIZE = 30
TITLE_FONT_COLOR = 'white'
BGM_PATH = 'assets/bgm.mp3'
STATIC_PATH = 'assets/static.mp4'
SIZE = (1280, 720)
BG_COLOR = (16,16,16)
VIDEO_PATH = ""
FONT = 'Amiri-regular'
def generate_title(text, audio_path):
color_clip = ColorClip(SIZE, BG_COLOR)
audio_clip = AudioFileClip(audio_path)
font_size = TITLE_FONT_SIZE
wrapped_text = textwrap.fill(text, width=90)
txt_clip = TextClip(wrapped_text,fontsize=font_size, font=FONT, color=TITLE_FONT_COLOR, align="west")
txt_clip = txt_clip.set_pos("center")
clip = CompositeVideoClip([color_clip, txt_clip])
clip.audio = audio_clip
clip.duration = audio_clip.duration
static_clip = VideoFileClip(STATIC_PATH)
clip = concatenate_videoclips([clip, static_clip])
return clip
def generate_clip(post, comment):
text = comment.body
audio_path = comment.body_audio
color_clip = ColorClip(SIZE, BG_COLOR)
audio_clip = AudioFileClip(audio_path)
font_size = TITLE_FONT_SIZE
author_font_size = 20
wrapped_text = textwrap.fill(text, width=90)
txt_clip = TextClip(wrapped_text,fontsize=font_size, font=FONT, color=TITLE_FONT_COLOR, align="west", interline=2)
txt_clip = txt_clip.set_pos("center")
author_clip = TextClip(f"/u/{comment.author}", fontsize=author_font_size, font=FONT, color="lightblue")
author_pos = (SIZE[0]/2 - txt_clip.size[0]/2, SIZE[1]/2 - txt_clip.size[1]/2 - author_font_size - 10)
author_clip = author_clip.set_pos(author_pos)
score_clip = TextClip(f"{comment.score} points", fontsize=author_font_size, font=FONT, color="grey")
score_pos = (author_pos[0] + author_clip.size[0] + 20, author_pos[1])
score_clip = score_clip.set_pos(score_pos)
clip = CompositeVideoClip([color_clip, txt_clip, author_clip, score_clip])
clip.audio = audio_clip
clip.duration = audio_clip.duration
static_clip = VideoFileClip(STATIC_PATH)
clip = concatenate_videoclips([clip, static_clip])
return clip
def generate_video(context):
post = context["post"]
clips = []
clips.append(generate_title(post.title, post.title_audio))
for comment in post.comments:
comment_clip = generate_clip(post, comment)
# overlay reply
if comment.reply:
# TODO this
pass
clips.append(comment_clip)
video = concatenate_videoclips(clips)
background_audio_clip = AudioFileClip(BGM_PATH)
background_audio_clip = afx.audio_loop(background_audio_clip, duration=video.duration)
background_audio_clip = background_audio_clip.fx(afx.volumex, 0.15)
video.audio = CompositeAudioClip([video.audio, background_audio_clip])
video_id = uuid.uuid4()
path = f"{VIDEO_PATH}{video_id}.mp4"
context["video_path"] = path
context["video_id"] = video_id
video.write_videofile(path, fps=24, codec='libx264', threads=4)
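# Usage sketch (assumption, not in the original file): generate_video expects
# a context dict whose "post" has .title, .title_audio and .comments (each
# comment with .body, .body_audio, .author, .score, .reply); it writes the
# rendered file and records its path/id back into the context.
#
# context = {"post": post}
# generate_video(context)
# print(context["video_path"], context["video_id"])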
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'password']
    def create(self, validated_data):
        user = User(username=validated_data.get('username'))
        user.set_password(validated_data.get('password'))
        user.save()
        return user  # return the saved instance, not a fresh unsaved copy
    def update(self, instance, validated_data):
        instance.set_password(validated_data.get('password'))
        instance.save()
        return instance
class AdminUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'password', 'is_superuser']
    def create(self, validated_data):
        user = User(username=validated_data.get('username'))
        user.set_password(validated_data.get('password'))
        user.is_superuser = True
        user.save()
        return user  # return the saved instance, not a fresh unsaved copy
    def update(self, instance, validated_data):
        instance.set_password(validated_data.get('password'))
        instance.save()
        return instance
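# Usage sketch (assumption, not in the original file; requires configured
# Django settings, so it is left commented out):
#
# s = UserSerializer(data={'username': 'alice', 'password': 's3cret'})
# if s.is_valid():
#     user = s.save()  # dispatches to create() above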
|
nilq/baby-python
|
python
|
from anet.utils.format import MultiObssPreprocessor
from anet.utils.log import configure_logging
from anet.utils.optimizer import load_optimizer, save_optimizer
|
nilq/baby-python
|
python
|
import sys
import pandas as pd
import numpy as np
import re
import pickle
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import AdaBoostClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import f1_score, recall_score, precision_score, classification_report, accuracy_score
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
def load_data(database_filepath):
"""
Load the data from the specific file
Args:
database_filepath: str. File path of the specific database
    Returns:
        X: Series. The message text for each record
        Y: DataFrame. The multi-label category indicators
        category_names: Index. The category column names
"""
# loading data from .db file
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = pd.read_sql_table("MessagesCategories", engine)
# creating message and category lists
X = df['message']
Y = df.iloc[:, 4:]
category_names = df.columns[4:]
return X, Y, category_names
def tokenize(text):
"""Load, clean and tokenize a text"""
#clean url's from text
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, " ")
#normalize text
text = re.sub(r"[^a-zA-Z0-9]", " ", text) #[^a-zA-Z0-9]
text = text.lower()
#tokenize text
tokens = word_tokenize(text)
# list tokens without stop-words
lemmatizer = WordNetLemmatizer()
stop_words = stopwords.words("english")
clean_tokens = []
for token in tokens:
if token not in stop_words:
clean_tokens.append(lemmatizer.lemmatize(token))
return clean_tokens
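# Example (illustrative): URLs are blanked out, the text is normalised and
# lower-cased, stop words are dropped, and the rest is lemmatised, e.g.
#   tokenize("Visit http://a.b for help!") -> ['visit', 'help']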
def build_model():
"""
Create model to be trained on. The model include pipeline and cross validation parameters for grid search.
Grid search is optional.
"""
#creating pipeline for preprocesses and training
pipeline = Pipeline([
("vect", CountVectorizer(tokenizer=tokenize)),
("tfidf", TfidfTransformer()),
('clf', MultiOutputClassifier(AdaBoostClassifier()))
])
# define parameters for grid search
parameters = {
'tfidf__use_idf': (True, False),
'clf__estimator__n_estimators': [50, 100, 150]
}
    # choose a method to build the model; re-prompt until the answer is valid
    print("Hint: GridSearchCV will take more time!\n")
    while True:
        chose_option = input("Do you want to use GridSearchCV (Yes or No): ").lower()
        if chose_option in ["yes", "y"]:
            return GridSearchCV(pipeline, param_grid=parameters, cv=3)
        elif chose_option in ["no", "n"]:
            return pipeline
        else:
            print("Choose a valid option!")
def evaluate_model(model, X_test, Y_test, category_names):
"""Evaluate model"""
# predicting labels
Y_pred = model.predict(X_test.astype(str))
#comparing predicted labels with ground-truth
    print(classification_report(Y_test.values, Y_pred, target_names=category_names))
print('Accuracy Score: {}'.format(np.mean(Y_test.values == Y_pred)))
def save_model(model, model_filepath):
"""Save model"""
with open(model_filepath, "wb") as file:
pickle.dump(model, file)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train.astype(str), Y_train)
print('Evaluating model...')
evaluate_model(model, X_test.astype(str), Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
class ListSecure(list):
    """A list whose get() returns a default instead of raising IndexError."""
    def get(self, index, default=None):
        try:
            return self.__getitem__(index)
        except IndexError:
            return default
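# Usage sketch (not part of the original file): get() mirrors dict.get
# semantics for list indices.
if __name__ == "__main__":
    xs = ListSecure([1, 2, 3])
    print(xs.get(1))      # 2
    print(xs.get(10))     # None
    print(xs.get(10, 0))  # 0 (explicit default)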
|
nilq/baby-python
|
python
|
"""common functions for client and server"""
def send_msg(socket, msg):
"""send message to a socket"""
socket.send(bytes(str(msg), 'utf8'))
def recv_msg(socket):
"""receive a message from a socket"""
return socket.recv(4096).decode('utf8')
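# Usage sketch (not part of the original file): exercise the helpers over a
# local socketpair, so no network setup is needed.
if __name__ == '__main__':
    import socket
    a, b = socket.socketpair()
    send_msg(a, 'ping')
    print(recv_msg(b))  # -> 'ping'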
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import pickle
import os
import string
import numpy as np
import tempfile
import pandas as pd
import sys
import hashlib
import errno
import time
import shutil
import zipfile
import tarfile
import contextlib
from tqdm import tqdm
from sklearn.datasets.base import Bunch
TRANS = str.maketrans('', '', string.punctuation.replace('-', ''))
TEMP = tempfile.gettempdir()
if sys.version_info[0] == 3:
import pickle
import io
import urllib
_basestring = str
cPickle = pickle
StringIO = io.StringIO
BytesIO = io.BytesIO
_urllib = urllib
izip = zip
def md5_hash(string):
m = hashlib.md5()
m.update(string.encode('utf-8'))
return m.hexdigest()
else:
import cPickle
import StringIO
import urllib
import urllib2
import urlparse
import types
import itertools
_basestring = basestring
cPickle = cPickle
StringIO = BytesIO = StringIO.StringIO
izip = itertools.izip
class _module_lookup(object):
modules = [urlparse, urllib2, urllib]
def __getattr__(self, name):
for module in self.modules:
if hasattr(module, name):
attr = getattr(module, name)
if not isinstance(attr, types.ModuleType):
return attr
raise NotImplementedError(
'This function has not been imported properly')
module_lookup = _module_lookup()
class _urllib():
request = module_lookup
error = module_lookup
parse = module_lookup
def md5_hash(string):
m = hashlib.md5()
m.update(string)
return m.hexdigest()
def movetree(src, dst):
"""Move an entire tree to another directory. Any existing file is
overwritten"""
names = os.listdir(src)
# Create destination dir if it does not exist
_makedirs(dst)
errors = []
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.isdir(srcname) and os.path.isdir(dstname):
movetree(srcname, dstname)
os.rmdir(srcname)
else:
shutil.move(srcname, dstname)
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive movetree so that we can
# continue with other files
except Exception as err:
errors.extend(err.args[0])
if errors:
raise Exception(errors)
def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None,
initial_size=0, total_size=None, verbose=1):
"""Download a file chunk by chunk and show advancement
Parameters
----------
response: _urllib.response.addinfourl
Response to the download request in order to get file size
local_file: file
Hard disk file where data should be written
chunk_size: int, optional
Size of downloaded chunks. Default: 8192
report_hook: bool
Whether or not to show downloading advancement. Default: None
initial_size: int, optional
If resuming, indicate the initial size of the file
total_size: int, optional
Expected final size of download (None means it is unknown).
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
data: string
The downloaded file.
"""
try:
if total_size is None:
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size) + initial_size
except Exception as e:
if verbose > 1:
print("Warning: total size could not be determined.")
if verbose > 2:
print("Full stack trace: %s" % e)
total_size = None
bytes_so_far = initial_size
# t0 = time.time()
if report_hook:
pbar = tqdm(total=total_size, unit="b", unit_scale=True)
while True:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
if report_hook:
# sys.stderr.write('\n')
pbar.close()
break
local_file.write(chunk)
if report_hook:
pbar.update(len(chunk)) # This is better because works in ipython
# _chunk_report_(bytes_so_far, total_size, initial_size, t0)
if report_hook:
pbar.close()
return
def _makedirs(path): # https://stackoverflow.com/a/600612/223267
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def save_pickle(data,savepath):
fdr = '/'.join(savepath.strip().split('/')[:-1])
if not os.path.exists(fdr):
os.makedirs(fdr)
with open(savepath+'.pkl', 'wb') as fn:
pickle.dump(data, fn)
def load_pickle(file):
with open(file, 'rb') as fn:
data = pickle.load(fn)
return data
def disc_pr():
print("***********************************")
def check_list(lst):
    # True if lst[1:179] is not a strictly consecutive run of integers
    for idx, el in enumerate(lst[1:179], start=1):
        if not (int(el) - int(lst[idx - 1]) == 1 and int(lst[idx + 1]) - int(el) == 1):
            return True
    return False
def fetch_embeds(wds_list,embed_fl='../word_embeddings/glove.42B.300d.txt'):
wds_vec=dict()
if isinstance(wds_list,str):
wds_list=[wds_list]
with open(embed_fl,'r') as fl:
for line in fl:
wd = line.strip().split()[0]
vec = [float(x) for x in line.strip().split()[1:]]
if wd in wds_list:
wds_vec[wd] = vec
wds_list.remove(wd)
if wds_list == []:
return wds_vec
return "Not all words found."
def loadGloveModel(gloveFile='../word_embeddings/glove.42B.300d.txt'):
    print("Loading Glove Model")
    model = {}
    with open(gloveFile, 'r') as f:  # context manager so the file is closed
        for line in f:
            splitLine = line.split()
            word = splitLine[0]
            embedding = np.array([float(val) for val in splitLine[1:]])
            model[word] = embedding
    return model
def extract_sent_embed1(sent, glove_embeddings=None, weights=None):
    if glove_embeddings is None:
        w2vec_dict = load_pickle('./stimuli/word2vec.pkl')
    else:
        w2vec_dict = glove_embeddings
with open('./stimuli/stopwords.txt') as f:
stp_wds = f.read().splitlines()
sent = (sent.translate(TRANS)).lower().split(' ')
sent_proc=[]
for wd in sent:
if wd not in stp_wds:
if '-' in wd:
split_words = wd.split('-')
for w in split_words:
sent_proc.append(w)
else:
sent_proc.append(wd)
avg_vec=np.zeros((5000,))
for wd in sent_proc:
if weights is not None:
we = np.dot(weights, w2vec_dict[wd])[:-1]
else:
we = w2vec_dict[wd]
avg_vec += we
#avg_vec/=len(sent_proc)
#avg_vec = avg_vec.reshape((300,))
return avg_vec
def extract_sent_embed(sent, glove_embeddings=None):
    if glove_embeddings is None:
        w2vec_dict = load_pickle('./stimuli/word2vec.pkl')
    else:
        w2vec_dict = glove_embeddings
with open('./stimuli/stopwords.txt') as f:
stp_wds = f.read().splitlines()
sent = (sent.translate(TRANS)).lower().split(' ')
sent_proc=[]
for wd in sent:
if wd not in stp_wds:
if '-' in wd:
split_words = wd.split('-')
for w in split_words:
sent_proc.append(w)
else:
sent_proc.append(wd)
avg_vec=np.zeros((1,300))
for wd in sent_proc:
avg_vec += w2vec_dict[wd]
#avg_vec/=len(sent_proc)
avg_vec = avg_vec.reshape((300,))
return avg_vec
def load_data_meta(data_tuple):
data = dict()
meta = dict()
data_cleared = dict()
for fl in data_tuple:
if 'meta' in fl:
meta = load_pickle(fl)
else:
data = load_pickle(fl)
    assert data and meta, 'both a data file and a meta file are required'
# clear or not? morning call
for k,v in data.items():
data_cleared[k[0]] = v
return data_cleared,meta
def _get_dataset_dir(sub_dir=None, data_dir=None, default_paths=None,
verbose=1):
""" Create if necessary and returns data directory of given dataset.
Parameters
----------
sub_dir: string
Name of sub-dir
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
default_paths: list of string, optional
Default system paths in which the dataset may already have been
installed by a third party software. They will be checked first.
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
data_dir: string
Path of the given dataset directory.
Notes
-----
This function retrieves the datasets directory (or data directory) using
the following priority :
1. defaults system paths
2. the keyword argument data_dir
3. the global environment variable WEB_SHARED_DATA
4. the user environment variable WEB_DATA
5. web_data in the user home folder
"""
# We build an array of successive paths by priority
# The boolean indicates if it is a pre_dir: in that case, we won't add the
# dataset name to the path.
paths = []
# Search given environment variables
if default_paths is not None:
for default_path in default_paths:
paths.extend([(d, True) for d in default_path.split(':')])
# Check data_dir which force storage in a specific location
if data_dir is not None:
paths.extend([(d, False) for d in data_dir.split(':')])
else:
global_data = os.getenv('WEB_SHARED_DATA')
if global_data is not None:
paths.extend([(d, False) for d in global_data.split(':')])
local_data = os.getenv('WEB_DATA')
if local_data is not None:
paths.extend([(d, False) for d in local_data.split(':')])
paths.append((os.path.expanduser('~/web_data'), False))
if verbose > 2:
print('Dataset search paths: %s' % paths)
# Check if the dataset exists somewhere
for path, is_pre_dir in paths:
if not is_pre_dir and sub_dir:
path = os.path.join(path, sub_dir)
        if os.path.islink(path):
            # Resolve the symlink to its real target
            path = os.path.realpath(path)
if os.path.exists(path) and os.path.isdir(path):
if verbose > 1:
print('\nDataset found in %s\n' % path)
return path
# If not, create a folder in the first writeable directory
errors = []
for (path, is_pre_dir) in paths:
if not is_pre_dir and sub_dir:
path = os.path.join(path, sub_dir)
if not os.path.exists(path):
try:
_makedirs(path)
if verbose > 0:
print('\nDataset created in %s\n' % path)
return path
except Exception as exc:
short_error_message = getattr(exc, 'strerror', str(exc))
errors.append('\n -{0} ({1})'.format(
path, short_error_message))
raise OSError('Web tried to store the dataset in the following '
'directories, but:' + ''.join(errors))
def _uncompress_file(file_, delete_archive=True, verbose=1):
"""Uncompress files contained in a data_set.
Parameters
----------
file: string
path of file to be uncompressed.
delete_archive: bool, optional
Wheteher or not to delete archive once it is uncompressed.
Default: True
verbose: int, optional
verbosity level (0 means no message).
Notes
-----
This handles zip, tar, gzip and bzip files only.
"""
if verbose > 0:
print('Extracting data from %s...' % file_)
data_dir = os.path.dirname(file_)
# We first try to see if it is a zip file
try:
filename, ext = os.path.splitext(file_)
with open(file_, "rb") as fd:
header = fd.read(4)
processed = False
if zipfile.is_zipfile(file_):
z = zipfile.ZipFile(file_)
z.extractall(data_dir)
z.close()
processed = True
elif ext == '.gz' or header.startswith(b'\x1f\x8b'):
import gzip
gz = gzip.open(file_)
if ext == '.tgz':
filename = filename + '.tar'
out = open(filename, 'wb')
shutil.copyfileobj(gz, out, 8192)
gz.close()
out.close()
            # If the file is .tar.gz, it will be handled by the tar check below
if delete_archive:
os.remove(file_)
file_ = filename
filename, ext = os.path.splitext(file_)
processed = True
if tarfile.is_tarfile(file_):
with contextlib.closing(tarfile.open(file_, "r")) as tar:
tar.extractall(path=data_dir)
processed = True
if not processed:
raise IOError(
"[Uncompress] unknown archive file format: %s" % file_)
if delete_archive:
os.remove(file_)
if verbose > 0:
print(' ...done.')
except Exception as e:
if verbose > 0:
print('Error uncompressing file: %s' % e)
raise
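# Sketch: extract an archive next to its own location, keeping the original
# file (archive path hypothetical).
def _demo_uncompress():
    _uncompress_file('/tmp/demo/embeddings.zip', delete_archive=False)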
def _get_as_pd(url, dataset_name, **read_csv_kwargs):
return pd.read_csv(_fetch_file(url, dataset_name, verbose=0), **read_csv_kwargs)
def fetch_MEN(which="all", form="natural"):
"""
Fetch MEN dataset for testing similarity and relatedness
    Parameters
    ----------
which : "all", "test" or "dev"
form : "lem" or "natural"
Returns
-------
data : sklearn.datasets.base.Bunch
dictionary-like object. Keys of interest:
'X': matrix of 2 words per column,
'y': vector with scores
Published at http://clic.cimec.unitn.it/~elia.bruni/MEN.html.
"""
if which == "dev":
data = _get_as_pd('https://www.dropbox.com/s/c0hm5dd95xapenf/EN-MEN-LEM-DEV.txt?dl=1',
'similarity', header=None, sep=" ")
elif which == "test":
data = _get_as_pd('https://www.dropbox.com/s/vdmqgvn65smm2ah/EN-MEN-LEM-TEST.txt?dl=1',
'similarity/EN-MEN-LEM-TEST', header=None, sep=" ")
elif which == "all":
data = _get_as_pd('https://www.dropbox.com/s/b9rv8s7l32ni274/EN-MEN-LEM.txt?dl=1',
'similarity', header=None, sep=" ")
else:
raise RuntimeError("Not recognized which parameter")
if form == "natural":
# Remove last two chars from first two columns
data = data.apply(lambda x: [y if isinstance(y, float) else y[0:-2] for y in x])
elif form != "lem":
raise RuntimeError("Not recognized form argument")
return Bunch(X=data.values[:, 0:2].astype("object"), y=data.values[:, 2:].astype(np.float) / 5.0)
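# Usage sketch for the fetchers in this module (requires network access;
# printed values illustrative): X holds word pairs, y the rescaled scores.
def _demo_fetch_men():
    men = fetch_MEN(which='dev', form='natural')
    print(men.X[:3])
    print(men.y[:3])  # scores rescaled by the /5.0 above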
def fetch_WS353(which="all"):
"""
Fetch WS353 dataset for testing attributional and
relatedness similarity
Parameters
----------
which : 'all': for both relatedness and attributional similarity,
'relatedness': for relatedness similarity
'similarity': for attributional similarity
'set1': as divided by authors
'set2': as divided by authors
References
----------
    Finkelstein, Gabrilovich, "Placing Search in Context: The Concept Revisited", 2002
Agirre, Eneko et al., "A Study on Similarity and Relatedness Using Distributional and WordNet-based Approaches",
2009
Returns
-------
data : sklearn.datasets.base.Bunch
dictionary-like object. Keys of interest:
'X': matrix of 2 words per column,
'y': vector with scores,
'sd': vector of std of scores if available (for set1 and set2)
"""
if which == "all":
data = _get_as_pd('https://www.dropbox.com/s/eqal5qj97ajaycz/EN-WS353.txt?dl=1',
'similarity', header=0, sep="\t")
elif which == "relatedness":
data = _get_as_pd('https://www.dropbox.com/s/x94ob9zg0kj67xg/EN-WSR353.txt?dl=1',
'similarity', header=None, sep="\t")
elif which == "similarity":
data = _get_as_pd('https://www.dropbox.com/s/ohbamierd2kt1kp/EN-WSS353.txt?dl=1',
'similarity', header=None, sep="\t")
elif which == "set1":
data = _get_as_pd('https://www.dropbox.com/s/opj6uxzh5ov8gha/EN-WS353-SET1.txt?dl=1',
'similarity', header=0, sep="\t")
elif which == "set2":
data = _get_as_pd('https://www.dropbox.com/s/w03734er70wyt5o/EN-WS353-SET2.txt?dl=1',
'similarity', header=0, sep="\t")
else:
raise RuntimeError("Not recognized which parameter")
# We basically select all the columns available
X = data.values[:, 0:2]
y = data.values[:, 2].astype(np.float)
    # set1 and set2 also include per-annotator scores, so report their std
if data.values.shape[1] > 3:
sd = np.std(data.values[:, 2:15].astype(np.float), axis=1).flatten()
return Bunch(X=X.astype("object"), y=y, sd=sd)
else:
return Bunch(X=X.astype("object"), y=y)
def fetch_RG65():
"""
Fetch Rubenstein and Goodenough dataset for testing attributional and
relatedness similarity
Returns
-------
data : sklearn.datasets.base.Bunch
dictionary-like object. Keys of interest:
'X': matrix of 2 words per column,
        'y': vector with scores
References
----------
Rubenstein, Goodenough, "Contextual correlates of synonymy", 1965
Notes
-----
Scores were scaled by factor 10/4
"""
data = _get_as_pd('https://www.dropbox.com/s/chopke5zqly228d/EN-RG-65.txt?dl=1',
'similarity', header=None, sep="\t").values
return Bunch(X=data[:, 0:2].astype("object"),
y=data[:, 2].astype(np.float) * 10.0 / 4.0)
def fetch_RW():
"""
Fetch Rare Words dataset for testing attributional similarity
Returns
-------
data : sklearn.datasets.base.Bunch
dictionary-like object. Keys of interest:
'X': matrix of 2 words per column,
'y': vector with scores,
'sd': vector of std of scores
References
----------
Published at http://www-nlp.stanford.edu/~lmthang/morphoNLM/.
Notes
-----
2034 word pairs that are relatively rare with human similarity scores. Rare word selection: our choices of
rare words (word1) are based on their frequencies – based on five bins (5, 10], (10, 100], (100, 1000],
(1000, 10000], and the affixes they possess. To create a diverse set of candidates, we randomly
select 15 words for each configuration (a frequency bin, an affix). At the scale of Wikipedia,
a word with frequency of 1-5 is most likely a junk word, and even restricted to words with
frequencies above five, there are still many non-English words. To counter such problems,
each word selected is required to have a non-zero number of synsets in WordNet(Miller, 1995).
"""
data = _get_as_pd('https://www.dropbox.com/s/xhimnr51kcla62k/EN-RW.txt?dl=1',
'similarity', header=None, sep="\t").values
return Bunch(X=data[:, 0:2].astype("object"),
y=data[:, 2].astype(np.float),
                 sd=np.std(data[:, 3:].astype(np.float), axis=1))
def fetch_multilingual_SimLex999(which="EN"):
"""
Fetch Multilingual SimLex999 dataset for testing attributional similarity
Parameters
    ----------
which : "EN", "RU", "IT" or "DE" for language
Returns
-------
data : sklearn.datasets.base.Bunch
dictionary-like object. Keys of interest:
'X': matrix of 2 words per column,
'y': vector with scores,
'sd': vector of sd of scores,
References
----------
Published at http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html.
Notes
-----
Scores for EN are different than the original SimLex999 dataset.
Authors description:
Multilingual SimLex999 resource consists of translations of the SimLex999 word similarity data set to
three languages: German, Italian and Russian. Each of the translated datasets is scored by
13 human judges (crowdworkers) - all fluent speakers of its language. For consistency, we
also collected human judgments for the original English corpus according to the same protocol
applied to the other languages. This dataset allows to explore the impact of the "judgement language"
(the language in which word pairs are presented to the human judges) on the resulted similarity scores
and to evaluate vector space models on a truly multilingual setup (i.e. when both the training and the
test data are multilingual).
"""
if which == "EN":
data = _get_as_pd('https://www.dropbox.com/s/nczc4ao6koqq7qm/EN-MSIM999.txt?dl=1',
'similarity', header=None, encoding='utf-8', sep=" ")
elif which == "DE":
data = _get_as_pd('https://www.dropbox.com/s/ucpwrp0ahawsdtf/DE-MSIM999.txt?dl=1',
'similarity', header=None, encoding='utf-8', sep=" ")
elif which == "IT":
data = _get_as_pd('https://www.dropbox.com/s/siqjagyz8dkjb9q/IT-MSIM999.txt?dl=1',
'similarity', header=None, encoding='utf-8', sep=" ")
elif which == "RU":
data = _get_as_pd('https://www.dropbox.com/s/3v26edm9a31klko/RU-MSIM999.txt?dl=1',
'similarity', header=None, encoding='utf-8', sep=" ")
else:
raise RuntimeError("Not recognized which parameter")
# We basically select all the columns available
X = data.values[:, 0:2]
scores = data.values[:, 2:].astype(np.float)
y = np.mean(scores, axis=1)
sd = np.std(scores, axis=1)
return Bunch(X=X.astype("object"), y=y, sd=sd)
def fetch_SimLex999():
"""
Fetch SimLex999 dataset for testing attributional similarity
Returns
-------
data : sklearn.datasets.base.Bunch
dictionary-like object. Keys of interest:
'X': matrix of 2 words per column,
'y': vector with scores,
'sd': vector of sd of scores,
'conc': matrix with columns conc(w1), conc(w2) and concQ the from dataset
'POS': vector with POS tag
'assoc': matrix with columns denoting free association: Assoc(USF) and SimAssoc333
References
----------
Hill, Felix et al., "Simlex-999: Evaluating semantic models with (genuine) similarity estimation", 2014
Notes
-----
SimLex-999 is a gold standard resource for the evaluation of models that learn the meaning of words and concepts.
SimLex-999 provides a way of measuring how well models capture similarity, rather than relatedness or
association. The scores in SimLex-999 therefore differ from other well-known evaluation datasets
such as WordSim-353 (Finkelstein et al. 2002). The following two example pairs illustrate the
difference - note that clothes are not similar to closets (different materials, function etc.),
even though they are very much related: coast - shore 9.00 9.10, clothes - closet 1.96 8.00
"""
data = _get_as_pd('https://www.dropbox.com/s/0jpa1x8vpmk3ych/EN-SIM999.txt?dl=1',
'similarity', sep="\t")
# We basically select all the columns available
X = data[['word1', 'word2']].values
y = data['SimLex999'].values
sd = data['SD(SimLex)'].values
conc = data[['conc(w1)', 'conc(w2)', 'concQ']].values
POS = data[['POS']].values
assoc = data[['Assoc(USF)', 'SimAssoc333']].values
return Bunch(X=X.astype("object"), y=y, sd=sd, conc=conc, POS=POS, assoc=assoc)
def _fetch_file(url, data_dir=TEMP, uncompress=False, move=False,md5sum=None,
username=None, password=None, mock=False, handlers=[], resume=True, verbose=0):
"""Load requested dataset, downloading it if needed or requested.
This function retrieves files from the hard drive or download them from
    the given urls. Note to developers: all the files will be first
downloaded in a sandbox and, if everything goes well, they will be moved
into the folder of the dataset. This prevents corrupting previously
downloaded data. In case of a big dataset, do not hesitate to make several
calls if needed.
Parameters
----------
    url: string
        URL of the file to download
resume: bool, optional
If true, try to resume partially downloaded files
uncompress: bool, optional
If true, will uncompress zip
move: str, optional
If True, will move downloaded file to given relative path.
NOTE: common usage is zip_file_id/zip_file.zip together
with uncompress set to True
md5sum: string, optional
MD5 sum of the file. Checked if download of the file is required
username: string, optional
Username used for basic HTTP authentication
password: string, optional
Password used for basic HTTP authentication
handlers: list of BaseHandler, optional
urllib handlers passed to urllib.request.build_opener. Used by
advanced users to customize request handling.
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
files: list of string
Absolute paths of downloaded files on disk
"""
# TODO: move to global scope and rename
def _fetch_helper(url, data_dir=TEMP, resume=True, overwrite=False,
md5sum=None, username=None, password=None, handlers=[],
verbose=1):
if not os.path.isabs(data_dir):
data_dir = _get_dataset_dir(data_dir)
# Determine data path
_makedirs(data_dir)
# Determine filename using URL
parse = _urllib.parse.urlparse(url)
file_name = os.path.basename(parse.path)
if file_name == '':
file_name = md5_hash(parse.path)
temp_file_name = file_name + ".part"
full_name = os.path.join(data_dir, file_name)
temp_full_name = os.path.join(data_dir, temp_file_name)
if os.path.exists(full_name):
if overwrite:
os.remove(full_name)
else:
return full_name
if os.path.exists(temp_full_name):
if overwrite:
os.remove(temp_full_name)
t0 = time.time()
local_file = None
initial_size = 0
try:
# Download data
url_opener = _urllib.request.build_opener(*handlers)
request = _urllib.request.Request(url)
request.add_header('Connection', 'Keep-Alive')
if username is not None and password is not None:
if not url.startswith('https'):
raise ValueError(
'Authentication was requested on a non secured URL (%s).'
'Request has been blocked for security reasons.' % url)
            # Note: HTTPBasicAuthHandler is not suited here because it relies
# on the fact that the server will return a 401 error with proper
# www-authentication header, which is not the case of most
# servers.
encoded_auth = base64.b64encode(
(username + ':' + password).encode())
request.add_header(b'Authorization', b'Basic ' + encoded_auth)
if verbose > 0:
displayed_url = url.split('?')[0] if verbose == 1 else url
print('Downloading data from %s ...' % displayed_url)
if resume and os.path.exists(temp_full_name):
# Download has been interrupted, we try to resume it.
local_file_size = os.path.getsize(temp_full_name)
# If the file exists, then only download the remainder
request.add_header("Range", "bytes=%s-" % (local_file_size))
try:
data = url_opener.open(request)
content_range = data.info().get('Content-Range')
if (content_range is None or not content_range.startswith(
'bytes %s-' % local_file_size)):
raise IOError('Server does not support resuming')
except Exception:
# A wide number of errors can be raised here. HTTPError,
# URLError... I prefer to catch them all and rerun without
# resuming.
if verbose > 0:
print('Resuming failed, try to download the whole file.')
return _fetch_helper(
url, data_dir, resume=False, overwrite=overwrite,
md5sum=md5sum, username=username, password=password,
handlers=handlers, verbose=verbose)
local_file = open(temp_full_name, "ab")
initial_size = local_file_size
else:
data = url_opener.open(request)
local_file = open(temp_full_name, "wb")
_chunk_read_(data, local_file, report_hook=(verbose > 0),
initial_size=initial_size, verbose=verbose)
# temp file must be closed prior to the move
if not local_file.closed:
local_file.close()
shutil.move(temp_full_name, full_name)
dt = time.time() - t0
if verbose > 0:
print('...done. (%i seconds, %i min)' % (dt, dt // 60))
except _urllib.error.HTTPError as e:
if verbose > 0:
print('Error while fetching file %s. Dataset fetching aborted.' %
(file_name))
if verbose > 1:
print("HTTP Error: %s, %s" % (e, url))
raise
except _urllib.error.URLError as e:
if verbose > 0:
print('Error while fetching file %s. Dataset fetching aborted.' %
(file_name))
if verbose > 1:
print("URL Error: %s, %s" % (e, url))
raise
finally:
if local_file is not None:
if not local_file.closed:
local_file.close()
if md5sum is not None:
if (_md5_sum_file(full_name) != md5sum):
raise ValueError("File %s checksum verification has failed."
" Dataset fetching aborted." % local_file)
return full_name
if not os.path.isabs(data_dir):
data_dir = _get_dataset_dir(data_dir)
# There are two working directories here:
# - data_dir is the destination directory of the dataset
# - temp_dir is a temporary directory dedicated to this fetching call. All
# files that must be downloaded will be in this directory. If a corrupted
# file is found, or a file is missing, this working directory will be
# deleted.
parse = _urllib.parse.urlparse(url)
file_name = os.path.basename(parse.path)
files_pickle = cPickle.dumps([(file_, url) for file_, url in zip([file_name], [url])])
files_md5 = hashlib.md5(files_pickle).hexdigest()
temp_dir = os.path.join(data_dir, files_md5)
# Create destination dir
_makedirs(data_dir)
# Abortion flag, in case of error
abort = None
# 2 possibilities:
# - the file exists in data_dir, nothing to do (we have to account for move parameter here)
    # - the file does not exist: we download it into temp_dir
# Target file in the data_dir
target_file = os.path.join(data_dir, file_name)
# Change move so we always uncompress to some folder (this is important for
# detecting already downloaded files)
# Ex. glove.4B.zip -> glove.4B/glove.4B.zip
if uncompress and not move:
dirname, _ = os.path.splitext(file_name)
move = os.path.join(dirname, os.path.basename(file_name))
if (abort is None
and not os.path.exists(target_file)
and (not move or (move and uncompress and not os.path.exists(os.path.dirname(os.path.join(data_dir, move)))))
or (move and not uncompress and not os.path.exists(os.path.join(data_dir, move)))):
# Target file in temp dir
temp_target_file = os.path.join(temp_dir, file_name)
# We may be in a global read-only repository. If so, we cannot
# download files.
if not os.access(data_dir, os.W_OK):
raise ValueError('Dataset files are missing but dataset'
' repository is read-only. Contact your data'
' administrator to solve the problem')
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
dl_file = _fetch_helper(url, temp_dir, resume=resume,
verbose=verbose, md5sum=md5sum,
username=username,
password=password,
handlers=handlers)
if (abort is None and not os.path.exists(target_file) and not
os.path.exists(temp_target_file)):
if not mock:
            warnings.warn('An error occurred while fetching %s' % file_name)
abort = ("Dataset has been downloaded but requested file was "
"not provided:\nURL:%s\nFile:%s" %
(url, target_file))
else:
_makedirs(os.path.dirname(temp_target_file))
open(temp_target_file, 'w').close()
if move:
move = os.path.join(data_dir, move)
move_dir = os.path.dirname(move)
_makedirs(move_dir)
shutil.move(dl_file, move)
dl_file = move
target_file = dl_file
if uncompress:
try:
if os.path.getsize(dl_file) != 0:
_uncompress_file(dl_file, verbose=verbose)
else:
os.remove(dl_file)
target_file = os.path.dirname(target_file)
except Exception as e:
abort = str(e)
else:
if verbose > 0:
print("File already downloaded, skipping")
if move:
target_file = os.path.join(data_dir, move)
if uncompress:
target_file = os.path.dirname(target_file)
if abort is not None:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
raise IOError('Fetching aborted: ' + abort)
    # If needed, move files from the temp directory to the final directory.
if os.path.exists(temp_dir):
        # XXX We could move only the requested files
# XXX Movetree can go wrong
movetree(temp_dir, data_dir)
shutil.rmtree(temp_dir)
return target_file
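# Sketch: download-and-extract in one call (URL hypothetical). With
# uncompress=True and no move, the archive is unpacked into a folder named
# after the file, and that folder's path is returned.
def _demo_fetch_file():
    path = _fetch_file('https://example.com/data/archive.zip',
                       data_dir='similarity', uncompress=True, verbose=1)
    print(path)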
|
nilq/baby-python
|
python
|
# Copyright 2016 Internap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from hamcrest import assert_that, is_
import json
import mock
import unittest
from ubersmith_remote_module_server.api import Api
class ApiTest(unittest.TestCase):
def setUp(self):
self.app = Flask('test_app')
self.api_client = self.app.test_client()
self.router = mock.Mock()
self.module1 = mock.Mock()
self.module2 = mock.Mock()
self.modules = {'module1': self.module1,
'module2': self.module2}
self.api = Api(self.modules, self.app, self.router)
def generate_module_path(self, module_name):
return '/{0}/'.format(module_name)
def test_list_implemented_methods(self):
self.router.list_implemented_methods.return_value = ['abcd', 'efgh']
output = self.api_client.get(self.generate_module_path('module1'))
self.router.list_implemented_methods.assert_called_with(self.module1)
assert_that(json.loads(output.data.decode(output.charset)), is_({
"implemented_methods": [
"abcd",
"efgh"
]
}))
def test_execute_method_returns_string(self):
self.router.invoke_method.return_value = 'simple string'
output = self.api_client.post(self.generate_module_path('module2'),
headers={'Content-Type': 'application/json'},
data=json.dumps(
{
"method": "remote_method",
"params": [],
"env": {
"variable1": "value1"
},
"callback": {}
}
))
self.router.invoke_method.assert_called_with(module=self.module2, method='remote_method', params=[], env={'variable1': 'value1'}, callback={})
assert_that(json.loads(output.data.decode(output.charset)), is_('simple string'))
def test_execute_method_returns_list(self):
self.router.invoke_method.return_value = ['a', 'b', 'c']
output = self.api_client.post(self.generate_module_path('module2'),
headers={'Content-Type': 'application/json'},
data=json.dumps(
{
"method": "remote_method",
"params": [],
"env": {
"variable1": "value1"
},
"callback": {}
}
))
self.router.invoke_method.assert_called_with(module=self.module2, method='remote_method', params=[], env={'variable1': 'value1'}, callback={})
assert_that(json.loads(output.data.decode(output.charset)), is_(['a', 'b', 'c']))
def test_invoking_unknown_module_returns_a_404(self):
output = self.api_client.post(self.generate_module_path('new_module'),
headers={'Content-Type': 'application/json'},
data=json.dumps(
{
"method": "remote_method",
"params": [],
"env": {
"variable1": "value1"
},
"callback": {}
}
))
assert_that(output.status_code, is_(404))
def test_listing_unknown_module_returns_a_404(self):
output = self.api_client.get(self.generate_module_path('new_module'))
assert_that(output.status_code, is_(404))
class NoTrailingSlashApiTest(ApiTest):
def generate_module_path(self, module_name):
return '/{0}'.format(module_name)
|
nilq/baby-python
|
python
|
import concurrent.futures
import sqlite3
import pytest
from yesql.drivers.sio import sqlite
pytestmark = pytest.mark.asyncio
@pytest.fixture(autouse=True)
def connection(MockSQLiteConnection):
conn = MockSQLiteConnection.return_value
yield conn
MockSQLiteConnection.reset_mock()
sqlite3.connect.reset_mock()
@pytest.fixture(scope="module")
def connector() -> sqlite.SQLiteConnector:
connector = sqlite.SQLiteConnector(database="foo")
return connector
class TestSQLiteConnector:
@staticmethod
def test_initialize(connector: sqlite.SQLiteConnector, connection):
# Given
connector.initialized = False
# When
connector.initialize()
# Then
assert connector.initialized
assert connection.execute.called
@staticmethod
def test_initialize_done(connector: sqlite.SQLiteConnector, connection):
# Given
connector.initialized = True
# When
connector.initialize()
# Then
assert not connection.execute.called
@staticmethod
def test_initialize_concurrent(connector: sqlite.SQLiteConnector, connection):
# Given
connector.initialized = False
# When
with concurrent.futures.ThreadPoolExecutor() as pool:
futs = (
pool.submit(connector.initialize),
pool.submit(connector.initialize),
pool.submit(connector.initialize),
pool.submit(connector.initialize),
pool.submit(connector.initialize),
)
concurrent.futures.wait(futs)
# Then
assert connection.execute.call_count == 1
@staticmethod
def test_connection(connector: sqlite.SQLiteConnector, connection):
# When
with connector.connection():
...
# Then
assert sqlite3.connect.called
assert connection.rollback.called
@staticmethod
def test_connection_no_transaction(connector: sqlite.SQLiteConnector, connection):
# Given
connection.in_transaction = False
# When
with connector.connection():
...
# Then
assert sqlite3.connect.called
assert not connection.rollback.called
@staticmethod
def test_connection_provided(
connector: sqlite.SQLiteConnector, connection: sqlite3.Connection
):
# When
with connector.connection(connection=connection):
...
# Then
assert not sqlite3.connect.called
assert not connection.rollback.called
@staticmethod
def test_transaction(connector: sqlite.SQLiteConnector, connection):
# When
with connector.transaction():
...
# Then
assert connection.commit.called
@staticmethod
def test_transaction_rollback(connector: sqlite.SQLiteConnector, connection):
# When
with connector.transaction(rollback=True):
...
# Then
assert not connection.commit.called
@staticmethod
def test_close(connector: sqlite.SQLiteConnector):
# When
connector.close()
# Then
assert connector.initialized is False
@staticmethod
def test_open(connector: sqlite.SQLiteConnector):
# Given
connector.initialized = True
# Then
assert connector.open
@staticmethod
def test_get_explain_command(connector: sqlite.SQLiteConnector):
# Given
expected = connector.EXPLAIN_PREFIX
# When
cmd = connector.get_explain_command()
# Then
assert cmd == expected
@staticmethod
def test_get_explain_command_analyze(connector: sqlite.SQLiteConnector):
# Given
expected = connector.EXPLAIN_PREFIX
# When
cmd = connector.get_explain_command(analyze=True)
# Then
assert cmd == expected
@staticmethod
def test_get_explain_command_format(connector: sqlite.SQLiteConnector):
# Given
expected = connector.EXPLAIN_PREFIX
# When
cmd = connector.get_explain_command(analyze=True, format="json")
# Then
assert cmd == expected
|
nilq/baby-python
|
python
|
import psycopg2
from flask import render_template, request, jsonify
from giraffe_api import app, db
q_metric = "select giraffe.init_metric(%(name)s, %(description)s)"
q_metric_value = "insert into giraffe.metric_value" \
"(db_timestamp, cluster, db, metric_id, integer_value, numeric_value) " \
"values(%(db_timestamp)s, %(cluster)s, %(db)s, %(metric_id)s, " \
"%(integer_value)s, %(numeric_value)s)"
@app.route('/receive', methods=['POST'])
def receive():
data = request.json
for (cluster_name, metrics) in data.items():
for metric in metrics:
db.execute(q_metric_value, {'db_timestamp': metric['timestamp'],
'cluster': metric['cluster'],
'db': metric['db'],
'metric_id': metric['id'],
'integer_value': metric['value'],
'numeric_value': None})
return 'OK'
@app.route('/propose', methods=['POST'])
def propose():
# Receives metric list from receiver
data = request.json
metrics = {}
for (metric_name, metric) in data.items():
id = db.get_value(q_metric, {'name': metric_name, 'description': metric['description']})
metrics[metric_name] = id
return jsonify({'result': 'ok', 'metrics': metrics})
@app.route("/", methods=['GET'])
def index():
return "Everything is OK"
|
nilq/baby-python
|
python
|
from turtle import Turtle
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.l_score = 0
self.r_score = 0
self.color('white')
self.hideturtle()
self.penup()
self.sety(200)
def increase_r_score(self):
self.r_score += 1
def increase_l_score(self):
self.l_score += 1
def display_score(self):
self.clear()
self.write(arg=f"{self.l_score} {self.r_score}", align='center', font=("Roboto", 40, 'normal'))
def game_over(self):
self.goto(0,0)
self.write(arg='GAME OVER', align='center', font=("Roboto", 40, 'normal'))
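# Minimal wiring sketch (opens a turtle window; the 'Roboto' font is assumed
# to be installed, as above):
def demo_scoreboard():
    from turtle import Screen
    screen = Screen()
    screen.bgcolor('black')
    board = Scoreboard()
    board.increase_l_score()
    board.display_score()
    screen.exitonclick()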
|
nilq/baby-python
|
python
|
import cv2
import os
# source: https://stackoverflow.com/a/44659589
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation = inter)
# return the resized image
return resized
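# Sketch: shrink to a fixed width, preserving aspect ratio (input path
# hypothetical).
def demo_resize(path='input.jpg'):
    img = cv2.imread(path)
    small = image_resize(img, width=300)
    print(img.shape, '->', small.shape)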
class CFEVideoConf(object):
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
"360p": (480, 360),
"480p": (640, 480),
"720p": (1280, 720),
"1080p": (1920, 1080),
"4k": (3840, 2160),
}
# Video Encoding, might require additional installs
# Types of Codes: http://www.fourcc.org/codecs.php
VIDEO_TYPE = {
'avi': cv2.VideoWriter_fourcc(*'XVID'),
#'mp4': cv2.VideoWriter_fourcc(*'H264'),
'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}
width = 640
height = 480
dims = (640, 480)
capture = None
video_type = None
def __init__(self, capture, filepath, res="480p", *args, **kwargs):
self.capture = capture
self.filepath = filepath
self.width, self.height = self.get_dims(res=res)
self.video_type = self.get_video_type()
# Set resolution for the video capture
# Function adapted from https://kirr.co/0l6qmh
def change_res(self, width, height):
self.capture.set(3, width)
self.capture.set(4, height)
def get_dims(self, res='480p'):
width, height = self.STD_DIMENSIONS['480p']
if res in self.STD_DIMENSIONS:
width, height = self.STD_DIMENSIONS[res]
self.change_res(width, height)
self.dims = (width, height)
return width, height
def get_video_type(self):
filename, ext = os.path.splitext(self.filepath)
if ext in self.VIDEO_TYPE:
return self.VIDEO_TYPE[ext]
return self.VIDEO_TYPE['avi']
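# Sketch: record a short webcam clip with CFEVideoConf (camera index 0 and
# frame count are assumptions):
def demo_record(out_path='demo.avi', num_frames=100, fps=24):
    cap = cv2.VideoCapture(0)
    conf = CFEVideoConf(cap, filepath=out_path, res='480p')
    writer = cv2.VideoWriter(out_path, conf.video_type, fps, conf.dims)
    for _ in range(num_frames):
        ok, frame = cap.read()
        if not ok:
            break
        writer.write(frame)
    cap.release()
    writer.release()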
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from .uploader import GraphiteUploader
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-26 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('portal', '0006_auto_20160224_1510'),
]
operations = [
migrations.AlterField(
model_name='authenticateemail',
name='portal_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='email_user', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='authenticateemailtask',
name='authenticate_email',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_email', to='portal.AuthenticateEmail'),
),
]
|
nilq/baby-python
|
python
|
import json
from rentomatic.serializers import storageroom_serializer as srs
from rentomatic.domain.storageroom import StorageRoom
def test_serialize_domain_storageroom():
room = StorageRoom(
'f853578c-fc0f-4e65-81b8-566c5dffa35a',
size=200,
price=10,
longitude='-0.09998975',
latitude='51.75436293')
expected_json = """
{
"code": "f853578c-fc0f-4e65-81b8-566c5dffa35a",
"size": 200,
"price": 10,
"longitude": -0.09998975,
"latitude": 51.75436293
}
"""
assert json.loads(json.dumps(room, cls=srs.StorageRoomEncoder)) == json.loads(expected_json)
|
nilq/baby-python
|
python
|
from django.shortcuts import get_list_or_404, get_object_or_404
from rest_framework.serializers import ModelSerializer, Serializer
from shop.models import Image, Product
from rest_framework import fields
class ImageSerializer(ModelSerializer):
mid_size = fields.ImageField()
class Meta:
model = Image
fields = ['id', 'name', 'variant', 'mid_size']
class ProductSerializer(ModelSerializer):
class Meta:
model = Product
fields = ['id', 'name', 'active']
class ImageAssociationSerializer(Serializer):
images = fields.ListField()
def save(self, product_id, **kwargs):
product = get_object_or_404(Product, id=product_id)
images = Image.objects.filter(id__in=self.validated_data['images'])
product.images.add(*images)
class RenamProductsValidation(Serializer):
products = fields.ListField()
name = fields.CharField(max_length=100, validators=[])
def save(self):
products = get_list_or_404(Product, id__in=self.validated_data['products'])
for product in products:
product.name = self.validated_data['name']
product.save()
return products
class ProductUpdateValidation(Serializer):
name = fields.CharField()
active = fields.BooleanField(default=False)
def save(self, **kwargs):
if self.instance:
for key, value in self.validated_data.items():
setattr(self.instance, key, value)
self.instance.save()
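# Usage sketch (a saved Product instance is assumed): validate, then apply a
# partial update through ProductUpdateValidation.
def demo_update(product):
    serializer = ProductUpdateValidation(
        instance=product, data={'name': 'Renamed', 'active': True})
    serializer.is_valid(raise_exception=True)
    serializer.save()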
|
nilq/baby-python
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, unicode_literals, print_function
import copy
import numpy as np
import warnings
from astropy.extern import six
from astropy.modeling.core import Model
from astropy.modeling.parameters import Parameter
from . import region
from .util import RegionError
def _toindex(value):
"""
Convert value to an int or an int array.
Input coordinates should be turned into integers
corresponding to the center of the pixel.
They are used to index the mask.
Examples
--------
>>> _toindex(np.array([-0.5, 0.49999]))
array([0, 0])
>>> _toindex(np.array([0.5, 1.49999]))
array([1, 1])
>>> _toindex(np.array([1.5, 2.49999]))
array([2, 2])
"""
indx = np.empty(value.shape, dtype=np.int32)
indx = np.floor(value + 0.5, out=indx)
return indx
class SelectorMask(Model):
"""
A mask model to be used with the `~gwcs.selector.RegionsSelector` transform.
For an IFU observation, the values of the mask
correspond to the region (slice) label.
Parameters
----------
mask : ndarray
An array of integers or strings where the values
correspond to a transform label in `~gwcs.selector.RegionsSelector` model.
        If a transform is not defined, the value should be set to 0 or " ".
"""
inputs = ('x', 'y')
    outputs = ('z',)  # one-element tuple, not a bare string
linear = False
fittable = False
def __init__(self, mask):
if mask.dtype.type is not np.unicode_:
self._mask = np.asanyarray(mask, dtype=np.int)
else:
self._mask = mask
if mask.dtype.type is np.string_:
self._no_transform_value = ""
else:
self._no_transform_value = 0
super(SelectorMask, self).__init__()
@property
def mask(self):
return self._mask
@property
def no_transform_value(self):
return self._no_transform_value
def evaluate(self, x, y):
indx = _toindex(x)
indy = _toindex(y)
return self.mask[indx, indy]
@classmethod
def from_vertices(cls, mask_shape, regions):
"""
Create a `~gwcs.selector.SelectorMask` from
polygon vertices read in from a json file.
Parameters
----------
mask_shape : tuple
shape of mask array
regions: dict
{region_label : list_of_polygon_vertices}
The keys in this dictionary should match the region labels
in `~gwcs.selector.RegionsSelector`.
The list of vertices is ordered in such a way that when traversed in a
counterclockwise direction, the enclosed area is the polygon.
The last vertex must coincide with the first vertex, minimum
4 vertices are needed to define a triangle.
Returns
-------
        mask : `~gwcs.selector.SelectorMask`
            Mask to be used with `~gwcs.selector.RegionsSelector`.
        Examples
        --------
mask = region.create_regions_mask_from_json((300,300), 'regions.json',
'region_schema.json')
"""
labels = np.array(list(regions.keys()))
mask = np.zeros(mask_shape, dtype=labels.dtype)
for rid, vert in regions.items():
pol = region.Polygon(rid, vert)
mask = pol.scan(mask)
return cls(mask)
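# Usage sketch (vertices illustrative): label a square region "1" on a
# 100x100 mask; the polygon is closed by repeating the first vertex.
# regions = {1: [[10, 10], [10, 50], [50, 50], [50, 10], [10, 10]]}
# mask = SelectorMask.from_vertices((100, 100), regions)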
class RegionsSelector(Model):
"""
A model which maps regions to their corresponding transforms.
It evaluates the model matching inputs to the correct region/transform.
Parameters
----------
inputs : list of str
Names of the inputs.
outputs : list of str
Names of the outputs.
selector : dict
Mapping of region labels to transforms.
Labels can be of type int or str, transforms are of type `~astropy.modeling.core.Model`
mask : `~gwcs.selector.SelectorMask`
Mask with region labels.
undefined_transform_value : float, np.nan (default)
Value to be returned if there's no transform defined for the inputs.
"""
_param_names = ()
linear = False
fittable = False
def __init__(self, inputs, outputs, selector, mask, undefined_transform_value=np.nan):
self._inputs = inputs
self._outputs = outputs
self.mask = mask.copy()
self._undefined_transform_value = undefined_transform_value
self._selector = copy.deepcopy(selector)
if " " in selector.keys() or 0 in selector.keys():
raise ValueError('"0" and " " are not allowed as keys.')
super(RegionsSelector, self).__init__(n_models=1)
# make sure that keys in mapping match labels in mask
labels_mask = self.labels_from_mask(mask.mask)
if not np.in1d(labels_mask, list(self._selector.keys()), assume_unique=True).all() or \
not np.in1d(list(self._selector.keys()), labels_mask, assume_unique=True).all():
raise ValueError("Labels don't match regions_mask.")
@staticmethod
def labels_from_mask(regions_mask):
"""
Parameters
----------
regions_mask : ndarray
An array where regions are indicated by int or str labels.
" " and 0 indicate a pixel on the detector which is not within any region.
Evaluating the model in these locations returns NaN or
``undefined_transform_value`` if provided.
"""
labels = np.unique(regions_mask).tolist()
try:
labels.remove(0)
except ValueError:
pass
try:
labels.remove('')
except ValueError:
pass
return labels
@staticmethod
def get_unique_regions(mask):
unique_regions = np.unique(mask).tolist()
        try:
            unique_regions.remove(0)
        except ValueError:
            pass
        try:
            unique_regions.remove("")
        except ValueError:
            pass
return unique_regions
def set_input(self, rid):
"""
Sets one of the inputs and returns a transform associated with it.
"""
def _eval_input(x, y):
return self._selector[rid](x, y)
return _eval_input
def evaluate(self, x, y):
"""
Parameters
----------
x : float or ndarray
Input pixel coordinate.
y : float or ndarray
Input pixel coordinate.
"""
# Get the region labels corresponding to these inputs
indx = _toindex(x)
indy = _toindex(y)
rids = self.mask(indx, indy).flatten()
# Raise an error if all pixels are outside regions
if (rids == self.mask.no_transform_value).all():
raise RegionError("The input positions are not inside any region.")
        # Create output arrays and set any pixels not within regions to
# "undefined_transform_value"
no_trans_ind = (rids == self.mask.no_transform_value).nonzero()
outputs = [np.empty(rids.shape) for n in range(self.n_outputs)]
for out in outputs:
out[no_trans_ind] = self.undefined_transform_value
# Compute the transformations
x = x.flatten()
y = y.flatten()
uniq = self.get_unique_regions(rids)
for rid in uniq:
ind = (rids == rid)
result = self._selector[rid](x[ind], y[ind])
for j in range(self.n_outputs):
outputs[j][ind] = result[j]
return outputs
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
import itertools
parameters = self._param_sets(raw=True)
evaluate = self.evaluate
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
outputs = evaluate(*itertools.chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
return self.prepare_outputs(format_info, *outputs, **kwargs)
def inverse(self):
"""
The inverse exists if all transforms have an inverse
and the mask has an inverse.
"""
selector_inverse = copy.deepcopy(self._selector)
for tr in selector_inverse:
selector_inverse[tr] = selector_inverse[tr].inverse
try:
mask = self.mask.inverse
except NotImplementedError:
raise
return self.__class__(self.outputs, self.inputs, selector_inverse,
mask, self.undefined_transform_value)
@property
def undefined_transform_value(self):
return self._undefined_transform_value
@undefined_transform_value.setter
def undefined_transform_value(self, value):
self._undefined_transform_value = value
@property
def inputs(self):
"""
The name(s) of the input variable(s) on which a model is evaluated.
"""
return self._inputs
@property
def outputs(self):
"""The name(s) of the output(s) of the model."""
return self._outputs
@property
def selector(self):
return self._selector
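# Wiring sketch (transforms illustrative): every region label maps to a model
# that takes (x, y) and returns n_outputs values.
# mask = SelectorMask.from_vertices((100, 100), regions)
# sel = RegionsSelector(inputs=('x', 'y'), outputs=('ra', 'dec'),
#                       selector={1: transform_1, 2: transform_2}, mask=mask)
# ra, dec = sel(x, y)  # undefined_transform_value (NaN) outside all regions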
|
nilq/baby-python
|
python
|
import colorama, os, ctypes, re, glob
from colorama import Fore
from sys import exit
def cls():
os.system("cls" if os.name=="nt" else "clear")
def fexit():
input(f"\n{Fore.RESET}Press Enter button for exit")
exit()
os.system("cls")
if __name__ == "__main__":
os.system("cls")
ctypes.windll.kernel32.SetConsoleTitleW("Discord Token Parser by GuFFy_OwO")
colorama.init()
print(f"{Fore.RESET}[{Fore.CYAN}1{Fore.RESET}] Check one file")
print(f"{Fore.RESET}[{Fore.CYAN}2{Fore.RESET}] Check many files")
print()
checktype = input(f"{Fore.CYAN}>{Fore.RESET} Select An Option{Fore.CYAN}:{Fore.RESET} ")
if "1" in checktype:
cls()
        tokenFileName = input(f"{Fore.CYAN}>{Fore.RESET}Enter the name of the file which contains the unparsed tokens{Fore.CYAN}:{Fore.RESET} ")
elif "2" in checktype:
cls()
        tokenDirectoryName = input(f"{Fore.CYAN}>{Fore.RESET}Enter the directory of the files which contain the unparsed tokens{Fore.CYAN}:{Fore.RESET} ")
if not os.path.exists(tokenDirectoryName):
print(tokenDirectoryName + " directory not exist.")
fexit()
else:
print("Invalid Option.")
fexit()
    deleteDuplicates = input(f"{Fore.CYAN}>{Fore.RESET}Delete duplicate tokens? [Y/N]{Fore.CYAN}:{Fore.RESET} ")
cls()
if "2" in checktype:
try:
os.remove(f"{tokenDirectoryName}\\all_data.tmp")
        except OSError:
            pass
open(f"{tokenDirectoryName}\\all_data.tmp", "a+")
print(f"Glue the files...\n")
files = glob.glob(f"{tokenDirectoryName}\\*.txt")
with open(f"{tokenDirectoryName}\\all_data.tmp", "w", encoding="utf-8") as result:
for file_ in files:
for line in open( file_, "r", encoding="utf-8"):
result.write(line)
tokenFileName = f"{tokenDirectoryName}\\all_data.tmp"
if not os.path.exists(tokenFileName):
print(tokenFileName + " not exist.")
fexit()
def main():
print(f"Parse tokens...")
try:
os.remove("Parsed Tokens.txt")
        except OSError:
            pass
open("Parsed Tokens.txt", "a+")
tokens = []
for line in [x.strip() for x in open(f"{tokenFileName}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in re.findall(regex, line):
tokens.append(token)
if deleteDuplicates.lower() == "y":
tokens = list(dict.fromkeys(tokens))
tokens_str = "\n".join(tokens)
with open("Parsed Tokens.txt", "a", encoding="utf-8") as f:
f.write(tokens_str)
found = sum(1 for line in open("Parsed Tokens.txt", "r", encoding="utf-8"))
print(f"\nDone. Found {Fore.CYAN}{found}{Fore.RESET} tokens!")
try:
os.remove(f"{tokenDirectoryName}\\all_data.tmp")
        except OSError:
            pass
fexit()
main()
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
"""
import torch
import torch.optim as optim
import torch.nn as nn
import os
import time
import copy
import numpy as np
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score, f1_score
from visual_confuse_matrix import make_confusion_matrix
from dataset import genDataset, genExtraForEvalDataset
from model import SegClsModule
from sklearn.metrics import cohen_kappa_score
import argparse
import logging
import os
import sys
import torchvision.transforms as transforms
import cv2
import numpy as np
import math
import random
import yaml
from pathlib import Path
from loss import Weighted_Jaccard_loss
from utils import dice_coef, probs2one_hot
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def setup_logger(name, save_dir, distributed_rank, filename="log.txt"):
"""terminal and log file
name: application information
save_dir: log dir
distributed_rank: only host 0 can generate log
filename: log file name
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def set_visible_gpu(gpu_idex):
"""
to control which gpu is visible for CUDA user
set_visible_gpu(1)
print(os.environ["CUDA_DEVICE_ORDER"])
print(os.environ["CUDA_VISIBLE_DEVICES"])
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "{0}".format(gpu_idex)
def get_results(val_labels, val_outs, val_probs, save_cf_png_dir, save_metric_dir):
# first for probs
AUC_score = roc_auc_score(val_labels, val_probs)
F1_score = f1_score(val_labels, val_outs)
CM = confusion_matrix(val_labels, val_outs)
labels = ['True Neg','False Pos','False Neg','True Pos']
categories = ['0', '1']
make_confusion_matrix(CM,
group_names=labels,
categories=categories,
cmap='Blues',save_dir=save_cf_png_dir)
#make_confusion_matrix(CM, figsize=(8,6), cbar=False)
TN = CM[0][0]
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
result_str = "Sensitivity=%.3f, Specificity=%.3f, PPV=%.3f, NPV=%.3f, FPR=%.3f, FNR=%.3f, FDR=%.3f, ACC=%.3f, AUC=%.3f, F1_score=%.3f\n" % (TPR, TNR, PPV, NPV, FPR, FNR, FDR, ACC, AUC_score, F1_score)
save_dir = save_metric_dir
with open(save_dir, "a+") as f:
f.writelines([result_str])
return result_str
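# Worked check of the formulas above on a synthetic confusion matrix
# CM = [[TN, FP], [FN, TP]] = [[4, 1], [2, 3]]:
#   TPR = 3 / (3 + 2) = 0.600  (sensitivity)
#   TNR = 4 / (4 + 1) = 0.800  (specificity)
#   ACC = (3 + 4) / 10 = 0.700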
def eval_model(model, dataloaders, log_dir="./log/", logger=None, opt=None):
since = time.time()
    if False:  # opt.do_seg (lung segmentation eval disabled)
# eval lung segmentation
logger.info("-"*8+"eval lung segmentation"+"-"*8)
model.eval()
all_dices = []
all_dices_au = []
for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_lung_seg_val"], 0):
annotation = dataloaders["tgt_lung_seg_val"].dataset.annotations[batch_idx]
img_dir = annotation.strip().split(',')[0]
img_name = Path(img_dir).name
inputs = inputs.to(device)
# adjust labels
labels[labels==opt.xray_mask_value_dict["lung"]] = 1
labels = labels[:,-1].to(device)
labels = torch.stack([labels == c for c in range(2)], dim=1)
with torch.set_grad_enabled(False):
if opt.use_aux:
_, _, seg_logits, _, seg_logits_au = model(inputs)
else:
_, _, seg_logits, _, _ = model(inputs)
seg_probs = torch.softmax(seg_logits, dim=1)
predicted_mask = probs2one_hot(seg_probs.detach())
# change the infection to Lung
predicted_mask_lung = predicted_mask[:,:-1]
predicted_mask_lung[:,-1] += predicted_mask[:,-1]
dices = dice_coef(predicted_mask_lung, labels.detach().type_as(predicted_mask)).cpu().numpy()
all_dices.append(dices) # [(B,C)]
predicted_mask_lung = predicted_mask_lung.squeeze().cpu().numpy() # 3xwxh
mask_inone = (np.zeros_like(predicted_mask_lung[0])+predicted_mask_lung[1]*255).astype(np.uint8)
# save dir:
save_dir = os.path.join(opt.logs, "tgt_lung_seg_val", "eval")
#
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
###################################################au
if opt.use_aux:
seg_probs_au = torch.softmax(seg_logits_au, dim=1)
predicted_mask_au = probs2one_hot(seg_probs_au.detach())
# change the infection to Lung
predicted_mask_lung_au = predicted_mask_au[:,:-1]
predicted_mask_lung_au[:,-1] += predicted_mask_au[:,-1]
dices_au = dice_coef(predicted_mask_lung_au, labels.detach().type_as(predicted_mask_au)).cpu().numpy()
all_dices_au.append(dices_au) # [(B,C)]
predicted_mask_lung_au = predicted_mask_lung_au.squeeze().cpu().numpy() # 3xwxh
mask_inone_au = (np.zeros_like(predicted_mask_lung_au[0])+predicted_mask_lung_au[1]*255).astype(np.uint8)
# save dir:
save_dir_au = os.path.join(opt.logs, "tgt_lung_seg_val_au", "eval")
#
if not os.path.exists(save_dir_au):
os.makedirs(save_dir_au)
cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au)
avg_dice = np.mean(np.concatenate(all_dices, 0), 0) #
logger.info("tgt_lung_seg_val:[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f"
% (batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0],
avg_dice[0], avg_dice[1], np.mean(np.concatenate(all_dices, 0))))
if opt.use_aux:
avg_dice_au = np.mean(np.concatenate(all_dices_au, 0), 0) #
logger.info("tgt_lung_seg_val_au:[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f"
% (batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0],
avg_dice_au[0], avg_dice_au[1], np.mean(np.concatenate(all_dices_au, 0))))
if True:
# eval infection segmentation and cls
logger.info("-"*8+"eval infection cls"+"-"*8)
model.eval()
val_gt = []
val_cls_pred = []
val_cls_probs = [] # for VOC
val_seg_pred = []
val_seg_probs = [] # for VOC
val_seg_probs_au = []
val_seg_pred_au = [] # for VOC
for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_val"], 0):
inputs = inputs.to(device)
# adjust label
val_gt.append(labels.cpu().data.numpy())
with torch.set_grad_enabled(False):
annotation = dataloaders["tgt_cls_val"].dataset.annotations[batch_idx]
img_dir = annotation.strip().split(',')[0]
img_name = Path(img_dir).name
if opt.use_aux:
cls_logits, _, seg_logits, _, seg_logits_au = model(inputs)
else:
cls_logits, _, seg_logits, _, _ = model(inputs)
if opt.do_seg:
seg_probs = torch.softmax(seg_logits, dim=1)
val_seg_probs.append(seg_probs[:,-1:].detach().cpu().view(seg_probs.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot = probs2one_hot(seg_probs.detach())
# for save
predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh
mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8)
# save dir:
save_dir = os.path.join(opt.logs, "tgt_cls_val", "eval")
#
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
# seg2cls
preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred.append(preds_cls_seg)
if opt.do_seg and opt.use_aux:
seg_probs_au = torch.softmax(seg_logits_au, dim=1)
val_seg_probs_au.append(seg_probs_au[:,-1:].detach().cpu().view(seg_probs_au.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot_au = probs2one_hot(seg_probs_au.detach())
# for save
predicted_mask_au = predicted_mask_onehot_au.squeeze().cpu().numpy() # 3xwxh
mask_inone_au = (np.zeros_like(predicted_mask_au[0])+predicted_mask_au[1]*128+predicted_mask_au[2]*255).astype(np.uint8)
# save dir:
save_dir_au = os.path.join(opt.logs, "tgt_cls_val_au", "eval")
#
if not os.path.exists(save_dir_au):
os.makedirs(save_dir_au)
cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au)
# seg2cls
preds_cls_seg_au = (predicted_mask_onehot_au[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred_au.append(preds_cls_seg_au)
# cls
#print(cls_logits)
if opt.do_cls:
probs_cls = torch.softmax(cls_logits, dim=1)
val_cls_probs.append(probs_cls[...,1:].detach().cpu().numpy())
preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long)
val_cls_pred.append(preds_cls.cpu().data.numpy())
if not os.path.exists(os.path.join(opt.logs, "cf")):
os.makedirs(os.path.join(opt.logs, "cf"))
val_gt = np.concatenate(val_gt, axis=0)
if opt.do_cls:
val_cls_pred = np.concatenate(val_cls_pred, axis=0)
val_cls_probs = np.concatenate(val_cls_probs, axis=0)
save_cf_png_dir = os.path.join(opt.logs, "cf", "eval_cls_cf.png")
save_metric_dir = os.path.join(opt.logs, "eval_metric_cls.txt")
result_str = get_results(val_gt, val_cls_pred, val_cls_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_cls_val:[cls]: %s" % (result_str))
if opt.do_seg:
val_seg_pred = np.concatenate(val_seg_pred, axis=0)
val_seg_probs = np.concatenate(val_seg_probs, axis=0)
# seg2cls
save_cf_png_dir = os.path.join(opt.logs, "cf", "eval_seg_cf.png")
save_metric_dir = os.path.join(opt.logs, "eval_metric_seg.txt")
result_str = get_results(val_gt, val_seg_pred, val_seg_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_seg_val:[seg2cls]: %s" % (result_str))
if opt.do_seg and opt.use_aux:
val_seg_pred_au = np.concatenate(val_seg_pred_au, axis=0)
val_seg_probs_au = np.concatenate(val_seg_probs_au, axis=0)
# seg2cls
save_cf_png_dir_au = os.path.join(opt.logs, "cf", "eval_seg_au_cf.png")
save_metric_dir_au = os.path.join(opt.logs, "eval_metric_seg_au.txt")
result_str_au = get_results(val_gt, val_seg_pred_au, val_seg_probs_au, save_cf_png_dir_au, save_metric_dir_au)
logger.info("tgt_seg_au_val:[seg2cls]: %s" % (result_str_au))
time_elapsed = time.time() - since
logger.info("Eval complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
def extra_eval_model(model, dataloaders, log_dir="./log/", logger=None, opt=None):
since = time.time()
if True:
# eval infection segmentation and cls
logger.info("-"*8+"extra eval infection cls"+"-"*8)
model.eval()
val_gt = []
val_cls_pred = []
val_cls_probs = [] # for VOC
val_seg_pred = []
val_seg_probs = [] # for VOC
val_seg_probs_au = []
val_seg_pred_au = [] # for VOC
annotations = dataloaders["tgt_cls_extra_val"].dataset.annotations
for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_extra_val"], 0):
inputs = inputs.to(device)
# adjust label
val_gt.append(labels.cpu().data.numpy())
with torch.set_grad_enabled(False):
annotation = annotations[batch_idx]
img_dir = annotation.strip().split(',')[0]
img_name = Path(img_dir).name
print(batch_idx, len(annotations))
if opt.use_aux:
cls_logits, _, seg_logits, _, seg_logits_au = model(inputs)
else:
cls_logits, _, seg_logits, _, _ = model(inputs)
if opt.do_seg:
seg_probs = torch.softmax(seg_logits, dim=1)
val_seg_probs.append(seg_probs[:,-1:].detach().cpu().view(seg_probs.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot = probs2one_hot(seg_probs.detach())
# for save
predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh
mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8)
# save dir:
save_dir = os.path.join(opt.logs, "tgt_cls_extra_val", "eval")
#
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
# seg2cls
preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred.append(preds_cls_seg)
if opt.do_seg and opt.use_aux:
seg_probs_au = torch.softmax(seg_logits_au, dim=1)
val_seg_probs_au.append(seg_probs_au[:,-1:].detach().cpu().view(seg_probs_au.shape[0], 1, -1).max(-1)[0])
predicted_mask_onehot_au = probs2one_hot(seg_probs_au.detach())
# for save
predicted_mask_au = predicted_mask_onehot_au.squeeze().cpu().numpy() # 3xwxh
mask_inone_au = (np.zeros_like(predicted_mask_au[0])+predicted_mask_au[1]*128+predicted_mask_au[2]*255).astype(np.uint8)
# save dir:
save_dir_au = os.path.join(opt.logs, "tgt_cls_extra_val_au", "eval")
#
if not os.path.exists(save_dir_au):
os.makedirs(save_dir_au)
cv2.imwrite(os.path.join(save_dir_au, img_name), mask_inone_au)
# seg2cls
preds_cls_seg_au = (predicted_mask_onehot_au[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
val_seg_pred_au.append(preds_cls_seg_au)
# cls
#print(cls_logits)
if opt.do_cls:
probs_cls = torch.softmax(cls_logits, dim=1)
val_cls_probs.append(probs_cls[...,1:].detach().cpu().numpy())
preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long)
val_cls_pred.append(preds_cls.cpu().data.numpy())
if not os.path.exists(os.path.join(opt.logs, "cf")):
os.makedirs(os.path.join(opt.logs, "cf"))
val_gt = np.concatenate(val_gt, axis=0)
if opt.do_cls:
val_cls_pred = np.concatenate(val_cls_pred, axis=0)
val_cls_probs = np.concatenate(val_cls_probs, axis=0)
save_cf_png_dir = os.path.join(opt.logs, "cf", "extra_eval_cls_cf.png")
save_metric_dir = os.path.join(opt.logs, "extra_eval_metric_cls.txt")
result_str = get_results(val_gt, val_cls_pred, val_cls_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_cls_extra_val:[cls]: %s" % (result_str))
if opt.do_seg:
val_seg_pred = np.concatenate(val_seg_pred, axis=0)
val_seg_probs = np.concatenate(val_seg_probs, axis=0)
# seg2cls
save_cf_png_dir = os.path.join(opt.logs, "cf", "extra_eval_seg_cf.png")
save_metric_dir = os.path.join(opt.logs, "extra_eval_metric_seg.txt")
result_str = get_results(val_gt, val_seg_pred, val_seg_probs, save_cf_png_dir, save_metric_dir)
logger.info("tgt_seg_extra_val:[seg2cls]: %s" % (result_str))
if opt.do_seg and opt.use_aux:
val_seg_pred_au = np.concatenate(val_seg_pred_au, axis=0)
val_seg_probs_au = np.concatenate(val_seg_probs_au, axis=0)
# seg2cls
save_cf_png_dir_au = os.path.join(opt.logs, "cf", "extra_eval_seg_au_cf.png")
save_metric_dir_au = os.path.join(opt.logs, "extra_eval_metric_seg_au.txt")
result_str_au = get_results(val_gt, val_seg_pred_au, val_seg_probs_au, save_cf_png_dir_au, save_metric_dir_au)
logger.info("tgt_seg_au_extra_val:[seg2cls]: %s" % (result_str_au))
time_elapsed = time.time() - since
logger.info("Extra_Eval complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
def get_argument():
parser = argparse.ArgumentParser()
parser.add_argument('--config', default="./cfgs/experiment.yaml", type=str)
#parser.add_argument('--setseed', default=2020, type=int)
parser.add_argument('--fold', default=0, type=int)
parser.add_argument('--setgpuid', default=0, type=int)
opt = parser.parse_args()
with open(opt.config) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
for k, v in config['common'].items():
setattr(opt, k, v)
    # replace "only" with "mmd" in the experiment name
opt.experiment = opt.experiment.replace("only", "mmd")
opt.seg_augment = True
opt.cls_augment = True
opt.do_cls_mmd = True
opt.do_seg = True
opt.do_cls = True
opt.do_seg_mmd = False
opt.eval_cls_times = 50
opt.eval_times = 50
# opt.random_seed = opt.setseed
opt.random_seed = 1010 * (opt.fold + 1)
opt.gpuid = opt.setgpuid
selected_drr_datasets_indexes = np.array(opt.selected_drr_datasets_indexes+opt.selected_drr_datasets_indexes)
#print(selected_drr_datasets_indexes)
# # [[0, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1]]
print(selected_drr_datasets_indexes[-1][-1])
selected_drr_datasets_indexes[2][-1] = 1
selected_drr_datasets_indexes[3][-1] = 1
opt.selected_drr_datasets_indexes = [list(_) for _ in list(selected_drr_datasets_indexes)]
log_dir = "./{}/{}/{}".format("logs_bk", opt.experiment, "fold%d"%opt.fold)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
opt.logs = log_dir
return opt
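# A minimal sketch of the YAML layout that get_argument() assumes: every entry
# under the top-level "common" key is copied onto opt via setattr. The values
# below are illustrative placeholders, not the real experiment config.
#
#   common:
#     experiment: "drr_only"       # must contain "only" for the replace() above
#     batch_size: 8
#     num_epochs: 100
#     mode: 12                     # asserted in __main__ below
#     selected_drr_datasets_indexes: [[0, 0, 0], [1, 0, 0]]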
if __name__ == "__main__":
opt = get_argument()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(opt.gpuid)
setup_seed(opt.random_seed)
assert opt.mode == 12, ("opt.mode is not supported in %s" % __file__)
log_dir = opt.logs
logger = setup_logger("{}".format(os.path.basename(__file__).split(".")[0]),
save_dir=opt.logs, distributed_rank=0, filename="log_eval.txt")
logger.info(opt)
batch_size = opt.batch_size
num_epochs = opt.num_epochs
use_pretrained = True
device_name = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device_name)
model_ft = SegClsModule(opt)
train_dataset, tgt_cls_train_dataset, tgt_cls_val_dataset, tgt_lung_seg_val_dataset = genDataset(opt)
tgt_cls_extra_val_dataset = genExtraForEvalDataset(opt)
logger.info("-"*8+"train:"+"-"*8)
logger.info(train_dataset.annotations)
logger.info("-"*8+"tgt_cls_train:"+"-"*8)
logger.info(tgt_cls_train_dataset.annotations)
logger.info("-"*8+"tgt_cls_val:"+"-"*8)
logger.info(tgt_cls_val_dataset.annotations)
logger.info("-"*8+"tgt_cls_extra_val:"+"-"*8)
logger.info(tgt_cls_extra_val_dataset.annotations)
# logger.info("-"*8+"tgt_lung_seg_val:"+"-"*8)
# logger.info(tgt_lung_seg_val_dataset.annotations)
image_datasets = {'train': train_dataset, 'tgt_cls_train': tgt_cls_train_dataset, 'tgt_cls_val': tgt_cls_val_dataset, 'tgt_cls_extra_val': tgt_cls_extra_val_dataset, "tgt_lung_seg_val": tgt_lung_seg_val_dataset}
shuffles = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, 'tgt_cls_extra_val': False, "tgt_lung_seg_val": False}
batch_sizes_dict = {"train": batch_size,'tgt_cls_train': batch_size, 'tgt_cls_val': 1, 'tgt_cls_extra_val': 1, "tgt_lung_seg_val": 1}
drop_lasts = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, 'tgt_cls_extra_val': False, "tgt_lung_seg_val": False}
number_worker_dict = {"train": 4,'tgt_cls_train': 4, 'tgt_cls_val': 0, 'tgt_cls_extra_val': 0, "tgt_lung_seg_val": 0}
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_sizes_dict[x], shuffle=shuffles[x], num_workers=number_worker_dict[x], drop_last=drop_lasts[x]) for x in ['train', 'tgt_cls_train', 'tgt_cls_val', 'tgt_cls_extra_val', "tgt_lung_seg_val"]}
# Send the model to GPU
weight_path = os.path.join(log_dir, "latest.pth")
model_ft.load_state_dict(torch.load(weight_path))
model_ft = model_ft.to(device)
model_ft.eval()
#eval_model(model_ft, dataloaders_dict, log_dir=log_dir, logger=logger, opt=opt)
extra_eval_model(model_ft, dataloaders_dict, log_dir=log_dir, logger=logger, opt=opt)
|
nilq/baby-python
|
python
|
# Author: Andrew Sainz
#
# Purpose: XMLAnalyze is designed to iterate through a collection of Post data collected from Stack Overflow
# forums. Data collected to analyze the code tagged information to find the language of the code
# being utilized.
#
# How to use: run from the command line as
#             "python XMLAnalyze.py Posts.xml <known-Java file> <known-C++ file>"
#             (the script reads three arguments: the posts XML plus one known
#             corpus file per language, matching sys.argv[1..3] below).
import sys
import re
import os
import nltk
import operator
from random import randint
from nltk.util import ngrams
from ngramFunctions import *
from XMLParser import *
from frequencyFunctions import *
from lxml import etree
def features(sentence):
words = sentence.lower().split()
return dict(('contains(%s)' %w, True) for w in words)
if __name__ == '__main__':
xmldoc = sys.argv[1]
knownJava = sys.argv[2]
knownCpp = sys.argv[3]
###################################################################
# Section 1: Gather known data to create frequencies for known information
###################################################################
knownJavaFile = open(knownJava)
knownJavaString = ""
for line in knownJavaFile:
knownJavaString += line
# knownJavaGram = ngramsFunction(knownJavaString, 3)
knownJavaGram = ngrams(knownJavaString.split(' '),3)#ngramsFunction(knownJavaString, 3)
knownJavaHashFreq = nltk.FreqDist(knownJavaGram)
# javaMaxGram = max(knownJavaHashFreq, key=knownJavaHashFreq.get)
# print(javaMaxGram, knownJavaHashFreq[javaMaxGram])
knownCPPFile = open(knownCpp)
knownCPPString = ""
for line in knownCPPFile:
knownCPPString += line
# print(knownCPPString)
knownCPPGram = ngrams(knownCPPString.split(' '),3)
knownCPPHashFreq = nltk.FreqDist(knownCPPGram)
# cppMaxGram = max(knownCPPHashFreq, key=knownCPPHashFreq.get)
# print(cppMaxGram, knownCPPHashFreq[cppMaxGram])
#############################################################################################
# Section 2: to calculate trigram Probability
#############################################################################################
kneserJava = nltk.KneserNeyProbDist(knownJavaHashFreq)
kneserCPP = nltk.KneserNeyProbDist(knownCPPHashFreq)
kneserJavaHash = convertProbListToHash(kneserJava)
kneserCPPHash = convertProbListToHash(kneserCPP)
cpp = 0
java = 0
totalCppWithTag = 0
totalJavaWithTag = 0
totalJavaTags = 0
totalCppTags = 0
totalEval = 0
resultsFile = open('Results.txt', 'a')
codeFile = open('Code.txt', 'a')
analyticsFile = open('Analytics.txt', 'a')
resultsFileString = codeFileString = analyticsString = ''
presencePosCpp = presenceNegCpp = absencePosCpp = absenceNegCpp =0
presencePosJava = presenceNegJava = absencePosJava = absenceNegJava = 0
# tree = ET.parse(xmldoc)
# root = tree.getroot()
for event, element in etree.iterparse(xmldoc, tag="row"):
body = element.get('Body')
# Only allow posts with a code tag to be added
if '<code>' in body:
postId = element.get('Id')
# Tags for comment post
tags = element.get('Tags')
            if tags is None:
                continue
            tags = tags.lower()
# if not ('<java>' or 'c++' or '<c>' or '<c++-faq>' or '<android>' or '<spring>' or '<swing>' or '<pass-by-reference>' or '<eclipse>' or '<regex>' or '<recursion>' or '<binary-tree>' or '<software-engineering>' or '<divide-by-zero>' or '<arraylist>' or '<garbage-collection>' or '<object>' or '<arrays>' or '<iterator>' or '<hashcode>' or '<inheritance>' or '<tostring>' or '<unicode>' or '<quicksort>' or '<sorting>' or '<jar>' or '<bubble-sort>' or '<hashcode>' or '<multidimensional-array>' or '<codebase>' or '<class>') in tags:
# continue
            # Skip if post contains tags from multiple languages
# if (('<c++>' or '<c++-faq>' or '<c>' in tags) and ('<java>' or '<android>' or '<spring>' or '<swing>' in tags)) :
# continue
code = parseBodyForTagCode(body)
codeString = ''
for item in code:
snipetLength = len(item.split())
if snipetLength > 5:
codeString = codeString+re.sub('<code>|</code>',' ',item)
            codeString = re.sub(r'\n|\r|\s\s+', ' ', codeString)
            codeString = re.sub(r'\.', ' ', codeString)
            codeString = re.sub(r'\t', '', codeString)
            codeString = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", codeString)  # strip /* */ comments
            codeString = re.sub(re.compile(r"//.*?\n"), "", codeString)               # strip // comments
            codeString = re.sub(r'[^0-9a-zA-Z]+', ' ', codeString)
            codeString = re.sub(r'\s+', ' ', codeString).strip()
codeFileString = codeFileString+codeString
codeLength = len(codeString.split())
# print(codeLength)
            if codeLength < 3:
                continue
            totalEval += 1  # total posts not skipped
            # In some cases a post can include tags associated with more than one language
            is_cpp_tagged = any(t in tags for t in ('c++', '<c++-faq>', '<c>'))
            is_java_tagged = any(t in tags for t in ('<java>', '<android>', '<spring>', '<swing>'))
            if is_cpp_tagged:
                totalCppTags += 1
            if is_java_tagged:
                totalJavaTags += 1
# print(codeString)
            codeList = ngrams(codeString.split(' '), 3)  # order must match the trigram models built in Section 1
codeGram = nltk.FreqDist(codeList)
for gram in codeGram:
cppValue = kneserCPPHash.get(str(gram))
javaValue = kneserJavaHash.get(str(gram))
if cppValue != None and javaValue != None:
# Compare to the frequency values
if cppValue > javaValue:
cpp += 1
else:
java += 1
# if there is a hit for either one then add to hit value
elif cppValue == None and javaValue != None:
java += 1
elif cppValue != None and javaValue == None:
cpp += 1
# if hit values are the same make a guess on language
if java == cpp:
ran = randint(0,1)
if(ran == 0):
java += 1
else:
cpp += 1
# Done looking for gram hit values
#################################
# fix absence
#################################
# if java == 0 and ('<java>' or '<android>' or '<spring>' or '<swing>') in tags:
# absenceNegJava += 1
# if cpp == 0 and ('c++' or '<c++-faq>' or '<c>') in tags:
# absenceNegCpp += 1
# if java > cpp and not ('java' or '<android>' or '<spring>' or '<swing>') in tags:
# print('absence is true')
# absencePosJava += 1
# if cpp > java and not ('c++' or '<c++-faq>' or '<c>') in tags:
# absencePosCpp += 1
#################################
# if no values where hit then move on to next post row
# if java == 0 and cpp == 0:
# continue
determinedCpp = determinedJava = False
resultsFileString = resultsFileString+'Grams assigned as followed:\n'
resultsFileString = resultsFileString+'PostId: {}\nC++: {} Java: {}\nCode: {} \n'.format(postId,cpp,java,codeString)
if cpp > java:
resultsFileString = resultsFileString+'Snippet determined to be C++\nTags include {}\n\n'.format(tags)
determinedCpp = True
# if ('c++' or '<c++-faq>' or '<c>') in tags:
# totalCppWithTag += 1
elif java > cpp:
resultsFileString = resultsFileString+'Snippet determined to be Java\nTags include {}\n\n'.format(tags)
determinedJava = True
# if ('<java>' or '<android>' or '<spring>' or '<swing>') in tags:
# totalJavaWithTag += 1
# analyze results
            if determinedCpp and is_cpp_tagged:
                presencePosCpp += 1
            if not determinedCpp and is_cpp_tagged:
                presenceNegCpp += 1
            if determinedCpp and not is_cpp_tagged:
                absencePosCpp += 1
            if not determinedCpp and not is_cpp_tagged:
                absenceNegCpp += 1
            if determinedJava and is_java_tagged:
                presencePosJava += 1
            if not determinedJava and is_java_tagged:
                presenceNegJava += 1
            if determinedJava and not is_java_tagged:
                absencePosJava += 1
            if not determinedJava and not is_java_tagged:
                absenceNegJava += 1
# if ('c++' or '<c++-faq>' or '<c>') in tags:
# # presence is true
# if cpp > java:
# # positive is true
# # true positive
# presencePosCpp += 1
# else:
# # false negative
# presenceNegCpp += 1
# # elif cpp > java:
# # # been determined cpp but no cpp tags
            # # incorrectly determined
# # # false positive
# # absencePosCpp += 1
# # else:
# # # determined not to be cpp correctly
# # # true negative
# # absenceNegCpp += 1
# if ('<java>' or '<android>' or '<spring>' or '<swing>') in tags:
# # presence is true
# if java > cpp:
# presencePosJava += 1
# else:
# presenceNegJava += 1
# # elif java > cpp:
# absencePosJava += 1
# else:
# absenceNegJava += 1
java = 0
cpp = 0
element.clear()
for ancestor in element.xpath('ancestor-or-self::*'):
while ancestor.getprevious() is not None:
del ancestor.getparent()[0]
javaSensitivity = presencePosJava / (presencePosJava+presenceNegJava)
javaSpecificity = absenceNegJava / (absenceNegJava+absencePosJava)
javaRateFalsePos = absencePosJava / (absencePosJava+absenceNegJava)
javaRateFalseNeg = presenceNegJava / (presenceNegJava+presencePosJava)
javaPosPredict = presencePosJava / (presencePosJava+ absencePosJava)
javaNegPredict = presenceNegJava / (presenceNegJava+ absenceNegJava)
javaRelativeRisk = (presencePosJava/ (presencePosJava + presenceNegJava)) / (absencePosJava / (absencePosJava + absenceNegJava))
cppSensitivity = presencePosCpp / (presencePosCpp+presenceNegCpp)
cppSpecificity = absenceNegCpp / (absenceNegCpp+absencePosCpp)
cppRateFalsePos = absencePosCpp / (absencePosCpp+absenceNegCpp)
cppRateFalseNeg = presenceNegCpp / (presenceNegCpp+presencePosCpp)
cppPosPredict = presencePosCpp / (presencePosCpp+ absencePosCpp)
cppNegPredict = presenceNegCpp / (presenceNegCpp+absenceNegCpp)
cppRelativeRisk = (presencePosCpp/ (presencePosCpp + presenceNegCpp)) / (absencePosCpp / (absencePosCpp + absenceNegCpp))
analyticsString = 'Java\n------\nTrue Positive: {}\nFalse Negative: {}\nFalse Positive: {}\nTrue Negative: {}'.format(presencePosJava,presenceNegJava,absencePosJava,absenceNegJava)
analyticsString += '\nSensitivity: {}\nSpecificity: {}'.format(javaSensitivity, javaSpecificity)
analyticsString += '\nRate False Positives: {}\nRate False Negatives: {}'.format(javaRateFalsePos, javaRateFalseNeg)
analyticsString += '\nEstimate Positive Predictive Value: {}\nEstimate Negative Predictive Value: {}'.format(javaPosPredict, javaNegPredict)
analyticsString += '\nRelative Risk: {}'.format(javaRelativeRisk)
analyticsString += '\n\nC++\n------\nTrue Positive: {}\nFalse Negative: {}\nFalse Positive: {}\nTrue Negative: {}'.format(presencePosCpp,presenceNegCpp,absencePosCpp,absenceNegCpp)
analyticsString += '\nSensitivity: {}\nSpecificity: {}'.format(cppSensitivity, cppSpecificity)
analyticsString += '\nRate False Positives: {}\nRate False Negatives: {}'.format(cppRateFalsePos, cppRateFalseNeg)
analyticsString += '\nEstimate Positive Predictive Value: {}\nEstimate Negative Predictive Value: {}'.format(cppPosPredict, cppNegPredict)
analyticsString += '\nRelative Risk: {}'.format(cppRelativeRisk)
#############################################################################################
# Section Output
#############################################################################################
resultsFile.write(resultsFileString)
codeFile.write(codeFileString)
analyticsFile.write(analyticsString)
# print('Total Java snippets determined and also have tags (java, android, spring, swing): {}'.format(totalJavaWithTag))
# print('Total Java snippets: {}'.format(totalJavaTags))
# print('Total C++ snippets determined and also have tags (c++, c++-faq, c): {}'.format(totalCppWithTag))
# print('Total C++ snippets: {}'.format(totalCppTags))
# print('Total snippets evaluated: {}'.format(totalEval))
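# The sensitivity/specificity block above divides raw counts directly and will
# raise ZeroDivisionError if a class never occurs in the evaluated posts. A
# hedged sketch of a guard (safe_div is not part of the original script; the
# 0.0 default is an assumption):
#
# def safe_div(numerator, denominator, default=0.0):
#     """Return numerator/denominator, or default when the denominator is 0."""
#     return float(numerator) / denominator if denominator else default
#
# javaSensitivity = safe_div(presencePosJava, presencePosJava + presenceNegJava)
# cppSensitivity = safe_div(presencePosCpp, presencePosCpp + presenceNegCpp)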
|
nilq/baby-python
|
python
|
from .bindings import so, FPDF_PATH_POINT, FPDF_RECT
from ctypes import pointer
class Glyph:
'''Represents Glyph drawing instructions'''
LINETO = 0 #: LineTo instruction
CURVETO = 1 #: CurveTo instruction
MOVETO = 2 #: MoveTo instruction
def __init__(self, glyph, parent):
self._glyph = glyph
self._parent = parent
self._bounds = None
def __len__(self):
return so.REDGlyph_Size(self._glyph)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __getitem__(self, i):
'''Returns a 4-tuple representing this drawing instruction: (x, y, type, close).
Args:
i (int): index of the instruction
'''
point = FPDF_PATH_POINT()
so.REDGlyph_Get(self._glyph, i, pointer(point))
return point.x, point.y, point.type, point.close
# @memoize
# def _bounds(self):
# if len(self) == 0:
# return None
# coords = [(x, y) for x, y, _, _ in self]
# xmin = min(x for x,_ in coords)
# xmax = max(x for x,_ in coords)
# ymin = min(y for _,y in coords)
# ymax = max(y for _,y in coords)
# return xmin, ymin, xmax, ymax
def bounds(self):
if self._bounds is None:
rect = FPDF_RECT(0., 0., 0., 0.)
so.REDGlyph_GetBounds(self._glyph, pointer(rect))
self._bounds = rect.left, rect.bottom, rect.right, rect.top
return self._bounds
@property
def ascent(self):
bounds = self.bounds()
return max(0, bounds[3])
@property
def descent(self):
_, ymin, _, _ = self.bounds()
return max(0, -ymin)
@property
def advance(self):
_, _, xmax, _ = self.bounds()
return max(0, xmax)
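# A hedged usage sketch (not part of the bindings): walk a Glyph's drawing
# instructions and dispatch on instruction type. How a Glyph instance is
# obtained from a document is assumed to be handled elsewhere in the library.
#
# def describe(glyph):
#     names = {Glyph.LINETO: "lineTo", Glyph.CURVETO: "curveTo", Glyph.MOVETO: "moveTo"}
#     for x, y, kind, close in glyph:
#         suffix = " close" if close else ""
#         print("%s (%.2f, %.2f)%s" % (names.get(kind, "?"), x, y, suffix))
#     print("bounds:", glyph.bounds(), "advance:", glyph.advance)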
|
nilq/baby-python
|
python
|
n = int(raw_input())
comportadas = 0
children = []
for i in range(n):
ent = map(str, raw_input().split())
children.append(ent[1])
if ent[0] == "+":
comportadas += 1
children.sort()
for child in children:
print child
print "Se comportaram: %d | Nao se comportaram: %d" % (comportadas, n - comportadas)
|
nilq/baby-python
|
python
|
import sys
from mpc_func import *
try:
    poses = load_poses(sys.argv[1])
except Exception:
    print('Please use the right .csv file!')
    sys.exit(1)
sparseness = 100
sparse_poses = poses[1::sparseness, 1:3]
path = [148, 150, 151, 153, 154, 156, 158, 160, 162, 163]
dt = 1 # [s] discrete time
lr = 1.0 # [m]
T = 6 # number of horizon
max_speed = 5
min_speed = -5
speed_now = 1
theta = -1.5
path_poses = sparse_poses[path[:T+1], :]
u, next_x, xstar, ustar = path_poses_to_input(path_poses, speed_now, theta,
dt, lr, T, max_speed, min_speed)
# plot the result
plt.figure(figsize=(10,10))
plt.subplot(3, 1, 1)
plt.plot(path_poses[0][0], path_poses[0][1], 'xb', label='current pose')
plt.plot(next_x[0], next_x[1], 'xr', label='next pose given current control output')
plt.plot(GetListFromMatrix(xstar.value[0, :]), GetListFromMatrix(
xstar.value[1, :]), '-.', label='estimated trajectory given control outputs')
plt.plot(path_poses[:T,0], path_poses[:T,1], label='reference trajectory')
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
plt.grid(True)
plt.subplot(3, 1, 2)
plt.cla()
plt.plot(GetListFromMatrix(xstar.value[2, :]), '-b',label='linear velocity')
plt.plot(GetListFromMatrix(xstar.value[3, :]), '-r',label='pose angle')
#plt.ylim([-1.0, 1.0])
plt.ylabel("velocity[m/s]")
plt.xlabel("horizon")
plt.legend()
plt.grid(True)
plt.subplot(3, 1, 3)
plt.cla()
plt.plot(GetListFromMatrix(ustar.value[0, :]), '-r', label="acceleration")
plt.plot(GetListFromMatrix(ustar.value[1, :]), '-b', label="beta")
#plt.ylim([-0.5, 0.5])
plt.legend()
plt.grid(True)
plt.show()
|
nilq/baby-python
|
python
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name = 'index'),
path('home/',views.home,name = 'home'),
path('maps/',views.maps, name = 'maps'),
path('maps/3dmap',views.mapsd, name = 'mapsd'),
path('accounts/login/', views.loginpage , name = 'loginpage'),
path('login/validate', views.loginvalidate , name = 'loginvalidate'),
path('sponsors/', views.sponsors , name = 'sponsors'),
path('team/', views.team , name = 'team'),
path('gallery/', views.gallery, name = 'gallery'),
path('social/', views.social, name = 'social'),
path('events/', views.events , name = 'events'),
path('signup/', views.signup , name = 'signup'),
]
|
nilq/baby-python
|
python
|
import os
import sys
import unittest
if __name__ == "__main__":
# add the path to be execute in the main directory
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
testmodules = [
'tests.test_nsga2',
#'pymoo.usage.test_usage'
]
suite = unittest.TestSuite()
for t in testmodules:
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
unittest.TextTestRunner().run(suite)
|
nilq/baby-python
|
python
|
import logging
import os
from peek_plugin_base.PeekVortexUtil import peekStorageName
logger = logging.getLogger(__name__)
class StorageTestMixin:
def __init__(self):
self._dbConn = None
def setUp(self) -> None:
from peek_storage._private.storage import setupDbConn
from peek_storage._private.storage.DeclarativeBase import metadata
import peek_storage
from peek_plugin_base.storage.DbConnection import DbConnection
from peek_storage._private.service.PeekStorageConfig import PeekStorageConfig
from peek_platform import PeekPlatformConfig
PeekPlatformConfig.componentName = peekStorageName
config = PeekStorageConfig()
alembicDir = os.path.join(
os.path.dirname(peek_storage._private.__file__),
"alembic")
self._dbConn = DbConnection(dbConnectString=config.dbConnectString,
metadata=metadata,
alembicDir=alembicDir,
dbEngineArgs=config.dbEngineArgs,
enableCreateAll=False,
enableForeignKeys=False)
self._dbConn.migrate()
def tearDown(self) -> None:
self._dbConn.closeAllSessions()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
654. Maximum Binary Tree
Given an integer array with no duplicates. A maximum tree building on this array is defined as follow:
The root is the maximum number in the array.
The left subtree is the maximum tree constructed from left part subarray divided by the maximum number.
The right subtree is the maximum tree constructed from right part subarray divided by the maximum number.
Construct the maximum tree by the given array and output the root node of this tree.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def constructMaximumBinaryTree(self, nums) -> TreeNode:
if not nums:
return None
max_val = nums[0]
max_ind = 0
for ind, val in enumerate(nums):
if val > max_val:
max_ind = ind
max_val = val
l_node = self.constructMaximumBinaryTree(nums[:max_ind])
r_node = self.constructMaximumBinaryTree(nums[max_ind + 1:])
root = TreeNode(val=max_val, left=l_node, right=r_node)
return root
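# Note: the recursive slicing above is O(n^2) in the worst case (already
# sorted input). A hedged alternative, not part of the original solution: a
# monotonic decreasing stack builds the same tree in O(n).
def constructMaximumBinaryTreeStack(nums):
    stack = []  # nodes with strictly decreasing values, left to right
    for val in nums:
        node = TreeNode(val)
        # Everything smaller than val seen so far becomes val's left subtree.
        while stack and stack[-1].val < val:
            node.left = stack.pop()
        # val becomes the right child of the nearest larger value so far.
        if stack:
            stack[-1].right = node
        stack.append(node)
    return stack[0] if stack else None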
|
nilq/baby-python
|
python
|
import torch
import math
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
from spirl.utils.pytorch_utils import ten2ar
from spirl.utils.general_utils import batch_apply
class Gaussian:
""" Represents a gaussian distribution """
# TODO: implement a dict conversion function
def __init__(self, mu, log_sigma=None):
"""
:param mu:
:param log_sigma: If none, mu is divided into two chunks, mu and log_sigma
"""
        if log_sigma is None:
            assert isinstance(mu, torch.Tensor), "mu must be a Tensor when log_sigma is not given"
            mu, log_sigma = torch.chunk(mu, 2, -1)
self.mu = mu
self.log_sigma = torch.clamp(log_sigma, min=-10, max=2) if isinstance(log_sigma, torch.Tensor) else \
np.clip(log_sigma, a_min=-10, a_max=2)
self._sigma = None
def sample(self):
return self.mu + self.sigma * torch.randn_like(self.sigma)
def kl_divergence(self, other):
"""Here self=q and other=p and we compute KL(q, p)"""
return (other.log_sigma - self.log_sigma) + (self.sigma ** 2 + (self.mu - other.mu) ** 2) \
/ (2 * other.sigma ** 2) - 0.5
def nll(self, x):
# Negative log likelihood (probability)
return -1 * self.log_prob(x)
def log_prob(self, val):
"""Computes the log-probability of a value under the Gaussian distribution."""
return -1 * ((val - self.mu) ** 2) / (2 * self.sigma**2) - self.log_sigma - math.log(math.sqrt(2*math.pi))
def entropy(self):
return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.sigma)
@property
def sigma(self):
if self._sigma is None:
self._sigma = self.log_sigma.exp()
return self._sigma
@property
def shape(self):
return self.mu.shape
@staticmethod
def stack(*argv, dim):
return Gaussian._combine(torch.stack, *argv, dim=dim)
@staticmethod
def cat(*argv, dim):
return Gaussian._combine(torch.cat, *argv, dim=dim)
@staticmethod
def _combine(fcn, *argv, dim):
mu, log_sigma = [], []
for g in argv:
mu.append(g.mu)
log_sigma.append(g.log_sigma)
mu = fcn(mu, dim)
log_sigma = fcn(log_sigma, dim)
return Gaussian(mu, log_sigma)
    @staticmethod
    def average(dists):
        """Fits a single Gaussian to a list of Gaussians by moment matching."""
        mu_avg = torch.stack([d.mu for d in dists]).mean(0)
        var_avg = torch.stack([d.mu ** 2 + d.sigma ** 2 for d in dists]).mean(0) - mu_avg ** 2
        return Gaussian(mu_avg, 0.5 * torch.log(var_avg))  # log_sigma is log(std), hence the 0.5
def chunk(self, *args, **kwargs):
return [type(self)(chunk) for chunk in torch.chunk(self.tensor(), *args, **kwargs)]
def view(self, shape):
self.mu = self.mu.view(shape)
self.log_sigma = self.log_sigma.view(shape)
self._sigma = self.sigma.view(shape)
return self
def __getitem__(self, item):
return Gaussian(self.mu[item], self.log_sigma[item])
def tensor(self):
return torch.cat([self.mu, self.log_sigma], dim=-1)
def rsample(self):
"""Identical to self.sample(), to conform with pytorch naming scheme."""
return self.sample()
def detach(self):
"""Detaches internal variables. Returns detached Gaussian."""
return type(self)(self.mu.detach(), self.log_sigma.detach())
def to_numpy(self):
"""Convert internal variables to numpy arrays."""
return type(self)(ten2ar(self.mu), ten2ar(self.log_sigma))
class UnitGaussian(Gaussian):
def __init__(self, size, device):
mu = torch.zeros(size, device=device)
log_sigma = torch.zeros(size, device=device)
super().__init__(mu, log_sigma)
class MultivariateGaussian(Gaussian):
def log_prob(self, val):
return super().log_prob(val).sum(-1)
@staticmethod
def stack(*argv, dim):
return MultivariateGaussian(Gaussian.stack(*argv, dim=dim).tensor())
@staticmethod
def cat(*argv, dim):
return MultivariateGaussian(Gaussian.cat(*argv, dim=dim).tensor())
class MultivariateDiagNormal(MultivariateNormal):
def __init__(self, loc, scale, *args, **kwargs):
cov = torch.diag_embed(scale.pow(2))
super().__init__(loc, cov, *args, **kwargs)
class SequentialGaussian_SharedPQ:
""" stacks two Gaussians """
def __init__(self, g1, z1, g2):
"""
"""
self.g1 = g1
self.g2 = g2
self.z1 = z1
assert z1.shape == g1.shape
self.shared_dims = None # how many shape dimensions are shared
self._update_shared_dims()
def sample(self):
"""
        sample z2 and concatenate it with z1
:return:
"""
return torch.cat([self.z1, self.g2.sample()], dim=1)
def kl_divergence(self, other):
return self.g1.kl_divergence(other.g1)
@property
def shape(self):
self._update_shared_dims()
return self.g1.shape[:self.shared_dims]
@property
def mu(self):
return self.g1.mu
@staticmethod
def stack(*argv, dim):
return SequentialGaussian_SharedPQ._combine(torch.stack, *argv, dim=dim)
@staticmethod
def cat(*argv, dim):
return SequentialGaussian_SharedPQ._combine(torch.cat, *argv, dim=dim)
@staticmethod
def _combine(fcn, *argv, dim):
def fn_apply(inputs):
mu, log_sigma = [], []
for g in inputs:
mu.append(g.mu)
log_sigma.append(g.log_sigma)
mu = fcn(mu, dim)
log_sigma = fcn(log_sigma, dim)
return Gaussian(mu, log_sigma)
g1_list = [a.g1 for a in argv]
g2_list = [a.g2 for a in argv]
z1_list = [a.z1 for a in argv]
return SequentialGaussian_SharedPQ(fn_apply(g1_list), fcn(z1_list, dim=dim), fn_apply(g2_list))
def view(self, shape):
# assume that this shape does not include the last dimensions
self._update_shared_dims()
self.g1 = self.g1.view(shape + list(self.g1.shape[self.shared_dims:]))
self.g2 = self.g2.view(shape + list(self.g2.shape[self.shared_dims:]))
self.z1 = self.z1.view(shape + list(self.z1.shape[self.shared_dims:]))
return self
def __getitem__(self, item):
return SequentialGaussian_SharedPQ(self.g1[item], self.z1[item], self.g2[item])
def _update_shared_dims(self):
shared_dims = 0
for i, j in zip(self.g1.shape, self.g2.shape):
if i != j: break
shared_dims += 1
        assert shared_dims != 0  # need at least one shared dim between the Gaussians
self.shared_dims = shared_dims
class ProbabilisticModel:
def __init__(self):
self._sample_prior = False
def switch_to_prior(self):
self._sample_prior = True
def switch_to_inference(self):
self._sample_prior = False
def get_fixed_prior(tensor, bs=None, dim=None):
if dim is not None:
return Gaussian(tensor.new_zeros(bs, dim, 1, 1), tensor.new_zeros(bs, dim, 1, 1))
else:
return Gaussian(torch.zeros_like(tensor.mu), torch.zeros_like(tensor.log_sigma))
def stack(inp, dim):
if isinstance(inp[0], Gaussian):
return Gaussian.stack(*inp, dim=dim)
else:
return torch.stack(inp, dim)
def mc_kl_divergence(p, q, n_samples=1):
"""Computes monte-carlo estimate of KL divergence. n_samples: how many samples are used for the estimate."""
samples = [p.sample() for _ in range(n_samples)]
return torch.stack([p.log_prob(x) - q.log_prob(x) for x in samples], dim=1).mean(dim=1)
if __name__ == "__main__":
d = [Gaussian(torch.tensor([1., 1]), torch.zeros(2)), Gaussian(torch.tensor([-1., -1]), torch.zeros(2))]
d_avg = Gaussian.average(d)
print(d_avg.mu, d_avg.sigma)
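# A hedged sanity-check sketch (assumed usage, not part of the module): the
# Monte-Carlo estimate mc_kl_divergence should approach the analytic
# Gaussian.kl_divergence as n_samples grows.
#
# p = Gaussian(torch.zeros(1, 2), torch.zeros(1, 2))   # N(0, I)
# q = Gaussian(torch.ones(1, 2), torch.zeros(1, 2))    # N(1, I)
# analytic = p.kl_divergence(q).sum(-1)                # 0.5 per dim -> 1.0
# estimate = mc_kl_divergence(p, q, n_samples=10000).sum(-1)
# print(analytic, estimate)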
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
""" Use name to identify share isin by using fondout db and google """
"""
1. Lista pa okända shares vi vill söka isin på.
2. Traversera lista.
3. Kontrollera mot shares
4. Kontrollera mot share_company
5. Kontrollera mot shares LIKE från båda håll min chars 5
6. Kontrollera mot shares_company LIKE från båda håll min chars 5
7. Googla namn + isin.
Jämför google resultat med det vi ev. hittat i databasen.
Om vi väljer google res. på konflikt söker vi baklänges och ser om det
resultatet finns i databasen.
Olika resultatskoder skrivs ut i databasen.
TODO:
- Bryta ut parametrar för gränsvärden i variabler
- Bryt ut lösenord och användarnamn till ENV_VAR
"""
import sys
import mysql.connector
from isingoogle import GoogleIsinSearch
import argparse
import time
from random import randint
import signal
class FindIsin:
def exit_procedure(self):
print "Exiting script..."
self.cnxUp.commit()
self.cursorUp.close()
self.cnxUp.close()
self.cursor.close()
self.connection.close()
sys.exit(0)
def signal_handler(self, signal, frame):
print('You pressed Ctrl+C!')
self.exit_procedure()
def _execute_share_search_query(self, query, name):
cursor = self.connection.cursor(buffered=True)
try:
cursor.execute(query, (name, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find share by name exception:', e)
if (cursor is not None):
self.share = cursor.fetchone()
else:
self.share = None
cursor.close()
return self.share
def _execute_share_search_query_get_all(self, query, name):
cursor = self.connection.cursor(buffered=True)
try:
cursor.execute(query, (name, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find all shares exception:', e)
if (cursor is not None):
self.share = cursor.fetchall()
else:
self.share = None
cursor.close()
return self.share
def _find_by_share_exact_name(self, name):
query = ("SELECT s.name, s.isin FROM share s "
"WHERE s.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_exact_alias(self, name):
query = ("SELECT sa.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_company_exact_name(self, name):
query = ("SELECT sc.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name = (%s) "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_company_fuzzy_name(self, name):
query = ("SELECT sc.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_fuzzy_name(self, name):
query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE s.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_reverse_fuzzy_name(self, name):
query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE %s like CONCAT('%', s.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_fuzzy_alias(self, name):
query = ("SELECT sa.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_reverse_fuzzy_alias(self, name):
query = ("SELECT sa.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE %s like CONCAT('%', sa.name, '%') "
"AND length(sa.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def _find_by_share_company_reverse_fuzzy_name(self, name):
query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE %s like CONCAT('%', sc.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND +
"group by sc.name "
+ self.QUERY_ORDER +
"LIMIT 1")
return self._execute_share_search_query(query, name)
def share_by_isin(self, isin):
cursor = self.connection.cursor(buffered=True)
query = ("SELECT name FROM share s "
"WHERE isin = (%s) "
"AND (s.category = 1 or s.category is null) "
"LIMIT 1")
try:
cursor.execute(query, (isin, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find share by isin exception:', e)
if (cursor is not None):
share_name = cursor.fetchone()
else:
share_name = None
cursor.close()
return share_name
def share_company_by_isin(self, isin):
cursor = self.connection.cursor(buffered=True)
query = ("SELECT sc.name FROM share s "
"join share_company sc on s.share_company = sc.id "
"WHERE isin = %s ")
try:
cursor.execute(query, (isin, ))
except mysql.connector.errors.IntegrityError, e:
print ('Find share_company by isin exception:', e)
if (cursor is not None):
self.share_company_name = cursor.fetchone()
else:
self.share_company_name = None
cursor.close()
if (self.share_company_name is not None):
self.share_company_name = self.share_company_name[0]
return self.share_company_name
def find_exact_share_routine(self, name):
self.share = None
self.share = self._find_by_share_exact_name(name)
if (self.share is None):
self.share = self._find_by_share_exact_alias(name)
if (self.share is None):
self.share = self._find_by_share_company_exact_name(name)
return self.share
def find_share_routine(self, name):
self.share = None
self.share = self.find_exact_share_routine(name)
# Do not allow fuzzy search on search strings shorter than four letters!
if (len(name) > 3):
if (self.share is None):
self.share = self._find_by_share_fuzzy_name(name)
if (self.share is None):
self.share = self._find_by_share_company_fuzzy_name(name)
if (self.share is None):
self.share = self._find_by_share_fuzzy_alias(name)
if (self.share is None):
self.share = self._find_by_share_reverse_fuzzy_alias(name)
if (self.share is None):
self.share = self._find_by_share_reverse_fuzzy_name(name)
if (self.share is None):
self.share = self._find_by_share_company_reverse_fuzzy_name(
name)
return self.share
def exact_and_fuzzy_routine(self, name):
share = None
share = self.find_exact_share_routine(name)
if(share is None):
share = self.find_share_routine(name)
return share
def all_exact(self, name):
shares = []
for query in self._exact_queries:
shares = shares + self._execute_share_search_query_get_all(query, name)
return shares
def all_fuzzy(self, name):
shares = []
for query in self._fuzzy_queries:
shares = shares + self._execute_share_search_query_get_all(query, name)
return shares
def find_share_alt_name(self, name):
name = name.lower()
share = None
used_names = []
used_names.append(name)
for suffix in self.company_suffix:
new_name = name.replace(suffix, "")
if (new_name not in used_names):
share = self.exact_and_fuzzy_routine(new_name)
used_names.append(new_name)
if (share is None):
new_name = new_name.replace(",", "").replace(".", "")
if (new_name not in used_names):
share = self.exact_and_fuzzy_routine(new_name)
used_names.append(new_name)
if (share is None):
new_name = new_name.replace(" ", "")
if (new_name not in used_names):
share = self.exact_and_fuzzy_routine(new_name)
used_names.append(new_name)
if (share is not None):
break
return share
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def __init__(self):
self.QUERY_WHERE_AND = ( "AND s.isin IS NOT NULL "
"AND (s.category = 1 or s.category is null) ")
self.QUERY_ORDER = "ORDER BY s.category desc, s.id asc "
        self.company_suffix = [' group', '.', ',', ' corporation', ' plc',
' limited', ' & co.',
' ab', ' a/s', ' oyj', ' asa', ' hf', ' abp',
' incorporated', ' company', ' & company',
' ag', ' (the)', ' and company', ' holdings',
' financial', 'the ', ' corp', ' inc', ' hldgs',
' companies', ' nl', ' se', 's.p.a.', ' spa', 's.a.',
                               'aktiengesellschaft', ', inc.', ' co. ltd.', 'ltd', 'plc',
                               'company limited']
self.QUERY_LIMIT = ""
self._find_by_share_exact_name_query = ("SELECT s.name, s.isin FROM share s "
"WHERE s.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_exact_alias_query = ("SELECT s.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name = (%s) "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_company_exact_name_query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name = (%s) "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_company_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE sc.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND +
"GROUP BY sc.name "
+ self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE s.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_reverse_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share s "
"WHERE %s like CONCAT('%', s.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_fuzzy_alias_query = ("SELECT s.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE sa.name like CONCAT('%', %s, '%') "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_reverse_fuzzy_alias_query = ("SELECT s.name, s.isin "
"FROM share s "
"JOIN share_alias sa on sa.share = s.id "
"WHERE %s like CONCAT('%', sa.name, '%') "
"AND length(sa.name) > 4 "
+ self.QUERY_WHERE_AND + self.QUERY_ORDER + self.QUERY_LIMIT)
self._find_by_share_company_reverse_fuzzy_name_query = ("SELECT s.name, s.isin "
"FROM share_company sc "
"JOIN share s on s.share_company = sc.id "
"WHERE %s like CONCAT('%', sc.name, '%') "
"AND length(s.name) > 4 "
+ self.QUERY_WHERE_AND +
"group by sc.name "
+ self.QUERY_ORDER + self.QUERY_LIMIT)
self._exact_queries = (
self._find_by_share_exact_name_query,
self._find_by_share_exact_alias_query)
# not including SC in exact match.
#self._find_by_share_company_exact_name_query
self._fuzzy_queries = (
self._find_by_share_company_exact_name_query,
self._find_by_share_company_fuzzy_name_query,
self._find_by_share_fuzzy_name_query,
self._find_by_share_reverse_fuzzy_name_query,
self._find_by_share_fuzzy_alias_query,
self._find_by_share_reverse_fuzzy_alias_query,
self._find_by_share_company_reverse_fuzzy_name_query)
# Search database
self.connection = mysql.connector.connect(user='root',
password='root',
database='fondout_maxi')
self.cursor = self.connection.cursor()
# Update database
self.cnxUp = mysql.connector.connect(
user='root',
password='root',
database='fund_search')
self.cursorUp = self.cnxUp.cursor()
self._isin_google = GoogleIsinSearch()
def main(self):
# Parse args from command line
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--fund", help="Fund to use.")
args = parser.parse_args()
# Prepare script to listen to ctrl-c
signal.signal(signal.SIGINT, self.signal_handler)
# --------- Choose selection of shares from database
query_unidentified_shares = (
"SELECT name from tmp_shareholding "
"where isin IS NULL")
        # Modify the query above to re-test shares whose ISINs were already caught / to redo everything.
if args.fund is not None:
print "Fund used : ", args.fund
query_unidentified_shares = (
"SELECT name from tmp_shareholding "
"where fund = (select id from tmp_fund where name LIKE '%"
+ args.fund + "%')"
" AND isin IS NULL and false_positive = 0")
self.cursorUp.execute(query_unidentified_shares)
unidentifiedShares = self.cursorUp.fetchall()
# --------------------------------------------------------------------------
for (share_name,) in unidentifiedShares:
# lowercase because MYSQL is not case sensitive, but python is.
share_name = share_name.lower()
print "New share: ", share_name
# --------------- Find section ---------------
# First db-search attempt, exact match
found_share = self.find_exact_share_routine(share_name)
if (found_share is not None):
exact_match = True
else:
exact_match = False
# Second db-search attempt, fuzzy search
if (found_share is None):
found_share = self.find_share_routine(share_name)
# Third db-search attempt with alternated name
if (found_share is None):
used_names = []
used_names.append(share_name)
for suffix in self.company_suffix:
new_name = share_name.replace(suffix, "")
if (new_name not in used_names):
found_share = self.find_share_routine(new_name)
used_names.append(new_name)
if (found_share is None):
new_name = new_name.replace(",", "").replace(".", "")
if (new_name not in used_names):
found_share = self.find_share_routine(new_name)
used_names.append(new_name)
if (found_share is None):
new_name = new_name.replace(" ", "")
if (new_name not in used_names):
found_share = self.find_share_routine(new_name)
used_names.append(new_name)
if (found_share is not None):
break
if (exact_match is not True):
(googled_isin,
googled_isin_matches,
google_occurances) = self._isin_google.search_google(share_name)
else:
googled_isin_matches = 0
google_occurances = None
googled_isin = None
if (found_share is not None):
(found_name, found_isin) = found_share
print found_name, found_isin
if (googled_isin_matches > 0):
if (found_isin == googled_isin):
# CASE 1: GOOGLE = DBMATCH --> SAME ISIN
# Database found matches top google result
print 'Found isin matches googled isin, gr8 success!'
found_method = ("1: search and google match. Google hits: "
+ str(googled_isin_matches))
elif (googled_isin_matches > 3):
# CASE 2: GOOGLE(>3) != DBMATCH --> CHOOSE GOOGLE
# No match google hits wins - take google result
found_method = ("2:" + str(googled_isin_matches)
+ " google hits, conflict search " + found_name + " "
+ found_isin)
found_isin = googled_isin
found_name = "googled: "
result = self.share_by_isin(googled_isin)
if (result is not None):
found_name = found_name + ' ' + result[0]
elif (googled_isin_matches > 0):
if (found_isin in google_occurances):
                    # CASE 3: GOOGLE(<3) != DBMATCH, DBMATCH in GOOGLE OCCURRENCES
# ---> CHOOSE DBMATCH
found_method = ("3: mismatch db. top google hit: "
+ googled_isin + ":" + str(googled_isin_matches))
else:
                    # CASE 4: GOOGLE(<4) != DBMATCH, DBMATCH NOT in GOOGLE OCCURRENCES
# found_isin = ""
# found_name = ""
found_method = ("4. mismatch db google("
+ str(googled_isin_matches) + ") not in google results. "
+ googled_isin)
result = self.share_by_isin(googled_isin)
if (result is not None):
found_method = (found_method + ' db-matched to ' +
result[0])
elif (exact_match is True):
# CASE 8: EXACT MATCH
found_method = "8: Exact match"
else:
# CASE 2: No google hits, but found in DB
# Make this a separate case?
found_method = "2: No google hits"
elif (googled_isin_matches > 0):
# min 3 google hits makes certain
if (googled_isin_matches > 2):
found_isin = googled_isin
# Search current db for found isin.
result = self.share_by_isin(googled_isin)
found_method = ("5:" + str(googled_isin_matches)
+ " results googled, faild db-search.")
found_name = "googled: "
if (result is not None):
found_name = found_name + result[0]
else:
found_method = ("6. Google hits: " + str(googled_isin_matches)
+ " : " + googled_isin)
result = self.share_by_isin(googled_isin)
if (result is not None):
found_method = found_method + ' db-matched to ' + result[0]
found_isin = ""
found_name = ""
else:
found_isin = ""
found_name = ""
found_method = "7: Nothing found!"
# Get share_company
found_share_company = ""
if(found_isin != ""):
found_share_company = self.share_company_by_isin(found_isin)
# --------------- Update section ---------------
query_update_isin = (
"UPDATE tmp_shareholding "
"SET matched_name=%s, isin=%s, method_found=%s, share_company=%s "
"WHERE name = %s")
update_share_values = (found_name, found_isin, found_method,
found_share_company, share_name)
# If fund is specified add specific by fund.
# obs: Should really be the exact one pulled from the database
if args.fund is not None:
query_update_isin = (
query_update_isin +
" and fund = (select id from tmp_fund "
"where name like CONCAT('%', %s, '%'))")
update_share_values = update_share_values + (args.fund, )
# Update share in fund_search where name = share_name
try:
self.cursorUp.execute(query_update_isin, update_share_values)
except Exception as e:
print('Update execution error', e)
# Use commit for confirming modification of data.
# Rollback to undo.
# Disable for test.
self.cnxUp.commit()
time.sleep(randint(0,10))
self.exit_procedure()
if __name__ == "__main__":
findIsin = FindIsin()
findIsin.main()
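# Hedged sketch for the ENV_VAR item in the TODO docstring above (not part of
# the script): read the MySQL credentials from the environment instead of
# hard-coding them, keeping the current values as fallbacks. The variable
# names FONDOUT_DB_USER / FONDOUT_DB_PASSWORD are assumptions.
#
# import os
# db_user = os.environ.get('FONDOUT_DB_USER', 'root')
# db_password = os.environ.get('FONDOUT_DB_PASSWORD', 'root')
# connection = mysql.connector.connect(user=db_user,
#                                      password=db_password,
#                                      database='fondout_maxi')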
|
nilq/baby-python
|
python
|
"""Provides conversion for transaction data into an exchange format."""
import textwrap
import time
from powl import exception
class TransactionConverter(object):
"""
Provides methods to convert data into a financial exchange format.
"""
def convert(self, date, debit, credit, amount, memo):
"""
Convert a transaction into an exchange financial format.
Parameters
----------
date : time.struct_time
Date of the transaction.
debit : str
Debit account of the transaction.
credit : str
Credit account of the transaction.
amount : float
Amount of the transaction.
memo : str
Description of the transaction.
"""
pass
class QifConverter(TransactionConverter):
"""
Provides methods to convert a transaction into QIF format.
"""
def __init__(self, log, files, account_types, assets, liabilities,
revenues, expenses):
"""
Parameters
----------
log : powl.log.Log
Used to log.
files : dict of powl.filesystem.File
Map of account key to files. Every key in files must exist in
either of assets, liabilities, revenues, or expenses.
account_types : dict
Map of account key to QIF account types.
assets : dict
Map of account key to Assets.
        liabilities : dict
            Map of account key to Liabilities.
        revenues : dict
            Map of account key to Revenues.
        expenses : dict
            Map of account key to Expenses.
Notes
-----
An account key is a string that maps to a QIF account.
Multiple account key words can map to the same account.
For example "ent" can map to "Expenses:Entertainment" and
"entertainment" can also map to "Expenses:Entertainment".
Raises
------
ValueError
If a key is files does not have a key in any of assets,
liabilities, revenues, or expenses.
"""
self._log = log
self._files = files
self._account_types = account_types
self._assets = assets
self._liabilities = liabilities
self._revenues = revenues
self._expenses = expenses
self._accounts = dict(self._assets.items() +
self._liabilities.items() +
self._revenues.items() +
self._expenses.items())
for key, value in self._files.items():
if key not in self._accounts.keys():
msg = ("account key ({0}) ".format(key) +
"for file ({0}) ".format(value.filename) +
"does not have has an associated QIF account")
err = exception.create(ValueError, msg)
raise err
def convert(self, date, debit, credit, amount, memo):
"""
Convert transaction data into QIF format.
Parameters
----------
date : time.struct_time
Date of the transaction.
debit : str
Debit account of the transaction.
credit : str
Credit account of the transaction.
amount : float
Amount of the transaction.
memo : str
Description of the transaction.
Returns
-------
record : str
QIF record of the transaction.
qif_file : powl.filesystem.File
The QIF file to output to.
Notes
-----
Since it depends which QIF file records the transaction, the return
value also contains the file to write to.
"""
qif_date = self._format_date(date)
qif_transfer = self._get_transfer_account(debit, credit)
qif_amount = self._format_amount(debit, amount)
qif_memo = memo
qif_record = self._format_qif_record(qif_date, qif_transfer,
qif_amount, qif_memo)
qif_file = self._get_qif_file(debit, credit)
self._log_transaction(qif_date, qif_file.filename, qif_transfer,
qif_amount, qif_memo)
return qif_record, qif_file
    def _create_qif_templates(self):
        templates = []
        for key, qif_file in self._files.iteritems():
            account_name = self._accounts.get(key)
            account_type = self._account_types.get(key)
            header = self._format_qif_header(account_name, account_type)
            templates.append((qif_file.filename, header))
        return templates
def _format_amount(self, debit, amount):
"""
Convert amount to QIF format based on debit.
Parameters
----------
debit : str
Account key for the debit of a transaction.
amount : str or float
Amount of the transaction.
Returns
-------
str
Formatted amount to use in QIF file.
Raises
------
ValueError
If amount cannot be converted to a float.
KeyError
If debit key is not an account.
"""
try:
formatted_amount = "{0:.2f}".format(float(amount))
except ValueError as err:
msg = "amount ({0}) cannot be converted to float".format(amount)
exception.add_message(err, msg)
raise
if debit in self._expenses:
# Amount should be negative.
return '-' + formatted_amount
elif debit in self._accounts:
return formatted_amount
else:
msg ="account key ({0}) does not exist".format(debit)
err = exception.create(KeyError, msg)
raise err
def _format_date(self, date):
"""
Convert struct_time to QIF date format (MM/DD/YYYY).
Parameters
----------
date : time.struct_time
The date of the transaction.
Returns
-------
str
String date in the format of "MM/DD/YYYY".
Raises
------
TypeError
If date is not a struct_time.
ValueError
If a date value is out of range.
OverflowError
If a value in the tuple is too large to be stored in a C long.
"""
try:
return time.strftime("%m/%d/%Y", date)
except (ValueError, TypeError, OverflowError) as err:
msg = ("date ({0}) cannot be converted ".format(date) +
"to MM/DD/YYYY ")
exception.add_message(err, msg)
raise
def _format_qif_header(self, account_name, account_type):
"""Format an account name and type into a header for a QIF file."""
data = { 'name': account_name, 'type': account_type }
header = textwrap.dedent("""\
!Account
N{name}
T{type}
^
!Type:{type}""".format(**data))
return header
def _format_qif_record(self, date, transfer, amount, memo):
"""
Formats qif data into a transaction for a QIF file.
Parameters
----------
date : str
Date of the transaction
transfer : str
Transfer QIF account.
amount : str
Formatted amount.
memo : str
Description of the transaction.
"""
return textwrap.dedent(
"""\
D{0}
T{1}
L{2}
M{3}
^""".format(date, amount, transfer, memo))
def _get_qif_file(self, debit, credit):
"""
Get the associated QIF file from the debit and credit keys.
Parameters
----------
debit : str
Account key for the debit of a transaction.
credit : str
Account key for the credit of a transaction.
Raises
------
KeyError
If neither key has an associated QIF file.
Notes
-----
Debit key has priority so if both debit and credit key has an
associated QIF file than the QIF file associated with the debit
key is returned.
This is linked with get_transfer_account. If the QIF file returned
from this is from the debit key then the transfer account must be
from the credit key and vice versa.
"""
if debit in self._files:
return self._files[debit]
elif credit in self._files:
return self._files[credit]
else:
msg = ("neither debit key ({0}) ".format(debit) +
"or credit key ({0}) ".format(credit) +
"has an associated QIF file")
err = exception.create(KeyError, msg)
raise err
def _get_transfer_account(self, debit, credit):
"""
Get the associated QIF account from the debit and credit keys.
Parameters
----------
debit : str
Account key for the debit of a transaction.
credit : str
Account key for the credit of a transaction.
Raises
------
KeyError
If neither key has an associated QIF file.
If neither key has an associated QIF account.
Notes
-----
Credit key has priority so if both debit and credit key has an
associated QIF account than the QIF account associated with the
credit key is returned.
This is linked with get_qif_file. If the transfer account returned
from this is from the credit key then the QIF file must be from the
debit key and vice versa.
"""
if debit in self._files:
key = credit
elif credit in self._files:
key = debit
else:
msg = ("neither debit key ({0}) ".format(debit) +
"or credit key ({0}) ".format(credit) +
"has an associated QIF file")
err = exception.create(KeyError, msg)
raise err
if key in self._accounts:
return self._accounts[key]
else:
msg = ("account key ({0}) ".format(key) +
"does not have has an associated QIF account")
err = exception.create(KeyError, msg)
raise err
def _log_transaction(self, date, filename, transfer, amount, memo):
"""
Debug logs the transaction.
Parameters
----------
date : str
Date of the transaction
filename : str
Name of the QIF file.
transfer : str
Transfer QIF account.
amount : str
Formatted amount.
memo : str
Description of the transaction.
"""
self._log.debug("QIF transaction:")
self._log.debug(" date: %s", date)
self._log.debug(" file: %s", filename)
self._log.debug(" transfer: %s", transfer)
self._log.debug(" amount: %s", amount)
self._log.debug(" memo: %s", memo)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# pylint: disable=wrong-import-position
import os
import subprocess
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from xv_leak_tools.helpers import current_os
if len(sys.argv) != 2:
sys.stderr.write("USAGE: setup_python.py virtualenv_location\n")
sys.exit(1)
def bin_path():
if current_os() == 'macos':
return '/usr/local/bin/'
return '/usr/bin/'
def install_python_if_not_present(location):
if os.path.exists(os.path.join(location, 'bin', 'activate')):
print("Virtualenv already setup in {}".format(location))
return
print("Creating virtualenv")
cmd = [
os.path.join(bin_path(), 'virtualenv'),
'-p',
os.path.join(bin_path(), 'python3'),
location
]
print("Executing: {}".format(" ".join(cmd)))
subprocess.check_output(cmd)
write_pythonlocation(location)
def install_pip_packages():
if sys.platform == 'linux' or sys.platform == 'linux2':
requirements = 'requirements_linux.txt'
elif sys.platform == 'darwin':
requirements = 'requirements_macos.txt'
elif sys.platform == 'cygwin':
requirements = 'requirements_windows.txt'
else:
raise Exception("Unsupported system: {}".format(sys.platform))
print("Installing pip packages using {}".format(requirements))
script = '''\
source activate
pip3 install -r {}
'''.format(requirements)
_file, script_file = tempfile.mkstemp()
with os.fdopen(_file, 'w') as _file:
_file.write(script)
print("Wrote temp file to {}".format(script_file))
try:
for line in subprocess.check_output(['bash', script_file]).splitlines():
print(line.decode())
except subprocess.CalledProcessError as ex:
print("FAILED: {}".format(ex))
sys.exit(1)
def write_pythonlocation(location):
with open('.pythonlocation', 'w') as _file:
_file.write(location)
LOCATION = sys.argv[1]
print("Setting up python in {}".format(LOCATION))
install_python_if_not_present(LOCATION)
# Write the python location first as the pip step relies on it
write_pythonlocation(LOCATION)
# We always install pip packages so that updates from the repo get picked up. On the build machines
# this is very useful. It's cheap to pip install if everything is already installed.
install_pip_packages()
|
nilq/baby-python
|
python
|
from otd.skosnavigate import SKOSNavigate
from similarity.csv_parse import get_fragment
from utils.graph import create_bound_graph
def sort_turtle(prefix, identifiers, file_in, file_out):
with open(file_in, encoding='utf8') as fd:
line_iter = iter(fd)
entities = dict()
preamble_finished = False
preamble = []
while not preamble_finished:
line = next(line_iter)
preamble.append(line)
if line.strip() == "":
preamble_finished = True
extra = []
try:
while True:
line = next(line_iter)
                if line.strip() == "":
# Extra blank lines between entities
continue
if not line.startswith(prefix):
# We don't recognize this, just put it in the end
extra.append(line)
continue
# If we are here, then we recognize this line as the first for a
# subject. Its identifier is from after the prefix, until space.
this_id = line.split()[0][len(prefix):]
this_entitity = []
entities[this_id] = this_entitity
# do-while loop
this_entitity.append(line)
while line.strip() != "":
line = next(line_iter)
this_entitity.append(line)
except StopIteration:
# End of file
pass
# Add to file in order given by identifiers
sorted_lines = list()
sorted_lines.extend(preamble)
for ent_id in identifiers:
if ent_id in entities:
sorted_lines.extend(entities[ent_id])
entities.pop(ent_id)
else:
print('Identifier', ent_id, 'not found in turtle file')
remaining_entities = sorted(entities.items())
for _, lines in remaining_entities:
sorted_lines.extend(lines)
if extra:
sorted_lines.append('\n')
sorted_lines.extend(extra)
with open(file_out, mode="w", encoding="utf8") as fd:
fd.writelines(sorted_lines)
def get_concepts_sorted(file_in):
identifiers = []
g = create_bound_graph()
g.parse(location=file_in, format="turtle")
navigator = SKOSNavigate(g)
def do_node(node, visited_nodes):
identifiers.append(get_fragment(str(node)))
children = tuple(sorted(navigator.find_children(node)))
if node not in visited_nodes and len(children):
for child in children:
do_node(child, visited_nodes | {node})
elif node in visited_nodes:
print('Cycle detected for nodes', visited_nodes, 'at node', node)
do_node(navigator.find_root(), set())
return identifiers
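# A hedged usage sketch (the file names and the "odt:" subject prefix are
# illustrative assumptions): reorder a SKOS vocabulary so concepts appear in
# depth-first order from the root concept.
#
# ordered_ids = get_concepts_sorted("vocabulary.ttl")
# sort_turtle("odt:", ordered_ids, "vocabulary.ttl", "vocabulary_sorted.ttl")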
|
nilq/baby-python
|
python
|
import pytest
from ee.clickhouse.queries.groups_join_query import GroupsJoinQuery
from posthog.models.filters import Filter
def test_groups_join_query_blank():
filter = Filter(data={"properties": []})
assert GroupsJoinQuery(filter, 2).get_join_query() == ("", {})
def test_groups_join_query_filtering(snapshot):
filter = Filter(
data={"properties": [{"key": "industry", "value": "finance", "type": "group", "group_type_index": 0}]}
)
assert GroupsJoinQuery(filter, 2).get_join_query() == snapshot
def test_groups_join_query_filtering_with_custom_key_names(snapshot):
filter = Filter(
data={
"properties": [
{"key": "industry", "value": "finance", "type": "group", "group_type_index": 0},
{"key": "company", "value": "crashed", "type": "group", "group_type_index": 2},
]
}
)
assert GroupsJoinQuery(filter, 2, join_key="call_me_industry").get_join_query() == snapshot
|
nilq/baby-python
|
python
|