content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
'''
Program: setup_cython.py
Created: Tue Aug 22 22:06:12 CEST 2017
Author: Tommaso Comparin
Description: Compiles a cython module
Notes: To be executed through
$ python setup_cython.py build_ext --inplace
'''
from distutils.extension import Extension
from distutils.core import setup
from Cython.Distutils import build_ext
lib = 'lib_laughlin_metropolis.pyx'
basename = lib[:-4]
ext_modules = [Extension(basename, [basename + '.pyx'])]
setup(cmdclass={'build_ext': build_ext}, ext_modules=ext_modules)
| [
7061,
6,
198,
15167,
25,
9058,
62,
948,
400,
261,
13,
9078,
198,
41972,
25,
30030,
2447,
2534,
2534,
25,
3312,
25,
1065,
327,
6465,
2177,
198,
13838,
25,
309,
2002,
292,
78,
22565,
259,
198,
11828,
25,
3082,
2915,
257,
3075,
400,
... | 2.816216 | 185 |
import re,os
import sys
import datetime
import pytz
from pytz import timezone
#filename = input("filename: ")
eastern = timezone('US/Central')
now=datetime.datetime.now().astimezone(eastern)
remindMeAT =datetime.datetime(2021,7,8,16,11) #year,month,day,hour,min,sec
week= datetime.timedelta(days = 7)
hour = datetime.timedelta(hours = 9)
my_date = datetime.datetime.now(pytz.timezone('US/Eastern'))
#print(now)
now = now.replace(tzinfo=None)
#print(my_date)
print(now)
#print(now.strftime("%Y-%m-%dT%H:%M:%S"))
print (" " + (now.strftime("%A at %I:%M%p -- %h/%d/%Y ")))
#print(now.strftime("%I:%M%p"))
#print(now)
#print(now+hour)
#print (dayo) | [
11748,
302,
11,
418,
198,
11748,
25064,
198,
11748,
4818,
8079,
198,
11748,
12972,
22877,
198,
6738,
12972,
22877,
1330,
640,
11340,
198,
198,
2,
34345,
796,
5128,
7203,
34345,
25,
366,
8,
198,
68,
6470,
796,
640,
11340,
10786,
2937,
... | 2.294737 | 285 |
#!/usr/bin/env python3
import os
import sys
import argparse
import subprocess
import logging
from multiprocessing import Pool
args = process_args()
logging.basicConfig(format='[%(filename)s] %(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M:%S', level=logging.DEBUG)
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
18931,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
628,
198,
22046,
796,
1429,
62... | 2.733333 | 105 |
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import time
from PIL import Image
import GC9A01 as GC9A01
from os import listdir
from os.path import isfile, join
print("""
image.py - Display an image on the LCD.
If you're using Breakout Garden, plug the 1.3" LCD (SPI)
breakout into the rear slot.
""")
if len(sys.argv) < 2:
print("Usage: {} <folder containing images> <seconds to delay> <loop>".format(sys.argv[0]))
sys.exit(1)
image_folder = sys.argv[1]
delay = float(sys.argv[2])
loop = True if sys.argv[3] in ["yes", "true", "True", "1"] else False
# Create GC9A01 LCD display class.
disp = GC9A01.GC9A01(
port=0,
cs=GC9A01.BG_SPI_CS_BACK, # BG_SPI_CSB_BACK or BG_SPI_CS_FRONT
dc=9,
rst=24,
backlight=19, # 18 for back BG slot, 19 for front BG slot.
spi_speed_hz=80 * 1000 * 1000
)
WIDTH = disp.width
HEIGHT = disp.height
image_files = [join(image_folder, f) for f in listdir(image_folder) if isfile(join(image_folder, f)) and ".png" in f and not f.startswith(".")]
# Load an image.
print('Loading {} images...'.format(len(image_files)))
images = [Image.open(image_file) for image_file in sorted(image_files)]
# Resize the image
images = [image.resize((WIDTH, HEIGHT)) for image in images]
# Draw the image on the display hardware.
print('Drawing images...')
# Initialize display.
disp.begin()
# Display all the images in order, delaying and looping if requested
running = True
while(running):
for image in images:
disp.display(image)
time.sleep(delay)
if not loop:
running = False
| [
2,
15069,
357,
66,
8,
1946,
1215,
1878,
4872,
20171,
198,
2,
6434,
25,
8832,
6031,
28635,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
... | 2.993251 | 889 |
"""Gamestore Forms"""
from django import forms
from django.contrib.auth.models import User
from gamestore.models import Game, UserProfile, Application
class UserForm(forms.ModelForm):
"""User form"""
class UserProfileForm(forms.ModelForm):
"""Profile form"""
class GameForm(forms.ModelForm):
"""Form for developers uploading a new game or modifying information."""
class ApplicationForm(forms.ModelForm):
"""Form for applying to become a developer"""
| [
37811,
34777,
395,
382,
39196,
37811,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
6738,
9106,
395,
382,
13,
27530,
1330,
3776,
11,
11787,
37046,
11,
15678,
62... | 3.606061 | 132 |
import random
from PIL import Image, ImageFilter
import numpy as np
def randomize(img, noise_level=.03):
""" given an array, randomizes the values in that array
noise_level [0,1] controls the overall likelihood of a bit being
flipped. This overall level is then multiplied by the levels variable,
which modifies the noise level for the various significant bit values
(i.e. it makes it so that less significant bits are more likely to be
flipped, which is accurate)
"""
levels = [.005, .01, .05, .10, .15, .25, .35, .45]
# more or less randomly chosen modifiers for each bit significance level
for val in np.nditer(img, op_flags=['readwrite']):
xor_val = 0
for level in levels:
if random.random() < level * noise_level:
xor_val = (xor_val << 1) | 1
else:
xor_val = (xor_val << 1) | 0
#print('{:08b}'.format(int(xor_val)))
val[...] = val ^ xor_val
return img
| [
11748,
4738,
198,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
22417,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
4738,
1096,
7,
9600,
11,
7838,
62,
5715,
28,
13,
3070,
2599,
198,
220,
220,
220,
37227,
1813,
281,
7177,
11,
47... | 2.4914 | 407 |
resource_body = """
char *buf;
int len;
Handle h;
if (!PyArg_ParseTuple(_args, "s#", &buf, &len))
return NULL;
h = NewHandle(len);
if ( h == NULL ) {
PyErr_NoMemory();
return NULL;
}
HLock(h);
memcpy(*h, buf, len);
HUnlock(h);
return ResObj_New(h);
"""
f = ManualGenerator("Resource", resource_body)
f.docstring = lambda: """Convert a string to a resource object.
The created resource object is actually just a handle,
apply AddResource() to write it to a resource file.
See also the Handle() docstring.
"""
functions.append(f)
handle_body = """
char *buf;
int len;
Handle h;
ResourceObject *rv;
if (!PyArg_ParseTuple(_args, "s#", &buf, &len))
return NULL;
h = NewHandle(len);
if ( h == NULL ) {
PyErr_NoMemory();
return NULL;
}
HLock(h);
memcpy(*h, buf, len);
HUnlock(h);
rv = (ResourceObject *)ResObj_New(h);
rv->ob_freeit = PyMac_AutoDisposeHandle;
return (PyObject *)rv;
"""
f = ManualGenerator("Handle", handle_body)
f.docstring = lambda: """Convert a string to a Handle object.
Resource() and Handle() are very similar, but objects created with Handle() are
by default automatically DisposeHandle()d upon object cleanup. Use AutoDispose()
to change this.
"""
functions.append(f)
# Convert resources to other things.
as_xxx_body = """
return %sObj_New((%sHandle)_self->ob_itself);
"""
resmethods.append(genresconverter("Control", "Ctl"))
resmethods.append(genresconverter("Menu", "Menu"))
# The definition of this one is MacLoadResource, so we do it by hand...
f = ResMethod(void, 'LoadResource',
(Handle, 'theResource', InMode),
)
resmethods.append(f)
#
# A method to set the auto-dispose flag
#
AutoDispose_body = """
int onoff, old = 0;
if (!PyArg_ParseTuple(_args, "i", &onoff))
return NULL;
if ( _self->ob_freeit )
old = 1;
if ( onoff )
_self->ob_freeit = PyMac_AutoDisposeHandle;
else
_self->ob_freeit = NULL;
return Py_BuildValue("i", old);
"""
f = ManualGenerator("AutoDispose", AutoDispose_body)
f.docstring = lambda: "(int)->int. Automatically DisposeHandle the object on Python object cleanup"
resmethods.append(f)
| [
31092,
62,
2618,
796,
37227,
198,
10641,
1635,
29325,
26,
198,
600,
18896,
26,
198,
37508,
289,
26,
198,
198,
361,
22759,
20519,
28100,
62,
10044,
325,
51,
29291,
28264,
22046,
11,
366,
82,
2,
1600,
1222,
29325,
11,
1222,
11925,
4008,... | 2.766129 | 744 |
import numpy as np
import gym
from copy import deepcopy as copy
import tensorflow as tf
from abc import ABC, abstractmethod
import os
def single_elem_support(func):
"""aop func"""
type_list = (type([]), type(()), type(np.array(1)))
def wrapper(*args, **kwargs):
"""wrapper func"""
res = func(*args, **kwargs)
if type(res) in type_list and len(res) == 1:
return res[0]
elif type(res[0]) in type_list and len(res[0]) == 1:
return [x[0] for x in res]
else:
return res
return wrapper
class RecDataBase(object):
'''
file-based implementation of a RecommnedEnv's data source.
Pulls data from file, preps for use by RecommnedEnv and then
acts as data provider for each new episode.
'''
@staticmethod
class RecSimBase(ABC):
""" Implemention of core recommendation simulator"""
@abstractmethod
@abstractmethod
@abstractmethod
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
198,
6738,
4866,
1330,
2769,
30073,
355,
4866,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
11748,
28686,
628,
198,
4299,
2060,
62,
6... | 2.56383 | 376 |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from pylash.core import init, addChild
from pylash.loaders import MediaLoader, LoaderEvent
from pylash.events import MouseEvent
from pylash.media import Sound, Video, MediaEvent
from pylash.ui import ButtonSample
init(1000 / 60, "Media", 800, 400, main)
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
640,
198,
6738,
279,
2645,
1077,
13,
7295,
1330,
2315,
11,
751,
16424,
198,
6738,
279,
2645,
1077,... | 2.944444 | 108 |
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from utils import utils
if __name__ == '__main__':
root = tk.Tk()
version = 'v2.0.1.3'
root.title("开单数量统计 %s" % version)
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root_width = 300
root_height = 150
x = (screen_width - root_width) / 2
y = (screen_height - root_height) / 2
root.geometry("%dx%d+%d+%d" % (root_width, root_height, x, y))
root.resizable(0, 0)
# 定义选择提示 label
label1 = tk.Label(root, text="请选择需要统计的 excel 文件")
label1.place(x=30, y=10)
# 定义输入框 1
entry1 = tk.Entry(root, width='28', bd=5)
entry1.place(x=30, y=30)
# 定义选择按钮
btn1 = tk.Button(root, text='...', width=3, command=choose_file)
btn1.place(x=250, y=30)
# 定义提示框 1
label2 = tk.Label(root, text="请输入本次统计时间(可选项)")
label2.place(x=30, y=60)
# 定义输入框 2
entry2 = tk.Entry(root, width='28', bd=5)
entry2.place(x=30, y=80)
# 定义统计按钮
btn2 = tk.Button(root, text='统计数据', command=count_data)
btn2.place(relx=0.4, y=110)
# 启动窗口
root.mainloop()
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
198,
6738,
3384,
4487,
1330,
3384,
4487,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
83... | 1.659913 | 691 |
import argparse
from dataclasses import dataclass
import json
import os
from typing import Dict, List, Optional, Tuple
from pytablewriter import MarkdownTableWriter
parser = argparse.ArgumentParser(
description="Generate Regula Rules Documentation",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--provider",
default="k8s",
help="Rule provider",
)
@dataclass
class RuleMeta:
"""
Metadata for a single rule
"""
severity: str
id: str
title: str
provider: str
resource_types: List[str]
# controls: Optional[Dict[str, Dict[str, List[str]]]]
# description: str
# service: Optional[str]
# input_type: Optional[str]
provider_name_map: Dict[str, str] = {
"aws": "AWS",
"google": "Google",
"azurerm": "Azure",
"k8s": "Kubernetes",
}
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
198,
6738,
12972,
11487,
16002,
1330,
2940,
2902,
10962,... | 2.614035 | 342 |
from dotenv import load_dotenv
from urllib.parse import urlparse, urljoin
import requests
import os
if __name__ == '__main__':
load_dotenv()
collection_name = os.getenv('HUBBLE_COLLECTION')
try:
fetch_images_from_hubble_collection(collection_name=collection_name)
except requests.exceptions.HTTPError as error:
print(error)
exit(2)
| [
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
11,
19016,
22179,
198,
11748,
7007,
198,
11748,
28686,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10... | 2.625 | 144 |
# -*t coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.channel
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import pdb
import numpy as np
import numpy.ma as ma
import numpy.linalg as la
import scipy as sp
import scipy.signal as si
import pylab as plt
import struct as stru
import scipy.stats as st
import scipy.optimize as optimize
import numpy.fft as fft
from scipy.io import loadmat
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import pylayers.util.geomutil as geu
import pylayers.antprop.antenna as ant
from pylayers.util.project import *
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import fmin
import copy
try:
import h5py
except:
print('h5py is not installed: Ctilde(object cannot be saved)')
class AFPchannel(bs.FUsignal):
""" Angular Frequency Profile channel
Attributes
----------
x : np.array
frequency ,Nf
y : np.array
Amplitude Na,Nf
tx : np.array
tx coordinate (,3)
rx : np.array
rx coordinates (,3)
az : np.array (,Na)
AFP azimutal range in radians
theta : link elevation angle
phi : link (txrx) azimuth angle (with offset)
tau : link delay (ns)
offset : float angle in radians
azimuth offset w.r.t global frame
Methods
-------
norm2
construct
electrical_delay
loadmes
toadp
estimate
peak
specular_model
"""
def loadmes(self,_filename,_filecal,fcGHz=32.6,BW=1.6,win='rect',ang_offset=0.37,ext='txt',dirmeas='meas/Espoo',refinement=False):
""" Load measurement file
Measurement files and the associated back to back calibration files
are placed in the mes directory of the project.
Parameters
----------
_filename : string
data matfile name
_filecal : string
calibration matfile name
fcGHz : float
center frequency
BW : float
measurement bandwidth
win : string
window type in ['rect','hamming','blackman']
ang_offset : float
angle in radian
ext : string
file extension 'txt' | '.mat'
diremeas : string
Notes
-----
This function updates :
+ self.x (frequency GHz)
+ self.y
+ self.az azimuth radians
The calibration file _filecal (.mat file) should be added in the data directory
In practice for Espoo B2B.mat
See Also
--------
pylayers.util.pyutil.getlong
"""
self._filename = _filename
self.BW = BW
self.fcGHz = fcGHz
self.fmin = fcGHz-BW/2.
self.fmax = fcGHz+BW/2.
self.win = win
self.refinement = refinement
# read calibration file (Matlab file) in the same directory as measurments (convention)
filecal = pyu.getlong(_filecal,dirmeas)
U = loadmat(filecal)
cal_trf = U['cal_trf'][:,0]
# read measurement file (.txt or Mat file)
filename = pyu.getlong(_filename,dirmeas)
if ext=='txt':
D = np.loadtxt(filename,skiprows=2)# load Back 2 Back calibration file
amp = D[:,2::2]
ang = D[:,3::2]
else:
D = loadmat(filename)
amp = D['amp']
ang = D['ang']
rotationangle = D['rotationangle'].squeeze()
# load Back 2 Back calibration file
#
# Transfer function reconstruction
#
self.Na = amp.shape[0]
self.Nf = amp.shape[1]
#
# select apodisation window
#
if win=='hamming':
window = np.hamming(self.Nf)
elif win=='blackman':
window = np.blackman(self.Nf)
else:
window = np.ones(self.Nf)
#
# complex transfer function
#
self.x = np.linspace(self.fmin,self.fmax,self.Nf)
self.fcGHz = self.x[int(len(self.x)/2)]
self.y = amp*np.exp(1j*ang*np.pi/180.)*cal_trf[None,:]*window
#
# if extension is txt file comes from ESPOO measurement
#
# self.az : 5.86 -> 1.94
if ext=='txt':
self.azmes = (360-D[:,0])*np.pi/180.
self.az = self.azmes + ang_offset - 2*np.pi
u = np.where(self.az<0)
self.az[u] = self.az[u] + 2*np.pi
else:
self.azmes = rotationangle*np.pi/180.
self.az = ang_offset - self.azmes
u = np.where(self.az<0)
self.az[u] = self.az[u] + 2*np.pi
def electrical_delay(self,tauns=0):
""" electrical delay
Parameters
----------
tauns : float
"""
self.y = self.y * np.exp(-2*1j*np.pi*self.x*tauns)
def toadp(self,imax=-1):
""" convert afp into adp (frequency->delay)
Notes
-----
tx and rx need to be defined
"""
# x : delay (starting at 0 ns)
# y : ifft axis 1 (frequency)
x = np.linspace(0,(len(self.x)-1)/(self.x[-1]-self.x[0]),len(self.x))
y = np.fft.ifft(self.y,axis=1)
if imax!=-1:
y = y[:,0:imax]
x = x[0:imax]
adp = ADPchannel(x=x,
y=y,
az=self.az,
tx=self.tx,
rx=self.rx,
fcGHz=self.fcGHz,
_filename=self._filename,
refinement=self.refinement,
ang_offset = self.ang_offset)
return adp
def estimate(self,taumax=200,phimax=2*np.pi):
""" estimate specular model parameters
Parameters
----------
taumax : float
phimax : float
See Also
--------
specular_model
"""
x_0 = self.peak()
x_est = optimize.fmin_l_bfgs_b(cost,
x_0,
args=(self.x,self.az),
disp=0,
approx_grad=1,
bounds=((0,2*x_0[0]),
(0,taumax),
(0,phimax)))[0]
#x_est = optimize.fmin_l_bfgs_b(cost,
# x_0,
# args=(self.x,self.az),
# disp=0,
# approx_grad=1,
# bounds=((0,2*x_0[0]),
# (0,2),
# (0,2),
# (0,taumax),
# (0,phimax)))[0]
#x_est = optimize.fmin(cost,x_0,args=(self.x,self.az))
Ck = AFPchannel()
Ck.specular_model(x_est,self.x,self.az)
#Ck.specular_model2(x_est,self.x,self.az)
D = self - Ck
return x_est,Ck, D
def specular_model(self,x,fGHz,phi,wH=[],HPBW=10*np.pi/180,GmaxdB=21):
""" Creates an AFP from a discrete specular model
Parameters
----------
x : [a0,a1,..,aK,tau0,tau1,...,tauk,phi0,...,phiK]
fGHz :
phi :
wH : windowing on frequency axis
HPBW : Half Power Beamwidth
Examples
--------
>>> import numpy as np
>>> rs = np.random.seed(1)
>>> E = st.expon(0.5)
>>> K = 5
>>> tauk = 250*np.random.rand(K)
>>> alphak = E.rvs(K)
>>> phik = 2*np.pi*np.random.rand(K)
>>> xk = np.hstack((alphak,tauk,phik))
>>> A = AFPchannel()
>>> fGHz = np.linspace(27,29,2001)
>>> wH = np.ones(len(fGHz))
>>> phi = np.linspace(0,2*np.pi,73)
>>> A.specular_model(xk,fGHz,phi,wH)
"""
K = int(len(x)/3)
assert(len(x)==3*K)
ak = x[0:K][:,None,None]
tk = x[K:2*K][:,None,None]
pk = x[2*K:3*K][:,None,None]
# tf : paths (0) , freq (1), angle (2)
if wH ==[]:
wH = np.ones(len(fGHz))
tf = ak*np.exp(-2*1j*np.pi*fGHz[None,:,None]*tk)*wH[None,:,None]
dphi = pk - phi[None,None,:]
Gmax = 10**(GmaxdB/10.)
g = np.exp(-(2*np.sqrt(np.log(2))*dphi/HPBW)**2)
tfg = tf*g
self.x = fGHz
self.fcGHz = self.x[int(len(self.x)/2)]
# self.y : angle(0) fGHz(1)
self.y = np.sum(tfg,axis=0).T
self.az = phi
#h = np.fft.ifft(H)
def specular_model2(self,x,fGHz,phi,wH=[],HPBW=10*np.pi/180,GmaxdB=21):
""" Creates an AFP from a discrete specular model
Parameters
----------
x : [a0,a1,..,aK,tau0,tau1,...,tauk,phi0,...,phiK]
fGHz :
phi :
wH : windowing on frequency axis
HPBW : Half Power Beamwidth
Examples
--------
>>> import numpy as np
>>> rs = np.random.seed(1)
>>> E = st.expon(0.5)
>>> K = 5
>>> tauk = 250*np.random.rand(K)
>>> alphak = E.rvs(K)
>>> phik = 2*np.pi*np.random.rand(K)
>>> xk = np.hstack((alphak,tauk,phik))
>>> A = AFPchannel()
>>> fGHz = np.linspace(27,29,2001)
>>> wH = np.ones(len(fGHz))
>>> phi = np.linspace(0,2*np.pi,73)
>>> A.specular_model(xk,fGHz,phi,wH)
"""
K = int(len(x)/3)
Nf = len(fGHz)
assert(len(x)==5*K)
ak = x[0:1*K][:,None,None]
bk = x[1*K:2*K][:,None,None]
ck = x[2*K:3*K][:,None,None]
tk = x[3*K:4*K][:,None,None]
pk = x[4*K:5*K][:,None,None]
# tf : paths (0) , freq (1), angle (2)
if wH ==[]:
wH = np.ones(len(fGHz))
a = ak * bk*(fGHz[None,:,None]-fGHz[int(Nf/2)])**ck
tf = a*np.exp(-2*1j*np.pi*fGHz[None,:,None]*tk)*wH[None,:,None]
dphi = pk - phi[None,None,:]
Gmax = 10**(GmaxdB/10.)
g = np.exp(-(2*np.sqrt(np.log(2))*dphi/HPBW)**2)
tfg = tf*g
self.x = fGHz
self.fcGHz = self.x[int(len(self.x)/2)]
# self.y : angle(0) fGHz(1)
self.y = np.sum(tfg,axis=0).T
self.az = phi
#h = np.fft.ifft(H)
class ADPchannel(bs.TUsignal):
""" Angular Delay Profile channel
Attributes
----------
az : array
azimuth in radian
ang_offset :
theta : float
phi : float
tau : float
_filename : string
short filename for saving
"""
def __init__(self,
x = np.array([]),
y = np.array([]),
az = np.array([]),
tx = np.array([]),
rx = np.array([]),
fcGHz=28,
_filename='',
refinement = False,
ang_offset = 0,
):
"""
Parameters
----------
x : np.array
delay
y : np.array
angle x delay
az : np.array
azimuth angle
tx : np.array
tx coordinates
rx : np.array
rx coordinates
_filename :
refinement : boolean
False
offset :
"""
bs.TUsignal.__init__(self, x=x, y=y,label='ADP')
self.az = az
self.tx = tx
self.rx = rx
self._filename = _filename
self.fcGHz = fcGHz
self.refinement = refinement
self.ang_offset = ang_offset
if ((len(self.tx) !=0 ) and (len(self.rx)!= 0)):
v = self.tx - self.rx
distLOS = np.linalg.norm(v)
self.taulos_geo = distLOS/0.3
self.anglos_geo = np.arctan2(v[1],v[0])*180/np.pi
if self.anglos_geo<0:
self.anglos_geo += 360
LFS = -(32.4 + 20*np.log10(fcGHz) + 20*np.log10(distLOS))
self.alphalos_geo = 10**(LFS/10.)
if self.anglos_geo<0:
self.anglos_geo = 2*np.pi+self.anglos_geo
alphapeak,taupeak,angpeak = self.peak(refinement=refinement)
self.angpeak_est = angpeak*180/np.pi
self.taupeak_est = taupeak
self.alphapeak_est = alphapeak
self._filename = _filename
def peak(self, refinement=False):
""" evaluate peak of PADP
Parameters
----------
refinment : boolean
provide a refined version of angular estimation
Returns
-------
alphapeak, taupeak , phipeak
"""
alphapeak = np.max(np.abs(self.y))
iphi, itau = np.where(np.abs(self.y)==alphapeak)
taupeak = self.x[itau][0]
if refinement:
pr = np.abs(self.y)[iphi-1:iphi+2,itau].squeeze()
azr = self.az[iphi-1:iphi+2]
Id = np.sum(pr)
In = np.sum(pr*azr)
phipeak = In/Id
else:
phipeak = self.az[iphi]
return alphapeak, taupeak, phipeak[0]
def correlate(self,adp,thresholddB=-105):
""" correlate ADP with an other ADP
Parameters
----------
adp : ADPchannel
Returns
-------
rhoE : energy ratio of padp Eadp/Eself
rhoEc : energy ratio of centered padp Ecadp/Ecself
rho : normalized intercorrelation : <self-mean(self),adp-mean(adp)>/Eself
rhon : intercorrelation of normalized padp <self_normalized,adp_normalized>
Notes
-----
This can be used to compare a measured PADP with a Ray tracing PADP
"""
#import ipdb
#ipdb.set_trace()
#
# apply the min dB level thresholding
#
tmp_self = np.abs(self.y)
tmp_adp = np.abs(adp.y)
u1 = np.where(20*np.log10(tmp_self)>thresholddB)
u2 = np.where(20*np.log10(tmp_adp)>thresholddB)
padp_self = np.zeros(tmp_self.shape)
padp_adp = np.zeros(tmp_adp.shape)
padp_self[u1] = tmp_self[u1]
padp_adp[u2] = tmp_adp[u2]
padpc_self = padp_self-np.mean(padp_self)
padpc_adp = padp_adp-np.mean(padp_adp)
Eself = np.max(si.correlate2d(padp_self,padp_self,mode='same'))
Ecself = np.max(si.correlate2d(padpc_self,padpc_self,mode='same'))
Eadp = np.max(si.correlate2d(padp_adp,padp_adp,mode='same'))
Ecadp = np.max(si.correlate2d(padpc_adp,padpc_adp,mode='same'))
#Eself = np.sum(padp_self*padp_self)
#Ecself = np.sum(padpc_self*padpc_self)
#Eadp = np.sum(padp_adp*padp_adp)
#Ecadp = np.sum(padpc_adp*padpc_adp)
padpcn_self = padpc_self/np.sqrt(Ecself)
padpcn_adp = padpc_adp/np.sqrt(Ecadp)
rhoE = Eadp/Eself
rhoEc = Ecadp/Ecself
#rho = np.sum(padpc_self*padpc_adp)/Eself
#rhoc = np.sum(padpc_self*padpc_adp)/Ecself
#rhon = np.sum(padpcn_self*padpcn_adp)
rho = np.max(si.correlate2d(padpc_self,padpc_adp,mode='same'))/Eself
rhoc = np.max(si.correlate2d(padpc_self,padpc_adp,mode='same'))/Ecself
rhon = np.max(si.correlate2d(padpcn_self,padpcn_adp,mode='same'))
return rhoE,rhoEc,rho,rhoc,rhon
def svd(self):
""" perform singular value decomposition of the PADP
Notes
-----
It creates a dictionnay
{'sv':sv,'b':b}
"""
[U,S,V]=la.svd(self.y)
self.d = {}
for k,sv in enumerate(S):
b = sv*np.dot(U[:,k][:,None],V[k,:][None,:])
self.d[k] = {'sv':sv,'b':b}
def imshow(self,**kwargs):
""" show Angular Delay Profile
Parameters
----------
origin: string
'lower'
vmax : -65,
vmin : -120,
interpolation : string
'nearest',
alpha:1,
imin = 0
imax = -1
dB = True
fig = []
ax = []
fonts = 18
label = ''
blos = True
orientation = -1
bcolorbar = False
ang_offset = 450
"""
defaults = {'origin':'lower',
'vmax' : -65,
'vmin' : -120,
'interpolation' : 'nearest',
'alpha':1,
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
imin = kwargs.pop('imin',0)
imax = kwargs.pop('imax',-1)
dB = kwargs.pop('dB',True)
fig = kwargs.pop('fig',[])
ax = kwargs.pop('ax',[])
fonts = kwargs.pop('fontsize',18)
label = kwargs.pop('label','')
blos = kwargs.pop('blos',True)
orientation = kwargs.pop('orientation',-1)
bcolorbar = kwargs.pop('colorbar',False)
ang_offset = kwargs.pop('ang_offset',450)
if fig==[]:
fig = plt.figure()
if ax==[]:
ax = fig.add_subplot(111)
#rd2deg = 180/np.pi
#extent = (self.az[-1]*rd2deg+agoffset,
# self.az[0]*rd2deg+agoffset,
# self.x[imin],self.x[imax])
#extent = (self.az[0]*rd2deg,
# self.az[-1]*rd2deg,
# self.x[imin],self.x[imax])
agmin = self.az.min()*180/np.pi
agmax = self.az.max()*180/np.pi
extent = (agmin,agmax,self.x[imin],self.x[imax])
if orientation==-1:
padp = np.abs(self.y)[::-1,imin:imax].T
else:
padp = np.abs(self.y)[:,imin:imax].T
if dB:
padp = 20*np.log10(padp)
im = ax.imshow(padp,extent=extent,aspect='auto',**kwargs)
#plt.axis('equal')
if blos:
a1 = ang_offset + self.angpeak_est
ax.scatter(a1,self.taupeak_est,marker='*',s=70,color='r')
if hasattr(self,'anglos_geo'):
a2 = ang_offset + self.anglos_geo
ax.scatter(a2,self.taulos_geo,marker='D',s=70,color='g')
if bcolorbar:
cbar = plt.colorbar(im)
if dB:
cbar.set_label(label+' dB',fontsize=fonts)
else:
cbar.set_label(label+' linear',fontsize=fonts)
for t in cbar.ax.get_yticklabels():
t.set_fontsize(fonts)
ax.set_ylabel('Propagation delay [ns]',fontsize=fonts)
ax.set_xlabel('Angle[deg]',fontsize=fonts)
#ax.title('PADP',fontsize=fonts)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fonts)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fonts)
return fig,ax
def clean(self,threshold_dB=20):
""" clean ADP
Parameters
----------
threshold_dB : float
Notes
-----
All values below Max -threshold are set to zero
"""
Na = self.y.shape[0]
P = np.real(self.y*np.conj(self.y))
MaxdB = 10*np.log10(np.max(P))
u = np.where(10*np.log10(P) < MaxdB-threshold_dB)
self.y[u] = 0+0j
def pap(self,
fcGHz=28,
fontsize=18,
figsize=(10,10),
Gmax=22.68,
Gmin=19,
threshdB=-95,
label='',
color='k',
fig=[],
ax=[],
xlabel=True,
ylabel=True,
legend=True):
""" Calculate Power Angular Profile
Parameters
----------
fcGHz : float
fontsize : int
figsize : tuple
fig :
ax :
xlabel : boolean
ylabel : boolean
legen : boolean
Returns
-------
fig,ax
"""
Na = self.y.shape[0]
# integration over frequency
# adp (angle)
Gtyp = (Gmax+Gmin)/2.
Py = np.real(self.y*np.conj(self.y))
pdp0 = np.sum(Py,axis=0)
pdp0dB = 10*np.log10(pdp0)
u = pdp0dB > threshdB
adp = np.sum(Py[:,u],axis=1)
#mPya = np.median(Py,axis=0)
#mPya = np.mean(Py,axis=0)
#sPy = Py-mPya[None,:]
#adp = np.sum(Pyz,axis=1)
u = np.where(adp==max(adp))[0]
if fig==[]:
fig = plt.figure(figsize=figsize)
else:
fig = fig
if ax == []:
ax = fig.add_subplot(111)
else:
ax = ax
#ax.plot(self.az*180/np.pi,10*np.log10(adp),color='r',label=r'$10\log_{10}(\sum_{\tau} PADP(\phi,\tau))$',linewidth=1.5)
#ag = np.linspace(45,260,len(adp))
ag = self.az*180/np.pi
ax.plot(ag, #360self.az*180/np.pi,
10*np.log10(adp)-Gtyp,
color=color,
label=label,
linewidth=1.5)
ax.vlines(self.anglos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
ax.hlines(-120,xmin=ag[0],xmax=ag[-1],linestyles='dashed',color='black')
#ax.set_ylim(-80,-60)
if xlabel:
ax.set_xlabel('Angle [deg]',fontsize=fontsize)
if ylabel:
ax.set_ylabel('level (dB)',fontsize=fontsize)
#ax.set_title(self._filename,fontsize=fontsize)
if legend:
plt.legend(loc='best')
return fig,ax
def app(self,**kwargs):
""" Calculate Angular Power Profile
"""
Na = self.y.shape[0]
app = np.real(np.sum(self.y*np.conj(self.y),axis=1))
def pltcir(self,phideg,Gain=21):
""" plot Channel Impulse Response
Parameters
----------
phideg : f
Returns
-------
fig,ax
u
"""
phi = phideg*np.pi/180.
dang = np.abs(self.az - phi)
u = np.where(dang==np.min(dang))[0][0]
fig = plt.figure()
ax = fig.add_subplot(111)
FS = -(32.4+20*np.log10(self.x*0.3)+20*np.log10(self.fcGHz))
plt.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gain)
plt.semilogx(self.x,FS,'k',linewidth=2)
plt.show()
return fig,ax,u
def pdp_v(self,**kwargs):
""" Calculate and plot Power Delay Profile
Parameters
----------
fcGHz : float
"""
defaults = { 'figsize':(10,10),
'fontsize':18,
'fig' : [],
'ax': [],
'xlabel': True,
'ylabel': True,
'legend': True,
'losdelay': True,
'freespace': True,
'desembeded': False,
'noisefloor': False,
'typic':True,
'semilogx':True,
'bcir':False,
'raw': False,
'Gmax':22.68,
'Gmin':19,
'threshdB':75,
'imax':-1,
'Tilt':10,
'HPBW':10,
'dphi':5,
'marker':'*',
'color':'k',
'label':'',
'linewidth':1
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
Gmax = kwargs.pop('Gmax')
Gmin = kwargs.pop('Gmin')
imax = kwargs.pop('imax')
threshdB = kwargs.pop('threshdB')
Gtyp = (Gmax+Gmin)/2.
# get peak value of the PADP
alpha,tau,phi = self.peak()
Na = self.y.shape[0]
# pdp : power delay profie
Py = np.real(self.y*np.conj(self.y))
pap0 = np.sum(Py,axis=1)
pap0dB = 10*np.log10(pap0)
u = pap0dB>np.percentile(pap0dB,threshdB)
pdp = np.sum(Py[u,:],axis=0)
pdp = pdp[0:imax]
x = self.x[0:imax]
# spdp : square root of power delay profie
spdp = TUchannel(x=x,y=np.sqrt(pdp))
u = np.where(pdp==max(pdp))[0]
FS = -(32.4+20*np.log10(x*0.3)+20*np.log10(self.fcGHz))
AttmaxdB = 20*np.log10(alpha)
#Gmax = AttmaxdB-FS[u]
#Gmax_r = np.round(Gmax[0]*100)/100.
#
# The -3dB is specific to the Aalto measurement and desembeding (1/2)
#
pdp_min = 10*np.log10(pdp)-Gmax-1
pdp_max = 10*np.log10(pdp)-Gmin-1
pdp_typ = 10*np.log10(pdp)-Gtyp-1
uflashing = np.where(pdp_typ>FS)
umin = np.where(pdp_min>-118)
pdp_min_thr = pdp_min[umin]
umax = np.where(pdp_max>-118)
pdp_max_thr = pdp_max[umax]
PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs['fig']
if kwargs['ax'] == []:
ax = fig.add_subplot(111)
else:
ax = kwargs['ax']
if kwargs['semilogx']:
if kwargs['raw']:
ax.semilogy(10*np.log10(pdp),x,color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
#ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
if kwargs['desembeded']:
ax.semilogy(pdp_min,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
ax.semilogy(pdp_max,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
if kwargs['typic']:
ax.semilogy(pdp_typ,x,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
ax.semilogy(pdp_typ[uflashing],x[uflashing],label=kwargs['label'],color='red',linewidth=kwargs['linewidth'])
if kwargs['freespace']:
if kwargs['typic']:
ax.semilogy(FS,x,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
else:
ax.semilogy(FS,x,color='k',linewidth=2,label='Free Space path profile')
if kwargs['losdelay']:
ax.hlines(self.taupeak_est,xmin=-130,xmax=-40,linestyles='dashed',color='blue')
ax.hlines(self.taulos_geo,xmin=-130,xmax=-40,linestyles='dashed',color='red')
if kwargs['noisefloor']:
ax.vlines(-130,ymin=0,ymax=x[-1],linestyles='dashed',color='black')
#ax.set_xlim(10,1000)
if kwargs['xlabel']:
ax.set_ylabel('Delay (ns) log scale',fontsize=kwargs['fontsize'])
if kwargs['bcir']:
phi = self.angpeak_est*np.pi/180.
dang = np.abs(self.az - phi)
u = np.where(dang==np.min(dang))[0][0]
ax.semilogx(20*np.log10(np.abs(self.y[u,:]))-Gmax,x,color='r')
ax.semilogx(20*np.log10(np.abs(self.y[u,:]))-Gmin,x,color='g')
else:
if kwargs['raw']:
ax.plot(10*np.log10(pdp),x,color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
ax.plot(np.array([AttmaxdB]),np.array([tau]),color='k')
if kwargs['desembeded']:
ax.plot(pdp_min,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax))
ax.plot(pdp_max,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin))
if kwargs['typic']:
ax.plot(pdp_typ,x,label=kwargs['label'],color=kwargs['color'])
ax.scatter(pdp_typ[uflashing],x[uflashing],s=80,c='red')
if kwargs['freespace']:
if kwargs['typic']:
ax.plot(FS,x,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
ax.plot(FS-(Gmax-Gmin)/2,x,color='blue',linewidth=0.5,label='Free Space path profile')
ax.plot(FS+(Gmax-Gmin)/2,x,color='blue',linewidth=0.5,label='Free Space path profile')
else:
ax.plot(FS,x,color='k',linewidth=2,label='Free Space path profile')
if kwargs['losdelay']:
ax.hlines(self.taupeak_est,xmin=-130,xmax=-40,linestyles='dashed',color='blue')
ax.hlines(self.taulos_geo,xmin=-130,xmax=-40,linestyles='dashed',color='red')
if kwargs['noisefloor']:
ax.vlines(-130,ymin=0,ymax=x[-1],linestyles='dashed',color='red')
#ax.set_xlim(0,1000)
if kwargs['xlabel']:
ax.set_ylabel('Delay (ns)',fontsize=kwargs['fontsize'])
if kwargs['bcir']:
phi = self.angpeak_est*np.pi/180.
dang = np.abs(self.az - phi)
u = np.where(dang==np.min(dang))[0][0]
ax.plot(20*np.log10(np.abs(self.y[u,:]))-Gmax,x,'r')
ax.plot(20*np.log10(np.abs(self.y[u,:]))-Gmin,x,'g')
if kwargs['ylabel']:
ax.set_xlabel('level (dB)',fontsize=kwargs['fontsize'])
#ax.set_title(self._filename+' '+str(PL))
if kwargs['legend']:
plt.legend(loc='best')
ax.set_ylim(0,x[-1])
return fig,ax
def pdp(self,**kwargs):
""" Calculate the Power Delay Profile
Parameters
----------
fcGHz : float
figsize':(1010)
fontsize':18
fig' : []
ax': []
xlabel': True
ylabel': True
legend': True
losdelay': True
freespace': True
desembeded': False
typic':True
semilogx':True
bcir':False
raw': False
Gmax':22.68
Gmin':19
Tilt':10
HPBW':10
Returns
-------
tau
pdp
"""
defaults = { 'figsize':(10,10),
'fontsize':18,
'fig' : [],
'ax': [],
'xlabel': True,
'ylabel': True,
'legend': True,
'losdelay': True,
'freespace': True,
'desembeded': False,
'typic':True,
'semilogx':True,
'bcir':False,
'raw': False,
'bplot':True,
'Gmax':22.68,
'Gmin':19,
'Tilt':10,
'HPBW':10,
'dphi':5,
'marker':'*',
'color':'k',
'label':'',
'linewidth':1
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
# get antenna gain extremum
# typical value is chosen as the mean value
Gmax = kwargs.pop('Gmax')
Gmin = kwargs.pop('Gmin')
Gtyp = (Gmax+Gmin)/2.
# get peak value of the PADP
# it is assume that this retreave the LOS component
alpha, tau, phi = self.peak()
# Na : number of angular steps
Na = self.y.shape[0]
# pdp : power delay profile
pdp = np.real(np.sum(self.y*np.conj(self.y),axis=0))
# delay index of pdp maximum
u = np.where(pdp==max(pdp))[0]
# omnidirectional free space path loss
# spdp : square root of power delay profile
spdp = TUchannel(x=self.x,y=np.sqrt(pdp))
u = np.where(pdp==max(pdp))[0]
FS = -(32.4+20*np.log10(self.x*0.3)+20*np.log10(self.fcGHz))
AttmaxdB = 20*np.log10(alpha)
#Gmax = AttmaxdB-FS[u]
#Gmax_r = np.round(Gmax[0]*100)/100.
#
# The -3dB is specific to the Aalto measurement and desembeding (1/2)
#
pdpdB = 10*np.log10(pdp)
pdp_min = pdpdB-Gmax-1
pdp_max = pdpdB-Gmin-1
pdp_typ = pdpdB-Gtyp-1
umin = np.where(pdp_min>-118)
pdp_min_thr = pdp_min[umin]
umax = np.where(pdp_max>-118)
pdp_max_thr = pdp_max[umax]
PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
if kwargs['bplot']:
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs['fig']
if kwargs['ax'] == []:
ax = fig.add_subplot(111)
else:
ax = kwargs['ax']
if kwargs['semilogx']:
if kwargs['raw']:
ax.semilogx(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
#ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
if kwargs['desembeded']:
ax.semilogx(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
ax.semilogx(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
if kwargs['typic']:
ax.semilogx(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
if kwargs['freespace']:
if kwargs['typic']:
ax.semilogx(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
else:
ax.semilogx(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
if kwargs['losdelay']:
ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
#ax.set_xlim(10,1000)
if kwargs['xlabel']:
ax.set_xlabel('Delay (ns) log scale',fontsize=kwargs['fontsize'])
if kwargs['bcir']:
phi = self.angpeak_est*np.pi/180.
dang = np.abs(self.az - phi)
u = np.where(dang==np.min(dang))[0][0]
ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,color='r')
ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,color='g')
else:
if kwargs['raw']:
ax.plot(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
ax.plot(np.array([tau]),np.array([AttmaxdB]),color='k')
if kwargs['desembeded']:
ax.plot(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax))
ax.plot(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin))
if kwargs['typic']:
ax.plot(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'])
if kwargs['freespace']:
if kwargs['typic']:
ax.plot(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
else:
ax.plot(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
if kwargs['losdelay']:
ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
#ax.set_xlim(0,1000)
if kwargs['xlabel']:
ax.set_xlabel('Delay (ns)',fontsize=kwargs['fontsize'])
if kwargs['bcir']:
phi = self.angpeak_est*np.pi/180.
dang = np.abs(self.az - phi)
u = np.where(dang==np.min(dang))[0][0]
ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,'r')
ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,'g')
if kwargs['ylabel']:
ax.set_ylabel('level (dB)',fontsize=kwargs['fontsize'])
ax.set_title(self._filename+' '+str(PL))
if kwargs['legend']:
plt.legend(loc='best')
return fig,ax
else:
return (self.x,pdp)
# PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
# return self.x,pdp
# if kwargs['fig']==[]:
# fig = plt.figure(figsize=kwargs['figsize'])
# else:
# fig = kwargs['fig']
# if kwargs['ax'] == []:
# ax = fig.add_subplot(111)
# else:
# ax = kwargs['ax']
#
# if kwargs['semilogx']:
# if kwargs['raw']:
# ax.semilogx(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
# #ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
#
# if kwargs['desembeded']:
# ax.semilogx(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
# ax.semilogx(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
#
# if kwargs['typic']:
# ax.semilogx(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
#
# if kwargs['freespace']:
# if kwargs['typic']:
# ax.semilogx(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
# else:
# ax.semilogx(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
#
# if kwargs['losdelay']:
# ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
# ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
#
# #ax.set_xlim(10,1000)
# if kwargs['xlabel']:
# ax.set_xlabel('Delay (ns) log scale',fontsize=kwargs['fontsize'])
#
# if kwargs['bcir']:
# phi = self.angpeak_est*np.pi/180.
# dang = np.abs(self.az - phi)
# u = np.where(dang==np.min(dang))[0][0]
# ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,color='r')
# ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,color='g')
# else:
# if kwargs['raw']:
# ax.plot(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
# ax.plot(np.array([tau]),np.array([AttmaxdB]),color='k')
#
# if kwargs['desembeded']:
# ax.plot(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax))
# ax.plot(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin))
#
# if kwargs['typic']:
# ax.plot(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'])
#
# if kwargs['freespace']:
# if kwargs['typic']:
# ax.plot(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
# else:
# ax.plot(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
#
# if kwargs['losdelay']:
# ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
# ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
#
# #ax.set_xlim(0,1000)
# if kwargs['xlabel']:
# ax.set_xlabel('Delay (ns)',fontsize=kwargs['fontsize'])
#
# if kwargs['bcir']:
# phi = self.angpeak_est*np.pi/180.
# dang = np.abs(self.az - phi)
# u = np.where(dang==np.min(dang))[0][0]
# ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,'r')
# ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,'g')
#
# if kwargs['ylabel']:
# ax.set_ylabel('level (dB)',fontsize=kwargs['fontsize'])
# ax.set_title(self._filename+' '+str(PL))
# if kwargs['legend']:
# plt.legend(loc='best')
#
# return fig,ax
def tomap(self,L,**kwargs):
""" surimpose PADP on the Layout
Parameters
----------
L : Layout
xmin : 10
xmax : 400
ymin : 10
ymax : 400,
Nx :3000,
Ny :3000,
'cmap':'jet',
'mode':'image',
'excess':'los',
'figsize':(20,20),
'thmindB':-110,
'thmaxdB':-108,
'vmindB':-110,
'vmaxdB':-60,
'offset':0,
'display':True,
'compensated':True,
'tauns_excess':0
"""
xmin = kwargs.pop('xmin',0)
ymin = kwargs.pop('ymin',0)
xmax = kwargs.pop('xmax',20)
ymax = kwargs.pop('ymax',20)
mode = kwargs.pop('mode','sbounce')
vmindB = kwargs.pop('vmindB',-110)
vmaxdB = kwargs.pop('vmaxdB',-60)
thmindB = kwargs.pop('thmindB',-110)
thmaxdB = kwargs.pop('thmaxdB',-108)
Nx = kwargs.pop('Nx',3000)
Ny = kwargs.pop('Ny',3000)
cmap = kwargs.pop('cmap','jet')
offset = kwargs.pop('offset',0)
excess = kwargs.pop('excess','los')
display = kwargs.pop('display',True)
compensated = kwargs.pop('compensated',False)
tauns_excess = kwargs.pop('tauns_excess',0)
figsize = kwargs.pop('figsize',(20,20))
if 'fig' not in kwargs:
fig = plt.figure(figsize=figsize)
else:
fig = kwargs['fig']
if 'ax' not in kwargs:
ax = fig.add_subplot(111)
else:
ax = kwargs['ax']
#
# Prepare the array for spatial information in horizontal plane x,y
# Nx and Ny should be large enough
#
Z = np.zeros((Nx,Ny),dtype=complex)
#
# spatial indexation in x and y
#
xr = np.linspace(xmin,xmax,Nx)
yr = np.linspace(xmin,xmax,Ny)
# distance Tx Rx in the horizontal plane (2D)
dtx_rx_2D = np.sqrt((self.tx[0]-self.rx[0])**2+(self.tx[1]-self.rx[1])**2)
# distance Tx Rx in the horizontal plane (3D)
dtx_rx = np.sqrt((self.tx[0]-self.rx[0])**2+(self.tx[1]-self.rx[1])**2+(self.tx[2]-self.rx[2])**2)
# distance Tx ground Rx (3D)
dtx_gr_rx = np.sqrt(dtx_rx_2D**2+(self.tx[2]+self.rx[2])**2)
assert(dtx_gr_rx > dtx_rx)
# difference of heights beween Tx and Rx
deltah = np.abs(self.tx[2]-self.rx[2])
#
# Dt = vec(P,Tx)
# Dr = vec(Rx,P)
#
dxt =(self.tx[0]-xr)[:,None]
dyt =(self.tx[1]-yr)[None,:]
#
# nwt : distance between Tx and each point of the plane
# nwr : distance between Rx and each point of the plane
#
nwt = np.sqrt(dxt*dxt+dyt*dyt)
dxr =(xr-self.rx[0])[:,None]
dyr =(yr-self.rx[1])[None,:]
nwr = np.sqrt(dxr*dxr+dyr*dyr)
# dsbounce : elliposidal distance (single bounce hypothesis)
dsbounce = nwt+nwr
# maximal ellipsoidal distance on the Z selected region
dmax = dsbounce.max()
taumax = dmax/0.3
# determine index of maximal distance
if self.x.max()>taumax:
itaumax = np.where(self.x>taumax)[0][0]
else:
itaumax=len(self.x)-1
# convert maximal distance into maximal delay (self.x is delay)
taumax = self.x[itaumax]
# determine coefficient between delay and index ( ns --> integer)
tau2idx = taumax/itaumax
# Determine the angle of arrival
# direction of arrival normalization of the vector
dxrn = dxr/nwr
dyrn = dyr/nwr
# angle of arrival in [-pi,pi]
phi = np.arctan2(dyrn,dxrn)-offset*np.pi/180
# back in [0-2pi]
phi = (1-np.sign(phi))*np.pi+phi
#iphi=((315-phi*180/np.pi)/5).astype(int)
iphi=((360-phi*180/np.pi)/5).astype(int)
drpt = np.sqrt(dxr*dxr+dyr*dyr+dxt*dxt+dyt*dyt)
dpr = np.sqrt(dxr*dxr+dyr*dyr)
if mode=='sbounce':
iid = np.round((np.sqrt(dxt*dxt+dyt*dyt)+np.sqrt(dxr*dxr+dyr*dyr))/(0.3*tau2idx)).astype('int')
else:
#d = np.round(np.sqrt(dxr*dxr+dyr*dyr)/(0.3*0.625)).astype('int')
#d = np.round(np.sqrt(dxr*dxr+dyr*dyr)/(0.3*0.625)).astype('int')
alpha = np.arctan(deltah/drpt)
dv = dpr/np.cos(alpha)
iid = np.round(dv/(0.3*tau2idx)).astype('int')
#pdb.set_trace()
#
# create indexation for spatial region Z
#
ix = np.arange(Nx)[:,None]
iy = np.arange(Ny)[None,:]
# ird : index for delays (d for delays)
ird = iid[ix,iy].ravel()
# irp : index for directio of arrival (p for phi)
irp = iphi[ix,iy].ravel()
#
# (d < dmax ) and (d>dlos+tauns_excess)
# iphi >= 0 and iphi < Nphimax
ilos = np.round((dtx_rx/(0.3*tau2idx))).astype(int)
iground = np.round((dtx_gr_rx/(0.3*tau2idx))).astype(int)
iexcess = np.round(tauns_excess/tau2idx).astype(int)
if excess=='los':
ud = np.where((ird<itaumax) & (ird>ilos+iexcess))
if excess=='ground':
ud = np.where((ird<itaumax) & (ird>iground+iexcess))
up = np.where((irp>=0) & (irp<len(self.az)))
# determine the index of points in a corona wich satisfy jointly the
# condition on delays and angles
#
u = np.intersect1d(ud,up)
# ravelize Z (2D -> 1D)
rz = Z.ravel()
# filling rz with self.y nphi,Ntau
rz[u] = self.y[irp[u],ird[u]]
#
# back to matrix form
#
Z = rz.reshape(Nx,Ny)
lmbda = 0.3/self.fcGHz
sqG = 10
Z_compensated = Z*(4*np.pi*dtx_rx)/(sqG*lmbda)
if compensated:
ZdB = 20*np.log10(np.abs(Z_compensated.T))
else:
ZdB = 20*np.log10(np.abs(Z.T))
mask = ((ZdB.all()>thmindB) and (ZdB.all()<thmaxdB))
#mzdB = ma.masked_array(ZdB,mask)
ZdBmax = ZdB.max()
ZdBmin = ZdB.min()
#
# constructing figure
#
if display:
#fig=plt.figure(figsize=figsize)
fig,ax = L.showG('s', fig=fig, ax=ax, labels=0)
#plt.axis('on')
ax.imshow(ZdB, extent=(xr[0],xr[-1],yr[0],yr[-1]),
cmap = cmap,
origin = 'lower',
alpha = 0.9,
vmin = ZdBmax - 60,
vmax = ZdBmax, interpolation = 'nearest')
#plt.imshow(mzdB,alpha=0.9,origin='lower')
ax.plot(self.tx[0],self.tx[1],'og')
ax.plot(self.rx[0],self.rx[1],'ob')
#plt.colorbar()
ax.set_title(self._filename)
#plt.savefig(self._filename+'.png')
#return Z,np.linspace(xr[0],xr[-1],Nx),np.linspace(yr[0],yr[-1],Ny)
return fig,ax
def polarplot(self,**kwargs):
""" polar plot of PADP
Parameters
-----------
fig
ax
figsize
typ : string
Ndec : int
decimation factor (1)
imax : int
max value 150
vmin : float
-120
vmax : float
-50
cmap : colormap
title : PADP
Returns
-------
fig , ax , pc (colormash)
"""
defaults = { 'fig':[],
'ax':[],
'figsize':(10,10),
'typ':'l20',
'Ndec':1,
'vmin':-120,
'vmax':-50,
'imax':150,
'alpha':1.,
'bcolorbar':True,
'cmap': plt.cm.jet,
'title':'PADP'
}
cvel = 0.3
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs.pop('fig')
if kwargs['ax'] == []:
ax = fig.add_subplot(111,polar=True)
else:
ax = kwargs.pop('ax')
imax = kwargs.pop('imax')
Ndec = kwargs.pop('Ndec')
vmin = kwargs.pop('vmin')
vmax = kwargs.pop('vmax')
cmap = kwargs.pop('cmap')
alpha = kwargs.pop('alpha')
title = kwargs.pop('title')
rho,theta = np.meshgrid(self.x*cvel,self.az)
# convert y data in desired format
dt,ylabels = self.cformat(**kwargs)
val = dt[:,0::Ndec][:,0:int(imax/Ndec)]
th = theta[:,0::Ndec][:,0:int(imax/Ndec)]
rh = rho[:,0::Ndec][:,0:int(imax/Ndec)]
#vmin = np.min(val)
#vmax = np.max(val)
#Dynamic = max_val-vmin
pc = ax.pcolormesh(th,rh,val,cmap=cmap,vmin=vmin, vmax=vmax, alpha=alpha)
#ptx = ax.plot(self.az,self.x*cvel,'or')
if kwargs['bcolorbar']:
fig.colorbar(pc,orientation='horizontal')
ax.set_title(title)
return fig,ax,pc
#ax.axis('equal')
#ax.axis('equal')
def toafp(self,fmin):
""" angular delay profile -> angular frequency profile
"""
x = np.linspace(0,(len(self.x)-1)/(self.x[-1]-self.x[0]),len(self.x))+fmin
y = np.fft.fft(self.y,axis=1)
afp = AFPchannel(x=x,
y=y,
az=self.az,
tx=self.tx,
rx=self.rx,
_filename = self._filename,
refinement = self.refinement,
ang_offset = self.ang_offset)
return afp
class TBchannel(bs.TBsignal):
    """ radio channel in the non uniform delay domain

    x carries the path delays (ns) and y the associated amplitudes.
    """
    def tau_Emax(self):
        r""" calculate the delay of the maximum energy peak

        .. math::

            \arg\max_{\tau} y^{2}(\tau)
        """
        y2 = (self.y) ** 2
        maxy2 = max(y2)
        u = np.nonzero(y2 == maxy2)[0]
        tau_Emax = self.x[u]
        return(tau_Emax)

    def tau_moy(self, alpha=0.1, threshold_dB = 20, tau0=0):
        """ calculate the mean excess delay

        Parameters
        ----------
        alpha : float
            unused, kept for backward compatibility
        threshold_dB : float
            only the energy above (peak - threshold_dB) is accounted for
        tau0 : float
            unused, kept for backward compatibility
        """
        u = np.max(self.y*self.y)
        # power threshold relative to the peak
        v = 10**(np.log10(u)-threshold_dB/10.)
        uf = np.where(self.y*self.y > v)
        # uf is a 1-tuple, hence uf[-1] is the index array itself
        num = np.sum(self.y[uf]*self.y[uf]*self.x[uf[-1]])
        den = np.sum(self.y[uf]*self.y[uf])
        taum = num/den
        return(taum)

    def delays(self):
        r""" calculate delay parameters and orthogonality factor from the cir

        Returns
        -------
        taum :
            mean excess delay
        delayspread :
            rms delay spread
        of :
            orthogonality factor

        References
        ----------
        N. Mehta, A. Molisch, L. Greenstein, "Orthogonality Factor in
        WCDMA Downlinks in Urban Macrocellular Environments"

        .. math::

            \beta_0 = 1 - \frac{\sum_{i=1}^{L}|\alpha_i|^4}
                               {\left(\sum_{i=1}^{L}|\alpha_i|^2\right)^2}
        """
        self.flatteny(reversible=True)
        y2 = self.yf*self.yf
        y4 = y2*y2
        # bug fix : np.sum replaces the bare builtin `sum`, which does not
        # accept an `axis` keyword
        taum = np.sum(self.x*y2,axis=0)/np.sum(y2,axis=0)
        delayspread = np.sqrt(np.sum((self.x-taum)*(self.x-taum)*y2)/np.sum(y2,axis=0))
        of = 1 - np.sum(y4,axis=0)/np.sum(y2,axis=0)**2
        return taum,delayspread,of

    def Kfactor(self,threshold_dB=20,dB=True):
        """ determine the Ricean K factor

        Parameters
        ----------
        threshold_dB : float
            only the energy above (peak - threshold_dB) is taken into account
        dB : boolean
            if True the value is returned in dB
        """
        u = np.max(self.y*self.y)
        v = 10**(np.log10(u)-threshold_dB/10.)
        vmax = np.where(self.y*self.y==u)
        Pmax = self.y[vmax]*self.y[vmax]
        uf = np.where(self.y*self.y>v)
        Ptot = np.sum(self.y[uf]*self.y[uf])
        # ratio of dominant path power to the power of all other paths
        K = Pmax/(Ptot-Pmax)
        if dB:
            K=10*np.log10(K)
        return K[0]

    def tau_rms(self, alpha=0.1,threshold_dB=20, tau0=0):
        r""" calculate the root mean square delay spread

        Parameters
        ----------
        alpha : float
            unused, kept for backward compatibility
        threshold_dB : float
            only the energy above (peak - threshold_dB) is accounted for
        tau0 : float
            forwarded to tau_moy (currently unused there)

        Notes
        -----
        .. math::

            \sqrt{\frac{\int (\tau-\tau_m)^{2} PDP(\tau) d\tau}
                       {\int PDP(\tau) d\tau}}

        See Also
        --------
        TUsignal.ecdf
        TUsignal.tau_moy
        """
        u = np.max(self.y*self.y)
        v = 10**(np.log10(u)-threshold_dB/10.)
        uf = np.where(self.y*self.y>v)
        # bug fix : tau0 was previously passed positionally into the
        # `alpha` slot of tau_moy
        taum = self.tau_moy(tau0=tau0, threshold_dB=threshold_dB)
        num = np.sum(self.y[uf]*self.y[uf]*(self.x[uf[-1]]-taum)**2)
        den = np.sum(self.y[uf]*self.y[uf])
        taurms = np.sqrt(num/den)
        return taurms

    def toFD(self,fGHz=np.linspace(2,5,256)):
        """ transform to the frequency domain

        Parameters
        ----------
        fGHz : np.ndarray (,Nf)
            frequency in GHz

        Returns
        -------
        H : Tchannel
        """
        # sum of complex exponentials over the delays
        z = np.sum(self.y[:,None]*np.exp(-2*1j*fGHz[None,:]*np.pi*self.x[:,None]),axis=0)
        H = Tchannel(x=fGHz,y=z,tau=self.x)
        return H

    def SalehValenzuela(self,**kwargs):
        """ generic Saleh and Valenzuela model

        Parameters
        ----------
        Lam : clusters Poisson process parameter (ns)
        lam : rays Poisson process parameter (ns)
        Gam : clusters exponential decay factor
        gam : rays exponential decay factor
        T : observation duration

        Examples
        --------
        >>> from pylayers.antprop.channel import *
        >>> C=TBchannel()
        >>> C.SalehValenzuela()
        >>> f,a = C.stem()
        """
        defaults = { 'Lam' : .1,
                     'lam' : .5,
                     'Gam' : 30,
                     'gam' : 5 ,
                     'T'   : 100}
        for k in defaults:
            if k not in kwargs:
                kwargs[k]=defaults[k]
        Lam = kwargs['Lam']
        lam = kwargs['lam']
        Gam = kwargs['Gam']
        gam = kwargs['gam']
        T   = kwargs['T']
        # bug fix : rvs sizes must be integers (1.2*T/Lam is a float)
        Nr  = int(np.ceil(1.2*T/Lam))
        Nc  = int(np.ceil(1.2*T/lam))
        # NOTE(review): st.expon(1./Lam) sets `loc`, not `scale` ;
        # an exponential with rate Lam would be st.expon(scale=1./Lam).
        # Kept as is to preserve the historical model output -- confirm.
        e1  = st.expon(1./Lam)
        e2  = st.expon(1./lam)
        # cluster times of arrival
        tc  = np.cumsum(e1.rvs(Nr))
        tc  = tc[np.where(tc<T)]
        Nc = len(tc)
        tauc = np.kron(tc,np.ones((1,Nr)))[0,:]
        # ray times of arrival (relative to their cluster)
        taur = np.cumsum(e2.rvs((Nr,Nc)),axis=0).ravel()
        # exponential decays of clusters and rays
        etc = np.exp(-tauc/(1.0*Gam))
        etr = np.exp(-taur/(1.0*gam))
        et = etc*etr
        tau = tauc+taur
        # filtering < T : bug fix -- amplitudes are filtered with the SAME
        # mask as the delays (previously the mask was recomputed on the
        # already-filtered delay array, mispairing et and tau)
        uT  = np.where(tau<T)[0]
        tau = tau[uT]
        et  = et[uT]
        # reordering in the delay domain with random signs on the amplitudes
        u   = np.argsort(tau)
        taus = tau[u]
        ets  = et[u]*np.sign(np.random.rand(len(u))-0.5)
        # delays and amplitudes
        self.x = taus
        self.y = ets
class TUchannel(TBchannel,bs.TUsignal):
""" Uniform channel in delay domain
"""
def toa_max2(self):
""" calculate time of arrival max2 method
"""
THRE = array([])
V = array([])
VL = array([])
M = max(self.y)
n = np.nonzero(self.y == M)[0]
thre = M
v = 1
vl = 0
THRE = np.hstack((THRE, thre))
V = np.hstack((V, v))
VL = np.hstack((VL, vl))
step = M / 1e2
thre = M - step
# while thre > M/1e2:
while vl < 20:
# while v < 50:
u = np.nonzero(self.y > thre)[0]
v = nbint(u)
h = np.nonzero(u > n)[0]
g = np.delete(u, h)
vl = nbint(g) - 1
THRE = np.hstack((THRE, thre))
V = np.hstack((V, v))
VL = np.hstack((VL, vl))
thre = thre - step
plt.plot(1 - THRE / M, V, 'b', drawstyle='steps',
label='interval number')
plt.plot(1 - THRE / M, VL, '-r', drawstyle='steps',
label='interval(Left) number')
plt.xlabel('Gamma/Vmax')
plt.legend(loc=2)
# ylabel('Interval Number')
plt.show()
    def toa_new(self):
        """ estimate the time of arrival (new method)

        A threshold descends from the global maximum by 1% steps.  Each time
        a new above-threshold interval appears on the left of the maximum,
        its starting index is recorded in N.  The search stops when the gap
        between two successive detections exceeds 4% of the maximum, and the
        third-to-last recorded index is returned as the first path delay.

        Returns
        -------
        tau : float
            estimated time of arrival (ns)
        """
        t = self.x
        Max = max(self.y)
        # nmax : index of the global maximum
        nmax = np.nonzero(self.y == Max)[0]
        n = nmax
        step = Max / 1e2
        thre = Max - step
        delta = 100
        d = 0
        nint = 0
        # N accumulates the leading indices of newly detected intervals
        N = np.array([])
        N = np.hstack((N, n))
        while delta > 4 * Max / 1e2:
            u = np.nonzero(self.y > thre)[0]
            # keep only samples on the left of the current reference index n
            hr = np.nonzero(u > n)[0]
            g = np.delete(u, hr)
            if nmax >= 6000:
                # restrict the search to a 6000-sample window before the
                # maximum (6000 * 0.005 ns = 30 ns)
                hl = np.nonzero(g < nmax - 6000)[0]
                u = np.delete(g, hl)
            else:
                u = g
            # n_int : number of extra contiguous intervals on the left
            n_int = nbint(u) - 1
            if n_int == 0:
                # no new interval : accumulate the threshold descent
                d = d + step
            else:
                # new interval found : record its first index
                delta = d + step
                d = 0
                n = u[0]
                N = np.hstack((N, n))
                #print(N)
            thre = thre - step
            if thre < 0:
                break
        # keep the third-to-last detection when available
        if len(N) >= 3:
            nn = N[-3]
        else:
            nn = N[0]
        tau = t[nn]
        return tau
    def toa_win(self, w):
        """ calculate the time of arrival (window method)

        Parameters
        ----------
        w : parameter between 0 and 100
            stop margin in percent of the maximum (Lei takes w = 9)

        Returns
        -------
        tau : float
            estimated time of arrival (ns)
        """
        t = self.x
        # noise reference level estimated on the first 1000 samples
        maxbruit = max(self.y[0:1000])
        Max = max(self.y)
        # nmax : index of the global maximum
        nmax = np.nonzero(self.y == Max)[0]
        n = nmax
        step = Max / 1e2
        thre = Max - step
        delta = 100
        d = 0
        nint = 0
        # N accumulates the leading indices of newly detected intervals
        N = np.array([])
        N = np.hstack((N, n))
        # while delta is larger than w% of the maximum
        while delta > w * Max / 1e2:
            u = np.nonzero(self.y > thre)[0]
            # keep only samples on the left of the current reference index n
            hr = np.nonzero(u > n)[0]
            g = np.delete(u, hr)
            if nmax >= 6000:
                # restrict the search to a 6000-sample window before the
                # maximum (6000 * 0.005 ns = 30 ns)
                hl = np.nonzero(g < nmax - 6000)[0]
                u = np.delete(g, hl)
            else:
                u = g
            # n_int : number of extra contiguous intervals on the left
            n_int = nbint(u) - 1
            if n_int == 0:
                thre = thre - step
                d = d + step
            else:
                # new interval : update the margin above the noise level
                delta = Max - maxbruit - d - step
                d = d + step
                n = u[0]
                N = np.hstack((N, n))
            thre = thre - step
            if thre < 0:
                break
        # keep the second-to-last detection when available
        if len(N) >= 2:
            nn = N[-2]
        else:
            nn = N[0]
        tau = t[nn]
        return tau
def toa_max(self, nint):
""" calculate time of arrival
descendant threshold based toa estimation
Parameters
----------
nint : integer
number of intervals
"""
#
# seek fot the maximum value of the signal
#
M = self.y.max()
step = M / 1e2
# plot(self.x,self.y)
thre = M - step
while step > M / 1e5:
# axhline(y=thre,color='green')
u = np.where(self.y > thre)[0]
# nbint : number of contiguous intervals
if pyu.nbint(u) < nint:
# down
thre = thre - step
else:
# up + step reduction
thre = thre + step
step = step / 2.
# plt.show()
tau = self.x[u[0]]
return tau
def toa_th(self, thlos, thnlos, visibility=0):
""" calculate time of arrival
threshold based toa estimation using energy peak
"""
#
# ( ) ^2
#
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
if visibility == 'LOS':
th = thlos * maxy2
else:
th = thnlos * maxy2
#
#In the W1-M1 measurement
#thlos=0.05 thnlos=0.15
#
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_cum(self, th):
""" calculate time of arrival
threshold based toa estimation using cumulative energy
"""
t = self.x
y = self.y
cdf, vary = self.ecdf()
#
#In the W1-M1 measurement th=0.15
#
v = np.nonzero(cdf.y >= th)[0]
toa = t[v[0]]
return toa
def toa_th_tmtm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
alpha = (np.sqrt(self.Etot()) - np.sqrt(self.Emax())) / \
(np.sqrt(self.Etot()) + np.sqrt(self.Emax()))
th = alpha * maxy2
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_th_tm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
alpha = np.sqrt(self.Emax()) / np.sqrt(self.Etot())
print(alpha)
th = alpha * maxy2
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_th_tmt(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
alpha = (np.sqrt(self.Etot(
)) - np.sqrt(self.Emax())) / np.sqrt(self.Etot())
print(alpha)
th = alpha * maxy2
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_cum_tm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
cdf, vary = self.ecdf()
alpha = np.sqrt(cdf.y[u]) / np.sqrt(cdf.y[-1])
v = np.nonzero(cdf.y >= alpha * cdf.y[u])[0]
toa = t[v[0]]
return toa
def toa_cum_tmtm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
cdf, vary = self.ecdf()
alpha = (np.sqrt(cdf.y[-1]) - np.sqrt(
cdf.y[u])) / (np.sqrt(cdf.y[-1]) + np.sqrt(cdf.y[u]))
v = np.nonzero(cdf.y >= alpha * cdf.y[u])[0]
toa = t[v[0]]
return toa
def toa_cum_tmt(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
cdf, vary = self.ecdf()
alpha = (np.sqrt(cdf.y[-1]) - np.sqrt(cdf.y[u])) / np.sqrt(cdf.y[-1])
v = np.nonzero(cdf.y >= alpha * cdf.y[u])[0]
toa = t[v[0]]
return toa
def psd(self, Tpns=100, R=50,periodic=True):
""" calculate power spectral density
Parameters
----------
R : Resistance (default 50 Ohms)
Ohms
Tpns : real
Signal period PRP (default 100 ns)
.. note::
Notice this interesting property that if time is represented in ns
the resulting PSD is expressed in dBm/MHz because there is the
same scale factor 1e-9 between second and nanosecond as between
dBW/Hz and dBm/MHz
If periodic is False the signal duration is taken as period.
"""
P = self.esd(mode='unilateral')
if periodic:
P.y = P.y / (R * Tpns)
else:
P.y = P.y/ (R* (P.x[-1]-P.x[0]))
return P
def awgn(self,PSDdBmpHz=-174,snr=0,seed=1,typ='psd',R=50):
""" add a white Gaussian noise
Parameters
----------
PSDdBmpHz : float
snr : float
seed : float
typ : string
'psd' | 'snr'
R : float
Returns
-------
n
sn
See Also
--------
bsignal.Noise
"""
ti = self.x[0]
tf = self.x[-1]
tsns = self.x[1]-self.x[0]
fsGHz = 1./tsns
if typ=='snr':
Ps = self.energy()/(R*(tf-ti))
PW = Ps/10**(snr/10.)
pWpHz = PW/(fsGHz*1e9)
pmWpHz = pWpHz*1e3
PSDdBmpHz = 10*np.log10(pmWpHz)
n = Noise(ti = ti,
tf = tf+tsns,
fsGHz = fsGHz,
PSDdBmpHz = PSDdBmpHz,
R = R,
seed = seed)
sn.y = self.y + n.y[0:len(self.x)]
sn.x = self.x
return sn,n
def Etau0(self, tau0=0.0, Tint=1, sym=0.25, dB=True):
""" calculate energy around delay tau0
Parameters
----------
tau0 : (ns) (0)
Tint : Integration time (ns) (1) include the system error
sym : symetrie factor 0.5 = symetric (0.25)
dB : logscale indicator (True)
"""
#u = nonzero((tau0 + Tint*(1-sym) > self.x) & (self.x > tau0 - Tint*sym))
u = nonzero((tau0 + Tint > self.x) & (self.x > tau0))
etau0 = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
etau0 = 10 * np.log10(etau0)
return(etau0)
def Ewin(self, tau, Tint=1, sym=0.25, dB=False):
""" integrate energy around delay tau
Parameters
----------
tau : (ns) (0)
Tint : Integration time (ns) (1) include the system error
sym : symetrie factor 0.5 = symetric (0.25)
dB : logscale indicator (True)
"""
tstart = tau - Tint * sym
tstop = tau + Tint * (1 - sym)
u = np.nonzero((self.x > tstart) & (self.x < tstop))
energy = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
energy = 10 * np.log10(energy)
return(energy)
def Etot(self, tau0=0.0, taumax=200, dB=False):
""" Etot calculate the energy of the signal
Parameters
----------
tau0 : start value for integration
dB : (False default) if True value in dB
usage :
s.Etot(tau0=10,dB=True)
"""
u = (self.x > tau0) & (self.x < taumax)
etot = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
etot = 10 * np.log10(etot)
return(etot)
def Efirst(self, toa, Tint=1, sym=0.25, dB=True):
""" calculate the energy of the first path
Parameters
----------
toa : float
delay value
Tint : float
duration value (1)
sym : float
symmetry around delay value ( 0.25)
dB : Boolean
Returns
-------
Efirst : Energy amount in the window (in dB if dB)
"""
u = np.nonzero((toa + Tint > self.x) & (self.x > toa))
efirst = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
efirst = 10 * np.log10(efirst)
return(efirst)
def Efirst_corr(self, tau0, Sx, Sy, dB=True):
""" calculate Efirst utilizing the correlation of signal emission et reponse impulsionnelle
Parameters
----------
tau0
Sx
Sy
dB
"""
te = self.dx()
E0 = sum(Sy * Sy) * te
n = int(np.ceil(tau0 / te))
Correlation = np.correlate(self.y, Sy, mode='full')
seuil = max(Correlation[len(Sx):len(Sx) + n - 200])
v = np.nonzero(Correlation[len(Sx) + n - 200:] > seuil)[0]
if len(v) == 0:
ff = seuil / E0
else:
w = v[1:] - v[0:-1]
w0 = np.nonzero(w != 1)[0]
if len(w0) == 0:
ff = max(Correlation[len(Sx) + n - 200:][v]) / E0
else:
vv = v[0:w0[0] + 1]
ff = max(Correlation[len(Sx) + n - 200:][vv]) / E0
if dB:
Ef = 20 * np.log10(ff)
return(Ef)
    def Efirst_toath(self, tau0, Tint=1, sym=0.25, dB=True):
        """ calculate the first path energy after locating its delay

        The first path is searched after the time of flight tau0 : its delay
        is the position of the maximum of the first connected group of
        samples exceeding the pre-arrival level, and the energy is then
        integrated in a window around that delay.

        Parameters
        ----------
        tau0 : float
            time of flight (ns)
        Tint : float
            integration window duration (ns)
        sym : float
            window symmetry factor
        dB : bool
            if True return the value in dBnJ
        """
        te = self.dx()
        # n : sample index corresponding to the time of flight
        n = int(np.ceil(tau0 / te))
        # reference level : maximum amplitude before the time of flight
        seuil = max(self.y[:n])
        v = np.nonzero(self.y[n:] > seuil)[0]
        if len(v) == 0:
            # nothing above the reference level : use the time of flight
            toa = n * te
        else:
            # keep only the first connected group of above-threshold samples
            w = v[1:] - v[0:-1]
            w0 = np.nonzero(w != 1)[0]
            if len(w0) == 0:
                r = max(self.y[n:][v])
                toa = np.nonzero(self.y == r)[0] * te
            else:
                vv = v[0:w0[0] + 1]
                r = max(self.y[n:][vv])
                toa = np.nonzero(self.y == r)[0] * te
        # energy integration in a window around the estimated delay
        u = np.nonzero((toa + Tint * (1 - sym) > self.x) & (
            self.x > toa - Tint * sym))
        efirst = te * sum(self.y[u] * np.conj(self.y[u]))
        if dB:
            efirst = 10 * np.log10(efirst)
        return(efirst)
def Epercent(self, N=10):
""" return N percentile delay of a cdf
Parameters
----------
N : 10
"""
cdf, vary = self.ecdf()
t = cdf.x
Cdf = cdf.y
pc = array([])
for i in range(N - 1):
u = np.nonzero(Cdf > (i + 1.) / N)
tp = t[u[0]]
pc = np.hstack((pc, tp))
return(pc)
    def Emax(self, Tint=1, sym=0.5, dB=False):
        """ calculate the maximum of Energy integrated over a duration Tint

        A symetry of sym around the max value of the squared signal

        Parameters
        ----------
        Tint: float
            Integration time (ns) default 1
        sym : float
            Symmetry factor (default 0.5)
        dB : boolean
            default False

        Returns
        -------
        Emax : float
            integrated energy around the peak (in dB if dB)

        Notes
        -----
        W1-M1
        te = 0.005 ns
        left  = 12
        Nright = 33
        Tint = 45*te = 0.225 ns
        sym = 0.25
        """
        #
        #  ( ) ^2
        #
        y2 = (self.y) ** 2
        #
        # determine time of maximum value of ()^2
        #
        maxy2 = max(y2)
        u = np.nonzero(y2 == maxy2)[0]
        te = self.dx()
        # window of Npt samples split Nleft/Nright around the peak
        Npt = int(np.ceil(Tint / te))
        Nleft = int(np.ceil(sym * Npt))
        Nright = int(np.ceil((1 - sym) * Npt))
        #
        # Integration around the maximum value of E^2
        # In the W1_M1 measurement
        # te     = 0.005 ns
        # Nleft  = 12
        # Nright = 33
        # Tint   = 45*te = 0.225 ns
        # sym    = 0.25
        #
        Y = y2[u - Nleft:u + Nright]
        cumY = np.cumsum(Y)
        # cumulative energy over the window ; last sample is the total
        maxY = cumY[-1]
        Emax = maxY * te
        if dB:
            return(10 * np.log10(Emax))
        return(Emax)
def tau_Emax(self):
""" calculate the delay of max energy peak
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
tau_Emax = t[u]
return(tau_Emax)
def aggcir(self,alphak,tauk):
""" aggregation of CIR from (alphak,tauk)
Parameters
----------
alphak : ndarray
CIR path amplitude
tauk : ndarray
CIR delay values
Examples
--------
.. plot::
:include-source:
>>> from pylayers.signal.bsignal import *
>>> import numpy as np
>>> alphak = 10*np.random.rand(7)
>>> tauk = 100*np.random.rand(7)
>>> tau = np.arange(0,150,0.1)
>>> y = np.zeros(len(tau))
>>> # CIR = TUsignal(tau,y)
>>> # CIR.aggcir(alphak,tauk)
>>> # f,a =CIR.plot(typ=['v'])
"""
shy = np.shape(self.y)
x = self.x
eps = (x[1]-x[0])/2
u = map(lambda t: np.where( (x>t-eps) & (x<=t+eps))[0][0],tauk)
ynew = np.zeros(len(x))
ynew[u] = alphak
if len(shy)>1:
self.y = np.vstack((self.y,ynew))
else:
self.y = ynew[None,:]
self.y = np.delete(self.y,0,0)
    def readcir(self,filename,outdir=[]):
        """ read channel impulse response from a Matlab file

        Loads keys 't' and 'cir' and stores them as self.x / self.y.

        Parameters
        ----------
        filename : string
            long file name if outdir is []
            short file name is outdir is != []
        outdir : string
            output directory (prefixed with 'output/' when provided)
        """
        if outdir != []:
            outdir = 'output/'+outdir
            filename = getlong(filename, outdir)
        cir = ios.loadmat(filename)
        self.x = cir['t'].ravel()
        self.y = cir['cir'].ravel()
def readuwb(self, _filename):
""" read Waveform from Matlab file
Parameters
----------
_filename : file name with extension (.mat)
"""
outdir = 'output/'+outdir
filename = getlong(_filename, outdir)
wfm = ios.loadmat(filename)
d = wfm['data'][0][0]
T0 = d.T0[0][0] / 1e-9
Tres = d.Tres[0][0] / 1e-9
s = d.WformOut1
N = len(s)
self.x = np.linspace(T0, T0 + (N - 1) * Tres, N)
self.y = s.reshape(len(s))
    def ecdf(self, Tnoise=10, rem_noise=True, in_positivity=True, display=False, normalize=True, delay=0):
        """ calculate energy cumulative density function

        Parameters
        ----------
        Tnoise : float
            Time duration of noise only portion (default=10ns)
        rem_noise : boolean
            remove noise if True
        in_positivity : boolean
            inforce positivity if True
        normalize : boolean
            normalize if True (Not implemented)
        display : boolean
            display ecdf if True
        delay : float
            give a delay for vizualization

        Returns
        -------
        ecdf , vary
            ecdf : TUsignal holding the cumulative energy
            vary : variance of the noise-only portion
        """
        #
        #  ( ) ^2
        #
        t = self.x
        y = self.y
        te = self.dx()
        y2 = y ** 2
        #
        f1 = np.cumsum(y2) * te
        # retrieve the noise only portion at the beginning of TUsignal
        #
        Nnoise = int(np.ceil(Tnoise / te))
        tn = t[0:Nnoise]
        fn = f1[0:Nnoise]
        stdy = np.std(y[0:Nnoise])
        vary = stdy * stdy
        # y is rebound : linear model of the noise energy ramp over the
        # whole time base (slope = noise variance)
        y = t * vary
        #
        # y : linear interpolation of noise ecdf  (over whole time base)
        #
        #(ar,br)= polyfit(tn,fn,1)
        #print ar
        #y  = polyval([ar,br],t)
        if rem_noise:
            f = f1 - y
        else:
            f = f1
        #
        # inforce positivity
        #
        if in_positivity:
            # clamp negative increments of the cdf to zero
            pdf = np.diff(f)
            u = np.nonzero(pdf < 0)[0]
            pdf[u] = 0
            ecdf = np.cumsum(pdf)
        else:
            ecdf = f
        #
        # Normalization step
        #
        E = ecdf[-1]
        #print E
        if normalize:
            ecdf = ecdf / E
        #
        # Resizing
        #
        Nt = len(t)
        Necdf = len(ecdf)
        # np.diff shortens the array by one : truncate both to common length
        N = min(Nt, Necdf)
        ecdf = bs.TUsignal(t[0:N], ecdf[0:N])
        #
        # Display
        #
        if display:
            plt.subplot(211)
            ecdf.plot()
            if normalize:
                plt.plot(t, 2 * vary * np.sqrt(2 * t) / E, 'r')
                plt.plot(t, -2 * vary * np.sqrt(2 * t) / E, 'r')
            else:
                plt.plot(t, 3 * vary * np.sqrt(2 * t), 'r')
                plt.plot(t, -3 * vary * np.sqrt(2 * t), 'r')
            plt.axvline(x=delay, color='red')
            plt.subplot(212)
            plt.plot(t, y, color='red')
            plt.plot(t, f1, color='black')
            plt.plot(t, f, color='blue')
            plt.show()
        return ecdf, vary
class TUDchannel(TUchannel):
    """ Uniform channel in Time domain with delay

    Attributes
    ----------
    x : ndarray
    y : ndarray
    taud : ndarray
        direct delay
    taue : ndarray
        excess delay
    """
    def fig(self, N):
        """ plot a figure of the N first signals

        Parameters
        ----------
        N : int
            number of y signal to plot

        Notes
        -----
        NOTE(review): `min`/`max` shadow the builtins ; `minimum` and `plot`
        are presumably star-imported (numpy / pylab) -- confirm. yN1 is a
        1-D slice, so yN1[k, :] below looks like it would raise -- verify
        before relying on this method.
        """
        x = self.x
        min = self.y.min()
        max = self.y.max()
        ec = max - min
        ecmax = ec.max()
        sh = np.shape(self.y)
        Nmax = sh[0]
        N1 = int(minimum(N, Nmax))
        y1 = self.y[0, :] + (N1 - 1) * ecmax
        yN1 = self.y[N1 - 1, :]
        for k in range(N):
            gk = str(N) + str(1) + str(k)
            plt.subplot(gk)
            plot(x, yN1[k, :])
        #r.plot(x, yN1, main='Ray response', xlab='Time (ns)', ylab='y', type='l', col='black' ,frame='False',  ylim=r.range(y1,yN1) )
        #for i in range(N1-1):
        #    yi = self.y[i+1,:] + (N1-i)*ecmax
        #    r.lines(x,yi,col='black')
class Mchannel(bs.FUsignal):
    """ Handle the measured channel

    Attributes
    ----------
    Nm : int
        number of measurements
    Nr : int
        number of receiving antennas
    Nt : int
        number of transmitting antennas
    Nf : int
        number of frequency points
    """
    def __init__(self,
                x ,
                y ,
                **kwargs):
        """ class constructor

        Parameters
        ----------
        x :  , nfreq
            frequency GHz
        y : Nm x Nr x Nt x Nf
            measured channel
        Aat : list
            transmitter antenna information (default [])
        Aar : list
            receiver antenna information (default [])
        calibrated : boolean
            True when the measurement is calibrated (default True)
        label : string
        filename : string
        mes : string
        """
        defaults = {
            'Aat': [],
            'Aar': [],
            'calibrated':True,
            'label' :'',
            'filename':'',
            'mes':''
        }
        for k in defaults:
            if k not in kwargs:
                kwargs[k]=defaults[k]
        self.calibrated = kwargs.pop('calibrated')
        self.label = kwargs.pop('label')
        self.filename = kwargs.pop('filename')
        self.mes = kwargs.pop('mes')
        self.Aat = kwargs.pop('Aat')
        self.Aar = kwargs.pop('Aar')
        # dimensions are read off the measured tensor
        sh = y.shape
        self.Nm = sh[0]
        self.Nr = sh[1]
        self.Nt = sh[2]
        self.Nf = sh[3]
        bs.FUsignal.__init__(self,x=x,y=y,label='Mchannel')
    def eig(self,HdH=False):
        """ calculate eigen values of the transfer matrix.
        it involves H and Hd against svd() which acts only over H.

        Parameters
        ----------
        HdH : boolean
            if True diagonalize H^{d}H (nt x nt), otherwise H H^{d} (nr x nr)

        Returns
        -------
        HdH : Hermitian transfer matrix  (nf x nt x nt )
        U   : Unitary tensor  (nf x nt x nt )
        S   : Singular values (nf x nt)
        V   : = Ud (in that case because HdH Hermitian)  (nf x nt x nt)

        HdH = U L U^{\dagger}
        """
        # H  : nm x nr x nt x nf
        H   = self.y
        # Hd : nm x nt x nr x nf
        Hd  = np.conj(self.y.swapaxes(1,2))
        if HdH:
            #T : nm x nt x nt x nf
            T = np.einsum('uijk,ujlk->uilk',Hd,H)
        else:
            #T : nm x nr x nr x nf
            T = np.einsum('uijk,ujlk->uilk',H,Hd)
        # HdH : nm x nf x nr x nr
        T  = T.swapaxes(1,3)
        #U  : nm x nf x (nr|nt) x (nr|nt)
        #S  : nm x nf x (nr|nt)
        #V  : nm x nf x (nr|nt) x (nr|nt)
        U,S,V  = la.svd(T)
        return (U,S,V)
    def Bcapacity(self,Pt=np.array([1e-3]),Tp=273):
        """ calculates BLAST deterministic MIMO channel capacity

        Parameters
        ----------
        Pt : np.array (,NPt)
            the total power is assumed uniformaly distributed over the whole bandwidth
        Tp : Receiver Temperature (K)

        Returns
        -------
        C : sum rate or spectral efficiency (bit/s)
            np.array (Nf,NPt)
        rho : SNR
            np.array (Nf,Nt,NPt)

            log_2(det(I+(Et/(N0Nt))HH^{H})

        Notes
        -----
        The returned value is homogeneous to bit/s the aggregated capacity is
        obtrained by a simple summation
        of the returned quantity. To obtain the sum rate or the spectral
        efficiency in (bit/s/Hz ) the returned value should be divided by the
        frequency step dfGHz
        """
        fGHz  = self.x
        Nf    = len(fGHz)
        BGHz  = fGHz[-1]-fGHz[0]
        dfGHz = fGHz[1]-fGHz[0]
        if type(Pt)==float:
            Pt=np.array([Pt])
        # White Noise definition
        #
        # Boltzman constantf    = len(fGHz)
        kB = 1.03806488e-23
        # N0 ~ J ~ W/Hz ~ W.s
        N0 = kB*Tp
        # Evaluation of the transfer tensor
        #
        # HdH :
        U,S,V = self.eig(HdH=True)
        # power equally split over the transmit antennas
        Ps = Pt/(self.Nt)
        # noise power over the whole bandwidth
        Pb = N0*BGHz*1e9   # Watt
        #  S : nm x nf x nr
        # rho : nm x nf x nr x power
        #
        rho  = (Ps[None,None,None,:]/Pb)*S[:,:,:,None]
        CB   = dfGHz*np.sum(np.log(1+rho)/np.log(2),axis=2)
        return(rho,CB)
    def WFcapacity(self,Pt=np.array([1e-3]),Tp=273):
        """ calculates deterministic MIMO channel capacity

        Parameters
        ----------
        Pt :  the total power to be distributed over the different spatial
            channels using water filling
        Tp : Receiver Noise Temperature (K)

        Returns
        -------
        C : capacity (bit/s)
        rho : SNR (in linear scale)

            log_2(det(It + HH^{H})
        """
        fGHz  = self.x
        Nf    = len(fGHz)
        # Bandwidth
        BGHz  = fGHz[-1]-fGHz[0]
        # Frequency step
        dfGHz = fGHz[1]-fGHz[0]
        # White Noise definition
        #
        # Boltzman constant
        kB = 1.03806488e-23
        # N0 ~ J ~ W/Hz ~ W.s
        N0 = kB*Tp
        # Evaluation of the transfer HHd tensor
        U,ld,V = self.eig(HdH=True)
        #
        # Iterative implementation of Water Filling algorithm
        #
        # pb : (nm,nf,nt)   noise power (Watt)
        pb = N0*dfGHz*1e9*np.ones((self.Nm,self.Nf,self.Nt))
        # pt : (nm,nf,nt,power)  Total power uniformly spread over (nt*nf-1)
        pt = Pt[None,None,None,:]/((self.Nf-1)*self.Nt)
        mu = pt
        Q0 = np.maximum(0,mu-pb[:,:,:,None]/ld[:,:,:,None])
        u  = np.where(Q0>0)[0]
        Peff = np.sum(np.sum(Q0,axis=1),axis=1)
        deltamu = pt
        # bisection on the water level mu until the effective power matches Pt
        while (np.abs(Peff-Pt)>1e-16).any():
            mu = mu + deltamu
            Q = np.maximum(0,mu-pb[:,:,:,None]/ld[:,:,:,None])
            Peff = np.sum(np.sum(Q,axis=1),axis=1)
            #print "mu , Peff : ",mu,Peff
            usup = np.where(Peff>Pt)[0]
            mu[:,:,:,usup] = mu[:,:,:,usup]- deltamu[:,:,:,usup]
            deltamu[:,:,:,usup] = deltamu[:,:,:,usup]/2.
        Qn   = Q/pb[:,:,:,None]
        rho  = Qn*ld[:,:,:,None]
        Cwf  = dfGHz*np.sum(np.log(1+rho)/np.log(2),axis=2)
        return(rho,Cwf)
class Tchannel(bs.FUsignal):
""" Handle the transmission channel
The transmission channel TChannel is obtained through combination of the propagation
channel and the antenna transfer functions from both transmitter and receiver.
This channel contains all the spatial information for each individual ray.
Warning : This is a frequency domain channel deriving from bs.FUsignal
Attributes
----------
ray transfer functions (nray,nfreq)
dod :
direction of depature (rad) [theta_t,phi_t] nray x 2
doa :
direction of arrival (rad) [theta_r,phi_r] nray x 2
tau :
delay ray k in ns
Methods
-------
imshow()
apply(W)
applywavB(Wgam)
applywavC(Wgam)
chantap(fcGHz,WGHz,Ntap)
doddoa()
wavefig(w,Nray)
rayfig(w,Nray)
rssi(ufreq)
See Also
--------
pylayers.antprop.Ctilde.prop2tran
"""
    def __init__(self,
                x = np.arange(0,2,1),
                y = np.arange(0,2,1),
                tau = np.array(([],)),
                dod = np.array(([[],[]])).T,
                doa = np.array(([[],[]])).T,
                label = ''):
        """ class constructor

        Parameters
        ----------
        x  :  , nfreq
            frequency GHz
        y  :  nray x nfreq
            path amplitude
        tau :  1 x nray
            path delay (ns)
        dod :
            direction of departure (nray x 2)
        doa :
            direction of arrival (nray x 2)
        label : string
        """
        # taud : direct delays ; taue : excess delays (initially zero)
        self.taud = tau
        self.taue = np.zeros(len(tau))
        # FUDsignal.__init__(self, x, y,taud)
        self.dod  = dod
        self.doa  = doa
        #          ,  Nf
        #  Nd x Nf x Np  x Nu
        self.label = label
        # windowing / calibration state flags
        self.win = 'rect'
        self.isFriis = False
        self.windowed = False
        self.calibrated = False
        self.filcal = "calibration.mat"
        bs.FUsignal.__init__(self,x=x,y=y,label='Channel')
    def saveh5(self,Lfilename,idx,a,b,Ta,Tb):
        """ save Tchannel object in hdf5 format

        Parameters
        ----------
        Lfilename  : string
            Layout filename
        idx : int
            file identifier number
        a : np.ndarray
            postion of point a (transmitter)
        b : np.ndarray
            postion of point b (receiver)
        Ta : np.ndarray
            rotation matrice of antenna a
        Tb : np.ndarray
            rotation matrice of antenna b
        """
        _Lfilename=Lfilename.split('.')[0]
        filename= _Lfilename +'_' + str(idx).zfill(5) + '.h5'
        filenameh5=pyu.getlong(filename,pstruc['DIRH'])
        f=h5py.File(filenameh5,'w')
        # try/except to avoid loosing the h5 file if
        # read/write error
        try:
            f.attrs['a']=a
            f.attrs['b']=b
            f.attrs['Ta']=Ta
            f.attrs['Tb']=Tb
            # keys not saved as attribute of h5py file
            for k,va in self.__dict__.items():
                f.create_dataset(k,shape = np.shape(va),data=va)
            f.close()
        except:
            f.close()
            raise NameError('Channel Tchannel: issue when writting h5py file')
    def loadh5(self,Lfilename,idx, output = True):
        """ load Tchannel object in hdf5 format

        Parameters
        ----------
        Lfilename  : string
            Layout filename
        idx : int
            file identifier number
        output : bool
            return an output precised in return

        Returns
        -------
        if output:
        (a,b,Ta,Tb)

        with
            a = np.ndarray
                position of point a (transmitter)
            b = np.ndarray
                position of point b (receiver)
            Ta = np.ndarray
                rotation matrice of antenna a
            Tb = np.ndarray
                rotation matrice of antenna b
        """
        filename = Lfilename.split('.')[0] +'_' + str(idx).zfill(5) + '.h5'
        filenameh5 = pyu.getlong(filename,pstruc['DIRH'])
        f=h5py.File(filenameh5, 'r')
        try:
            # keys not saved as attribute of h5py file
            for k,va in f.items():
                # if k != 'tau1':
                #     setattr(self,str(k),va[:])
                # else :
                setattr(self,str(k),va)
            a = f.attrs['a']
            b = f.attrs['b']
            Ta = f.attrs['Ta']
            Tb = f.attrs['Tb']
            f.close()
            # rebuild the object from the freshly loaded attributes
            self.__init__(self.x, self.y, self.taud, self.dod, self.doa)
            if output :
                return a,b,Ta,Tb
        except:
            f.close()
            raise NameError('Channel Tchannel: issue when reading h5py file')
    def _saveh5(self,filenameh5,grpname):
        """ save Tchannel object in hdf5 format compliant with Link Class

        Parameters
        ----------
        filenameh5  : str
            file name of h5py file Link format
        grpname  : int
            groupname in filenameh5
        """
        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        # try/except to avoid loosing the h5 file if
        # read/write error
        try:
            fh5=h5py.File(filename,'a')
            if not grpname in fh5['H'].keys():
                fh5['H'].create_group(grpname)
            else :
                print('Warning : H/'+grpname +'already exists in '+filenameh5)
            f=fh5['H/'+grpname]
            # every instance attribute is stored as one dataset
            for k,va in self.__dict__.items():
                #print(k,va)
                f.create_dataset(k,shape = np.shape(va),data=va)
            fh5.close()
        except:
            fh5.close()
            raise NameError('Channel Tchannel: issue when writting h5py file')
    def _loadh5(self,filenameh5,grpname,**kwargs):
        """ Load H object in hdf5 format compliant with Link Class

        Parameters
        ----------
        filenameh5  : str
            file name of h5py file Link format
        grpname  : int
            groupname in filenameh5
        """
        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        try:
            fh5=h5py.File(filename,'r')
            f = fh5['H/'+grpname]
            # keys not saved as attribute of h5py file
            for k,va in f.items():
                if k !='isFriis':
                    try:
                        # prefer materializing the dataset into an ndarray
                        setattr(self,str(k),va[:])
                    except:
                        setattr(self,str(k),va)
                else :
                    setattr(self,str(k),va)
            fh5.close()
            # rebuild the object from the freshly loaded attributes
            self.__init__(self.x, self.y, self.taud, self.dod, self.doa)
        except:
            fh5.close()
            raise NameError('Channel Tchannel: issue when reading h5py file')
def apply(self, W=[]):
""" apply FUsignal W to the Tchannel
Parameters
----------
W : Bsignal.FUsignal
It exploits multigrid convolution from Bsignal.
Returns
-------
V : FUDAsignal
Notes
-----
Returns :math:`W(f) H_k(f)`
+ W may have a more important number of points and a smaller frequency band.
+ If the frequency band of the waveform exceeds the one of the
transmission channel, a warning is sent.
+ W is a FUsignal whose shape doesn't need to be homogeneous with FUChannel H
"""
if W!=[]:
U = W * self
else:
U = self
V = Tchannel(x= U.x, y = U.y, tau = self.taud, dod = self.dod, doa= self.doa)
return(V)
    def applywav(self, Wgam=[]):
        """ apply waveform (time domain ) to obtain the rays impulses response

        Parameters
        ----------
        Wgam : waveform

        Returns
        -------
        rir  : array,
            impulse response for each ray separately
            the size of the array is (nb_rays, support_length)
            support_length is calculated in regard of the
            delays of the channel

        Notes
        ------
            The overall received signal is built in time domain
            Wgam is applied on each Ray Transfer function

        See Also
        --------
        pylayers.signal.channel.rir
        """
        # product in frequency domain between Channel (self) and waveform
        Y = self.apply(Wgam)
        # back in time domain (zero padding Nz, fft shift ffts=1)
        rir = Y.rir(Nz=500,ffts=1)
        return rir
    def getcir(self,BWGHz=1,Nf=40000,fftshift=False):
        """ get the channel impulse response

        Rebuilds the transfer function on a regular frequency grid from the
        ray delays/amplitudes and inverse-transforms it.

        Parameters
        ----------
        BWGHz : Bandwidth
        Nf    : Number of frequency points
        fftshift : boolean
            if True center the impulse response (delays become symmetric
            around zero)

        See Also
        --------
        pylayers.simul.link.DLink.plt_cir
        """
        fGHz = np.linspace(0,BWGHz,Nf)
        dfGHz = fGHz[1]-fGHz[0]
        tauns = np.linspace(0,1/dfGHz,Nf)
        # E : r x nr x nt x f   (per-ray phase ramps)
        E = np.exp(-2*1j*np.pi*self.taud[:,None,None,None]*fGHz[None,None,None,:])
        # self.y : r x nr x nt x f
        if self.y.shape[3]==E.shape[3]:
            H = np.sum(E*self.y,axis=0)
        else:
            if self.y.shape[3]==1:
                H = np.sum(E*self.y,axis=0)
            else:
                # frequency grids differ : fall back on the first frequency
                # sample of the stored channel
                H = np.sum(E*self.y[:,:,:,0][:,:,:,None],axis=0)
        # back in time - last axis is frequency (axis=2)
        cir = np.fft.ifft(H,axis=2)
        if fftshift:
            cir = np.fft.fftshift(cir,axes=2)
            tauns = np.linspace(-Nf/(2*BWGHz),Nf/(2*BWGHz)-1/BWGHz,Nf)
        cir = bs.TUsignal(x=tauns,y=cir)
        return(cir)
def get_cir(self,Wgam=[]):
""" get Channel impulse response of the channel
for a given waveform
Parameters
----------
Wgam : waveform
Returns
-------
ri : TUsignal
impulse response for each ray separately
See Also
--------
pylayers.antprop.channel.rir
"""
rir = self.applywav(Wgam)
cir = np.sum(rir.y,axis=0)
return bs.TUsignal(rir.x, cir)
    def applywavC(self, w, dxw):
        """ apply waveform method C
        DEPRECATED

        Parameters
        ----------
        w :
            waveform
        dxw :
            waveform time step

        Notes
        -----
        The overall received signal is built in time domain
        w is apply on the overall CIR
        """
        print(DeprecationWarning(
            'WARNING : Tchannel.applywavC is going to be replaced by Tchannel.applywav'))
        H = self.H
        h = H.ft1(500, 1)
        dxh = h.dx()
        # resample the signal with the larger time step onto the finer grid
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                f = interp1d(w.x, w.y)
                x_new = arange(w.x[0], w.x[-1], dxh)[0:-1]
                y_new = f(x_new)
                w = bs.TUsignal(x_new, y_new)
            else:
                # reinterpolate h
                f = interp1d(h.x, h.y)
                x_new = arange(h.x[0], h.x[-1], dxw)[0:-1]
                y_new = f(x_new)
                h = bs.TUsignal(x_new, y_new)
        ri = h.convolve(w)
        return(ri)
    def baseband(self,**kwargs):
        """ Channel transfer function in baseband

        Parameters
        ----------
        fcGHz : float
            center frequency
        WMHz : float
            bandwidth in MHz
        Nf : int
            Number of frequency points

        Returns
        -------
        H : bs.FUsignal
            baseband transfer function over fMHz in [-WMHz/2, WMHz/2]
        """
        defaults = {'fcGHz':4.5,
                    'WMHz':20,
                    'Nf':100}
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        fcGHz = kwargs['fcGHz']
        WMHz = kwargs['WMHz']
        Nf = kwargs['Nf']
        # self.y : Nray x Nr x Nt x Nf
        # self.taud : (,Nray)
        # complex amplitude in baseband
        # Nray x Nr x Nt x Nf1
        abb = self.y*np.exp(-2 * 1j * np.pi *self.taud[:,None,None,None] * fcGHz )
        fMHz = np.linspace(-WMHz/2.,WMHz/2,Nf)
        # phase ramp per ray (1e-3 converts MHz * ns consistently)
        E = np.exp(-2*1j*np.pi*fMHz[None,None,None,:]*1e-3*self.taud[:,None,None,None])
        # sum over rays
        y = np.sum(abb*E,axis=0)
        H = bs.FUsignal(x=fMHz,y=y)
        return(H)
def chantap(self,**kwargs):
""" channel tap
Parameters
----------
fcGHz : center frequency
WGHz : bandwidth
Ntap : int
"""
defaults = {'fcGHz':4.5,
'WGHz':1,
'Ntap':100}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
fcGHz=kwargs['fcGHz']
WGHz=kwargs['WGHz']
Ntap=kwargs['Ntap']
# yb : tau x f x 1
yb = self.y[:,:,None]*np.exp(-2 * 1j * np.pi *self.taud[:,None,None] * fcGHz )
# l : 1 x 1 x tap
l = np.arange(Ntap)[None,None,:]
# l : tau x 1 x 1
tau = self.tau0[:,None,None]
# S : tau x f x tap
S = np.sinc(l-tau*WGHz)
# htap : f x tap
htap = np.sum(yb*S,axis=0)
htapi = np.sum(htap,axis=0)
return htapi
    def applywavB(self, Wgam):
        """ apply waveform method B (time domain )
        DEPRECATED

        Parameters
        ----------
        Wgam : waveform

        Returns
        -------
        ri  : TUDsignal
            impulse response for each ray separately

        Notes
        ------
            The overall received signal is built in time domain
            Wgam is applied on each Ray Transfer function

        See Also
        --------
        pylayers.signal.bsignal.TUDsignal.ft1
        """
        print(DeprecationWarning(
            'WARNING : Tchannel.applywavB is going to be replaced by Tchannel.applywav'))
        # product in frequency domain between Channel (self) and waveform
        Y = self.apply(Wgam)
        # back in time domain (zero padding Nz, fft shift ffts=1)
        ri = Y.ft1(Nz=500,ffts=1)
        return(ri)
    def applywavA(self, Wgam, Tw):
        """ apply waveform method A
        DEPRECATED

        Parameters
        ----------
        Wgam : waveform
        Tw : float
            time shift applied to the reconstructed signal

        Notes
        -----
        The overall received signal is built in frequency domain

        See Also
        --------
        pylayers.signal.bsignal
        """
        print(DeprecationWarning(
            'WARNING : Tchannel.applywavA is going to be replaced by Tchannel.applywav'))
        Hab = self.H.ft2(0.001)
        HabW = Hab * Wgam
        # force Hermitian symmetry before the inverse transform
        RI = HabW.symHz(10000)
        ri = RI.ifft(0,'natural')
        ri.translate(-Tw)
        return(ri)
    def plotd (self, d='doa', **kwargs):
        """plot direction of arrival and departure

        Parameters
        ----------
        d: 'doa' | 'dod'
            display direction of departure | arrival
        fig : plt.figure
        ax : plt.axis
        phi: tuple (-180, 180)
            phi angle
        normalize: bool
            energy normalized
        reverse : bool
            inverse theta and phi representation
        polar : bool
            polar representation
        cmap: matplotlib.cmap
        mode: 'center' | 'mean' | 'in'
            see bsignal.energy
        s : float
            scatter dot size
        fontsize: float
        edgecolors: bool
        colorbar: bool
        title : bool
        """
        defaults = {
                    'fig': [],
                    'ax': [],
                    'phi': (-180, 180),
                    'reverse' : True,
                    'cmap': plt.cm.hot_r,
                    'vmin': [],
                    'vmax': [],
                    'mode': 'center',
                    's': 30,
                    'fontsize':12,
                    'edgecolors':'none',
                    'b3d':False,
                    'polar':False,
                    'colorbar':False,
                    'title':False,
                    'xa':[],
                    'xb':[]
                    }
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        # NOTE(review): getattr default is the *string* 'doa', not the
        # attribute -- only reachable if d names a missing attribute ; confirm
        di = getattr(self, d, 'doa')
        # remove non plt.scatter kwargs
        phi = kwargs.pop('phi')
        # b3d = kwargs.pop('b3d')
        the = (0,180)
        fontsize = kwargs.pop('fontsize')
        polar = kwargs.pop('polar')
        fig = kwargs.pop('fig')
        ax = kwargs.pop('ax')
        colorbar = kwargs.pop('colorbar')
        reverse = kwargs.pop('reverse')
        mode = kwargs.pop('mode')
        title =kwargs.pop('title')
        xa = kwargs.pop('xa')
        xb = kwargs.pop('xb')
        b3d = kwargs.pop('b3d')
        vmin = kwargs.pop('vmin')
        vmax = kwargs.pop('vmax')
        if fig == []:
            fig = plt.figure()
        # per-ray energy, floored to avoid log10(0)
        Etot = self.energy(mode=mode) + 1e-15
        EtotdB = 10*np.log10(Etot)
        if vmax == []:
            vmax = EtotdB.max()
        if vmin == []:
            vmin = EtotdB.min()
        # clamp dynamics into [vmin, vmax]
        EtotdB = np.minimum(EtotdB,vmax)
        EtotdB = np.maximum(EtotdB,vmin)
        # col  = 1 - (10*log10(Etot)-Emin)/(Emax-Emin)
        # WARNING polar plot require radian angles
        #
        if polar :
            al = 1.
            alb = 180. / np.pi
            phi=np.array(phi)
            the=np.array(the)
            if reverse :
                phi[0] = phi[0]*np.pi/180
                phi[1] = phi[1]*np.pi/180
                the[0] = the[0]
                the[1] = the[1]
            else :
                phi[0] = phi[0]
                phi[1] = phi[1]
                the[0] = the[0]*np.pi/180
                the[1] = the[1]*np.pi/180
        else :
            al = 180. / np.pi
            alb = 180. / np.pi
        # color and dot size both encode the (normalized) energy
        col = ((EtotdB - vmin)/(vmax-vmin)).squeeze()
        kwargs['c'] = col
        kwargs['s'] = 200*col
        kwargs['vmin'] = 0.
        kwargs['vmax'] = 1.
        if len(col) != len(di):
            print("len(col):", len(col))
            print("len(di):", len(di))
        if ax == []:
            ax = fig.add_subplot(111, polar=polar)
        if reverse :
            scat = ax.scatter(di[:, 1] * al, di[:, 0] * alb, **kwargs)
            ax.axis((phi[0], phi[1], the[0], the[1]))
            ax.set_xlabel('$\phi(^{\circ})$', fontsize=fontsize)
            ax.set_ylabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
        else:
            scat = ax.scatter(di[:, 0] * al, di[:, 1] * alb, **kwargs)
            ax.axis((the[0], the[1], phi[0], phi[1]))
            ax.set_xlabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
            ax.set_ylabel('$\phi(^{\circ})$', fontsize=fontsize)
        if title:
            ax.set_title(d, fontsize=fontsize+2)
        if colorbar:
            b = plt.colorbar(scat,cax=ax)
            b.set_label('Path Loss (dB)')
            for t in b.ax.get_yticklabels():
                t.set_fontsize(fontsize)
        return (fig, ax)
def plotad(self,a='phi', **kwargs):
"""plot angular delays
Parameters
----------
d: 'doa' | 'dod'
display direction of departure | arrival
typ : 'ns' | 'm'
display delays in nano seconds ( ns) or meter (m)
fig : plt.figure
ax : plt.axis
a : str
angle 'theta' | 'phi'
normalize: bool
energy normalized
reverse : bool
inverse theta and phi represenation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar: bool
titel : bool
'clipval': float
remove values below clipval in dB
"""
defaults = { 'fig': [],
'ax': [],
'normalize':False,
'cmap':plt.cm.hot_r,
'mode':'center',
's':30,
'fontsize':12,
'edgecolors':'none',
'polar':False,
'colorbar':False,
'taumin':[],
'taumax':[],
'typ':'m',
'title':False,
'clipval': -2500,
'd':'doa'
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
# remove non plt.scatter kwargs
fontsize = kwargs.pop('fontsize')
polar = kwargs.pop('polar')
fig = kwargs.pop('fig')
ax = kwargs.pop('ax')
colorbar = kwargs.pop('colorbar')
normalize = kwargs.pop('normalize')
mode =kwargs.pop('mode')
dmin = kwargs.pop('taumin')
dmax = kwargs.pop('taumax')
title = kwargs.pop('title')
typ = kwargs.pop('typ')
clipval = kwargs.pop('clipval')
do = kwargs.pop('d')
if fig == []:
fig = plt.figure()
if do=='doa':
di = self.doa
elif do=='dod':
di = self.dod
if a == 'theta':
ang = np.array((0,180))
else :
ang = np.array((-180,180))
delay = self.taud
if typ =='m':
delay = delay*0.3
if dmin == []:
dmin = 0.#min(delay)
if dmax == []:
dmax= max(delay)
Etot = self.energy(mode=mode) + 1e-15
if normalize:
Emax = max(Etot)
Etot = Etot / Emax
#
#
#
# col = 1 - (10*log10(Etot)-Emin)/(Emax-Emin)
# WARNING polar plot require radian angles
#
#
if polar :
al = 1.
else :
al = 180. / np.pi
col = 10 * np.log10(Etot)
cv = np.where(col >= clipval)[0]
kwargs['c'] = col[cv]
if len(col) != len(di):
print("len(col):", len(col))
print("len(di):", len(dir))
if ax == []:
ax = fig.add_subplot(111, polar=polar)
if a == 'phi':
scat = ax.scatter(di[cv, 1] * al, delay[cv], **kwargs)
ax.axis((ang[0], ang[1], dmin, dmax))
ax.set_xlabel(r"$\phi(^{\circ})$", fontsize=fontsize)
if typ == 'm' :
ax.set_ylabel("distance (m)", fontsize=fontsize-2)
else :
ax.set_ylabel(r"$\phi(^{\circ})$", fontsize=fontsize-2)
elif a == 'theta':
scat = ax.scatter(di[cv, 0] * al, delay[cv], **kwargs)
ax.axis((ang[0], ang[1], dmin,dmax))
ax.set_xlabel(r"$\\theta_t(^{\circ})$", fontsize=fontsize)
if typ == 'm' :
ax.set_ylabel("distance (m)", fontsize=fontsize-2)
else :
ax.set_ylabel(r"$\phi(^{\circ})$", fontsize=fontsize-2)
if title :
ax.set_title('DoA vs delay (ns)', fontsize=fontsize+2)
if colorbar:
b=fig.colorbar(scat)
if normalize:
b.set_label('dB')
else:
b.set_label('Path Loss (dB)')
return (fig, ax)
    def doadod(self, **kwargs):
        """ doadod scatter plot

        Parameters
        ----------
        phi: tuple (-180, 180)
            phi angle
        normalize: bool
            energy normalized
        reverse : bool
            inverse theta and phi represenation
        polar : bool
            polar representation
        cmap: matplotlib.cmap
        mode: 'center' | 'mean' | 'in'
            see bsignal.energy
        s : float
            scatter dot size
        fontsize: float
        edgecolors: bool
        colorbar bool

        Notes
        -----
        scatter plot of the DoA-DoD channel structure
        the energy is colorcoded over all couples of DoA-DoD
        """
        defaults = {
                    'phi':(-180, 180),
                    'normalize':False,
                    'reverse' : True,
                    'cmap':plt.cm.hot_r,
                    'mode':'center',
                    's':30,
                    'fontsize':12,
                    'edgecolors':'none',
                    'polar':False,
                    'mode':'mean',
                    'b3d':False,
                    'xa':0,
                    'xb':0
                    }
        fig = plt.figure()
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        # two side-by-side panels sharing the same polar/cartesian mode
        ax1 = fig.add_subplot(121,polar=kwargs['polar'])
        ax2 = fig.add_subplot(122,polar=kwargs['polar'])
        # the panel order follows the link orientation (xa vs xb)
        if kwargs['xa']<kwargs['xb']:
            fig,ax = self.plotd(d='dod',fig=fig,ax=ax1,**kwargs)
            fig,ax = self.plotd(d='doa',fig=fig,ax=ax2,**kwargs)
        else:
            fig,ax = self.plotd(d='doa',fig=fig,ax=ax1,**kwargs)
            fig,ax = self.plotd(d='dod',fig=fig,ax=ax2,**kwargs)
        return fig,ax
    def energy(self,mode='mean',sumray=False):
        """ calculates channel energy including antennas spatial filtering

        Parameters
        ----------
        mode : string
            center | mean | integ    (different manner to get the value)
        Friis : boolean
            apply the Frris coeff(2/(4p pi f)
        sumray: boolean
            ray energy cummulation indicator
        """
        #
        #  r x f
        #  axis 1 : ray
        #  axis 1 : frequency
        #
        # The Friis factor is applied exactly once : if it is already
        # included in the channel (isFriis), do not apply it again.
        if self.isFriis:
            Etot = bs.FUsignal.energy(self,axis=1,mode=mode,Friis=False)
        else:
            Etot = bs.FUsignal.energy(self,axis=1,mode=mode,Friis=True)
        if sumray:
            # cumulate energy over rays
            Etot = np.sum(Etot,axis=0)
        return Etot
    def wavefig(self, w, Nray=5):
        """ display the time-domain response of the first rays

        Parameters
        ----------
        w : waveform
        Nray : int
            number of rays to be displayed
        """
        # Construire W
        W = w.ft()
        # Appliquer W
        Y = self.apply(W)
        # r.require('graphics')
        # r.postscript('fig.eps')
        # r('par(mfrow=c(2,2))')
        # Y.fig(Nray)
        # back to time domain then plot the first Nray responses
        y = Y.iftd(100, 0, 50, 0)
        y.fig(Nray)
        # r.dev_off()
        # os.system("gv fig.eps ")
        # y.fidec()
        # Sur le FUsignal retourn
        # A gauche afficher le signal sur chaque rayon
        # A droite le meme signal decal
        # En bas a droite le signal resultant
def rayfig(self, k, W, col='red'):
""" build a figure with rays
Parameters
----------
k : ray index
W : waveform (FUsignal)
Notes
-----
W is apply on k-th ray and the received signal is built in time domain
"""
# get the kth Ray Transfer function
Hk = bs.FUDsignal(self.H.x, self.H.y[k,:])
dxh = Hk.dx()
dxw = W.dx()
w0 = W.x[0] # fmin W
hk0 = Hk.x[0] # fmin Hk
# on s'arrange pour que hk0 soit egal a w0 (ou hk0 soit legerement inferieur a w0)
if w0 < hk0:
np = ceil((hk0 - w0) / dxh)
hk0_new = hk0 - np * dxh
x = arange(hk0_new, hk0 + dxh, dxh)[0:-1]
Hk.x = hstack((x, Hk.x))
Hk.y = hstack((zeros(np), Hk.y))
if (abs(dxh - dxw) > 1e-10):
if (dxh < dxw):
# reinterpolate w
print(" resampling w")
x_new = arange(W.x[0], W.x[-1] + dxh, dxh)[0:-1]
Wk = W.resample(x_new)
dx = dxh
else:
# reinterpolate h
print(" resampling h")
x_new = arange(Hk.x[0], Hk.x[-1] + dxw, dxw)[0:-1]
Hk = Hk.resample(x_new)
dx = dxw
Wk = W
# qHk.x[0]==Wk.x[0]
def rssi(self,ufreq=0) :
""" Compute RSSI value for a frequency index
Parameters
----------
ufreq : int
index in the frequency range
Returns
-------
PrdB: float
RSSI value in dB
$$10\log_10 |a_k|^2$$
PrpdB : float
RSSI in a tap over a frequency band ufreq
$$10\log_10 |a_k e^{-2j \pi f \tau|^2$$
Notes
-----
This function will be deprecated by energy function
"""
# Amplitude
Ak = self.y[:, ufreq]
# Power
Pr = np.sum(Ak*np.conj(Ak))
# Complex amplitude
akp = Ak*np.exp(-2*1j*np.pi*self.x[ufreq]*self.taud)
Prp = np.abs(np.sum(akp))**2
PrdB = 10*np.log10(Pr)
PrpdB = 10*np.log10(Prp)
return PrdB,PrpdB
    def cut(self,threshold=0.99):
        """ cut the signal at an Energy threshold level

        Rays are sorted by decreasing energy and only the strongest rays
        accounting for `threshold` of the total energy are kept (in place).

        Parameters
        ----------
        threshold : float
            default 0.99
        """
        self.sort(typ='energy')
        E = self.eprfl()
        # normalized cumulative energy, strongest rays first
        cumE = np.cumsum(E)/sum(E)
        v = np.where(cumE[0,:]<threshold)[0]
        self.taud = self.taud[v]
        self.taue = self.taue[v]
        #self.tau = self.tau[v]
        self.doa = self.doa[v,:]
        self.dod = self.dod[v,:]
        self.y = self.y[v,...]
    def sort(self,typ='tau'):
        """ sort rays of the channel in place

        Parameters
        ----------
        typ  : string
            which parameter to sort '
                'tau' : by increasing total delay (default)
                'energy' : by decreasing ray energy

        Returns
        -------
        u : ndarray
            permutation applied to the ray axis
        """
        if typ == 'tau':
            # sort on the total delay (direct + excess)
            u = np.argsort(self.taud+self.taue)
        if typ == 'energy':
            E = self.eprfl()
            # descending order
            u = np.argsort(E,axis=0)[::-1]
            u = u[:,0,0]
        self.taud = self.taud[u]
        self.taue = self.taue[u]
        self.doa = self.doa[u]
        self.dod = self.dod[u]
        self.y = self.y[u,...]
        return(u)
    def showtap(self,**kwargs):
        """ show tap

        Parameters
        ----------
        same as tap

        See Also
        --------
        tap

        Notes
        -----
        NOTE(review): this method is broken as written -- `i`, `Nm`, `Ns`
        and `correlate` are undefined names here (NameError on call), and
        nothing is returned or plotted. Presumably `Nm`/`Ns` were meant to
        come from kwargs and `correlate` from numpy -- confirm intent
        before use.
        """
        # f x s x m x tap
        htap = self.tap(**kwargs)
        # sum over time m
        Et_htap = np.sqrt(np.sum(htap*np.conj(htap),axis=i-1))/Nm
        # sum over s
        Er_htap = np.sum(htap,axis=1)/Ns
        corrtap = correlate(Er_htap[0,:,0],np.conj(Er_htap[0,:,0]))
    def tap(self,**kwargs):
        """ calculate channel tap

        Parameters
        ----------

        fcGHz : float
            center frequency
        WMHz : float
            bandwidth
        Ntap : int
            number of taps (related to bandwith)
            as the bandwith increases the potential number of taps increases
        Ns : int
            number of spatial realizations
        Nm : int
            number of time samples
            the channel is sampled along a distance of half a wavelength
        Va : velocity of link termination a
        Vb : velocity of link termination b
        theta_va : float
            theta velocity termination a (in radians)
        phi_va :
            phi velocity termination a (in radians)
        theta_vb:
            theta velocity termination b (in radians)
        phi_vb :
            phi velocity termination b (in radians)

        Returns
        -------

        htap : complex taps, shape (Nf, Ns, Nm, Ntap)
        Et_htap : rms of htap over the time axis
        Er_htap : average of htap over the spatial axis
        corrtap : autocorrelation of Er_htap[0,:,0]

        Examples
        --------

        >>> from pylayers.signal.bsignal import *

        """
        defaults = {'fcGHz':4.5,
                    'WMHz':1,
                    'Ntap':3,
                    'Ns':8,
                    'Nm':10,
                    'Va':1,  #meter/s
                    'Vb':1,  #meter/s
                    'theta_va':0,
                    'phi_va':0,
                    'theta_vb':0,
                    'phi_vb':0 }
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        fcGHz=kwargs['fcGHz']
        WMHz=kwargs['WMHz']
        Ntap=kwargs['Ntap']
        Ns=kwargs['Ns']
        Nm=kwargs['Nm']
        Va = kwargs['Va']
        Vb = kwargs['Vb']
        # direction of link termination velocity vectors
        theta_va = kwargs['theta_va']
        theta_vb = kwargs['theta_vb']
        phi_va = kwargs['phi_va']
        phi_vb = kwargs['phi_vb']
        Nf = len(self.x)
        # maximum sampled distance (in time samples); 0.3 m/ns = c
        mmax = 0.3*WMHz*1e6/(2*fcGHz*(Va+Vb))
        lam = 0.3/fcGHz
        lamo2 = lam/2.
        # Doppler frequencies of each termination
        fmaHz = (Va/0.3)*fcGHz
        fmbHz = (Vb/0.3)*fcGHz
        # Coherence Time
        Tca = 9/(14*np.pi*fmaHz)
        Tcb = 9/(14*np.pi*fmbHz)
        Tc  = 9/(14*np.pi*(fmaHz+fmbHz))
        # DoD DoA
        theta_a = self.dod[:,0]
        phi_a = self.dod[:,1]
        theta_b = self.doa[:,0]
        phi_b = self.doa[:,1]
        # unit direction vectors, 3 x r
        ska = np.array([np.cos(theta_a)*np.cos(phi_a),np.cos(theta_a)*np.sin(phi_a),np.sin(theta_a)])
        skb = np.array([np.cos(theta_b)*np.cos(phi_b),np.cos(theta_b)*np.sin(phi_b),np.sin(theta_b)])
        # Monte Carlo for spatial realization
        # s x m x tap
        ua0 = (np.cos(theta_va)+1)/2
        va0 = phi_va/(2*np.pi)
        ub0 = (np.cos(theta_vb)+1)/2
        vb0 = phi_vb/(2*np.pi)
        # standard deviation of velocity vector orientation is inversely
        # proportional to velocity magnitude
        ua = (((1/(Va+0.1))*np.random.rand(Ns)+ua0)%1)[:,None,None]
        va = (((1/(Va+0.1))*np.random.rand(Ns)+va0)%1)[:,None,None]
        ub = (((1/(Vb+0.1))*np.random.rand(Ns)+ub0)%1)[:,None,None]
        vb = (((1/(Vb+0.1))*np.random.rand(Ns)+vb0)%1)[:,None,None]
        # uniform sampling over the sphere
        tha = np.arccos(2*va-1)
        pha = 2*np.pi*ua
        thb = np.arccos(2*vb-1)
        phb = 2*np.pi*ub
        vax = np.cos(tha)*np.cos(pha)
        vay = np.cos(tha)*np.sin(pha)
        vaz = np.sin(tha)*np.cos(pha*0)
        vaxy = np.concatenate([vax[None,None,None,...],vay[None,None,None,...]])
        va = np.concatenate([vaxy,vaz[None,None,None,...]])
        vbx = np.cos(thb)*np.cos(phb)
        vby = np.cos(thb)*np.sin(phb)
        vbz = np.sin(thb)*np.cos(phb*0)
        vbxy = np.concatenate([vbx[None,None,None,...],vby[None,None,None,...]])
        # 3 x r x f x s x m x tap
        vb = np.concatenate([vbxy,vbz[None,None,None,...]])
        # beta : cosine between ray direction and velocity, r x f x s x m x tap
        betaa = np.sum(ska[:,:,None,None,None,None]*va,axis=0)
        betab = np.sum(skb[:,:,None,None,None,None]*vb,axis=0)
        # m discrete time axis
        # r x f x s x m x tap
        m = np.linspace(0,mmax,Nm)[None,None,None,:,None]
        # r x f x s x m x tap
        l  = np.arange(Ntap)[None,None,None,None,:]
        # l : r x f x s x m x tap
        tau = self.taud[:,None,None,None,None]+ \
              self.taue[:,None,None,None,None]
        # Doppler-induced delay drift of each ray
        ba  = betaa*Va*m/(0.3*WMHz*1e6)
        bb  = betab*Vb*m/(0.3*WMHz*1e6)
        tau2 = tau + ba + bb
        # S : r x f x s x m x tap (form 2.34 [D. Tse])
        S   = np.sinc(l-tau2*WMHz/1000.)
        # sum over r : f x s x m x tap
        htap = np.sum(S*self.y[...,None,None,None]*np.exp(-2*1j*np.pi*fcGHz*tau2),axis=0)
        # f x s x m x tap
        htap  = htap.reshape(Nf,Ns,Nm,Ntap)
        Et_htap = np.sqrt(np.sum(htap*np.conj(htap),axis=2))/Nm
        Er_htap = np.sum(htap,axis=1)/Ns
        corrtap = correlate(Er_htap[0,:,0],np.conj(Er_htap[0,:,0]))
        return(htap,Et_htap,Er_htap,corrtap)
# def minphas(self):
# """ construct a minimal phase FUsignal
# - Evaluate slope of the phase
# - deduce delay
# - update delay of FUDSignal
# - Compensation of phase slope to obtain minimal phase
# This methods updates the excess delay taue member.
# The samplinf frequency step should be
# # Examples
# # --------
# # .. plot::
# # :include-source:
# # >>> from pylayers.signal.bsignal import *
# # >>> import numpy as np
# # >>> fGHz = np.arange(2,11,0.1)
# # >>> tau1 = np.array([1,2,3])[:,None]
# # >>> y = np.exp(-2*1j*np.pi*fGHz[None,:]*tau1)/fGHz[None,:]
# # >>> H = Tchannel(x=fGHz,y=y,tau=np.array([15,17,18]))
# # >>> f,a = H.plot(typ=['ru'],xlabels=['Frequency GHz'])
# # >>> t1 = plt.suptitle('Before minimal phase compensation')
# # >>> H.minphas()
# # >>> H.taue
# # array([ 1., 2., 3.])
# # >>> f,a = H.plot(typ=['ru'],xlabels=['Frequency GHz'])
# # >>> t2 = plt.suptitle('After minimal phase compensation')
# """
# f = self.x
# phase = np.unwrap(np.angle(self.y))
# dphi = phase[:, -1] - phase[:, 0]
# df = self.x[-1] - self.x[0]
# slope = dphi / df
# #if slope >0:
# # print 'm inphas Warning : non causal FUSignal'
# #phi0 = +1j*slope*(f[-1]+f[0]/2)
# F, S = np.meshgrid(f, slope)
# #E = exp(-1j*slope*f+phi0)
# E = np.exp(-1j * S * F)
# self.y = self.y * E
# self.taue = -slope / (2 * np.pi)
# # update total delay
# #self.tau = self.tau+self.taue
    def ifft(self):
        """ inverse Fourier Transform

        Returns the time-domain counterpart of the channel as a
        TUDchannel, carrying over the per-ray delays taud/taue.

        Examples
        --------

        >>> from pylayers.simul.link import *
        >>> L = DLink(verbose=False)
        >>> aktk = L.eval(force=True)
        >>> L.H.cut()
        >>> #T1 = L.H.totime()
        >>> #f,a = T1.plot(typ='v')
        >>> #L.H.minphas()
        >>> #T2 = L.H.totime()
        >>> #f,a = T2.plot(typ='v')

        """
        y = fft.ifft(self.y)
        # time span is the inverse of the frequency step
        T = 1/(self.x[1]-self.x[0])
        x = np.linspace(0,T,len(self.x))
        h = TUDchannel(x,y,self.taud,self.taue)
        return(h)
def totime(self, Nz=1, ffts=0):
""" transform to TUDchannel
Parameters
----------
Nz : int
Number of zeros for zero padding
ffts : nt
fftshift indicator (default 0 )
Examples
--------
>>> #from pylayers.simul.link import *
>>> #L = DLink(verbose=False)
>>> #aktk = L.eval()
>>> #L.H.cut()
>>> #T1 = L.H.totime()
>>> #f,a = T1.plot(typ='v')
>>> #L.H.minphas()
>>> #T2 = L.H.totime()
>>> #f,a = T2.plot(typ='v')
See Also
--------
FUsignal.ift
"""
Nray = len(self.taud)
s = self.ift(Nz, ffts)
sy_shifted = fft.fftshift(s.y,axes=-1)
h = TUDchannel(s.x, sy_shifted, self.taud,self.taue)
return(h)
    def iftd(self, Nz=1, tstart=-10, tstop=100, ffts=0):
        """ time pasting

        Builds one translated time signal per ray on a common time base.

        Parameters
        ----------

        Nz : int
            Number of zeros
        tstart : float
        tstop : float
        ffts : int
            fftshift indicator

        Returns
        -------

        rf : TUsignal (1,N)

        See Also
        --------

        TUsignal.translate

        """
        tau = self.taud+self.taue
        Nray = len(tau)
        s = self.ift(Nz, ffts)
        x = s.x
        dx = s.dx()
        # common time base covering [tstart, tstop)
        x_new = np.arange(tstart, tstop, dx)
        yini = np.zeros((Nray, len(x_new)))
        rf = bs.TUsignal(x_new, yini)
        #
        # initializes a void signal
        #
        for i in range(Nray):
            r = bs.TUsignal(x_new, np.zeros(len(x_new)))
            si = bs.TUsignal(x, s.y[i, :])
            # shift ray i by its total delay
            si.translate(tau[i])
            r = r + si
            rf.y[i, :] = r.y
        return rf
def rir(self, Nz, ffts=0):
""" construct ray impulse response
Parameters
----------
Nz : number of zeros for zero padding
ffts : fftshift indicator
0 no fftshift
1 apply fftshift
Returns
-------
rir : TUsignal
See Also
--------
pylayers.signal.bsignal.
"""
tau = self.taud + self.taue
taumin = min(tau)
taumax = max(tau)
dtau = (taumax-taumin)
self.s = self.ift(Nz, ffts)
t0 = self.s.x[0]
te = self.s.x[-1]
shy = self.s.y.shape
dx = self.s.x[1]-self.s.x[0]
# Delta Tau + Npoints
N = np.ceil(dtau/dx)+shy[-1]
# convert tau in an integer offset
# taumin ray is not shifted
itau = np.floor((tau-taumin)/dx).astype(int)
U = np.ones((shy[0],shy[-1]),dtype=int)
CU = np.cumsum(U,axis=1)-1 #-1 to start @ value 0
rir = np.zeros((shy[0],N))
col1 = np.repeat(np.arange(shy[0],dtype=int),shy[-1])
col2 = (CU+itau[:,None]).ravel()
index = np.vstack((col1,col2)).T
rir[index[:,0],index[:,1]] = self.s.y.ravel()
t = np.linspace(t0+taumin,te+taumax,N)
return bs.TUsignal(x=t, y=rir)
    def ft1(self, Nz, ffts=0):
        """ construct CIR from ifft(RTF)

        Sums the time-translated ray signals into a single TUsignal.

        Parameters
        ----------

        Nz : int
            number of zeros for zero padding
        ffts : int
            fftshift indicator
            0  no fftshift
            1  apply fftshift

        Returns
        -------

        r : TUsignal

        See Also
        --------

        pylayers.signal.bsignal.

        """
        tau = self.taud + self.taue
        self.s = self.ift(Nz, ffts)
        x = self.s.x
        r  = bs.TUsignal(x=x, y=np.zeros(self.s.y.shape[1:]))
        if len(tau) == 1:
            # single ray : nothing to accumulate
            return(self.s)
        else:
            for i in range(len(tau)):
                si = bs.TUsignal(self.s.x, self.s.y[i, :])
                # shift ray i by its total delay and accumulate
                si.translate(tau[i])
                r = r + si
            return r
    def ftau(self, Nz=0, k=0, ffts=0):
        """ time superposition

        Returns the time signal of a single ray ``k`` translated by its
        total delay.

        Parameters
        ----------

        Nz : int
            number of zeros for zero padding
        k : int
            starting index
        ffts : int
            ffts = 0  no fftshift
            ffts = 1  apply fftshift

        Returns
        -------

        r : TUsignal

        """
        tau = self.taud + self.taue
        s = self.ift(Nz, ffts)
        x = s.x
        r = bs.TUsignal(x, np.zeros(len(x)))
        si = bs.TUsignal(s.x, s.y[k, :])
        # shift ray k by its total delay
        si.translate(tau[k])
        r = r + si
        return r
    def plot3d(self,fig=[],ax=[]):
        """ plot in 3D

        Stem plot of |y| versus (delay, frequency).

        Parameters
        ----------

        fig : matplotlib figure (created when [])
        ax  : matplotlib 3d axis (created when [])

        Examples
        --------

        .. plot::
            :include-source:

            >>> from pylayers.signal.bsignal import *
            >>> import numpy as np
            >>> N = 20
            >>> fGHz = np.arange(1,3,1)
            >>> taud = np.sort(np.random.rand(N))
            >>> alpha = np.random.rand(N,len(fGHz))
            >>> #s = Tchannel(x=fGHz,y=alpha,tau=taud)
            >>> #s.plot3d()

        """
        Ntau = np.shape(self.y)[0]
        Nf   = np.shape(self.y)[1]
        if fig==[]:
            fig = plt.figure()
        if ax == []:
            ax  = fig.add_subplot(111, projection = '3d')
        # one vertical stem per (ray, frequency) pair
        for k,f in enumerate(self.x):
            for i,j in zip(self.taud+self.taue,abs(self.y[:,k])):
                ax.plot([i,i],[f,f],[0,j],color= 'k')
        ax.set_xlabel('Delay (ns)')
        ax.set_xlim3d(0,max(self.taud+self.taue))
        ax.set_ylabel('Frequency (fGHz)')
        ax.set_ylim3d(self.x[0],self.x[-1])
        powermin = abs(self.y).min()
        powermax = abs(self.y).max()
        ax.set_zlabel('Power (linear)')
        ax.set_zlim3d(powermin,powermax)
def ft2(self, df=0.01):
""" build channel transfer function (frequency domain)
Parameters
----------
df : float
frequency step (default 0.01)
Notes
-----
1. get fmin and fmax
2. build a new base with frequency step df
3. Initialize a FUsignal with the new frequency base
4. build matrix tau * f (Nray x Nf)
5. buildl matrix E= exp(-2 j pi f tau)
6. resampling of FUDsignal according to f --> S
7. apply the element wise product E .* S
8. add all rays
"""
fmin = self.x[0]
fmax = self.x[-1]
tau = self.taud+self.taue
f = np.arange(fmin, fmax, df)
U = bs.FUsignal(f, np.zeros(len(f)))
TAUF = np.outer(tau, f)
E = np.exp(-2 * 1j * np.pi * TAUF)
S = self.resample(f)
ES = E * S.y
V = sum(ES, axis=0)
U.y = V
return U
def frombuf(self,S,sign=-1):
""" load a buffer from vna
Parameters
----------
S : buffer
sign : int (+1 |-1) for complex reconstruction
"""
N = len(self.x)
u = np.arange(0,N)*2
v = np.arange(0,N)*2+1
S21 = (S[u]+sign*1j*S[v]).reshape((1,N))
self.y = S21
def capacity(self,Pt,T=290,mode='blast'):
""" calculates channel Shannon capacity (no csi)
Parameters
----------
Pt : Power transmitted
T : Temperature (Kelvin)
mode : string
Returns
-------
C : Channel capacity (bit/s)
"""
kB = 1.3806488e-23
N0 = kB*T
dfGHz = self.x[1]-self.x[0]
BGHz = self.x[-1]-self.x[0]
Pb = N0*BGHz*1e9
H2 = self.y*np.conj(self.y)
snr = Pt[:,None]*H2[None,:]/Pb
c = np.log(1+snr)/np.log(2)
C = np.sum(c,axis=1)*dfGHz
SNR = np.sum(snr,axis=1)*dfGHz
return(C,SNR)
    def calibrate(self,filecal='calibration.mat',conjugate=False):
        """ calibrate data

        Divides (or multiplies, when undoing) the channel by a reference
        calibration channel and toggles the ``calibrated`` flag, so calling
        the method twice restores the original data.

        Parameters
        ----------

        filecal : string
            calibration file name "calibration.mat"
        conjugate : boolean
            default False

        """
        self.filecal = filecal
        Hcal = Tchannel()
        Hcal.load(filecal)
        assert (len(self.x) == len(Hcal.x)),"calibration file has not the same number of points"
        if not self.calibrated:
            # apply calibration
            if not(conjugate):
                self.y = self.y/Hcal.y
            else:
                self.y = self.y/np.conj(Hcal.y)
            self.calibrated = not self.calibrated
        else:
            # undo a previous calibration
            if not(conjugate):
                self.y = self.y*Hcal.y
            else:
                self.y = self.y*np.conj(Hcal.y)
            self.calibrated = not self.calibrated
    def pdp(self,win='hamming',calibrate=True):
        """ calculates power delay profile

        Calibrates and windows the frequency response if not already done,
        then returns its inverse Fourier transform.

        Parameters
        ----------

        win : string
            window name
        calibrate : bool
            apply calibration first if the channel is not yet calibrated

        Returns
        -------

        pdp : inverse Fourier transform of the (windowed) channel

        """
        self.win = win
        if calibrate and not self.calibrated:
            self.calibrate()
        if not self.windowed:
            self.window(win=win)
        # inverse Fourier transform
        pdp = self.ift(ffts=1)
        return pdp
def scatterers(self,pMS,pBS,mode='sbounce'):
"""
Parameters
----------
mode: Boolean
- image | sbounce (single bounce)
pMS : np.array (,3)
pBS : np.array (,3)
ang_offset : float (degrees)
Returns
-------
xs: estimated x scatterer coordinate
ys: estimated y scatterer coordinate
"""
def fun(p_s,pBS,PMs,tau,phi):
""" function to be minimized
Parameters
----------
p_s: np.array (,2)
2D scatterer coordinates containing the xs and ys coordinates
tau : float (in ns)
estimated delay
phi : float (in deg.)
estimated angle
pMS : np.array (,2)
containing the x and y MS coordinates
pBS : np.array (,2)
containing the x and y and z BS coordinates
"""
d0 = np.sqrt((pBS[0] - p_s[0])**2 +
(pBS[1] - p_s[1])**2 +
(pBS[2] - p_s[2])**2 ) # distance between the BS and the estimated scatterer
d1 = np.sqrt((pMS[0] - p_s[0])**2 +
(pMS[1] - p_s[1])**2 +
(pMS[2] - p_s[2])**2) # distance between the estimated scatterer and the MS
xs = p_s[0]
ys = p_s[1]
zs = p_s[2]
# Equations to be minimized
r_fres = 0.3*tau
eq1 = r_fres - (d0 + d1)
eq2 = xs - pBS[0] - d0 * np.cos(phi)
eq3 = ys - pBS[1] - d0 * np.sin(phi)
return np.abs(eq1) + np.abs(eq2) + np.abs(eq3)
Nscat = np.shape(self.y)[0]
phi = self.doa[:,1]
tau = self.taud
if mode == 'image': # image principal mode
xs = pBS[0] + tau*0.3*np.cos(phi)
ys = pBS[1] + tau*0.3*np.sin(phi)
if mode=='sbounce': # single bounce mode
xs = np.array([])
ys = np.array([])
zs = np.array([])
for k in range(Nscat): # loop over the all detected MPCs
#zguess = (pMS[0:2] + pBS[0:2])/2. # Initial guess
zguess = (pMS + pBS)/2. # Initial guess
#z = fmin(fun,zguess,(pBS[0:2],pMS[0:2],tau[k],phi[k]),disp=False) # minimizing Eq.
z = fmin(fun,zguess,(pBS,pMS,tau[k],phi[k]),disp=False) # minimizing Eq.
xs = np.append(xs,z[0])
ys = np.append(ys,z[1])
zs = np.append(zs,z[2])
return(xs,ys,zs)
class Ctilde(PyLayers):
""" container for the 4 components of the polarimetric ray channel
Attributes
----------
Ctt : bsignal.FUsignal
Ctp : bsignal.FUsignal
Cpt : bsignal.FUsignal
Cpp : bsignal.FUsignal
tauk : ndarray delays
tang : ndarray angles of departure
rang : ndarray angles of arrival
tangl : ndarray angles of departure (local)
rangl : ndarray angles of arrival (local)
fGHz : np.array
frequency array
nfreq : int
number of frequency point
nray : int
number of rays
Methods
-------
choose
load
mobility
doadod
show
energy
sort
prop2tran
"""
    def __init__(self):
        """ class constructor

        Notes
        -----

        transpose == False   (r,f)
        transpose == True    (f,r)

        A Ctilde object can be :
            + returned from eval method of a Rays object.
            + generated from a statistical model of the propagation channel

        The default object is a single unit co-polarized ray at 2.4 GHz
        expressed in the global frame.

        """
        # by default C is expressed between the global frames
        self.islocal = False
        # by default antenna rotation matrices are identity
        self.Ta = np.eye(3)
        self.Tb = np.eye(3)
        self.fGHz = np.array([2.4])
        # a single ray
        self.nray = 1
        # 2x2 polarimetric components as FUsignal (one row per ray)
        self.Ctt = bs.FUsignal(x=self.fGHz,y=np.array([[1]]))
        self.Ctp = bs.FUsignal(x=self.fGHz,y=np.array([[0]]))
        self.Cpt = bs.FUsignal(x=self.fGHz,y=np.array([[0]]))
        self.Cpp = bs.FUsignal(x=self.fGHz,y=np.array([[1]]))
        #
        # angles of departure / arrival in the global frame
        self.tang = np.array([[np.pi/2,np.pi/2]])
        self.rang = np.array([[np.pi/2,3*np.pi/2]])
        #
        # same angles in the local (antenna) frames
        self.tangl = np.array([[np.pi/2,np.pi/2]])
        self.rangl = np.array([[np.pi/2,3*np.pi/2]])
    def inforay(self,iray,ifreq=0):
        """ provide information about a specific ray

        Prints the polarimetric channel terms of ray ``iray`` at frequency
        index ``ifreq``, with and without free-space distance losses.

        Parameters
        ----------

        iray : int
            ray index
        ifreq : int
            frequency index (default 0)

        """
        # ray path length in meters (delay in ns x 0.3 m/ns)
        dray = self.tauk[iray]*0.3
        # free-space distance loss in dB
        draydB = 20*np.log10(1./dray)
        Ctt = self.Ctt.y[iray,ifreq]
        Ctp = self.Ctp.y[iray,ifreq]
        Cpt = self.Cpt.y[iray,ifreq]
        Cpp = self.Cpp.y[iray,ifreq]
        # channel terms with the distance loss compensated
        Cttc = Ctt*dray
        Ctpc = Ctp*dray
        Cppc = Cpp*dray
        Cptc = Cpt*dray
        if self.islocal:
            print("between local frames")
            print("--------------------")
        else:
            print("between global frames")
            print("--------------------")
        print('distance losses',draydB)
        # each term : dB value, -inf when the amplitude is exactly zero
        if (np.abs(Cttc)!=0):
            CttdB  = 20*np.log10(np.abs(Ctt))
            CttcdB = 20*np.log10(np.abs(Cttc))
        else:
            CttdB = -np.inf
            CttcdB = -np.inf
        if (np.abs(Cppc)!=0):
            CppdB = 20*np.log10(np.abs(Cpp))
            CppcdB = 20*np.log10(np.abs(Cppc))
        else:
            CppdB = -np.inf
            CppcdB = -np.inf
        if (np.abs(Ctpc)!=0):
            CtpdB = 20*np.log10(np.abs(Ctp))
            CtpcdB =20*np.log10(np.abs(Ctpc))
        else:
            CtpdB = -np.inf
            CtpcdB = -np.inf
        if (np.abs(Cptc)!=0):
            CptdB = 20*np.log10(np.abs(Cpt))
            CptcdB = 20*np.log10(np.abs(Cptc))
        else:
            CptdB = -np.inf
            CptcdB = -np.inf
        print('Without distance losses (Interactions only)')
        print("-----------------------------------------------")
        print('co-pol (tt,pp) dB :',CttcdB,CppcdB)
        print('cross-pol (tt,pp) dB :',CtpcdB,CptcdB)
        print('With distance losses (Interactions + distance)')
        print("-----------------------------------------------")
        print('co-pol (tt,pp) dB :',CttdB,CppdB)
        print('cross-pol (tp,pt) dB :',CtpdB,CptdB)
def saveh5(self,Lfilename,idx,a,b):
""" save Ctilde object in hdf5 format
Parameters
----------
Lfilename : string
Layout filename
idx : int
file identifier number
a : np.ndarray
postion of point a (transmitter)
b : np.ndarray
postion of point b (receiver)
"""
Lfilename=Lfilename.split('.')[0]
_filename= Lfilename +'_' + str(idx).zfill(5) + '.hdf5'
filename=pyu.getlong(_filename,pstruc['DIRCT'])
# save channel in global basis
# new call to locbas
if self.islocal:
self.locbas()
# try/except to avoid loosing the h5 file if
# read/write error
try:
f=h5py.File(filename,'w')
f.create_dataset('Ta',shape=np.shape(self.Ta),data=self.Ta)
f.create_dataset('Tb',shape=np.shape(self.Tb),data=self.Tb)
f.create_dataset('tang',shape=np.shape(self.tang),data=self.tang)
f.create_dataset('rang',shape=np.shape(self.rang),data=self.rang)
f.create_dataset('tauk',shape=np.shape(self.tauk),data=self.tauk)
f.create_dataset('fGHz',shape=np.shape(self.fGHz),data=self.fGHz)
f.create_dataset('Ctt_y',shape=np.shape(self.Ctt.y),data=self.Ctt.y)
f.create_dataset('Cpp_y',shape=np.shape(self.Cpp.y),data=self.Cpp.y)
f.create_dataset('Cpt_y',shape=np.shape(self.Cpt.y),data=self.Cpt.y)
f.create_dataset('Ctp_y',shape=np.shape(self.Ctp.y),data=self.Ctp.y)
f.create_dataset('Tx',shape=np.shape(a),data=a)
f.create_dataset('Rx',shape=np.shape(b),data=b)
f.close()
except:
f.close()
raise NameError('Channel.Ctilde: issue when writting h5py file')
def loadh5(self,Lfilename,idx,output=True):
""" load Ctilde object in hdf5 format
Parameters
----------
Lfilename : string
Layout filename
idx : int
file identifier number
output : bool
return an output precised in return
Returns
-------
if output:
(Layout filename , Tx position, Rx position)
"""
_Lfilename=Lfilename.split('.')[0]
_filename= _Lfilename +'_' + str(idx).zfill(5) + '.hdf5'
filename=pyu.getlong(_filename,pstruc['DIRCT'])
try:
f=h5py.File(filename,'r')
self.fGHz = f['fGHz'][:]
self.tang = f['tang'][:]
self.rang = f['rang'][:]
self.tauk = f['tauk'][:]
self.Ta = f['Ta'][:]
self.Tb = f['Tb'][:]
Ctt = f['Ctt_y'][:]
Cpp = f['Cpp_y'][:]
Ctp = f['Ctp_y'][:]
Cpt = f['Cpt_y'][:]
self.Ctt = bs.FUsignal(self.fGHz, Ctt)
self.Ctp = bs.FUsignal(self.fGHz, Ctp)
self.Cpt = bs.FUsignal(self.fGHz, Cpt)
self.Cpp = bs.FUsignal(self.fGHz, Cpp)
tx = f['Tx'][:]
rx = f['Rx'][:]
self.nfreq = len(self.fGHz)
self.nray = np.shape(self.Cpp.y)[0]
f.close()
except:
f.close()
raise NameError('Channel.Ctilde: issue when reading h5py file')
if output :
return (Lfilename ,tx,rx)
    def _saveh5(self,filenameh5,grpname):
        """ save Ctilde object in hdf5 format compliant with Link Class

        Parameters
        ----------

        filenameh5  : str
            file name of h5py file Link format
        grpname  : int
            groupname in filenameh5

        """
        # back to global frame
        if self.islocal:
            self.locbas()
        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        # try/except to avoid loosing the h5 file if
        # read/write error
        # NOTE(review): if h5py.File itself raises, fh5 is unbound and the
        # except branch below fails with NameError -- confirm and restructure
        try:
            fh5=h5py.File(filename,'a')
            if not grpname in fh5['Ct'].keys():
                fh5['Ct'].create_group(grpname)
            else :
                print('Warning : Ct/'+grpname +'already exists in '+filenameh5)
            f=fh5['Ct/'+grpname]
            # save channel in global basis
            f.create_dataset('Ta',shape=np.shape(self.Ta),data=self.Ta)
            f.create_dataset('Tb',shape=np.shape(self.Tb),data=self.Tb)
            f.create_dataset('tang',shape=np.shape(self.tang),data=self.tang)
            f.create_dataset('rang',shape=np.shape(self.rang),data=self.rang)
            f.create_dataset('tauk',shape=np.shape(self.tauk),data=self.tauk)
            f.create_dataset('fGHz',shape=np.shape(self.fGHz),data=self.fGHz)
            f.create_dataset('Ctt_y',shape=np.shape(self.Ctt.y),data=self.Ctt.y)
            f.create_dataset('Cpp_y',shape=np.shape(self.Cpp.y),data=self.Cpp.y)
            f.create_dataset('Cpt_y',shape=np.shape(self.Cpt.y),data=self.Cpt.y)
            f.create_dataset('Ctp_y',shape=np.shape(self.Ctp.y),data=self.Ctp.y)
            fh5.close()
        except:
            fh5.close()
            raise NameError('Channel.Ctilde: issue when writting h5py file')
    def los(self,**kwargs):
        """ Line of site channel

        Builds a single-ray LOS channel between points pa and pb and
        expresses it in the local antenna frames (final locbas call).

        Parameters
        ----------

        pa : np.array (,3)  termination a position
        pb : np.array (,3)  termination b position
        fGHz : np.array (,Nf)  frequency base
        Ta : np.array (3,3)  rotation matrix side a
        Tb : np.array (3,3)  rotation matrix side b

        """
        defaults = {'pa':np.r_[197,189.8,1.65]
                   ,'pb': np.r_[220,185,6]
                   ,'fGHz':np.r_[32.6]
                   ,'Ta':np.eye(3)
                   ,'Tb':np.array([[0.28378894, -0.8972627, -0.33820628],
                                   [-0.57674955, -0.44149706, 0.68734293],
                                   [-0.76604425, 0., -0.64278784]])
                   }
        for k in defaults:
            if k not in kwargs:
                kwargs[k]=defaults[k]
        self.pa = kwargs['pa']
        self.pb = kwargs['pb']
        self.fGHz = kwargs['fGHz']
        self.Ta = kwargs['Ta']
        self.Tb = kwargs['Tb']
        self.nray = 1
        # unit vector and distance from a to b
        si = self.pb-self.pa
        d = np.r_[np.sqrt(np.sum(si*si))]
        si = si/d
        # delay in ns (0.3 m/ns)
        self.tauk = d/0.3
        #
        # ka = - kb for LOS
        #
        tha = np.arccos(si[2])
        pha = np.arctan2(si[1],si[0])
        thb = np.arccos(-si[2])
        phb = np.arctan2(-si[1],-si[0])
        self.tang = np.array([tha,pha]).reshape((1,2))
        self.rang = np.array([thb,phb]).reshape((1,2))
        # free-space amplitude 1/d on the co-polar terms only
        U = np.ones(len(self.fGHz),dtype=complex)/d[0]
        Z = np.zeros(len(self.fGHz),dtype=complex)
        self.Ctt = bs.FUsignal(self.fGHz, U)
        self.Ctp = bs.FUsignal(self.fGHz, Z)
        self.Cpt = bs.FUsignal(self.fGHz, Z)
        self.Cpp = bs.FUsignal(self.fGHz, U)
        self.locbas()
    def _loadh5(self,filenameh5,grpname,**kwargs):
        """ load Ctilde object in hdf5 format

        Parameters
        ----------

        filenameh5  : str
            file name of h5py file Link format
        grpname  : int
            groupname in filenameh5

        """
        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        # NOTE(review): if h5py.File itself raises, fh5 is unbound and the
        # except branch below fails with NameError -- confirm and restructure
        try:
            fh5=h5py.File(filename,'r')
            f = fh5['Ct/'+grpname]
            self.fGHz = f['fGHz'][:]
            self.tang = f['tang'][:]
            self.rang = f['rang'][:]
            self.tauk = f['tauk'][:]
            self.Ta = f['Ta'][:]
            self.Tb = f['Tb'][:]
            Ctt = f['Ctt_y'][:]
            Cpp = f['Cpp_y'][:]
            Ctp = f['Ctp_y'][:]
            Cpt = f['Cpt_y'][:]
            self.Ctt = bs.FUsignal(self.fGHz, Ctt)
            self.Ctp = bs.FUsignal(self.fGHz, Ctp)
            self.Cpt = bs.FUsignal(self.fGHz, Cpt)
            self.Cpp = bs.FUsignal(self.fGHz, Cpp)
            self.nfreq = len(self.fGHz)
            self.nray = np.shape(self.Cpp.y)[0]
            fh5.close()
        except:
            fh5.close()
            raise NameError('Channel.Ctilde: issue when reading h5py file')
    def mobility(self, v, dt):
        """ modify channel for uniform mobility

        Parameters
        ----------

        v : float
            velocity (m/s)
        dt : float
            delta t (s)

        Notes
        -----

        Calculate a channel field from Ctilde and v(terminal vitese)
        and dt(time of deplacement)

        dt en s  (observation time between 2 Rx position)
        v en m/s (vitesse de changement de Rx)

        Returns
        -------

        tau : modified Ctilde

        """
        c = 0.3  # m/ns celerity of light
        tauk = self.tauk
        tang = self.tang
        rang = self.rang
        # ray vector range (delay to distance)
        rk = tauk * c
        rk_mod = abs(rk)
        # unit direction along each ray
        sk_ch = rk / rk_mod
        # cos_alph =dot(v/abs(v),sk_ch)
        # angle between the velocity and the ray direction
        cos_alph = (v * sk_ch) / abs(v)
        self.cos_alph = cos_alph
        # projected displacement along each ray during dt
        rk_ch = rk_mod * cos_alph * abs(v) * dt
        sk_ch_ch = (rk + v * dt) / (rk_ch + cos_alph * abs(v) * dt)
        # updated delay (distance back to ns)
        tauk_ch = (abs(rk_ch) * sk_ch_ch) / c
        return(tauk_ch)
def plotd (self, d='doa', **kwargs):
""" plot direction of arrival/departure
Parameters
----------
d: string
'doa' | 'dod'
display direction of departure | arrival
fig : plt.figure
ax : plt.axis
phi: tuple (-180, 180)
phi angle
normalize: bool
energy normalized
reverse : bool
inverse theta and phi represenation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar: bool
title : bool
"""
defaults = {
'fig': [],
'ax': [],
'phi':(-180, 180),
'normalize':False,
'reverse' : True,
'cmap':plt.cm.hot_r,
'mode':'center',
's':30,
'fontsize':22,
'edgecolors':'none',
'b3d':False,
'polar':False,
'colorbar':False,
'title' : False
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if d =='dod':
tit = 'DOD : A'
di = getattr(self, 'tang')
elif d == 'doa':
tit = 'DOA : B'
di = getattr(self, 'rang')
else :
raise AttributeError('d attribute can only be doa or dod')
# remove non plt.scatter kwargs
phi = kwargs.pop('phi')
b3d = kwargs.pop('b3d')
the = (0,180)
fontsize = kwargs.pop('fontsize')
polar = kwargs.pop('polar')
fig = kwargs.pop('fig')
ax = kwargs.pop('ax')
colorbar = kwargs.pop('colorbar')
reverse = kwargs.pop('reverse')
normalize = kwargs.pop('normalize')
mode = kwargs.pop('mode')
title = kwargs.pop('title')
if fig == []:
fig = plt.figure()
Ett, Epp, Etp, Ept = self.energy(mode=mode,Friis=True)
Etot = Ett+Epp+Etp+Ept + 1e-15
if normalize:
Emax = max(Etot)
Etot = Etot / Emax
#
#
#
# col = 1 - (10*log10(Etot)-Emin)/(Emax-Emin)
# WARNING polar plot require radian angles
if polar :
al = 1.
alb = 180. / np.pi
phi=np.array(phi)
the=np.array(the)
if reverse :
phi[0] = phi[0]*np.pi/180
phi[1] = phi[1]*np.pi/180
the[0] = the[0]
the[1] = the[1]
else :
phi[0] = phi[0]
phi[1] = phi[1]
the[0] = the[0]*np.pi/180
the[1] = the[1]*np.pi/180
else :
al = 180. / np.pi
alb = 180. / np.pi
col = 10 * np.log10(Etot)
kwargs['c'] = col
if len(col) != len(di):
print("len(col):", len(col))
print("len(di):", len(dir))
if b3d:
ax = fig.add_subplot(111,projection='3d')
ax.scatter(1.05*array(xa),1.05*array(ya),1.05*array(za),'b')
ax.scatter(1.05*array(xb),1.05*array(yb),1.05*array(zb),'r')
else:
if ax == []:
ax = fig.add_subplot(111, polar=polar)
if reverse :
scat = ax.scatter(di[:, 1] * al, di[:, 0] * alb, **kwargs)
ax.axis((phi[0], phi[1], the[0], the[1]))
ax.set_xlabel('$\phi(^{\circ})$', fontsize=fontsize)
ax.set_ylabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
else:
scat = ax.scatter(di[:, 0] * al, di[:, 1] * alb, **kwargs)
ax.axis((the[0], the[1], phi[0], phi[1]))
ax.set_xlabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
ax.set_ylabel('$\phi(^{\circ})$', fontsize=fontsize)
if title:
ax.set_title(tit, fontsize=fontsize+2)
ll = ax.get_xticklabels()+ax.get_yticklabels()
for l in ll:
l.set_fontsize(fontsize)
if colorbar:
#divider = make_axes_locatable(ax)
#cax = divider.append_axes("right",size="5%",pad=0.05)
clb = plt.colorbar(scat,ax=ax)
if normalize:
clb.set_label('dB',size=fontsize)
else:
clb.set_label('Path Loss (dB)',size=fontsize)
for t in clb.ax.get_yticklabels():
t.set_fontsize(fontsize)
return (fig, ax)
    def doadod(self, **kwargs):
        """ doadod scatter plot

        Parameters
        ----------

        phi : tuple (-180, 180)
            phi angle
        normalize : bool
            energy normalized
        reverse : bool
            inverse theta and phi representation
        polar : bool
            polar representation
        cmap : matplotlib.cmap
        mode : string
            'center' | 'mean' | 'in'
        s : float
            scatter dot size
        fontsize : float
        edgecolors : bool
        colorbar : bool
        xa :
        xb :

        Notes
        -----

        scatter plot of the DoA-DoD channel structure
        the energy is color coded over all couples of DoA-DoD

        Examples
        --------

        >>> from pylayers.antprop.channel import *

        See Also
        --------

        pylayers.signal.bsignal.energy

        """
        defaults = {
                    'phi':(-180, 180),
                    'normalize':False,
                    'reverse' : True,
                    'cmap':plt.cm.hot_r,
                    'mode':'center',
                    's':30,
                    'fontsize':12,
                    'edgecolors':'none',
                    'polar':False,
                    'b3d':False,
                    # NOTE(review): 'mode' appears twice in this literal;
                    # the later duplicate wins, so the effective default
                    # is 'mean' -- confirm which default is intended
                    'mode':'mean',
                    'colorbar':False,
                    'xa':0,
                    'xb':1
                    }
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        xa = kwargs.pop('xa')
        xb = kwargs.pop('xb')
        if 'fig' not in kwargs:
            fig = plt.gcf()
            kwargs['fig']=fig
        else:
            fig = kwargs['fig']
        ax1 = fig.add_subplot(121,polar=kwargs['polar'])
        ax2 = fig.add_subplot(122,polar=kwargs['polar'])
        # xa/xb select which side is drawn on the left subplot
        if xa<xb:
            fig,ax1 = self.plotd(d='dod',ax=ax1,**kwargs)
            fig,ax2 = self.plotd(d='doa',ax=ax2,**kwargs)
        else:
            fig,ax1 = self.plotd(d='doa',ax=ax1,**kwargs)
            fig,ax2 = self.plotd(d='dod',ax=ax2,**kwargs)
        return fig,[ax1,ax2]
    def locbas(self,**kwargs):
        """ global reference frame to local reference frame

        If Tt and Tr are [] the global channel is retrieved

        Parameters
        ----------

        Ta : rotation matrix 3x3  side a
            default []
        Tb : rotation matrix 3x3  side b
            default []

        Returns
        -------

        This method affects the boolean islocal
        This method update the ray propagation channel in either local or global frame
        self.Ta and self.Tb are updated with input parameters Ta an Tb

        C : ray propagation channel (2x2xrxf) complex
            either local or global depends on self.islocal boolean value

        Examples
        --------

        >>> C = Ctilde()
        >>> Ta = MEulerAngle(np.pi/2,np.pi/2,np.pi/2.)
        >>> Tb = MEulerAngle(np.pi/3,np.pi/3,np.pi/3.)
        >>> C.locbas(Ta=Ta,Tb=Tb)

        """
        # get Ctilde frequency axes
        fGHz = self.fGHz
        # if rotation matrices are passed in argument
        # back to global if local
        if ('Ta' in kwargs) & ('Tb' in kwargs):
            if self.islocal:
                # calling locbas() with no args toggles back to global
                self.locbas()
                self.islocal=False
            self.Tb = kwargs['Tb']
            self.Ta = kwargs['Ta']
        # angular axes
        #
        # tang : r x 2
        # rang : r x 2
        #
        # Ra : 2 x 2 x r
        # Rb : 2 x 2 x r
        #
        # tangl : r x 2
        # rangl : r x 2
        #
        tangl,Ra = geu.BTB(self.tang, self.Ta)
        rangl,Rb = geu.BTB(self.rang, self.Tb)
        # transposing one 2x2 rotation inverts it, which is what switches
        # the channel between local and global representation
        if self.islocal:
            Ra = Ra.transpose((1,0,2))
            self.islocal=False
        else:
            Rb = Rb.transpose((1,0,2))
            self.islocal=True
        #
        # update direction of departure and arrival
        #
        self.tangl = tangl
        self.rangl = rangl
        #uf = np.ones(self.nfreq)
        #
        # r0 : r x 1(f)
        #
        #r0 = rb00
        r0 = Rb[0,0,:][:, None]
        #r1 = rb01
        r1 = Rb[0,1,:][:, None]
        # left-multiply by Rb (side b rotation)
        t00 = r0 * self.Ctt.y + r1 * self.Cpt.y
        t01 = r0 * self.Ctp.y + r1 * self.Cpp.y
        #r0 = rb10
        r0 = Rb[1, 0,:][:, None]
        #r1 = rb11
        r1 = Rb[1, 1,:][:, None]
        t10 = r0 * self.Ctt.y + r1 * self.Cpt.y
        t11 = r0 * self.Ctp.y + r1 * self.Cpp.y
        #r0 = ra00
        r0 = Ra[0, 0, :][:, None]
        #r1 = ra10
        r1 = Ra[1, 0, :][:, None]
        # right-multiply by Ra (side a rotation)
        Cttl = t00 * r0 + t01 * r1
        Cptl = t10 * r0 + t11 * r1
        #r0 = ra01
        r0 = Ra[0, 1, :][:, None]
        #r1 = ra11
        r1 = Ra[1, 1, :][:, None]
        Ctpl = t00 * r0 + t01 * r1
        Cppl = t10 * r0 + t11 * r1
        self.Ctt = bs.FUsignal(fGHz, Cttl)
        self.Ctp = bs.FUsignal(fGHz, Ctpl)
        self.Cpt = bs.FUsignal(fGHz, Cptl)
        self.Cpp = bs.FUsignal(fGHz, Cppl)
        #return self
def Cg2Cl(self, Tt=[], Tr=[]):
""" global reference frame to local reference frame
If Tt and Tr are [] the global channel is retrieved
Parameters
----------
Tt : Tx rotation matrix 3x3
default []
Tr : Rx rotation matrix 3x3
default []
Returns
-------
Cl : Ctilde local
Examples
--------
"""
# get frequency axes
fGHz = self.fGHz
if (Tt !=[]) & (Tr!=[]):
self.Ta = Tt
self.Tb = Tr
else:
if (hasattr(self,'Ta')) & (hasattr(self, 'Tb')):
self.Ta = self.Ta.transpose()
self.Tb = self.Tb.transpose()
else:
return
# get angular axes
# Rt (2x2)
# Rr (2x2)
#
# tang : r x 2
# rang : r x 2
#
# Rt : 2 x 2 x r
# Rr : 2 x 2 x r
#
# tangl : r x 2
# rangl : r x 2
#
tangl , Ra = geu.BTB(self.tang, self.Ta)
rangl , Rb = geu.BTB(self.rang, self.Tb)
Rb = Rb.transpose((1,0,2))
#
# update direction of departure and arrival
#
self.tang = tangl
self.rang = rangl
#uf = np.ones(self.nfreq)
#
# r0 : r x 1(f)
#
#r0 = np.outer(Rr[0, 0,:], uf)
r0 = Rr[0,0,:][:,None]
#r1 = np.outer(Rr[0, 1,:], uf)
r1 = Rr[0,1,:][:,None]
t00 = r0 * self.Ctt.y + r1 * self.Cpt.y
t01 = r0 * self.Ctp.y + r1 * self.Cpp.y
#r0 = np.outer(Rr[1, 0,:], uf)
r0 = Rr[1, 0,:][:,None]
#r1 = np.outer(Rr[1, 1,:], uf)
r1 = Rr[1, 1,:][:,None]
t10 = r0 * self.Ctt.y + r1 * self.Cpt.y
t11 = r0 * self.Ctp.y + r1 * self.Cpp.y
#r0 = np.outer(Rt[0, 0,:], uf)
r0 = Rt[0,0,:][:,None]
#r1 = np.outer(Rt[1, 0,:], uf)
r1 = Rt[1,0,:][:,None]
Cttl = t00 * r0 + t01 * r1
Cptl = t10 * r0 + t11 * r1
#r0 = np.outer(Rt[0, 1,:], uf)
r0 = Rt[0,1,:][:,None]
#r1 = np.outer(Rt[1, 1,:], uf)
r1 = Rt[1,1,:][:,None]
Ctpl = t00 * r0 + t01 * r1
Cppl = t10 * r0 + t11 * r1
self.Ctt = bs.FUsignal(fGHz, Cttl)
self.Ctp = bs.FUsignal(fGHz, Ctpl)
self.Cpt = bs.FUsignal(fGHz, Cptl)
self.Cpp = bs.FUsignal(fGHz, Cppl)
return self
    def show(self, **kwargs):
        """ show the propagation channel

        Displays the four polarimetric components in a 2x2 grid of imshow
        panels.

        Parameters
        ----------

        typ   : 'm', 'l20' , 'r'
        cmap  : colormap
            default hot
        fontsize : int
            default 14

        Returns
        -------

        fig, (ax1, ax2, ax3, ax4)

        """
        defaults = {'typ': 'm',
                    'cmap': plt.cm.hot,
                    'fontsize':14}
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        if 'fig' not in kwargs:
            kwargs['fig'] = plt.figure()
        # theta-theta
        ax1 = kwargs['fig'].add_subplot(221)
        fig, ax1 = self.Ctt.imshow(ax=ax1,**kwargs)
        ax1.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
        ax1.set_title(u'$C_{\\theta\\theta}$',fontsize=kwargs['fontsize'])
        # theta-phi
        ax2 = kwargs['fig'].add_subplot(222)
        fig, ax2 = self.Ctp.imshow(ax=ax2,**kwargs)
        ax2.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
        ax2.set_title(u'$C_{\\theta\phi}$',fontsize=kwargs['fontsize'])
        # phi-theta
        ax3 = kwargs['fig'].add_subplot(223)
        fig, ax3 = self.Cpt.imshow(ax=ax3,**kwargs)
        ax3.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
        ax3.set_title(u'$C_{\phi\\theta}$',fontsize=kwargs['fontsize'])
        # phi-phi
        ax4 = kwargs['fig'].add_subplot(224)
        fig, ax4 = self.Cpp.imshow(ax=ax4,**kwargs)
        ax4.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
        ax4.set_title(u'$C_{\phi\phi}$',fontsize=kwargs['fontsize'])
        return fig, (ax1, ax2, ax3, ax4)
def check_reciprocity(self, C):
""" check channel reciprocity
Parameters
----------
C : Ctilde
Notes
-----
This is not properly implemented
"""
issue=[]
assert np.allclose(self.tauk, C.tauk)
for r in range(self.nray):
if not np.allclose(self.Ctt.y[r,:], C.Ctt.y[r,:]):
issue.append(r)
if len(issue) == 0:
print("Channel is reciprocal")
else:
print("WARNING Reciprocity issue WARNING")
print(len(issue),'/',self.nray, 'rays are not reciprocal,')
print("rays number with an issue :",issue)
# assert np.allclose(self.tang,C.rang)
# assert np.allclose(self.rang,C.tang)
def energy(self,mode='mean',Friis=True,sumray=False):
""" calculates energy on each channel
Parameters
----------
mode : string
'mean'
Friis: boolean
True
sumray: boolean
False
Returns
-------
ECtt : Energy on co channel tt
ECpp : Energy on co channel pp
ECtp : Energy on co channel tp
ECpt : Energy on co channel pt
See Also
--------
pylayers.signal.bsignal.FUsignal.energy
Notes
-----
r x f+
axis 0 : ray
axis 1 : frequency
"""
#
# r x f
# axis 0 : ray
# axis 1 : frequency
#
ECtt = self.Ctt.energy(axis=1,Friis=Friis,mode=mode)
ECtp = self.Ctp.energy(axis=1,Friis=Friis,mode=mode)
ECpt = self.Cpt.energy(axis=1,Friis=Friis,mode=mode)
ECpp = self.Cpp.energy(axis=1,Friis=Friis,mode=mode)
if sumray:
ECtt = np.sum(ECtt,axis=0)
ECtp = np.sum(ECtp,axis=0)
ECpt = np.sum(ECpt,axis=0)
ECpp = np.sum(ECpp,axis=0)
return ECtt, ECpp, ECtp, ECpt
def cut(self,threshold_dB=50):
""" cut rays from a energy threshold
Parameters
----------
threshold : float
default 0.99
"""
Ett, Epp, Etp, Ept = self.energy()
Etot = Ett+Epp+Etp+Ept
u = np.argsort(Etot)[::-1]
#cumE = np.cumsum(Etot[u])/sum(Etot)
profdB = 10*np.log10(Etot[u]/np.max(Etot))
#v1 = np.where(cumE<threshold)[0]
v = np.where(profdB>-threshold_dB)[0]
w = u[v]
self.selected = w
self.Eselected = Etot[w]
self.tauk = self.tauk[w]
self.tang = self.tang[w,:]
self.rang = self.rang[w,:]
self.Ctt.y = self.Ctt.y[w,:]
self.Cpp.y = self.Cpp.y[w,:]
self.Ctp.y = self.Ctp.y[w,:]
self.Cpt.y = self.Cpt.y[w,:]
def sort(self,typ='tauk'):
""" sort Ctilde with respect to typ (default tauk)
Parameters
----------
typ : string
sort w.r.t
'tauk' : delay (default)
'att' : theta Tx
'atp' : phi Tx
'art' : theta Rx
'arp' : phi Rx
'energy' : energy
"""
if typ == 'tauk':
u = np.argsort(self.tauk)
if typ == 'att':
u = np.argsort(self.tang[:, 0])
if typ == 'atp':
u = np.argsort(self.tang[:, 1])
if typ == 'art':
u = np.argsort(self.rang[:, 0])
if typ == 'arp':
u = np.argsort(self.rang[:, 1])
if typ == 'energy':
Ett, Epp, Etp, Ept = self.energy()
Etot = Ett+Epp+Etp+Ept
u = np.argsort(Etot)
self.tauk = self.tauk[u]
self.tang = self.tang[u,:]
self.rang = self.rang[u,:]
self.Ctt.y = self.Ctt.y[u,:]
self.Cpp.y = self.Cpp.y[u,:]
self.Ctp.y = self.Ctp.y[u,:]
self.Cpt.y = self.Cpt.y[u,:]
    def prop2tran(self,a=[],b=[],Friis=True,debug=False):
        r""" transform propagation channel into transmission channel

        Combines the 2x2 polarimetric propagation matrix (Ctt, Ctp, Cpt, Cpp)
        with the vector antenna patterns of antennas a and b to produce the
        transmission channel H.

        Parameters
        ----------

        a : antenna or array a
        b : antenna or array b

        Ta : np.array(3x3)
            unitary matrice for antenna orientation
        Tb : np.array(3x3)
            unitary matrice for antenna orientation
        Friis : boolean
            if True scale with :math:`-j\frac{\lambda}{f}`
        debug : boolean
            if True the antenna gain for each ray is stored

        Returns
        -------

        H : Tchannel(bs.FUsignal)


        """
        # NOTE(review): freq, nfreq, nray and sh are computed but unused below
        freq = self.fGHz
        nfreq = self.nfreq
        nray = self.nray
        sh = np.shape(self.Ctt.y)

        # select default antennas
        # omni polar theta 't' <=> vertical polarization
        #
        if a ==[]:
            a = ant.Antenna('Omni',param={'pol':'t','GmaxdB':0},fGHz=self.fGHz)
        if b ==[]:
            b = ant.Antenna('Omni',param={'pol':'t','GmaxdB':0},fGHz=self.fGHz)

        # evaluate antenna a pattern along the departure angles
        # (tangl presumably set in the local antenna basis by a prior
        #  call to locbas — TODO confirm)
        a.eval(th = self.tangl[:, 0], ph = self.tangl[:, 1])
        Fat = bs.FUsignal(a.fGHz, a.Ft)
        Fap = bs.FUsignal(a.fGHz, a.Fp)

        # evaluate antenna b pattern along the arrival angles
        #b.eval(th=self.rangl[:, 0], ph=self.rangl[:, 1], grid=False)
        b.eval(th = self.rangl[:, 0], ph = self.rangl[:, 1])
        Fbt = bs.FUsignal(b.fGHz, b.Ft)
        Fbp = bs.FUsignal(b.fGHz, b.Fp)

        #
        #  C : 2 x 2 x r x f
        #
        #  Ctt : r x f     (complex FUsignal)
        #  Cpp : r x f     (complex FUsignal)
        #  Ctp : r x f     (complex FUsignal)
        #  Cpt : r x f     (complex FUsignal)
        #
        #  a.Ft = r x (Na) x f  (complex ndarray)
        #  a.Fp = r x (Na) x f  (complex ndarray)
        #  b.Ft = r x (Nb) x f  (complex ndarray)
        #  b.Fp = r x (Nb) x f  (complex ndarray)
        #
        #  (r x f )  (r x Nt x f )
        #
        # This exploit * overloading in FUsignal
        # project the channel matrix onto the Tx antenna pattern:
        # t1/t2 are the theta/phi components of C applied to antenna a
        t1 = self.Ctt * Fat + self.Ctp * Fap
        t2 = self.Cpt * Fat + self.Cpp * Fap

        # depending on SISO or MIMO case
        # the shape of the received fields T1 and T2
        #
        # In MIMO case
        # a.Ft.y.shape  == (r x Na x f)
        # a.Fp.y.shape  == (r x Na x f)
        # In SISO case
        # a.Ft.y.shape  == (r x f)
        # a.Fp.y.shape  == (r x f)
        #
        # insert singleton axes so both cases broadcast to (r x Nb x Na x f)
        if len(t1.y.shape)==3:
            T1 = t1.y[:,None,:,:]
            T2 = t2.y[:,None,:,:]
        else:
            T1 = t1.y[:,None,None,:]
            T2 = t2.y[:,None,None,:]

        if len(Fbt.y.shape)==3:
            FBt = Fbt.y[:,:,None,:]
            FBp = Fbp.y[:,:,None,:]
        else:
            FBt = Fbt.y[:,None,None,:]
            FBp = Fbp.y[:,None,None,:]

        # determine the common interval on frequency axis
        # frequencies are rounded to an integer grid (x*100, i.e. 10 MHz
        # resolution for fGHz) before intersecting
        if np.sum(t1.x!=Fbt.x)>0:
            t1x_int = (np.round(t1.x*100)).astype(int)
            Fbtx_int = (np.round(Fbt.x*100)).astype(int)
            inter = np.intersect1d(t1x_int,Fbtx_int)
            ut = np.in1d(t1x_int,inter)
            uf = np.in1d(Fbtx_int,inter)
        else:
            ut = np.arange(len(t1.x))
            uf = np.arange(len(Fbt.x))

        assert(len(t1.x[ut])==len(Fbt.x[uf])),"problem in common index plage calculation"

        # contract over the polarization axis k, keeping
        # (ray l, Rx antenna j, Tx antenna i, frequency m)
        alpha1 = np.einsum('ljkm,lkim->ljim',FBt[...,uf],T1[...,ut])
        alpha2 = np.einsum('ljkm,lkim->ljim',FBp[...,uf],T2[...,ut])

        #alpha = t1 * Fbt + t2 * Fbp
        # Nd x Nr x Nt x Nf
        alpha = alpha1 + alpha2

        self.fGHz = t1.x[ut]

        H = Tchannel(x = self.fGHz,
                     y = alpha,
                     tau = self.tauk,
                     dod = self.tang,
                     doa = self.rang)

        if debug :
            # keep intermediate patterns and mean gains (dB) for inspection
            H.alpha=alpha
            H.Fat=Fat.y
            H.Fap=Fap.y
            H.Fbt=Fbt.y
            H.Fbp=Fbp.y
            H.Gat=10*np.log10(np.sum(Fat.y*np.conj(Fat.y),axis=1)/len(Fat.x))
            H.Gap=10*np.log10(np.sum(Fap.y*np.conj(Fap.y),axis=1)/len(Fap.x))
            H.Gbt=10*np.log10(np.sum(Fbt.y*np.conj(Fbt.y),axis=1)/len(Fbt.x))
            H.Gbp=10*np.log10(np.sum(Fbp.y*np.conj(Fbp.y),axis=1)/len(Fbp.x))

        if Friis:
            H.applyFriis()

        return H
if __name__ == "__main__":
    # interactive matplotlib mode so figures opened by doctests do not block
    plt.ion()
    doctest.testmod()
| [
2,
532,
9,
83,
19617,
25,
18274,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
492,
1459,
21412,
3712,
279,
2645,
6962,
13,
415,
22930,
13,
17620,
198,
198,
492,
44619,
388,
6874,
3712,
198,
220,
220,
220,
1058,
30814,
25,
198,
198,
... | 1.73881 | 96,160 |
########################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
########################################################################
import json
import os
from typing import Any, Dict
from aws_cdk import Duration, Stack
from aws_cdk.aws_iam import AnyPrincipal, Effect, PolicyStatement
from aws_cdk.aws_kms import Key
from aws_cdk.aws_lambda import DockerImageCode, DockerImageFunction, Tracing
from aws_cdk.aws_logs import RetentionDays
from aws_cdk.aws_secretsmanager import Secret, SecretStringGenerator
from aws_cdk.aws_sqs import Queue
from constructs import Construct
class CdkStack(Stack): # type: ignore
    """Stack for the AWS Lambda Oracle connection example."""

    def __init__(self, scope: Construct, construct_id: str, **kwargs: Any) -> None:
        """CDK entry point.

        Args:
            scope (Construct): scope of the cdk stack
            construct_id (str): construct id of the stack
            kwargs: forwarded to the base ``Stack`` constructor (env, tags, ...)
        """
        super().__init__(scope, construct_id, **kwargs)

        # single customer-managed key encrypts the secret, the DLQ and the
        # Lambda environment variables
        kms_key = Key(
            self,
            "PyOracleKMSKey",
            description="KMS key for Py Oracle Connection",
            enable_key_rotation=True,
            pending_window=Duration.days(7),
        )
        secret = self.build_connection_secret(kms_key)
        self.build_lambda(kms_key, secret)

    def build_lambda(self, kms_key: Key, secret: Secret) -> DockerImageFunction:
        """Build Lambda function with connection to Oracle database.

        Args:
            kms_key (Key): encryption key for the secret and env variables
            secret (Secret): secret used to store Oracle connection details

        Returns:
            DockerImageFunction: lambda function
        """
        stack_path = os.path.dirname(os.path.realpath(__file__))
        # Docker build context: <repo>/lambda, relative to this stack file
        lambda_path = os.path.join(stack_path, "..", "..", "lambda")

        dlq = Queue(
            self,
            "PyOracleConnectionLambdaDLQ",
            encryption_master_key=kms_key,
            retention_period=Duration.days(5),
        )
        # deny any non-TLS access to the dead-letter queue
        dlq.add_to_resource_policy(
            PolicyStatement(
                actions=["sqs:*"],
                effect=Effect.DENY,
                principals=[AnyPrincipal()],
                resources=[dlq.queue_arn],
                conditions={
                    "Bool": {"aws:secureTransport": "false"},
                },
            ),
        )
        fn = DockerImageFunction(
            self,
            "PyOracleConnectionLambda",
            function_name="py-oracle-connection-example",
            code=DockerImageCode.from_image_asset(directory=lambda_path),
            description="Example Lambda to illustrate connection to Oracle using Python",
            dead_letter_queue=dlq,
            environment={
                "POWERTOOLS_SERVICE_NAME": "connection-example",
                "POWERTOOLS_METRICS_NAMESPACE": "PyOracleConn",
                "REGION": self.region,
                "SECRET_NAME": secret.secret_name,
            },
            environment_encryption=kms_key,
            memory_size=128,
            tracing=Tracing.ACTIVE,
            reserved_concurrent_executions=5,
            timeout=Duration.seconds(45),
        )
        # least-privilege grants: decrypt env vars / read the secret only
        kms_key.grant_decrypt(fn)
        secret.grant_read(fn)
        return fn

    def build_connection_secret(self, kms_key: Key) -> Secret:
        """Secret for the Oracle DB Connection.

        Args:
            kms_key (Key): kms key for encryption

        Returns:
            Secret: secret in secret manager
        """
        # only the password is generated; host/port/sid/username are filled
        # in manually after deployment
        template = SecretStringGenerator(
            secret_string_template=json.dumps({"host": "", "port": "", "sid": "", "username": ""}),
            generate_string_key="password",
        )
        return Secret(
            self,
            "PyOracleConnectionCredentials",
            generate_secret_string=template,
            encryption_key=kms_key,
            secret_name="py-oracle-connection-credentials",
        )
| [
29113,
29113,
7804,
198,
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
12,
15,
198,
29113,
29113,
7804,
198,
11748,
33918,
198,
11748,
28... | 2.249314 | 1,821 |
#import unreal
import os

# Launch the Unreal Python scripting web client through its Windows batch file.
# NOTE(review): the absolute path is machine-specific — consider deriving it
# from an environment variable or a relative project path.
print("Start script web client")
os.system("C:/GIT/ProjectOazis/Plugins/UnrealPythonScripting/Content/Python/start.bat")
# NOTE(review): message names start_client.bat but start.bat is executed above
# — confirm which is intended.
print("start_client.bat")
2,
11748,
22865,
198,
11748,
28686,
198,
4798,
7203,
10434,
4226,
3992,
5456,
4943,
198,
418,
13,
10057,
7203,
34,
14079,
38,
2043,
14,
16775,
46,
1031,
271,
14,
23257,
1040,
14,
3118,
5305,
37906,
7391,
278,
14,
19746,
14,
37906,
14,... | 3.109091 | 55 |
from pyclesperanto_prototype._tier0 import Image
from pyclesperanto_prototype._tier1 import gaussian_blur
from pyclesperanto_prototype._tier0 import create_like
from pyclesperanto_prototype._tier0 import plugin_function
from pyclesperanto_prototype._tier2 import subtract_images
@plugin_function(categories=['filter', 'background removal', 'in assistant'])
def subtract_gaussian_background(input : Image, destination : Image = None, sigma_x : float = 2, sigma_y : float = 2, sigma_z : float = 2):
    """Removes low-frequency background by subtracting a Gaussian-blurred
    copy of the input image from the input itself.

    Parameters
    ----------
    input : Image
    destination : Image
    sigma_x : Number
    sigma_y : Number
    sigma_z : Number

    Returns
    -------
    destination

    References
    ----------
    ..[1] https://clij.github.io/clij2-docs/reference_subtractGaussianBackground
    """
    # low-pass (background) estimate of the input image
    background = create_like(destination)
    gaussian_blur(input, background, sigma_x, sigma_y, sigma_z)
    # input - background keeps the high-frequency foreground structures
    return subtract_images(input, background, destination)
| [
6738,
12972,
5427,
525,
14723,
62,
38124,
13557,
24948,
15,
1330,
7412,
198,
6738,
12972,
5427,
525,
14723,
62,
38124,
13557,
24948,
16,
1330,
31986,
31562,
62,
2436,
333,
198,
6738,
12972,
5427,
525,
14723,
62,
38124,
13557,
24948,
15,
... | 2.95493 | 355 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
import math
import os
from collections import OrderedDict
from loguru import logger
import torch
from torch.nn.parallel import DistributedDataParallel
from cvpods.checkpoint import DefaultCheckpointer
from cvpods.data import build_test_loader, build_train_loader
from cvpods.data.samplers.infinite import Infinite
from cvpods.evaluation import (
DatasetEvaluator,
inference_on_dataset,
inference_on_files,
print_csv_format,
verify_results
)
from cvpods.modeling.nn_utils.module_converter import maybe_convert_module
from cvpods.modeling.nn_utils.precise_bn import get_bn_modules
from cvpods.solver import build_lr_scheduler, build_optimizer
from cvpods.utils import comm
from cvpods.utils.compat_wrapper import deprecated
from cvpods.utils.dump.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from . import hooks
from .base_runner import RUNNERS, SimpleRunner
@RUNNERS.register()
class DefaultRunner(SimpleRunner):
    """
    A runner with default training logic. It does the following:

    1. Create a :class:`DefaultRunner` using model, optimizer, dataloader
       defined by the given config. Create a LR scheduler defined by the config.
    2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
       `resume_or_load` is called.
    3. Register a few common hooks defined by the config.

    It is created to simplify the **standard model training workflow** and reduce code boilerplate
    for users who only need the standard training workflow, with standard features.
    It means this class makes *many assumptions* about your training logic that
    may easily become invalid in a new research. In fact, any assumptions beyond those made in the
    :class:`DefaultRunner` are too much for research.

    The code of this class has been annotated about restrictive assumptions it makes.
    When they do not work for you, you're encouraged to:

    1. Overwrite methods of this class, OR:
    2. Use :class:`DefaultRunner`, which only does minimal SGD training and
       nothing else. You can then add your own hooks if needed. OR:
    3. Write your own training loop similar to `tools/plain_train_net.py`.

    See the :doc:`/tutorials/training` tutorials for more details.

    Note that the behavior of this class, like other functions/classes in
    this file, is not stable, since it is meant to represent the "common default behavior".
    It is only guaranteed to work well with the standard models and training workflow in cvpods.
    To obtain more stable behavior, write your own training logic with other public APIs.

    Examples:
    ::
        runner = DefaultRunner(cfg)
        runner.resume_or_load()  # load last checkpoint or MODEL.WEIGHTS
        runner.train()

    Attributes:
        scheduler:
        checkpointer (DefaultCheckpointer):
        cfg (config dict):
    """

    def __init__(self, cfg, build_model):
        """
        Args:
            cfg (config dict): full cvpods configuration.
            build_model (callable): factory taking ``cfg`` and returning an
                ``nn.Module``.
        """
        self.data_loader = self.build_train_loader(cfg)
        # Assume these objects must be constructed in this order.
        model = build_model(cfg)
        # e.g. swap BN flavour (SyncBN, ...) according to the config
        self.model = maybe_convert_module(model)
        logger.info(f"Model: \n{self.model}")

        # Assume these objects must be constructed in this order.
        self.optimizer = self.build_optimizer(cfg, self.model)

        if cfg.TRAINER.FP16.ENABLED:
            self.mixed_precision = True
            if cfg.TRAINER.FP16.TYPE == "APEX":
                from apex import amp
                self.model, self.optimizer = amp.initialize(
                    self.model, self.optimizer, opt_level=cfg.TRAINER.FP16.OPTS.OPT_LEVEL
                )
        else:
            self.mixed_precision = False

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            torch.cuda.set_device(comm.get_local_rank())
            if cfg.MODEL.DDP_BACKEND == "torch":
                self.model = DistributedDataParallel(
                    self.model,
                    device_ids=[comm.get_local_rank()],
                    broadcast_buffers=False,
                    find_unused_parameters=True
                )
            elif cfg.MODEL.DDP_BACKEND == "apex":
                from apex.parallel import DistributedDataParallel as ApexDistributedDataParallel
                self.model = ApexDistributedDataParallel(self.model)
            else:
                raise ValueError("non-supported DDP backend: {}".format(cfg.MODEL.DDP_BACKEND))

        super().__init__(
            self.model,
            self.data_loader,
            self.optimizer,
        )

        if not cfg.SOLVER.LR_SCHEDULER.get("EPOCH_WISE", False):
            epoch_iters = -1
        else:
            epoch_iters = cfg.SOLVER.LR_SCHEDULER.get("EPOCH_ITERS")
            logger.warning(f"Setup LR Scheduler in EPOCH mode: {epoch_iters}")

        # NOTE(review): auto_scale_config is not among this module's imports —
        # confirm it is defined elsewhere in this file.
        auto_scale_config(cfg, self.data_loader)
        self.scheduler = self.build_lr_scheduler(cfg, self.optimizer, epoch_iters=epoch_iters)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DefaultCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            self.model,
            cfg.OUTPUT_DIR,
            optimizer=self.optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.start_epoch = 0
        self.max_iter = cfg.SOLVER.LR_SCHEDULER.MAX_ITER
        self.max_epoch = cfg.SOLVER.LR_SCHEDULER.MAX_EPOCH
        self.window_size = cfg.TRAINER.WINDOW_SIZE

        self.cfg = cfg

        self.register_hooks(self.build_hooks())

    def resume_or_load(self, resume=True):
        """
        If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
        a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (eg. optimizer and scheduler) and update iteration counter
        from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.

        Otherwise, this is considered as an independent training. The method will load model
        weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
        from iteration 0.

        Args:
            resume (bool): whether to do resume or not
        """
        self.checkpointer.resume = resume
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration (or iter zero if there's no checkpoint).
        self.start_iter = (self.checkpointer.resume_or_load(
            self.cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)

        if self.max_epoch is not None:
            # derive the epoch counter from the restored iteration counter
            if isinstance(self.data_loader.sampler, Infinite):
                length = len(self.data_loader.sampler.sampler)
            else:
                length = len(self.data_loader)
            self.start_epoch = self.start_iter // length

    def build_hooks(self):
        """
        Build a list of default hooks, including timing, evaluation,
        checkpointing, lr scheduling, precise BN, writing events.

        Returns:
            list[HookBase]:
        """
        cfg = self.cfg
        # cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN

        ret = [
            hooks.OptimizationHook(
                accumulate_grad_steps=cfg.SOLVER.BATCH_SUBDIVISIONS,
                grad_clipper=None,
                mixed_precision=cfg.TRAINER.FP16.ENABLED
            ),
            hooks.LRScheduler(self.optimizer, self.scheduler),
            hooks.IterationTimer(),
            hooks.PreciseBN(
                # Run at the same freq as (but before) evaluation.
                cfg.TEST.EVAL_PERIOD,
                self.model,
                # Build a new data loader to not affect training
                self.build_train_loader(cfg),
                cfg.TEST.PRECISE_BN.NUM_ITER,
            )
            if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
            else None,
        ]

        # Do PreciseBN before checkpointer, because it updates the model and need to
        # be saved by checkpointer.
        # This is not always the best: if checkpointing has a different frequency,
        # some checkpoints may have more precise statistics than others.
        if comm.is_main_process():
            ret.append(hooks.PeriodicCheckpointer(
                self.checkpointer,
                cfg.SOLVER.CHECKPOINT_PERIOD,
                max_iter=self.max_iter,
                max_epoch=self.max_epoch
            ))

        def test_and_save_results():
            # BUGFIX: this callback was referenced below but not defined,
            # which made EvalHook registration fail with a NameError. It runs
            # evaluation and caches the results so train() can verify them.
            self._last_eval_results = self.test(self.cfg, self.model)
            return self._last_eval_results

        # Do evaluation after checkpointer, because then if it fails,
        # we can use the saved checkpoint to debug.
        ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))

        if comm.is_main_process():
            # Here the default print/log frequency of each writer is used.
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(
                self.build_writers(), period=self.cfg.GLOBAL.LOG_INTERVAL
            ))
            # Put `PeriodicDumpLog` after writers so that can dump all the files,
            # including the files generated by writers
        return ret

    def build_writers(self):
        """
        Build a list of :class:`EventWriter` to be used.
        It now consists of a :class:`CommonMetricPrinter`,
        :class:`TensorboardXWriter` and :class:`JSONWriter`.

        Args:
            output_dir: directory to store JSON metrics and tensorboard events
            max_iter: the total number of iterations

        Returns:
            list[EventWriter]: a list of :class:`EventWriter` objects.
        """
        return [
            # It may not always print what you want to see, since it prints "common" metrics only.
            CommonMetricPrinter(
                self.max_iter,
                window_size=self.window_size,
                epoch=self.max_epoch,
            ),
            JSONWriter(
                os.path.join(self.cfg.OUTPUT_DIR, "metrics.json"),
                window_size=self.window_size
            ),
            TensorboardXWriter(
                self.cfg.OUTPUT_DIR,
                window_size=self.window_size
            ),
        ]

    def train(self):
        """
        Run training.

        Returns:
            OrderedDict of results, if evaluation is enabled. Otherwise None.
        """
        if self.max_epoch is None:
            logger.info("Starting training from iteration {}".format(self.start_iter))
        else:
            logger.info("Starting training from epoch {}".format(self.start_epoch))

        super().train(self.start_iter, self.start_epoch, self.max_iter)

        if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
            assert hasattr(
                self, "_last_eval_results"
            ), "No evaluation results obtained during training!"
            verify_results(self.cfg, self._last_eval_results)
            return self._last_eval_results

    @classmethod
    def build_optimizer(cls, cfg, model):
        """
        Returns:
            torch.optim.Optimizer:

        It now calls :func:`cvpods.solver.build_optimizer`.
        Overwrite it if you'd like a different optimizer.
        """
        return build_optimizer(cfg, model)

    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer, **kwargs):
        """
        It now calls :func:`cvpods.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        return build_lr_scheduler(cfg, optimizer, **kwargs)

    @classmethod
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`cvpods.data.build_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_train_loader(cfg)

    @classmethod
    def build_test_loader(cls, cfg):
        """
        Returns:
            iterable

        It now calls :func:`cvpods.data.build_test_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_test_loader(cfg)

    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        """
        Returns:
            DatasetEvaluator or None

        It is not implemented by default.
        """
        raise NotImplementedError(
            # TODO: add this tutorial
            """
If you want DefaultRunner to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
        )

    @classmethod
    def test(cls, cfg, model, evaluators=None, output_folder=None):
        """
        Args:
            cfg (config dict):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                ``cfg.DATASETS.TEST``.

        Returns:
            dict: a dict of result metrics
        """
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                len(cfg.DATASETS.TEST), len(evaluators)
            )

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    # subclasses override build_evaluator with this extended
                    # signature; the base classmethod raises NotImplementedError
                    evaluator = cls.build_evaluator(
                        cfg, dataset_name, data_loader.dataset, output_folder=output_folder)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultRunner.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue
            # BUGFIX: inference_on_dataset used to run unconditionally here
            # and then again in the else-branch, doubling evaluation cost;
            # run it exactly once.
            if cfg.TEST.ON_FILES:
                results_i = inference_on_files(evaluator)
            else:
                results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i
                )
                logger.info("Evaluation results for {} in csv format:".format(dataset_name))
                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        return results
@RUNNERS.register()
@deprecated("Use DefaultRunner instead.")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
468,
587,
9... | 2.253537 | 6,926 |
import numpy as np
from util import *
import matplotlib.pyplot as plt
import matplotlib

# Two side-by-side panels: left without polarizer (phi_p=None),
# right with polarizer (phi_p=0).
f, axs = plt.subplots(1, 2, figsize=(6,4),
                      gridspec_kw={'width_ratios':[1,1], 'hspace':0.1, 'wspace':0.1})

w = 1.5   # half-width of the plotted x range
h = 4.5   # vertical extent (curves are stacked with offsets below)
NA = 1.0  # numerical aperture
x = np.linspace(-w, w, 2**10)
pols = [None, 0]  # polarizer settings: absent, then oriented at 0
s = 0.65*NA       # lateral shift between the three PSF copies
for i, pol in enumerate(pols):
    print(pol)
    # h00 presumably evaluates the 0-order PSF profile (defined in util) —
    # TODO confirm its exact semantics
    center = h00(x, phi=0, NA=NA, n=1.33, phi_p=pol)
    shiftr = h00(x-s, phi=0, NA=NA, n=1.33, phi_p=pol)
    shiftl = h00(x+s, phi=0, NA=NA, n=1.33, phi_p=pol)
    # stacked traces: shifted-left, centered, shifted-right, and their sum
    axs[i].plot(x, shiftl + 3.3, '-k', lw=0.5)
    axs[i].plot(x, center + 2.3, '-k', lw=0.5)
    axs[i].plot(x, shiftr + 1.3, '-k', lw=0.5)
    axs[i].plot(x, center + shiftl + shiftr, '-k', lw=0.5)
    # dotted guides at the three PSF centers
    axs[i].plot([s,s], [-100, 100], ':k', lw=0.5)
    axs[i].plot([-s,-s], [-100, 100], ':k', lw=0.5)
    axs[i].plot([0,0], [-100, 100], ':k', lw=0.5)
    axs[i].set_xlim([-w,w])
    axs[i].set_ylim([0,h])
axs[0].annotate(r"${h'}_0^{0(p)}(x + x')$", xy=(0,0), xytext=(-1.6, 3.3), textcoords='data', ha='right', va='center')
axs[0].annotate(r"${h'}_0^{0(p)}(x)$", xy=(0,0), xytext=(-1.6, 2.3), textcoords='data', ha='right', va='center')
axs[0].annotate(r"${h'}_0^{0(p)}(x - x')$", xy=(0,0), xytext=(-1.6, 1.3), textcoords='data', ha='right', va='center')
axs[0].annotate(r"Sum", xy=(0,0), xytext=(-1.6, 0), textcoords='data', ha='right', va='center')
axs[0].set_title(r"Without polarizer")
axs[1].set_title(r"With polarizer")
axs[0].set_axis_off()
axs[1].set_axis_off()

# Plot PSFs
plt.savefig('psf-min.pdf', bbox_inches='tight')
| [
11748,
299,
32152,
355,
45941,
198,
6738,
7736,
1330,
1635,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
198,
198,
69,
11,
7877,
82,
796,
458,
83,
13,
7266,
489,
1747,
7,
16,
11,
362,
... | 1.804878 | 861 |
from src import npyscreen
| [
6738,
12351,
1330,
45941,
28349,
1361,
198
] | 3.714286 | 7 |
#
# Copyright (c), 2018, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
from typing import TYPE_CHECKING, Any, Dict, Optional, Iterator, Union, Type
from .namespaces import NamespacesType
from .xpath_context import ContextRootType, XPathContext
from .xpath2 import XPath2Parser
if TYPE_CHECKING:
from .xpath1 import XPath1Parser
from .xpath30 import XPath30Parser
ParserType = Union[Type[XPath1Parser], Type[XPath2Parser], Type[XPath30Parser]]
else:
ParserType = XPath2Parser
def select(root: ContextRootType,
           path: str,
           namespaces: Optional[NamespacesType] = None,
           parser: Optional[ParserType] = None,
           **kwargs: Any) -> Any:
    """
    XPath selector function that apply a *path* expression on *root* Element.

    :param root: an Element or ElementTree instance.
    :param path: the XPath expression.
    :param namespaces: a dictionary with mapping from namespace prefixes into URIs.
    :param parser: the parser class to use, that is :class:`XPath2Parser` for default.
    :param kwargs: other optional parameters for the parser instance or the dynamic context.
    :return: a list with XPath nodes or a basic type for expressions based \
        on a function or literal.
    """
    # pop the dynamic-context options off kwargs; whatever remains is
    # forwarded to the parser class.
    context_defaults = (
        ('item', None), ('position', 1), ('size', 1), ('axis', None),
        ('variables', None), ('current_dt', None), ('timezone', None),
    )
    context_kwargs = {k: kwargs.pop(k, v) for k, v in context_defaults}

    _parser = (parser or XPath2Parser)(namespaces, **kwargs)
    root_token = _parser.parse(path)
    context = XPathContext(root, **context_kwargs)
    return root_token.get_results(context)
def iter_select(root: ContextRootType,
                path: str,
                namespaces: Optional[NamespacesType] = None,
                parser: Optional[ParserType] = None,
                **kwargs: Any) -> Iterator[Any]:
    """
    A function that creates an XPath selector generator for apply a *path* expression
    on *root* Element.

    :param root: an Element or ElementTree instance.
    :param path: the XPath expression.
    :param namespaces: a dictionary with mapping from namespace prefixes into URIs.
    :param parser: the parser class to use, that is :class:`XPath2Parser` for default.
    :param kwargs: other optional parameters for the parser instance or the dynamic context.
    :return: a generator of the XPath expression results.
    """
    # separate dynamic-context options from parser options
    context_kwargs: Dict[str, Any] = {}
    for name, default in (('item', None), ('position', 1), ('size', 1),
                          ('axis', None), ('variables', None),
                          ('current_dt', None), ('timezone', None)):
        context_kwargs[name] = kwargs.pop(name, default)

    _parser = (parser or XPath2Parser)(namespaces, **kwargs)
    root_token = _parser.parse(path)
    context = XPathContext(root, **context_kwargs)
    return root_token.select_results(context)
class Selector(object):
    """
    XPath selector class. Create an instance of this class if you want to apply an XPath
    selector to several target data.

    :param path: the XPath expression.
    :param namespaces: a dictionary with mapping from namespace prefixes into URIs.
    :param parser: the parser class to use, that is :class:`XPath2Parser` for default.
    :param kwargs: other optional parameters for the XPath parser instance.

    :ivar path: the XPath expression.
    :vartype path: str
    :ivar parser: the parser instance.
    :vartype parser: XPath1Parser or XPath2Parser
    :ivar root_token: the root of tokens tree compiled from path.
    :vartype root_token: XPathToken
    """
    def __init__(self, path: str,
                 namespaces: Optional[NamespacesType] = None,
                 parser: Optional[ParserType] = None,
                 **kwargs: Any) -> None:
        # BUGFIX: the constructor documented by the class docstring was
        # missing, so `path`, `parser`, `root_token` and `_variables` were
        # never set and the class was unusable. 'variables' is kept so the
        # select()/iter_select() methods can reuse it as a context default.
        self._variables = kwargs.get('variables', {})
        self.parser = (parser or XPath2Parser)(namespaces, **kwargs)
        self.path = path
        self.root_token = self.parser.parse(path)

    @property
    def namespaces(self) -> Dict[str, str]:
        """A dictionary with mapping from namespace prefixes into URIs."""
        return self.parser.namespaces

    def select(self, root: ContextRootType, **kwargs: Any) -> Any:
        """
        Applies the instance's XPath expression on *root* Element.

        :param root: an Element or ElementTree instance.
        :param kwargs: other optional parameters for the XPath dynamic context.
        :return: a list with XPath nodes or a basic type for expressions based on \
            a function or literal.
        """
        if 'variables' not in kwargs and self._variables:
            kwargs['variables'] = self._variables
        context = XPathContext(root, **kwargs)
        return self.root_token.get_results(context)

    def iter_select(self, root: ContextRootType, **kwargs: Any) -> Iterator[Any]:
        """
        Creates an XPath selector generator for apply the instance's XPath expression
        on *root* Element.

        :param root: an Element or ElementTree instance.
        :param kwargs: other optional parameters for the XPath dynamic context.
        :return: a generator of the XPath expression results.
        """
        if 'variables' not in kwargs and self._variables:
            kwargs['variables'] = self._variables
        context = XPathContext(root, **kwargs)
        return self.root_token.select_results(context)
| [
2,
198,
2,
15069,
357,
66,
828,
2864,
11,
311,
1797,
4090,
357,
24274,
3961,
329,
13435,
10422,
737,
198,
2,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
318,
9387,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
2,
4091,
262,
2393,... | 2.759779 | 1,994 |
# test overloaded java methods dispatch logic in PyReflectedFunction
# needs to grow more tests. Uses javatests.JOverload as a bag of overloaded methods.
# (can be adapted to test alternative re-implemations even while they are developed
# write a *Envl class and change/add to to_test for that)
import sys
import unittest
import java
from java.util import ArrayList
from javatests import JOverload, Reflection
from org.python.core import PyReflectedFunction
# shared bag of overloaded Java methods used by the test cases
jo = JOverload()
# NOTE(review): extract_ov_meths and PyReflFuncEnvl are referenced here but
# not defined in this chunk of the file — confirm they are defined above.
to_test = [extract_ov_meths(JOverload,PyReflFuncEnvl)]
# Passing 'break-out' on the command line skips running the suite (useful for
# interactive exploration). Prefer Jython's test_support runner when it is
# importable, falling back to plain unittest otherwise.
# NOTE(review): the test case classes referenced below are not defined in this
# chunk of the file — confirm they are defined above.
if __name__ == '__main__' and not sys.argv[1:] == ['break-out']:
    try:
        import test_support
    except ImportError:
        unittest.main()
    else:
        test_support.run_unittest(OverloadedDispatchTests, VarargsDispatchTests, ComplexOverloadingTests)
| [
2,
1332,
50068,
20129,
5050,
27965,
9156,
287,
9485,
8134,
12609,
22203,
198,
2,
2476,
284,
1663,
517,
5254,
13,
36965,
474,
615,
265,
3558,
13,
41,
5886,
2220,
355,
257,
6131,
286,
50068,
5050,
13,
198,
2,
357,
5171,
307,
16573,
28... | 2.98155 | 271 |
import numpy as np
import pandas as pd
import json
import argparse
import os
if __name__ == "__main__":
    # Parse command-line options; must provide at least `result_dir`
    # (get_args() is defined elsewhere in this module -- not visible here).
    args = get_args()
    result_filepath = os.path.join(args.result_dir, 'results.csv')
    # First CSV column is treated as the row index (matches pandas' to_csv output).
    df = pd.read_csv(result_filepath, index_col=0)
    # NOTE(review): evaluate_results() presumably reduces the per-row results
    # into a JSON-serializable summary dict -- confirm against its definition.
    stats = evaluate_results(df)
    print(json.dumps(stats, indent=4))
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
11748,
28686,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
26498,
796,
... | 2.564516 | 124 |
import random
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import Sum
from django.utils.translation import ugettext_lazy as _
from django.utils.module_loading import import_by_path
from django.conf import settings
from .utils import model_dict_fn, format_datetime
from .util import (
number_to_hex,
hex_to_number,
b32_encode,
b32_decode,
pack,
unpack,
shorten_text,
)
User = settings.AUTH_USER_MODEL
class UserAdapterBase(object):
"""
Base version of UserAdapter. This can be (optionally) subclassed somewhere else which can then be set
to be used by system via `settings.KIRPPU_USER_ADAPTER`.
"""
@classmethod
@classmethod
# The actual class is found by string in settings.
UserAdapter = import_by_path(settings.KIRPPU_USER_ADAPTER)
| [
11748,
4738,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
797,
25636,
47139,
1352,
198,
6738,
42625,
14208,
13,... | 2.977707 | 314 |
# -*- coding: utf-8 -*-
from flask import current_app
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
764,
1330,
20613,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
11,
2198,
62,
28712,
62,
... | 3.486111 | 72 |
# Generated by Django 3.1.2 on 2020-10-12 05:01
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
940,
12,
1065,
8870,
25,
486,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/python36
import subprocess
import cv2
# Grab a single frame from the default webcam (device 0) as evidence,
# then run the Ansible security playbook.
cap = cv2.VideoCapture(0)
ret, image = cap.read()
cap.release()  # free the camera device immediately; the original leaked it

if ret:
    cv2.imwrite('/root/Udev_automation/defaulter.jpg', image)
else:
    # Without a frame there is nothing to save; the original crashed here
    # (imwrite on None). The lock-down playbook below still runs either way.
    print('ERROR: could not capture a frame from the webcam')

cv2.destroyAllWindows()

# Apply the security lock-down playbook (vault password read from file).
subprocess.getoutput(
    "ansible-playbook /root/Udev_automation/security.yml "
    "--vault-password-file=/root/Udev_automation/mypasswd"
)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
2623,
198,
198,
11748,
850,
14681,
198,
11748,
269,
85,
17,
220,
198,
198,
11128,
796,
269,
85,
17,
13,
10798,
49630,
7,
15,
8,
198,
1186,
11,
2939,
796,
1451,
13,
961,
3419,
198,
198,
33967,
... | 2.425676 | 148 |
"""Provide a strategy class to read ANSI encoded yWriter projects.
Copyright (c) 2021 Peter Triesberger
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import os
import xml.etree.ElementTree as ET
from pywriter.yw.utf8_tree_reader import Utf8TreeReader
class AnsiTreeReader(Utf8TreeReader):
    """Read ANSI encoded yWriter xml project file."""

    def read_element_tree(self, ywProject):
        """Parse the yWriter xml file located at filePath, fetching the Novel attributes.

        The source file declares an iso-8859-1 encoding; the declaration is
        rewritten to cp1252 in a temporary copy, which is then parsed.
        Return a message beginning with SUCCESS or ERROR.
        Override the superclass method.
        """
        _TEMPFILE = '._tempfile.xml'

        try:
            with open(ywProject.filePath, 'r') as f:
                project = f.readlines()

            # Only the XML declaration on the first line needs patching.
            project[0] = project[0].replace('<?xml version="1.0" encoding="iso-8859-1"?>',
                                            '<?xml version="1.0" encoding="cp1252"?>')

            with open(_TEMPFILE, 'w') as f:
                f.writelines(project)

            try:
                ywProject.tree = ET.parse(_TEMPFILE)
            finally:
                # Remove the temp copy even if parsing fails; the original
                # leaked it on parse errors.
                os.remove(_TEMPFILE)

        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return 'ERROR: Can not process "' + os.path.normpath(ywProject.filePath) + '".'

        return 'SUCCESS: XML element tree read in.'
| [
37811,
15946,
485,
257,
4811,
1398,
284,
1100,
3537,
11584,
30240,
331,
34379,
4493,
13,
201,
198,
201,
198,
15269,
357,
66,
8,
33448,
5613,
309,
1678,
21041,
201,
198,
1890,
2252,
1321,
766,
3740,
1378,
12567,
13,
785,
14,
79,
2357,
... | 2.246774 | 620 |
#!/usr/bin/env python
"""
Find initial mutual exclusive cliques by aligning
input reads against itself.
This is for ICE2 --- it uses either DALIGNER (default) or BLASR.
Several differences with the old IceInit:
1. It can be run in a standalone manner! Does not have to tie with the whole ICE2 framework.
2. It does NOT use pbcore. All pbcore is replaced with BioPython or other functions.
3. The BLASR version is expected to be SA4.0+, so options are -- not -.
4. It does not use QVs!
"""
__author__ = 'etseng@pacb.com'
import os
import os.path as op
import time
import logging
import subprocess
import networkx as nx
import numpy as np
from Bio import SeqIO
import pbtranscript.ice.pClique as pClique
from pbtranscript.ice.IceUtils import set_probqv_from_model
from pbtranscript.Utils import real_upath, execute
from pbtranscript.ice_daligner import DalignerRunner
from cupcake2.ice2.IceUtils2 import blasr_against_ref2, daligner_against_ref2
class IceInit2(object):
"""Iterative clustering and error correction."""
# version using BLASR; fallback if daligner fails
def _align_withBLASR(self, queryFa, targetFa, outFN):
"""Align input reads against itself using BLASR."""
if op.exists(outFN):
logging.info("{0} already exists. No need to run BLASR.".format(outFN))
else:
cmd = "blasr {q} ".format(q=real_upath(queryFa)) + \
"{t} ".format(t=real_upath(targetFa)) + \
"-m 5 --maxLCPLength 15 " + \
"--nproc {cpu} ".format(cpu=self.sge_opts.blasr_nproc) + \
"--minAlnLength {aln} ".format(aln=self.ice_opts.min_match_len) + \
"--maxScore {score} ".format(score=self.ice_opts.maxScore) + \
"--bestn {n} --nCandidates {n2} ".format(n=self.ice_opts.bestn, n2=self.ice_opts.bestn*2) + \
"--out {o} ".format(o=real_upath(outFN)) + \
"1>/dev/null 2>/dev/null"
logging.info("Calling {cmd}".format(cmd=cmd))
execute(cmd)
# align with DALIGNER
def _align_withDALIGNER(self, queryFa, output_dir):
"""
Align input reads against itself using DALIGNER.
"""
# run this locally
# Liz: is_FL is currently turned OFF! because LA4Ice has ICE_FL(-E) set with 200/50bp missed, too strict
runner = DalignerRunner(query_filename=queryFa, target_filename=queryFa,
query_converted=False, target_converted=False,
is_FL=False, same_strand_only=True,
use_sge=False, sge_opts=None,
cpus=4)
runner.run(min_match_len=self.ice_opts.min_match_len,
output_dir=output_dir,
sensitive_mode=self.ice_opts.sensitive_mode)
return runner
# version using BLASR
def _makeGraphFromM5(self, m5FN):
"""Construct a graph from a BLASR M5 file."""
alignGraph = nx.Graph()
for r in blasr_against_ref2(output_filename=m5FN,
is_FL=True,
sID_starts_with_c=False,
qver_get_func=self.qver_get_func,
qvmean_get_func=self.qvmean_get_func,
ece_penalty=self.ice_opts.ece_penalty,
ece_min_len=self.ice_opts.ece_min_len,
max_missed_start=self.ice_opts.max_missed_start,
max_missed_end=self.ice_opts.max_missed_end,
full_missed_start=self.ice_opts.full_missed_start,
full_missed_end=self.ice_opts.full_missed_end):
if r.qID == r.cID:
continue # self hit, ignore
if r.ece_arr is not None:
logging.debug("adding edge {0},{1}".format(r.qID, r.cID))
alignGraph.add_edge(r.qID, r.cID)
return alignGraph
def _makeGraphFromLA4Ice(self, runner):
"""Construct a graph from a LA4Ice output file."""
alignGraph = nx.Graph()
for la4ice_filename in runner.la4ice_filenames:
count = 0
start_t = time.time()
for r in daligner_against_ref2(
query_dazz_handler=runner.query_dazz_handler,
target_dazz_handler=runner.target_dazz_handler,
la4ice_filename=la4ice_filename,
is_FL=True, sID_starts_with_c=False,
qver_get_func=self.qver_get_func, qvmean_get_func=self.qvmean_get_func,
qv_prob_threshold=.03, ece_min_len=self.ice_opts.ece_min_len,
ece_penalty=self.ice_opts.ece_penalty,
same_strand_only=True, no_qv_or_aln_checking=False,
max_missed_start=self.ice_opts.max_missed_start,
max_missed_end=self.ice_opts.max_missed_end,
full_missed_start=self.ice_opts.full_missed_start,
full_missed_end=self.ice_opts.full_missed_end):
if r.qID == r.cID:
continue # self hit, ignore
if r.ece_arr is not None:
alignGraph.add_edge(r.qID, r.cID)
count += 1
logging.debug("total {0} edges added from {1}; took {2} sec"
.format(count, la4ice_filename, time.time()-start_t))
return alignGraph
    @classmethod
    def _findCliques(self, alignGraph, readsFa):
        """
        Find all mutually exclusive cliques within the graph, with decreased
        size.
        alignGraph - a graph, each node represent a read and each edge
        represents an alignment between two end points.
        Return a dictionary of clique indices and nodes.
        key = index of a clique
        value = nodes within a clique
        Cliques are ordered by their size descendingly: index up, size down
        Reads which are not included in any cliques will be added as cliques
        of size 1.
        """
        # NOTE(review): this is a @classmethod whose first parameter is named
        # `self`; it actually receives the class. It is invoked with keyword
        # args only (see init_cluster_by_clique), so this works but the naming
        # is misleading.
        uc = {}  # To keep cliques found
        used = []  # nodes within any cliques
        ind = 0  # index of clique to discover
        # NOTE(review): degree().items() / .sort() on the result and
        # neighbors()/nodes() returning lists assume the networkx 1.x API
        # under Python 2 -- confirm before porting.
        deg = alignGraph.degree().items()
        # Sort tuples of (node, degree) by degree, descendingly
        deg.sort(key=lambda x: x[1], reverse=True)
        for d in deg:
            node = d[0]  # node which has the largest degree in alignGraph
            # Skip nodes already removed as members of an earlier clique.
            if node not in alignGraph:
                continue
            # just get the immediate neighbors since we're looking for perfect
            # cliques
            subGraph = alignGraph.subgraph([node] + alignGraph.neighbors(node))
            subNodes = subGraph.nodes()
            # Convert from networkx.Graph to a sparse matrix
            S, H = pClique.convert_graph_connectivity_to_sparse(
                subGraph, subNodes)
            # index of the 'node' in the sub-graph
            seed_i = subNodes.index(node)
            # Grasp a clique from subGraph, and return indices of clique nodes
            # setting gamma=0.8 means to find quasi-0.8-cliques!
            tQ = pClique.grasp(S, H, gamma=0.8, maxitr=5, given_starting_node=seed_i)
            if len(tQ) > 0:
                c = [subNodes[i] for i in tQ]  # nodes in the clique
                uc[ind] = c  # Add the clique to uc
                ind += 1
                used += c  # Add clique nodes to used
                # Remove clique nodes from alignGraph and continue
                alignGraph.remove_nodes_from(c)
        # write each orphan as a singleton cluster
        for r in SeqIO.parse(open(readsFa), 'fasta'):
            if r.id not in used:
                uc[ind] = [r.id]
                ind += 1
        return uc
def init_cluster_by_clique(self):
"""
Only called once and in the very beginning, when (probably a subset)
of sequences are given to generate the initial cluster.
readsFa --- initial fasta filename, probably called *_split00.fasta
qver_get_func --- function that returns QVs on reads
qvmean_get_func --- function that returns the mean QV on reads
bestn --- parameter in BLASR, higher helps in finding perfect
cliques but bigger output
nproc, maxScore --- parameter in BLASR, set maxScore appropriate
to input transcript length
ece_penalty, ece_min_len --- parameter in isoform hit calling
Self-blasr input then iteratively find all mutually exclusive
cliques (in decreasing size)
Returns dict of cluster_index --> list of seqids
which is the 'uc' dict that can be used by IceIterative
"""
alignGraph = None
if self.ice_opts.aligner_choice == 'blasr':
outFN = self.readsFa + '.self.blasr'
self._align_withBLASR(queryFa=self.readsFa, targetFa=self.readsFa, outFN=outFN)
alignGraph = self._makeGraphFromM5(m5FN=outFN)
elif self.ice_opts.aligner_choice == 'daligner':
try:
runner = self._align_withDALIGNER(queryFa=self.readsFa,
output_dir=op.dirname(real_upath(self.readsFa)))
alignGraph = self._makeGraphFromLA4Ice(runner=runner)
runner.clean_run()
except RuntimeError: # daligner probably crashed, fall back to blasr
outFN = self.readsFa + '.self.blasr'
self._align_withBLASR(queryFa=self.readsFa, targetFa=self.readsFa, outFN=outFN)
alignGraph = self._makeGraphFromM5(m5FN=outFN)
else:
raise Exception, "Unrecognized aligner_choice {0}!".format(self.ice_opts.aligner_choice)
uc = IceInit2._findCliques(alignGraph=alignGraph, readsFa=self.readsFa)
return uc
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
16742,
4238,
13584,
8568,
537,
6368,
416,
10548,
278,
198,
15414,
9743,
1028,
2346,
13,
198,
198,
1212,
318,
329,
23358,
17,
11420,
340,
3544,
2035,
360,
1847,
16284,
1137,
... | 2.081611 | 4,693 |
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.sharing.userDirectoryInfo import UserDirectoryInfo
| [
6738,
2607,
24760,
13,
43282,
13,
421,
10640,
13,
15271,
62,
27184,
62,
22766,
1330,
4809,
32180,
20746,
198,
6738,
2607,
24760,
13,
43282,
13,
6978,
82,
13,
31092,
62,
6978,
1330,
20857,
15235,
198,
6738,
2607,
24760,
13,
20077,
4122,
... | 4.257576 | 66 |
u"""
This listens to room status update and sends relevant HTTP
calls to the meteor app.
"""
from __future__ import unicode_literals
import requests
import signals
import settings
SUBSCRIBED_LISTENERS = {}
retriable_adapter = requests.adapters.HTTPAdapter(max_retries=5)
session = requests.Session()
session.mount("http://wc-status.meteor.com", retriable_adapter)
| [
84,
37811,
198,
1212,
35019,
284,
2119,
3722,
4296,
290,
12800,
5981,
14626,
198,
66,
5691,
284,
262,
19999,
598,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
7007,
198,
198,
11748,
... | 3.236842 | 114 |
from sympy.core import (S, pi, oo, symbols, Function,
Rational, Integer, Tuple, Derivative)
from sympy.integrals import Integral
from sympy.concrete import Sum
from sympy.functions import exp, sin, cos
from sympy import mathematica_code as mcode
x, y, z = symbols('x,y,z')
f = Function('f')
| [
6738,
10558,
88,
13,
7295,
1330,
357,
50,
11,
31028,
11,
267,
78,
11,
14354,
11,
15553,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
46863,
11,
34142... | 2.574803 | 127 |
# Hamming distance (汉明距离)


def hamming_distance(a, b):
    """Return the number of bit positions in which non-negative ints a and b differ.

    XOR leaves a 1-bit exactly where the operands disagree, so the popcount
    of ``a ^ b`` is the Hamming distance.
    """
    return bin(a ^ b).count('1')


def main():
    """Read rounds of input: an int n, then a line of ints; print each value's
    Hamming distance to n.

    The original hand-rolled bit loop was broken (it counted with ``+= 0``,
    used ``&`` where ``and`` was intended, and compared a bit against a whole
    list); this reimplements the intent directly.
    """
    while True:
        try:
            n = int(input())
            nums = [int(tok) for tok in input().split()]
        except (EOFError, ValueError):
            break  # stop cleanly when input is exhausted or malformed
        for value in nums:
            print(hamming_distance(n, value))


if __name__ == '__main__':
    main()
| [
2,
10545,
109,
231,
23626,
236,
164,
115,
251,
163,
99,
119,
198,
4514,
352,
25,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
997,
82,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
62... | 1.686679 | 533 |
# Copyright 2020 The Khronos Group Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
main()
| [
2,
15069,
12131,
383,
5311,
1313,
418,
4912,
3457,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
1822,
29572,
198,
198,
12417,
3419,
628
] | 2.971429 | 35 |
#!/usr/bin/env python
import numpy as np
import math
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, optimizers, serializers, Variable
from chainer import function
from chainer.utils import type_check
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
6333,
263,
198,
11748,
6333,
263,
13,
12543,
2733,
355,
376,
198,
11748,
6333,
263,
13,
28751,
355,
406,
198,
6738,
6... | 3.493151 | 73 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-16 08:55
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1558,
319,
2864,
12,
1065,
12,
1433,
8487,
25,
2816,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,... | 2.754386 | 57 |
from keras.models import Model
from keras.layers import Input, Conv2D, Activation, BatchNormalization, Flatten, Dense, Conv2DTranspose, Reshape
from utils import get_channels_axis
| [
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
23412,
11,
34872,
17,
35,
11,
13144,
341,
11,
347,
963,
26447,
1634,
11,
1610,
41769,
11,
360,
1072,
11,
34872,
17,
35,
8291,
3455,
11,
1874,
71,
... | 3.172414 | 58 |
import sys
sys.path.append('./utils')
from napari_plugin_engine import napari_hook_implementation
from inspect import getmembers, isfunction
import inspect
from utils.utils import *
from iam_structure import iam_structure
function_list = getmembers(iam_structure, isfunction)
magic_function_list = prepare_functions(function_list)
@napari_hook_implementation | [
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
26791,
11537,
198,
198,
6738,
25422,
2743,
62,
33803,
62,
18392,
1330,
25422,
2743,
62,
25480,
62,
320,
32851,
198,
6738,
10104,
1330,
651,
30814,
11,
318,
8818,
198,
11... | 3.361111 | 108 |
"""
quantnn.examples.simple
=======================
This module provides a simple toy example to illustrate the basic
functionality of quantile regression neural networks. The task is a simple
1-dimensional regression problem of a signal with heteroscedastic noise:
.. math::
y = \sin(x) + \cdot \cos(x) \cdot \mathcal{N}(0, 1)
"""
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from matplotlib.cm import magma
from matplotlib.colors import Normalize
def create_training_data(n=1_000_000):
    """
    Draw random training pairs for the toy regression problem.

    Inputs are sampled uniformly over :math:`[-\pi, \pi]` and targets are
    the sine signal corrupted by heteroscedastic, cosine-scaled Gaussian
    noise.

    Args:
        n(int): Number of samples to draw.

    Return:
        Tuple ``(x, y)`` of 1D numpy arrays holding the sampled inputs and
        the corresponding noisy targets.
    """
    x = 2.0 * np.pi * np.random.random(size=n) - np.pi
    noise = np.random.randn(n)
    y = np.sin(x) + 1.0 * np.cos(x) * noise
    return x, y
def create_validation_data(x):
    """
    Sample validation targets for the toy example at caller-chosen inputs.

    Unlike the training-data generator, the *x* values are supplied by the
    caller, which makes it possible to evaluate (and plot) predictions over
    an arbitrary domain.

    Args:
        x: Array of x values for which to draw corresponding y samples.

    Return:
        Numpy array of noisy y values, one per element of ``x``.
    """
    noise = np.random.randn(*x.shape)
    return np.sin(x) + 1.0 * np.cos(x) * noise
def plot_histogram(x, y):
    """
    Plot 2D histogram of data.

    Draws a normalized 2D histogram of the (x, y) samples on a fixed grid
    over x in [-pi, pi], y in [-4, 4], with the noise-free signal
    y = sin(x) overlaid for reference.
    """
    # Calculate histogram
    bins_x = np.linspace(-np.pi, np.pi, 201)
    bins_y = np.linspace(-4, 4, 201)
    x_img, y_img = np.meshgrid(bins_x, bins_y)
    img, _, _ = np.histogram2d(x, y, bins=(bins_x, bins_y), density=True)
    # Plot results
    f, ax = plt.subplots(1, 1, figsize=(10, 6))
    # histogram2d returns x along the first axis; transpose so pcolormesh
    # gets y along rows as it expects.
    m = ax.pcolormesh(x_img, y_img, img.T, vmin=0, vmax=0.3, cmap="magma")
    x_sin = np.linspace(-np.pi, np.pi, 1001)
    y_sin = np.sin(x_sin)
    ax.plot(x_sin, y_sin, c="grey", label="$y=\sin(x)$", lw=3)
    ax.set_ylim([-2, 2])
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    plt.colorbar(m, label="Normalized frequency")
    plt.legend()
def plot_results(x_train, y_train, x_val, y_pred, y_mean, quantiles):
    """
    Plots the predicted quantiles against empirical quantiles.

    The empirical conditional CDF is estimated from the training data via a
    normalized 2D histogram; its level sets (at the requested quantile
    levels) are drawn as filled contours, with the predicted quantile
    curves and predicted mean overlaid as dashed lines.
    """
    # Calculate histogram and empirical quantiles.
    bins_x = np.linspace(-np.pi, np.pi, 201)
    bins_y = np.linspace(-4, 4, 201)
    x_img, y_img = np.meshgrid(bins_x, bins_y)
    img, _, _ = np.histogram2d(x_train, y_train, bins=(bins_x, bins_y), density=True)
    # Normalize each x-column so it integrates to 1 over y, giving a
    # conditional density p(y|x); integrate it cumulatively to get the CDF.
    norm = np.trapz(img, x=0.5 * (bins_y[1:] + bins_y[:-1]), axis=1)
    img_normed = img / norm.reshape(-1, 1)
    img_cdf = sp.integrate.cumtrapz(
        img_normed, x=0.5 * (bins_y[1:] + bins_y[:-1]), axis=1
    )
    x_centers = 0.5 * (bins_x[1:] + bins_x[:-1])
    # cumtrapz drops one sample along y, hence the [2:]/[:-2] centering.
    y_centers = 0.5 * (bins_y[2:] + bins_y[:-2])
    # Rebinds `norm` from the integral array above to a color Normalize --
    # intentional here, but easy to misread.
    norm = Normalize(0, 1)
    plt.figure(figsize=(10, 6))
    img = plt.contourf(
        x_centers,
        y_centers,
        img_cdf.T,
        levels=quantiles,
        norm=norm,
        cmap="magma",
    )
    # NOTE(review): hard-coded 13 assumes y_pred has (at least) 13 quantile
    # columns -- TODO confirm against len(quantiles) at the call site.
    for i in range(0, 13, 1):
        l_q = plt.plot(x_val, y_pred[:, i], lw=2, ls="--", color="grey")
    handles = l_q
    handles += plt.plot(x_val, y_mean, c="k", ls="--", lw=2)
    labels = ["Predicted quantiles", "Predicted mean"]
    plt.legend(handles=handles, labels=labels)
    plt.xlim([-np.pi, np.pi])
    plt.ylim([-3, 3])
    plt.xlabel("x")
    plt.ylabel("y")
    plt.grid(False)
    plt.colorbar(img, label=r"Empirical quantiles")
| [
37811,
198,
40972,
20471,
13,
1069,
12629,
13,
36439,
198,
4770,
1421,
18604,
198,
198,
1212,
8265,
3769,
257,
2829,
13373,
1672,
284,
19418,
262,
4096,
198,
11244,
286,
5554,
576,
20683,
17019,
7686,
13,
383,
4876,
318,
257,
2829,
198,... | 2.265738 | 1,652 |
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
from ...basemodel import ini
import re, os
from ...basemodel.message import *
| [
2235,
2488,
7753,
201,
198,
2,
201,
198,
2,
15069,
357,
66,
8,
2813,
532,
2864,
11,
8180,
10501,
13,
1439,
2489,
10395,
29847,
11473,
29,
201,
198,
2,
201,
198,
2,
770,
1430,
290,
262,
19249,
5696,
389,
11971,
290,
925,
1695,
201,... | 3.296703 | 182 |
from questionary.prompts import confirm
from questionary.prompts import text
from questionary.prompts import select
from questionary.prompts import rawselect
from questionary.prompts import password
from questionary.prompts import checkbox
# Registry mapping a user-facing "type" string to the function implementing
# that prompt (presumably consumed by a prompt() dispatcher elsewhere in the
# package -- not visible here).
AVAILABLE_PROMPTS = {
    "confirm": confirm.confirm,
    "text": text.text,
    "select": select.select,
    "rawselect": rawselect.rawselect,
    "password": password.password,
    "checkbox": checkbox.checkbox,
    # backwards compatible names
    "list": select.select,
    "rawlist": rawselect.rawselect,
    "input": text.text,
}
| [
6738,
1808,
560,
13,
16963,
457,
82,
1330,
6216,
198,
6738,
1808,
560,
13,
16963,
457,
82,
1330,
2420,
198,
6738,
1808,
560,
13,
16963,
457,
82,
1330,
2922,
198,
6738,
1808,
560,
13,
16963,
457,
82,
1330,
8246,
19738,
198,
6738,
180... | 2.969231 | 195 |
m=1
n=2
e=4
d=8
key=20
| [
76,
28,
16,
198,
77,
28,
17,
198,
68,
28,
19,
198,
67,
28,
23,
198,
2539,
28,
1238,
628,
198
] | 1.190476 | 21 |
from collections import OrderedDict
from typing import List
from pg2avro import get_avro_schema, get_avro_row_dict
import json
def test_get_avro_row_row_types():
"""
Test generating Avro rows from different source row data.
TODO: Cover more than the simplest golden path.
"""
columns = [
{"name": "name", "type": "varchar", "nullable": False},
{"name": "number", "type": "float4", "nullable": False},
{"name": "list", "type": "_varchar", "nullable": False},
{"name": "is_working", "type": "bool", "nullable": False},
]
table_name = "test_table"
namespace = "test_namespace"
schema = get_avro_schema(table_name, namespace, columns)
expected = [
{
"name": "example-01",
"number": 1.0,
"list": ["list", "of", "strings"],
"is_working": True,
},
{
"name": "example-02",
"number": 2.5,
"list": ["another", "list", "of", "strings"],
"is_working": False,
},
]
rows_data = [
# Compatible Row objects.
[
Row("example-01", 1.0, "list of strings".split(), True),
Row("example-02", 2.5, "another list of strings".split(), False),
],
# Compatible Dicts.
[
{
"name": "example-01",
"number": 1.0,
"list": "list of strings".split(),
"is_working": True,
},
{
"name": "example-02",
"number": 2.5,
"list": "another list of strings".split(),
"is_working": False,
},
],
# Compatible Dicts, but extended class.
[
OrderedDict(
{
"name": "example-01",
"number": 1.0,
"list": "list of strings".split(),
"is_working": True,
}
),
OrderedDict(
{
"name": "example-02",
"number": 2.5,
"list": "another list of strings".split(),
"is_working": False,
}
),
],
# Compatible Tuples.
[
("example-01", 1.0, "list of strings".split(), True),
("example-02", 2.5, "another list of strings".split(), False),
],
]
for row_data in rows_data:
actual = [get_avro_row_dict(r, schema) for r in row_data]
assert expected == actual
def test_get_avro_row_dict_special_data_types():
"""
Test generating Avro rows from data, using special types.
"""
columns = [
{"name": "json_col", "type": "json"},
{"name": "jsonb_col", "type": "jsonb"},
{"name": "empty_list", "type": "_varchar"},
]
table_name = "test_table"
namespace = "test_namespace"
schema = get_avro_schema(table_name, namespace, columns)
json_1 = {"key1": "val1"}
json_2 = {"key2": "val2", "key3": [1, 2], "key4": {"key5": "val5"}}
expected = [
{
"json_col": json.dumps(json_1),
"jsonb_col": json.dumps(json_2),
"empty_list": [],
},
{
"json_col": json.dumps(json_2),
"jsonb_col": json.dumps(json_1),
"empty_list": None,
},
]
actual = [
get_avro_row_dict(r, schema)
for r in [(json_1, json_2, []), (json_2, json_1, None)]
]
assert expected == actual
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
19720,
1330,
7343,
198,
6738,
23241,
17,
615,
305,
1330,
651,
62,
615,
305,
62,
15952,
2611,
11,
651,
62,
615,
305,
62,
808,
62,
11600,
198,
11748,
33918,
628,
198,
4299,
1332,
62,... | 1.895735 | 1,899 |
import math
import gs
import gs.plus.render as render
import gs.plus.input as input
import gs.plus.scene as scene
import gs.plus.clock as clock
import gs.plus.audio as audio
import globals
import level_game
scn = None
dt_sec = 1.0 / 60.0
screen_clock = 0.0
title_music = None
| [
11748,
10688,
201,
198,
201,
198,
11748,
308,
82,
201,
198,
11748,
308,
82,
13,
9541,
13,
13287,
355,
8543,
201,
198,
11748,
308,
82,
13,
9541,
13,
15414,
355,
5128,
201,
198,
11748,
308,
82,
13,
9541,
13,
29734,
355,
3715,
201,
1... | 2.479339 | 121 |
import errno
import os
import subprocess
import time
from importlib import import_module
from typing import Union
from sklearn.model_selection import ParameterGrid, ParameterSampler
| [
11748,
11454,
3919,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
640,
198,
6738,
1330,
8019,
1330,
1330,
62,
21412,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
25139,
2357,
41339,
11,
... | 3.9375 | 48 |
# Command to run: python manage.py shell < scripts/migration/set_challenge_slug_field.py
# TODO: Run the code using a function based approach
import traceback

from challenges.models import Challenge


def _slugify(title):
    """Convert a challenge title to its slug: space-separated words joined by '-', lower-cased."""
    return '-'.join(title.split(' ')).lower()


challenges = Challenge.objects.all()

try:
    for challenge in challenges:
        challenge.slug = _slugify(challenge.title)
        challenge.save()
except Exception as e:
    print(e)
    # Fixed: the original wrapped this in print(), which printed a spurious
    # `None` (print_exc writes to stderr and returns None).
    traceback.print_exc()
| [
2,
9455,
284,
1057,
25,
21015,
6687,
13,
9078,
7582,
1279,
14750,
14,
76,
4254,
14,
2617,
62,
36747,
3540,
62,
6649,
1018,
62,
3245,
13,
9078,
198,
2,
16926,
46,
25,
5660,
262,
2438,
1262,
257,
2163,
1912,
3164,
198,
198,
11748,
1... | 2.927374 | 179 |
from django.conf.urls import include, url
from honey import views as honey_views
# custom views
# URL routes for the honey app: delegated sub-URLconfs first, then one
# create/detail/list triple per model, then the dashboard at the site root.
urlpatterns = [
    # Delegated URL groups for projects and ideas.
    url(r'^', include('honey.project_urls')),
    url(r'^', include('honey.idea_urls')),
    # Events: create, detail (by slug), list.
    url(r'^events/add/',
        view=honey_views.EventCreateView.as_view(),
        name="event-create"),
    url(r'^events/(?P<slug>[-\w]+)/',
        view=honey_views.EventDetailView.as_view(),
        name="event-detail"),
    url(r'^events/$',
        view=honey_views.EventListView.as_view(),
        name="event-list"),
    # Topics: create, detail (by slug), list.
    url(r'^topic/add/',
        view=honey_views.TopicCreateView.as_view(),
        name="topic-create"),
    url(r'^topic/(?P<slug>[-\w]+)/',
        view=honey_views.TopicDetailView.as_view(),
        name="topic-detail"),
    url(r'^topic/$',
        view=honey_views.TopicListView.as_view(),
        name="topic-list"),
    # Technologies: create, detail (by slug), list.
    url(r'^technology/add/',
        view=honey_views.TechnologyCreateView.as_view(),
        name="technology-create"),
    url(r'^technology/(?P<slug>[-\w]+)/',
        view=honey_views.TechnologyDetailView.as_view(),
        name="technology-detail"),
    url(r'^technology/$',
        view=honey_views.TechnologyListView.as_view(),
        name="technology-list"),
    # Site root: dashboard.
    url(r'^$',
        view=honey_views.DashboardView.as_view(),
        name="dashboard",),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
12498,
1330,
5009,
355,
12498,
62,
33571,
628,
198,
2,
2183,
5009,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3256,
2291,
... | 2.178982 | 609 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Treats a decision tree as a representation transformation layer.
A decision tree transformer takes features as input and returns the probability
of reaching each leaf as output. The routing throughout the tree is learnable
via backpropagation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
class DecisionsToDataLayer(hybrid_layer.HybridLayer):
"""A layer that treats soft decisions as data."""
class KFeatureDecisionsToDataLayer(hybrid_layer.HybridLayer):
"""A layer that treats soft decisions made on single features as data."""
# pylint: disable=unused-argument
class HardDecisionsToDataLayer(DecisionsToDataLayer):
"""A layer that learns a soft decision tree but treats it as hard at test."""
class StochasticHardDecisionsToDataLayer(HardDecisionsToDataLayer):
"""A layer that learns a soft decision tree by sampling paths."""
class StochasticSoftDecisionsToDataLayer(StochasticHardDecisionsToDataLayer):
"""A layer that learns a soft decision tree by sampling paths."""
| [
2,
15069,
1584,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
2... | 3.616013 | 612 |
#!/usr/bin/python
# -*- coding: utf8 -*-
import codecs
import os
from time import sleep
from flask import Flask, request, render_template, redirect, url_for, send_from_directory
from markdown_to_html import markdown_to_html, markdown_meta
from utils import get_tutorial_settings
from selenium import webdriver
import json
app = Flask(__name__, static_url_path='/static')
app.config['FREEZER_DESTINATION'] = 'docs'
app.jinja_options = {'extensions': ['jinja2.ext.with_', 'jinja2.ext.i18n']}
CNAME = 'www.ucimeshardverom.sk'
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
LOGO_PYCON = 'logo/pycon.svg'
# Github Pages required file
@app.route('/CNAME')
# 404 website
@app.errorhandler(404)
@app.route('/')
if __name__ == "__main__":
app.run(debug=True, host=os.environ.get('FLASK_HOST', '127.0.0.1'),
port=int(os.environ.get('FLASK_PORT', 5000)))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
40481,
82,
198,
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
... | 2.490251 | 359 |
from dataclasses import fields
from email import message
from turtle import up
import shopify
from shopify.resources import fulfillment_service
from pyactiveresource.activeresource import ActiveResource
from shopify.resources.custom_collection import CustomCollection
import logging
logger = logging.getLogger('__default__')
_ignore_status = ('cancelled', 'error', 'failure')
@add_request_handler
@request_handler
@request_handler
@request_handler
@update_request_handler
@add_request_handler
@update_request_handler
@request_handler
@request_handler
@request_handler
@update_request_handler
# @update_request_handler
# def update_inventory(inv_item_id, data,**kwargs):
# inv_item = shopify.InventoryItem.find(inv_item_id)
# inv_item._update(data)
# inv_item.save()
# update_inventory_level(inv_item_id, data, **kwargs)
@request_handler
@request_handler
@update_request_handler
@update_request_handler
def update_line_items(order_id, items, **kwargs):
"""not supported in shopify (after order has been created)"""
pass
# order = shopify.Order.find(order_id, fields='line_items')
# line_items = order.line_items
# for item in line_items:
# if items.get(str(item.variant_id)) is not None:
# from pprint import pprint
# print('-------------line_item',item.quantity)
# # item._update({'quantity': 1})
# item.quantity = 1
# suc = item.save()
# logger.debug(f'update line item {item.variant_id} in order {order_id}: {suc}')
@update_request_handler
@add_request_handler
def update_customer(order_id, customer_data, customer_id=None, **kwargs):
""""shopify can not update billing address with api"""
order = shopify.Order.find(order_id, fields='id,customer')
if customer_id is None:
customer_id = order.customer.id
update_order = {}
customer_update = {}
if kwargs.get('is_billing'):
# update_order['billing_address'] = customer_data
if customer_data.get('email'):
update_order['email'] = customer_data['email']
customer_update['email'] = customer_data['email']
if customer_data.get('phone'):
update_order['phone'] = customer_data['phone']
customer_update['phone'] = customer_data['phone']
if kwargs.get('is_shipping'):
update_order['shipping_address'] = customer_data
if len(update_order) > 0:
order._update(update_order)
suc = order.save()
if len(customer_update) > 0:
customer = shopify.Customer.find(customer_id)
if customer_data.get('address1') and customer_data.get('zip') \
and customer_data.get('province') and customer_data.get('country'):
customer_update['addresses'] = customer.addresses + [customer_data]
customer._update(customer_update)
suc = customer.save()
return order, suc
@request_handler
@update_request_handler
@update_request_handler
@add_request_handler
@update_request_handler
@request_handler
@add_request_handler
@request_handler
@add_request_handler
| [
6738,
4818,
330,
28958,
1330,
7032,
198,
6738,
3053,
1330,
3275,
198,
6738,
28699,
1330,
510,
198,
11748,
6128,
1958,
198,
6738,
6128,
1958,
13,
37540,
1330,
32402,
62,
15271,
198,
6738,
12972,
5275,
31092,
13,
5275,
31092,
1330,
14199,
... | 2.557785 | 1,246 |
import os
import pandas as pd
import numpy as np
import random
from pathlib import Path
from CellData import CellData
from random import shuffle
# For reproducibility
random.seed(0)
np.random.seed(0)
os.chdir(open("../data_dir").read().strip())
# Load fixed order of perts and cell types
perts = pd.read_csv("Hodos/their_data/hodos_data_large_tensor/perts.csv", sep="\t", header=None, names=["Values"])['Values'].values.tolist()
cell_types = pd.read_csv("Hodos/their_data/hodos_data_large_tensor/cell_types.csv", sep="\t", header=None, names=["Values"])['Values'].values.tolist()
# Get all the available profiles
all_data = []
cell_data = CellData("Hodos/their_data/hodos_data_large_tensor", None, None, "trt_cp", revision=True)
for i in range(len(cell_data.train_data)):
meta_object = cell_data.train_meta[i]
all_data.append(meta_object[0] + "," + meta_object[1])
# shuffle the profiles for randomness
shuffle(all_data)
# split the profiles into ten folds
folds = np.array_split(all_data, 10)
# write the folds to the disk
for i, fold in enumerate(folds):
with open("Hodos/their_data/hodos_folds_their_data/"+str(i+1), 'w+') as f:
f.write('\n'.join(list(fold.flatten())))
# Construct input for methods from Hodos et al where test set is replaced by nan values
for i, fold in enumerate(folds):
for cell in cell_types:
profiles = []
for pert in perts:
p = None
# This is the test profile
if cell + "," + pert in fold:
profiles.append(','.join(['nan' for _ in range(978)]))
continue
if pert in cell_data.meta_dictionary_pert.keys():
p = cell_data.get_profile_cell_pert(cell_data.train_data, cell_data.meta_dictionary_pert[pert], cell, pert)
# the profile is not test but it does not exist in the data
if p is None:
profiles.append(','.join(['nan' for _ in range(978)]))
continue
p = np.squeeze(p)
profiles.append(','.join([str(num) for num in p]))
print(f"Cell {cell} number of perts {len(profiles)}")
# write the olds on the disk
cell_path = "Hodos/their_data/input/fold_" + str(i + 1) + "/"
Path(cell_path).mkdir(parents=True, exist_ok=True)
with open(cell_path + cell + ".csv", 'w+') as f:
f.write('\n'.join(profiles))
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
12440,
6601,
1330,
12440,
6601,
198,
6738,
4738,
1330,
36273,
198,
198,
2,
1114,
81... | 2.367226 | 1,013 |
from tool.runners.python import SubmissionPy
| [
6738,
2891,
13,
36740,
13,
29412,
1330,
42641,
20519,
628
] | 4.6 | 10 |
from rest_framework import viewsets
from rest_framework.permissions import AllowAny
from rest_framework.generics import CreateAPIView
from queueapi.models import cnapWithService
from queueapi.serializers import QueueSerializer, QueueCreateSerializer
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
22507,
7149,
198,
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
13610,
2969,
3824,
769,
198,
198,
6738,
16834,
15042,
13,
27530,
1330,
269,
7... | 3.953125 | 64 |
# Generated by Django 2.1.8 on 2019-04-18 18:16
from django.db import migrations
import phonenumber_field.modelfields
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
23,
319,
13130,
12,
3023,
12,
1507,
1248,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
11748,
32896,
268,
4494,
62,
3245,
13,
19849,
25747,
628
] | 3 | 40 |
from typing import Optional
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
# Aviso: Toda parte de interface está aqui
# - console log
# - main grid
#
# Eu poderia ter separado em classes mas não fiz :( | [
6738,
19720,
1330,
32233,
198,
6738,
12972,
80,
25297,
1470,
13,
48,
83,
1330,
33734,
8205,
72,
198,
198,
11748,
12972,
80,
25297,
1470,
355,
23241,
198,
198,
2,
317,
4703,
78,
25,
309,
11329,
636,
68,
390,
7071,
1556,
6557,
14839,
... | 2.815789 | 76 |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 10:33:38 2020
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from scipy.special import ncfdtrinc
from statsmodels.stats.robust_compare import TrimmedMean, scale_transform
from statsmodels.tools.testing import Holder
from statsmodels.stats.base import HolderTuple
def effectsize_oneway(means, vars_, nobs, use_var="unequal", ddof_between=0):
    """
    Effect size corresponding to Cohen's f = nc / nobs for oneway anova

    This contains adjustment for Welch and Brown-Forsythe Anova so that
    effect size can be used with FTestAnovaPower.

    Parameters
    ----------
    means : array_like
        Mean of samples to be compared
    vars_ : float or array_like
        Residual (within) variance of each sample or pooled
        If ``vars_`` is scalar, then it is interpreted as pooled variance that
        is the same for all samples, ``use_var`` will be ignored.
        Otherwise, the variances are used depending on the ``use_var`` keyword.
    nobs : int or array_like
        Number of observations for the samples.
        If nobs is scalar, then it is assumed that all samples have the same
        number ``nobs`` of observation, i.e. a balanced sample case.
        Otherwise, statistics will be weighted corresponding to nobs.
        Only relative sizes are relevant, any proportional change to nobs does
        not change the effect size.
    use_var : {"unequal", "equal", "bf"}
        If ``use_var`` is "unequal", then the variances can differ across
        samples and the effect size for Welch anova will be computed.
        The value is compared case-insensitively.
    ddof_between : int
        Degrees of freedom correction for the weighted between sum of squares.
        The denominator is ``nobs_total - ddof_between``
        This can be used to match differences across reference literature.

    Returns
    -------
    f2 : float
        Effect size corresponding to squared Cohen's f, which is also equal
        to the noncentrality divided by total number of observations.

    Raises
    ------
    ValueError
        If ``use_var`` is not one of "unequal", "equal" or "bf".

    Notes
    -----
    This currently handles the following cases for oneway anova

    - balanced sample with homoscedastic variances
    - samples with different number of observations and with homoscedastic
      variances
    - samples with different number of observations and with heteroskedastic
      variances. This corresponds to Welch anova

    In the case of "unequal" and "bf" methods for unequal variances, the
    effect sizes do not directly correspond to the test statistic in Anova.
    Both have correction terms dropped or added, so the effect sizes match up
    with using FTestAnovaPower.
    If all variances are equal, then all three methods result in the same
    effect size. If variances are unequal, then the three methods produce
    small differences in effect size.

    Note, the effect size and power computation for BF Anova was not found in
    the literature. The correction terms were added so that FTestAnovaPower
    provides a good approximation to the power.

    Status: experimental
    We might add additional returns, if those are needed to support power
    and sample size applications.

    Examples
    --------
    The following shows how to compute effect size and power for each of the
    three anova methods. The null hypothesis is that the means are equal which
    corresponds to a zero effect size. Under the alternative, means differ
    with two sample means at a distance delta from the mean. We assume the
    variance is the same under the null and alternative hypothesis.

    ``nobs`` for the samples defines the fraction of observations in the
    samples. ``nobs`` in the power method defines the total sample size.

    >>> from statsmodels.stats.oneway import effectsize_oneway
    >>> from statsmodels.stats.power import FTestAnovaPower
    >>>
    >>> nobs = np.array([10, 12, 13, 15])
    >>> delta = 0.5
    >>> means_alt = np.array([-1, 0, 0, 1]) * delta
    >>> vars_ = np.arange(1, len(means_alt) + 1)
    >>>
    >>> f2_alt = effectsize_oneway(means_alt, vars_, nobs, use_var="equal")
    >>> f2_alt
    0.04581300813008131
    >>>
    >>> kwds = {'effect_size': np.sqrt(f2_alt), 'nobs': 100, 'alpha': 0.05,
    ...         'k_groups': 4}
    >>> power = FTestAnovaPower().power(**kwds)
    >>> power
    0.39165892158983273
    >>>
    >>> f2_alt = effectsize_oneway(means_alt, vars_, nobs, use_var="unequal")
    >>> f2_alt
    0.060640138408304504
    >>>
    >>> f2_alt = effectsize_oneway(means_alt, vars_, nobs, use_var="bf")
    >>> f2_alt
    0.04391324307956788
    """
    # the code here is largely a copy of onway_generic with adjustments

    # Normalize and validate the variance treatment up front.  Previously an
    # unrecognized value (e.g. a typo) silently fell through to the
    # "unequal" computation and returned a wrong effect size; only "bf" was
    # matched case-insensitively.
    use_var = use_var.lower()
    if use_var not in ("unequal", "equal", "bf"):
        raise ValueError('use_var is to be one of "unequal", "equal" or "bf"')

    means = np.asarray(means)
    # asarray so that list input also works in the "bf" branch (vars_**2)
    vars_ = np.asarray(vars_)
    n_groups = means.shape[0]
    if np.size(nobs) == 1:
        # balanced case: broadcast the common group size
        nobs = np.ones(n_groups) * nobs
    nobs_t = nobs.sum()

    if use_var == "bf":
        # Brown-Forsythe: weights are the group sizes, not inverse variances
        weights = nobs
        w_total = weights.sum()
        w_rel = weights / w_total
        meanw_t = w_rel @ means

        tmp = ((1. - nobs / nobs_t) * vars_).sum()
        statistic = 1. * (nobs * (means - meanw_t)**2).sum()
        statistic /= tmp
        f2 = statistic * (1. - nobs / nobs_t).sum() / nobs_t
        # correction factor for df_num in BFM
        df_num2 = n_groups - 1
        df_num = tmp**2 / ((vars_**2).sum() +
                           (nobs / nobs_t * vars_).sum()**2 -
                           2 * (nobs / nobs_t * vars_**2).sum())
        f2 *= df_num / df_num2
        return f2

    if use_var == "equal":
        # pool the within variance; a scalar vars_ is already the pooled value
        if np.size(vars_) == 1:
            var_resid = vars_
        else:
            var_resid = ((nobs - 1) * vars_).sum() / (nobs_t - n_groups)

        vars_ = var_resid  # scalar, if broadcasting works

    # Welch ("unequal") weighting; for "equal", vars_ is the pooled scalar
    weights = nobs / vars_
    w_total = weights.sum()
    w_rel = weights / w_total
    # meanw_t = (weights * means).sum() / w_total
    meanw_t = w_rel @ means

    f2 = np.dot(weights, (means - meanw_t)**2) / (nobs_t - ddof_between)
    return f2
def convert_effectsize_fsqu(f2=None, eta2=None):
    """Convert between the squared effect sizes of the f family.

    The two measures are related by ``f2 = eta2 / (1 - eta2)``, where
    ``f2`` is the signal to noise ratio, var_explained / var_residual,
    and ``eta2`` is the proportion of explained variance,
    var_explained / var_total.

    Parameters
    ----------
    f2 : None or float
        Squared Cohen's F effect size. If not None, ``eta2`` is derived
        from it.
    eta2 : None or float
        Squared eta effect size. Only used to derive ``f2`` when ``f2``
        is None.

    Returns
    -------
    res : Holder instance
        Holder with both ``f2`` and ``eta2`` as attributes.
    """
    if f2 is not None:
        eta2 = 1 / (1 + 1 / f2)
    elif eta2 is not None:
        f2 = eta2 / (1 - eta2)
    return Holder(f2=f2, eta2=eta2)
def _fstat2effectsize(f_stat, df):
    """Compute anova effect sizes from an F-statistic.

    This helper might eventually be combined with
    ``convert_effectsize_fsqu``.

    Parameters
    ----------
    f_stat : array_like
        Test statistic of an F-test.
    df : tuple
        Degrees of freedom ``df = (df1, df2)`` where

        - df1 : numerator degrees of freedom, number of constraints
        - df2 : denominator degrees of freedom, df_resid

    Returns
    -------
    res : Holder instance
        Holder with effect size measures f2, eta2, omega2 and eps2 as
        attributes.

    Notes
    -----
    Definitions used here:

    - f2 = f_stat * df1 / df2
    - eta2 = f2 / (f2 + 1)
    - omega2 = (f2 - df1 / df2) / (f2 + 2)
    - eps2 = (f2 - df1 / df2) / (f2 + 1)

    This differs from effect size measures in other functions which define
    ``f2 = f_stat * df1 / nobs`` or an equivalent expression for power
    computation, where the noncentrality index of the hypothesis test is
    ``nc = f_stat * df1``.

    omega2 and eps2 are each computed in two different ways. The variants
    agree for regular cases but can behave differently in corner cases
    (e.g. zero division).
    """
    df1, df2 = df
    f2 = f_stat * df1 / df2
    eta2 = f2 / (f2 + 1)
    # shared numerator of the rewritten omega2 / eps2 forms
    shift = f2 - df1 / df2
    omega2_ = (f_stat - 1) / (f_stat + (df2 + 1) / df1)
    omega2 = shift / (f2 + 1 + 1 / df2)  # rewrite
    eps2_ = (f_stat - 1) / (f_stat + df2 / df1)
    eps2 = shift / (f2 + 1)  # rewrite
    return Holder(f2=f2, eta2=eta2, omega2=omega2, eps2=eps2,
                  eps2_=eps2_, omega2_=omega2_)
# conversion functions for Wellek's equivalence effect size
# these are mainly to compare with literature
def wellek_to_f2(eps, n_groups):
    """Convert Wellek's effect size (sqrt) to Cohen's f-squared.

    Computes ``f2 = 1 / n_groups * eps**2``.

    Parameters
    ----------
    eps : float or ndarray
        Wellek's effect size as used in the anova equivalence test.
    n_groups : int
        Number of groups in the oneway comparison.

    Returns
    -------
    f2 : effect size Cohen's f-squared
    """
    scale = 1 / n_groups
    f2 = scale * eps**2
    return f2
def f2_to_wellek(f2, n_groups):
    """Convert Cohen's f-squared to Wellek's effect size (sqrt).

    Computes ``eps = sqrt(n_groups * f2)``.

    Parameters
    ----------
    f2 : float or ndarray
        Effect size Cohen's f-squared.
    n_groups : int
        Number of groups in the oneway comparison.

    Returns
    -------
    eps : float or ndarray
        Wellek's effect size as used in the anova equivalence test.
    """
    return np.sqrt(n_groups * f2)
def fstat_to_wellek(f_stat, n_groups, nobs_mean):
    """Convert an F statistic to Wellek's effect size eps squared.

    Computes ``es = f_stat * (n_groups - 1) / nobs_mean``.

    Parameters
    ----------
    f_stat : float or ndarray
        Test statistic of an F-test.
    n_groups : int
        Number of groups in the oneway comparison.
    nobs_mean : float or ndarray
        Average number of observations across groups.

    Returns
    -------
    eps : float or ndarray
        Wellek's effect size as used in the anova equivalence test.
    """
    return f_stat * (n_groups - 1) / nobs_mean
def confint_noncentrality(f_stat, df, alpha=0.05,
                          alternative="two-sided"):
    """Confidence interval for the noncentrality parameter in an F-test.

    The interval is obtained by inverting the cdf of the noncentral F
    distribution with respect to the noncentrality parameter; see
    Steiger (2004) and references cited therein. The non-negativity
    constraint on the noncentrality is not yet handled, and only the
    two-sided alternative is supported.

    Parameters
    ----------
    f_stat : float
    df : tuple
        degrees of freedom ``df = (df1, df2)`` where

        - df1 : numerator degrees of freedom, number of constraints
        - df2 : denominator degrees of freedom, df_resid

    alpha : float, default 0.05
    alternative : {"two-sided"}
        Other alternatives have not been implemented.

    Returns
    -------
    ndarray
        Lower and upper confidence bounds for the noncentrality parameter.

    References
    ----------
    .. [1] Steiger, James H. 2004. “Beyond the F Test: Effect Size Confidence
       Intervals and Tests of Close Fit in the Analysis of Variance and
       Contrast Analysis.” Psychological Methods 9 (2): 164–82.
       https://doi.org/10.1037/1082-989X.9.2.164.

    See Also
    --------
    confint_effectsize_oneway
    """
    if alternative not in ["two-sided", "2s", "ts"]:
        raise NotImplementedError

    df1, df2 = df
    half_alpha = alpha / 2
    # invert the noncentral F cdf at both tail probabilities
    return ncfdtrinc(df1, df2, [1 - half_alpha, half_alpha], f_stat)
def confint_effectsize_oneway(f_stat, df, alpha=0.05, nobs=None):
    """Confidence interval for effect sizes in oneway anova (F distribution).

    The confidence interval for the noncentrality parameter is obtained by
    inverting the cdf of the noncentral F distribution; intervals for the
    other effect sizes follow by endpoint transformation. The
    non-negativity constraint on the noncentrality is not yet handled, and
    only the two-sided alternative is supported.

    Parameters
    ----------
    f_stat : float
    df : tuple
        degrees of freedom ``df = (df1, df2)`` where

        - df1 : numerator degrees of freedom, number of constraints
        - df2 : denominator degrees of freedom, df_resid

    alpha : float, default 0.05
    nobs : int, default None
        If None, the total number of observations is reconstructed from
        the degrees of freedom as ``df1 + df2 + 1``.

    Returns
    -------
    Holder
        Instance with confidence interval attributes for the effect sizes
        (``ci_nc``, ``f2``, ``eta2``, ``ci_omega2``, ``ci_f``, ``ci_eta``,
        ``ci_f_corrected``).

    Notes
    -----
    The R package ``effectsize`` computes its confidence intervals
    differently. Their intervals can be replicated with

    >>> ci_nc = confint_noncentrality(f_stat, df1, df2, alpha=0.1)
    >>> ci_es = smo._fstat2effectsize(ci_nc / df1, df1, df2)

    See Also
    --------
    confint_noncentrality
    """
    df1, df2 = df
    if nobs is None:
        # recover the total sample size from the F degrees of freedom
        nobs = df1 + df2 + 1

    ci_nc = confint_noncentrality(f_stat, df, alpha=alpha)
    ci_f2 = ci_nc / nobs
    ci_res = convert_effectsize_fsqu(f2=ci_f2)

    # attach the endpoint-transformed intervals for the other measures
    ci_res.ci_nc = ci_nc
    ci_res.ci_omega2 = (ci_f2 - df1 / df2) / (ci_f2 + 1 + 1 / df2)
    ci_res.ci_f = np.sqrt(ci_res.f2)
    ci_res.ci_eta = np.sqrt(ci_res.eta2)
    ci_res.ci_f_corrected = np.sqrt(ci_res.f2 * (df1 + 1) / df1)
    return ci_res
def anova_generic(means, variances, nobs, use_var="unequal",
                  welch_correction=True, info=None):
    """
    Oneway anova based on summary statistics

    Parameters
    ----------
    means : array_like
        Mean of samples to be compared
    variances : float or array_like
        Residual (within) variance of each sample or pooled.
        If ``variances`` is scalar, then it is interpreted as pooled variance
        that is the same for all samples, ``use_var`` will be ignored.
        Otherwise, the variances are used depending on the ``use_var`` keyword.
    nobs : int or array_like
        Number of observations for the samples.
        If nobs is scalar, then it is assumed that all samples have the same
        number ``nobs`` of observation, i.e. a balanced sample case.
        Otherwise, statistics will be weighted corresponding to nobs.
        Only relative sizes are relevant, any proportional change to nobs does
        not change the effect size.
    use_var : {"unequal", "equal", "bf"}
        If ``use_var`` is "unequal", then the variances can differ across
        samples and the effect size for Welch anova will be computed.
    welch_correction : bool
        If this is false, then the Welch correction to the test statistic is
        not included. This allows the computation of an effect size measure
        that corresponds more closely to Cohen's f.
    info : not used yet

    Returns
    -------
    res : results instance
        This includes `statistic` and `pvalue`.

    Raises
    ------
    ValueError
        If ``means`` is not one-dimensional, or ``use_var`` is not one of
        "unequal", "equal" or "bf".
    """
    options = {"use_var": use_var,
               "welch_correction": welch_correction
               }
    # Convert array_like inputs; previously a plain list or scalar crashed
    # with AttributeError (``means.ndim``, ``nobs.sum()``) despite the
    # documented contract above.
    means = np.asarray(means)
    if means.ndim != 1:
        raise ValueError('data (means, ...) has to be one-dimensional')
    n_groups = len(means)
    variances = np.asarray(variances)
    nobs = np.asarray(nobs, dtype=float)
    if nobs.ndim == 0:
        # balanced case: broadcast the common group size
        nobs = np.ones(n_groups) * nobs
    nobs_t = nobs.sum()
    # mean_t = (nobs * means).sum() / nobs_t
    if use_var == "unequal":
        weights = nobs / variances
    else:
        weights = nobs

    w_total = weights.sum()
    w_rel = weights / w_total
    # meanw_t = (weights * means).sum() / w_total
    meanw_t = w_rel @ means

    statistic = np.dot(weights, (means - meanw_t)**2) / (n_groups - 1.)
    df_num = n_groups - 1.

    if use_var == "unequal":
        # Satterthwaite-Welch denominator degrees of freedom
        tmp = ((1 - w_rel)**2 / (nobs - 1)).sum() / (n_groups**2 - 1)
        if welch_correction:
            statistic /= 1 + 2 * (n_groups - 2) * tmp
        df_denom = 1. / (3. * tmp)

    elif use_var == "equal":
        # variance of group demeaned total sample, pooled var_resid
        tmp = ((nobs - 1) * variances).sum() / (nobs_t - n_groups)
        statistic /= tmp
        df_denom = nobs_t - n_groups

    elif use_var == "bf":
        tmp = ((1. - nobs / nobs_t) * variances).sum()
        statistic = 1. * (nobs * (means - meanw_t)**2).sum()
        statistic /= tmp

        df_num2 = n_groups - 1
        df_denom = tmp**2 / ((1. - nobs / nobs_t) ** 2 *
                             variances ** 2 / (nobs - 1)).sum()
        # Mehrotra's corrected numerator degrees of freedom
        df_num = tmp**2 / ((variances ** 2).sum() +
                           (nobs / nobs_t * variances).sum() ** 2 -
                           2 * (nobs / nobs_t * variances ** 2).sum())
        pval2 = stats.f.sf(statistic, df_num2, df_denom)
        options["df2"] = (df_num2, df_denom)
        options["df_num2"] = df_num2
        options["pvalue2"] = pval2

    else:
        raise ValueError('use_var is to be one of "unequal", "equal" or "bf"')

    pval = stats.f.sf(statistic, df_num, df_denom)
    res = HolderTuple(statistic=statistic,
                      pvalue=pval,
                      df=(df_num, df_denom),
                      df_num=df_num,
                      df_denom=df_denom,
                      nobs_t=nobs_t,
                      n_groups=n_groups,
                      means=means,
                      nobs=nobs,
                      vars_=variances,
                      **options
                      )
    return res
def anova_oneway(data, groups=None, use_var="unequal", welch_correction=True,
                 trim_frac=0):
    """Oneway anova for k independent samples.

    This implements standard anova, Welch and Brown-Forsythe anova, and
    the trimmed (Yuen) variants of those.

    Parameters
    ----------
    data : tuple of array_like or DataFrame or Series
        Data for k independent samples, with k >= 2. Either a tuple or
        list of arrays, or long format with outcome observations in
        ``data`` and group membership in ``groups``.
    groups : ndarray or Series
        If data is in long format, indicator to which group or sample
        each observation belongs.
    use_var : {"unequal", "equal" or "bf"}
        How heteroscedasticity, i.e. unequal variance across samples, is
        treated:

        - "unequal" : variances are not assumed equal; Welch anova with
          Satterthwaite-Welch degrees of freedom. This is the default.
        - "equal" : variances are assumed equal; standard anova.
        - "bf" : variances are not assumed equal; Brown-Forsythe (1971)
          test with corrected degrees of freedom by Mehrotra. The original
          Brown-Forsythe degrees of freedom and p-value are available as
          additional result attributes ``df_denom2`` and ``p_value2``.
    welch_correction : bool
        If false, the Welch correction to the test statistic is not
        included, allowing an effect size measure that corresponds more
        closely to Cohen's f.
    trim_frac : float in [0, 0.5)
        If zero (default), the statistics are computed without trimming.
        If larger than zero, the largest and smallest observations of each
        sample are trimmed; the number trimmed is the fraction of the
        sample size truncated to the next lower integer. If the fraction
        is so large that no observations remain, `nan` is returned.

    Returns
    -------
    res : results instance
        HolderTuple whose main attributes are ``statistic`` (approximately
        F-distributed), ``pvalue`` and ``df = (df_denom, df_num)``. With
        ``use_var="bf"``, ``pvalue`` uses Mehrotra's corrected degrees of
        freedom and ``pvalue2`` is the Brown-Forsythe (1974) version.

    Notes
    -----
    Welch's anova is correctly sized in smaller samples if the sample
    distribution is not very far from normal, and can also be correctly
    sized for discrete distributions with finite support such as Lickert
    scale data; it can become liberal for strongly skewed data. The
    trimmed version is robust to many non-normal distributions and is more
    powerful under skewness or heavy tails. Trimming is currently based on
    the integer part of ``nobs * trim_frac``; the default might change to
    include fractional observations as in the original articles by Yuen.

    See Also
    --------
    anova_generic

    References
    ----------
    Brown, Morton B., and Alan B. Forsythe. 1974. “The Small Sample
    Behavior of Some Statistics Which Test the Equality of Several Means.”
    Technometrics 16 (1) (February 1): 129–132. doi:10.2307/1267501.

    Mehrotra, Devan V. 1997. “Improving the Brown-Forsythe Solution to the
    Generalized Behrens-Fisher Problem.” Communications in Statistics -
    Simulation and Computation 26 (3): 1139–1145.
    doi:10.1080/03610919708813431.
    """
    if groups is not None:
        # long format: split the outcome into one sample per group label
        levels = np.unique(groups)
        data = [data[groups == lev] for lev in levels]

    samples = [np.asarray(sample) for sample in data]
    for sample in samples:
        if sample.ndim != 1:
            raise ValueError('data arrays have to be one-dimensional')

    nobs = np.array([len(sample) for sample in samples], float)

    if trim_frac == 0:
        means = np.array([sample.mean() for sample in samples])
        vars_ = np.array([sample.var(ddof=1) for sample in samples])
    else:
        trimmed = [TrimmedMean(sample, trim_frac) for sample in samples]
        means = np.array([tm.mean_trimmed for tm in trimmed])
        # winsorized variance rescaled to the reduced (trimmed) sample
        # size; R uses the uncorrected var_winsorized instead
        vars_ = np.array([tm.var_winsorized * (tm.nobs - 1) /
                          (tm.nobs_reduced - 1) for tm in trimmed])
        nobs = np.array([tm.nobs_reduced for tm in trimmed])

    return anova_generic(means, vars_, nobs, use_var=use_var,
                         welch_correction=welch_correction)
def equivalence_oneway_generic(f_stat, n_groups, nobs, equiv_margin, df,
                               alpha=0.05, margin_type="f2"):
    """Equivalence test for oneway anova (Wellek and extensions).

    This is a helper function for the case when summary statistics are
    available; use `equivalence_oneway` otherwise.

    The null hypothesis is that the means differ by more than
    `equiv_margin` in the anova distance measure. If the null is rejected,
    then the data supports that the means are equivalent, i.e. within a
    given distance.

    Parameters
    ----------
    f_stat : float
        F-statistic
    n_groups : int
        Number of groups in oneway comparison.
    nobs : ndarray
        Array of number of observations in groups.
    equiv_margin : float
        Equivalence margin in terms of effect size. The effect size type
        is chosen with `margin_type`; default is squared Cohen's f.
    df : tuple
        degrees of freedom ``df = (df1, df2)`` where

        - df1 : numerator degrees of freedom, number of constraints
        - df2 : denominator degrees of freedom, df_resid

    alpha : float in (0, 1)
        Significance level for the hypothesis test.
    margin_type : "f2" or "wellek"
        Type of effect size used for the equivalence margin.

    Returns
    -------
    results : instance of HolderTuple class
        The two main attributes are test statistic `statistic` and p-value
        `pvalue`.

    Notes
    -----
    Equivalence is defined here in terms of a squared distance measure
    similar to Mahalanobis distance; alternative definitions for the
    oneway case are based on maximum difference between pairs of means or
    similar pairwise distances.

    The equivalence margin enters as the noncentrality parameter of the
    noncentral F distribution of the test statistic. In samples with
    unequal variances estimated using Welch or Brown-Forsythe anova, the
    f-statistic depends on the unequal variances and corrections to the
    test statistic, so equivalence margins are not fully comparable across
    methods for treating unequal variances.

    References
    ----------
    Wellek, Stefan. 2010. Testing Statistical Hypotheses of Equivalence and
    Noninferiority. 2nd ed. Boca Raton: CRC Press.

    Cribbie, Robert A., Chantal A. Arpin-Cribbie, and Jamie A. Gruman. 2009.
    “Tests of Equivalence for One-Way Independent Groups Designs.” The
    Journal of Experimental Education 78 (1): 1–13.
    https://doi.org/10.1080/00220970903224552.

    Jan, Show-Li, and Gwowen Shieh. 2019. “On the Extended Welch Test for
    Assessing Equivalence of Standardized Means.” Statistics in
    Biopharmaceutical Research 0 (0): 1–8.
    https://doi.org/10.1080/19466315.2019.1654915.
    """
    nobs_t = nobs.sum()
    nobs_mean = nobs_t / n_groups

    # translate the margin into the null noncentrality and the observed
    # statistic into the matching effect size scale
    if margin_type == "wellek":
        type_effectsize = "Wellek's psi_squared"
        nc_null = nobs_mean * equiv_margin**2
        es = f_stat * (n_groups - 1) / nobs_mean
    elif margin_type in ["f2", "fsqu", "fsquared"]:
        type_effectsize = "Cohen's f_squared"
        nc_null = nobs_t * equiv_margin
        es = f_stat / nobs_t
    else:
        raise ValueError('`margin_type` should be "f2" or "wellek"')

    df1, df2 = df
    crit_f = stats.ncf.ppf(alpha, df1, df2, nc_null)

    if margin_type == "wellek":
        # TODO: do we need a sqrt
        crit_es = crit_f * (n_groups - 1) / nobs_mean
    else:
        crit_es = crit_f / nobs_t

    pv = stats.ncf.cdf(f_stat, df1, df2, nc_null)
    # power at (essentially) zero effect; scipy cannot handle nc exactly 0
    pwr = stats.ncf.cdf(crit_f, df1, df2, 1e-13)

    return HolderTuple(statistic=f_stat,
                       pvalue=pv,
                       effectsize=es,  # match es type to margin_type
                       crit_f=crit_f,
                       crit_es=crit_es,
                       reject=(es < crit_es),
                       power_zero=pwr,
                       df=df,
                       f_stat=f_stat,
                       type_effectsize=type_effectsize
                       )
def equivalence_oneway(data, equiv_margin, groups=None, use_var="unequal",
                       welch_correction=True, trim_frac=0, margin_type="f2"):
    """Equivalence test for oneway anova (Wellek's Anova).

    The null hypothesis is that the group means differ by more than
    ``equiv_margin`` in the anova distance measure.  Rejecting the null
    therefore supports equivalence, i.e. that all means lie within the
    given distance of each other.

    Parameters
    ----------
    data : tuple of array_like or DataFrame or Series
        Data for k independent samples, with k >= 2.  Either a tuple or
        list of arrays, or long format with outcome observations in
        ``data`` and group membership in ``groups``.
    equiv_margin : float
        Equivalence margin in terms of effect size.  The effect size
        measure is selected with `margin_type`; default is squared
        Cohen's f.
    groups : ndarray or Series
        Group indicator for each observation when data is in long format.
    use_var : {"unequal", "equal" or "bf"}
        How to treat heteroscedasticity (unequal variances) across
        samples.  Three approaches are available:

        "unequal" : Welch Anova with Satterthwaite-Welch degrees of
            freedom.  This is the default.
        "equal" : standard Anova assuming equal variances.
        "bf" : Brown-Forsythe (1971) test of equality of means with the
            corrected degrees of freedom by Mehrotra; the original BF
            degrees of freedom are available as additional attributes in
            the results instance, ``df_denom2`` and ``p_value2``.

    welch_correction : bool
        If false, the Welch correction to the test statistic is dropped,
        which makes the effect size measure correspond more closely to
        Cohen's f.
    trim_frac : float in [0, 0.5)
        Optional trimming for an Anova with trimmed mean and winsorized
        variances.  Zero (the default) means no trimming; if the fraction
        is so large that not enough observations remain, `nan` is
        returned.
    margin_type : "f2" or "wellek"
        Effect size used for the equivalence margin, either squared
        Cohen's f ("f2", the default) or Wellek's psi ("wellek").

    Returns
    -------
    results : instance of HolderTuple class
        The two main attributes are test statistic `statistic` and
        p-value `pvalue`.
    """
    # Run the (possibly robust) oneway anova first; it provides the
    # f-statistic together with the sample summaries needed below.
    anova_res = anova_oneway(data, groups=groups, use_var=use_var,
                             welch_correction=welch_correction,
                             trim_frac=trim_frac)
    # Turn the observed f-statistic into the equivalence test.
    return equivalence_oneway_generic(anova_res.statistic,
                                      anova_res.n_groups,
                                      anova_res.nobs_t,
                                      equiv_margin,
                                      anova_res.df,
                                      alpha=0.05,
                                      margin_type=margin_type)
def _power_equivalence_oneway_emp(f_stat, n_groups, nobs, eps, df, alpha=0.05):
    """Empirical (post-hoc) power of the oneway equivalence test.

    This only returns post-hoc, empirical power.

    Warning: ``eps`` is an effect-size margin as defined in Wellek, not a
    signal-to-noise ratio (Cohen's f family).

    Parameters
    ----------
    f_stat : float
        F-statistic from a oneway anova, used to compute the empirical
        effect size.
    n_groups : int
        Number of groups in the oneway comparison.
    nobs : ndarray
        Number of observations per group.
    eps : float
        Equivalence margin in terms of effect size given by Wellek's psi.
    df : tuple
        Degrees of freedom of the F distribution.
    alpha : float in (0, 1)
        Significance level for the hypothesis test.

    Returns
    -------
    pow : float
        Ex-post (post-hoc, empirical) power at the observed f-statistic
        of the equivalence test.
    """
    # Critical value of the equivalence test at the Wellek margin.
    res_equiv = equivalence_oneway_generic(f_stat, n_groups, nobs, eps, df,
                                           alpha=alpha, margin_type="wellek")
    mean_nobs = nobs.sum() / n_groups
    # Empirical effect size (Wellek psi) implied by the observed statistic.
    psi_emp = f_stat * (n_groups - 1) / mean_nobs
    return stats.ncf.cdf(res_equiv.crit_f, df[0], df[1], mean_nobs * psi_emp)
def power_equivalence_oneway(f2_alt, equiv_margin, nobs_t, n_groups=None,
                             df=None, alpha=0.05, margin_type="f2"):
    """Power of the oneway equivalence test.

    Parameters
    ----------
    f2_alt : float
        Effect size under the alternative, as squared Cohen's f (or as
        Wellek's psi if ``margin_type="wellek"``).
    equiv_margin : float
        Equivalence margin in terms of effect size; the effect size
        measure is selected with `margin_type` and defaults to squared
        Cohen's f.
    nobs_t : ndarray
        Total number of observations summed over all groups.
    n_groups : int
        Number of groups in the oneway comparison.  Required if ``df`` is
        not given, and always required for ``margin_type="wellek"``.
    df : tuple
        Degrees of freedom of the F distribution,
        ``df = (n_groups - 1, nobs_t - n_groups)``.
    alpha : float in (0, 1)
        Significance level for the hypothesis test.
    margin_type : "f2" or "wellek"
        Effect size used for the equivalence margin, either squared
        Cohen's f ("f2", the default) or Wellek's psi ("wellek").

    Returns
    -------
    pow_alt : float
        Power of the equivalence test at the given alternative effect
        size.
    """
    # Degrees of freedom can be derived from the group count if missing.
    if df is None:
        if n_groups is None:
            raise ValueError("either df or n_groups has to be provided")
        df = (n_groups - 1, nobs_t - n_groups)

    # scipy's ncf rejects a noncentrality of exactly zero (fixed in scipy
    # master), so nudge a zero alternative effect size to a tiny value.
    if f2_alt == 0:
        f2_alt = 1e-13

    # Express both margin (null) and alternative as squared Cohen's f,
    # converting from Wellek's psi when requested.
    if margin_type in ("f2", "fsqu", "fsquared"):
        f2_null = equiv_margin
    elif margin_type == "wellek":
        if n_groups is None:
            raise ValueError("If margin_type is wellek, then n_groups has "
                             "to be provided")
        nobs_mean = nobs_t / n_groups
        f2_null = nobs_mean * equiv_margin**2 / nobs_t
        f2_alt = nobs_mean * f2_alt**2 / nobs_t
    else:
        raise ValueError('`margin_type` should be "f2" or "wellek"')

    # Critical value at the margin, then power is the rejection
    # probability under the alternative noncentrality.
    crit_at_margin = stats.ncf.ppf(alpha, df[0], df[1], nobs_t * f2_null)
    return stats.ncf.cdf(crit_at_margin, df[0], df[1], nobs_t * f2_alt)
def simulate_power_equivalence_oneway(means, nobs, equiv_margin, vars_=None,
                                      k_mc=1000, trim_frac=0,
                                      options_var=None, margin_type="f2"
                                      ):  # , anova_options=None): #TODO
    """Simulate Power for oneway equivalence test (Wellek's Anova)

    This function is experimental and written to evaluate the asymptotic
    power function.  It will change without backwards compatibility
    constraints.  The only part that is stable is the `pvalue` attribute
    in results.

    Parameters
    ----------
    means : array_like
        Group means of the simulated data generating process.
    nobs : ndarray
        Number of observations per group.
    equiv_margin : float
        Equivalence margin in terms of effect size (see `margin_type`).
    vars_ : array_like, optional
        Group variances; if not given, unit variances are used.
    k_mc : int
        Number of Monte Carlo replications.
    trim_frac : float in [0, 0.5)
        Trimming fraction forwarded to `anova_oneway`.
    options_var : list of str, optional
        Variance-handling options to evaluate for each replication;
        defaults to ``["unequal", "equal", "bf"]``.
    margin_type : "f2" or "wellek"
        Effect size type of the equivalence margin.

    Returns
    -------
    res : Holder
        Monte Carlo result arrays ``f_stat``, ``pvalue``, ``reject`` and
        ``other`` with one row per replication and one column (or column
        group) per entry of ``options_var``.
    """
    if options_var is None:
        options_var = ["unequal", "equal", "bf"]
    if vars_ is not None:
        stds = np.sqrt(vars_)
    else:
        stds = np.ones(len(means))

    nobs_mean = nobs.mean()
    n_groups = len(nobs)
    res_mc = []
    f_mc = []
    reject_mc = []
    other_mc = []
    for _ in range(k_mc):
        # Draw one normal sample per group.  Fix: the previous
        # implementation unpacked into exactly four variables
        # (``y0, y1, y2, y3``) and therefore crashed for any other number
        # of groups; building a list works for arbitrary ``len(means)``.
        ys = [m + std * np.random.randn(n)
              for (n, m, std) in zip(nobs, means, stds)]

        res_i = []
        f_i = []
        reject_i = []
        other_i = []
        for uv in options_var:
            # Anova on the simulated samples, then the equivalence test
            # based on its f-statistic.
            res0 = anova_oneway(ys, use_var=uv, trim_frac=trim_frac)
            f_stat = res0.statistic
            res1 = equivalence_oneway_generic(f_stat, n_groups, nobs.sum(),
                                              equiv_margin, res0.df,
                                              alpha=0.05,
                                              margin_type=margin_type)
            res_i.append(res1.pvalue)
            es_wellek = f_stat * (n_groups - 1) / nobs_mean
            f_i.append(es_wellek)
            reject_i.append(res1.reject)
            other_i.extend([res1.crit_f, res1.crit_es, res1.power_zero])
        res_mc.append(res_i)
        f_mc.append(f_i)
        reject_mc.append(reject_i)
        other_mc.append(other_i)

    f_mc = np.asarray(f_mc)
    other_mc = np.asarray(other_mc)
    res_mc = np.asarray(res_mc)
    reject_mc = np.asarray(reject_mc)
    res = Holder(f_stat=f_mc,
                 other=other_mc,
                 pvalue=res_mc,
                 reject=reject_mc
                 )
    return res
def test_scale_oneway(data, method="bf", center="median", transform="abs",
                      trim_frac_mean=0.1, trim_frac_anova=0.0):
    """Oneway Anova test for equal scale, variance or dispersion

    This hypothesis test performs a oneway anova test on transformed data and
    includes Levene and Brown-Forsythe tests for equal variances as special
    cases.

    Parameters
    ----------
    data : tuple of array_like or DataFrame or Series
        Data for k independent samples, with k >= 2. The data can be provided
        as a tuple or list of arrays or in long format with outcome
        observations in ``data`` and group membership in ``groups``.
    method : {"unequal", "equal" or "bf"}
        How to treat heteroscedasticity across samples. This is used as
        `use_var` option in `anova_oneway` and refers to the variance of the
        transformed data, i.e. assumption is on 4th moment if squares are used
        as transform.
        Three approaches are available:

        "unequal" : Variances are not assumed to be equal across samples.
            Heteroscedasticity is taken into account with Welch Anova and
            Satterthwaite-Welch degrees of freedom.
            This is the default.
        "equal" : Variances are assumed to be equal across samples.
            This is the standard Anova.
        "bf" : Variances are not assumed to be equal across samples.
            The method is Browne-Forsythe (1971) for testing equality of means
            with the corrected degrees of freedom by Merothra. The original BF
            degrees of freedom are available as additional attributes in the
            results instance, ``df_denom2`` and ``p_value2``.

    center : "median", "mean", "trimmed" or float
        Statistic used for centering observations. If a float, then this
        value is used to center. Default is median.
    transform : "abs", "square" or callable
        Transformation for the centered observations. If a callable, then this
        function is called on the centered data.
        Default is absolute value.
    trim_frac_mean : float in [0, 0.5)
        Trim fraction for the trimmed mean when `center` is "trimmed".
        Default is 0.1.
    trim_frac_anova : float in [0, 0.5)
        Optional trimming for Anova with trimmed mean and Winsorized variances.
        With the default trim_frac equal to zero, the oneway Anova statistics
        are computed without trimming. If `trim_frac` is larger than zero,
        then the largest and smallest observations in each sample are trimmed.
        see ``trim_frac`` option in `anova_oneway`

    Returns
    -------
    res : results instance
        The returned HolderTuple instance has the following main attributes
        and some additional information in other attributes.

        statistic : float
            Test statistic for k-sample mean comparison which is approximately
            F-distributed.
        pvalue : float
            If ``method="bf"``, then the p-value is based on corrected
            degrees of freedom following Mehrotra 1997.
        pvalue2 : float
            This is the p-value based on degrees of freedom as in
            Brown-Forsythe 1974 and is only available if ``method="bf"``.
        df : (df_denom, df_num)
            Tuple containing degrees of freedom for the F-distribution depend
            on ``method``. If ``method="bf"``, then `df_denom` is for Mehrotra
            p-values `df_denom2` is available for Brown-Forsythe 1974 p-values.
            `df_num` is the same numerator degrees of freedom for both
            p-values.

    See Also
    --------
    anova_oneway
    scale_transform
    """
    # Center and transform each sample (e.g. absolute deviation from the
    # median corresponds to the Brown-Forsythe variance test), then run a
    # oneway anova for means on the transformed observations.
    data = map(np.asarray, data)
    xxd = [scale_transform(x, center=center, transform=transform,
                           trim_frac=trim_frac_mean) for x in data]
    res = anova_oneway(xxd, groups=None, use_var=method,
                       welch_correction=True, trim_frac=trim_frac_anova)
    # Keep the transformed observations available for inspection.
    res.data_transformed = xxd
    return res
def equivalence_scale_oneway(data, equiv_margin, method='bf', center='median',
                             transform='abs', trim_frac_mean=0.,
                             trim_frac_anova=0.):
    """Oneway Anova test for equivalence of scale, variance or dispersion.

    This hypothesis test performs a oneway equivalence anova test on
    transformed data.

    Note: the interpretation of the equivalence margin `equiv_margin`
    depends on the transformation of the data.  Transformations such as
    absolute deviation are not scaled to correspond to the variance under
    a normal distribution.
    """
    # Center/transform each sample, then test equivalence of the means of
    # the transformed observations (i.e. equivalence of dispersion).
    transformed = [scale_transform(np.asarray(sample), center=center,
                                   transform=transform,
                                   trim_frac=trim_frac_mean)
                   for sample in data]
    res = equivalence_oneway(transformed, equiv_margin, use_var=method,
                             welch_correction=True,
                             trim_frac=trim_frac_anova)
    res.x_transformed = transformed
    return res
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
1526,
1248,
838,
25,
2091,
25,
2548,
12131,
198,
198,
13838,
25,
5264,
69,
2448,
21841,
727,
198,
34156,
25,
347,
10305,
12,
18,
198,
198... | 2.455345 | 17,501 |
#author: SAURABH ANNADATE
"""Merge day-wise Picarro .dat files into one time-sorted 1-minute-average CSV."""

import pandas as pd
import glob
import os

# Path to the folder containing the day-wise folders of the PICARRO data.
path = '/home/PICARRO/July/'

# Collect every .dat data file from every day-wise subfolder.
all_files = []
for day_folder in os.listdir(path):
    all_files.extend(glob.glob(os.path.join(path, day_folder, "*.dat")))

# Read each whitespace-delimited Picarro file into a dataframe.
# Fix: use a raw string r"\s+" -- the plain "\s+" is an invalid escape
# sequence (DeprecationWarning, a future SyntaxError).
frames = [pd.read_csv(filename, sep=r"\s+") for filename in all_files]

# Concatenate all separate dataframes into one dataframe.
data = pd.concat(frames, axis=0, ignore_index=True)

# Merge DATE and TIME into one DATE_TIME column, parse it as pandas
# datetimes and sort chronologically.
data['DATE_TIME'] = pd.to_datetime(data['DATE'] + " " + data['TIME'])
data = data.sort_values(by='DATE_TIME')
#data.to_csv('merged_picarro.csv')

# 1 minute average file, saved as csv.
picarro_1min = data.resample('1Min', on='DATE_TIME').mean()
picarro_1min.to_csv('picarro_1minavg_july.csv')
| [
2,
9800,
25,
14719,
45570,
33,
39,
40126,
2885,
6158,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
15095,
198,
11748,
28686,
198,
198,
6978,
796,
31051,
11195,
14,
47,
2149,
1503,
13252,
14,
16157,
14,
6,
220,
1303,
6978,
... | 2.57767 | 412 |
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pylab as plt
import os
import warnings
# Allow duplicate OpenMP runtimes (a common MKL/libomp clash workaround)
# so the process does not abort on import, and silence deprecation noise.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
warnings.filterwarnings("ignore", category=DeprecationWarning)
# NOTE(review): n_cpus is defined but not used in this part of the file --
# presumably consumed further down; confirm.
n_cpus = 6
#
# see https://github.com/tensorflow/probability/blob/master/tensorflow_probability/
# g3doc/api_docs/python/tfp/distributions/GaussianProcessRegressionModel.md
#
# Short aliases for the TFP distributions and PSD-kernel namespaces.
tfd = tfp.distributions
psd_kernels = tfp.positive_semidefinite_kernels
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
1676,
65,
1799,
355,
256,
46428,
198,
11748,
2603,
29487,
8019,
13,
79,
2645,
397,
355,
458,
83,
198,
11748,
28686,
198,
11748,... | 2.618557 | 194 |
from earley import Rule
from reader import CStream, L2, Literal
import earley
import sys
import pprint
# Lexeme -> token-type table used by the L2 tokenizer below; the
# commented-out entries are operators the grammar does not handle yet.
table = {
    u'(': u'leftparen', u')': u'rightparen',
    u'+': u'plus',
    u'if': u'if',
    u'elif': u'elif',
    u'else': u'else',
#    u'[': u'lb', u']': u'rb',
#    u'{': u'lc', u'}': u'rc',
#    u'and': u'and', u'or': u'or', u'not': u'not',
    u'=': u'let', u':=': u'set',
#    u'<': u'chain',
#    u'>': u'chain',
#    u'<=': u'chain',
#    u'>=': u'chain',
#    u'==': u'chain',
#    u'!=': u'chain',
#    u'^': u'op', u'&': u'op', u'<<': u'op',
#    u'>>': u'op', u'!': u'op', u'*': u'op',
#    u'/': u'op', u'%': u'op', u'+': u'op',
#    u'-': u'op', u'|': u'op', u'++': u'op',
#    u':': u'symbol',
    u'.': u'dot'}
# Earley grammar: Rule(lhs, rhs_symbols, action_name[, child_indices]).
# NOTE(review): the action name appears to be resolved through the
# globals() dict that is handed to `traverse` at the bottom of the file,
# and the optional index list selects which rhs children feed the action
# -- confirm against the earley module.
grammar = [
    Rule('file', ['statement'],
         "new_list"),
    Rule('file', ['file', 'newline', 'statement'],
         "append", [0, 2]),
    Rule('statement', ['clause'],
         "through"),
    Rule('statement', ['statement', 'if', 'clause'],
         "inline_if", [2, 0]),
    Rule('statement', ['exprs', 'indent', 'file', 'dedent'],
         "call_with_block", [0, 2]),
    Rule('statement', ['cond'],
         "through"),
    Rule('cond', ['cond_chain', 'otherwise'],
         "cond"),
    Rule('otherwise', [], "nothing"),
    Rule('otherwise', ['newline', 'else', 'indent', 'file', 'dedent'],
         "through", [3]),
    Rule('cond_chain', ['if', 'clause', 'indent', 'file', 'dedent'],
         'tuple_list', [1, 3]),
    Rule('cond_chain', ['cond_chain', 'newline', 'elif', 'clause', 'indent', 'file', 'dedent'],
         'tuple_append', [0, 3, 5]),
    Rule('statement', ['symbol', 'let', 'statement'],
         "let", [0, 2]),
    Rule('statement', ['symbol', 'set', 'statement'],
         "set", [0, 2]),
    Rule('clause', ['expr'],
         "through"),
    Rule('clause', ['expr', 'exprs'],
         "call"),
    Rule('exprs', ['expr'],
         "new_list"),
    Rule('exprs', ['exprs', 'expr'],
         "append"),
    Rule('expr', ['postfix'],
         "through"),
    Rule('postfix', ['postfix', 'dot', 'symbol'],
         "getattr", [0, 2]),
    Rule('postfix', ['term'],
         "through"),
    Rule('term', ["symbol"],
         "through"),
    Rule('term', ["int"],
         "through"),
    Rule('term', ["float"],
         "through"),
    Rule('term', ["string"],
         "through"),
    Rule('term', ['leftparen', 'expr', 'rightparen'],
         "through", [1]),
]
# Read the source file named on the command line and tokenize it.
source = open(sys.argv[1]).read()
stream = L2(CStream(source), table)
parser = earley.parser(grammar, 'file')

# Offside-rule handling: track column positions and synthesize
# indent/newline/dedent tokens before each real token is fed to the
# parser, but only when the parser actually expects such a token.
indent_stack = []
indent = stream.first.start.col
line = stream.first.start.lno
while stream.filled:
    if line < stream.first.start.lno:
        # A new source line began: close blocks whose indentation ended.
        while stream.first.start.col < indent and 'dedent' in parser.expect:
            start = stream.first.start
            parser.step(Literal(start, start, 'dedent', ''))
            indent = indent_stack.pop()
        assert stream.first.start.col >= indent
        # Same indentation level: this is a statement separator.
        if stream.first.start.col == indent and 'newline' in parser.expect:
            start = stream.first.start
            parser.step(Literal(start, start, 'newline', ''))
    # Deeper indentation opens a new block.
    if stream.first.start.col > indent and 'indent' in parser.expect:
        start = stream.first.start
        parser.step(Literal(start, start, 'indent', ''))
        indent_stack.append(indent)
        indent = stream.first.start.col
    line = stream.first.start.lno
    token = stream.advance()
    parser.step(token)
    # An empty Earley chart column means no rule can consume this token.
    if len(parser.chart[-1]) == 0:
        raise Exception("parse error at: {}".format(token))
# At end of input, close any blocks that are still open.
while 'dedent' in parser.expect:
    stop = token.stop
    parser.step(Literal(stop, stop, 'dedent', ''))
#earley.print_result(parser)
arg = None
# NOTE(review): `traverse` is neither defined nor imported in this file --
# possibly it should be `earley.traverse`; confirm before running.
result = traverse(parser, parser.root, 0, len(parser.input), globals(), arg)
pprint.pprint(result)
| [
6738,
1027,
1636,
1330,
14330,
198,
6738,
9173,
1330,
327,
12124,
11,
406,
17,
11,
25659,
1691,
198,
11748,
1027,
1636,
198,
11748,
25064,
198,
11748,
279,
4798,
198,
198,
11487,
796,
1391,
198,
220,
220,
220,
334,
6,
7,
10354,
334,
... | 2.140607 | 1,778 |
import argparse
import re
# Command-line interface: filter IDocs of the given types out of a
# Datahub log file and write them to a target file.
# NOTE(review): the description mentions "xls files" while the arguments
# describe IDoc filtering from a Datahub log -- confirm which is accurate.
parser = argparse.ArgumentParser(description='Create a file of changed Customers from three unfiltered xls files')
parser.add_argument("-s","--source", dest="source_path", help="The Datahub log from which this programm will retrieve the IDocs", required=True)
parser.add_argument("-t", "--target", dest="target_path", help="The file which will contain the IDocs", required=True)
parser.add_argument("-it", "--idoc-types", dest="idoc_types", nargs="+", help="The IDoc types that will be filtered", required=True)
args = parser.parse_args()
if __name__ == "__main__":
    main()  # NOTE(review): main() is not defined in this chunk -- confirm it exists elsewhere in the file.
11748,
1822,
29572,
198,
11748,
302,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
16447,
257,
2393,
286,
3421,
36707,
422,
1115,
3684,
346,
4400,
2124,
7278,
3696,
11537,
198,
198,
48610,
13,
2860,
62,
491... | 3.275676 | 185 |
from .utils import *
from .cosmology import *
from .cosmotherm_wrapper import *
from .specdist_functions import *
from .pispec_functions import *
from .interpolate_ct_spectra import *
from .standard_mu_and_y_distortions import *
from .firas_data import *
from .edges_data import *
from scipy.linalg import cholesky, LinAlgError
from scipy.linalg import block_diag
| [
6738,
764,
26791,
1330,
1635,
198,
6738,
764,
6966,
29126,
1330,
1635,
198,
6738,
764,
6966,
13552,
76,
62,
48553,
1330,
1635,
198,
6738,
764,
4125,
10210,
396,
62,
12543,
2733,
1330,
1635,
198,
6738,
764,
79,
271,
43106,
62,
12543,
2... | 2.912 | 125 |
from smtp import SMTPClient, SMTPDisconnectedException
import logging
| [
6738,
895,
34788,
1330,
9447,
7250,
11792,
11,
9447,
51,
5760,
271,
15236,
16922,
198,
11748,
18931,
628
] | 3.944444 | 18 |
import base64
import logging
import random
import re
import string
import time
import pyotp
import six
from cryptography.fernet import Fernet
from django.conf import settings
from bitcaster.config.environ import env
logger = logging.getLogger(__name__)

# Module-level TOTP generator (RFC 6238) with a 1-second time step.
# Fix: the original referenced bare `TOTP`, which is never imported
# (only `import pyotp` is); qualify it to avoid a NameError at import.
# NOTE(review): the secret is hard-coded -- presumably it should come
# from settings/env; confirm.
totp = pyotp.TOTP('base32secret3232', interval=1)
| [
11748,
2779,
2414,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
4731,
198,
11748,
640,
198,
198,
11748,
12972,
313,
79,
198,
11748,
2237,
198,
6738,
45898,
13,
69,
1142,
316,
1330,
38982,
316,
198,
6738,
42625,
1420... | 3.234043 | 94 |
from app import db
from app import login
from datetime import datetime
# import bcrypt
# import hashlib
# import base64
from flask_login import UserMixin
from time import time
import jwt
from app import app
# import os
# from Crypto.Cipher import AES
from app.tools import encrypt, decrypt, hash_str, check_hash
from app.tools import hash_str_with_pepper, format_email
# Database entries have three tiers of security
# 1. Plain Text
# - Entries that cannot be used to identify someone
# - Entries that might be values and might need to be queried en masse
# - Example: Time stamps, rating values
#
# 2. Encrypted with Nonce and Peppered Hash
# - Original characters need to be accessible
# - Needs to be unique, and thus compared at a population level when an
# account is created
# - Pepper is used instead of a salt so a new possible entry can be
# hashed in the same way as all other hashes in the database column
# and thus compared
# - Example: Email, username
#
# 3. Hashed with unique salt and encrypted
# - Original characters to not need to be accessible
# - Encryption means attacker needs access to database and server
# managing database to get encryption keys
# - Example: Password
#
# Encryption is AES 256 using EAX mode which allows for stream encoding.
# Stream encoding means encoded output length will be proportional to plain
# text length. Therefore, for encrypting emails, pad emails to hide their
# length.
#
# Encryption uses a nonce (once only number) so when checking if a value
# exists in a database, the value cannot be encrypted and compared to existing
# encrypted values - as all have different nonces.
#
# Therefore, for values that need to be stored with encryption, and compared
# over a population the value must be stored in two ways:
# 1. Encrypted with nonce
# 2. Hashed with pepper
#
#
# Size of Encrypted Email storage in database:
# - Emails may have 64 + 1 + 255 characters = 320 characters
# - Therefore, emails are padded to 320 characters
# - Encrypted bytes includes encryped email (320 bytes) + tag (16 bytes)
# + nonce (16 bytes) = 352 bytes
# - Encoding in Base64: 352 * 8 bits / 6 bits = 469.333 b64 characters
# - 352 bytes = 2816 bits
# - 469 b64 chars = 2814 bits, so need 470 b64 chars to cover 352 bytes.
# - But then it won't split evenly into 8 bits, so need another 2 b64 chars
# - Therefore total is 472 b64 chars
# - This is then stored in the database as utf-8, which will be a full 472
# bytes
#
# Size of Encrypted username storage in database:
# - Usernames may have 32 characters
# - Therefore, emails are padded to 32 characters
# - Encrypted bytes includes encryped username (32 bytes) + tag (16 bytes)
# + nonce (16 bytes) = 64 bytes
# - Encoding in Base64: 64 * 8 bits / 6 bits = 85.333 b64 characters
# - 64 bytes = 512 bits
# - 85 b64 chars = 510 bits, so need 86 b64 chars to cover 64 bytes.
# - But then it won't split evenly into 8 bits, so need another 2 b64 chars
# - Therefore total is 88 b64 chars
# - This is then stored in the database as utf-8, which will be a full 88
# bytes
#
# Size of password:
# - Password hash is 60 bytes + tag (16) + nonce (16) = 92 bytes = 736 bits
# - 124 b64 chars = 744 bits = 93 bytes
# - Therefore saved string will be 124 chars
# Size of hash: bcrypt output will always be 60 b64 chars.
# Size of email hash will be 60-29 = 31 as don't want to store the pepper
# Username size will be limited to 32 characters
@login.user_loader
| [
6738,
598,
1330,
20613,
198,
6738,
598,
1330,
17594,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
2,
1330,
275,
29609,
198,
2,
1330,
12234,
8019,
198,
2,
1330,
2779,
2414,
198,
6738,
42903,
62,
38235,
1330,
11787,
35608,
259,
198,
67... | 3.260557 | 1,113 |
from spinn_machine.utilities.progress_bar import ProgressBar
from spinnman.messages.eieio.data_messages.eieio_data_header \
import EIEIODataHeader
import numpy
import logging
logger = logging.getLogger(__name__)
class EIEIOSpikeRecorder(object):
""" Records spikes using EIEIO format
"""
@property
@record.setter
| [
6738,
599,
3732,
62,
30243,
13,
315,
2410,
13,
33723,
62,
5657,
1330,
18387,
10374,
198,
6738,
599,
3732,
805,
13,
37348,
1095,
13,
68,
494,
952,
13,
7890,
62,
37348,
1095,
13,
68,
494,
952,
62,
7890,
62,
25677,
3467,
198,
220,
22... | 2.764228 | 123 |
import matplotlib
import matplotlib.pyplot as plt
# Demo driver for the Plotter class: draw a single-line figure and a
# multi-line figure, then push updated data to each after a 1 s pause.
# NOTE(review): `Plotter` is neither defined nor imported in this file --
# confirm where it comes from (e.g. a missing `from plotter import Plotter`).
p = Plotter()
# Figure 1: one line, initial series of four points.
line = p.plot_single({
    'id': 1,
    'title': 'single_line',
    'xlabel': 't',
    'ylabel': 'l1',
    'x_data': range(4),
    'y_data': [1, 2, 3, 4]
})
plt.pause(1)
# Re-plot with a longer series under the same 'id' (presumably the shared
# id targets the same figure -- confirm against Plotter's implementation).
p.plot_single({
    'id': 1,
    'title': 'single_line',
    'xlabel': 't',
    'ylabel': 'l1',
    'x_data': range(5),
    'y_data': [1, 2, 3, 5, 8],
})
# Figure 2: two lines with one x/y series per label.
p.plot_multiple({
    'id': 2,
    'title': 'multiple_lines',
    'xlabel': 't',
    'ylabel': ['l1', 'l2'],
    'x_data': [range(4), range(5)],
    'y_data': [[1, 2, 3, 4], [1, 2, 3, 5, 8]],
})
plt.pause(1)
p.plot_multiple({
    'id': 2,
    'title': 'multiple_lines',
    'xlabel': 't',
    'ylabel': ['l1', 'l2'],
    'x_data': [range(5), range(6)],
    'y_data': [[1, 2, 3, 4, 5], [1, 2, 3, 5, 8, 13]],
})
# Keep the figures open until the user closes them.
plt.show(block=True)
| [
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
79,
796,
28114,
353,
3419,
198,
1370,
796,
279,
13,
29487,
62,
29762,
15090,
198,
220,
220,
220,
705,
312,
10354,
352,
11,
198,
2... | 1.866213 | 441 |
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric, solar_system_ephemeris
)
from astropy.tests.helper import assert_quantity_allclose
from poliastro.constants import J2000
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto,
)
from poliastro.frames import (
ICRS,
HCRS, MercuryICRS, VenusICRS, GCRS, MarsICRS, JupiterICRS, SaturnICRS, UranusICRS, NeptuneICRS, PlutoICRS
)
@pytest.mark.parametrize("body, frame", [
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
])
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
])
| [
11748,
12972,
9288,
198,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
6738,
6468,
28338,
13,
37652,
17540,
1330,
357,
198,
220,
220,
220,
13690,
35610,
40171,
341,
11,
198,
220,
220,
220,
651,
62,
2618,
62,
65,
560,
28577,
11,
... | 2.368421 | 551 |
import PySimpleGUI as sg
import traceback
import os
import subprocess
from mini_programs.decontamination_workflow import run_decontaminationPE
from mini_programs.decontamination_workflow import run_decontamination
# Minimal GUI to run decontamination using BWA
if __name__ == "__main__":
    # Build the input form: one .fastq field for single-end mode, two for
    # paired-end mode, plus shared reference/contaminant/output fields.
    layout = [
        [sg.Text('Press START to initiate the decontamination process')],
        [sg.Text('--- Non paired end mode ---')],
        [sg.Text('Path to .fastq file:')],
        [sg.Input(key='_FASTQ_'), sg.FileBrowse()],
        [sg.Text('--- Paired end mode ---')],
        [sg.Text('Path to forward reads file (.fastq):')],
        [sg.Input(key='_FASTQ_FWD_'), sg.FileBrowse()],
        # Fix: this label previously said "forward" although the field
        # below it holds the reverse reads file.
        [sg.Text('Path to reverse reads file (.fastq):')],
        [sg.Input(key='_FASTQ_REV_'), sg.FileBrowse()],
        [sg.Text('--- Common input ---')],
        [sg.Text('Path to the reference(s) of the sequencing target(s) (.fasta, bwa indexed):')],
        [sg.Input(key='_REFS_'), sg.FilesBrowse()],
        [sg.Text('Path to the reference(s) of the contaminant(s) (.fasta, bwa indexed):')],
        [sg.Input(key='_CONTS_'), sg.FilesBrowse()],
        [sg.Text('Output folder:')],
        [sg.Input(key="_OUT_"), sg.FolderBrowse()],
        [sg.Text('Note: to index a reference genome run the command: ')],
        [sg.Text('bwa index path/to/the/reference.fasta')],
        [sg.Button("START"), sg.Button("QUIT")]
    ]
    window = sg.Window('Decontaminate FASTQ files', layout)
    # Event loop: every START press validates the form and runs one
    # decontamination pass; QUIT (or closing the window) exits.
    while True:
        try:
            event, values = window.Read()
            print(values)
            if event in ("QUIT", None):
                break
            # Normalize user input: strip stray whitespace and split the
            # multi-file selections, which PySimpleGUI joins with ";".
            values["_FASTQ_"] = values["_FASTQ_"].strip()
            values["_FASTQ_FWD_"] = values["_FASTQ_FWD_"].strip()
            values["_FASTQ_REV_"] = values["_FASTQ_REV_"].strip()
            values["_OUT_"] = values["_OUT_"].strip()
            references = [ x.strip() for x in values["_REFS_"].split(";") ]
            contaminants = [ x.strip() for x in values["_CONTS_"].split(";") ]
            # Create the output folder if needed and open it for the user.
            # NOTE(review): "xdg-open" is Linux-specific -- confirm target OS.
            if not os.path.isdir(values["_OUT_"]):
                os.mkdir(values["_OUT_"])
            subprocess.Popen(["xdg-open", values["_OUT_"]])
            # Choose single-end vs paired-end mode from which fields are
            # filled in; providing both is rejected as ambiguous.
            if values["_FASTQ_"] != '' and values["_FASTQ_FWD_"] == '' and values["_FASTQ_REV_"] == '':
                # non PE mode
                run_decontamination(values["_FASTQ_"], references, contaminants, values["_OUT_"])
            elif values["_FASTQ_"] == '' and values["_FASTQ_FWD_"] != '' and values["_FASTQ_REV_"] != '':
                run_decontaminationPE(values["_FASTQ_FWD_"], values["_FASTQ_REV_"], references, contaminants, values["_OUT_"])
            else:
                raise Exception("Error in .fastq input files. Please select either PE or non PE modes, do not provide both inputs")
            # Fix: "sucessfully" -> "successfully" in the user-facing popup.
            sg.Popup('Decontamination was completed successfully', keep_on_top=True)
        except Exception as e:
            # Surface any failure (including workflow errors) in a popup
            # with the full traceback instead of crashing the GUI.
            tb = traceback.format_exc()
            sg.popup_error('An error happened:', e, tb)
| [
11748,
9485,
26437,
40156,
355,
264,
70,
198,
11748,
12854,
1891,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
6738,
9927,
62,
23065,
82,
13,
12501,
756,
24979,
62,
1818,
11125,
1330,
1057,
62,
12501,
756,
24979,
11401,
198,
6738,
99... | 2.165131 | 1,411 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Pub-quality figures: maps of xcorr and lag for different variables
See Jupyter notebook for full guided analysis
Created on Fri Dec 18 13:59:51 2020
Updated Mon Feb 22 2021
@author: lizz
"""
from netCDF4 import Dataset
from scipy import interpolate
import pyproj as pyproj
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
from mpl_toolkits.axes_grid1 import make_axes_locatable
import iceutils as ice
import nifl_helper as nifl
# ### Define where the necessary data lives
# In[ ]:
## Local paths to all input datasets (machine-specific; edit before running).
flowline_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Felikson-flowlines/netcdfs/glaciera199.nc'
velocity_fpath='/Users/lizz/Documents/GitHub/Data_unsynced/Gld-Stack/'
gl_bed_fpath ='/Users/lizz/Documents/GitHub/Data_unsynced/BedMachine-Greenland/BedMachineGreenland-2017-09-20.nc'
catchment_smb_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Helheim-processed/smb_rec._.BN_RACMO2.3p2_ERA5_3h_FGRN055.1km.MM.csv'
runoff_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Helheim-processed/runoff._.BN_RACMO2.3p2_ERA5_3h_FGRN055.1km.MM.csv'
termini_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Helheim-processed/HLM_terminus_widthAVE.csv'
# ### Define the domain of analysis
# We will analyse along flowlines defined by Denis Felikson in his previous work, saved and shared as NetCDF files. The flowlines are numbered 01-10 across the terminus; flowline 05 is close to the middle. Note that Helheim Glacier has two large branches. For now we'll study the main trunk, `glaciera199.nc`. The more southerly trunk is `glacierb199.nc`.
# In[ ]:
# Read the x/y coordinates of flowline 05 (central trunk) from the NetCDF.
ncfile = Dataset(flowline_fpath, 'r')
xh = ncfile['flowline05'].variables['x'][:]
yh = ncfile['flowline05'].variables['y'][:]
ncfile.close()
# In[ ]:
## Define points at which to extract
upstream_max = 500 # index of last xh,yh within given distance of terminus--pts roughly 50m apart
# Sample every 20th flowline vertex; [2::] drops the first two sampled points
# (those nearest index 0 of the flowline).
xys = [(xh[i], yh[i]) for i in range(0, upstream_max, 20)][2::]
# ## Import and invert velocity observations
# In[ ]:
## Set up combined hdf5 stack
hel_stack = ice.MagStack(files=[velocity_fpath+'vx.h5', velocity_fpath+'vy.h5'])
data_key = 'igram' # B. Riel convention for access to datasets in hdf5 stack
# In[ ]:
# Create an evenly spaced time array for time series predictions
t_grid = np.linspace(hel_stack.tdec[0], hel_stack.tdec[-1], 1000)
# First convert the time vectors to a list of datetime
dates = ice.tdec2datestr(hel_stack.tdec, returndate=True)
dates_grid = ice.tdec2datestr(t_grid, returndate=True)
# Build the collection
collection = nifl.build_collection(dates)
# Construct a priori covariance
Cm = nifl.computeCm(collection)
iCm = np.linalg.inv(Cm)
# Instantiate a model for inversion
model = ice.tseries.Model(dates, collection=collection)
# Instantiate a model for prediction
model_pred = ice.tseries.Model(dates_grid, collection=collection)
## Access the design matrix for plotting
G = model.G
# Create lasso regression solver that does the following:
# i) Uses an a priori covariance matrix for damping out the B-splines
# ii) Uses sparsity-enforcing regularization (lasso) on the integrated B-splines
solver = ice.tseries.select_solver('lasso', reg_indices=model.itransient, penalty=0.05,
                                   rw_iter=1, regMat=iCm)
# Now that we are set up with our data and machinery, we'll ask the inversion to make us a continuous time series of velocity at each point we wish to study.
# In[ ]:
## Invert a continuous velocity time series at each analysis point.
## BUG FIX: iterate over a snapshot (list(xys)) — the original iterated xys
## directly while calling xys.remove() inside the loop, which silently skips
## the element immediately after each removed point.
preds = []
for j, xy in enumerate(list(xys)):
    try:
        pred, st, lt = nifl.VSeriesAtPoint(xy, vel_stack=hel_stack, collection=collection,
                                  model=model, model_pred=model_pred, solver=solver,
                                  t_grid=t_grid, sigma=2.5, data_key='igram')
        preds.append(pred)
    except AssertionError: # catches failed inversion
        # Drop the point from xys so it stays aligned with preds downstream.
        print('Insufficient data for point {}. Removing'.format(j))
        xys.remove(xy)
        continue
# ## Comparison data sets
# ### Catchment-integrated SMB
# We load in a 1D timeseries of surface mass balance integrated over the whole Helheim catchment. This data is monthly surface mass balance from the HIRHAM5 model, integrated over the Helheim catchment defined by K. Mankoff, with processing steps (coordinate reprojection, Delaunay triangulation, nearest-neighbor search and area summing) in `catchment-integrate-smb.py`.
# In[ ]:
## Read in RACMO monthly int from Denis
smb_racmo = pd.read_csv(catchment_smb_fpath, index_col=0, parse_dates=True)
smb_tr = smb_racmo.loc[smb_racmo.index.year >= 2006]
smb = smb_tr.loc[smb_tr.index.year <2018].squeeze()
# Convert timestamps to decimal years so the series can be interpolated
# on the same axis as the velocity predictions.
smb_d = [d.utctimetuple() for d in smb.index]
smb_d_interp = [ice.timeutils.datestr2tdec(d[0], d[1], d[2]) for d in smb_d]
smb_func = interpolate.interp1d(smb_d_interp, smb)
# Now, we compute the normalized cross-correlation between catchment-integrated SMB and surface velocity at each point along the flowline. We will draw on the inverted velocity series saved in `preds` above. We save the value of the maximum normalized cross-correlation, and the value in days of the lag where it occurs, to compare with other variables later.
# In[ ]:
## Per point: peak |xcorr| value, its lag [days], and whether it clears the CI.
smb_corr_amax = []
smb_lag_amax = []
smb_significance = []
for xy, pred in zip(xys, preds):
    corr, lags, ci = nifl.Xcorr1D(xy, series_func=smb_func, series_dates=smb_d_interp,
                              velocity_pred=pred, t_grid=t_grid, t_limits=(2009,2017),
                              diff=1, normalize=True, pos_only=True)
    smb_corr_amax.append(corr[abs(corr).argmax()])
    smb_lag_amax.append(lags[abs(corr).argmax()])
    smb_significance.append(abs(corr[abs(corr).argmax()]) > ci[abs(corr).argmax()])
# ### Runoff
# We import monthly runoff from the RACMO model, integrated over the Helheim catchment and shared as a CSV by Denis Felikson. Because this data is catchment-integrated, we interpolate a single 1D time series that will be used at all points.
# In[ ]:
## Read in RACMO monthly int from Denis
runoff_racmo = pd.read_csv(runoff_fpath, index_col=0, parse_dates=True)
runoff_tr = runoff_racmo.loc[runoff_racmo.index.year >= 2006]
runoff = runoff_tr.loc[runoff_tr.index.year <2018].squeeze()
# Same decimal-year conversion as for SMB above.
runoff_d = [d.utctimetuple() for d in runoff.index]
d_interp = [ice.timeutils.datestr2tdec(d[0], d[1], d[2]) for d in runoff_d]
runoff_func = interpolate.interp1d(d_interp, runoff)
# We compute the normalized cross-correlation between catchment-integrated runoff and surface velocity at each same point. Again we save the value of the maximum normalized cross-correlation, and the value in days of the lag where it occurs, to compare with other variables.
# In[ ]:
## Per point: peak |xcorr| value, its lag [days], and whether it clears the CI.
runoff_corr_amax = []
runoff_lag_amax = []
runoff_significance = []
for xy, pred in zip(xys, preds):
    corr, lags, ci = nifl.Xcorr1D(xy, series_func=runoff_func, series_dates=d_interp,
                              velocity_pred=pred, t_grid=t_grid, t_limits=(2009,2017),
                              diff=1, normalize=True, pos_only=True)
    runoff_corr_amax.append(corr[abs(corr).argmax()])
    runoff_lag_amax.append(lags[abs(corr).argmax()])
    runoff_significance.append(abs(corr[abs(corr).argmax()]) > ci[abs(corr).argmax()])
# ### Terminus position change
# We import width-averaged terminus position change processed by Leigh Stearns. These data give terminus position in km from a baseline, so they do not need to be processed into a coordinate system.
# In[ ]:
# Width-averaged terminus position (L. Stearns); only the first two CSV
# columns (date, position) are used.
termini = pd.read_csv(termini_fpath, index_col=0, parse_dates=True, usecols=[0,1])
trmn = termini.loc[termini.index.year >= 2006]
tm = trmn.loc[trmn.index.year <2017].squeeze()
## smooth a little to make more comparable with SMB and runoff
td = tm.rolling('10D').mean() # approximately 3 measurements per window
termini_d = [d.utctimetuple() for d in td.index]
tm_d_interp = [ice.timeutils.datestr2tdec(d[0], d[1], d[2]) for d in termini_d]
termini_func = interpolate.interp1d(tm_d_interp, td)
# In[ ]:
## Per point: peak |xcorr| value, its lag [days], and whether it clears the CI.
terminus_corr_amax = []
terminus_lag_amax = []
terminus_significance = []
for xy, pred in zip(xys, preds):
    corr, lags, ci = nifl.Xcorr1D(xy, series_func=termini_func, series_dates=tm_d_interp,
                              velocity_pred=pred, t_grid=t_grid, t_limits=(2009,2017),
                              diff=1, normalize=True, pos_only=True)
    terminus_corr_amax.append(corr[abs(corr).argmax()])
    terminus_lag_amax.append(lags[abs(corr).argmax()])
    terminus_significance.append(abs(corr[abs(corr).argmax()]) > ci[abs(corr).argmax()])
# ### Bed topography
# Mostly we will use this for plotting and for defining a standard coordinate system. However, future analyses could combine bed topography with calving position or other variables to analyse effect on surface velocity.
# In[ ]:
## Read in and interpolate BedMachine topography
fh = Dataset(gl_bed_fpath, mode='r')
xx = fh.variables['x'][:].copy() #x-coord (polar stereo (70, 45))
yy = fh.variables['y'][:].copy() #y-coord
s_raw = fh.variables['surface'][:].copy() #surface elevation
h_raw=fh.variables['thickness'][:].copy() # Gridded thickness
b_raw = fh.variables['bed'][:].copy() # bed topo
thick_mask = fh.variables['mask'][:].copy()
ss = np.ma.masked_where(thick_mask !=2, s_raw)#mask values: 0=ocean, 1=ice-free land, 2=grounded ice, 3=floating ice, 4=non-Greenland land
hh = np.ma.masked_where(thick_mask !=2, h_raw)
bb = b_raw #don't mask, to allow bed sampling from modern bathymetry (was subglacial in ~2006)
fh.close()
# In[ ]:
## Interpolate in area of Helheim
# Array index windows (not map coordinates) bracketing the Helheim region.
xl, xr = 6100, 6600
yt, yb = 12700, 13100
x_hel = xx[xl:xr]
y_hel = yy[yt:yb]
s_hel = ss[yt:yb, xl:xr]
b_hel = bb[yt:yb, xl:xr]
S_helheim = interpolate.RectBivariateSpline(x_hel, y_hel[::-1], s_hel.T[::,::-1]) #interpolating surface elevation provided
B_helheim = interpolate.RectBivariateSpline(x_hel, y_hel[::-1], b_hel.T[::,::-1]) #interpolating bed elevation provided
# ## Plotting
# First, we plot the max correlation at each point for a single variable.
# In[ ]:
## Shaded-relief basemap with per-point peak xcorr (terminus) overlaid.
ls = LightSource(azdeg=225, altdeg=80)
fig, ax = plt.subplots(1)
# ax.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
rgb = ls.shade(np.asarray(b_hel), cmap=plt.get_cmap('gist_earth'), blend_mode='overlay',
               dx=np.mean(np.diff(x_hel)), dy=np.mean(np.diff(y_hel)), vert_exag=5.)
ax.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc = ax.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=terminus_corr_amax, cmap='RdBu', vmin=-0.5, vmax=0.5)
cb = fig.colorbar(sc, ax=ax)
cb.ax.set_title('Max. xcorr')
# Axis labels are in km; tick positions remain in m (hence the relabeling).
ax.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
      ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
      xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
      xlabel='Easting [km]', ylabel='Northing [km]')
plt.show()
# Now, let's compare the patterns of correlation and lag for each variable.
# In[ ]:
## Three-panel figure: spatial pattern of peak xcorr for SMB, runoff, terminus.
div_colors = 'RdBu' # choose divergent colormap
corrnorm_min, corrnorm_max = -0.3, 0.3
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3, figsize=(14, 4))
# ax1.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
ax1.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc1 = ax1.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=smb_corr_amax, cmap=div_colors,
                 vmin=corrnorm_min, vmax=corrnorm_max)
## set up correctly scaled colorbar
div1 = make_axes_locatable(ax1)
cax1 = div1.append_axes("right", size="5%", pad=0.1)
plt.colorbar(sc1, cax=cax1)
# cb1.ax.set_title('AMax. xcorr')
ax1.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
       ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
       xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
       xlabel='Easting [km]', ylabel='Northing [km]', title='Catchment SMB')
# ax2.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
ax2.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc2 = ax2.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=runoff_corr_amax, cmap=div_colors,
                 vmin=corrnorm_min, vmax=corrnorm_max)
## set up correctly scaled colorbar
div2 = make_axes_locatable(ax2)
cax2 = div2.append_axes("right", size="5%", pad=0.1)
fig.colorbar(sc2, cax=cax2)
# cb2.ax.set_title('AMax. xcorr')
ax2.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
       ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
       xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
       xlabel='Easting [km]', ylabel='Northing [km]', title='Catchment-integrated runoff')
# ax3.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
ax3.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc3 = ax3.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=terminus_corr_amax, cmap=div_colors,
                 vmin=corrnorm_min, vmax=corrnorm_max)
## set up correctly scaled colorbar
div3 = make_axes_locatable(ax3)
cax3 = div3.append_axes("right", size="5%", pad=0.1)
cb3 = fig.colorbar(sc3, cax=cax3)
cb3.ax.set_title('AMax. xcorr')
ax3.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
       ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
       xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
       xlabel='Easting [km]', ylabel='Northing [km]', title='Terminus position')
plt.tight_layout()
# plt.savefig('/Users/lizz/Desktop/20210105-map_xcorr_amax.png')
# In[ ]:
## Plot spatial pattern of lag at the absolute max xcorr
## Three panels (SMB, runoff, terminus); lag color scale spans +/- one year.
div_colors = 'RdBu' # choose divergent colormap
lagnorm_min, lagnorm_max = -365, 365
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3, figsize=(14, 4))
# ax1.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
ax1.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc1 = ax1.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=smb_lag_amax, cmap=div_colors,
                 vmin=lagnorm_min, vmax=lagnorm_max)
## set up correctly scaled colorbar
div1 = make_axes_locatable(ax1)
cax1 = div1.append_axes("right", size="5%", pad=0.1)
plt.colorbar(sc1, cax=cax1)
# cb1.ax.set_title('Lag [d] at peak xcorr')
ax1.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
       ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
       xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
       xlabel='Easting [km]', ylabel='Northing [km]', title='Catchment SMB')
# ax2.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
ax2.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc2 = ax2.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=runoff_lag_amax, cmap=div_colors,
                 vmin=lagnorm_min, vmax=lagnorm_max)
## set up correctly scaled colorbar
div2 = make_axes_locatable(ax2)
cax2 = div2.append_axes("right", size="5%", pad=0.1)
fig.colorbar(sc2, cax=cax2)
# cb2.ax.set_title('Lag [d] at peak xcorr')
ax2.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
       ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
       xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
       xlabel='Easting [km]', ylabel='Northing [km]', title='Catchment runoff')
# ax3.contourf(x_hel, y_hel, b_hel, cmap='gist_earth', alpha=0.5)
ax3.imshow(rgb, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc3 = ax3.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=terminus_lag_amax, cmap=div_colors,
                 vmin=lagnorm_min, vmax=lagnorm_max)
## set up correctly scaled colorbar
div3 = make_axes_locatable(ax3)
cax3 = div3.append_axes("right", size="5%", pad=0.1)
cb3 = fig.colorbar(sc3, cax=cax3)
cb3.ax.set_title('Lag [d] at peak xcorr')
ax3.set(xlim=(270000, 320000), xticks=(280000, 300000, 320000),
       ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
       xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
       xlabel='Easting [km]', ylabel='Northing [km]', title='Terminus position')
plt.tight_layout()
plt.show()
# plt.savefig('/Users/lizz/Desktop/20210105-map_lag_amax.png')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
14876,
12,
13237,
5538,
25,
8739,
286,
2124,
10215,
81,
290,
19470,
329,
1180,
9633,
198,
6214,
449,
... | 2.337964 | 6,983 |
# third party
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
# relative
from . import Base
| [
2,
2368,
2151,
198,
6738,
44161,
282,
26599,
1330,
29201,
198,
6738,
44161,
282,
26599,
1330,
34142,
198,
6738,
44161,
282,
26599,
1330,
13601,
33,
3219,
198,
198,
2,
3585,
198,
6738,
764,
1330,
7308,
628
] | 3.944444 | 36 |
import unittest
from katas.kyu_7.reverse_it import reverse_it
| [
11748,
555,
715,
395,
198,
198,
6738,
479,
265,
292,
13,
2584,
84,
62,
22,
13,
50188,
62,
270,
1330,
9575,
62,
270,
628
] | 2.666667 | 24 |
"""This module provides services views for server app."""
from aiohttp import web
services_routes = web.RouteTableDef()


@services_routes.view('/spreadsheet')
class SpreadsheetView(web.View):
    """Endpoints for working with a user's spreadsheet data."""

    async def get(self):
        """Return the Google authorization URL the client should visit
        to grant access to the user's spreadsheet account."""
        auth_helper = self.request.app["spreadsheet_auth"]
        body = {
            "success": True,
            "auth_url": auth_helper.auth_url,
        }
        return web.json_response(data=body, status=200)
| [
37811,
1212,
8265,
3769,
2594,
5009,
329,
4382,
598,
526,
15931,
198,
198,
6738,
257,
952,
4023,
1330,
3992,
628,
198,
30416,
62,
81,
448,
274,
796,
3992,
13,
43401,
10962,
7469,
3419,
628,
198,
31,
30416,
62,
81,
448,
274,
13,
1177... | 2.450172 | 291 |
from datetime import datetime, timezone
from pynws import raw_data
import pytest
from tests.helpers import setup_app
# Shared placeholder fixture values for the test suite.
LATLON = (0, 0)  # (latitude, longitude) pair
STATION = "ABC"  # observation station identifier
USERID = "test"  # presumably the user id passed to pynws setup — verify in tests
WFO = "ABC"      # forecast office code
X = 0            # grid x index
Y = 0            # grid y index
ZONE = "test"    # forecast zone identifier
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
198,
198,
6738,
279,
2047,
18504,
1330,
8246,
62,
7890,
198,
11748,
12972,
9288,
198,
198,
6738,
5254,
13,
16794,
364,
1330,
9058,
62,
1324,
198,
198,
43,
1404,
43,
1340,
796,
357,
... | 2.45977 | 87 |
# selection sort
# pylint: disable=import-error
from random_list import random_list as rl
# [x,x,x] [x,x,x,x,x,x,x,x,x,x]
# loop over the len of numbers
# result = [x]
# search ahead in numbers for the smallest (mark ith as smallest, loop til end)
# Build a list of 10 random values and print the sorted result.
array = rl(10)
# NOTE(review): selection_sort is neither defined nor imported in this chunk —
# its definition appears to live elsewhere in the file; confirm before running.
print(selection_sort(array))
| [
2,
6356,
3297,
201,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748,
12,
18224,
201,
198,
6738,
4738,
62,
4868,
1330,
4738,
62,
4868,
355,
374,
75,
201,
198,
2,
685,
87,
11,
87,
11,
87,
60,
685,
87,
11,
87,
11,
87,
11,
87,
11,
... | 2.475806 | 124 |
# Configuration file, type values between ''
# Put Facebook login here
# SECURITY NOTE(review): credentials are stored in plain text in source;
# consider loading them from environment variables instead.
config_email = ''
config_password = ''
# Put link to your friend page here. (from your browser)
# Either: https://www.facebook.com/xxx/friends
# Or: https://www.facebook.com/profile.php?id=xxx&%2Ffriends&sk=friends&source_ref=pb_friends_tl
config_url = ''
# Put your name, including spaces and capital letters here
config_name = ''
| [
2,
28373,
2393,
11,
2099,
3815,
1022,
10148,
198,
198,
2,
5930,
3203,
17594,
994,
198,
11250,
62,
12888,
796,
10148,
198,
11250,
62,
28712,
796,
10148,
198,
198,
2,
5930,
2792,
284,
534,
1545,
2443,
994,
13,
357,
6738,
534,
6444,
8,... | 3.327869 | 122 |
# -*- coding: utf-8 -*-
import json
import boto3
import requests
from api.rdb.config import is_test, is_production
from api.rdb.utils.apigateway import get_api_url
from api.rdb.utils.service_framework import STATUS_OK, STATUS_BAD_REQUEST
from ..utilities import invoke, get_lambda_test_data, get_lambda_fullpath
# noinspection PyUnusedLocal
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
198,
11748,
275,
2069,
18,
198,
11748,
7007,
198,
198,
6738,
40391,
13,
4372,
65,
13,
11250,
1330,
318,
62,
9288,
11,
318,
62,
25493,
198,
6738... | 2.883333 | 120 |
from . import reflection
from . import wrapping
from .reflection import get_callargs
from .wrapping import update_wrapper, wraps, get_argspec
| [
6738,
764,
1330,
14580,
198,
6738,
764,
1330,
27074,
198,
198,
6738,
764,
5420,
1564,
1330,
651,
62,
13345,
22046,
198,
6738,
764,
29988,
2105,
1330,
4296,
62,
48553,
11,
27521,
11,
651,
62,
853,
16684,
198
] | 3.864865 | 37 |
# Print the store banner, read the original price, then show the price
# after the fixed 5% "quinta verde" discount.
print('Hipermercado Preço Bão - Confira abaixo o descontão do bão que a gente preparou pra você!'
      '\n')
preco_original = float(input('Qual o preço original do produto? '))
preco_final = preco_original * 0.95
print(f'\nAproveite! Agora este produto está custando R${preco_final:.2f} no saldão da quinta verde!'
      '\nBoas compras!')
| [
4798,
10786,
39,
9346,
647,
66,
4533,
3771,
16175,
78,
347,
28749,
532,
7326,
8704,
450,
64,
844,
78,
267,
1715,
756,
28749,
466,
275,
28749,
8358,
257,
308,
21872,
9198,
280,
7201,
12776,
25792,
13679,
198,
220,
220,
220,
220,
220,
... | 2.322581 | 124 |
# Generated by Django 2.1.2 on 2018-10-25 05:29
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
17,
319,
2864,
12,
940,
12,
1495,
8870,
25,
1959,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from homeassistant import config_entries
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import logging
from UponorJnap import UponorJnap
from homeassistant.const import (
CONF_HOST,
CONF_NAME
)
from .const import (
DOMAIN,
SIGNAL_UPONOR_STATE_UPDATE,
DEVICE_MANUFACTURER
)
_LOGGER = logging.getLogger(__name__)
| [
6738,
1363,
562,
10167,
1330,
4566,
62,
298,
1678,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
11748,
1363,
562,
10167,
13,
16794,
364,
13,
11250,
62,
12102,
341,
355,
269,
85,
198,
11748,
18931,
198,
198,
6738,
14438,
273,
41,
77,... | 2.640288 | 139 |
# -*- coding: utf-8 -*-
""" Routes Module
Currently this module contains all of the routes for the auth blueprint
"""
from app import db
from app.models import User
from app.oauth import OAuthSignIn
from app.forms import SignInForm
from app.forms import SignUpForm
from sqlalchemy import func, or_
from flask import render_template, request, flash, session, redirect,\
url_for, send_file, Response, abort
from flask_login import current_user, login_user, logout_user, login_required
from app.auth import auth_bp
@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
    """Render the sign-in page and authenticate submitted credentials.

    GET requests (and invalid submissions) receive the rendered login
    page. A valid POST checks the credentials: bad email/password flashes
    a message and returns to the login page, while success logs the user
    in and redirects to the member index. Already-authenticated visitors
    are sent straight onward.
    """
    form = SignInForm()
    if current_user.is_authenticated:
        return redirect(url_for('auth.logged_in'))
    if request.method == 'POST' and form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is None or not account.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('auth.login'))
        login_user(account, remember=form.remember_me.data)
        return redirect(url_for('main.index'))
    return render_template('login.html', title='CONP | Log In', form=form, error=form.errors)
@auth_bp.route('/success', methods=['GET', 'POST'])
@login_required
def logged_in():
    """Success route: catch a successful login and forward to the index.

    BUG FIX: the route accepts both GET and POST, but the original only
    returned a response for GET — a POST fell through and returned None,
    which Flask turns into a server error. Redirect unconditionally.

    Returns:
        redirect to the non-public index page
    """
    return redirect(url_for('main.index'))
@auth_bp.route('/logout')
def logout():
    """End the current session and send the visitor to the public index."""
    logout_user()
    return redirect(url_for('main.public'))
# This is the first step in the login process: the 'login with X' buttons
# should direct users here with the provider name filled in
@auth_bp.route('/authorize/<provider>')
def oauth_authorize(provider):
    """Kick off the OAuth flow for the named provider.

    Args:
        provider: name of the OAuth provider to authorize against

    Returns:
        The provider's authorization redirect, or a redirect to the
        index when the visitor is already signed in.
    """
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    return OAuthSignIn.get_provider(provider).authorize()
# This is step two. The OAuth provider then sends its reply to this route
@auth_bp.route('/callback/<provider>')
def oauth_callback(provider):
    """Handle the OAuth provider's reply (second step of the flow).

    Exchanges the provider's reply for an access token, stores the token
    in the session, and logs the matching user in. Visitors whose OAuth
    id has no account yet are sent to registration; failed exchanges go
    back to the login page with a flash message.

    Args:
        provider: name of the OAuth provider replying to us
    """
    if not current_user.is_anonymous:
        return redirect(url_for('main.public'))
    handler = OAuthSignIn.get_provider(provider)
    # Step three: the code in the provider's reply is sent back to the
    # provider, which returns an authentication token.
    access_token, oauth_id = handler.callback()
    if access_token is None or oauth_id is None:
        flash('Authentication failed. Please contact an admin if '
              'this problem is persistent')
        return redirect(url_for('auth.login'))
    matched = User.query.filter_by(oauth_id=oauth_id).first()
    if matched is None:
        return redirect(url_for("auth.register"))
    login_user(matched, remember=True)
    session['active_token'] = access_token
    return redirect(url_for('auth.logged_in'))
@auth_bp.route('/register')
def register():
    """Display the account registration page."""
    return render_template('register.html', title='CONP | Register', form=SignUpForm())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
39602,
274,
19937,
628,
220,
220,
220,
16888,
428,
8265,
4909,
477,
286,
262,
11926,
329,
262,
6284,
30881,
198,
37811,
198,
6738,
598,
1330,
20613,
198,
6738,
59... | 2.587577 | 1,787 |
from mcts.nodes import MonteCarloTreeSearchNode
| [
6738,
285,
310,
82,
13,
77,
4147,
1330,
22489,
9914,
5439,
27660,
18243,
19667,
201,
198,
201
] | 2.941176 | 17 |
"""Tests various errors that can be thrown by binding."""
from typing import Generator, List, Tuple, Union
from dinao.backend import Connection
from dinao.binding import FunctionBinder
from dinao.binding.binders import BoundedGeneratingQuery
from dinao.binding.errors import (
BadReturnTypeError,
CannotInferMappingError,
FunctionAlreadyBoundError,
MissingTemplateArgumentError,
MultipleConnectionArgumentError,
NoPoolSetError,
PoolAlreadySetError,
TemplateError,
)
from dinao.binding.templating import Template
import pytest
from tests.binding.mocks import MockConnection, MockConnectionPool
# NOTE(review): throughout this section the bodies of the decorated inner
# functions (the functions being bound via @binder.query / @binder.execute /
# @binder.transaction) appear to be missing — each decorator is followed
# directly by the next top-level def, which is not valid Python. This looks
# like truncation introduced when the file was extracted; restore the inner
# function definitions from the original source before running these tests.
def test_cannot_infer_generic(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function to typed generics raises an error."""
    binder, _ = binder_and_pool
    with pytest.raises(CannotInferMappingError, match="Unable to determine mapper for typing.Union"):
        @binder.query("SELECT * FROM table")
def test_cannot_infer_nested_generic(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function to typed generics as row types raises."""
    binder, _ = binder_and_pool
    with pytest.raises(CannotInferMappingError, match="Unable to determine row mapper for typing.List\\[str\\]"):
        @binder.query("SELECT * FROM table")
def test_binding_generator_throws(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function to generate when send type and return type are specified."""
    binder, pool = binder_and_pool
    with pytest.raises(CannotInferMappingError, match="Only yield_type"):
        @binder.query("SELECT some_num FROM table LIMIT 3")
def test_bounded_generating_query_throws(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that BoundedGeneratingQuery raises if not bound to a generator."""
    binder, pool = binder_and_pool
    with pytest.raises(BadReturnTypeError, match="Expected results type to be Generator"):
        BoundedGeneratingQuery(binder, Template("SELECT * FROM table"), not_a_generator)
def test_binder_execute_bad_type(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function specifying an invalid return type for execution raises an exception."""
    binder, _ = binder_and_pool
    with pytest.raises(BadReturnTypeError, match="can only return None or int"):
        @binder.execute("INSERT INTO TABLE (#{arg1})")
def test_binder_raises_for_template(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that a bad template causes an error at binding time."""
    binder, _ = binder_and_pool
    with pytest.raises(TemplateError, match="#{arg1"):
        @binder.execute("INSERT INTO table #{arg1")
def test_double_binding_raises(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function more than once results in an error."""
    binder, _ = binder_and_pool
    match = "has already been bounded by"
    with pytest.raises(FunctionAlreadyBoundError, match=match):
        @binder.execute("UPDATE table SET col = #{arg1}")
        @binder.execute("INSERT INTO TABLE (#{arg1})")
    with pytest.raises(FunctionAlreadyBoundError, match=match):
        @binder.execute("UPDATE table SET col = #{arg1}")
        @binder.query("SELECT * FROM table WHERE col = #{arg1})")
    with pytest.raises(FunctionAlreadyBoundError, match=match):
        @binder.execute("UPDATE table SET col = #{arg1}")
        @binder.transaction()
def test_args_mismatch_raises(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests an error is raised if a template is bound to a function without a matching argument."""
    binder, _ = binder_and_pool
    with pytest.raises(MissingTemplateArgumentError, match="specified in template but is not an argument of"):
        @binder.execute("INSERT INTO table (#{arg})")
def test_binder_raises_for_no_pool():
    """Tests an error is raised when a bind has no pool but an operation requiring one is performed."""
    binder = FunctionBinder()
    @binder.execute("INSERT INTO table (#{arg})")
    with pytest.raises(NoPoolSetError, match="No connection pool"):
        test_bound_execute("testing")
    with pytest.raises(NoPoolSetError, match="No connection pool"):
        with binder.connection() as cnx: # noqa: F841
            pass # pragma: no cover
def test_binder_raises_for_pool_set_twice(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests an error is raised when a binder has its pool set twice."""
    binder, _ = binder_and_pool
    pool = MockConnectionPool([])
    with pytest.raises(PoolAlreadySetError, match="only be set once"):
        binder.pool = pool
def test_binder_raises_for_double_connection_arg(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests an error is raised when a bound function specifies it would like more than one connection."""
    binder, _ = binder_and_pool
    with pytest.raises(MultipleConnectionArgumentError, match="Connection argument specified multiple times for"):
        @binder.transaction()
| [
37811,
51,
3558,
2972,
8563,
326,
460,
307,
8754,
416,
12765,
526,
15931,
198,
198,
6738,
19720,
1330,
35986,
11,
7343,
11,
309,
29291,
11,
4479,
198,
198,
6738,
288,
1437,
78,
13,
1891,
437,
1330,
26923,
198,
6738,
288,
1437,
78,
1... | 2.968 | 1,750 |
#!/usr/bin/env python
# http://gerrit-documentation.googlecode.com/svn/Documentation/2.2.2/cmd-
# query.html
import pkg_resources
import subprocess
from datetime import datetime
import simplejson as json
import time
import tempfile
import textwrap
import pydoc
import os
VALID_SCORES = ['-2', '-1', '-0', '0', '+0', '+1', '+2']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
2638,
1378,
1362,
799,
12,
22897,
341,
13,
13297,
8189,
13,
785,
14,
21370,
77,
14,
24941,
341,
14,
17,
13,
17,
13,
17,
14,
28758,
12,
198,
2,
12405,
13,
6494,
198,
198,
... | 2.688 | 125 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
The code in this file was developed at ChemOS Inc. (2019).
'''
__author__ = 'Florian Hase'
#=========================================================================
import numpy as np
import pandas as pd
from datetime import datetime
from utilities import Logger
#=======================================================================
#=======================================================================
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
7061,
6,
198,
26656,
15385,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
220,
198,
3642,
2455,
273,
5964,
11704,
13,
4091,
262,
28536,
2393,
9387,
351,
42... | 4.312057 | 282 |
# -*- coding: utf-8 -*-
from os import chdir
from os.path import abspath, dirname
from setuptools import find_packages, setup
# Run from the package root so the relative file reads below resolve.
chdir(dirname(abspath(__file__)))

# Long description and pinned dependencies are read from the repo files so
# they stay in sync with the documentation.  (An unused `version = {}`
# placeholder was removed; the version comes from `version_command` below.)
with open('README.rst') as f:
    readme = f.read()

with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setup(
    author='Brandon Davidson',
    author_email='brad@oatmail.org',
    classifiers=[
        'Development Status :: 4 - Beta',
        "Intended Audience :: Developers",
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: System :: Archiving :: Packaging',
    ],
    description="Collection of utilities for publishing Python packages to PyPI-compatible indexes hosted on S3.",
    entry_points={
        'console_scripts': ['stick=stick.commands:cli']
    },
    extras_require={
        'dev': [
            'setuptools-version-command',
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    long_description=readme,
    name='stick',
    packages=find_packages(exclude=('docs')),
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
    url='https://github.com/brandond/stick',
    # Version is derived from the git tag at build time.
    version_command=('git describe --tags --dirty', 'pep440-git-full'),
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
28686,
1330,
442,
15908,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
26672,
3672,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
19... | 2.584337 | 498 |
# coding:utf-8
""" 表示関連
"""
import maya.cmds as cmds
def message(s, do_print=True):
    """Show *s* as a fading in-view message at the top center of Maya's viewport.

    Args:
        s: Text to display.
        do_print: When True, also echo the text to standard output.
    """
    cmds.inViewMessage(smg=s, pos="topCenter", bkc="0x00000000", fade=True)
    if not do_print:
        return
    print(s)
2,
19617,
25,
40477,
12,
23,
201,
198,
37811,
5525,
94,
101,
163,
97,
118,
38461,
95,
34460,
96,
201,
198,
37811,
201,
198,
11748,
743,
64,
13,
28758,
82,
355,
23991,
82,
201,
198,
201,
198,
201,
198,
4299,
3275,
7,
82,
11,
466,... | 1.88 | 125 |
import torch
from torch import nn
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
628,
197,
197,
198,
197,
197
] | 2.857143 | 14 |
import click
from apisrun.services.apisrun import ApisrunService
@click.command()
@click.option(
    "-f", "--file", help="Specify a file or manifest path (default: apisrun.yml)"
)
def up(file):
    """Builds, (re)creates and starts microservices."""
    # Delegate to the service layer.  `file` is None when -f/--file is not
    # given; presumably the service then falls back to apisrun.yml as the
    # option help suggests — confirm in ApisrunService.create_node.
    ApisrunService().create_node(file)
| [
11748,
3904,
198,
6738,
2471,
271,
5143,
13,
30416,
13,
499,
271,
5143,
1330,
5949,
271,
5143,
16177,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
18076,
7,
198,
220,
220,
220,
27444,
69,
1600,
366,
438,
7753,
1600,
103... | 2.807692 | 104 |
import pygame
import world
from settings import *
import settings
import map
import os
import simulation
from monitor import *
# Place the SDL/pygame window 50 px from the screen's top-left corner;
# must be set before the video subsystem creates the window.
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (50,50)
#def collision_with_walls(self, person_id, newpos):
# self.players[person_id].rect.topleft = newpos
# return pygame.sprite.spritecollideany(self.players[person_id], self.walls) | [
11748,
12972,
6057,
198,
11748,
995,
198,
6738,
6460,
1330,
1635,
198,
11748,
6460,
198,
11748,
3975,
198,
11748,
28686,
198,
11748,
18640,
198,
6738,
5671,
1330,
1635,
198,
198,
418,
13,
268,
2268,
17816,
10305,
43,
62,
42937,
62,
2892... | 2.716312 | 141 |
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import random
import sqlite3
import time
# Headers.
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'referrer': 'https://google.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Pragma': 'no-cache',
}
# categories lookup dictionary.
categories_dic = {"buy": "https://www.propertyfinder.ae/en/buy/properties-for-sale.html",
"rent": "https://www.propertyfinder.ae/en/rent/properties-for-rent.html",
"commercial buy": "https://www.propertyfinder.ae/en/commercial-buy/properties-for-sale.html",
"commercial rent": "https://www.propertyfinder.ae/en/commercial-rent/properties-for-rent.html"
}
def get_proxies():
    """
    This method extract the available free proxies from
    https://free-proxy-list.net/ and return
    a list of proxies formatted as "host:port".
    """
    response = requests.get('https://free-proxy-list.net/', headers=headers)
    page = BeautifulSoup(response.text, "lxml")
    rows = page.select("#proxylisttable tbody tr")
    # First two <td> cells of each row hold the IP address and the port.
    return [':'.join(cell.text for cell in row.select("td")[:2]) for row in rows]
def pick_random(proxies):
    """
    Return a uniformly random proxy from the list of proxies,
    or None when the list is empty or None.
    """
    # random.choice is the idiomatic (and equivalent) form of the previous
    # manual randint-based indexing.
    if not proxies:
        return None
    return random.choice(proxies)
def get_listing_urls(driver, category, category_url):
    """
    Collect the listing URLs from every result page of the given category.

    Navigates the browser to `category_url`, scrapes the card links on each
    page, and clicks the "next" pagination button until it no longer exists.
    Returns the list of listing URLs and a parallel list of their categories.
    """
    urls = []
    cats = []
    driver.get(category_url)
    time.sleep(3)
    while True:
        page = BeautifulSoup(driver.page_source, "html.parser")
        cards = page.find_all("a", {"class": "card card--clickable"})
        urls.extend(card.get("href") for card in cards)
        cats.extend(category for _ in cards)
        try:
            driver.find_element_by_class_name("pagination__link.pagination__link--next").click()
            time.sleep(3)
        except:  # bare except kept deliberately: any failure means "no next page"
            break
    return urls, cats
def create_table():
    """
    Create the ``Listings`` table in ``listings.db`` if it does not exist.

    Columns: auto-increment id, category (buy/rent/...), listing url.

    Fixes two defects in the original: the connection variable is now
    initialized before the try block (previously a failed connect raised
    NameError inside ``finally``), and ``IF NOT EXISTS`` makes the call
    idempotent (previously a rerun failed with a misleading
    "Failed to Connect" message).
    """
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect('listings.db')
        cursor = sqliteConnection.cursor()
        print("Successfully Connected to SQLite")
        sqlite_create_table_query = """ CREATE TABLE IF NOT EXISTS Listings
                            (id integer primary key autoincrement,
                            category varchar(30),
                            url varchar(200));"""
        cursor.execute(sqlite_create_table_query)
        sqliteConnection.commit()
        cursor.close()
        print("Table Has been Created")
    except sqlite3.Error as error:
        print("Failed to Connect: ", error)
    finally:
        if sqliteConnection:
            sqliteConnection.close()
            print("The SQLite connection is closed")
def insert_data(records):
    """
    Bulk-insert (category, url) pairs into the ``Listings`` table.

    :param records: iterable of (category, url) tuples.

    Fixes two defects in the original: the connection is now always closed
    (previously it leaked), and ``cursor.close()`` is no longer referenced
    in the except path where a failed connect left it undefined.
    """
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect('listings.db')
        cursor = sqliteConnection.cursor()
        print("Successfully Connected to SQLite")
        cursor.executemany("insert into Listings(category, url) values (?,?)", records)
        sqliteConnection.commit()
        print("Records Have Been Inserted")
        cursor.close()
    except sqlite3.Error as error:
        print("Error While Inserting Records: ", error)
    finally:
        if sqliteConnection:
            sqliteConnection.close()
main() | [
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
11748,
4738,
198,
11748,
44161,
578,
18,
198,
11748,
640,
628,
198,
2,
7123,
364,
13,
198,
50145,
796,
1391,
198,
220,
... | 2.361877 | 1,705 |
import upydev
import json
import os
import textwrap
import webbrowser
# Filesystem path of the installed upydev package.
UPYDEV_PATH = upydev.__path__[0]
HELP_INFO_ARG = '''Mode/Tools:
> DEVICE MANAGEMENT: '$ upydev dm' to see help on device management.
ACTIONS : config, check, set, make_group, mg_group, see, gg
> FILEIO: '$ upydev fio' to see help on file input/ouput operations.
ACTIONS: put, get, fget, dsync, rsync, backup, install, update_upyutils
> FIRMWARE: '$ upydev fw' to see help on firmware operations.
ACTIONS: fwr, flash, mpyx
> KEYGEN: '$ upydev kg' to see help on keygen operations.
ACTIONS: gen_rsakey, rf_wrkey, sslgen_key
> REPLS: '$ upydev rp' to see help on repls modes.
ACTIONS: repl, rpl, wrepl, wssrepl, srepl
> SHELL-REPLS: '$ upydev sh' to see help on shell-repls modes.
ACTIONS: shell, shl, ssl_wrepl, ssl, sh_srepl, shr, wssl, set_wss, ble, jupyterc
> DEBUGGING: '$ upydev db' to see help on debugging operations.
ACTIONS: ping, probe, scan, run, timeit, diagnose, errlog, stream_test,
sysctl, log, debug, pytest-setup, pytest
> GROUP COMMAND MODE: '$ upydev gp' to see help on group mode options.
OPTIONS: -G, -GP
> HELP: '$ upydev h' or '$ upydev help' to see help (without optional args)
'$ upydev -h' or '$ upydev --help' to see full help info.
- To see help about a any ACTION/COMMAND
put %% before that ACTION/COMMAND as : $ upydev %%ACTION
ACTIONS: help, h, dm, fio, fw, kg, rp, sh, db, gp, gc, wu, sd, pro, docs,
udocs, mdocs.
upy Commands:
> GENERAL: do '$ upydev gc' to see General commmands help.
> WIFI UTILS: do '$ upydev wu' to see Wifi utils commands help.
> SD: do '$ upydev sd' to see SD utils commands help.
> PROTOTYPE: do '$ upydev pro' to see Prototype utils commands help.
'''
| [
11748,
510,
5173,
1990,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
2420,
37150,
198,
11748,
3992,
40259,
198,
198,
8577,
56,
39345,
62,
34219,
796,
510,
5173,
1990,
13,
834,
6978,
834,
58,
15,
60,
628,
198,
39,
3698,
47,
62,
... | 2.462069 | 725 |
# Code to get the current weather information.
import urllib.request
import json
import secrets
import time
# Downtown San Francisco:
URL = 'https://forecast.weather.gov/MapClick.php?lat=37.775&lon=-122.418&unit=0&lg=english&FcstType=json'

# Get just current status.
DARK_SKY_URL = "https://api.darksky.net/forecast/" + secrets.DARK_SKY_SECRET_KEY + \
    "/" + secrets.LAT_LONG + "?exclude=minutely,hourly,daily,alerts,flags"
# Cap on Dark Sky API calls per day (presumably the free-tier limit — confirm).
DARK_SKY_MAX_DAILY = 1000
# Minimum seconds between calls needed to stay under the daily cap:
# 86400 s / 1000 calls = 86.4 s.
DARK_SKY_MIN_PERIOD = 60*60*24/DARK_SKY_MAX_DAILY
# Cache of the most recent Dark Sky reading and the time it was fetched.
g_last_dark_sky_temp = None
g_last_dark_sky_time = None
# Get the current temperature outside in degrees Fahrenheit.
| [
2,
6127,
284,
651,
262,
1459,
6193,
1321,
13,
198,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
33918,
198,
11748,
13141,
198,
11748,
640,
198,
198,
2,
25657,
2986,
6033,
25,
198,
21886,
796,
705,
5450,
1378,
754,
2701,
13,
23... | 2.596708 | 243 |
import multiprocessing
import time
| [
11748,
18540,
305,
919,
278,
198,
11748,
640,
628,
628,
198
] | 3.545455 | 11 |
from midiutil.MidiFile import MIDIFile
import music_models, argparse, random, note_timing, copy, json, subprocess
#If this value is supplied, the Key class will write down all notes it generates in the file specified.
dir_write_note = ''

# Script entry point; `main` is expected to be defined elsewhere in this module.
if __name__ == '__main__' :
    main()
| [
6738,
3095,
72,
22602,
13,
44,
19830,
8979,
1330,
25269,
5064,
576,
198,
11748,
2647,
62,
27530,
11,
1822,
29572,
11,
4738,
11,
3465,
62,
16514,
278,
11,
4866,
11,
33918,
11,
850,
14681,
198,
198,
2,
1532,
428,
1988,
318,
14275,
11,... | 3.0625 | 96 |
# Requires a pylab-style namespace (e.g. `from pylab import *` or an IPython
# --pylab session) where `hist` is in scope; plots a histogram of these values.
hist([1,2,3,4])
198,
10034,
26933,
16,
11,
17,
11,
18,
11,
19,
12962
] | 1.454545 | 11 |
import os
import unittest
from app import create_app, db
from app.default_conf import Config
if __name__ == '__main__':
    # Run the test suite with per-test detail when executed as a script.
    # (The original nested the __main__ guard twice; a single check suffices.)
    unittest.main(verbosity=2)
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
6738,
598,
1330,
2251,
62,
1324,
11,
20613,
198,
6738,
598,
13,
12286,
62,
10414,
1330,
17056,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
... | 2.586667 | 75 |
import os
import logging
import cv2
from new_tools.media import check_image, FPS
from new_timer import get_now_time, AutoTimer
# Set logging config.
# Log line layout and timestamp format for the recording session.
FORMAT = '%(asctime)s [%(levelname)s] %(message)s'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt=DATE_FORMAT)

# Record webcam frames to an .mp4 until the user presses 'q'; AutoTimer
# reports the total elapsed recording time on exit.
with AutoTimer("Record video", decimal=2):
    # User input his name.
    name = input("{} [INFO] Please input your name: ".format(get_now_time()))
    video_dir = input("{} [INFO] Please input save directory: ".format(get_now_time()))
    if not os.path.exists(video_dir):
        raise FileNotFoundError("{} directory doesn't exist !".format(video_dir))

    # Calculate fps.
    fps = FPS()

    # Webcam source and some information.
    run = True
    vc = cv2.VideoCapture(0)
    video_width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_fps = 60
    # Output name embeds a timestamp plus the user's name for uniqueness.
    video_name = os.path.join(video_dir, "{}_{}.mp4".format(get_now_time("%Y%m%d%H%M%S"), name))
    logging.info("Video info")
    logging.info("Video Resolution: {} × {}.".format(video_width, video_height))
    logging.info("Video FPS: {} fps.".format(video_fps))
    logging.info("Video file path: {}.".format(video_name))

    # Video format.
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video = cv2.VideoWriter(video_name, fourcc, video_fps, (video_width, video_height))

    fps.start()
    while run and vc.isOpened():
        _, frame = vc.read()
        state, frame = check_image(frame)
        if state == 0:
            # Save frame.
            # NOTE(review): the frame is written BEFORE the text overlay is
            # drawn below, so the saved file contains no overlay — confirm
            # that is intended.
            video.write(frame)

            # Show frame (overlay is preview-only, see note above).
            cv2.putText(frame, "Video Resolution: {} x {}".format(video_width, video_height), (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            cv2.putText(frame, "Video FPS: {} fps.".format(video_fps), (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            cv2.putText(frame, "Video name: {}".format(os.path.basename(video_name)), (0, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            cv2.putText(frame, "Video Real-Time FPS: {:.2f} fps.".format(fps.fps()), (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            cv2.imshow("Frame", frame)

            key = cv2.waitKey(1)
            if key == ord('q'):
                # Quit requested: release everything; the while condition
                # ends the loop on the next iteration (run = False).
                fps.stop()
                logging.debug("fps elapsed time: {}".format(fps.elapsed_time))
                logging.info("Video Real-Time FPS: {:.2f} fps.".format(fps.fps()))
                run = False
                cv2.destroyAllWindows()
                video.release()
                vc.release()

            # Update frame
            # NOTE(review): after 'q' this still runs once on the released
            # capture, and fps.stop() below is then a second stop — confirm
            # the FPS class tolerates both.
            fps.update()
            logging.debug("fps frame: {}".format(fps.frame))

    # Get elapsed time.
    fps.stop()
    logging.info("{} saved successfully !".format(video_name)) if os.path.exists(video_name) else logging.error("{} saved failed !".format(video_name))
11748,
28686,
201,
198,
11748,
18931,
201,
198,
11748,
269,
85,
17,
201,
198,
6738,
649,
62,
31391,
13,
11431,
1330,
2198,
62,
9060,
11,
22082,
201,
198,
6738,
649,
62,
45016,
1330,
651,
62,
2197,
62,
2435,
11,
11160,
48801,
201,
19... | 2.095073 | 1,441 |
import constants
import math
import matplotlib.pyplot as plt
import numpy as np
#Defining the 'air resistance constant', compiling all of the constants that affect the force of drag on our basketball
# Algebraically equal to (1/2) * rho * C_d * (pi * d^2 / 4): the prefactor k
# of the quadratic drag law F = k * v^2 for a sphere of diameter d.
AIR_RES_CONSTANT = constants.AIR_DENSITY * constants.AIR_RES_C * math.pi * constants.DIAMETER * constants.DIAMETER / 8.0
def get_euler_solution(derivative, numsteps, upper, initial):
    """
    Integrate dy/dx = derivative(x, y) with the explicit Euler method.

    Steps from x = 0 toward x = `upper` in `numsteps` equal increments,
    starting from y = `initial`.  Works on scalars or numpy vectors.

    Returns two parallel lists of length `numsteps`: the x samples and the
    corresponding y estimates.
    """
    step = float(upper) / numsteps
    xs = [0]
    ys = [initial]
    while len(xs) < numsteps:
        x_prev = xs[-1]
        y_prev = ys[-1]
        xs.append(x_prev + step)
        ys.append(y_prev + step * derivative(x_prev, y_prev))
    return xs, ys
def generate_function(inputs, outputs):
    """
    Creates a function from a numeric approximation thereof.

    The returned callable accesses the output at the input sample nearest to
    the requested x value.  It is effectively a function of only x; the
    second parameter is a dummy so it matches the derivative(x, y) signature
    expected by get_euler_solution.

    Assumes a (linearly) increasing sequence `inputs`, parallel to `outputs`.

    NOTE: the original body consisted solely of ``return function`` with no
    inner function defined (a NameError); this reconstructs the closure the
    docstring describes.
    """
    from bisect import bisect_left

    def function(x, y=None):
        # Locate the insertion point, then pick whichever neighbor is closer.
        pos = bisect_left(inputs, x)
        if pos <= 0:
            return outputs[0]
        if pos >= len(inputs):
            return outputs[-1]
        before = inputs[pos - 1]
        after = inputs[pos]
        return outputs[pos] if (after - x) <= (x - before) else outputs[pos - 1]
    return function
def air_res(speed):
    """
    Returns the magnitude of air resistance force based on the absolute speed of a moving object
    """
    # Quadratic drag: F = k * v^2, with all speed-independent factors
    # precomputed in AIR_RES_CONSTANT.
    return AIR_RES_CONSTANT * speed * speed
def get_acceleration(time, velocity):
    """
    Gets the instantaneous acceleration
    at the given time and velocity of a moving object.

    Drag opposes the velocity vector, so each component is -F_drag * v_i/|v|.
    (Bug fix: the previous angle form used sin(acos(vx/|v|)), which equals
    |vy|/|v| and is never negative — the vertical drag term then pointed
    downward even while falling, accelerating the fall instead of opposing it.)

    :param time: Elapsed time; unused, kept to match the derivative(x, y)
        signature expected by get_euler_solution.
    :param velocity: Two-component numpy vector (vx, vy).
    :return np.ndarray: Acceleration vector (ax, ay), gravity included in ay.
    """
    speed = math.hypot(velocity[0], velocity[1])
    drag = air_res(speed)
    return np.array([-drag * velocity[0] / (speed * constants.MASS),
                     -drag * velocity[1] / (speed * constants.MASS) - constants.GRAVITY])
def solve_boundary_problem(endtime, launch_angle, num_iterations):
    """
    Solves the boundary value problem explicitly
    First obtains an Euler's method solution giving the velocity vectors at each time step,
    then obtains an Euler's method solution for the positions given the velocity vectors
    Returns a list of x values and y values, ready for plotting
    """
    # Pass 1: integrate acceleration -> velocity samples over [0, endtime].
    times, velocities = get_euler_solution(get_acceleration, num_iterations, endtime,
            np.array([math.cos(launch_angle) * constants.LAUNCH_SPEED, math.sin(launch_angle) * constants.LAUNCH_SPEED]))
    # Pass 2: integrate the sampled velocities -> position samples.
    # NOTE(review): the initial position uses PLAYER_WIDTH for both x and y —
    # confirm y should not be a separate launch-height constant.
    times, positions = get_euler_solution(generate_function(times, velocities), num_iterations, endtime, np.array([constants.PLAYER_WIDTH, constants.PLAYER_WIDTH]))
    return unpack_vectors(positions)
def unpack_vectors(vector_list):
    """
    Split a list of 2-d vectors into two parallel component lists.

    Returns a tuple: (list of first components, list of second components).
    """
    first_components = [vec[0] for vec in vector_list]
    second_components = [vec[1] for vec in vector_list]
    return first_components, second_components
#47 pi / 128 - good lower shot
#56 pi / 128 - good upper shot
def newton_zero(function, derivative, guess, num_iterations, epsilon=0.01):
    """
    Approximate a zero of `function` using Newton's method.

    Iterates x -> x - f(x)/f'(x) exactly `num_iterations` times starting
    from `guess`.  The result is flagged as a "true" zero only when the
    final |f(x)| falls below `epsilon`; otherwise the iteration diverged or
    had not yet converged.

    Returns the final estimate and whether it is a true zero.
    """
    estimate = guess
    for _ in range(num_iterations):
        estimate = estimate - function(estimate) / derivative(estimate)
    converged = abs(function(estimate)) < epsilon
    return estimate, converged
def try_newton_guess(times, positions, velocities, guess):
    """
    Tries to determine a time at which the position of the basketball
    is at the basket
    Uses Newton's method of approximation on a system:
    - x(t) - x_b = 0,
    - y(t) - y_b = 0,
    trying to find a solution to this system.
    TODODODO:
    """
    # Unimplemented placeholder: the 2-D Newton solve described above has
    # not been written yet; callers must not rely on any return value.
    pass
# Script entry point; `main` is expected to be defined elsewhere in this module.
if __name__ == "__main__":
    main()
11748,
38491,
198,
11748,
10688,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
7469,
3191,
262,
705,
958,
6625,
6937,
3256,
33393,
477,
286,
262,
38491,
326,
2689,
262,
... | 2.936324 | 1,382 |
import numpy as np
import math
import os
from PIL import Image as Img
# Cut grey scale image into several small block and different character for different scale
# reference:http://paulbourke.net/dataformats/asciiart/
# 10 levels
gscale = '@%#+=:. '
# 70 levels (works worse than less levels)
# gscale_more = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. "
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
355,
1846,
70,
198,
198,
2,
9712,
13791,
5046,
2939,
656,
1811,
1402,
2512,
290,
1180,
2095,
329,
1180,
5046,
198,
2,
4941,
171,
12... | 2.483871 | 155 |
"""
Ticker data is stored on disk at the location specified by
``config['paths']['save']`` with two columns (price and date). Data can be
loaded into Python and accessed via the `Ticker` class. This class has methods
to calculate descriptive statistics such as trailing and rolling returns of
differing time periods. Multiple `Ticker` objects can be combined into a
`Portfolio` object with optional weighting between the holdings.
The data module also contains several useful helper functions for parsing
common financial periods and querying valid market days.
"""
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
import os
import re
from warnings import warn
import pandas as pd
import pandas_market_calendars as mcal
import pytz
import numpy as np
from . import conf
from .download import metals, timeseries
from .exceptions import TickerDataError
from .mappings import forex, ticker2name
def annualize(total_return, period):
    """Convert raw returns over time period to compounded annual rate

    :param float total_return: Raw return
    :param str/int period: Financial period interpretable by ``parse_period``
    :return float:
    """
    span_years = parse_period(period) / 365
    growth = 1 + total_return
    return growth ** (1 / span_years) - 1
def market_day(direction, reference='today', search_days=7):
    """Return the closest completed and valid market day

    If ``reference == 'today'``, the current time will be compared to the
    market's closing time. For future or past reference dates, it is assumed
    that the time of day referenced is after close.

    :param str direction: One of previous, latest, next
    :param str reference: Day to search relative to (yyyy-mm-dd format or
        ``'today'`` for the current day
    :param int search_days: Symmetrical number of days to check on either
        side of the provided ``reference``
    :return np.datetime64: Date stamp
    """
    # Load calendar and format datetime reference
    nyse = mcal.get_calendar('NYSE')
    if reference == 'today':
        ref_day = date.today()
    else:
        ref_day = datetime.strptime(reference, '%Y-%m-%d').date()

    # Build list of valid market days within window and determine closest index to reference
    search_window = timedelta(days=search_days)
    recent = nyse.valid_days(start_date=ref_day - search_window, end_date=ref_day + search_window)
    if len(recent) == 0:
        raise RuntimeError(f'No valid dates found within {search_days} days, try expanding window')
    # idx = position of the latest market day at or before the reference day
    # (largest non-positive difference from the reference timestamp)
    diffs = pd.DataFrame(recent - pd.Timestamp(ref_day, tz='UTC'))
    idx = diffs[diffs <= timedelta(days=0)].idxmax()

    # Check whether market has closed if latest valid date is today
    # (if not, step back one day: today is not yet a *completed* market day)
    latest_valid = recent[idx][0].to_numpy()
    closing_time = nyse.schedule(start_date=latest_valid, end_date=latest_valid).market_close[0]
    now = datetime.now(tz=pytz.timezone(conf['locale']))
    if closing_time.date() == now.date() and closing_time > now:
        idx -= 1

    # Adjust returned date by requested direction
    # NOTE(review): `assert` is stripped under `python -O`; an explicit
    # ValueError would be sturdier input validation — confirm before changing.
    assert direction in ['previous', 'latest', 'next'], f'Invalid direction {direction}'
    if direction == 'previous':
        idx -= 1
    elif direction == 'next':
        idx += 1
    return recent[idx][0].to_numpy()
def parse_period(period):
    """Convert various financial periods to number of days

    :param int/str period: Number of days for the return window or one of
        the following keyword strings. Each keyword can be adjusted using a
        dash and modifier (i.e. 2-day, 6-year, etc)

        * day
        * week
        * month
        * quarter
        * year
        * ytd
    :return int days:
    :raises ValueError: For unrecognized keywords or non int/str types
    """
    if isinstance(period, int):
        return period
    if not isinstance(period, str):
        # Typo fixed: the original message read "Exepcted"
        raise ValueError(f'Expected type int or str, but received {type(period)}')
    today = datetime.today()
    # Keyword -> day count ('ytd' is days elapsed since January 1st)
    keyword_durations = {
        'day': 1,
        'week': 7,
        'month': 30,
        'quarter': 91,
        'year': 365,
        'ytd': (today - datetime(today.year, 1, 1)).days}
    # Optional "N-keyword" modifier, e.g. '2-year' -> 2 * 365
    if '-' in period:
        multiplier, keyword = period.split('-')
        multiplier = int(multiplier)
    else:
        keyword = period
        multiplier = 1
    if keyword not in keyword_durations:
        raise ValueError(f'{period} string does not match supported formats')
    return multiplier * keyword_durations[keyword]
class Portfolio:
    """Combination of several holdings

    :param [str] tickers: Iterable list of case-insensitive stock abbreviations
    :param [float] weights: Percent of portfolio each stock makes up (leave as
        ``None`` for even splits)
    """

    def __init__(self, tickers, weights=None):
        """Build the ``Ticker`` holdings and their weights

        The constructor parameters were documented on the class, but no
        ``__init__`` was defined, leaving ``self.holdings``/``self.weights``
        (used by every method below) unset.

        :param [str] tickers: Iterable of case-insensitive stock abbreviations
        :param [float] weights: Portfolio fraction per ticker; ``None`` gives
            an even split across all holdings
        """
        self.holdings = [Ticker(symbol) for symbol in tickers]
        if weights is None:
            # Even split by default
            self.weights = [1 / len(self.holdings)] * len(self.holdings)
        else:
            self.weights = list(weights)

    def __str__(self):
        """Human readable naming for all holdings"""
        return ', '.join([f'{h.symbol} ({w:.2f})' for h, w in zip(self.holdings, self.weights)])

    def __repr__(self):
        """Displayable instance name for print() function"""
        return f'Portfolio[{str(self)}]'

    def expected_return(self, period, n=1000):
        """Monte-Carlo simulation of typical return and standard deviation

        :param int or str period: Number of days for the return window or a
            keyword string such as daily, monthly, yearly, 5-year, etc.
        :param int n: Number of simulations to run
        :return 3-tuple(float): Mean and standard deviation of return and
            least number of data points used for an individual holding
        """
        # Sample each holding's historical rolling returns independently...
        sample_pools = [h.metric(f'rolling/{period}', average=False) for h in self.holdings]
        missing = [len(s) == 0 for s in sample_pools]
        if any(missing):
            too_long = ', '.join([self.holdings[i].symbol for i, m in enumerate(missing) if m])
            raise RuntimeError(f'Insufficient data for {period} period for holdings {too_long}')
        individual = np.stack([s.sample(n, replace=True).values for s in sample_pools])
        # ...then combine the draws as a weighted sum per simulation
        composite = np.sum(individual * np.array(self.weights).reshape((-1, 1)), axis=0)
        return composite.mean(), composite.std(), min(len(s) for s in sample_pools)

    @property
    def name(self):
        """Human readable naming for all holdings via call to internal __str__"""
        return str(self)
class Ticker:
    """Manages ticker data and calculates descriptive statistics

    Price data is stored on disk in a CSV and loaded into the ``data``
    attribute as a Pandas ``DataFrame``. Prices are indexed by date in
    descending order (most recent first).

    The free-tier of Alpha-Vantage limits users to 5 API calls/minute.
    Therefore, the ``data`` attribute is only refreshed on explicit calls.
    Auto-refreshing on each initialization might exceed the rate limit if the
    calling scope creates several ``Ticker`` objects simultaneously.
    """

    def __init__(self, symbol, merge=None):
        """Load data from disk and format in Pandas

        :param str symbol: Case-insensitive stock abbreviation
        :param str merge: Add a ``relative`` column for price compared to
            another ticker (i.e. XAU for gold oz) for any overlapping dates.
        """
        self.symbol = symbol.upper()
        self.csv_path = os.path.join(conf['paths']['save'], f'{symbol.lower()}.csv')
        if os.path.isfile(self.csv_path):
            self.data = pd.read_csv(
                self.csv_path,
                converters={'price': self._force_float},
                parse_dates=['date'],
                index_col=['date'])
            if merge is not None:
                # NOTE(review): unlike the symbol path above, ``merge`` is not
                # lower-cased before building the filename — confirm callers
                # always pass the exact on-disk casing.
                relative_csv = os.path.join(conf['paths']['save'], f'{merge}.csv')
                if os.path.isfile(relative_csv):
                    rel = pd.read_csv(
                        relative_csv,
                        converters={'price': self._force_float},
                        parse_dates=['date'],
                        index_col=['date'])
                    rel.rename(columns={'price': 'other'}, inplace=True)
                    combined = self.data.join(rel)
                    # relative = this ticker's price expressed in units of the
                    # merge ticker (price / other) on overlapping dates
                    self.data['relative'] = combined.apply(lambda row: row.price / row.other, axis=1)
        else:
            # No CSV yet: start with an empty frame; refresh() populates it
            self.data = pd.DataFrame(columns=['price'])
        self._sort_dates()

    def __str__(self):
        """Full company name"""
        return ticker2name.get(self.symbol.upper(), 'Unknown')

    def __repr__(self):
        """Displayable instance name for print() function"""
        return f'Ticker({self.symbol})'

    @staticmethod
    def _force_float(raw):
        """Cast various data types to float"""
        if isinstance(raw, float):
            return raw
        elif isinstance(raw, int):
            return float(raw)
        elif isinstance(raw, str):
            # NOTE(review): this strips every character except digits and '.'
            # — including '-', so negative string values silently lose their
            # sign. Confirm inputs are always non-negative prices.
            clean = re.sub('[^0-9\.]+', '', raw)
            return float(clean)
        else:
            raise NotImplementedError(f'Unsure how to cast {raw} of type {type(raw)}')

    def _nearest(self, target_date):
        """Determine closest available business date to the target

        :param np.datetime64 or str target_date: Timestamp to use for indexing. Can
            be a preformatted NumPy object or plain string in yyyy-mm-dd format
        :return np.datetime64:
        """
        if isinstance(target_date, str):
            target_date = pd.Timestamp(datetime.strptime(target_date, '%Y-%m-%d')).to_numpy()
        if target_date in self.data.index.values:
            return target_date
        else:
            if target_date > self.data.index.max():
                warn('Target date exceeds max downloaded')
            # NOTE(review): Index.get_loc(..., method='nearest') was removed
            # in pandas 2.0; this code path requires pandas < 2.
            idx = self.data.index.get_loc(target_date, method='nearest')
            return self.data.index[idx].to_numpy()

    def _rolling(self, period, average=True):
        """Calculate rolling return of price data

        Pandas ``pct_change`` returns the percent difference in descending
        order, i.e. ``(data[idx + n] - data[idx]) / data[idx]`` so values at
        higher indices must increase in order for the change to be positive

        :param str period: Number of days for the return window
        :param bool average: Whether to take the mean rolling return or
            return all individual datapoints
        :return float or pd.Series: Rolling return(s)
        """
        days = parse_period(period)
        rolling = self.data.price.pct_change(days).dropna()
        if average:
            return rolling.mean()
        else:
            return rolling

    def _sort_dates(self):
        """Place most recent dates at bottom

        Order is assumed by some metrics like ``_rolling``, so we need to
        share this between the constructor and refresh methods for consistency
        """
        self.data.sort_index(inplace=True)

    def _trailing(self, period, end='today'):
        """Calculate trailing return of price data

        :param str period: Number of days for the return window
        :param str end: End date for point to point calculation. Either
            keyword ``today`` or a timestamp formatted ``yyyy-mm-dd``
        :return float:
        """
        # Setup end date
        if end == 'today':
            end_dt = date.today()
        else:
            end_dt = datetime.strptime(end, '%Y-%m-%d')

        # Calculate trailing date
        if '-' in period:
            multiplier, keyword = period.split('-')
            multiplier = int(multiplier)
        else:
            keyword = period
            multiplier = 1
        if keyword in ['day', 'month', 'year']:
            # Calendar-aware offset (handles month lengths and leap years)
            trail_dt = end_dt - relativedelta(**{keyword + 's': multiplier})
        else:
            # Other keywords fall back to a fixed day count
            trail_dt = end_dt - timedelta(days=parse_period(period))

        # Convert to NumPy format to extract prices from index
        end_price = self.price(pd.Timestamp(end_dt).to_numpy())
        trail_price = self.price(pd.Timestamp(trail_dt).to_numpy())
        return (end_price - trail_price) / trail_price

    @property
    def has_csv(self):
        """Check whether correspond CSV exists on disk"""
        return os.path.isfile(self.csv_path)

    @property
    def is_current(self):
        """Check if instance has the most recent ticker data

        :return bool: Whether the latest timestamp matches the last market day
        """
        if len(self.data) == 0:
            return False
        latest_close = market_day('latest')
        return latest_close <= self.data.index.max()

    def metric(self, metric_name, **kwargs):
        """Parse metric names and dispatch to appropriate internal method

        :param str metric_name: 2-3 part string separated by forward slashes
            1) metric_type: matching an implemented internal method (i.e.
               rolling, trailing, etc)
            2) period: a financial period interpretable by ``parse_period``
            3) options: one or more compatible key-letter flags

               * a: annualized
        :param dict kwargs: Metric-specifc arguments to be forwarded
        :return float: Calculated metric
        :raises NotImplementedError: For metric names with no corresponding
            class method
        """
        if len(self.data) == 0:
            raise TickerDataError(f'No data available for {self.symbol}, try running .refresh()')
        try:
            metric_type, period, *options = metric_name.split('/')
        except ValueError:
            raise ValueError(f'Metric {metric_name} does not match metric_type/period/options format')
        try:
            # Dispatch by name: 'rolling' -> self._rolling, etc.
            method = getattr(self, '_' + metric_type)
        except AttributeError:
            raise NotImplementedError(f'No metric defined for {metric_type}')
        result = method(period, **kwargs)
        if len(options) > 0:
            if 'a' in options:
                result = annualize(result, period)
            else:
                warn(f'Ignoring unknown metric option(s) {options}')
        return result

    @property
    def name(self):
        """Full company name via call to internal __str__"""
        return str(self)

    def price(self, date, exact=False):
        """Retrieve price by date from data attribute

        :param np.datetime64 or str date: Timestamp to use for indexing. Can
            be a preformatted NumPy object or plain string in yyyy-mm-dd format
        :param bool exact: Whether to require an exact timestamp match or use
            the closest date if the requested one is missing (due to date, non-
            business day, etc). If ``True`` a ``ValueError`` will be raised for
            unavailable dates
        :return float: Price on the requested date
        """
        # NOTE(review): the parameter name shadows the module-level
        # ``datetime.date`` import within this method.
        if isinstance(date, str):
            date = pd.Timestamp(datetime.strptime(date, '%Y-%m-%d')).to_numpy()
        if date not in self.data.index:
            if not exact:
                date = self._nearest(date)
            else:
                raise ValueError(f'Requested date {date} not in data')
        return self.data.loc[date].price

    def refresh(self):
        """Refresh local ticker data

        Idempotent behavior if data is already current
        """
        # Check status of existing data
        if self.is_current:
            return
        if self.has_csv:
            # Fall back to a 'full' download when local data is more than
            # ~100 days stale (presumably matching the provider's 'compact'
            # payload window — confirm against the download module).
            if self.data.index.max() < datetime.today() - timedelta(days=100):
                length = 'full'
            else:
                length = 'compact'
            existing = self.data
        else:
            length = 'full'
            existing = None

        # Merge data from Alpha-Vantage or Metals API with existing and write to disk
        if self.symbol in forex:
            new = metals(self.symbol)
        else:
            new = timeseries(self.symbol, length)
        if existing is not None:
            combined = pd.concat([new, existing])
            # Drop duplicate dates; `new` comes first, so freshly downloaded
            # rows win over existing ones
            combined = combined[~combined.index.duplicated()]
            self.data = combined
        else:
            self.data = new
        self._sort_dates()
        self.data.to_csv(self.csv_path)
| [
37811,
198,
51,
15799,
1366,
318,
8574,
319,
11898,
379,
262,
4067,
7368,
416,
198,
15506,
11250,
17816,
6978,
82,
6,
7131,
6,
21928,
20520,
15506,
351,
734,
15180,
357,
20888,
290,
3128,
737,
6060,
460,
307,
198,
14578,
656,
11361,
2... | 2.468731 | 6,492 |