#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket       # UDP communication
import threading    # multithreading
import time         # wait timing
import numpy as np  # arrays for image data
# import libh264decoder  # H.264 decoding (libh264decoder.so built by hand)
class Tello:
    """Wrapper class to interact with a Tello drone."""

    def __init__(self, local_ip, local_port, imperial=False, command_timeout=.3, tello_ip='172.20.72.33', tello_port=8889):
        """
        Initialize the class. Binds the local IP/port and puts the Tello into command mode.

        :param local_ip (str): local IP address to bind (for the UDP server).
        :param local_port (int): local port number to bind.
        :param imperial (bool): if True, speed is in miles/h and distance in feet.
            If False, speed is in km/h and distance in metres. Defaults to False.
        :param command_timeout (int|float): how long to wait for a command response. Defaults to 0.3 seconds.
        :param tello_ip (str): the Tello's IP address. 192.168.10.1 unless it is an EDU model.
        :param tello_port (int): the Tello's port, normally 8889.
        """
        self.abort_flag = False  # abort flag
        # self.decoder = libh264decoder.H264Decoder()  # register the H.264 decoder
        self.command_timeout = command_timeout  # time until a command times out
        self.imperial = imperial  # choice of speed/distance units
        self.response = None  # holds the data the Tello responded with
        self.frame = None  # numpy array in BGR order -- the current camera image
        self.is_freeze = False  # flag: whether the camera output is paused (frozen)
        self.last_frame = None  # image shown while paused
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending/receiving commands
        # self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for receiving the video stream
        self.tello_address = (tello_ip, tello_port)  # immutable tuple of IP address and port
        self.local_video_port = 11111  # port for receiving video
        self.last_height = 0  # last height reported by get_height
        self.socket.bind((local_ip, local_port))  # start (bind) the UDP server that receives command responses

        # thread that receives responses to commands
        self.receive_thread = threading.Thread(target=self._receive_thread)  # create the thread
        self.receive_thread.daemon = True  # make the thread die together with the main process
        self.receive_thread.start()  # start the thread

        # start video reception -- send commands: command, streamon
        self.socket.sendto(b'command', self.tello_address)  # send 'command' to put the Tello into SDK mode
        print('sent: command')
        # self.socket.sendto(b'streamon', self.tello_address)  # send 'streamon' to start video streaming
        # print('sent: streamon')
        # self.socket_video.bind((local_ip, self.local_video_port))  # start (bind) the UDP server that receives video

        # video reception thread
        # self.receive_video_thread = threading.Thread(target=self._receive_video_thread)  # create the thread
        # self.receive_video_thread.daemon = True  # make the thread die together with the main process
        # self.receive_video_thread.start()  # start the thread
    def __del__(self):
        """Close the local sockets."""
        self.socket.close()  # close the command socket
        # self.socket_video.close()  # close the video socket

    # def read(self):
    #     """Return the latest image received from the camera."""
    #     if self.is_freeze:  # while the freeze flag is True, return the stored image
    #         return self.last_frame
    #     else:  # otherwise return the latest image
    #         return self.frame

    # def video_freeze(self, is_freeze=True):
    #     """Pause the video output -- set the is_freeze flag to True."""
    #     self.is_freeze = is_freeze  # store the freeze state
    #     if is_freeze:  # when True, keep the current image in last_frame
    #         self.last_frame = self.frame
    def _receive_thread(self):
        """
        Listen for responses from the Tello.

        Runs as a thread and stores the Tello's most recent response in self.response.
        """
        while True:
            try:
                self.response, ip = self.socket.recvfrom(3000)  # receive a response from the Tello (up to 3000 bytes at a time)
                # print(self.response)
            except socket.error as exc:  # error handling
                print("Caught exception socket.error : %s" % exc)
    def _receive_video_thread(self):
        """
        Listen for the video stream (raw H.264 data) from the Tello.

        Runs as a thread and stores the latest decoded image in self.frame.
        Unused while the video socket setup in __init__ stays commented out.
        (The `def` line was commented out while its body was not, which broke the module.)
        """
        packet_data = ""  # initialize the buffer
        while True:
            try:
                res_string, ip = self.socket_video.recvfrom(2048)  # receive image data from the Tello (up to 2048 bytes at a time)
                packet_data += res_string  # append to packet_data so it grows into one long buffer
                # end of frame
                if len(res_string) != 1460:  # when a datagram is not 1460 bytes long, decode packet_data to get frames
                    for frame in self._h264_decode(packet_data):  # the decoded data may hold several images, so loop over them
                        self.frame = frame
                    packet_data = ""  # reset the buffer
            except socket.error as exc:
                print("Caught exception socket.error : %s" % exc)
    def _h264_decode(self, packet_data):
        """
        Decode raw H.264 data received from the Tello.

        Unused while the decoder setup in __init__ stays commented out.
        :param packet_data: raw H.264 data
        :return: list of decoded images (there may be more than one)
        """
        res_frame_list = []  # initialize the list
        frames = self.decoder.decode(packet_data)  # decode packet_data
        for framedata in frames:  # the data may hold several images, so loop over them
            (frame, w, h, ls) = framedata  # unpack the data
            if frame is not None:  # when the frame is not empty
                # print('frame size %i bytes, w %i, h %i, linesize %i' % (len(frame), w, h, ls))
                frame = np.frombuffer(frame, dtype=np.ubyte, count=len(frame))  # rebuild the byte string as an np.ubyte array (np.fromstring is deprecated)
                frame = frame.reshape((h, ls // 3, 3))  # reshape to 3-D, accounting for the RGB channels (integer division for Python 3)
                frame = frame[:, :w, :]  # keep only the image width and drop the garbage on the right
                res_frame_list.append(frame)  # append as a list element
        return res_frame_list  # return the list holding one or more images
    def send_command(self, command):
        """
        Send a command to the Tello and wait for a response.

        :param command: command to send
        :return (str): the Tello's response
        """
        print(">> send cmd: {}".format(command))
        self.abort_flag = False  # lower the abort flag
        timer = threading.Timer(self.command_timeout, self.set_abort_flag)  # timer thread that raises the flag once the timeout elapses
        self.socket.sendto(command.encode('utf-8'), self.tello_address)  # send the command
        timer.start()  # start the timer
        while self.response is None:  # leave the loop as soon as a response arrives before the timeout
            if self.abort_flag is True:  # break once the timeout is reached
                break
        timer.cancel()  # stop the timer
        if self.response is None:  # no response data
            response = 'none_response'
        else:  # response data available
            response = self.response.decode('utf-8')
        self.response = None  # clear it here; the _receive_thread thread will store the next response
        return response  # return this command's response
    def set_abort_flag(self):
        """
        Set self.abort_flag to True.

        Called by the timer in send_command.
        Being called means no response arrived and the command timed out.
        """
        self.abort_flag = True

    def takeoff(self):
        """
        Initiate take-off.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('takeoff')
    def set_speed(self, speed):
        """
        Set the speed.

        This method expects km/h or miles/h as its argument.
        The Tello API itself works in 1-100 centimetres/second.

        Metric: 0.1 to 3.6 km/h
        Imperial: 0.1 to 2.2 miles/h

        Args:
            speed (int|float): speed.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        speed = float(speed)
        if self.imperial is True:  # convert according to the unit system
            speed = int(round(speed * 44.704))  # miles/h -> cm/s
        else:
            speed = int(round(speed * 27.7778))  # km/h -> cm/s
        return self.send_command('speed %s' % speed)
    def rotate_cw(self, degrees):
        """
        Rotate clockwise.

        Args:
            degrees (int): angle of rotation, 1-360 degrees.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('cw %s' % degrees)

    def rotate_ccw(self, degrees):
        """
        Rotate counter-clockwise.

        Args:
            degrees (int): angle of rotation, 1-360 degrees.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('ccw %s' % degrees)

    def flip(self, direction):
        """
        Perform a flip.

        Args:
            direction (str): character for the flip direction, 'l', 'r', 'f' or 'b'.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('flip %s' % direction)
    def get_response(self):
        """
        Return the Tello's most recent response.

        Returns:
            bytes|None: the Tello's raw response.
        """
        response = self.response
        return response
    def get_height(self):
        """
        Return the Tello's height in decimetres.

        Returns:
            int: the Tello's height (dm).
        """
        height = self.send_command('height?')
        height = str(height)
        print("Debug: " + height)
        height = ''.join(filter(str.isdigit, height))  # keep digits only; in Python 3, int() cannot parse a bare filter object
        try:
            height = int(height)
            self.last_height = height
        except ValueError:
            height = self.last_height
        return height
    def get_battery(self):
        """
        Return the remaining battery charge as a percentage.

        Returns:
            int: remaining battery percentage.
        """
        battery = self.send_command('battery?')
        try:
            battery = int(battery)
        except ValueError:
            pass
        return battery

    def get_flight_time(self):
        """
        Return the flight time in seconds.

        Returns:
            int: elapsed flight time.
        """
        flight_time = self.send_command('time?')
        try:
            flight_time = int(flight_time)
        except ValueError:
            pass
        return flight_time
    def get_speed(self):
        """
        Return the current speed.

        Returns:
            float: current speed, km/h or miles/h.
        """
        speed = self.send_command('speed?')
        try:
            speed = float(speed)
            if self.imperial is True:
                speed = round((speed / 44.704), 1)  # cm/s -> miles/h
            else:
                speed = round((speed / 27.7778), 1)  # cm/s -> km/h
        except ValueError:
            pass
        return speed

    def land(self):
        """
        Initiate landing.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('land')
    def move(self, direction, distance):
        """
        Move in the given direction by the given distance.

        This method expects metres or feet. The Tello API itself works
        in 20-500 centimetres.

        Metric: 0.2 to 5 metres
        Imperial: 0.7 to 16.4 feet

        Args:
            direction (str): direction to move, 'forward', 'back', 'right',
                'left', 'up' or 'down'.
            distance (int|float): distance to move (metres or feet).

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        distance = float(distance)
        if self.imperial is True:
            distance = int(round(distance * 30.48))  # feet -> cm
        else:
            distance = int(round(distance * 100))  # m -> cm
        return self.send_command('%s %s' % (direction, distance))
    def move_backward(self, distance):
        """
        Move backward by the given distance.

        See the comments on Tello.move().

        Args:
            distance (int|float): distance to move.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.move('back', distance)

    def move_down(self, distance):
        """
        Descend by the given distance.

        See the comments on Tello.move().

        Args:
            distance (int|float): distance to move.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.move('down', distance)

    def move_forward(self, distance):
        """
        Move forward by the given distance.

        See the comments on Tello.move().

        Args:
            distance (int|float): distance to move.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.move('forward', distance)

    def move_left(self, distance):
        """
        Move left by the given distance.

        See the comments on Tello.move().

        Args:
            distance (int|float): distance to move.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.move('left', distance)

    def move_right(self, distance):
        """
        Move right by the given distance.

        See the comments on Tello.move().

        Args:
            distance (int|float): distance to move.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.move('right', distance)

    def move_up(self, distance):
        """
        Ascend by the given distance.

        See the comments on Tello.move().

        Args:
            distance (int|float): distance to move.

        Returns:
            str: Response from the Tello, 'OK' or 'FALSE'.
        """
        return self.move('up', distance)
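# --- Usage sketch (not part of the original file) --------------------------
# A minimal, hypothetical example of driving the class above; the local
# IP/port and the 0.5 m hop are placeholder values, and the drone must be
# reachable at the tello_ip configured in __init__ for this to do anything.
if __name__ == '__main__':
    drone = Tello(local_ip='', local_port=8889)
    print(drone.get_battery())
    drone.takeoff()
    drone.move_up(0.5)
    drone.land()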
|
{"hexsha": "60868eb91d50a098ba4708a40949387b7096cd65", "size": 12804, "ext": "py", "lang": "Python", "max_stars_repo_path": "tello.py", "max_stars_repo_name": "uzumal/Tello_Keyboard", "max_stars_repo_head_hexsha": "1065b03be6ac72f0afd3d235b171888e8e0427e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tello.py", "max_issues_repo_name": "uzumal/Tello_Keyboard", "max_issues_repo_head_hexsha": "1065b03be6ac72f0afd3d235b171888e8e0427e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tello.py", "max_forks_repo_name": "uzumal/Tello_Keyboard", "max_forks_repo_head_hexsha": "1065b03be6ac72f0afd3d235b171888e8e0427e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8991596639, "max_line_length": 123, "alphanum_fraction": 0.546704155, "include": true, "reason": "import numpy", "num_tokens": 4631}
|
"""Simple LSTM layer implementation.
Source: Andrej Karpathy (https://gist.github.com/karpathy/587454dc0146a6ae21fc)
"""
import numpy as np
class LSTM(object):
    def init(self, n_input, n_hidden):
        # note: named `init` (not `__init__`) in the original gist; it simply returns the weight matrix
        WLSTM = np.random.rand(n_input + n_hidden + 1, 4 * n_hidden) / np.sqrt(n_input + n_hidden)
        WLSTM[0, :] = 0  # zero out the bias row
        return WLSTM
def Sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
def Forward(self, X, WLSTM, c0=None, h0=None):
        n_hidden = WLSTM.shape[1] // 4  # integer division: in Python 3, '/' would yield a float and break indexing
n_input = X.shape[2]
batch_size = X.shape[1]
num_steps = X.shape[0]
Hin = np.zeros((num_steps, batch_size, n_input + n_hidden + 1))
IFOG = np.zeros((num_steps, batch_size, 4 * n_hidden)) # input, input gate, forget, output
IFOGf = np.zeros((num_steps, batch_size, 4 * n_hidden))
C = np.zeros((num_steps, batch_size, n_hidden))
Ct = np.zeros((num_steps, batch_size, n_hidden))
Hout = np.zeros((num_steps, batch_size, n_hidden))
h0 = h0 if h0 is not None else np.zeros((batch_size, n_hidden))
c0 = c0 if c0 is not None else np.zeros((batch_size, n_hidden))
for t in range(num_steps):
Hin[t, :, 0] = 1
Hin[t, :, 1:n_input+1] = X[t]
Hin[t, :, n_input+1:] = h0 if t == 0 else Hout[t-1]
IFOG[t] = np.dot(Hin[t], WLSTM)
IFOGf[t, :, :n_hidden] = np.tanh(IFOG[t, :, :n_hidden])
IFOGf[t, :, n_hidden:] = self.Sigmoid(IFOG[t, :, n_hidden:])
C[t] = IFOGf[t, :, :n_hidden] * IFOGf[t, :, n_hidden:2*n_hidden]
prevc = c0 if t == 0 else C[t-1]
C[t] += IFOGf[t, :, 2*n_hidden:3*n_hidden] * prevc
Ct[t] = np.tanh(C[t])
Hout[t] = Ct[t] * IFOGf[t, :, 3*n_hidden:]
cache = {
'Hin': Hin,
'WLSTM': WLSTM,
'Hout': Hout,
'IFOG': IFOG,
'IFOGf': IFOGf,
'C': C,
'Ct': Ct,
'c0': c0
}
        return Hout, C[-1], Hout[-1], cache  # last cell state and hidden state (avoids relying on the leftover loop variable)
def Backward(self, dHout_in, cache, dcn=None, dhn=None):
WLSTM = cache['WLSTM']
Hout = cache['Hout']
Hin = cache['Hin']
IFOG = cache['IFOG']
IFOGf = cache['IFOGf']
C = cache['C']
Ct = cache['Ct']
c0 = cache['c0']
num_steps = Hin.shape[0]
batch_size = Hin.shape[1]
n_hidden = Hout.shape[2]
n_input = Hin.shape[2] - n_hidden - 1
dIFOGf = np.zeros(IFOGf.shape)
dIFOG = np.zeros(IFOG.shape)
dWLSTM = np.zeros(WLSTM.shape)
dC = np.zeros(C.shape)
dX = np.zeros((num_steps, batch_size, n_input))
dHin = np.zeros(Hin.shape)
dHout = dHout_in.copy()
dh0 = np.zeros((batch_size, n_hidden))
dc0 = np.zeros((batch_size, n_hidden))
if dcn is not None:
dC[num_steps-1] += dcn.copy()
if dhn is not None:
dHout[num_steps-1] += dhn.copy()
for t in reversed(range(num_steps)):
dIFOGf[t, :, 3*n_hidden:] = Ct[t] * dHout[t] # output gate.
dC[t] += (1 - Ct[t]**2) * (IFOGf[t, :, 3*n_hidden:] * dHout[t])
if t > 0:
dIFOGf[t, :, 2*n_hidden:3*n_hidden] = dC[t] * C[t-1] # forget gate.
dC[t-1] = dC[t] * IFOGf[t, :, 2*n_hidden:3*n_hidden]
else:
dIFOGf[t, :, 2*n_hidden:3*n_hidden] = dC[t] * c0 # forget gate.
dc0 = dC[t] * IFOGf[t, :, 2*n_hidden:3*n_hidden]
dIFOGf[t, :, :n_hidden] = dC[t] * IFOGf[t, :, n_hidden:2*n_hidden] # input.
dIFOGf[t, :, n_hidden:2*n_hidden] = dC[t] * IFOGf[t, :, :n_hidden] # input gate.
dIFOG[t, :, :n_hidden] = (1 - IFOGf[t, :, :n_hidden] ** 2) * dIFOGf[t, :, :n_hidden]
y = IFOGf[t, :, n_hidden:]
dIFOG[t, :, n_hidden:] = y * (1 - y) * dIFOGf[t, :, n_hidden:]
dWLSTM += np.dot(Hin[t].transpose(), dIFOG[t])
dHin[t] = np.dot(dIFOG[t], WLSTM.transpose())
dX[t] = dHin[t, :, 1:n_input+1]
if t > 0:
dHout[t-1] += dHin[t, :, n_input+1:]
else:
dh0 += dHin[t, :, n_input+1:]
return dX, dWLSTM, dc0, dh0
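# --- Smoke test (not part of the original gist) -----------------------------
# A minimal sketch that runs one forward and one backward pass on random data
# and checks the returned shapes; all sizes below are arbitrary.
if __name__ == "__main__":
    np.random.seed(0)
    lstm = LSTM()
    WLSTM = lstm.init(n_input=4, n_hidden=3)
    X = np.random.randn(5, 2, 4)  # (num_steps, batch_size, n_input)
    Hout, cn, hn, cache = lstm.Forward(X, WLSTM)
    dX, dWLSTM, dc0, dh0 = lstm.Backward(np.random.randn(*Hout.shape), cache)
    assert dX.shape == X.shape and dWLSTM.shape == WLSTM.shape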
|
{"hexsha": "404b6860f30a1f32cb1305767a60753741ec2800", "size": 3590, "ext": "py", "lang": "Python", "max_stars_repo_path": "lstm.py", "max_stars_repo_name": "prasanna08/MachineLearning", "max_stars_repo_head_hexsha": "5ccd17db85946630730ee382b7cc258d4fa866e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-07-22T18:07:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-26T06:30:15.000Z", "max_issues_repo_path": "lstm.py", "max_issues_repo_name": "prasanna08/MachineLearning", "max_issues_repo_head_hexsha": "5ccd17db85946630730ee382b7cc258d4fa866e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-01-08T12:16:53.000Z", "max_issues_repo_issues_event_max_datetime": "2017-01-08T12:21:17.000Z", "max_forks_repo_path": "lstm.py", "max_forks_repo_name": "prasanna08/MachineLearning", "max_forks_repo_head_hexsha": "5ccd17db85946630730ee382b7cc258d4fa866e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9516129032, "max_line_length": 92, "alphanum_fraction": 0.5949860724, "include": true, "reason": "import numpy", "num_tokens": 1436}
|
import os
import sys
import glob
import json
import scipy.signal as signal
import numpy.ma as ma
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import datetime
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
From http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate: int or float
        the spacing scale of the samples (default = 1).
    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
    Notes
    -----
    The Savitzky-Golay is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
    the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
        window_size = np.abs(int(window_size))  # np.int was removed in recent NumPy releases
        order = np.abs(int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
    b = np.array([[k**i for i in order_range] for k in range(-half_window, half_window+1)])  # np.mat is deprecated; a plain array behaves the same here
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
matplotlib.rcParams['font.size'] = 8
def process(f, i):
path = 'time_series_images/' + os.path.basename(f) + '.png'
if os.path.exists(path):
print('Exists, skipping ...')
return
j = json.loads(open(f).read())
p = j['features'][0]['properties']
# fr = p['water_area_filled_fraction']
t = p['water_area_time']
v1 = p['water_area_value']
v2 = p['water_area_filled']
t_jrc = p['water_area_time_jrc']
v_jrc = p['water_area_value_jrc']
filled_fr = list(zip(v1, v2))
filled_fr = [(o[1]-o[0])/o[1] for o in filled_fr]
mask = ma.masked_greater_equal(filled_fr, 0.5)
# t = list(ma.masked_array(t, mask).compressed())
# v1 = list(ma.masked_array(v1, mask).compressed())
# v2 = list(ma.masked_array(v2, mask).compressed())
if not len(t):
print('Empty, skipping ...')
return
years = mdates.YearLocator() # every year
v2_filtered = savitzky_golay(np.array(v2), window_size=15, order=4)
# v2_filtered = signal.medfilt(v2, 7)
# v2_filtered = lowess(v2, t)
# v2_filtered = lowess(v2, t, frac=1./50)
t = [datetime.datetime.fromtimestamp(tt / 1000) for tt in t]
t_jrc = [datetime.datetime.fromtimestamp(tt_jrc / 1000) for tt_jrc in t_jrc]
s_scale = 'Scale: {:.2f}'.format(p['scale']) + '$m$'
s_area = 'Area: {:.2f}'.format(p['area']/(1000*1000)) + '$km^2$, ' + '{:.2f}'.format(100 * p['area']/(1000*1000)) + '$ha$'
title = s_scale + ', ' + s_area
fig = plt.figure(figsize=(11, 4))
ax = fig.add_subplot(111)
ax.xaxis.set_major_locator(years)
# fig.autofmt_xdate()
ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
plt.title(title)
plt.xticks(rotation=90)
ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.05)
ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.8)
# for SG
if len(t) != len(v2_filtered):
print('Bad, shapes are not equal, skipping line plotting ...')
else:
ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
# for LOWESS
# v2_filtered_t = [datetime.datetime.fromtimestamp(t / 1000) for t in v2_filtered[:, 0]]
# ax.plot(v2_filtered_t, v2_filtered[:, 1], marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
path = 'time_series_images/' + os.path.basename(f) + '.png'
print(str(i) + ' ' + path)
plt.tight_layout()
plt.savefig(path, dpi=150)
plt.close()
# ========================== JRC
# fig = plt.figure(figsize=(11, 4))
# ax = fig.add_subplot(111)
# ax.xaxis.set_major_locator(years)
# ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
# ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
# plt.title(title)
# plt.xticks(rotation=90)
# ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.8)
# ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
# ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.05)
# for SG
# if len(t) != len(v2_filtered):
# print('Bad, shapes are not equal, skipping line plotting ...')
# else:
# ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
# path = 'time_series_images/' + os.path.basename(f) + '-jrc.png'
# print(str(i) + ' ' + path)
# plt.tight_layout()
# plt.savefig(path, dpi=150)
# plt.close()
offset = 0
for (i, f) in enumerate(glob.glob('time_series/*.geojson')[offset:]):
print('Processing ' + str(i) + ' ...')
process(f, i + offset)
|
{"hexsha": "b255acd0dcac5f174aa2904755980448a1a9bee3", "size": 7073, "ext": "py", "lang": "Python", "max_stars_repo_path": "time_series_scripts/tasks_generate_thumbs.py", "max_stars_repo_name": "openearth/eo-reservoir", "max_stars_repo_head_hexsha": "fab049d2a88fa59ebad682149b606e30c5e2f94c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "time_series_scripts/tasks_generate_thumbs.py", "max_issues_repo_name": "openearth/eo-reservoir", "max_issues_repo_head_hexsha": "fab049d2a88fa59ebad682149b606e30c5e2f94c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "time_series_scripts/tasks_generate_thumbs.py", "max_forks_repo_name": "openearth/eo-reservoir", "max_forks_repo_head_hexsha": "fab049d2a88fa59ebad682149b606e30c5e2f94c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-03-29T09:29:23.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-29T09:29:23.000Z", "avg_line_length": 32.7453703704, "max_line_length": 124, "alphanum_fraction": 0.6411706489, "include": true, "reason": "import numpy,import scipy,import statsmodels", "num_tokens": 2165}
|
from tkinter import *
import numpy as np
def bomb(A,x):
    """Return -1 if cell x holds a mine, else the count of mined vertical/horizontal neighbours."""
    mine=0
    if A[x]==1:
        mine=-1
    else:
        if x+6<36:  # cell below exists (was x+6<35, which skipped the neighbour below cell 29)
            if A[x+6]==1:
                mine=mine+1
        if x-6>=0:  # cell above exists (was x-6>0, which skipped cell 0 as a neighbour of cell 6)
            if A[x-6]==1:
                mine=mine+1
        if (x+1)%6!=0:  # not on the right edge
            if A[x+1]==1:
                mine=mine+1
        if (x-1)%6!=5:  # not on the left edge
            if A[x-1]==1:
                mine=mine+1
    return mine
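# Illustrative example (not in the original): the board is a 6x6 grid
# flattened row-major, so cell 7 touches 1 (above), 13 (below), 6 (left)
# and 8 (right). With mines at cells 1 and 8:
#   A = [0]*36; A[1] = A[8] = 1
#   bomb(A, 7)  -> 2   (two mined neighbours)
#   bomb(A, 8)  -> -1  (cell 8 itself is a mine)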
def reveal(x):
    """Shared click handler (replaces the 36 near-identical funcXY functions):
    disable button x, show its mine count, and update the game state."""
    global c
    b[x]['state']=DISABLED
    mines=bomb(A,x)
    if mines==-1:
        # stepped on a mine: freeze the whole board and show every mine in red
        for i in range(36):
            b[i]['state']=DISABLED
        for i in l1:
            b[i].configure(bg='red')
    else:
        b[x].configure(bg='gray',text=str(mines),fg='green')
        c=c-1
        # print(c)
        if c==0:
            # every safe cell is open: show the mines in green to mark the win
            for i in l1:
                b[i].configure(bg='green')
if __name__ == "__main__":
    A=[]
    c=0
    mine_cells=[]  # renamed from 'list' to avoid shadowing the builtin
    l1=[]
    for i in range(10):
        mine_cells.append(np.random.randint(0,36))  # note: duplicates are possible, so there may be fewer than 10 mines
    for i in range(36):
        if i in mine_cells:
            A.append(1)
        else:
            A.append(0)
    for i in range(len(A)):
        if A[i]==0:
            c=c+1
        else:
            l1.append(i)
    # print(c)
    master=Tk()
    master.title("Minesweeper")
    # build the 6x6 button grid in a loop (replaces the 36 near-identical
    # b00..b55 definitions); the default argument binds each cell index
    buttons=[]
    for i in range(36):
        btn=Button(master,bg='blue',command=lambda x=i: reveal(x),height=1,width=1)
        btn.grid(row=i//6,column=i%6)
        buttons.append(btn)
    b=tuple(buttons)
    mainloop()
|
{"hexsha": "855fd0a0416215779c4edd7f2084488b886ac585", "size": 17085, "ext": "py", "lang": "Python", "max_stars_repo_path": "minesweeper.py", "max_stars_repo_name": "preetam2030/Minesweeper", "max_stars_repo_head_hexsha": "e1be3cb985e7552a066fd09a492a2cd279d171ba", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "minesweeper.py", "max_issues_repo_name": "preetam2030/Minesweeper", "max_issues_repo_head_hexsha": "e1be3cb985e7552a066fd09a492a2cd279d171ba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "minesweeper.py", "max_forks_repo_name": "preetam2030/Minesweeper", "max_forks_repo_head_hexsha": "e1be3cb985e7552a066fd09a492a2cd279d171ba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3918741809, "max_line_length": 149, "alphanum_fraction": 0.5990049751, "include": true, "reason": "import numpy", "num_tokens": 5951}
|
"""TODO(rpeloff)
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: October 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from moonshot.baselines import fast_dtw
from moonshot.experiments.flickr_speech import flickr_speech
from moonshot.experiments.flickr_vision import flickr_vision
from moonshot.experiments.flickr_multimodal import flickr_multimodal
def augment_square_crop(image, size=224, random_scales=None,
horizontal_flip=True, colour=False):
"""Augment image (scale, flip, colour) and select random square crop."""
# get shorter side of image
image_shape = tf.shape(image)
h, w = image_shape[0], image_shape[1]
short_edge = tf.minimum(w, h)
# scale augmentation
if random_scales is None:
# random resize along shorter edge in [256, 480]
# maxval - minval = power of 2 => unbiased random integers
rand_resize = tf.random.uniform(
[], minval=tf.maximum(256, size), maxval=(480+1), dtype=tf.int32)
else:
# random resize along shorter edge in `random_scales` if specified
rand_scale_idx = tf.random.uniform(
[], maxval=tf.shape(random_scales)[0], dtype=tf.int32)
        rand_resize = tf.convert_to_tensor(random_scales)[rand_scale_idx]  # index into random_scales (was rand_resize, which is undefined on this branch)
resize_hw = (rand_resize * h/short_edge, rand_resize * w/short_edge)
image = tf.image.resize(image, resize_hw, method="lanczos3")
# horizontal flip augmentation
if horizontal_flip:
image = tf.image.random_flip_left_right(image)
# colour augmentation (ordering of these ops matters so we shuffle them)
if colour:
        color_ordering = tf.random.uniform([], maxval=2, dtype=tf.int32)  # maxval=2 so both orderings can occur (maxval=1 always yielded 0)
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
# crop augmentation, random sample square (size, size, 3) from resized image
image = tf.image.random_crop(image, size=(size, size, 3))
# make sure that we still have an image in range [0, 1]
image = image - tf.reduce_min(image)
image = tf.math.divide_no_nan(image, tf.reduce_max(image))
return image
def resize_square_crop(image, size=224):
"""Resize image along short edge and center square crop."""
# get shorter side of image
image_shape = tf.shape(image)
h, w = image_shape[0], image_shape[1]
short_edge = tf.minimum(w, h)
# resize image
resize_hw = (size * h/short_edge, size * w/short_edge)
image = tf.image.resize(image, resize_hw, method="lanczos3")
# center square crop
image_shape = tf.shape(image)
h, w = image_shape[0], image_shape[1]
h_shift = tf.cast((h - size) / 2, tf.int32)
w_shift = tf.cast((w - size) / 2, tf.int32)
image = tf.image.crop_to_bounding_box(
image, h_shift, w_shift, size, size)
# make sure that we still have an image in range [0, 1]
image = image - tf.reduce_min(image)
image = tf.math.divide_no_nan(image, tf.reduce_max(image))
return image
def load_and_preprocess_image(image_path, crop_size=224, augment_crop=False,
normalise=True, random_scales=None,
horizontal_flip=True, colour=False):
"""Load image at path and square crop with optional augmentation."""
# read and decode image
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image, channels=3)
# scale image to range [0, 1] expected by tf.image functions
image = tf.cast(image, tf.float32) / 255.
# random crop image for testing
if augment_crop:
image = augment_square_crop(
image, size=crop_size, random_scales=random_scales,
horizontal_flip=horizontal_flip, colour=colour)
else:
image = resize_square_crop(image, size=crop_size)
tf.debugging.assert_greater_equal(image, tf.constant(0.))
tf.debugging.assert_less_equal(image, tf.constant(1.))
# normalise image from [0, 1] to range [-1, 1]
if normalise:
image *= 2.
image -= 1.
tf.debugging.assert_greater_equal(image, tf.constant(-1.))
tf.debugging.assert_less_equal(image, tf.constant(1.))
return image
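# Illustrative only (not in the original module): a hypothetical tf.data
# pipeline built on load_and_preprocess_image; the file names are made up,
# so this stays commented to keep the module importable.
# image_paths = tf.data.Dataset.from_tensor_slices(["a.jpg", "b.jpg"])
# train_images = image_paths.map(
#     lambda path: load_and_preprocess_image(path, augment_crop=True),
#     num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)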
def create_flickr_vision_train_data(data_sets, embed_dir=None):
"""Load train and validation Flickr vision data."""
flickr8k_image_dir = None
if "flickr8k" in data_sets:
flickr8k_image_dir = os.path.join("data", "external", "flickr8k_images")
flickr30k_image_dir = None
if "flickr30k" in data_sets:
flickr30k_image_dir = os.path.join(
"data", "external", "flickr30k_images")
mscoco_train_image_dir = None
mscoco_dev_image_dir = None
if "mscoco" in data_sets:
mscoco_train_image_dir = os.path.join(
"data", "external", "mscoco", "train2017")
mscoco_dev_image_dir = os.path.join(
"data", "external", "mscoco", "val2017")
flickr_train_exp = flickr_vision.FlickrVision(
keywords_split="background_train",
flickr8k_image_dir=flickr8k_image_dir,
flickr30k_image_dir=flickr30k_image_dir,
mscoco_image_dir=mscoco_train_image_dir, embed_dir=embed_dir)
flickr_dev_exp = flickr_vision.FlickrVision(
keywords_split="background_dev",
flickr8k_image_dir=flickr8k_image_dir,
flickr30k_image_dir=flickr30k_image_dir,
mscoco_image_dir=mscoco_dev_image_dir, embed_dir=embed_dir)
return flickr_train_exp, flickr_dev_exp
def load_and_preprocess_speech(speech_path, features, max_length=130,
                               reinterpolate=None, scaling=None):
    """Load speech features at path, scale them, and pad/crop or re-interpolate to max length."""
    # load speech features from numpy binary file
if isinstance(speech_path, tf.Tensor):
speech_path = speech_path.numpy().decode("utf-8")
speech_features = np.load(speech_path)
# scale speech features
if scaling == "global":
speech_features -= flickr_speech.train_global_mean[features]
speech_features /= np.sqrt(flickr_speech.train_global_var[features])
elif scaling == "features":
speech_features -= flickr_speech.train_features_mean[features]
speech_features /= np.sqrt(flickr_speech.train_features_var[features])
elif scaling == "segment":
speech_features = tf.math.divide_no_nan(
speech_features - np.mean(speech_features),
np.sqrt(np.var(speech_features)))
elif scaling == "segment_mean":
speech_features = speech_features - np.mean(speech_features)
# center pad speech features (or crop if longer than max length)
if reinterpolate is None:
# add "height" dim
speech_features = tf.expand_dims(speech_features, axis=0)
# crop/pad the speech features "image"
speech_features = tf.image.resize_with_crop_or_pad(
speech_features, target_height=1, target_width=max_length)
# remove "height" dim
speech_features = tf.squeeze(speech_features, axis=0)
# re-interpolate speech features to max length
else:
speech_features = fast_dtw.dtw_reinterp2d(
speech_features, max_length, interp=reinterpolate)
return speech_features
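# Illustrative only (not in the original module): since this function mixes
# NumPy and file I/O, a tf.data pipeline would wrap it in tf.py_function;
# the path and the "mfcc" feature key below are assumptions.
# speech_paths = tf.data.Dataset.from_tensor_slices(["features.npy"])
# speech_ds = speech_paths.map(
#     lambda path: tf.py_function(
#         lambda p: load_and_preprocess_speech(p, "mfcc", scaling="segment"),
#         [path], tf.float32))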
def create_flickr_audio_train_data(features, embed_dir=None,
speaker_mode="baseline"):
"""Load train and validation Flickr audio data."""
flickr_train_exp = flickr_speech.FlickrSpeech(
features=features, keywords_split="background_train",
embed_dir=embed_dir, speaker_mode=speaker_mode)
flickr_dev_exp = flickr_speech.FlickrSpeech(
features=features, keywords_split="background_dev", embed_dir=embed_dir,
speaker_mode=speaker_mode)
return flickr_train_exp, flickr_dev_exp
def create_flickr_multimodal_train_data(
features, speech_embed_dir=None, image_embed_dir=None,
speech_preprocess_func=None, image_preprocess_func=None,
speaker_mode="baseline", unseen_match_set=False):
"""Load train and validation paired Flickr 8k and Flickr Audio data."""
flickr_train_exp = flickr_multimodal.FlickrMultimodal(
features=features, keywords_split="background_train",
flickr8k_image_dir=os.path.join("data", "external", "flickr8k_images"),
speech_embed_dir=speech_embed_dir, image_embed_dir=image_embed_dir,
speech_preprocess_func=speech_preprocess_func,
image_preprocess_func=image_preprocess_func,
speaker_mode=speaker_mode, unseen_match_set=unseen_match_set)
flickr_dev_exp = flickr_multimodal.FlickrMultimodal(
features=features, keywords_split="background_dev",
flickr8k_image_dir=os.path.join("data", "external", "flickr8k_images"),
speech_embed_dir=speech_embed_dir, image_embed_dir=image_embed_dir,
speech_preprocess_func=speech_preprocess_func,
image_preprocess_func=image_preprocess_func,
speaker_mode=speaker_mode, unseen_match_set=unseen_match_set)
return flickr_train_exp, flickr_dev_exp
def embedding_to_example_protobuf(embedding):
"""Create tf.Example message (protobuf) from an embedding array."""
feature = {
"embed": tf.train.Feature(
float_list=tf.train.FloatList(value=embedding))}
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
return example_proto
def parse_embedding_protobuf(example_proto):
"""Parse a serialized tf.Example embedding with variable length."""
feature_description = {
"embed": tf.io.FixedLenSequenceFeature(
[], tf.float32, allow_missing=True)}
return tf.io.parse_single_example(
example_proto, feature_description)
def create_balanced_batch_dataset(p, k, label_datasets):
"""Creates a dataset that samples a balanced batch from `label_datasets`.
`p` is number of classes per batch, `k` is number of samples per class,
`label_datasets` is list of datasets corresponding to class labels.
"""
num_labels = len(label_datasets)
def label_generator():
# sample labels that will compose the balanced batch
labels = np.random.choice(range(num_labels), p, replace=False)
for label in labels:
for _ in range(k):
yield label
choice_dataset = tf.data.Dataset.from_generator(label_generator, tf.int64)
balanced_dataset = tf.data.experimental.choose_from_datasets(
label_datasets, choice_dataset)
return balanced_dataset
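# Illustrative only (not in the original module): sampling a 2-way 3-shot
# balanced batch from toy per-label datasets; the data below is made up.
# label_datasets = [
#     tf.data.Dataset.from_tensor_slices(tf.fill([100], label)).repeat()
#     for label in range(4)]
# balanced = create_balanced_batch_dataset(2, 3, label_datasets)
# batch = next(iter(balanced.batch(2 * 3)))  # 6 labels: 3 each from 2 classes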
|
{"hexsha": "7e3272a041e13bbabf1ad1e11cc68fa904a6317e", "size": 10789, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/moonshot/baselines/dataset.py", "max_stars_repo_name": "rpeloff/moonshot", "max_stars_repo_head_hexsha": "f58ddaa15c2bea416731e3bd1f2c5de86d6aa115", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-10-29T09:50:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-22T19:01:07.000Z", "max_issues_repo_path": "src/moonshot/baselines/dataset.py", "max_issues_repo_name": "rpeloff/moonshot", "max_issues_repo_head_hexsha": "f58ddaa15c2bea416731e3bd1f2c5de86d6aa115", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/moonshot/baselines/dataset.py", "max_forks_repo_name": "rpeloff/moonshot", "max_forks_repo_head_hexsha": "f58ddaa15c2bea416731e3bd1f2c5de86d6aa115", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3321799308, "max_line_length": 80, "alphanum_fraction": 0.698489202, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2546}
|
"""Tools to Evaluate Recommendation models with Ranking Metrics."""
import numpy as np
def calc_ndcg_at_k(y_true: np.ndarray, y_score: np.ndarray, k: int) -> float:
"""Calculate a nDCG score for a given user."""
y_max_sorted = y_true[y_true.argsort()[::-1]]
y_true_sorted = y_true[y_score.argsort()[::-1]]
num_items = y_true.shape[0]
k = num_items if num_items < k else k
    dcg_score = 2 ** y_true_sorted[0] - 1  # exponential gain on the top item, matching max_score below (the linear first term made nDCG inconsistent)
for i in np.arange(1, k):
dcg_score += y_true_sorted[i] / np.log2(i + 1)
max_score = 2 ** (y_max_sorted[0]) - 1
for i in np.arange(1, k):
max_score += y_max_sorted[i] / np.log2(i + 1)
return dcg_score / max_score
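# Worked example (not in the original): with y_true = [1, 0, 2] and
# y_score = [0.1, 0.4, 0.3], the score-sorted relevances are [0, 2, 1], so
# DCG@3 = (2**0 - 1) + 2/log2(2) + 1/log2(3) ~= 2.63; the ideal ordering
# [2, 1, 0] gives (2**2 - 1) + 1/log2(2) + 0/log2(3) = 4, hence nDCG ~= 0.66.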
class PredictRankings:
"""Predict rankings by trained recommendations."""
def __init__(self, user_embed: np.ndarray, item_embed: np.ndarray, item_bias: np.ndarray) -> None:
"""Initialize Class."""
self.user_embed = user_embed
self.item_embed = item_embed
self.item_bias = item_bias
    def predict(self, users: int, items: np.ndarray) -> np.ndarray:
"""Predict scores for each user-item pairs."""
# predict ranking score for each user
user_emb = self.user_embed[users].reshape(1, self.user_embed.shape[1])
item_emb = self.item_embed[items]
scores = (user_emb @ item_emb.T).flatten() + self.item_bias[items]
return scores
def aoa_evaluator(user_embed: np.ndarray, item_embed: np.ndarray, item_bias: np.ndarray,
test: np.ndarray, at_k: int = 3) -> float:
"""Calculate ranking metrics with average-over-all evaluator."""
users = test[:, 0]
items = test[:, 1]
ratings = test[:, 2]
# define model
model = PredictRankings(user_embed=user_embed, item_embed=item_embed, item_bias=item_bias)
# calculate ranking metrics
results = []
np.random.seed(12345)
for user in set(users):
indices = users == user
items_for_user = items[indices]
ratings_for_user = ratings[indices]
scores = model.predict(users=user, items=items_for_user)
results.append(calc_ndcg_at_k(ratings_for_user, scores, at_k))
return np.mean(results)
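if __name__ == "__main__":
    # Minimal smoke test (not part of the original module): random embeddings
    # and a tiny hand-made test set of (user, item, rating) rows.
    rng = np.random.RandomState(0)
    user_embed, item_embed, item_bias = rng.rand(3, 5), rng.rand(4, 5), rng.rand(4)
    test = np.array([[0, 0, 5], [0, 1, 3], [1, 2, 4], [1, 3, 1]])
    print(aoa_evaluator(user_embed, item_embed, item_bias, test, at_k=2))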
|
{"hexsha": "b68e1b974deb89e243861041fdf92710ed98a0d7", "size": 2191, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluate/evaluator.py", "max_stars_repo_name": "usaito/asymmetric-tri-rec-real", "max_stars_repo_head_hexsha": "bd734362ffa498ba0ed44bdd536083246650949f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-04-27T02:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T06:32:22.000Z", "max_issues_repo_path": "src/evaluate/evaluator.py", "max_issues_repo_name": "aaditkamat/asymmetric-tri-rec-real", "max_issues_repo_head_hexsha": "4cc44a31d12a6b373e71c7da0c2e02d22e864f63", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:47:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:32:39.000Z", "max_forks_repo_path": "src/evaluate/evaluator.py", "max_forks_repo_name": "aaditkamat/asymmetric-tri-rec-real", "max_forks_repo_head_hexsha": "4cc44a31d12a6b373e71c7da0c2e02d22e864f63", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-03-29T07:37:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T02:17:43.000Z", "avg_line_length": 33.196969697, "max_line_length": 102, "alphanum_fraction": 0.6526700137, "include": true, "reason": "import numpy", "num_tokens": 558}
|
#!/ebio/ag-neher/share/programs/bin/python2.7
#
# Script that reads in precomputed repeated predictions of influenza
# and plots the average prediction quality as a function of the diffusion
# constant and the scale parameter gamma.
#
import glob,argparse,sys
sys.path.append('/ebio/ag-neher/share/users/rneher/FluPrediction_code/flu/src')
import test_flu_prediction as test_flu
import numpy as np
import matplotlib.pyplot as plt
import analysis_utils as AU
file_formats = [] #['.svg', '.pdf']
# set matplotlib plotting parameters
plt.rcParams.update(test_flu.mpl_params)
figure_folder = '../figures_ms/'
analysis_folder = test_flu.flu_analysis_folder
# parse the commandline arguments
parser = test_flu.make_flu_parser()
params=parser.parse_args()
params.year='????'
params.sample_size = 100
Dlist = [0.2, 0.5]
glist = [1.0,2.0,3.0, 5.0]
olist = [0.1]
boost = 0.0
base_name, name_mod = test_flu.get_fname(params)
#remove year
base_name = '_'.join(base_name.split('_')[:1]+base_name.split('_')[2:])
base_name = base_name.replace('_????','')
params.collapse = False
# load data for different diffusion constants, distance_scales, and koel boosts
prediction_distance={}
normed_distance={}
metric = 'nuc'
for D in Dlist:
    for gamma in glist:
        for omega in olist:
            params.diffusion, params.gamma, params.omega = D,gamma, omega
            prediction_distance[(D,gamma,omega)]={}
            normed_distance[(D,gamma,omega)]={}
            years, tmp_pred, tmp_normed = AU.load_prediction_data(params, metric)
            prediction_distance[(D,gamma,omega)].update(tmp_pred)
            normed_distance[(D,gamma,omega)].update(tmp_normed)
LLy = AU.laessig_years(years)  # moved below the loading loop: `years` is only defined once the data has been loaded
# make figure showing the dependence on the scale parameter
fig = plt.figure(figsize = (12,9))
ax= plt.subplot(111)
plt.plot(glist, np.ones_like(glist)*normed_distance[(Dlist[0],glist[0],olist[0])][('L&L',boost,'L\&L')][0], c='k', lw=2, label = r"L\&L")
for omega in olist:
for di,D in enumerate(Dlist):
label_mod = r' $D='+str(D)+r'$'
plt.plot(glist, [normed_distance[(D,gamma,omega)][('fitness,terminal nodes',boost,'pred(T)')][0] for gamma in glist],
marker= 'o',ms=10, lw= 2, label = r'top ranked terminal nodes'+label_mod)
# plt.plot(glist, [normed_distance[(D,gamma,omega)][('polarizer,terminal',boost,'')][0] for gamma in glist],
# marker= 'o',ms=10, lw= 2, ls=':', label = r'polarizer external'+label_mod)
plt.plot(glist, [normed_distance[(D,gamma,omega)][('fitness,internal nodes',boost,'pred(I)')][0] for gamma in glist],
marker= 'o',ms=10, lw= 2, ls='--', label = r'top ranked internal nodes'+label_mod)
plt.plot(glist, [normed_distance[(D,gamma,omega)][('expansion, internal nodes', 0.0, 'growth')][0] for gamma in glist],
marker= 'o',ms=10, lw= 2, ls='-.', label = r'expansion'+label_mod)
# plt.plot(glist, [normed_distance[(D,gamma,omega)][('polarizer,internal',boost,'')][0] for gamma in glist],
# marker= 'o',ms=10, lw= 2, ls=':', label = r'polarizer interal'+label_mod)
# boost = 0.0
# for di,D in enumerate(Dlist):
# plt.plot(glist, [normed_distance[(D,gamma,omega)][('internal and expansion',boost,'pred(I)+growth')][0] for gamma in glist],
# marker= 'o',ms=10, lw= 2, label = 'Internal nodes + Koel('+str(boost)+') + growth'+label_mod)
ax.set_xlabel(r'scale parameter $\gamma$')
ax.set_ylabel(r'normalized distance $\bar{d}$')
plt.text(0.02,0.93,'Fig.~4-S1', transform = plt.gca().transAxes, fontsize = 20)
ax.set_ylim([0,1])
ax.set_xlim([min(glist)*0.9,max(glist)*1.1])
#plt.xscale('log')
plt.legend()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4_S1_D_gamma_dependence_'+metric+ff)
##################################################################################
## Fig 4-2 varying gamma
##################################################################################
# make figure
plt.figure(figsize = (12,6))
boost=0.0
D=0.2
title_str = r'Varying $\gamma:\; \bar{d}='\
+', '.join(map(str,[np.round(normed_distance[(D,gamma,omega)][('fitness,terminal nodes',boost,'pred(T)')][0],2)\
for gamma in glist]))+'$' #+r' $ for $\gamma = 1.0, 2.0, 3.0, 5.0$'
#plt.title(title_str, fontsize = 16)
# plot line for random expectation
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal
# the first assignment is immediately overridden by the second; kept for reference
# method, sym, col, shift, label = ('fitness,terminal nodes',0.0,'pred(T)'), 's', 'k', -0.25, 'pred(T)'
method, sym, col, shift, label = ('polarizer,internal',0.0,''), 's', 'k', -0.25, 'pred(T)'
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distance[(D,gamma,omega)][('minimal',boost,'minimal')][yi],
prediction_distance[(D,gamma,omega)][('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
plt.plot(year+np.linspace(-0.5, 0.5,9)[1:-1:2], [prediction_distance[(D,gamma,omega)][(method[0], boost, method[-1])][yi] for gamma in glist],
sym, c= col, ms=8,ls='-', label=label+r' $\bar{d}='+str(np.round(normed_distance[(D,gamma,omega)][method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
#plt.ylabel('nucleodide distance to next season\n(relative to average)')
plt.xlabel('year')
#plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(0.02,0.93,'Fig.~3-S2', transform = plt.gca().transAxes, fontsize = 20)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4_s2_'+base_name+'_'+name_mod+'_gamma_revised'+ff)
|
{"hexsha": "e839cd5cb31c3f4bae724482eba0f946f9ae27e0", "size": 5893, "ext": "py", "lang": "Python", "max_stars_repo_path": "flu/figure_scripts/fig4_s1_parameter_dependence.py", "max_stars_repo_name": "iosonofabio/FitnessInference", "max_stars_repo_head_hexsha": "3de97a9301733ac9e47ebc78f4e76f7530ccb538", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2015-08-24T05:20:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T13:58:31.000Z", "max_issues_repo_path": "flu/figure_scripts/fig4_s1_parameter_dependence.py", "max_issues_repo_name": "iosonofabio/FitnessInference", "max_issues_repo_head_hexsha": "3de97a9301733ac9e47ebc78f4e76f7530ccb538", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-08-24T04:43:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-06T00:33:44.000Z", "max_forks_repo_path": "flu/figure_scripts/fig4_s1_parameter_dependence.py", "max_forks_repo_name": "iosonofabio/FitnessInference", "max_forks_repo_head_hexsha": "3de97a9301733ac9e47ebc78f4e76f7530ccb538", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-30T18:52:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-15T06:47:04.000Z", "avg_line_length": 44.9847328244, "max_line_length": 146, "alphanum_fraction": 0.6329543526, "include": true, "reason": "import numpy", "num_tokens": 1800}
|
[STATEMENT]
lemma hd_sort_remdups: "hd (sort (remdups l)) = hd (sort l)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hd (sort (remdups l)) = hd (sort l)
[PROOF STEP]
by (metis hd_sort_Min remdups_eq_nil_iff set_remdups)
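(* worked instance (annotation): for l = [3, 1, 2, 1],
   hd (sort (remdups l)) = hd [1, 2, 3] = 1 and hd (sort l) = hd [1, 1, 2, 3] = 1 *)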
|
{"llama_tokens": 107, "file": "Extended_Finite_State_Machines_FSet_Utils", "length": 1}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 22:35:56 2021
@author: innat
"""
# ref: https://github.com/VcampSoldiers/Swin-Transformer-Tensorflow
# ref: https://keras.io/examples/vision/swin_transformers/
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model, Sequential, Input, layers, applications
patch_size = (4, 4) # 4-by-4 sized patches
dropout_rate = 0.5 # Dropout rate
num_heads = 8 # Attention heads
embed_dim = 64 # Embedding dimension
num_mlp = 128 # MLP layer size
qkv_bias = True # Convert embedded patches to query, key, and values with a learnable additive value
window_size = 2 # Size of attention window
shift_size = 2 # Size of shifting window
image_dimension = 256 # Initial image size / Input size of the transformer model
num_patch_x = image_dimension // patch_size[0]
num_patch_y = image_dimension // patch_size[1]
def window_partition(x, window_size):
_, height, width, channels = x.shape
patch_num_y = height // window_size
patch_num_x = width // window_size
x = tf.reshape(
x, shape=(-1, patch_num_y, window_size, patch_num_x, window_size, channels)
)
x = tf.transpose(x, (0, 1, 3, 2, 4, 5))
windows = tf.reshape(x, shape=(-1, window_size, window_size, channels))
return windows
def window_reverse(windows, window_size, height, width, channels):
patch_num_y = height // window_size
patch_num_x = width // window_size
x = tf.reshape(
windows,
shape=(-1, patch_num_y, patch_num_x, window_size, window_size, channels),
)
x = tf.transpose(x, perm=(0, 1, 3, 2, 4, 5))
x = tf.reshape(x, shape=(-1, height, width, channels))
return x
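# round-trip sanity check (annotation, not in the original file):
# x = tf.random.uniform((1, 8, 8, 3))
# w = window_partition(x, 2)                        # -> (16, 2, 2, 3)
# tf.debugging.assert_near(window_reverse(w, 2, 8, 8, 3), x)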
class DropPath(layers.Layer):
def __init__(self, drop_prob=None, **kwargs):
super(DropPath, self).__init__(**kwargs)
self.drop_prob = drop_prob
def call(self, x):
input_shape = tf.shape(x)
batch_size = input_shape[0]
rank = x.shape.rank
shape = (batch_size,) + (1,) * (rank - 1)
random_tensor = (1 - self.drop_prob) + tf.random.uniform(shape, dtype=x.dtype)
path_mask = tf.floor(random_tensor)
output = tf.math.divide(x, 1 - self.drop_prob) * path_mask
return output
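# note (annotation): scaling the kept paths by 1/(1 - drop_prob) preserves the
# expected activation, E[output] = x; as written the layer also drops paths at
# inference time (no `training` flag), and drop_prob must be a number, since
# the default None would fail inside call()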
class WindowAttention(layers.Layer):
def __init__(
self, dim, window_size, num_heads, qkv_bias=True, dropout_rate=0.0, **kwargs
):
super(WindowAttention, self).__init__(**kwargs)
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = layers.Dense(dim * 3, use_bias=qkv_bias)
self.dropout = layers.Dropout(dropout_rate)
self.proj = layers.Dense(dim)
def build(self, input_shape):
num_window_elements = (2 * self.window_size[0] - 1) * (
2 * self.window_size[1] - 1
)
self.relative_position_bias_table = self.add_weight(
shape=(num_window_elements, self.num_heads),
initializer=tf.initializers.Zeros(),
trainable=True,
)
coords_h = np.arange(self.window_size[0])
coords_w = np.arange(self.window_size[1])
coords_matrix = np.meshgrid(coords_h, coords_w, indexing="ij")
coords = np.stack(coords_matrix)
coords_flatten = coords.reshape(2, -1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.transpose([1, 2, 0])
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.relative_position_index = tf.Variable(
initial_value=tf.convert_to_tensor(relative_position_index), trainable=False
)
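    # worked example (annotation): for a 2x2 window there are (2*2-1)**2 = 9
    # distinct relative offsets; each (dh, dw) in {-1, 0, 1}^2 maps to bias-table
    # row 3*(dh + 1) + (dw + 1), giving a 4x4 index matrix with values 0..8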
def call(self, x, mask=None):
_, size, channels = x.shape
head_dim = channels // self.num_heads
x_qkv = self.qkv(x)
x_qkv = tf.reshape(x_qkv, shape=(-1, size, 3, self.num_heads, head_dim))
x_qkv = tf.transpose(x_qkv, perm=(2, 0, 3, 1, 4))
q, k, v = x_qkv[0], x_qkv[1], x_qkv[2]
q = q * self.scale
k = tf.transpose(k, perm=(0, 1, 3, 2))
attn = q @ k
num_window_elements = self.window_size[0] * self.window_size[1]
relative_position_index_flat = tf.reshape(
self.relative_position_index, shape=(-1,)
)
relative_position_bias = tf.gather(
self.relative_position_bias_table, relative_position_index_flat
)
relative_position_bias = tf.reshape(
relative_position_bias, shape=(num_window_elements, num_window_elements, -1)
)
relative_position_bias = tf.transpose(relative_position_bias, perm=(2, 0, 1))
attn = attn + tf.expand_dims(relative_position_bias, axis=0)
if mask is not None:
nW = mask.get_shape()[0]
mask_float = tf.cast(
tf.expand_dims(tf.expand_dims(mask, axis=1), axis=0), tf.float32
)
attn = (
tf.reshape(attn, shape=(-1, nW, self.num_heads, size, size))
+ mask_float
)
attn = tf.reshape(attn, shape=(-1, self.num_heads, size, size))
attn = keras.activations.softmax(attn, axis=-1)
else:
attn = keras.activations.softmax(attn, axis=-1)
attn = self.dropout(attn)
x_qkv = attn @ v
x_qkv = tf.transpose(x_qkv, perm=(0, 2, 1, 3))
x_qkv = tf.reshape(x_qkv, shape=(-1, size, channels))
x_qkv = self.proj(x_qkv)
x_qkv = self.dropout(x_qkv)
return x_qkv
class SwinTransformer(layers.Layer):
def __init__(
self,
dim,
num_patch,
num_heads,
window_size=7,
shift_size=0,
num_mlp=1024,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super(SwinTransformer, self).__init__(**kwargs)
self.dim = dim # number of input dimensions
self.num_patch = num_patch # number of embedded patches
self.num_heads = num_heads # number of attention heads
self.window_size = window_size # size of window
self.shift_size = shift_size # size of window shift
self.num_mlp = num_mlp # number of MLP nodes
self.norm1 = layers.LayerNormalization(epsilon=1e-5)
self.attn = WindowAttention(
dim,
window_size=(self.window_size, self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)
self.drop_path = DropPath(dropout_rate)
self.norm2 = layers.LayerNormalization(epsilon=1e-5)
self.mlp = keras.Sequential(
[
layers.Dense(num_mlp),
layers.Activation(keras.activations.gelu),
layers.Dropout(dropout_rate),
layers.Dense(dim),
layers.Dropout(dropout_rate),
]
)
if min(self.num_patch) < self.window_size:
self.shift_size = 0
self.window_size = min(self.num_patch)
def build(self, input_shape):
if self.shift_size == 0:
self.attn_mask = None
else:
height, width = self.num_patch
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
mask_array = np.zeros((1, height, width, 1))
count = 0
for h in h_slices:
for w in w_slices:
mask_array[:, h, w, :] = count
count += 1
mask_array = tf.convert_to_tensor(mask_array)
# mask array to windows
mask_windows = window_partition(mask_array, self.window_size)
mask_windows = tf.reshape(
mask_windows, shape=[-1, self.window_size * self.window_size]
)
attn_mask = tf.expand_dims(mask_windows, axis=1) - tf.expand_dims(
mask_windows, axis=2
)
attn_mask = tf.where(attn_mask != 0, -100.0, attn_mask)
attn_mask = tf.where(attn_mask == 0, 0.0, attn_mask)
self.attn_mask = tf.Variable(initial_value=attn_mask, trainable=False)
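    # annotation: window pairs that mix pixels from different shifted regions get
    # a large negative bias (-100.0), driving their attention weights to ~0 after
    # the softmax, while same-region pairs keep a bias of 0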
def call(self, x):
height, width = self.num_patch
_, num_patches_before, channels = x.shape
x_skip = x
x = self.norm1(x)
x = tf.reshape(x, shape=(-1, height, width, channels))
if self.shift_size > 0:
shifted_x = tf.roll(
x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2]
)
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = tf.reshape(
x_windows, shape=(-1, self.window_size * self.window_size, channels)
)
attn_windows = self.attn(x_windows, mask=self.attn_mask)
attn_windows = tf.reshape(
attn_windows, shape=(-1, self.window_size, self.window_size, channels)
)
shifted_x = window_reverse(
attn_windows, self.window_size, height, width, channels
)
if self.shift_size > 0:
x = tf.roll(
shifted_x, shift=[self.shift_size, self.shift_size], axis=[1, 2]
)
else:
x = shifted_x
x = tf.reshape(x, shape=(-1, height * width, channels))
x = self.drop_path(x)
x = x_skip + x
x_skip = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = x_skip + x
return x
class PatchExtract(layers.Layer):
def __init__(self, patch_size, **kwargs):
super(PatchExtract, self).__init__(**kwargs)
self.patch_size_x = patch_size[0]
        self.patch_size_y = patch_size[1]
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=(1, self.patch_size_x, self.patch_size_y, 1),
strides=(1, self.patch_size_x, self.patch_size_y, 1),
rates=(1, 1, 1, 1),
padding="VALID",
)
patch_dim = patches.shape[-1]
patch_num = patches.shape[1]
return tf.reshape(patches, (batch_size, patch_num * patch_num, patch_dim))
class PatchEmbedding(layers.Layer):
def __init__(self, num_patch, embed_dim, **kwargs):
super(PatchEmbedding, self).__init__(**kwargs)
self.num_patch = num_patch
self.proj = layers.Dense(embed_dim)
self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim)
def call(self, patch):
pos = tf.range(start=0, limit=self.num_patch, delta=1)
return self.proj(patch) + self.pos_embed(pos)
class PatchMerging(tf.keras.layers.Layer):
def __init__(self, num_patch, embed_dim):
super(PatchMerging, self).__init__()
self.num_patch = num_patch
self.embed_dim = embed_dim
self.linear_trans = layers.Dense(2 * embed_dim, use_bias=False)
def call(self, x):
height, width = self.num_patch
_, _, C = x.get_shape().as_list()
x = tf.reshape(x, shape=(-1, height, width, C))
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
x = tf.concat((x0, x1, x2, x3), axis=-1)
x = tf.reshape(x, shape=(-1, (height // 2) * (width // 2), 4 * C))
return self.linear_trans(x)
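# Hypothetical end-to-end wiring of the layers above (annotation, not part of
# the original file), using the module-level hyper-parameters:
# inputs = Input(shape=(image_dimension, image_dimension, 3))
# x = PatchExtract(patch_size)(inputs)
# x = PatchEmbedding(num_patch_x * num_patch_y, embed_dim)(x)
# x = SwinTransformer(dim=embed_dim, num_patch=(num_patch_x, num_patch_y),
#                     num_heads=num_heads, window_size=window_size, shift_size=0,
#                     num_mlp=num_mlp, qkv_bias=qkv_bias, dropout_rate=dropout_rate)(x)
# x = PatchMerging((num_patch_x, num_patch_y), embed_dim=embed_dim)(x)
# outputs = layers.Dense(10, activation="softmax")(layers.GlobalAveragePooling1D()(x))
# model = Model(inputs, outputs)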
|
{"hexsha": "d7c7bf47f010ce3fee0a0fef88c0ee7c967c3613", "size": 12486, "ext": "py", "lang": "Python", "max_stars_repo_path": "swin_blocks.py", "max_stars_repo_name": "zoeyingz/EfficientNet-Hybrid-Swin-Transformer", "max_stars_repo_head_hexsha": "6adbe312e6f2406077fe8234c3c0a25547b4eeb6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-23T17:51:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T17:51:46.000Z", "max_issues_repo_path": "swin_blocks.py", "max_issues_repo_name": "zoeyingz/EfficientNet-Hybrid-Swin-Transformer", "max_issues_repo_head_hexsha": "6adbe312e6f2406077fe8234c3c0a25547b4eeb6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swin_blocks.py", "max_forks_repo_name": "zoeyingz/EfficientNet-Hybrid-Swin-Transformer", "max_forks_repo_head_hexsha": "6adbe312e6f2406077fe8234c3c0a25547b4eeb6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4023323615, "max_line_length": 111, "alphanum_fraction": 0.5661540926, "include": true, "reason": "import numpy", "num_tokens": 3058}
|
SUBROUTINE WGEOM(IA,IB,X,Y,Z,NM,NP,NAT,NSA,NPLA,VGA,BDSK,
2 ZLDA,NWG,VG,ZLD,WV,NFS1,NFS2)
COMMON /DIPOLES/ H1,H2,S
DIMENSION IA(1),IB(1),X(1),Y(1),Z(1),NSA(1),NPLA(1),BDSK(1)
COMPLEX VGA(1),ZLDA(1),VG(1),ZLD(1)
DATA H1,H2,S /1.,1.,.5/
C
C GEOMETRY FOR WIND
C
PRINT*,'WGEOM5, FOR MONOPOLE MAG BOOM AND ONE ANTENNA'
C
C SPECIFY H = WIRE LENGTH AND NM1 = NUMBER OF SEGMENTS IN EACH ONE.
NM1 = 6
C INSURE THAT NM1 IS AN EVEN NUMBER
NM1 = 2*((NM1+1)/2)
NM = 2*NM1
C THE NUMBER OF POINTS IS
NP = 2*NM1+NINTER+3
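C     NOTE: NINTER (NUMBER OF INTERCONNECT SEGMENTS) IS NEVER ASSIGNED IN
C     THIS ROUTINE, SO IT IS ASSUMED TO BE SET ELSEWHERE (E.G. VIA COMMON)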
NP1 = NM1+1
C THE SEGMENT SIZE IS
H = H1
DH = H/NM1
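C     E.G. WITH NM1 = 6 AND H = H1 = 1.0, DH = 1./6. AND Z RUNS FROM -0.5 TO +0.5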
C DEFINE COORDINATES OF NP POINTS AND NM SEGMENTS
DO I = 1,NP1
X(I) = 0.
Y(I) = 0.
Z(I) = (-H/2.0) + (I-1)*DH
print*,i,z(i)
IF(I.NE.NP1) THEN
IA(I) = I
IB(I) = I+1
ENDIF
ENDDO
C DEFINE GENERATOR LOCATION AND VALUE
IGN = (NM1/2) + 1
VG(IGN) = CMPLX(1.,0.)
ZLD(IGN) = CMPLX(0.,0.)
C INDICATE TWO-PORT COUPLING COMPUTATION DESIRED
NFS1 = IGN
C THE SEGMENT SIZE IS
H = H2
DH = H/NM1
C DEFINE COORDINATES OF NP POINTS AND NM SEGMENTS
DO I = NP1+1,NP
X(I) = S
Y(I) = 0.
Z(I) = (-H/2.0) + (I-1-NP1)*DH
print*,i,z(i)
IF(I.NE.NP1) THEN
IA(I) = I
IB(I) = I+1
ENDIF
ENDDO
C DEFINE GENERATOR LOCATION AND VALUE
IGN = (NM1/2) + 1 + NP1
VG(IGN) = CMPLX(1.,0.)
ZLD(IGN) = CMPLX(0.,0.)
C DO INTERCONNECTION
DH = S/NINTER
X(NP+1) = X(IGN)
Y(NP+1) = Y(IGN)
Z(NP+1) = Z(IGN)
DO I = NP+1,NP+2+NINTER
X(I) = X(I-1) + DH
Y(I) = Y(I-1)
Z(I) = Z(I-1)
ZLD(I) = CMPLX(0.,1.E6)
print*,i,X(I),z(i)
IF(I.NE.NP+2+NINTER) THEN
IA(I) = I
IB(I) = I+1
ENDIF
ENDDO
C INDICATE NO ATTACHMENTS
NAT=0
C INDICATE TWO-PORT COUPLING COMPUTATION DESIRED
NFS2 = IGN
print*,'wgeom,np,nm,nfs1,nfs2',np,nm,nfs1,nfs2
RETURN
END
|
{"hexsha": "a5c54be7a8a574786da8296b6fd3a06d1908a486", "size": 1764, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "WAVES_VMS_Fortran/PJK_Fortran/wind_dir/wgeom5_2.for", "max_stars_repo_name": "lynnbwilsoniii/Wind_Decom_Code", "max_stars_repo_head_hexsha": "ef596644fe0ed3df5ff3b462602e7550a04323e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "WAVES_VMS_Fortran/PJK_Fortran/wind_dir/wgeom5_2.for", "max_issues_repo_name": "lynnbwilsoniii/Wind_Decom_Code", "max_issues_repo_head_hexsha": "ef596644fe0ed3df5ff3b462602e7550a04323e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "WAVES_VMS_Fortran/PJK_Fortran/wind_dir/wgeom5_2.for", "max_forks_repo_name": "lynnbwilsoniii/Wind_Decom_Code", "max_forks_repo_head_hexsha": "ef596644fe0ed3df5ff3b462602e7550a04323e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.512195122, "max_line_length": 67, "alphanum_fraction": 0.5986394558, "num_tokens": 883}
|
import mdtraj as md
import time
import numpy as np
"""
A simple way to get conformations from a trajectory.
Provide phi and psi angle pairs to get PDBs of the molecule in these conformations.
These should correspond to energy wells on the free energy surface.
You are much better at peak picking than any algorithm I could write.
"""
TRAJ = "outputs/production_aaa_capped_amber_equilibrated_amber_112650_010322/trajectory.dcd"
TOP = "outputs/production_aaa_capped_amber_equilibrated_amber_112650_010322/topology.pdb"
OUT = "outputs/aaa_conformations"
# phi and psi angles. we'll match angles after rounding to 0.1 rad (roughly +-3 deg) to make sure we find something
angle_pairs = (
(-150, 150),
(-65, 150),
(-65, -30),
(-150, -30),
(55, 30)
)
pairs = np.round(np.deg2rad(angle_pairs), 1)
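# worked example of the rounding above (annotation): np.deg2rad(-150) = -2.61799...
# rounds to -2.6; frame dihedrals are rounded the same way below, so a frame
# matches only when every angle agrees to within ~0.05 rad (~2.9 deg)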
found_pairs = np.zeros(len(angle_pairs))
print("Initialising...")
stride = 10000
t = md.iterload(TRAJ, top=TOP, chunk=1, stride=stride)
total_frames = 0
for _ in t:
total_frames += 1
print(f"Counting chunks... {total_frames} ", end="\r")
total_frames *= stride
print(f"{total_frames} frames total ")
print("Starting... ")
outfile = md.formats.PDBTrajectoryFile(OUT, "w")
chunk_size = 500
reference=None
traj = md.iterload(TRAJ, top=TOP, chunk=chunk_size)
time_start = time.time()
for i, chunk in enumerate(traj):
    if reference is None:
reference = chunk
chunk = chunk.superpose(
reference
)
_, chunk_phis = md.compute_phi(chunk)
_, chunk_psis = md.compute_psi(chunk)
# print(dihedrals.shape)
for phi, psi in pairs:
dihedrals = np.hstack((chunk_phis, chunk_psis))
np.round(dihedrals, 1, dihedrals)
dihedrals[:, :3] -= phi
dihedrals[:, 3:] -= psi
match_dihedrals = np.where(~np.any(dihedrals, axis=1))[0]
# and ~np.any(chunk_psis - psi, axis=1)
if match_dihedrals.size > 0:
print(f"Found match for pair {phi}, {psi}: idx {match_dihedrals}")
            # NOTE: writing is currently disabled; uncomment to actually save matching frames
            # outfile.write(
            #     chunk.xyz,
            #     cell_lengths = chunk.unitcell_lengths,
            #     cell_angles = chunk.unitcell_angles)
speed = chunk_size // (time.time() - time_start)
time_start = time.time()
frames_remaining = total_frames - (i * chunk_size)
print(f"{i*100*chunk_size/total_frames:.1f}%, {speed:.1f} frames per sec, {frames_remaining} frames remaining ", end="\r")
outfile.close()
print(f"\n\nDone, saved to {OUT}")
|
{"hexsha": "462a707ae7046a286321633e9f382edc6f583902", "size": 2457, "ext": "py", "lang": "Python", "max_stars_repo_path": "extract_conformations.py", "max_stars_repo_name": "meyresearch/ANI-Peptides", "max_stars_repo_head_hexsha": "84684b484119699cb5458f4c2aed5fa8a482c315", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-31T16:43:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:43:55.000Z", "max_issues_repo_path": "extract_conformations.py", "max_issues_repo_name": "meyresearch/ANI-Peptides", "max_issues_repo_head_hexsha": "84684b484119699cb5458f4c2aed5fa8a482c315", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extract_conformations.py", "max_forks_repo_name": "meyresearch/ANI-Peptides", "max_forks_repo_head_hexsha": "84684b484119699cb5458f4c2aed5fa8a482c315", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1012658228, "max_line_length": 142, "alphanum_fraction": 0.6711436711, "include": true, "reason": "import numpy", "num_tokens": 698}
|
import numpy as np
from load_screens import load_screens
from scipy.special import stdtr
# Load batch-corrected screens
screens = load_screens()
# Remove cell lines with any missing genes
# (not required for DepMap 18Q3, but is for more recent releases)
# You can use other strategies to remove NaNs instead, like imputing,
# removing genes with any missing cell lines
screens.dropna(axis=1, inplace=True)
# Warp screen data and intercept based on covariance of screens
cholsigmainv = np.linalg.cholesky(np.linalg.inv(np.cov(screens.T)))
warped_screens = screens.values @ cholsigmainv
warped_intercept = cholsigmainv.sum(axis=0)
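# Sketch of the algebra (annotation, not from the original repo): with Sigma the
# cell-line covariance and C = chol(Sigma^{-1}) lower-triangular, so that
# Sigma^{-1} = C C^T, right-multiplying the screens by C whitens the noise across
# cell lines (C^T Sigma C = I), turning generalized least squares into ordinary
# least squares on the warped data.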
# Then just run linear regression; this implementation is based on
# https://pingouin-stats.org/generated/pingouin.linear_regression.html
def linear_regression(warped_screens, warped_intercept):
GLS_coef = np.empty((len(warped_screens), len(warped_screens)))
GLS_se = np.empty((len(warped_screens), len(warped_screens)))
ys = warped_screens.T
for gene_index in range(len(warped_screens)):
X = np.stack((warped_intercept, warped_screens[gene_index]), axis=1)
coef, residues = np.linalg.lstsq(X, ys, rcond=None)[:2]
df = warped_screens.shape[1] - 2
GLS_coef[gene_index] = coef[1]
GLS_se[gene_index] = \
np.sqrt(np.linalg.pinv(X.T @ X)[1, 1] * residues / df)
return GLS_coef, GLS_se
GLS_coef, GLS_se = linear_regression(warped_screens, warped_intercept)
df = warped_screens.shape[1] - 2
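# two-sided p-values from the Student t CDF: p = 2 * F_t(-|coef/se|, df)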
GLS_p = 2 * stdtr(df, -np.abs(GLS_coef / GLS_se))
np.fill_diagonal(GLS_p, 1)
# Save everything
np.save('GLS_p.npy', GLS_p)
np.save('GLS_sign.npy', np.sign(GLS_coef))
screens.index.to_series().to_csv('genes.txt', index=False, header=False)
|
{"hexsha": "e88d049b942882fa1374fdd36567215a75d418d2", "size": 1736, "ext": "py", "lang": "Python", "max_stars_repo_path": "gene_pairs.py", "max_stars_repo_name": "kundajelab/coessentiality", "max_stars_repo_head_hexsha": "ae462f3073469245c84d85c7b49a4d5671f6b2a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-12-30T19:14:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T01:29:38.000Z", "max_issues_repo_path": "gene_pairs.py", "max_issues_repo_name": "kundajelab/coessentiality", "max_issues_repo_head_hexsha": "ae462f3073469245c84d85c7b49a4d5671f6b2a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-09T16:30:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T19:17:00.000Z", "max_forks_repo_path": "gene_pairs.py", "max_forks_repo_name": "kundajelab/coessentiality", "max_forks_repo_head_hexsha": "ae462f3073469245c84d85c7b49a4d5671f6b2a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-11-27T01:23:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T05:01:16.000Z", "avg_line_length": 36.1666666667, "max_line_length": 76, "alphanum_fraction": 0.7367511521, "include": true, "reason": "import numpy,from scipy", "num_tokens": 493}
|
""" Prioritized Experience Replay implementations.
1. ProportionalSampler implements the proportional-based prioritization
using the SumTree in `data_structures.py`.
2. RankSampler implements the rank-based prioritization using the
PriorityQueue in `data_structures.py`.
"""
import torch
import numpy as np
from .data_structures import SumTree
from .mem_efficient_experience_replay import MemoryEfficientExperienceReplay
class ProportionalSampler:
""" Implements the proportional-based sampling in [Prioritized
Experience Replay](https://arxiv.org/pdf/1511.05952.pdf).
"""
# pylint: disable=too-many-instance-attributes, bad-continuation
# nine attrs is reasonable in this case.
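    # Hypothetical usage sketch (constructor arguments of the replay buffer are
    # assumptions, not part of this module):
    #   er = MemoryEfficientExperienceReplay(capacity=100_000, batch_size=32)
    #   per = ProportionalSampler(er, alpha=0.6, beta=0.4, optim_steps=1_000_000,
    #                             async_memory=False)
    #   per.push(transition)
    #   batch, idxs, weights = per.sample()
    #   per.update(idxs, abs_td_errors)  # new priorities from the TD errors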
def __init__( # pylint: disable=bad-continuation
self,
er,
alpha=0.6,
beta=None,
async_memory: bool = True,
optim_steps=None,
epsilon=0.000_000_1,
**kwargs,
) -> None:
if not isinstance(er, MemoryEfficientExperienceReplay) or er.is_async:
raise RuntimeError(
"ER must be non-async MemoryEfficentExperienceReplay."
)
self._er = er
self._sumtree = SumTree(capacity=self._er.capacity)
self.__alpha = alpha
self.__beta = beta
if self.__beta is not None and optim_steps:
print(self.__beta, optim_steps)
self.__beta_step = (1 - self.__beta) / optim_steps
else:
self.__beta_step = None
self.__epsilon = epsilon
self.__max = 1
if async_memory:
import concurrent.futures
self._executor = concurrent.futures.ThreadPoolExecutor(
max_workers=1
)
self.push = self._async_push
self.sample = self._async_sample
self.push_and_sample = self._async_push_and_sample
self._sample_result = None
self._push_result = None
else:
self.push = self._push
self.sample = self._sample
self.push_and_sample = self._push_and_sample
self.__is_async = async_memory
def __wait(self):
if self._push_result is not None:
self._push_result.result()
self._push_result = None
if self._sample_result is not None:
self._sample_result.result()
def _push(self, transition, priority=None):
pos = self._er.push(transition)
priority = priority or (self.__epsilon ** self.__alpha + self.__max)
self._sumtree.update(pos, priority)
def _async_push(self, transition, priority=None):
self.__wait()
self._push_result = self._executor.submit(
self._push, transition, priority
)
def _sample(self):
idxs = []
batch_size = self.batch_size
        probs = []  # keep the un-normalized probabilities
mem_size = len(self)
total_prob = self._sumtree.get_sum()
segment_sz = total_prob / batch_size
for i in range(batch_size):
start = i * segment_sz
end = (i + 1) * segment_sz
idx, prob = self._sumtree.get(np.random.uniform(start, end))
idxs.append(idx)
probs.append(prob)
# compute the importance sampling weights
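        # standard PER correction: w_i = (N * P(i)) ** (-beta), normalized by max_j w_j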
if self.__beta is not None:
weights = torch.tensor(probs) / total_prob # pylint: disable=E1102
weights = (mem_size * weights) ** -self.__beta
weights /= weights.max()
else:
# we basically disable importance sampling
weights = torch.tensor(probs).fill_(1) # pylint: disable=E1102
if self.__beta_step:
# anneal the beta
self.__beta = min(self.__beta + self.__beta_step, 1)
return self._er.sample(gods_idxs=idxs), idxs, weights
def _async_sample(self):
self.__wait()
if self._sample_result is None:
batch = self._sample()
else:
batch = self._sample_result.result()
self._sample_result = self._executor.submit(self._sample)
return batch
def _push_and_sample(self, transition: list):
if isinstance(transition[0], list):
for trans in transition:
self._push(trans)
else:
self._push(transition)
return self._sample()
def _async_push_and_sample(self, transition):
self.__wait()
if self._sample_result is not None:
batch = self._sample_result.result()
else:
batch = self._sample()
self._sample_result = self._executor.submit(
self._push_and_sample, transition
)
return batch
def update(self, idxs, priorities):
""" Updates the priorities of the last transitions sampled. """
if self.__is_async:
self.__wait()
for priority, idx in zip(priorities, idxs):
priority = (priority + self.__epsilon) ** self.__alpha
self._sumtree.update(idx, priority)
self.__max = max(priority, self.__max)
@property
def batch_size(self) -> int:
""" Batch size, duh!
"""
return self._er.batch_size
def __len__(self):
return len(self._er)
def __str__(self):
props = (
"capacity={0}, size={1}, α={2}, β={3}, batch={4}, async={5}"
).format(
self._er.capacity,
len(self._er),
self.__alpha,
self.__beta,
self.batch_size,
self.__is_async,
)
return f"ProportionalExperienceReplay({props})"
|
{"hexsha": "4cc5f79525ea912a536923517a32347a5c98fd94", "size": 5670, "ext": "py", "lang": "Python", "max_stars_repo_path": "wintermute/replay/prioritized_replay.py", "max_stars_repo_name": "floringogianu/wintermute", "max_stars_repo_head_hexsha": "097aed1017192dff616bcd9c5083bb74c4aa71f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-04-02T11:33:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-04T16:29:10.000Z", "max_issues_repo_path": "wintermute/replay/prioritized_replay.py", "max_issues_repo_name": "floringogianu/wintermute", "max_issues_repo_head_hexsha": "097aed1017192dff616bcd9c5083bb74c4aa71f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-12-30T15:16:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-19T09:50:59.000Z", "max_forks_repo_path": "wintermute/replay/prioritized_replay.py", "max_forks_repo_name": "floringogianu/wintermute", "max_forks_repo_head_hexsha": "097aed1017192dff616bcd9c5083bb74c4aa71f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3259668508, "max_line_length": 79, "alphanum_fraction": 0.5989417989, "include": true, "reason": "import numpy", "num_tokens": 1273}
|
from sklearn import linear_model
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import math
import os
from EnergyIntensityIndicators.pull_eia_api import GetEIAData
from EnergyIntensityIndicators.Residential.residential_floorspace import ResidentialFloorspace
from EnergyIntensityIndicators.utilities.dataframe_utilities \
import DFUtilities as df_utils
class WeatherFactors:
def __init__(self, sector, directory, activity_data=None, residential_floorspace=None, nominal_energy_intensity=None,
end_year=2018, projections=False):
self.end_year = end_year
self.directory = directory
self.sector = sector
self.activity_data = activity_data
self.nominal_energy_intensity = nominal_energy_intensity
self.residential_floorspace = residential_floorspace
self.eia_data = GetEIAData(self.sector)
self.projections = projections
print("WEATHER FACTORS os.getcwd()", os.getcwd())
# self.lmdi_prices = pd.read_excel(os.path.join(
# "..", 'Indicators_Spreadsheets_2020',
# 'EnergyPrices_by_Sector_010820_DBB.xlsx'
# ), sheet_name='LMDI-Prices', header=14, usecols='A:B, EY')
self.lmdi_prices = pd.read_excel(os.path.join(
os.getcwd(), 'Indicators_Spreadsheets_2020',
'EnergyPrices_by_Sector_010820_DBB.xlsx'
), sheet_name='LMDI-Prices', header=14, usecols='A:B, EY')
self.regions_subregions = ['northeast', 'new_england', 'middle_atlantic', 'midwest',
'east_north_central', 'west_north_central', 'south',
'south_atlantic', 'east_south_central', 'west_south_central',
'west', 'mountain', 'pacific']
self.sub_regions_dict = {'northeast': ['New England', 'Middle Atlantic'],
'midwest': ['East North Central', 'West North Central'],
'south': ['South Atlantic', 'East South Central', 'West South Central'],
'west': ['Mountain', 'Pacific']}
@staticmethod
def adjust_data(subregions, hdd_by_division, hdd_activity_weights, cooling=True, cdd_by_division=None, \
cdd_activity_weights=None, use_weights_1961_90=True):
"""Calculate weights for adjusted weather factors prediction
"""
years_1961_90 = list(range(1961, 1990 + 1))
        years_1981_2010 = list(range(1981, 2010 + 1))
if cooling:
cdd_by_division = cdd_by_division.set_index('Year')
cdd_by_division.index = cdd_by_division.index.astype(int)
averages_1961_90_cooling = cdd_by_division.loc[years_1961_90, :].mean(axis=0)
averages_1981_2010_cooling = cdd_by_division.loc[years_1981_2010, :].mean(axis=0)
hdd_by_division = hdd_by_division.set_index('Year')
hdd_by_division.index = hdd_by_division.index.astype(int)
averages_1961_90_heating = hdd_by_division.loc[years_1961_90, :].mean(axis=0)
averages_1981_2010_heating = hdd_by_division.loc[years_1981_2010, :].mean(axis=0)
all_s_weights_heating = []
all_s_weights_cooling = []
for s in subregions:
if use_weights_1961_90:
subregion_weights_heating = averages_1961_90_heating.loc[s] * hdd_activity_weights[s]
if cooling:
subregion_weights_cooling = averages_1961_90_cooling.loc[s] * cdd_activity_weights[s]
all_s_weights_cooling.append(subregion_weights_cooling)
else:
subregion_weights_heating = averages_1981_2010_heating.loc[s] * hdd_activity_weights[s]
if cooling:
subregion_weights_cooling = averages_1981_2010_cooling.loc[s] * cdd_activity_weights[s]
all_s_weights_cooling.append(subregion_weights_cooling)
all_s_weights_heating.append(subregion_weights_heating)
weights_dict = dict()
if cooling:
weights_cooling = sum(all_s_weights_cooling)
weights_dict['cooling'] = weights_cooling
weights_heating = sum(all_s_weights_heating)
weights_dict['heating'] = weights_heating
return weights_dict
def process_prices(self, weather_factors_df):
"""Process price data
"""
lmdi_prices = self.lmdi_prices
selected_variable = [1] * len(weather_factors_df)
return selected_variable
@staticmethod
def cbecs_1995_shares():
"""Calculate fuels and elec shares for the commercial sector from CBECS 1995 data
"""
electricty_consumption_tbtu = {'Northeast': 436, 'Midwest': 558, 'South': 1027, 'West': 587}
electricty_consumption_tbtu['Total'] = sum(electricty_consumption_tbtu.values())
electricity_df = pd.DataFrame.from_dict(electricty_consumption_tbtu, orient='index', \
columns=['electricity_consumption_tbtu'])
energy_tbtu = {'Northeast': 1035, 'Midwest': 1497, 'South': 1684, 'West': 1106}
energy_tbtu['Total'] = sum(energy_tbtu.values())
energy_df = pd.DataFrame.from_dict(energy_tbtu, orient='index', columns=['energy'])
shares_df = energy_df.merge(electricity_df, left_index=True, right_index=True, how='outer')
shares_df['elec_share'] = shares_df.electricity_consumption_tbtu.divide(shares_df.loc['Total', \
'electricity_consumption_tbtu'])
shares_df['fuel_consumption'] = shares_df.energy.subtract(shares_df.electricity_consumption_tbtu)
shares_df['fuels_share'] = shares_df.fuel_consumption.divide(shares_df.loc['Total', 'fuel_consumption'])
return shares_df
@staticmethod
def recs_1993_shares():
"""Calculate fuels and elec shares for the residential sector from RECS 1993 data
"""
electricty_consumption_tbtu = {'Northeast': 470, 'Midwest': 740, 'South': 1510, 'West': 560}
electricty_consumption_tbtu['Total'] = sum(electricty_consumption_tbtu.values())
electricity_df = pd.DataFrame.from_dict(electricty_consumption_tbtu, orient='index', \
columns=['electricity_consumption_tbtu'])
energy_tbtu = {'Northeast': 2380, 'Midwest': 3130, 'South': 2950, 'West': 1550}
energy_tbtu['Total'] = sum(energy_tbtu.values())
energy_df = pd.DataFrame.from_dict(energy_tbtu, orient='index', columns=['energy'])
shares_df = energy_df.merge(electricity_df, left_index=True, right_index=True, how='outer')
shares_df['elec_share'] = shares_df.electricity_consumption_tbtu.divide(shares_df.loc['Total', \
'electricity_consumption_tbtu'])
shares_df['fuel_consumption'] = shares_df.energy.subtract(shares_df.electricity_consumption_tbtu)
shares_df['fuels_share'] = shares_df.fuel_consumption.divide(shares_df.loc['Total', 'fuel_consumption'])
return shares_df
def regional_shares(self, dataframe, cols):
"""Calulate shares of regional totals by subregion
"""
dataframe = dataframe.set_index('regions_subregions')
weights_data = dict()
for col in cols:
shares_dict = dict()
for r_, subregions in self.sub_regions_dict.items():
subregions = [s.lower().replace(' ', '_') for s in subregions]
regions_ = subregions + [r_]
region_total = dataframe.loc[r_, col]
for r in regions_:
share_value = dataframe.loc[r, col] / region_total
shares_dict[r] = share_value
weights_data[col] = shares_dict
return weights_data
def gather_weights_data(self):
"""Calculate weights to aggregate subregions into four regions
"""
if self.sector == 'residential':
electricity_data = {'total_elec_tbtu': {'northeast': 470, 'midwest': 740,
'south': 1510, 'west': 560},
'heating_tbtu': {'northeast': 12 * 3.412, 'midwest': 22 * 3.412,
'south': 61 * 3.412, 'west': 25 * 3.412},
'cooling_tbtu': {'northeast': 40, 'midwest': 80,
'south': 310, 'west': 30}}
fuels_data = {'all_energy_tbtu': {'northeast': 2380, 'midwest': 3130,
'south': 2950, 'west': 1550},
'electricity_tbtu': {'northeast': 470, 'midwest': 740,
'south': 1510, 'west': 560},
'heating_all_energy_tbtu': {'northeast': 1490, 'midwest': 1920,
'south': 1210, 'west': 700}}
# Residential Heating Households Millions
heating_activity = [4.1, 1, 3.1, 5.8, 3.5, 2.4, 18.8, 10.7, 3.4, 4.8, 8.3, 2, 6.3]
# Residential Cooling Households Millions
cooling_activity = [10.9, 2.1, 8.8, 16.4, 10.8, 5.6, 29.4, 15, 5.3, 9.2, 7.1, 2.1, 5.1]
all_energy = [19.1, 4.9, 14.2, 23.2, 16.3, 6.9, 32.8, 16.8, 5.9, 10.1, 19.4, 5.3, 14.1]
electricity = [1.9, 0.5, 1.4, 2.9, 1.6, 1.3, 14.6, 8.7, 2.5, 3.4, 5.6, 1.4, 4.2]
elif self.sector == 'commercial':
electricity_data = {'total_elec_tbtu': {'northeast': 436, 'midwest': 558,
'south': 1027, 'west': 587},
'heating_tbtu': {'northeast': 18, 'midwest': 23,
'south': 43, 'west': 28},
'cooling_tbtu': {'northeast': 44, 'midwest': 60,
'south': 172, 'west': 64}}
fuels_data = {'all_energy_tbtu': {'northeast': 1035, 'midwest': 1497,
'south': 1684, 'west': 1106},
'electricity_tbtu': {'northeast': 436, 'midwest': 558,
'south': 1027, 'west': 587},
'heating_all_energy_tbtu': {'northeast': 385, 'midwest': 668, 'south': 376,
'west': 275}}
# Commercial Heating Floorspace Million SF
heating_activity = [657, 137, 520, 779, 345, 434, 3189, 1648, 1140, 401, 1219, 469, 750]
# Commercial Cooling Floorspace Million SF
cooling_activity = [5919, 1472, 4447, 10860, 7301, 3559, 13666, 6512, 3265, 3889, 7058, 2812, 4246]
all_energy = [7661, 2031, 5630, 10860, 7301, 3559, 13666, 6512, 3265, 3889, 7065, 2819, 4246]
electricity = [657, 137, 520, 779, 345, 434, 3189, 1648, 1140, 401, 1219, 469, 750]
else:
return None
weights_data_ = {'regions_subregions': self.regions_subregions, 'heating_activity': heating_activity,
'cooling_activity': cooling_activity, 'all_energy': all_energy, 'electricity': electricity}
weights_df = pd.DataFrame(data=weights_data_)
weights_df['fuels'] = weights_df['all_energy'].subtract(weights_df['electricity'])
return weights_df
    def heating_cooling_degree_days(self, type_day):
        """Collect degree-day data (HDD or CDD) by census division from the EIA AEO API"""
        regions = ['ENC', 'ESC', 'MATL', 'MTN', 'NENGL', 'PCF', 'SATL', 'WNC', 'WSC', 'USA']
        regions_abbrev_dict = {'ENC': 'east_north_central', 'ESC': 'east_south_central', 'MATL': 'middle_atlantic',
                               'MTN': 'mountain', 'NENGL': 'new_england', 'PCF': 'pacific', 'SATL': 'south_atlantic',
                               'WNC': 'west_north_central', 'WSC': 'west_south_central', 'USA': 'National'}
        dd_data = []
        for region in regions:
            if self.sector == 'residential':
                # FIXME: the original series id used an undefined name in the first
                # key segment; type_day is assumed here, but the intended value is unclear
                standard_id = f'AEO.2020.AEO2019REF.KEI_{type_day}_RESD_NA_NA_NA_{region}_{type_day}.A'
            elif self.sector == 'commercial':
                standard_id = f'AEO.2020.AEO2019REF.KEI_NA_COMM_NA_NA_NA_{region}_{type_day}.A'
            r_df = self.eia_data.eia_api(id_=standard_id, id_type='series')
            dd_data.append(r_df)
data_df = df_utils().merge_df_list(dd_data)
return data_df
def heating_cooling_data(self):
"""Collect heating and cooling data (HDD, CDD)"""
if not self.projections:
try:
hdd_by_division_historical = pd.read_csv('./EnergyIntensityIndicators/Data/historical_hdd_census_division.csv').set_index('Year')
cdd_by_division_historical = pd.read_csv('./EnergyIntensityIndicators/Data/historical_cdd_census_division.csv').set_index('Year')
except FileNotFoundError:
hdd_by_division_historical = pd.read_csv('./Data/historical_hdd_census_division.csv').set_index('Year')
cdd_by_division_historical = pd.read_csv('./Data/historical_cdd_census_division.csv').set_index('Year')
else:
hdd_by_division_historical = self.heating_cooling_degree_days(type_day='HDD')
cdd_by_division_historical = self.heating_cooling_degree_days(type_day='CDD')
hdd_by_division = self.eia_data.eia_api(id_='1566347', id_type='category')
hdd_to_drop = [c for c in list(hdd_by_division.columns) if 'Monthly' in c]
hdd_by_division = hdd_by_division.drop(hdd_to_drop, axis=1)
hdd_rename_dict = {c: c.replace(', Annual, Number', '') for c in list(hdd_by_division.columns)}
hdd_by_division = hdd_by_division.rename(columns=hdd_rename_dict)
hdd_by_division = pd.concat([hdd_by_division_historical, hdd_by_division], sort=True)
cdd_by_division = self.eia_data.eia_api(id_='1566348', id_type='category')
cdd_to_drop = [c for c in list(cdd_by_division.columns) if 'Monthly' in c]
cdd_by_division = cdd_by_division.drop(cdd_to_drop, axis=1)
cdd_rename_dict = {c: c.replace(', Annual, Number', '') for c in list(cdd_by_division.columns)}
cdd_by_division = cdd_by_division.rename(columns=cdd_rename_dict)
cdd_by_division = pd.concat([cdd_by_division_historical, cdd_by_division], sort=True)
title_case_regions = [s.replace('_', ' ').title() for s in self.regions_subregions]
hdd_names = [f'Heating Degree-Days, {r}' for r in title_case_regions]
cdd_names = [f'Cooling Degree-Days, {r}' for r in title_case_regions]
hdd_new_names_dict = {name: name_title for name, name_title in zip(hdd_names, title_case_regions)}
cdd_new_names_dict = {name: name_title for name, name_title in zip(cdd_names, title_case_regions)}
hdd_by_division = hdd_by_division.rename(columns=hdd_new_names_dict)
cdd_by_division = cdd_by_division.rename(columns=cdd_new_names_dict)
return hdd_by_division, cdd_by_division
def estimate_regional_shares(self):
"""Spreadsheet equivalent: Commercial --> 'Regional Shares'
assumed commercial floorspace in each region follows same trends as population or housing units"""
regions = ['Northeast', 'Midwest', 'South', 'West']
try:
cbecs_data = pd.read_csv('./EnergyIntensityIndicators/Data/cbecs_data_millionsf.csv').set_index('Year')
except FileNotFoundError:
cbecs_data = pd.read_csv('./Data/cbecs_data_millionsf.csv').set_index('Year')
cbecs_data.index = cbecs_data.index.astype(str)
cbecs_years = list(cbecs_data.index)
cbecs_data = cbecs_data.rename(columns={'Midwest ': 'Midwest', ' South': 'South', ' West': 'West'})
cbecs_data.loc['1979', regions] = cbecs_data.loc['1983', regions].subtract([826, 972, 2665, 1212])
cbecs_data.loc['1979', ['U.S.']] = sum(cbecs_data.loc['1979', regions].values)
cbecs_data['U.S. (calc)'] = cbecs_data.sum(axis=1)
comm_regional_shares = cbecs_data.drop(['U.S.', 'U.S. (calc)'], axis=1).divide(cbecs_data['U.S. (calc)'].values.reshape(len(cbecs_data), 1))
comm_regional_shares_ln = np.log(comm_regional_shares)
residential_data = ResidentialFloorspace(end_year=self.end_year) # change to pull from residential().activity()
final_results_total_floorspace_regions, regional_estimates_all, avg_size_all_regions = residential_data.final_floorspace_estimates()
regional_dfs = [regional_estimates_all[r][['Total']].rename(columns={'Total': r}) for r in regions]
residential_housing_units = df_utils().merge_df_list(regional_dfs)
residential_housing_units['U.S.'] = residential_housing_units.sum(axis=1)
residential_housing_units.index = residential_housing_units.index.astype(str)
regional_shares_residential_housing_units = residential_housing_units.drop('U.S.', axis=1).divide(residential_housing_units['U.S.'].values.reshape(len(residential_housing_units), 1))
regional_shares_residential_housing_units_ln = np.log(regional_shares_residential_housing_units)
regional_shares_residential_housing_units_cbecs_years = regional_shares_residential_housing_units.loc[cbecs_years, :]
regional_shares_residential_housing_units_cbecs_years_ln = np.log(regional_shares_residential_housing_units_cbecs_years)
predictions_df = pd.DataFrame(columns=comm_regional_shares.columns, index=residential_housing_units.index)
for region in comm_regional_shares.columns:
x_values = comm_regional_shares_ln[region].values
X = x_values.transpose()
y = regional_shares_residential_housing_units_cbecs_years_ln[region].values
p = np.polyfit(X, y, 1)
predictions_df[region] = np.exp(regional_shares_residential_housing_units_ln[region].multiply(p[0]).add(p[1]))
predictions_df['Predicted Sum'] = predictions_df.sum(axis=1)
normalized_shares = predictions_df.drop('Predicted Sum', axis=1).divide(predictions_df['Predicted Sum'].values.reshape(len(predictions_df), 1))
return normalized_shares
def commercial_estimate_regional_floorspace(self):
"""Estimate regional floorspace for the commercial sector"""
regional_shares = self.estimate_regional_shares()
commercial_floorspace = self.activity_data
regional_shares_index = regional_shares.index.astype(str)
commercial_floorspace_reshape = commercial_floorspace.loc[regional_shares_index, :]
regional_floorspace = regional_shares.multiply(commercial_floorspace_reshape.values)
return regional_floorspace
def commercial_regional_intensity_aggregate(self):
"""Calculate Energy Intensities (kBtu/sq. ft.) by region and fuel type (i.e. Fuels and Electricity) for use
in calculating weather factors
Returns:
dictionary with keys: 'electricity' and 'fuels', values: dataframes of intensity data for the commercial sector
with Year index and Region columns
"""
regional_floorspace = self.commercial_estimate_regional_floorspace()
total_fuels_to_indicators, elec_to_indicators = self.eia_data.get_seds()
regional_floorspace_index = regional_floorspace.index
elec_to_indicators = elec_to_indicators.loc[regional_floorspace_index, :]
total_fuels_to_indicators = total_fuels_to_indicators.loc[regional_floorspace_index, :]
fuels_regional = regional_floorspace.multiply(total_fuels_to_indicators.drop('National', axis=1).values)
elec_regional = regional_floorspace.multiply(elec_to_indicators.drop('National', axis=1).values)
return {'fuels': fuels_regional, 'electricity': elec_regional}
def residential_regional_intensity_aggregate(self):
"""This function does not need to exist if nominal_energy_intensity is properly formated, change formatting here if not
Returns:
dictionary with keys: 'electricity' and 'fuels', values: dataframes of intensity data for the residential sector
with Year index and Region columns
i.e. {'fuels': fuels_regional, 'electricity': elec_regional}
"""
nominal_energy_intensity = self.nominal_energy_intensity # nominal_energy_intensity should already be formated in this way
return nominal_energy_intensity
def weather_factors(self, region, energy_type, actual_intensity, weights_df, regional_weights):
"""Estimate a simple regression model to fit the regional intensity to a linear function of time (included squared and cubed values of time) and degree days.
-electricity model: constant term, heating degree day (HDD), cooling degree day (CDD), time, time-squared, and time-cubed
-fuels model: contant term?, HDD, HDD*Time, Time, Time-squared and composite fuel price index (the composite fuel price index was developed as a weighted average of the national distillate
fuel oil price index and a national average price for natural gas)
Weather factors are applied at the regional level to generate the weather-normalized intensity indexes for each of the four Census regions
-The weather factors for delivered energy and source energy are computed implicitly. For delivered energy, they are calculated
as the sum of reported electricity and fuels divided by the sum of the weather-adjusted electricity and weather-adjusted fuels.
A similar procedure is followed for source energt. As such, the implied weather factors are a result of the process, not an independent
variable that influences the values of intensity indexes for delivered energy and source energy. All of these computation occur within Commercial_Total worksheet.
TODO: Input data
"""
if energy_type == 'electricity':
energy_type = 'elec'
subregions = self.sub_regions_dict[region]
subregions_lower = [s.lower().replace(' ', '_') for s in subregions]
hdd_activity_weights = [regional_weights['heating_activity'][r_] for r_ in subregions_lower]
hdd_activity_weights_dict = {r : regional_weights['heating_activity'][r_] for r, r_ in zip(subregions, subregions_lower)}
cdd_activity_weights = [regional_weights['cooling_activity'][r_] for r_ in subregions_lower]
cdd_activity_weights_dict = {r : regional_weights['cooling_activity'][r_] for r, r_ in zip(subregions, subregions_lower)}
fuels_weights = [regional_weights['fuels'][r_] for r_ in subregions_lower]
hdd_by_division, cdd_by_division = self.heating_cooling_data()
heating_degree_days = hdd_by_division[subregions]
heating_degree_days = heating_degree_days.reset_index('Year')
heating_degree_days[region] = heating_degree_days[subregions].dot(hdd_activity_weights)
fuels_heating_degree_days = heating_degree_days
fuels_heating_degree_days[region] = fuels_heating_degree_days[subregions].dot(fuels_weights)
weather_factors_df = heating_degree_days[['Year', region]].rename(columns={region: 'HDD'})
weather_factors_df['Year'] = weather_factors_df['Year'].astype(int)
weather_factors_df['Time'] = weather_factors_df['Year'].values - 1969
weather_factors_df['Time^2'] = weather_factors_df[['Time']].pow(2).values
if energy_type == 'elec':
cooling_degree_days = cdd_by_division[subregions]
cooling_degree_days[region] = cooling_degree_days[subregions].dot(cdd_activity_weights)
cooling_degree_days = cooling_degree_days.reset_index('Year')
cooling_degree_days['Year'] = cooling_degree_days['Year'].astype(int)
weather_factors_df_cooling = cooling_degree_days[['Year', region]].rename(columns={region: 'CDD'})
weather_factors_df = weather_factors_df.merge(weather_factors_df_cooling, on='Year', how='outer')
weather_factors_df['Time^3'] = weather_factors_df[['Time']].pow(3).values
weather_factors_df = weather_factors_df.set_index('Year')
weather_factors_df.index = weather_factors_df.index.astype(int)
X_data = weather_factors_df[['HDD', 'CDD', 'Time', 'Time^2', 'Time^3']]
elif energy_type == 'fuels':
weather_factors_df['HDD*Time'] = heating_degree_days[region].multiply(weather_factors_df['Time'])
weather_factors_df['Price'] = self.process_prices(weather_factors_df)
weather_factors_df = weather_factors_df.set_index('Year')
weather_factors_df.index = weather_factors_df.index.astype(int)
X_data = weather_factors_df[['HDD', 'HDD*Time', 'Time', 'Time^2', 'Price']]
# elif self.energy_type == 'delivered':
# weather_factor = (reported_electricity + fuels) / (weather_adjusted_electrity + weather_adjusted_fuels)
# return weather_factor
else:
raise KeyError(f'Missing valid energy type. Type given: {energy_type}')
actual_intensity.index = actual_intensity.index.astype(int)
data = X_data.merge(actual_intensity, left_index=True, right_index=True, how='inner').dropna()
X = data.drop(region.capitalize(), axis=1)
Y = data[[region.capitalize()]]
reg = linear_model.LinearRegression()
reg.fit(X, Y)
coefficients = reg.coef_
coefficients = coefficients[0]
intercept = reg.intercept_
predicted_value_intensity_actualdd = reg.predict(X) # Predicted value of the intensity based on actual degree days
if energy_type == 'elec':
prediction2_weights = self.adjust_data(subregions=subregions, hdd_by_division=heating_degree_days, cdd_by_division=cooling_degree_days,
cdd_activity_weights=cdd_activity_weights_dict, hdd_activity_weights=hdd_activity_weights_dict,
use_weights_1961_90=True)
predicted_value_intensity_ltaveragesdd = intercept + coefficients[0] * prediction2_weights['heating'] + coefficients[1] * prediction2_weights['cooling'] + \
coefficients[2] * data['Time'] + coefficients[3] * data['Time^2'] + coefficients[4] * data['Time^3'] # Predicted value of the intensity based on the long-term averages of the degree days
elif energy_type == 'fuels':
prediction2_weights = self.adjust_data(subregions=subregions, hdd_by_division=heating_degree_days,
hdd_activity_weights=hdd_activity_weights_dict, cooling=False,
use_weights_1961_90=True)
predicted_value_intensity_ltaveragesdd = intercept + coefficients[0] * prediction2_weights['heating'] + coefficients[1] * data['Time'] * prediction2_weights['heating'] + \
coefficients[2] * data['Time'] + coefficients[3] * data['Time^2'] + coefficients[4] * data['Price'] # Predicted value of the intensity based on the long-term averages of the degree days
weather_factor = predicted_value_intensity_actualdd.flatten() / predicted_value_intensity_ltaveragesdd.values.flatten()
try:
weather_normalized_intensity = actual_intensity.loc[data.index].divide(weather_factor.reshape(len(weather_factor), 1))
except Exception:
try:
weather_normalized_intensity = actual_intensity.loc[data.index].divide(weather_factor)
except Exception as e:
raise ValueError(f'Failure to divide: {actual_intensity.shape} by {weather_factor.shape}, failed with error {e}')
weather_factor_df = pd.DataFrame(data={'Year': data.index, f'{region}_weather_factor': weather_factor}).set_index('Year')
return weather_factor_df, weather_normalized_intensity
def national_method1_fixed_end_use_share_weights(self, energy_type_):
"""Used fixed weights to develop from regional factors, weighted by regional energy share from 1995 CBECS
"""
if self.sector == 'commercial':
shares = self.cbecs_1995_shares()
intensity_df = self.commercial_regional_intensity_aggregate()
elif self.sector == 'residential':
intensity_df = self.residential_regional_intensity_aggregate()
shares = self.recs_1993_shares()
if energy_type_ == 'elec':
energy_type = 'electricity'
else:
energy_type = energy_type_
regional_weather_factors = []
weights_df = self.gather_weights_data()
regional_weights = self.regional_shares(dataframe=weights_df, cols=['heating_activity', 'cooling_activity', 'fuels'])
for region in self.sub_regions_dict.keys():
region_cap = region.capitalize()
if self.sector == 'residential':
regional_intensity = intensity_df[region_cap][energy_type_]
elif self.sector == 'commercial':
regional_intensity = intensity_df[energy_type_][region_cap]
weather_factors, weather_normalized_intensity = self.weather_factors(region, energy_type_, actual_intensity=regional_intensity, weights_df=weights_df, regional_weights=regional_weights)
regional_weather_factors.append(weather_factors)
weather_factors_all = pd.concat(regional_weather_factors, axis=1)
weather_factors_all = weather_factors_all.reindex(columns=list(weather_factors_all.columns) + [f'{energy_type_}_weather_factor'])
for y in weather_factors_all.index:
if energy_type == 'electricity':
energy_type = 'elec'
share_name = f'{energy_type}_share'
year_weather = weather_factors_all.drop(f'{energy_type_}_weather_factor', axis=1).loc[y, :]
weights = shares[share_name].drop('Total')
year_factor = year_weather.dot(weights.to_numpy())
weather_factors_all.loc[y, [f'{energy_type_}_weather_factor']] = year_factor
return weather_factors_all
def national_method2_regression_models(self, seds_data, weather_factors):
"""Second regression model"""
seds_data, weather_factors = df_utils().ensure_same_indices(seds_data, weather_factors)
weather_adjusted_consumption = seds_data.drop('National', axis=1).multiply(weather_factors.values)
weather_adjusted_consumption['National'] = weather_adjusted_consumption.sum(axis=1)
implicit_national_weather_factor = seds_data[['National']].divide(weather_adjusted_consumption['National'].values.reshape(len(weather_adjusted_consumption), 1))
return implicit_national_weather_factor
def adjust_for_weather(self, data, energy_type):
"""Adjust data by weather factors
Parameters
----------
data: dataframe
dataset to adjust by weather
energy_type: str
Returns
-------
weather_adjusted_data: dataframe
"""
weather_factors = self.national_method1_fixed_end_use_share_weights(energy_type)
weather_adjusted_data = data / weather_factors[energy_type]
return weather_adjusted_data
def get_weather(self, energy_dict=None, energy_type=None, energy_df=None, weather_adjust=False, seds_data=None):
"""Collect weather data by sector (commercial or residential)"""
if self.sector == 'residential':
if weather_adjust:
for type_, energy_dataframe in energy_dict.items():
weather_adj_energy = self.adjust_for_weather(energy_dataframe, type_)
energy_dict[f'{type_}_weather_adj'] = weather_adj_energy
return energy_dict
else:
weather_factors = dict()
for type_ in energy_dict.keys():
weather_factors_t = self.national_method1_fixed_end_use_share_weights(energy_type_=type_)
if type_ == 'electricity':
type_ = 'elec'
weather_factors[type_] = weather_factors_t
return weather_factors
elif self.sector == 'commercial':
weather_factors = dict()
weather_factors_1 = dict()
for type_ in ['electricity', 'fuels']:
weather_factors_method1 = self.national_method1_fixed_end_use_share_weights(type_)
if not seds_data:
weather_factors_1[type_] = weather_factors_method1
continue
early_years = range(min(weather_factors_method1.index), 1969 + 1)
weather_factors_early = weather_factors_method1.loc[early_years, [f'{type_}_weather_factor']]
weather = weather_factors_method1.drop(f'{type_}_weather_factor', axis=1)
if type_ == 'electricity':
type_ = 'elec'
type_seds = seds_data[type_]
weather_factors_method2 = self.national_method2_regression_models(seds_data=type_seds, weather_factors=weather)
weather_factors_method2 = weather_factors_method2.rename(columns={'National': f'{type_}_weather_factor'})
late_years = range(1970, max(weather_factors_method2.index) + 1)
weather_factors_late = weather_factors_method2.loc[late_years]
weather_factors_t = pd.concat([weather_factors_early, weather_factors_late], sort=True)
weather_factors[type_] = weather_factors_t
if not seds_data:
return weather_factors_1
else:
return weather_factors
if __name__ == '__main__':
pass
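    # A minimal usage sketch (hypothetical -- the class constructor is not
    # shown in this excerpt, so the name and its arguments are assumptions):
    #   wf = WeatherFactors(sector='residential')
    #   factors = wf.get_weather(energy_dict={'elec': elec_df, 'fuels': fuels_df})
    #   adjusted = wf.get_weather(energy_dict={'elec': elec_df}, weather_adjust=True)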
|
{"hexsha": "460e4970d0792e92b10c20113dcba627f279f00a", "size": 34359, "ext": "py", "lang": "Python", "max_stars_repo_path": "EnergyIntensityIndicators/weather_factors.py", "max_stars_repo_name": "NREL/EnergyIntensityIndicators", "max_stars_repo_head_hexsha": "6d5a6d528ecd27b930d82088055224473ba2d63e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-07-30T15:02:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T20:02:55.000Z", "max_issues_repo_path": "EnergyIntensityIndicators/weather_factors.py", "max_issues_repo_name": "NREL/EnergyIntensityIndicators", "max_issues_repo_head_hexsha": "6d5a6d528ecd27b930d82088055224473ba2d63e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2020-06-18T15:47:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-13T21:20:49.000Z", "max_forks_repo_path": "EnergyIntensityIndicators/weather_factors.py", "max_forks_repo_name": "NREL/EnergyIntensityIndicators", "max_forks_repo_head_hexsha": "6d5a6d528ecd27b930d82088055224473ba2d63e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-18T13:30:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-17T11:34:10.000Z", "avg_line_length": 56.9800995025, "max_line_length": 239, "alphanum_fraction": 0.6482144416, "include": true, "reason": "import numpy", "num_tokens": 8096}
|
from networkx import *
z=[5,3,3,3,3,2,2,2,1,1,1]
print(is_graphical(z))  # degree-sequence test; is_graphical is the current NetworkX name
print("Configuration model")
G=configuration_model(z) # configuration model
degree_sequence = [d for _, d in degree(G)]  # degree sequence (works with the NetworkX 2.x DegreeView)
print("Degree sequence %s" % degree_sequence)
print("Degree histogram")
hist={}
for d in degree_sequence:
if d in hist:
hist[d]+=1
else:
hist[d]=1
print("degree #nodes")
for d in hist:
print('%d %d' % (d,hist[d]))
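# The same histogram via the standard library (equivalent to the manual loop above):
from collections import Counter
assert Counter(degree_sequence) == hist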
|
{"hexsha": "feed33313bd4b5c0b922e55107819de224362aa6", "size": 462, "ext": "py", "lang": "Python", "max_stars_repo_path": "gra3.py", "max_stars_repo_name": "0x0mar/PyDetector", "max_stars_repo_head_hexsha": "e58e32c66c3972ceb0cbb65408e0ac797e896604", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T10:56:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T10:56:53.000Z", "max_issues_repo_path": "gra3.py", "max_issues_repo_name": "0x0mar/PyDetector", "max_issues_repo_head_hexsha": "e58e32c66c3972ceb0cbb65408e0ac797e896604", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gra3.py", "max_forks_repo_name": "0x0mar/PyDetector", "max_forks_repo_head_hexsha": "e58e32c66c3972ceb0cbb65408e0ac797e896604", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-12-18T00:44:29.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-18T00:44:29.000Z", "avg_line_length": 24.3157894737, "max_line_length": 58, "alphanum_fraction": 0.6753246753, "include": true, "reason": "from networkx", "num_tokens": 135}
|
from keras.datasets import mnist
from ocr_cnn import OCR_NeuralNetwork
from keras.models import Sequential
from keras.layers import Merge
from preprocessing import preprocess_data
import numpy as np
class ensemble:
def __init__(self, models=None):
    # avoid a mutable default argument; copy the caller's list
    self._models = []
    for model in (models or []):
        self._models.append(model)
def add_model(self, model):
self._models.append(model)
def compile_model(self, mode="ave",
loss="categorical_crossentropy",
optimizer="adadelta",
metrics=['accuracy', 'precision', 'recall']):
if len(self._models) < 2:
print("You need to at least to add 2 models to build an ensemble")
return
sequentials = []
for model in self._models:
sequentials.append(model._model)
self._ensemble = Sequential()
self._ensemble.add(Merge(sequentials, mode='ave'))
self._ensemble.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy', 'precision', 'recall'])
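    # Merge(..., mode='ave') averages the member models' output vectors, so
    # the ensemble prediction is the mean of the individual class-probability
    # outputs (this relies on the legacy Keras 1.x Merge layer imported above).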
    # Fit all the member models, then compile the ensemble
def fit(self, X_train, y_train, X_test=[], y_test=[], verbose=0):
self._histories = []
self._histories_cont = []
for index, model in enumerate(self._models):
print("Training model " + str(index) + " ...")
if index == 0:
    window_size = 30
else:
    window_size = -1
history, history_cont = model.fit(X_train, y_train,
X_test, y_test, forceRetrain = True, verbose=verbose,
initial_epoch=0, window_size=window_size, seed=1337)
self._histories.append(history)
self._histories_cont.append(history_cont)
self.compile_model()
print("Done.\n\n")
def predict(self, X_test, verbose=0):
if getattr(self, '_ensemble', None) is None:
    print("You must train the net first")
    return
X_test, _ , _ = preprocess_data(X_test, [], self._models[0]._nb_classes,
img_rows=self._models[0]._img_rows, img_cols=self._models[0]._img_cols,
verbose=verbose)
return self._ensemble.predict_classes([np.asarray(X_test)] * len(self._models))
def evaluate(self, X_test, y_test, verbose=0):
X_test, y_test, _ = preprocess_data(X_test, y_test, self._models[0]._nb_classes,
img_rows=self._models[0]._img_rows, img_cols=self._models[0]._img_cols,
verbose=verbose)
print('Evaluating ensemble')
score = self._ensemble.evaluate([np.asarray(X_test)] * len(self._models),
y_test,
verbose=verbose)
print('Test accuracy:', score[1]*100, '%')
print('Test error:', (1-score[2])*100, '%')
def main():
## Fast Usage
# Prepare the dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Initialization
nn1 = OCR_NeuralNetwork(10, nb_epochs=2, model_dir="checkpoints", model_name="test1", batch_size=128)
nn2 = OCR_NeuralNetwork(10, nb_epochs=2, model_dir="checkpoints", model_name="test2", batch_size=128)
# You can add models in the constructor or by the add_model method
# as follows
models = [nn1,nn2]
nn_ensemble = ensemble(models=models)
nn3 = OCR_NeuralNetwork(10, nb_epochs=4, model_dir="checkpoints", model_name="test3", batch_size=128)
nn_ensemble.add_model(nn3)
# Training: fits every member model and then compiles the ensemble.
# If a snapshot of the models is already present in the "checkpoints"
# folder, this line can be commented out and compile_model() called instead
nn_ensemble.fit(X_train, y_train, X_test, y_test, verbose=0)
# Compile the model using the already-fit nets. When fitting from
# scratch (the line above), compilation already happens inside the
# ensemble's fit method, so this stays commented out
#nn_ensemble.compile_model()
# Prediction
predicted = nn_ensemble.predict(X_test)
# Evaluation
score = nn_ensemble.evaluate(X_test, y_test, verbose=1)
# Execute the module if it is main
if __name__ == "__main__":
main()
|
{"hexsha": "6026d5bfbdb19c4d9b3c4d1df4841f0803a1eb07", "size": 3923, "ext": "py", "lang": "Python", "max_stars_repo_path": "Notebooks/ensemble.py", "max_stars_repo_name": "ieee820/In-Codice-Ratio-OCR-with-CNN", "max_stars_repo_head_hexsha": "7b616822fe98d871ae1bf485ff7d0455922c84a2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Notebooks/ensemble.py", "max_issues_repo_name": "ieee820/In-Codice-Ratio-OCR-with-CNN", "max_issues_repo_head_hexsha": "7b616822fe98d871ae1bf485ff7d0455922c84a2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notebooks/ensemble.py", "max_forks_repo_name": "ieee820/In-Codice-Ratio-OCR-with-CNN", "max_forks_repo_head_hexsha": "7b616822fe98d871ae1bf485ff7d0455922c84a2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-06T06:20:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-06T06:20:28.000Z", "avg_line_length": 29.276119403, "max_line_length": 105, "alphanum_fraction": 0.6788172317, "include": true, "reason": "import numpy", "num_tokens": 1027}
|
import pandas as pd
import numpy as np
import datetime
import json
import pickle
from pathlib import Path
from difflib import SequenceMatcher
from pickle_functions import *
from app_functions import *
from process_functions import write_log
path_input = Path.cwd() / 'input'
Path.mkdir(path_input, exist_ok = True)
path_life_table_BE = Path.cwd() / 'input' / 'sterftetafelsAE.xls'
path_geo_BE = Path.cwd() / 'input' / 'municipalities-belgium.geojson'
path_deaths_BE = Path.cwd() / 'input' / 'TF_DEATHS.xlsx'
path_pop_BE = Path.cwd() / 'input' / 'pop_muniBE.xlsx'
url_epistat = 'https://epistat.sciensano.be/Data/COVID19BE.xlsx'
BE_data_cases = clean_data_be(url_epistat, cases = True, hosp = False, deaths = False)
BE_data_hosp = clean_data_be(url_epistat, cases = False, hosp = True, deaths = False)
BE_data_cases['CASES'] = BE_data_cases.groupby(['DATE', 'PROVINCE'])['CASES'].sum()
BE_data_cases = BE_data_cases.groupby(['DATE','PROVINCE']).first()
BE_data_cases = BE_data_cases[['CASES']]
BE_data_cases = BE_data_cases.rename(columns={"CASES": "Cases"})
BE_data_hosp['Released from hospital'] = BE_data_hosp.groupby(['PROVINCE'])['NEW_OUT'].cumsum()
BE_data_hosp['Total hospitalized'] = BE_data_hosp.groupby(['PROVINCE'])['NEW_IN'].cumsum()
BE_data_hosp = BE_data_hosp.rename(columns={"TOTAL_IN": "Hospitalized", 'TOTAL_IN_ICU': 'ICU', 'TOTAL_IN_RESP': 'Respiratory'})
BE_data_hosp = BE_data_hosp.reset_index()
BE_data_hosp = BE_data_hosp.rename(columns={"index": "DATE"})
BE_data_hosp['DATE'] = BE_data_hosp['DATE'].astype('str')
BE_data_hosp = BE_data_hosp.set_index(['DATE','PROVINCE'])
BE_total_prov = BE_data_cases.merge(BE_data_hosp, left_index = True, right_index = True, how='outer')
BE_total_prov['Cases'] = BE_total_prov['Cases'].fillna(0.0)
BE_total_prov.insert(loc = 2, column = 'Cumulative cases', value = BE_total_prov.groupby(['PROVINCE'])['Cases'].cumsum())
BE_total_prov_merged = BE_total_prov.reset_index('PROVINCE').copy()
BE_total_merged = BE_total_prov_merged.copy()
BE_total_merged['PROVINCE'] = 'Belgium'
BE_total_merged = BE_total_merged.groupby(level = 0).sum(min_count = 1)
BE_data_deaths = clean_data_be(url_epistat, cases = False, hosp = False, deaths = True)
BE_total_deaths = cum_deaths_by_date(BE_data_deaths)
BE_total_merged = BE_total_merged.merge(BE_total_deaths, left_index = True, right_index = True, how='outer')
for date in set(BE_total_prov_merged.index):
for var in ['Cumulative cases', 'Released from hospital', 'Total hospitalized']:
temp_data = BE_total_prov_merged[var].loc[date].reset_index()
for i in range(len(temp_data[var])):
if np.isnan(temp_data.iloc[i][var]):
BE_total_merged.at[date, var] = np.nan
available_provinces = ['Belgium']
for prov in sorted(set(BE_total_prov_merged['PROVINCE'])):
available_provinces.append(prov)
BE_reg_deaths = clean_data_be(url_epistat, cases = False, hosp = False, deaths = True)
BE_reg_cases = clean_data_be(url_epistat, cases = True, hosp = False, deaths = False)
BE_reg_pop = pd.read_excel(path_pop_BE, sheet_name = 'Bevolking in 2019', header = [1])
BE_reg_pop = BE_reg_pop.loc[(BE_reg_pop['Woonplaats'] == 'Vlaams Gewest') | (BE_reg_pop['Woonplaats'] == 'Waals Gewest') | (BE_reg_pop['Woonplaats'] == 'Brussels Hoofdstedelijk Gewest')]
BE_reg_pop = BE_reg_pop.rename(columns = {'Woonplaats': 'Region', 'Mannen': 'Male', 'Vrouwen': 'Female', 'Totaal': 'Total'})
BE_reg_pop.loc[BE_reg_pop['Region'] == 'Vlaams Gewest', 'Region'] = 'Flanders'
BE_reg_pop.loc[BE_reg_pop['Region'] == 'Waals Gewest', 'Region'] = 'Wallonia'
BE_reg_pop.loc[BE_reg_pop['Region'] == 'Brussels Hoofdstedelijk Gewest', 'Region'] = 'Brussels'
df_reg_male_deaths = BE_reg_deaths.loc[BE_reg_deaths['SEX'] == 'M'].copy()
df_reg_female_deaths = BE_reg_deaths.loc[BE_reg_deaths['SEX'] == 'F'].copy()
df_reg_male_cases = BE_reg_cases.loc[BE_reg_cases['SEX'] == 'M'].copy()
df_reg_female_cases = BE_reg_cases.loc[BE_reg_cases['SEX'] == 'F'].copy()
BE_reg_total_deaths = aggregate_regions(BE_reg_deaths, 'DEATHS')
BE_reg_total_cases = aggregate_regions(BE_reg_cases, 'CASES')
BE_reg_male_deaths = aggregate_regions(df_reg_male_deaths, 'DEATHS')
BE_reg_female_deaths = aggregate_regions(df_reg_female_deaths, 'DEATHS')
BE_reg_male_cases = aggregate_regions(df_reg_male_cases, 'CASES')
BE_reg_female_cases = aggregate_regions(df_reg_female_cases, 'CASES')
df_epistat_muni = pd.read_excel(url_epistat, sheet_name = 'CASES_MUNI_CUM', usecols = ['CASES', 'TX_DESCR_FR', 'TX_DESCR_NL', 'NIS5'])
df_epistat_muni = df_epistat_muni.loc[df_epistat_muni['TX_DESCR_FR'].isna() == False]
df_epistat_muni = df_epistat_muni.loc[df_epistat_muni['TX_DESCR_NL'].isna() == False]
df_epistat_muni = df_epistat_muni.rename(columns={"TX_DESCR_FR": "name_fr", "TX_DESCR_NL": "name_nl", "NIS5": "NISCode"})
df_epistat_muni['CASES'] = np.where(df_epistat_muni['CASES'] == '<5', '1', df_epistat_muni['CASES'])
df_epistat_muni['CASES'] = pd.to_numeric(df_epistat_muni['CASES'])
df_epistat_muni['NISCode'] = df_epistat_muni['NISCode'].astype(int)
df_epistat_muni['NISCode'] = df_epistat_muni['NISCode'].astype(str)
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Puurs-Sint-Amands', 'name_nl'] = 'Sint-Amands'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Lievegem', 'name_nl'] = 'Waarschoot'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Oudsbergen', 'name_nl'] = 'Opglabbeek'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Blegny', 'name_nl'] = 'Blégny'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Etalle', 'name_nl'] = 'Étalle'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Villers-Le-Bouillet', 'name_nl'] = 'Villers-le-Bouillet'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Ecaussinnes', 'name_nl'] = 'Écaussinnes'
df_epistat_muni.loc[df_epistat_muni['name_nl'] == 'Pelt', 'name_nl'] = 'Neerpelt'
df_epistat_muni = df_epistat_muni.set_index('NISCode')
BE_pop = pd.read_excel(path_pop_BE, sheet_name = 'Bevolking in 2019', header = [1])
BE_pop = BE_pop.loc[BE_pop['NIS code'].isna() == False]
BE_pop = BE_pop.rename(columns={"NIS code": "NISCode"})
BE_pop = BE_pop[:-3]
BE_pop['NISCode'] = BE_pop['NISCode'].astype(int)
BE_pop['NISCode'] = BE_pop['NISCode'].astype(str)
BE_pop = BE_pop.set_index('NISCode')
df_epistat_muni = df_epistat_muni.join(BE_pop)
df_epistat_muni = df_epistat_muni.reset_index()
df_epistat_muni['Infected population (%)'] = ((df_epistat_muni['CASES']/df_epistat_muni['Totaal'])*100).round(2)
with open(path_geo_BE) as f:
df_muni_geo = json.load(f)
temp_list = []
for i in range(len(df_muni_geo['features'])):
for index, j in enumerate(df_muni_geo['features'][i]['properties']['name']):
if j == '#':
df_muni_geo['features'][i]['properties']['name'] = df_muni_geo['features'][i]['properties']['name'][:index]
temp_list.append(df_muni_geo['features'][i]['properties']['name'])
temp_list.sort()
temp_list2 = list(df_epistat_muni['name_nl'])
temp_list3 = list(df_epistat_muni['name_fr'])
for i in range(len(temp_list2)):
temp_string = ''
for index, j in enumerate(temp_list2[i]):
temp_string += temp_list2[i][index]
if index +2 < len(temp_list2[i]) and temp_list2[i][index+1] == ' ' and temp_list2[i][index+2] == '(':
break
temp_list2[i] = temp_string
for i in range(len(temp_list3)):
temp_string = ''
for index, j in enumerate(temp_list3[i]):
temp_string += temp_list3[i][index]
if index +2 < len(temp_list3[i]) and temp_list3[i][index+1] == ' ' and temp_list3[i][index+2] == '(':
break
temp_list3[i] = temp_string
for i in range(len(temp_list2)):
if temp_list2[i] not in temp_list:
if temp_list3[i] not in temp_list:
for name in temp_list:
if SequenceMatcher(None, name, temp_list2[i]).ratio() > 0.7:
temp_list2[i] = name
elif SequenceMatcher(None, name, temp_list3[i]).ratio() > 0.7:
temp_list3[i] = name
for i in range(len(temp_list2)):
if temp_list2[i] not in temp_list and temp_list3[i] in temp_list:
temp_list2[i] = temp_list3[i]
# debug hook: flag municipalities that still have no geojson match
for i in range(len(temp_list2)):
    if temp_list2[i] not in temp_list:
        pass
        #print('not match')
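# Summary of the matching above: '#'-suffixed geojson names are truncated,
# ' (...)' suffixes are stripped from the epistat NL/FR names, leftovers are
# fuzzy-matched with SequenceMatcher (ratio > 0.7), and the French name is
# used wherever the Dutch one still has no geojson counterpart.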
df_epistat_muni['name'] = temp_list2
df_epistat_muni_clean = df_epistat_muni[['CASES', 'name', 'Infected population (%)']]
df_epistat_muni_clean = df_epistat_muni_clean.set_index('name')
df_epistat_muni_clean.loc['Knesselare'] = [df_epistat_muni_clean.loc['Aalter'][0], df_epistat_muni_clean.loc['Aalter'][1]]
df_epistat_muni_clean.loc['Nevele'] = [df_epistat_muni_clean.loc['Deinze'][0], df_epistat_muni_clean.loc['Deinze'][1]]
df_epistat_muni_clean.loc['Zomergem'] = [df_epistat_muni_clean.loc['Waarschoot'][0], df_epistat_muni_clean.loc['Waarschoot'][1]]
df_epistat_muni_clean.loc['Lovendegem'] = [df_epistat_muni_clean.loc['Waarschoot'][0], df_epistat_muni_clean.loc['Waarschoot'][1]]
df_epistat_muni_clean.loc['Zingem'] = [df_epistat_muni_clean.loc['Kruishoutem'][0], df_epistat_muni_clean.loc['Kruishoutem'][1]]
df_epistat_muni_clean.loc['Meeuwen-Gruitrode'] = [df_epistat_muni_clean.loc['Opglabbeek'][0], df_epistat_muni_clean.loc['Opglabbeek'][1]]
df_epistat_muni_clean.loc['Overpelt'] = [df_epistat_muni_clean.loc['Neerpelt'][0], df_epistat_muni_clean.loc['Neerpelt'][1]]
df_epistat_muni_clean.loc['Puers'] = [df_epistat_muni_clean.loc['Sint-Amands'][0], df_epistat_muni_clean.loc['Sint-Amands'][1]]
df_epistat_muni_clean = df_epistat_muni_clean.reset_index()
df_epistat_muni_clean = df_epistat_muni_clean.rename(columns={"name": "Municipality", "CASES": "Number cases"})
df_epistat_muni_clean['Number cases (ln)'] = np.log(df_epistat_muni_clean['Number cases']).round(2)
# Draw weekly mortality
BE_weekly_deaths = clean_data_be(data_path = url_epistat, cases = False, hosp = False, deaths = True)
BE_weekly_deaths['DEATHS'] = BE_weekly_deaths.groupby(level = 0)['DEATHS'].sum().round(2)
BE_weekly_deaths = BE_weekly_deaths.groupby(level = 0).first()
BE_weekly_deaths = BE_weekly_deaths.reset_index()
BE_weekly_deaths['DATE'] = pd.to_datetime(BE_weekly_deaths['DATE'], format = '%Y-%m-%d')
BE_weekly_deaths['month'] = BE_weekly_deaths['DATE'].dt.month
BE_weekly_deaths['day'] = BE_weekly_deaths['DATE'].dt.day
BE_weekly_deaths = BE_weekly_deaths[:-2][['month','day','DEATHS']]
BE_deaths_bydate = pd.read_excel(path_deaths_BE)
BE_deaths_bydate['year'] = BE_deaths_bydate['DT_DATE'].dt.year
BE_deaths_bydate['month'] = BE_deaths_bydate['DT_DATE'].dt.month
BE_deaths_bydate['day'] = BE_deaths_bydate['DT_DATE'].dt.day
BE_deaths_bydate = BE_deaths_bydate[(BE_deaths_bydate['year'] >= 2015) & (BE_deaths_bydate['year'] <= 2017)]
BE_deaths_bydate = BE_deaths_bydate[(BE_deaths_bydate['month'] != 2) | (BE_deaths_bydate['day'] != 29)]
BE_deaths_bydate = BE_deaths_bydate.set_index(['month', 'day'])
BE_deaths_bydate['mean_MS_NUM_DEATHS'] = BE_deaths_bydate.groupby(['month', 'day'])['MS_NUM_DEATHS'].mean().round(2)
BE_deaths_bydate = BE_deaths_bydate.reset_index()
BE_deaths_bydate = BE_deaths_bydate[BE_deaths_bydate['year'] == 2017]
BE_deaths_bydate = BE_deaths_bydate[['month','day', 'mean_MS_NUM_DEATHS', 'DT_DATE']]
BE_deaths_bydate['short_date'] = [f'{m}-{d}' for m, d in zip(BE_deaths_bydate['month'], BE_deaths_bydate['day'])]
BE_excess_mortality = BE_deaths_bydate.merge(BE_weekly_deaths, on = ['month', 'day'], how = 'left')
BE_excess_mortality['weeks'] = 0
week_index = 1
counter = 0
for i in range(len(BE_excess_mortality['short_date'])):
BE_excess_mortality.at[counter, 'weeks'] = week_index
counter += 1
if counter % 7 == 0:
week_index += 1
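# Equivalent vectorized form of the week-index loop above:
#   BE_excess_mortality['weeks'] = np.arange(len(BE_excess_mortality)) // 7 + 1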
BE_excess_mortality = BE_excess_mortality.set_index('weeks')
BE_excess_mortality['Weekly COVID-19 deaths'] = BE_excess_mortality.groupby(level = 0)['DEATHS'].sum(min_count = 1).round(2)
BE_excess_mortality['Weekly average (2015-2017) deaths'] = BE_excess_mortality.groupby(level = 0)['mean_MS_NUM_DEATHS'].sum().round(2)
for year in ['2015', '2016', '2017', '2018']:
life_table = pd.read_excel(path_life_table_BE, sheet_name = year, header = None)
life_table = life_table[[0, 6, 13, 20]]
life_table = life_table.rename(columns={0: "age", 6: "surv_male", 13: "surv_female", 20: "surv_all"})
life_table.loc[life_table['age'] == '105+', 'age'] = '105'
life_table = life_table.loc[life_table['age'].isna() == False]
life_table = life_table[1:]
life_table['age'] = life_table['age'].astype(int)
life_table = life_table.astype('float')
for sex in ['male', 'female', 'all']:
life_table['density_' + sex] = 1 - life_table['surv_' + sex]/1000000
if year == '2015':
life_table_2015 = life_table.copy()
if year == '2016':
life_table_2016 = life_table.copy()
if year == '2017':
life_table_2017 = life_table.copy()
if year == '2018':
life_table_2018 = life_table.copy()
life_table = pd.concat([life_table_2015, life_table_2016, life_table_2017], ignore_index = True)
life_table = life_table.set_index('age')
for sex in ['male', 'female', 'all']:
life_table['avg_density_' + sex] = life_table.groupby(level = 0)['density_' + sex].mean()
life_table = life_table.groupby(level = 0).last()
life_table = life_table.reset_index()
life_table_cont = life_table.copy()
life_table_cont = life_table_cont[['age', 'avg_density_male', 'avg_density_female', 'avg_density_all']]
life_table_cont = life_table_cont.round(2)
life_table.loc[(life_table['age'] >= 0) & (life_table['age'] <= 24), 'age'] = 12
life_table.loc[(life_table['age'] >= 25) & (life_table['age'] <= 44), 'age'] = 30
life_table.loc[(life_table['age'] >= 45) & (life_table['age'] <= 64), 'age'] = 50
life_table.loc[(life_table['age'] >= 65) & (life_table['age'] <= 74), 'age'] = 70
life_table.loc[(life_table['age'] >= 85) & (life_table['age'] <= 94), 'age'] = 90
life_table.loc[life_table['age'] >= 95, 'age'] = 90
life_table = life_table.set_index('age')
life_table = life_table.drop(labels = [x for x in range(75, 80, 1)])
life_table = life_table.drop(labels = [x for x in range(81, 85, 1)])
life_table_discrete = life_table[['avg_density_male', 'avg_density_female', 'avg_density_all']]
life_table_discrete = life_table_discrete.round(2)
life_table_discrete = life_table_discrete.groupby(level = 0).last()
BE_deaths_lifetable = pd.read_excel(url_epistat, sheet_name = 'MORT')
dataframe_list = [
[BE_total_prov_merged, 'BE_total_prov_merged'],
[BE_total_merged, 'BE_total_merged'],
[BE_reg_total_deaths, 'BE_reg_total_deaths'],
[BE_reg_total_cases, 'BE_reg_total_cases'],
[BE_reg_male_deaths, 'BE_reg_male_deaths'],
[BE_reg_female_deaths, 'BE_reg_female_deaths'],
[BE_reg_male_cases, 'BE_reg_male_cases'],
[BE_reg_female_cases, 'BE_reg_female_cases'],
[BE_reg_pop, 'BE_reg_pop'],
[df_epistat_muni_clean, 'df_epistat_muni_clean'],
[df_muni_geo, 'df_muni_geo'],
[BE_excess_mortality, 'BE_excess_mortality'],
[available_provinces, 'available_provinces'],
[life_table_discrete, 'life_table_discrete'],
[BE_deaths_lifetable, 'BE_deaths_lifetable']
]
for dataframe, name in dataframe_list:
picklify(dataframe, name)
|
{"hexsha": "b177a7a686e568e7b1a45f5cc7371a2b4f0e6be8", "size": 15223, "ext": "py", "lang": "Python", "max_stars_repo_path": "df_process.py", "max_stars_repo_name": "Learning-from-the-curve/dashboard-belgium", "max_stars_repo_head_hexsha": "b8902a6347560ce622aef4e346e971b5b3a758ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "df_process.py", "max_issues_repo_name": "Learning-from-the-curve/dashboard-belgium", "max_issues_repo_head_hexsha": "b8902a6347560ce622aef4e346e971b5b3a758ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-31T20:05:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T20:43:59.000Z", "max_forks_repo_path": "df_process.py", "max_forks_repo_name": "Learning-from-the-curve/dashboard-belgium", "max_forks_repo_head_hexsha": "b8902a6347560ce622aef4e346e971b5b3a758ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-21T18:02:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-21T18:02:36.000Z", "avg_line_length": 56.3814814815, "max_line_length": 186, "alphanum_fraction": 0.7248242791, "include": true, "reason": "import numpy", "num_tokens": 4752}
|
from matplotlib import pyplot as plt
from numpy import genfromtxt
vel_data = genfromtxt('vel_log.csv', delimiter=',')
accel_data = genfromtxt('accel_log.csv', delimiter=',')
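# A minimal visualization sketch (an assumption -- the column layout of the
# logs is not documented here; every column is plotted against sample index):
fig, (ax_vel, ax_accel) = plt.subplots(2, 1, sharex=True)
ax_vel.plot(vel_data)
ax_vel.set_ylabel('velocity')
ax_accel.plot(accel_data)
ax_accel.set_ylabel('acceleration')
ax_accel.set_xlabel('sample')
plt.show()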
|
{"hexsha": "49e39625b165d8e0397f2627b5489b3ba6a3d994", "size": 174, "ext": "py", "lang": "Python", "max_stars_repo_path": "igvc_ws/src/igvc_ekf/src/scripts/ekf_log_visual.py", "max_stars_repo_name": "SoonerRobotics/igvc_software_2022", "max_stars_repo_head_hexsha": "906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-07-07T14:56:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T23:31:07.000Z", "max_issues_repo_path": "igvc_ws/src/igvc_ekf/src/scripts/ekf_log_visual.py", "max_issues_repo_name": "pradumn203/igvc-winners-2021", "max_issues_repo_head_hexsha": "658233609054eafac59603a77b2a092dc002e145", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-11-12T02:57:54.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-17T17:04:22.000Z", "max_forks_repo_path": "igvc_ws/src/igvc_ekf/src/scripts/ekf_log_visual.py", "max_forks_repo_name": "pradumn203/igvc-winners-2021", "max_forks_repo_head_hexsha": "658233609054eafac59603a77b2a092dc002e145", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-29T05:21:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-23T05:03:27.000Z", "avg_line_length": 34.8, "max_line_length": 55, "alphanum_fraction": 0.7701149425, "include": true, "reason": "from numpy", "num_tokens": 42}
|
import os
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from src.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater
from src.model import EfficientDet
from tensorboardX import SummaryWriter
import shutil
import numpy as np
from tqdm.autonotebook import tqdm
from src.config import colors
import cv2
class Infer():
'''
Class for main inference
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
def __init__(self, verbose=1):
self.system_dict = {}
self.system_dict["verbose"] = verbose
self.system_dict["local"] = {}
self.system_dict["local"]["common_size"] = 512
self.system_dict["local"]["mean"] = np.array([[[0.485, 0.456, 0.406]]])
self.system_dict["local"]["std"] = np.array([[[0.229, 0.224, 0.225]]])
def Model(self, model_dir="trained/"):
'''
User function: Select trained model params
Args:
model_dir (str): Relative path to directory containing trained models
Returns:
None
'''
self.system_dict["local"]["model"] = torch.load(model_dir + "/signatrix_efficientdet_coco.pth").module
if torch.cuda.is_available():
self.system_dict["local"]["model"] = self.system_dict["local"]["model"].cuda();
def Predict(self, img_path, class_list, vis_threshold = 0.4, output_folder = 'Inference'):
'''
User function: Run inference on image and visualize it
Args:
img_path (str): Relative path to the image file
class_list (list): List of classes in the training set
vis_threshold (float): Score threshold; detections scoring below it are not displayed
output_folder (str): Path to folder where output images will be saved
Returns:
tuple: Containing label IDs, scores and bounding box locations of predicted objects.
'''
if not os.path.exists(output_folder):
os.makedirs(output_folder)
image_filename = os.path.basename(img_path)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
image = img.astype(np.float32) / 255.
image = (image.astype(np.float32) - self.system_dict["local"]["mean"]) / self.system_dict["local"]["std"]
height, width, _ = image.shape
if height > width:
scale = self.system_dict["local"]["common_size"] / height
resized_height = self.system_dict["local"]["common_size"]
resized_width = int(width * scale)
else:
scale = self.system_dict["local"]["common_size"] / width
resized_height = int(height * scale)
resized_width = self.system_dict["local"]["common_size"]
image = cv2.resize(image, (resized_width, resized_height))
new_image = np.zeros((self.system_dict["local"]["common_size"], self.system_dict["local"]["common_size"], 3))
new_image[0:resized_height, 0:resized_width] = image
img = torch.from_numpy(new_image)
with torch.no_grad():
    # only move to the GPU when one is available, mirroring Model() above
    if torch.cuda.is_available():
        img = img.cuda()
    scores, labels, boxes = self.system_dict["local"]["model"](img.permute(2, 0, 1).float().unsqueeze(dim=0))
    boxes /= scale
try:
if boxes.shape[0] > 0:
output_image = cv2.imread(img_path)
for box_id in range(boxes.shape[0]):
pred_prob = float(scores[box_id])
if pred_prob < vis_threshold:
    break  # outputs are sorted by score, so the rest fall below the threshold
pred_label = int(labels[box_id])
xmin, ymin, xmax, ymax = (int(v) for v in boxes[box_id, :])  # cv2 drawing expects ints
color = colors[pred_label]
cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color, 2)
text_size = cv2.getTextSize(class_list[pred_label] + ' : %.2f' % pred_prob, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
cv2.rectangle(output_image, (xmin, ymin), (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color, -1)
cv2.putText(
output_image, class_list[pred_label] + ' : %.2f' % pred_prob,
(xmin, ymin + text_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1,
(255, 255, 255), 1)
cv2.imwrite(os.path.join(output_folder, image_filename), output_image)
cv2.imwrite("output.jpg", output_image)
return scores, labels, boxes
except Exception:
    print("No object detected")
    return None
def predict_batch_of_images(self, img_folder, class_list, vis_threshold = 0.4, output_folder='Inference'):
'''
User function: Run inference on multiple images and visualize them
Args:
img_folder (str): Relative path to folder containing all the image files
class_list (list): List of classes in the training set
vis_threshold (float): Score threshold; detections scoring below it are not displayed
output_folder (str): Path to folder where output images will be saved
Returns:
None
'''
all_filenames = os.listdir(img_folder)
all_filenames.sort()
generated_count = 0
for filename in all_filenames:
img_path = "{}/{}".format(img_folder, filename)
try:
    self.Predict(img_path, class_list, vis_threshold, output_folder)
    generated_count += 1
except Exception:
    continue
print("Objects detected for {} images".format(generated_count))
|
{"hexsha": "1afcac5178aa399852d5ef4aadf82f1ebabb6e32", "size": 5852, "ext": "py", "lang": "Python", "max_stars_repo_path": "4_efficientdet/lib/infer_detector.py", "max_stars_repo_name": "deepchatterjeevns/Monk_Object_Detection", "max_stars_repo_head_hexsha": "861c6035e975ecdf3ea07273f7479dbf60fbf9b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 549, "max_stars_repo_stars_event_min_datetime": "2020-01-02T05:14:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:34:12.000Z", "max_issues_repo_path": "4_efficientdet/lib/infer_detector.py", "max_issues_repo_name": "deepchatterjeevns/Monk_Object_Detection", "max_issues_repo_head_hexsha": "861c6035e975ecdf3ea07273f7479dbf60fbf9b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 98, "max_issues_repo_issues_event_min_datetime": "2020-01-21T09:41:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:53:06.000Z", "max_forks_repo_path": "4_efficientdet/lib/infer_detector.py", "max_forks_repo_name": "deepchatterjeevns/Monk_Object_Detection", "max_forks_repo_head_hexsha": "861c6035e975ecdf3ea07273f7479dbf60fbf9b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 233, "max_forks_repo_forks_event_min_datetime": "2020-01-18T03:46:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T03:17:47.000Z", "avg_line_length": 40.0821917808, "max_line_length": 134, "alphanum_fraction": 0.5962064252, "include": true, "reason": "import numpy", "num_tokens": 1292}
|
[STATEMENT]
lemma axis_eq_0_iff [simp]:
shows "axis m x = 0 \<longleftrightarrow> x = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (axis m x = 0) = (x = (0::'a))
[PROOF STEP]
by (simp add: axis_def vec_eq_iff)
|
{"llama_tokens": 101, "file": null, "length": 1}
|
#include "../include/sporkel.h"
#include <condition_variable>
#include <chrono>
#include <fstream>
#include <functional>
#include <map>
#include <mutex>
#include <numeric>
#include <string>
#include <thread>
#include <iostream>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/lzma.hpp>
#include <boost/filesystem.hpp>
#include <boost/optional.hpp>
#include <cereal/access.hpp>
#include <cereal/archives/binary.hpp>
#include <cereal/archives/portable_binary.hpp>
#include <cereal/cereal.hpp>
#include <cereal/types/vector.hpp>
#include <cereal/types/string.hpp>
#include <sodium.h>
#include <bscommon.h>
#include "../../util/util.hpp"
#include "../../util/scopeguard.hpp"
namespace fs = boost::filesystem;
namespace io = boost::iostreams;
namespace {
struct delta_info {
unsigned char hash[crypto_generichash_BYTES];
fs::file_type type;
unsigned long long size;
bool deleted;
};
enum class delta_op_type
{
DELETE,
ADD,
PATCH,
KEEP
};
struct delta_op {
delta_op_type type;
std::string path;
fs::file_type ftype;
std::vector<uint8_t> patch;
delta_op() = default;
delta_op(delta_op_type type, const std::string &path, fs::file_type ftype)
: type(type), path(path), ftype(ftype) {}
private:
friend class cereal::access;
template<class Archive>
void serialize(Archive &ar, const unsigned int version)
{
switch (version) {
case 1:
ar(type, path, ftype);
break;
default:
throw cereal::Exception("unknown version");
}
}
};
struct delta_op_toc {
std::vector<delta_op> ops;
std::string before_hash;
std::string after_hash;
bool require_exact_patch_target = false;
private:
friend class cereal::access;
template<class Archive>
void serialize(Archive &ar, const unsigned int version)
{
switch (version) {
case 2:
ar(ops, before_hash, after_hash, require_exact_patch_target);
break;
case 1:
ar(ops, before_hash, after_hash);
break;
default:
throw cereal::Exception("unknown version");
}
}
};
struct deferred_patch_info
{
using patch_t = std::vector<uint8_t>*;
size_t before_size, after_size;
size_t max_patch_size;
fs::path before_path, after_path;
fs::path cache_path;
patch_t patch;
bool processing = false;
bool done = false;
deferred_patch_info(size_t before_size, size_t after_size, size_t max_patch_size,
fs::path before_path, fs::path after_path, patch_t patch) :
before_size(before_size), after_size(after_size), max_patch_size(max_patch_size),
before_path(before_path), after_path(after_path), patch(patch)
{
patch->resize(max_patch_size + 1);
}
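	// Rough upper bound on bsdiff's working set: roughly a suffix array over
	// the "before" file plus buffers proportional to the "after" file (an
	// interpretation of the constants below, not a documented formula).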
size_t max_mem_usage() const
{
return (sizeof(off_t) + 1) * before_size + 3 * after_size;
}
};
}
CEREAL_CLASS_VERSION(delta_op, 1);
CEREAL_CLASS_VERSION(delta_op_toc, 2);
static void hash_delta_info(const std::string &path, const delta_info &di, crypto_generichash_state &state);
static void hash_entry(const fs::directory_entry &i, unsigned char(&hash)[crypto_generichash_BYTES]);
static void hash_entry(const fs::directory_entry &i, crypto_generichash_state &state);
static bool operator==(const delta_info &l, const delta_info &r) {
return l.type == r.type && l.size == r.size && std::memcmp(l.hash, r.hash, sizeof(l.hash)) == 0;
}
static void hash_delta_info(const std::string &p, const delta_info &di, crypto_generichash_state &state)
{
crypto_generichash_update(&state, (const unsigned char *) p.c_str(), p.length());
crypto_generichash_update(&state, (const unsigned char *) &di.type, sizeof(decltype(di.type)));
crypto_generichash_update(&state, (const unsigned char *) &di.size, sizeof(decltype(di.size)));
crypto_generichash_update(&state, di.hash, sizeof(di.hash));
}
static void hash_entry(const fs::directory_entry &i, crypto_generichash_state &state)
{
using namespace fs;
auto &p = i.path();
size_t size = 0;
if (is_regular_file(i.status()))
size = (size_t)file_size(i.path());
if (is_regular(i.status())) {
char chunk_buffer[16 * 1024];
size_t chunk_buffer_size = sizeof(chunk_buffer);
size_t chunk_cnt = size / chunk_buffer_size;
size_t last_chunk_size = size % chunk_buffer_size;
std::ifstream file(p.native(), std::ifstream::binary);
if (last_chunk_size != 0)
++chunk_cnt;
else
last_chunk_size = chunk_buffer_size;
for (size_t chunk = 0; chunk < chunk_cnt; ++chunk) {
size_t chunk_size = chunk_buffer_size;
if (chunk == chunk_cnt - 1)
chunk_size = last_chunk_size;
file.read(&chunk_buffer[0], chunk_size);
crypto_generichash_update(&state, (unsigned char *)&chunk_buffer[0], chunk_size);
}
return;
}
if (is_symlink(i.status())) {
path sym_path(fs::read_symlink(p));
std::string s = sym_path.generic_string();
crypto_generichash_update(&state, (unsigned char *) s.c_str(), s.length());
return;
}
if (is_directory(i.status())) {
crypto_generichash_update(&state, (const unsigned char *)"d", 1);
return;
}
}
static void hash_entry(const fs::directory_entry &i, unsigned char (&hash)[crypto_generichash_BYTES])
{
crypto_generichash_state state;
crypto_generichash_init(&state, NULL, 0, sizeof(hash));
hash_entry(i, state);
crypto_generichash_final(&state, hash, sizeof(hash));
}
static fs::path get_temp_directory()
{
using namespace fs;
path p(unique_path());
return temp_directory_path() / p;
}
template <typename Func>
static void process_tree(const fs::path &p, Func &&f)
{
using namespace fs;
recursive_directory_iterator end;
for (recursive_directory_iterator i(p); i != end; ++i) {
if (!is_directory(i->status()) && !is_regular_file(i->status()) && !is_symlink(i->status())) {
continue;
}
path rel_path(sporkel_util::make_path_relative(p, i->path()));
if (!rel_path.empty())
f(rel_path, *i);
}
}
template <size_t N, typename T>
static std::string bin2hex(T(&data)[N])
{
char hex[N * 2 + 1];
sodium_bin2hex(hex, N * 2 + 1, static_cast<unsigned char *>(data), N);
return hex;
}
static delta_info make_delta_info(const fs::directory_entry &i)
{
delta_info di;
di.type = i.status().type();
di.size = 0;
if (is_regular_file(i.status()))
di.size = file_size(i.path());
hash_entry(i, di.hash);
di.deleted = false;
return di;
}
static std::string get_tree_hash(const std::map<std::string, delta_info> &tree)
{
unsigned char hash[crypto_generichash_BYTES];
crypto_generichash_state state;
crypto_generichash_init(&state, NULL, 0, sizeof(hash));
for (auto &i : tree) {
hash_delta_info(i.first, i.second, state);
}
crypto_generichash_final(&state, hash, sizeof(hash));
return bin2hex(hash);
}
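// Note: std::map iterates in key (path) order, so the tree hash above is
// deterministic for a given set of (path, delta_info) pairs regardless of
// insertion order.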
struct sporkel_tmp_dir {
fs::path path;
std::string generic_string;
sporkel_tmp_dir()
: path(get_temp_directory()),
generic_string(path.generic_string())
{}
};
sporkel_tmp_dir_t *sporkel_tmp_dir_create(void)
{
try {
return new sporkel_tmp_dir_t();
} catch (...) {
return nullptr;
}
}
void sporkel_tmp_dir_destroy(sporkel_tmp_dir_t *dir)
{
delete dir;
}
const char *sporkel_tmp_dir_path(const sporkel_tmp_dir_t *dir)
{
return dir->generic_string.c_str();
}
#define sporklog(cb, l, x) do { if (cb == nullptr || cb->log_cb == nullptr) break; std::stringstream s; s << x; cb->log_cb(cb->log_data, l, s.str().c_str()); } while (0)
#define spklogd(cb, x) sporklog(cb, SPORKEL_DEBUG, x)
#define spklogi(cb, x) sporklog(cb, SPORKEL_INFO, x)
#define spklogw(cb, x) sporklog(cb, SPORKEL_WARNING, x)
#define spkloge(cb, x) sporklog(cb, SPORKEL_ERROR, x)
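// The do { ... } while (0) wrapper makes each logging macro usable as a
// single statement, and the temporary stringstream lets call sites chain
// operator<< arguments; the formatted text is handed to the optional log_cb.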
static bool sporkel_patch_apply_internal(fs::path before_path, std::istream &is, fs::path dest,
bool remove_if_failed, sporkel_callback_t *cb);
bool sporkel_patch_apply(const char *before_path_, const char *patch_path_, const char *dest_,
bool remove_if_failed, sporkel_callback_t *cb)
{
try {
sodium_init();
fs::path patch_path(patch_path_);
fs::path dest(dest_);
std::ifstream ifs(patch_path.native(), std::ios::binary);
return sporkel_patch_apply_internal(before_path_, ifs, dest, remove_if_failed, cb);
} catch (...) {
return false;
}
}
static bool sporkel_patch_apply_internal(fs::path before_path, std::istream &is, fs::path dest,
	bool remove_if_failed, sporkel_callback_t *cb)
{
using namespace fs;
using namespace io;
if (before_path.empty() && dest.empty()) {
spkloge(cb, "before_path and dest are empty");
return false;
}
bool target_copied = !(before_path.empty() || dest.empty() || before_path == dest);
if (target_copied) {
spklogi(cb, "copying " << before_path.generic_string() << " to " << dest.generic_string());
sporkel_util::copy_directory_recursive(before_path, dest);
}
bool patch_failed = true;
DEFER{
if (remove_if_failed && patch_failed && target_copied) {
spklogi(cb, "removing " << dest.generic_string() << "...");
remove_all(dest);
}
};
if (before_path.empty())
before_path = dest;
if (dest.empty())
dest = before_path;
filtering_istream filter;
filter.push(lzma_decompressor());
filter.push(is);
delta_op_toc toc;
cereal::PortableBinaryInputArchive archive(filter);
archive(toc);
spklogi(cb, "validating tree initial state " << dest.generic_string() << "...");
std::map<std::string, delta_info> before_tree_state;
process_tree(before_path, [&](path &path, const directory_entry &i) {
before_tree_state[path.generic_string()] = make_delta_info(i);
});
std::string before_tree_hash;
if (toc.require_exact_patch_target)
before_tree_hash = get_tree_hash(before_tree_state);
else {
std::map<std::string, delta_info> before_tree_state_mod;
for (auto &i : toc.ops) {
if (i.type == delta_op_type::ADD)
continue;
auto res = before_tree_state.find(i.path);
if (res == end(before_tree_state)) {
spkloge(cb, "patch contains non-ADD op for non-existing file " << i.path);
return false;
}
before_tree_state_mod.emplace(*res);
//spklogi(cb, res->first << ": " << bin2hex(res->second.hash));
}
before_tree_hash = get_tree_hash(before_tree_state_mod);
}
if (before_tree_hash != toc.before_hash) {
spkloge(cb, "current tree hash " << before_tree_hash << " does not match the expected tree hash " <<
toc.before_hash);
return false;
}
spklogi(cb, "applying patches...");
std::vector<uint8_t> delta;
std::vector<uint8_t> before_file;
std::vector<uint8_t> after_file;
const size_t total = toc.ops.size();
size_t completed = 0;
for (auto &i : toc.ops) {
switch (i.type) {
case delta_op_type::ADD:
{
path p = dest / i.path;
if (i.ftype == file_type::directory_file) {
create_directory(p);
break;
}
// symlink handling here
archive(delta);
sporkel_util::set_file_contents(p, delta.data(), delta.size());
break;
}
case delta_op_type::PATCH:
{
path p = dest / i.path;
auto before_size = file_size(p);
sporkel_util::get_file_contents(p, before_size, before_file);
archive(delta);
auto after_size = sporkel_bspatch_newsize(delta.data(), delta.size());
after_file.resize(after_size);
int res = sporkel_bspatch(before_file.data(), before_file.size(), delta.data(), delta.size(), after_file.data(), after_file.size());
if (res != 0) {
    spkloge(cb, "failed patching " << p.generic_string());
    return false;
}
sporkel_util::set_file_contents(p, after_file.data(), after_file.size());
break;
}
case delta_op_type::KEEP:
break;
case delta_op_type::DELETE:
path p = dest / i.path;
remove_all(p);
break;
}
if (cb != nullptr && cb->progress_cb != nullptr)
cb->progress_cb(cb->progress_data, ++completed, total);
}
spklogi(cb, "validating tree patched state " << dest.generic_string() << "...");
std::map<std::string, delta_info> after_tree_state;
process_tree(dest, [&](path &path, const directory_entry &i) {
after_tree_state[path.generic_string()] = make_delta_info(i);
});
std::string after_tree_hash;
if (toc.require_exact_patch_target)
    after_tree_hash = get_tree_hash(after_tree_state);
else {
delta_info deleted;
deleted.deleted = true;
std::map<std::string, delta_info> after_tree_state_mod;
for (auto &i : toc.ops) {
switch (i.type) {
case delta_op_type::ADD:
case delta_op_type::PATCH:
case delta_op_type::KEEP:
after_tree_state_mod.emplace(i.path, after_tree_state[i.path]);
break;
case delta_op_type::DELETE:
after_tree_state_mod[i.path] = deleted;
}
}
after_tree_hash = get_tree_hash(after_tree_state_mod);
}
if (after_tree_hash != toc.after_hash) {
spkloge(cb, "patched tree hash " << after_tree_hash <<
" does not match the expected tree hash " << toc.after_hash);
return false;
}
patch_failed = false;
return true;
}
static void write_cached_diff(const fs::path &p, const std::vector<uint8_t> &data)
{
fs::path tmp = fs::unique_path();
std::ofstream f(tmp.native(), std::ios::binary | std::ios::trunc);
io::filtering_ostream filter;
filter.push(io::lzma_compressor({}, 4096));
filter.push(f);
cereal::PortableBinaryOutputArchive archive(filter);
archive(data);
create_directories(p.parent_path());
rename(tmp, p);
}
static void read_cached_diff(const fs::path &p, std::vector<uint8_t> &data)
{
std::ifstream f(p.native(), std::ios::binary);
io::filtering_istream filter;
filter.push(io::lzma_decompressor());
filter.push(f);
cereal::PortableBinaryInputArchive archive(filter);
archive(data);
}
static bool sporkel_patch_create_internal(fs::path before_path, fs::path after_path, fs::path patch_path,
unsigned num_threads, unsigned memory_limit, boost::optional<fs::path> cache_path, unsigned lzma_preset,
bool require_exact_patch_target,
sporkel_callback_t *cb);
bool sporkel_patch_create(const char *before_path, const char *after_path, const char *patch_path,
unsigned num_threads, unsigned memory_limit, const char *cache_path,
unsigned lzma_preset,
bool require_exact_patch_target,
sporkel_callback_t *cb)
{
try {
sodium_init();
boost::optional<fs::path> cache;
if (cache_path)
cache = cache_path;
return sporkel_patch_create_internal(before_path, after_path, patch_path,
num_threads, memory_limit, cache, lzma_preset, require_exact_patch_target, cb);
} catch (...) {
return false;
}
}
static bool sporkel_patch_create_internal(fs::path before_path, fs::path after_path, fs::path patch_path,
unsigned num_threads, unsigned memory_limit, boost::optional<fs::path> cache_path, unsigned lzma_preset,
bool require_exact_patch_target,
sporkel_callback_t *cb)
{
using namespace fs;
using namespace io;
if (memory_limit != std::numeric_limits<unsigned int>::max())
    memory_limit = std::max(memory_limit, memory_limit * 1024 * 1024); // MB -> bytes; max() keeps the original value if the multiplication overflows
std::map<std::string, delta_info> before_tree_state;
std::map<std::string, delta_info> after_tree_state_unmod;
std::map<std::string, delta_info> after_tree_state;
std::map<std::string, delta_info> before_tree_state_mod;
delta_info deleted;
deleted.deleted = true;
delta_op_toc toc;
spklogi(cb, "processing " << before_path.generic_string() << "...");
std::thread before_thread([&] {
process_tree(before_path, [&](path &path, const directory_entry &i) {
auto before_info = make_delta_info(i);
auto key(path.generic_string());
before_tree_state.emplace(key, std::move(before_info));
after_tree_state.emplace(std::move(key), deleted);
});
if (require_exact_patch_target)
toc.before_hash = get_tree_hash(before_tree_state);
});
if (num_threads == 1)
before_thread.join();
spklogi(cb, "processing " << after_path.generic_string() << "...");
std::thread after_thread([&] {
process_tree(after_path, [&](path &path, const directory_entry &i) {
auto after_info = make_delta_info(i);
auto key(path.generic_string());
after_tree_state_unmod.emplace(std::move(key), std::move(after_info));
});
if (require_exact_patch_target)
toc.after_hash = get_tree_hash(after_tree_state_unmod);
});
if (before_thread.joinable())
before_thread.join();
after_thread.join();
for (auto &after : after_tree_state_unmod) {
auto &key = after.first;
auto &info = after.second;
auto res = before_tree_state.find(key);
if (require_exact_patch_target && res != end(before_tree_state)) {
if (res->second == info) {
after_tree_state.erase(key);
continue;
}
}
after_tree_state[key] = info;
if (res == end(before_tree_state))
continue;
before_tree_state_mod.emplace(*res);
//spklogi(cb, res->first << ": " << bin2hex(res->second.hash));
}
if (!require_exact_patch_target) {
    toc.before_hash = get_tree_hash(before_tree_state_mod);
    toc.after_hash = get_tree_hash(after_tree_state);
}
// record the mode unconditionally so the apply side validates with the
// same hashing scheme that produced before_hash/after_hash
toc.require_exact_patch_target = require_exact_patch_target;
spklogi(cb, "before tree: '" << before_path.generic_string() << "'");
spklogi(cb, " hash: '" << toc.before_hash << "'");
spklogi(cb, " file count: " << before_tree_state.size());
spklogi(cb, "after tree: '" << after_path.generic_string() << "'");
spklogi(cb, " hash: '" << toc.after_hash << "'");
spklogi(cb, " mod cnt: " << after_tree_state.size());
spklogi(cb, "generating delta operations...");
int a_op_cnt = 0;
int b_op_cnt = 0;
int d_op_cnt = 0;
toc.ops.reserve(after_tree_state.size() * 2);
std::vector<deferred_patch_info> patch_infos;
for (auto &i : after_tree_state) {
auto &after_info = i.second;
if (after_info.deleted) {
d_op_cnt++;
toc.ops.emplace_back(delta_op_type::DELETE, i.first, file_type::status_unknown);
continue;
}
auto res = before_tree_state.find(i.first);
if (res == end(before_tree_state)) {
a_op_cnt++;
toc.ops.emplace_back(delta_op_type::ADD, i.first, after_info.type);
continue;
}
auto &before_info = res->second;
if (!require_exact_patch_target && before_info == after_info) {
toc.ops.emplace_back(delta_op_type::KEEP, i.first, after_info.type);
continue;
}
if (before_info.type != after_info.type) {
d_op_cnt++; a_op_cnt++;
toc.ops.emplace_back(delta_op_type::DELETE, i.first, before_info.type);
toc.ops.emplace_back(delta_op_type::ADD, i.first, after_info.type);
}
else {
b_op_cnt++;
toc.ops.emplace_back(delta_op_type::PATCH, i.first, before_info.type);
boost::optional<path> cache_file_path;
if (cache_path)
cache_file_path = cache_path.get() / i.first / bin2hex(before_info.hash) / bin2hex(after_info.hash);
if (cache_file_path && exists(cache_file_path.get())) {
read_cached_diff(cache_file_path.get(), toc.ops.back().patch);
continue;
}
size_t max_size = sporkel_bsdiff_patchsize_max(before_info.size, after_info.size);
patch_infos.emplace_back(before_info.size, after_info.size, max_size,
before_path / i.first, after_path / i.first, &toc.ops.back().patch);
if (cache_file_path)
patch_infos.back().cache_path = cache_file_path.get();
}
}
std::sort(begin(patch_infos), end(patch_infos),
[](const deferred_patch_info &a, const deferred_patch_info &b) {
return a.max_mem_usage() > b.max_mem_usage();
});
const size_t buffer_size = std::accumulate(begin(patch_infos), end(patch_infos), size_t{0},
    [](size_t start, const deferred_patch_info &b) {
        return start + b.max_patch_size;
    });
auto min_memory_limit = buffer_size + (patch_infos.empty() ? 0 : patch_infos.front().max_mem_usage());
spklogi(cb, "memory required: " << static_cast<unsigned>(min_memory_limit / 1024 / 1024 + 1) << " MB\n");
if (memory_limit != std::numeric_limits<unsigned int>::max())
spklogi(cb, "memory limit: " << static_cast<unsigned>(memory_limit / 1024 / 1024) << " MB\n");
if (min_memory_limit > memory_limit) {
spkloge(cb, "memory limit < required memory for largest patch");
return false;
}
spklogi(cb, d_op_cnt << " deletions");
spklogi(cb, a_op_cnt << " additions");
spklogi(cb, b_op_cnt << " bpatches (" << static_cast<int>(b_op_cnt - patch_infos.size()) << " cached)");
size_t memory_used = buffer_size;
std::mutex patch_info_mutex;
std::condition_variable wake_threads;
spklogi(cb, "using " << num_threads << " threads (hw: " << std::thread::hardware_concurrency() << ")");
std::vector<std::thread> patcher_threads;
patcher_threads.reserve(num_threads);
const size_t total = patch_infos.size();
size_t completed = 0;
for (unsigned i = 0; i < num_threads && !patch_infos.empty(); i++) {
patcher_threads.emplace_back([&]() {
std::vector<uint8_t> p1_data;
std::vector<uint8_t> p2_data;
for (;;) {
deferred_patch_info *work_item = nullptr;
bool all_done = true;
{
auto lock = std::unique_lock<std::mutex>(patch_info_mutex);
for (auto &info : patch_infos) {
all_done = all_done && info.done;
if (!info.done && !info.processing && info.max_mem_usage() < (memory_limit - memory_used)) {
work_item = &info;
break;
}
}
if (work_item) {
work_item->processing = true;
memory_used += work_item->max_mem_usage();
}
}
if (all_done)
return;
if (!work_item) {
    // nothing fits in the memory budget right now; block briefly instead of
    // spinning (the original always-true predicate made wait() return
    // immediately). The timeout also guards against a notify_all() that
    // lands between the scan above and this wait.
    auto lock = std::unique_lock<std::mutex>(patch_info_mutex);
    wake_threads.wait_for(lock, std::chrono::milliseconds(50));
    continue;
}
sporkel_util::get_file_contents(work_item->before_path, work_item->before_size, p1_data);
sporkel_util::get_file_contents(work_item->after_path, work_item->after_size, p2_data);
int actual_size = sporkel_bsdiff(p1_data.data(), work_item->before_size,
p2_data.data(), work_item->after_size, work_item->patch->data(),
work_item->max_patch_size);
work_item->patch->resize(actual_size);
if (!work_item->cache_path.empty())
write_cached_diff(work_item->cache_path, *work_item->patch);
{
auto lock = std::unique_lock<std::mutex>(patch_info_mutex);
work_item->done = true;
memory_used -= work_item->max_mem_usage();
if (cb != nullptr && cb->progress_cb != nullptr)
cb->progress_cb(cb->progress_data, ++completed, total);
}
wake_threads.notify_all();
}
});
}
for (auto &i : patcher_threads)
i.join();
std::ofstream ofs(patch_path.native(), std::ios::binary);
filtering_ostream filter;
filter.push(lzma_compressor(lzma_params(lzma_preset)), 4096);
filter.push(ofs);
cereal::PortableBinaryOutputArchive archive(filter);
archive(toc);
std::vector<uint8_t> delta;
for (auto &i : toc.ops) {
if (i.ftype != file_type::regular_file)
continue;
switch (i.type) {
case delta_op_type::ADD:
{
path p(after_path / path(i.path));
size_t s = file_size(p);
sporkel_util::get_file_contents(p, s, delta);
archive(delta);
break;
}
case delta_op_type::PATCH:
archive(i.patch);
break;
case delta_op_type::KEEP:
case delta_op_type::DELETE:
break;
}
}
return true;
}
#undef sporklog
#undef spklogd
#undef spklogi
#undef spklogw
#undef spkloge
|
{"hexsha": "3739bb8426f665ba2b99f0ee6d7272473b7d8f06", "size": 22756, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sporkel/src/patch.cpp", "max_stars_repo_name": "kc5nra/sporkel", "max_stars_repo_head_hexsha": "842ed61f4262b20098545b58aeee4db487143361", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2016-10-18T23:46:45.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-06T04:09:58.000Z", "max_issues_repo_path": "sporkel/src/patch.cpp", "max_issues_repo_name": "kc5nra/sporkel", "max_issues_repo_head_hexsha": "842ed61f4262b20098545b58aeee4db487143361", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2017-05-06T04:15:10.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-06T04:22:20.000Z", "max_forks_repo_path": "sporkel/src/patch.cpp", "max_forks_repo_name": "kc5nra/sporkel", "max_forks_repo_head_hexsha": "842ed61f4262b20098545b58aeee4db487143361", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9901599016, "max_line_length": 169, "alphanum_fraction": 0.7029354895, "num_tokens": 6137}
|
# lets try to optimize this
import numpy as np
import scipy.misc as sc
import csv
import itertools
from itertools import combinations
import random
import pprint
import sys
import os
from evaluators import payout
from hand_scoring import get_hand_type
import timeit
start_time = timeit.default_timer()
print_color = { 1 : 'C', 2: 'S', 3: 'D', 4: 'H' }
print_num = {1:'A', 2:'2', 3:'3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9', 10:'10', 11:'J', 12:'Q', 13:'K'}  # deck values below run 0-12, so look up with value + 1
def make_deck():
# initialize the 52 cards
color = [1,2,3,4]  # names of the suits don't matter
value = [0,1,2,3,4,5,6,7,8,9,10,11,12]
deck = []
for c in color:
for v in value:
deck.append([c,v])
random.shuffle(deck)
return deck
def deal_hand(deck):
hand = [deck.pop() for _ in range(5)]
return hand, deck
def draw_cards(deck, hand, n):
    # draw n cards from the top of the deck (pop both returns and removes)
    for _ in range(n):
        hand.append(deck.pop())
    return hand
def check_all_possible_holds(hand):
pass
"""
To optimize let's solve the playing strategy first.
Create a lookup table where based on cards gives us best EV play
"""
# discard zero cards
d_zero = np.zeros(2598960)
# discard one card
d_one = np.zeros([270725, 16])
# discard two cards
d_two = np.zeros([22100, 16])
# discard three cards
d_three = np.zeros([1326, 16])
# discard four cards
d_four = np.zeros([52, 16])
# discard five cards
d_five = np.zeros(16)
counter = 0
# for _ in itertools.permutations(cards, 5):
# counter += 1
# 16 is for the maximum number of paying hands on the draw
# loop through 2,598,960 combinations of 5 cards out of 52
# score according to poker value
# put the score in d_zero.
# first hand in element 0
# 2nd hand in element 1...
# for i, hand in enumerate(itertools.combinations(yo, 5)):
# d_zero[i] = payout(hand)
# dest_file = 'data/5_card_combos.csv'
# with open(dest_file, 'r') as dest_f:
# data_iter = csv.reader(dest_f, delimiter = '\n')
# data = [data for data in data_iter]
# data_array = np.asarray(data)
deck = make_deck()
hand, deck = deal_hand(deck)
# all possible holds
# hold_5 = hand
# hold_4_combos = combinations(hand, 4)
# hold_3_combos = combinations(hand, 3)
# hold_2_combos = combinations(hand, 2)
# hold_1_combos = combinations(hand, 1)
# hold_0 = draw_new_hand(deck)
print(timeit.default_timer() - start_time)
# for each of the 5 ways to choose 4 cards on the deal
# translate the four cards into an index number from
# 0 to 270,724
# then increment element[index number][hand score] of d_one by 1
# for i, hand in enumerate(itertools.combinations(yo, 4)):
# d_one[i] = payout(hand)
# for each of the 10 ways to choose 3 out of 5 cards on the deal, translate
# the three cards into an index number from 0 to 22,099
# then increment element[index number][hand score] of d_two by 1
# For each of the 10 ways to choose 2 out of 5 cards on the deal,
# translate the two cards into an index number from 0 to 1,325,
# and increment element [index number][hand score] of array3 by 1.
# For each of the 5 ways to choose 1 out of 5 cards on the deal, translate the card into an index number from 0 to 51, and increment
# element [index number][hand score] of array4 by 1.
# Increment element [hand score] of array5 by 1.
# Next, loop through the 134,459 classes of hands explained above.
# To determine the value of holding all five cards, translate the five cards to an index number,
# and look up the poker value in d_zero.
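# A minimal sketch (not part of the original script; assumes Python 3.8+ for
# math.comb) of the "translate cards to an index number" step described above:
# the combinatorial number system ranks each k-card combination of a 52-card
# deck, so e.g. every 2-card hold maps to a unique index in [0, 1325], the
# range quoted above for two held cards.
from math import comb

def combo_rank(cards):
    """Lexicographic rank of a combination of distinct card ids in 0..51."""
    rank = 0
    for i, c in enumerate(sorted(cards)):
        rank += comb(c, i + 1)
    return rank

# combo_rank((0, 1)) == 0 and combo_rank((50, 51)) == 1325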
|
{"hexsha": "de6379deeb3d036372319aad8478ab442111fc0e", "size": 3477, "ext": "py", "lang": "Python", "max_stars_repo_path": "video_poker_sim/optimized.py", "max_stars_repo_name": "nickweinberg/Python-Video-Poker-Sim", "max_stars_repo_head_hexsha": "f5a71da5c2c7e4926bd8b5f20fb83aa44dda56de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "video_poker_sim/optimized.py", "max_issues_repo_name": "nickweinberg/Python-Video-Poker-Sim", "max_issues_repo_head_hexsha": "f5a71da5c2c7e4926bd8b5f20fb83aa44dda56de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "video_poker_sim/optimized.py", "max_forks_repo_name": "nickweinberg/Python-Video-Poker-Sim", "max_forks_repo_head_hexsha": "f5a71da5c2c7e4926bd8b5f20fb83aa44dda56de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8150684932, "max_line_length": 132, "alphanum_fraction": 0.6879493817, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1036}
|
#include "test_qssintegrator.h"
#include <boost/format.hpp>
#include <iostream>
#include <string>
using std::cout;
using std::endl;
using boost::format;
void QSSTestProblem::odefun(double t, const dvector& y, dvector& q, dvector& d)
{
    // csdfe(y, q, d, t)
    // description:
    //     derivative function evaluator (gsub) for an atmospheric chemical
    //     relaxation test problem involving cesium and cesium ions. formation
    //     and loss rates are calculated for this set of "stiff ordinary
    //     differential equations" that was suggested by d. edelson of
    //     bell laboratories.
    // argument list definitions:
    //     y(i)    r*4   current values of the functions plus the i/o
    //                   extra data at the end of the array that may be
    //                   passed back and forth between "csdfe" and the
    //                   main program. locations in y(i) which represent
    //                   the functions being advanced should not be
    //                   tampered with here.
    //     q(i)    r*4   total formation rates.                         i
    //     d(i)    r*4   total loss rates.                              i
    //     t       r*4   the value of the independent variable.         i
    // utilize local storage for variables.
    double o2m = y[0];
    double csp = y[1];
    double cs = y[2];
    double cso2 = y[3];
    double o2 = y[4];
    double n2 = y[5];
    // calculate electron density for local use and transmission back to
    // the main program via y(7). however in this case this value should
    // not be trusted since "chemeq" will not call the "gsub" with the
    // latest function values after the final step has converged. y(7)
    // will be one iteration behind in this case. y(7) and y(6) are
    // examples, though, of how data may be transferred between the "gsub"
    // and the main program.
    double ne = std::max(csp - o2m, 0.0);
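    // (illustrative note, not in the original: with the initial densities set
    // in main() below -- cs+ = 6.2e2, o2- = 5.2e2 -- charge conservation gives
    // ne = 620 - 520 = 100, matching the initial electron density YI[6] = 1.0e2.)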
    // y[6] = ne;
    // calculate reaction rates.
    double cr1 = 5.00e-08 * o2m * csp;
    double cr2 = 1.00e-12 * csp * ne;
    double cr3 = 3.24e-03 * cs;
    double cr4 = 4.00e-01 * o2m;
    double cr5 = 1.00e-31 * o2 * cs * (cs + cso2 + n2 + o2);
    double cr6 = 1.24e-30 * o2 * o2 * ne;
    double cr7 = 1.00e-31 * o2 * n2 * ne;
    // calculate total formation rates (q(i)) and total loss rates (d(i))
    // for each species.
    // o2m
    q[0] = cr6 + cr7;
    d[0] = cr1 + cr4;
    // cs+
    q[1] = cr3;
    d[1] = cr1 + cr2;
    // cs
    q[2] = cr1 + cr2;
    d[2] = cr3 + cr5;
    // cso2 (no loss term is set here; the original adjustment is kept commented out)
    q[3] = cr5;
    // q(4) = q(4) - 1.00e-31*o2*cs*cso2
    // d(4) = - 1.00e-31*o2*cs*cso2
    // o2
    q[4] = cr1 + cr4;
    d[4] = cr5 + cr6 + cr7;
}
int main(int argc, char** argv) {
    // This is the driver program for the seven-species cesium
    // mechanism test problem. The code integrates the system
    // MXCASE times using different values of the chemeq2 variable
    // epsmin (set by passing an entry from array EPS through
    // CHEMSP before each integration).
    QSSTestProblem qssSolver;
    // PROGRAM SPECIFICATIONS.
    dvector Y(10);
    dvector YF(10);
    dvector YMIN(10, 1e-20);
    dvector YI(10);
    dvector epsil(10);
    vector<std::string> SPSYM(7);
    SPSYM[0] = "O2-";
    SPSYM[1] = "CS+";
    SPSYM[2] = "CS";
    SPSYM[3] = "CSO2";
    SPSYM[4] = "O2";
    SPSYM[5] = "N2";
    SPSYM[6] = "NE";
    // For this example, the external subroutine that calculates the
    // source terms is called CSDFE.
    int MXCASE = 9;
    double EPS[15] = {0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005,
                      0.00001, 0.000005, 0.000001, 5e-7, 1e-7, 5e-8, 1e-8};
    //1000 FORMAT('CASE NO. ', I5, ' PARAMETERS;', /,
    //    .       ' CONVERGENCE PARAMETER EPS = ', 1PE10.3, /,
    //    .       ' INNER LOOP LENGTH;', I5)
    //1001 FORMAT(/, ' SPECIE      Y - INITAL     Y - FINAL ',
    //    .       '     Y - SOLUTION   REL ERR')
    //1002 FORMAT(5X, A4, 1P3E15.6, E10.3)
    //1003 FORMAT(/, ' T - INITIAL = (', 1PE10.3, ') T - FINAL = (',
    //    .       E10.3, ')')
    //1004 FORMAT(/' INTEGRATION STATISTICS;')
    //1005 FORMAT(' CPU TIME USED FOR INTEGRATION;', 1PE10.3,
    //    .       ' SEC., CPU TIME NORMALIZED;', I8)
    //1006 FORMAT(' SUM OF THE RELATIVE ERRORS SQUARED; ', 1PE10.3)
    //1007 FORMAT(/)
    // Note that the timing routines included may not work on
    // all systems. Extra timing options are included as comments.
    // INITIALIZE CONTROL PARAMETERS.
    // INLP allows the user to subdivide the interval over which
    // each test is run. For INLP=1, CHEMEQ2 is sent the full
    // interval TF-TI (specified below) as the global timestep.
    int INLP = 1;
    // For this particular test, the electron number density is not
    // integrated. The other five reacting species are integrated,
    // and the electron density is found through charge conservation.
    // This calculation is done within CSDFE. Therefore, NA = 5 is
    // the number of equations that are integrated, but NS = 7 is the
    // number of species. Species to be integrated must be placed in the
    // first NA positions within the Y array. CHEMEQ2 only works with
    // these first NA entries since NA is passed in the argument list
    // below, but all NS values are available to and used by CSDFE.
    int NS = 7;
    // int NA = 5;
    // "TI" - INITIAL TIME, "TF" - FINAL TIME.
    double TI = 0.0;
    double TF = 1000.0;
    double DELTAT = (TF - TI) / INLP;
    // STORE INITIAL (TI = 0.0) AND FINAL (TF = 1000.0) VALUES.
    // O2-
    YI[0] = 5.200e+02;
    YF[0] = 2.59139492061e+04;
    // CS+
    YI[1] = 6.200e+02;
    YF[1] = 7.55718460300e+04;
    // CS
    YI[2] = 1.000e+12;
    YF[2] = 1.53194051722e+03;
    // CSO2
    YI[3] = 0;
    YF[3] = 9.99999923516e+11;
    // O2
    YI[4] = 3.600e+14;
    YF[4] = 3.59000000051e+14;
    // N2
    YI[5] = 1.400e+15;
    YF[5] = 1.40000000000e+15;
    // NE
    YI[6] = 1.000e+02;
    YF[6] = 4.96578968239e+04;
    // LOOP OVER THE TEST CASES.
    for (int ICASE = 0; ICASE < MXCASE; ICASE++) {
        cout << ICASE << ", " << EPS[ICASE] << ", " << INLP << endl;
        qssSolver.epsmin = EPS[ICASE];
        qssSolver.itermax = 5;
        qssSolver.ymin = YMIN;
        // RESET "Y" TO INITIAL VALUES "YI".
        for (int i = 0; i < NS; i++) {
            Y[i] = YI[i];
        }
        // INNER LOOP TO DETERMINE OVERHEAD OR RELATIVE STARTING EFFICIENCY
        // OF THE INTEGRATION SCHEME BEING TESTED.
        for (int istep = 0; istep < INLP; istep++) {
            // CALL INTEGRATOR.
            // CALL CHEMEQ2(DELTAT, CSDFE, NA, Y)
            qssSolver.initialize(Y, TI);
            qssSolver.integrateToTime(DELTAT);
        }
        Y = qssSolver.y;
        // Calculate the final electron density from the densities of the other charged species
        Y[6] = Y[1] - Y[0];
        // CALCULATE RELATIVE ERROR.
        double sum = 0.0;
        for (int i = 0; i < NS; i++) {
            epsil[i] = std::abs(Y[i] - YF[i]) / std::min(Y[i], YF[i]);
            sum += epsil[i] * epsil[i];
        }
        // Root-mean-square error is calculated using ns-1 (rather than ns)
        // since N2 is inert.
        sum = sqrt(sum / (NS - 1));
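        // (illustrative note, not in the original: this is the root-mean-square
        // of the per-species relative errors, sum = sqrt(SUM_i epsil[i]^2 / (NS-1)),
        // with the divisor reduced to NS-1 because inert N2 contributes no error.)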
        // PRINT RESULTS.
        cout << format("t - tInitial = %f ; t - tFinal = %f\n") % TI % TF;
        cout << "Species     Y-Initial      Y-Final        Y-Solution     Rel. Error\n";
        for (int i = 0; i < NS; i++) {
            cout << format(" %6s %012.6e %012.6e %012.6e %012.6e\n") %
                SPSYM[i] % YI[i] % YF[i] % Y[i] % epsil[i];
        }
        cout << "Integration Statistics:" << endl;
        // WRITE(LO, 1006) SUM
        // WRITE(LO, 1005) CPUT, TNORM
        cout << format("Sum of the relative errors squared: %12.6e") % sum << endl;
        // WRITE(*,699) EPS(ICASE),
        //     &        CPUT,
        //     &        TNORM
        //     //       & INT(CPUT*1024. + .5)
        //     &        ,sum
        //699 format(1x,25HEPS, time, ticks, error: ,E7.1,2x,e10.4,2x,
        //     &     I5,2x,e10.4)
        // WRITE(LO, 1007)
        // CALL CHEMCT(TF)
    }
    return 0;
}
|
{"hexsha": "a58379ed058327c4d579fc762eaf1a245d2fba1c", "size": 8104, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/old/test_qssintegrator.cpp", "max_stars_repo_name": "BangShiuh/ember", "max_stars_repo_head_hexsha": "f0a70c7e01ae0dd7b5bd5ee70c8fc5d3f7207388", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27.0, "max_stars_repo_stars_event_min_datetime": "2016-11-22T08:29:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T12:15:39.000Z", "max_issues_repo_path": "test/old/test_qssintegrator.cpp", "max_issues_repo_name": "minhbau/ember", "max_issues_repo_head_hexsha": "f0a70c7e01ae0dd7b5bd5ee70c8fc5d3f7207388", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11.0, "max_issues_repo_issues_event_min_datetime": "2015-02-12T14:12:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-15T15:53:03.000Z", "max_forks_repo_path": "test/old/test_qssintegrator.cpp", "max_forks_repo_name": "minhbau/ember", "max_forks_repo_head_hexsha": "f0a70c7e01ae0dd7b5bd5ee70c8fc5d3f7207388", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20.0, "max_forks_repo_forks_event_min_datetime": "2016-05-15T04:51:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T09:07:35.000Z", "avg_line_length": 32.546184739, "max_line_length": 85, "alphanum_fraction": 0.549851925, "num_tokens": 2732}
|
input := FileTools:-Text:-ReadFile("AoC-2021-17-input.txt" ):
|
{"hexsha": "e4dbed639ecda1a5f2a33bb617d80b76e971fb6f", "size": 62, "ext": "mpl", "lang": "Maple", "max_stars_repo_path": "Day17/AoC17-Maple.mpl", "max_stars_repo_name": "johnpmay/AdventOfCode2021", "max_stars_repo_head_hexsha": "b51756bcebea662333072cf518cf040a962ef8b7", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-04T18:24:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T18:24:03.000Z", "max_issues_repo_path": "Day17/AoC17-Maple.mpl", "max_issues_repo_name": "johnpmay/AdventOfCode2021", "max_issues_repo_head_hexsha": "b51756bcebea662333072cf518cf040a962ef8b7", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Day17/AoC17-Maple.mpl", "max_forks_repo_name": "johnpmay/AdventOfCode2021", "max_forks_repo_head_hexsha": "b51756bcebea662333072cf518cf040a962ef8b7", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0, "max_line_length": 61, "alphanum_fraction": 0.6935483871, "num_tokens": 21}
|
[STATEMENT]
lemma map_of_eq_None_iff:
"(map_of xys x = None) = (x \<notin> fst ` (set xys))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (map_of xys x = None) = (x \<notin> fst ` set xys)
[PROOF STEP]
by (induct xys) simp_all
|
{"llama_tokens": 109, "file": null, "length": 1}
|
"""This module contains functions relating to function fitting"""
import matplotlib
import numpy as np
import datetime
from floodsystem.datafetcher import fetch, fetch_measure_levels
### TASK 2F
def polyfit(dates, levels, p):
    """Given the water level time history, this function computes the least squares polynomial of degree p"""
    # Convert dates into floats
    date_floats = matplotlib.dates.date2num(dates)
    # Obtain date shift
    d0 = date_floats[0]
    # Find the coefficients of the best fit polynomial
    # (dates are shifted by d0 so the fit is well conditioned around zero)
    coeff = np.polyfit(date_floats - d0, levels, p)
    # Convert coefficients into a polynomial
    poly = np.poly1d(coeff)
    return poly, d0

### TASK 2G
def rising_check(station, p):
    """Given a station, this function finds the best fit polynomial of degree p and uses its gradient to determine
    whether the water level is rising or falling"""
    # Obtain dates and levels information for a particular station
    dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=5))
    # Convert dates to floats
    date_floats = matplotlib.dates.date2num(dates)
    # Obtain the best fit polynomial
    poly, d0 = polyfit(dates, levels, p)
    # Find the derivative of the polynomial function
    derivative = np.polyder(poly)
    # Find the gradient towards the end, in the same shifted coordinates used for the fit
    check = derivative(date_floats[-2] - d0)
    return check
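# A minimal usage sketch (not part of the original module; `station` is assumed
# to be a floodsystem station object exposing `measure_id` and `name`):
#
#   poly, d0 = polyfit(dates, levels, p=4)
#   latest = matplotlib.dates.date2num(dates)[-1] - d0
#   print(poly(latest))            # fitted level at the latest sample
#   if rising_check(station, p=4) > 0:
#       print(station.name + ': water level is rising')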
|
{"hexsha": "a384dacbb6a19142dbf67758a05671f45b4a6517", "size": 1372, "ext": "py", "lang": "Python", "max_stars_repo_path": "floodsystem/analysis.py", "max_stars_repo_name": "ryrolio/IA_Lent_Project", "max_stars_repo_head_hexsha": "9023dfb199b5db7676fef61f0fca46ab69707461", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "floodsystem/analysis.py", "max_issues_repo_name": "ryrolio/IA_Lent_Project", "max_issues_repo_head_hexsha": "9023dfb199b5db7676fef61f0fca46ab69707461", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "floodsystem/analysis.py", "max_forks_repo_name": "ryrolio/IA_Lent_Project", "max_forks_repo_head_hexsha": "9023dfb199b5db7676fef61f0fca46ab69707461", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-28T11:46:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T11:46:05.000Z", "avg_line_length": 29.1914893617, "max_line_length": 124, "alphanum_fraction": 0.7361516035, "include": true, "reason": "import numpy", "num_tokens": 329}
|
#ifndef DERIVATIVES_H_5CHQ89V7
#define DERIVATIVES_H_5CHQ89V7
#include <gsl/gsl>
#include <tuple>
#include <type_traits>
namespace sens_loc::math {

/// Calculate the first derivative with the central differential quotient.
/// \tparam Real precision of the calculation
/// \param y__1 \f$y_{i-1}\f$
/// \param y_1 \f$y_{i+1}\f$
/// \param dx grid spacing \f$dx\f$ (the quotient divides by \f$2 \cdot dx\f$)
/// \returns first derivative at this point of order \f$\mathcal{O}(dx^2)\f$
template <typename Real>
inline Real first_derivative_central(Real y__1, Real y_1, Real dx) noexcept {
    static_assert(std::is_floating_point_v<Real>);
    Expects(dx > Real(0.));
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
    return (y_1 - y__1) / (Real(2.) * dx);
}
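/// Worked example (not part of the original header): for \f$y = x^2\f$
/// sampled around \f$x = 1\f$ with \f$dx = 0.1\f$,
/// first_derivative_central(0.81, 1.21, 0.1) == (1.21 - 0.81) / 0.2 == 2.0,
/// which matches \f$y' = 2x\f$ at \f$x = 1\f$ exactly -- the central quotient
/// is exact for polynomials up to degree two.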
/// Calculate the second derivative with the central differential quotient.
/// \tparam Real precision of the calculation
/// \param y__1 \f$y_{i-1}\f$
/// \param y_0 \f$y_{i}\f$
/// \param y_1 \f$y_{i+1}\f$
/// \param dx grid spacing \f$dx\f$
/// \returns second derivative at this point of order \f$\mathcal{O}(dx^2)\f$
template <typename Real>
inline Real
second_derivative_central(Real y__1, Real y_0, Real y_1, Real dx) noexcept {
    static_assert(std::is_floating_point_v<Real>);
    Expects(dx > Real(0.));
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
    return (y_1 + y__1 - Real(2.) * y_0) / (dx * dx);
}
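/// Worked example (not part of the original header): for the same
/// \f$y = x^2\f$ samples, second_derivative_central(0.81, 1.0, 1.21, 0.1)
/// == (1.21 + 0.81 - 2.0) / 0.01 == 2.0, matching \f$y'' = 2\f$.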
/// Calculate the derivatives for a surface patch.
///
/// Index convention:
/// \f$d\_\_1 == d_{-1}\f$
/// \f$d\_\_0 == d_{0}\f$
/// \f$d\_1 == d_{1}\f$
///
/// Angle convention:
/// \f$\varphi\f$ -> u direction
/// \f$\theta\f$ -> v direction
///
/// \tparam Real precision of the calculation
/// \param d__1__1,d__1__0,d__1_1 neighbours "above" the central pixel
/// \param d__0__1,d__0__0,d__0_1 same row as the central pixel
/// \param d_1__1,d_1__0,d_1_1 row "after" the central pixel
/// \param d_phi angle between rays in x direction \f$(u - 1, u + 1)\f$
/// \param d_theta angle between rays in y direction \f$(v - 1, v + 1)\f$
/// \param d_phi_theta angle between rays in diagonal direction
/// \f$(u - 1, v + 1)\f$
/// \returns partial derivatives \f$(f_u, f_v, f_{uu}, f_{vv}, f_{uv})\f$
/// \pre the depth values should be positive, as they encode depth values
/// \pre \p d_phi, \p d_theta, \p d_phi_theta are all positive angles
// clang-format off
template <typename Real = float>
inline std::tuple<Real, Real, Real, Real, Real>
derivatives(Real d__1__1, Real d__1__0, Real d__1_1,
            Real d__0__1, Real d__0__0, Real d__0_1,
            Real d_1__1,  Real d_1__0,  Real d_1_1,
            Real d_phi, Real d_theta, Real d_phi_theta) noexcept {
    static_assert(std::is_floating_point_v<Real>);
    Expects(d_phi > 0.);
    Expects(d_theta > 0.);
    Expects(d_phi_theta > 0.);
    (void) d__1__1;
    (void) d__1_1;
    (void) d_1__1;
    (void) d_1_1;
    // clang-format on
    const Real f_u = math::first_derivative_central(d__0__1, d__0_1, d_phi);
    const Real f_v = math::first_derivative_central(d__1__0, d_1__0, d_theta);
    const Real f_uu =
        math::second_derivative_central(d__0__1, d__0__0, d__0_1, d_phi);
    const Real f_vv =
        math::second_derivative_central(d__1__0, d__0__0, d_1__0, d_theta);
    // Note: the mixed derivative f_uv is approximated with the same vertical
    // stencil as f_vv, scaled by d_phi_theta; the diagonal neighbours are
    // accepted but unused (see the (void) casts above).
    const Real f_uv =
        math::second_derivative_central(d__1__0, d__0__0, d_1__0, d_phi_theta);
    return std::make_tuple(f_u, f_v, f_uu, f_vv, f_uv);
}
} // namespace sens_loc::math

#endif /* end of include guard: DERIVATIVES_H_5CHQ89V7 */
|
{"hexsha": "1df168888a06557143f36074a538a3f354f6a9df", "size": 3458, "ext": "h", "lang": "C", "max_stars_repo_path": "src/include/sens_loc/math/derivatives.h", "max_stars_repo_name": "JonasToth/depth-conversions", "max_stars_repo_head_hexsha": "5c8338276565d846c07673e83f94f6841006872b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-09-30T07:09:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T09:14:35.000Z", "max_issues_repo_path": "src/include/sens_loc/math/derivatives.h", "max_issues_repo_name": "JonasToth/depth-conversions", "max_issues_repo_head_hexsha": "5c8338276565d846c07673e83f94f6841006872b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/include/sens_loc/math/derivatives.h", "max_forks_repo_name": "JonasToth/depth-conversions", "max_forks_repo_head_hexsha": "5c8338276565d846c07673e83f94f6841006872b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2857142857, "max_line_length": 79, "alphanum_fraction": 0.6766917293, "num_tokens": 1172}
|
import os
import cv2
import numpy as np
import tensorflow as tf
from datasets.constants import DatasetName, DatasetType
from datasets.constants import _N_TIME_STEPS
from datasets.msasl.constants import N_CLASSES as MSASL_N_CLASSES
from datasets.signum.constants import N_CLASSES as SIGNUM_N_CLASSES
from datasets.utils import _tf_records_dir
def tf_record_dataset(dataset_name: DatasetName, dataset_type: DatasetType, ordered=False):
    """Returns a `TFRecordDataset` of the requested dataset.

    Arguments:
        dataset_name: The name of the dataset.
        dataset_type: The type of the dataset.
        ordered: Whether the examples should be fetched in order.

    Returns:
        A `TFRecordDataset` of the requested dataset.
    """
    path = f'{_tf_records_dir(dataset_name)}/{dataset_type.value}'
    files = [f'{path}/{file}' for file in os.listdir(path)]
    num_parallel_reads = 1 if ordered else tf.data.experimental.AUTOTUNE
    dataset = tf.data.TFRecordDataset(files, num_parallel_reads=num_parallel_reads)
    if not ordered:
        options = tf.data.Options()
        options.experimental_deterministic = False
        dataset = dataset.with_options(options)
    return dataset
def _dataset_counts(dataset_name: DatasetName):
    """Returns the sizes of the `dataset_name` train, validation and test datasets.

    Arguments:
        dataset_name: The name of the dataset.

    Returns:
        A dictionary with an entry of the size for each of the train, validation and test datasets.
    """
    counts = {}
    for dataset_type in DatasetType:
        dataset = tf_record_dataset(dataset_name, dataset_type)
        dataset = dataset.batch(64)
        dataset = dataset.prefetch(1)
        counts[dataset_type.value] = 0
        for records in dataset:
            counts[dataset_type.value] += len(records)
    return counts
def _bytes_feature(bytes_list):
    """Converts a list of bytestrings into a protocol buffer feature message.

    Returns:
        A protocol buffer feature message.
    """
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=bytes_list))

def _float_feature(float_list):
    """Converts a list of floats into a protocol buffer feature message.

    Returns:
        A protocol buffer feature message.
    """
    return tf.train.Feature(float_list=tf.train.FloatList(value=float_list))

def _int64_feature(int64_list):
    """Converts a list of ints into a protocol buffer feature message.

    Returns:
        A protocol buffer feature message.
    """
    return tf.train.Feature(int64_list=tf.train.Int64List(value=int64_list))
def _decode_jpeg(bytestring):
    """Decodes a compressed JPEG image into an ndarray.

    Arguments:
        bytestring: The binary string representation of the compressed JPEG image.

    Returns:
        The ndarray representation of the image using the uint8 data type for the channel values.
    """
    image = np.frombuffer(bytestring, np.uint8)
    return cv2.imdecode(image, cv2.IMREAD_COLOR)
def _transform_frames_for_inspection(examples):
    """Transforms a batch of example frames to be consumed for inspection.

    An individual frame is a 3D tensor consisting of RGB uint8 values within the range [0, 255] represented in the
    `channels_last` data format ([height, width, channels]).

    Arguments:
        examples: A 5D tensor representing a batch of example frames in the [batch, frames, height, width, channels]
            format.

    Returns:
        The transformed batch of example frames.
    """
    return np.array([[_decode_jpeg(frame) for frame in example] for example in examples.numpy()])

def _transform_frames_for_model(examples):
    """Transforms a batch of example frames to be consumed by a model.

    An individual frame is a 3D tensor consisting of RGB float32 values within the range [-1.0, 1.0] represented in the
    `channels_last` data format ([height, width, channels]).

    Arguments:
        examples: A 5D tensor representing a batch of example frames in the [batch, frames, height, width, channels]
            format.

    Returns:
        The transformed batch of example frames.
    """
    frames = np.array([[_decode_jpeg(frame) for frame in example] for example in examples.numpy()])
    frames = frames.astype(np.float32, copy=False)
    # map uint8 values from [0, 255] to [-1.0, 1.0]: x / 127.5 - 1 sends 0 -> -1.0 and 255 -> 1.0
    frames /= 127.5
    frames -= 1.0
    return frames
_FEATURES = {
    'frames': tf.io.FixedLenFeature([_N_TIME_STEPS], tf.string),
    'label': tf.io.FixedLenFeature([], tf.int64),
    'signer': tf.io.FixedLenFeature([], tf.int64)
}
def transform_for_inspection(examples):
    """Transforms a batch of examples to be consumed for inspection.

    The returned frames are represented as RGB uint8 values within the range [0, 255], and the labels and signers are
    represented as their corresponding indices.

    Arguments:
        examples: A batch of serialized `TFRecord` examples.

    Returns:
        A tuple of batches of frames, labels and signers.
    """
    parsed_examples = tf.io.parse_example(examples, _FEATURES)
    frames = tf.py_function(_transform_frames_for_inspection, [parsed_examples['frames']], tf.uint8)
    labels = parsed_examples['label']
    signers = parsed_examples['signer']
    return frames, labels, signers

def transform_for_prediction(examples):
    """Transforms a batch of examples to be consumed for prediction.

    The returned frames are represented as RGB float32 values within the range [-1.0, 1.0], and the labels and signers
    are label encoded.

    Arguments:
        examples: A batch of serialized `TFRecord` examples.

    Returns:
        A tuple of batches of frames, labels and signers.
    """
    parsed_examples = tf.io.parse_example(examples, _FEATURES)
    frames = tf.py_function(_transform_frames_for_model, [parsed_examples['frames']], tf.float32)
    labels = parsed_examples['label']
    signers = parsed_examples['signer']
    return frames, labels, signers

def transform_for_msasl_model(examples):
    """Transforms a batch of `MS-ASL` dataset examples to be consumed for training.

    The returned frames are represented as RGB float32 values within the range [-1.0, 1.0], and the labels are one-hot
    encoded with a depth of `datasets.msasl.constants.N_CLASSES`.

    Arguments:
        examples: A batch of serialized `TFRecord` examples.

    Returns:
        A tuple of batches of frames and labels.
    """
    parsed_examples = tf.io.parse_example(examples, _FEATURES)
    frames = tf.py_function(_transform_frames_for_model, [parsed_examples['frames']], tf.float32)
    labels = tf.one_hot(parsed_examples['label'], MSASL_N_CLASSES)
    return frames, labels

def transform_for_signum_model(examples):
    """Transforms a batch of `SIGNUM` dataset examples to be consumed for training.

    The returned frames are represented as RGB float32 values within the range [-1.0, 1.0], and the labels are one-hot
    encoded with a depth of `datasets.signum.constants.N_CLASSES`.

    Arguments:
        examples: A batch of serialized `TFRecord` examples.

    Returns:
        A tuple of batches of frames and labels.
    """
    parsed_examples = tf.io.parse_example(examples, _FEATURES)
    frames = tf.py_function(_transform_frames_for_model, [parsed_examples['frames']], tf.float32)
    labels = tf.one_hot(parsed_examples['label'], SIGNUM_N_CLASSES)
    return frames, labels
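
# A minimal sketch (not part of the original module) of how the pieces above
# compose into a training input pipeline; `DatasetName.MSASL` and
# `DatasetType.TRAIN` are assumed to be members of the imported enums.
def _example_training_pipeline(batch_size=32):
    dataset = tf_record_dataset(DatasetName.MSASL, DatasetType.TRAIN)
    dataset = dataset.batch(batch_size)
    # the transforms parse *batches* of serialized examples, so map after batching
    dataset = dataset.map(transform_for_msasl_model,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)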
|
{"hexsha": "7194f81ea30ec05e04bcc1f2e1c02ec4855032b6", "size": 7383, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/tf_record_utils.py", "max_stars_repo_name": "rtoengi/transfer-learning-for-sign-language-recognition", "max_stars_repo_head_hexsha": "e0627115e6b68d6b85244d484011bb3895ccf4ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-25T14:11:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-25T14:11:22.000Z", "max_issues_repo_path": "datasets/tf_record_utils.py", "max_issues_repo_name": "rtoengi/transfer-learning-for-sign-language-recognition", "max_issues_repo_head_hexsha": "e0627115e6b68d6b85244d484011bb3895ccf4ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/tf_record_utils.py", "max_forks_repo_name": "rtoengi/transfer-learning-for-sign-language-recognition", "max_forks_repo_head_hexsha": "e0627115e6b68d6b85244d484011bb3895ccf4ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-04-10T01:33:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T06:58:59.000Z", "avg_line_length": 33.7123287671, "max_line_length": 119, "alphanum_fraction": 0.7152918868, "include": true, "reason": "import numpy", "num_tokens": 1653}
|
import torch
import numpy as np
from typing import Optional, Tuple
from src.data.data_transform import DataTransform
import pywt
from pytorch_wavelets import DWTForward, DWTInverse
import pdb
class UNetWavTransform(DataTransform):
    """Pre-processor and post-processor to convert T4C data to
    be compatible with UNet.

    Args:
        stack_time: Decides if the time channels are stacked upon each other
        pre_batch_dim: Whether a batch dimension is present in the data provided
            to the pre-processor
        post_batch_dim: Whether a batch dimension is present in the data provided
            to the post-processor
        crop_pad: Tuple of pixels to crop/pad on each side
    """

    def __init__(self, stack_time: bool = False, pre_batch_dim: bool = False,
                 post_batch_dim: bool = True,
                 num_channels: int = 8,
                 crop_pad: Optional[Tuple[int, int, int, int]] = None,
                 wave: str = "db7", mode: str = "zero",
                 keep_ch: int = 3) -> None:
        self.stack_time = stack_time
        self.pre_batch_dim = pre_batch_dim
        self.post_batch_dim = post_batch_dim
        self.crop_pad = crop_pad
        self.num_channels = num_channels
        self.xfm = DWTForward(J=1, wave=wave, mode=mode)
        self.keep_ch = keep_ch
    def pre_transform(
        self,
        data: np.ndarray,
        from_numpy: bool = False,
        **kwargs
    ) -> torch.Tensor:
        """Transform data from `T4CDataset` to be used by UNet:
        - put time and channels into one dimension
        - single-level DWT, keeping `keep_ch` of the detail sub-bands
        - padding
        """
        if from_numpy:
            data = torch.from_numpy(data).float()
        if not self.pre_batch_dim:
            data = torch.unsqueeze(data, 0)
        if self.stack_time:
            data = self.stack_on_time(data, batch_dim=True)
        Yl, Yh = self.xfm(data)
        if Yl.shape[1] == 96:
            # Yh[0][:, :, 2, :, :] = 0
            Yh[0] = Yh[0][:, :, :self.keep_ch, :, :]
            data = torch.cat((Yl, Yh[0].reshape(1, data.shape[1] * self.keep_ch, Yl.shape[-2], Yl.shape[-1])), 1)
        else:
            data = torch.cat((Yl, Yh[0].reshape(1, data.shape[1] * 3, Yl.shape[-2], Yl.shape[-1])), 1)
        if self.crop_pad is not None:
            zeropad2d = torch.nn.ZeroPad2d(self.crop_pad)
            data = zeropad2d(data)
        if not self.pre_batch_dim:
            data = torch.squeeze(data, 0)
        return data
    def post_transform(
        self, data: torch.Tensor, **kwargs
    ) -> torch.Tensor:
        """Bring data from UNet back to the `T4CDataset` format:
        - separates the common dimension for time and channels
        - cropping
        """
        if not self.post_batch_dim:
            data = torch.unsqueeze(data, 0)
        if self.crop_pad is not None:
            _, _, height, width = data.shape
            left, right, top, bottom = self.crop_pad
            right = width - right
            bottom = height - bottom
            data = data[:, :, top:bottom, left:right]
        # if self.stack_time:
        #     data = self.unstack_on_time(data, batch_dim=True)
        if not self.post_batch_dim:
            data = torch.squeeze(data, 0)
        return data
    def stack_on_time(self, data: torch.Tensor, batch_dim: bool = False):
        """
        `(k, 12, 495, 436, 8) -> (k, 12 * 8, 495, 436)`
        """
        if not batch_dim:
            # `(12, 495, 436, 8) -> (1, 12, 495, 436, 8)`
            data = torch.unsqueeze(data, 0)
        _, num_time_steps, height, width, num_channels = data.shape
        # (k, 12, 495, 436, 8) -> (k, 12, 8, 495, 436)
        data = torch.moveaxis(data, 4, 2)
        # (k, 12, 8, 495, 436) -> (k, 12 * 8, 495, 436)
        data = torch.reshape(data, (data.shape[0],
                                    num_time_steps * num_channels,
                                    height,
                                    width))
        if not batch_dim:
            # `(1, 12 * 8, 495, 436) -> (12 * 8, 495, 436)`
            data = torch.squeeze(data, 0)
        return data
    def unstack_on_time(self, data: torch.Tensor, batch_dim: bool = False):
        """
        `(k, 12 * 8, 495, 436) -> (k, 12, 495, 436, 8)`
        """
        if not batch_dim:
            # `(12 * 8, 495, 436) -> (1, 12 * 8, 495, 436)`
            data = torch.unsqueeze(data, 0)
        _, _, height, width = data.shape
        num_time_steps = int(data.shape[1] / self.num_channels)
        # (k, 12 * 8, 495, 436) -> (k, 12, 8, 495, 436)
        data = torch.reshape(data, (data.shape[0],
                                    num_time_steps,
                                    self.num_channels,
                                    height,
                                    width))
        # (k, 12, 8, 495, 436) -> (k, 12, 495, 436, 8)
        data = torch.moveaxis(data, 2, 4)
        if not batch_dim:
            # `(1, 12, 495, 436, 8) -> (12, 495, 436, 8)`
            data = torch.squeeze(data, 0)
        return data
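
# A minimal shape sanity check (not part of the original module; assumes
# `DataTransform` imposes no extra construction requirements) showing that
# stack_on_time and unstack_on_time are inverses on the T4C tensor layout:
if __name__ == "__main__":
    transform = UNetWavTransform(stack_time=True)
    x = torch.randn(2, 12, 495, 436, 8)  # (batch, time, height, width, channels)
    stacked = transform.stack_on_time(x, batch_dim=True)
    assert stacked.shape == (2, 12 * 8, 495, 436)
    restored = transform.unstack_on_time(stacked, batch_dim=True)
    assert torch.equal(x, restored)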
|
{"hexsha": "586422c37c772638263cf14522cbb83d02349d8e", "size": 5051, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/transformwav.py", "max_stars_repo_name": "shehel/traffic_forecasting", "max_stars_repo_head_hexsha": "63c9fab665f7d48f621e8996290efd0d536dfc09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/transformwav.py", "max_issues_repo_name": "shehel/traffic_forecasting", "max_issues_repo_head_hexsha": "63c9fab665f7d48f621e8996290efd0d536dfc09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/transformwav.py", "max_forks_repo_name": "shehel/traffic_forecasting", "max_forks_repo_head_hexsha": "63c9fab665f7d48f621e8996290efd0d536dfc09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3381294964, "max_line_length": 111, "alphanum_fraction": 0.5256384874, "include": true, "reason": "import numpy", "num_tokens": 1376}
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
"""
Created on Mar 18th 10:58:37 2016
train a continuous-time sequential model
@author: hongyuan
"""
import pickle
import time
import numpy
import theano
from theano import sandbox
import theano.tensor as tensor
import os
import sys
#import scipy.io
from collections import defaultdict
from theano.tensor.shared_randomstreams import RandomStreams
import modules.utils as utils
import modules.models as models
import modules.optimizers as optimizers
import modules.controllers as controllers
import modules.data_processers as data_processers
import run_models
import datetime
dtype=theano.config.floatX
#
import argparse
__author__ = 'Hongyuan Mei'
def main():
    parser = argparse.ArgumentParser(
        description='Training model ... '
    )
    #
    parser.add_argument(
        '-m', '--Model', required=True,
        choices=['hawkes', 'hawkesinhib', 'conttime'],
        help='Which model to train? hawkes (SE-MPP)? hawkesinhib (D-SM-MPP)? conttime (N-SM-MPP)?'
    )
    parser.add_argument(
        '-fd', '--FileData', required=True,
        help='Path of the dataset (e.g. ./data/data_hawkes/)'
    )
    #
    parser.add_argument(
        '-tr', '--TrainRatio',  # required=False,
        default=1.0, type=float,
        help='How much of the data to train on?'
    )
    #
    parser.add_argument(
        '-cl2', '--CoefL2',  # required=False,
        default=0.0, type=float,
        help='Coefficient of the L2 norm'
    )
    #
    parser.add_argument(
        '-d', '--DimLSTM',  # required=False,
        default=64, type=int,
        help='Dimension of the LSTM model'
    )
    parser.add_argument(
        '-s', '--Seed',  # required=False,
        default=12345, type=int,
        help='Seed of the random state'
    )
    #
    parser.add_argument(
        '-fp', '--FilePretrain', required=False,
        help='File of the pretrained model (e.g. ./tracks/track_PID=XX_TIME=YY/model.pkl)'
    )
    parser.add_argument(
        '-tp', '--TrackPeriod',  # required=False,
        default=1000, type=int,
        help='Tracking period of training'
    )
    parser.add_argument(
        '-me', '--MaxEpoch',  # required=False,
        default=50, type=int,
        help='Max number of training epochs'
    )
    parser.add_argument(
        '-sb', '--SizeBatch',  # required=False,
        default=10, type=int,
        help='Size of the mini-batch'
    )
    parser.add_argument(
        '-op', '--Optimizer',  # required=False,
        default='adam', type=str,
        choices=['adam', 'sgd'],
        help='Optimizer used for training'
    )
    parser.add_argument(
        '-mt', '--MultipleTrain',  # required=False,
        default=1, type=int,
        help='Multiple of events to sample (integral) for training'
    )
    parser.add_argument(
        '-md', '--MultipleDev',  # required=False,
        default=10, type=int,
        help='Multiple of events to sample (integral) for dev'
    )
    parser.add_argument(
        '-wt', '--WhatTrack',  # required=False,
        default='loss', type=str,
        choices=['loss', 'rmse', 'rate'],
        help='What to track for early stopping?'
    )
    parser.add_argument(
        '-ls', '--LossType',  # required=False,
        default='loglikehood', type=str,
        choices=['loglikehood', 'prediction'],
        help='Which loss to optimize?'
    )
    parser.add_argument(
        '-lr', '--LearnRate',  # required=False,
        default=1e-3, type=float,
        help='What learning rate to use?'
    )
    parser.add_argument(
        '-pp', '--PartialPredict',  # required=False,
        default=0, type=int,
        choices=[0, 1],
        help='Whether to predict only part of the stream? 0--False, 1--True'
    )
    parser.add_argument(
        '-ps', '--PruneStream',  # required=False,
        default=0, type=int,
        help='Prune stream? Give me the index! 0 is nothing to prune. Note: the index specifies a COMBINATION of event types by its binary coding (e.g. 0--00000, 1--00001, 31--11111 where 1 means this type is pruned)!'
    )
    parser.add_argument(
        '-ds', '--DevIncludedSetting',  # required=False,
        default=0, type=int,
        choices=[0, 1],
        help='Alternative setting (fix tuned hyper-params, train on the combo of train and dev, then test)? 0--False, 1--True Note: in our project, this is ONLY used to compare with prev work on the MIMIC, SO and Financial datasets'
    )
    parser.add_argument(
        '-pf', '--PredictFirst',  # required=False,
        default=1, type=int,
        choices=[0, 1],
        help='Predict the first event? 0--False, 1--True Note: in our project, this is False ONLY on the MIMIC, SO and Financial datasets'
    )
    parser.add_argument(
        '-pl', '--PredictLambda',  # required=False,
        default=0, type=int,
        choices=[0, 1],
        help='Predict Lambda (intensity)? 0--False, 1--True Note: this is used ONLY in intensity evaluation'
    )
    '''
    They train the model on the entire training set and evaluate on test after training, i.e., no dev/validation set.
    We only use this setting when compared with them on their dataset.
    Otherwise, we use the dev/validation set to tune params and early stop, and only evaluate on test after the model is fixed.
    '''
    #
    #
    args = parser.parse_args()
    #
    #
    args.TrainRatio = numpy.float32(args.TrainRatio)
    assert(args.TrainRatio > 0.0 and args.TrainRatio <= 1.0)
    #
    args.CoefL2 = numpy.float32(args.CoefL2)
    assert(args.CoefL2 >= 0.0)
    args.DimLSTM = numpy.int32(args.DimLSTM)
    args.Seed = numpy.int32(args.Seed)
    args.TrackPeriod = numpy.int32(args.TrackPeriod)
    args.MaxEpoch = numpy.int32(args.MaxEpoch)
    args.SizeBatch = numpy.int32(args.SizeBatch)
    args.MultipleTrain = numpy.int32(args.MultipleTrain)
    args.MultipleDev = numpy.int32(args.MultipleDev)
    #
    if args.LossType == 'prediction':
        assert(args.WhatTrack == 'rmse' or args.WhatTrack == 'rate')
    else:
        assert(args.WhatTrack == 'loss')
    #
    args.LearnRate = numpy.float32(args.LearnRate)
    assert(args.LearnRate > 0.0)
    #
    if args.PartialPredict == 0:
        args.PartialPredict = False
    else:
        args.PartialPredict = True
    #
    args.PruneStream = numpy.int32(args.PruneStream)
    #
    if args.DevIncludedSetting == 0:
        args.DevIncludedSetting = False
    else:
        args.DevIncludedSetting = True
    #
    if args.PredictFirst == 0:
        args.PredictFirst = False
    else:
        args.PredictFirst = True
    #
    if args.PredictLambda == 0:
        args.PredictLambda = False
    else:
        args.PredictLambda = True
    #
    #
    id_process = os.getpid()
    time_current = datetime.datetime.now().isoformat()
    #
    flag_1 = (
        args.Model == 'hawkes' or args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
    )
    flag_2 = (
        args.Model == 'nanmodel'
    )
    flag_3 = (
        args.Model == 'neuraladapttimescale' or args.Model == 'hawkesinhibscale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
    )
    #
    # conttime is the one with the continuous-time LSTM
    #
    assert(flag_1 or flag_2 or flag_3)
    # we stop using neuralsimple
    # +time means we encode time using neural networks
    #
    tag_model = '_PID=' + str(id_process) + '_TIME=' + time_current
    #
    #file_log = os.path.abspath(
    #    './logs/log' + tag_model + '.txt'
    #)
    #path_save = os.path.abspath(
    #    './models/models' + tag_model + '/'
    #)
    if 'meme' in args.FileData:
        tag_track = '_meme'
    elif 'retweet' in args.FileData:
        tag_track = '_retweet'
    elif 'mimic' in args.FileData:
        tag_track = '_mimic'
    elif '_so' in args.FileData:
        tag_track = '_so'
    elif '_bookorder' in args.FileData:
        tag_track = '_bookorder'
    elif '_missing' in args.FileData:
        tag_track = '_missing'
    else:
        tag_track = ''
    #
    path_track = './tracks' + tag_track + '/track' + tag_model + '/'
    file_log = os.path.abspath(
        path_track + 'log.txt'
    )
    #path_save = os.path.abspath(
    #    path_track + 'models/'
    #)
    path_save = path_track
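    # (illustrative note, not in the original file: with e.g.
    # FileData='./data/data_retweet/' this resolves to
    # ./tracks_retweet/track_PID=<pid>_TIME=<timestamp>/ as the folder that
    # holds log.txt and the saved models.)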
    #
    command_mkdir = 'mkdir -p ' + os.path.abspath(
        path_track
    )
    os.system(command_mkdir)
    #
    #
    ## show values ##
    print ("PID is : %s" % str(id_process))
    print ("TIME is : %s" % time_current)
    print ("Seed is : %s" % str(args.Seed))
    #
    print ("Model is : %s" % args.Model)
    print ("CoefL2 is : %s" % str(args.CoefL2))
    print ("FileData is : %s" % args.FileData)
    print ("TrainRatio is : %s" % str(args.TrainRatio))
    if 'neural' in args.Model or 'nanmodel' in args.Model:
        print ("DimLSTM is : %s" % str(args.DimLSTM))
    print ("FilePretrain is : %s" % args.FilePretrain)
    print ("TrackPeriod is : %s" % str(args.TrackPeriod))
    print ("MaxEpoch is : %s" % str(args.MaxEpoch))
    print ("SizeBatch is : %s" % str(args.SizeBatch))
    print ("Optimizer is : %s" % args.Optimizer)
    print ("LossType is : %s" % args.LossType)
    print ("WhatTrack is : %s" % args.WhatTrack)
    print ("LearnRate is : %s" % args.LearnRate)
    print ("PartialPredict is : %s" % args.PartialPredict)
    print ("PruneStream is : %s" % str(args.PruneStream))
    print ("Dev Included Setting is: %s" % args.DevIncludedSetting)
    print ("PredictFirst is: %s" % args.PredictFirst)
    print ("PredictLambda is: %s" % args.PredictLambda)
    #
    flag_show_1 = (
        args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
    )
    flag_show_2 = (
        args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
    )
    #
    if (flag_show_1 or flag_show_2):
        print ("Multiple for training is : %s" % args.MultipleTrain)
        print ("Multiple for dev is : %s" % args.MultipleDev)
    #
    dict_args = {
        'PID': id_process,
        'TIME': time_current,
        'Seed': args.Seed,
        #
        'Model': args.Model,
        'CoefL2': args.CoefL2,
        'FileData': args.FileData,
        'TrainRatio': args.TrainRatio,
        'DimLSTM': args.DimLSTM,
        'FilePretrain': args.FilePretrain,
        'TrackPeriod': args.TrackPeriod,
        'MaxEpoch': args.MaxEpoch,
        'SizeBatch': args.SizeBatch,
        'Optimizer': args.Optimizer,
        'MultipleTrain': args.MultipleTrain,
        'MultipleDev': args.MultipleDev,
        'LossType': args.LossType,
        'WhatTrack': args.WhatTrack,
        'LearnRate': args.LearnRate,
        'PartialPredict': args.PartialPredict,
        'PruneStream': args.PruneStream,
        'DevIncludedSetting': args.DevIncludedSetting,
        'PredictLambda': args.PredictLambda
    }
    #
    input_train = {
        'model': args.Model,
        'seed_random': args.Seed,
        'path_rawdata': args.FileData,
        'ratio_train': args.TrainRatio,
        'path_pre_train': args.FilePretrain,
        'track_period': args.TrackPeriod,
        'max_epoch': args.MaxEpoch,
        'size_batch': args.SizeBatch,
        'dim_model': args.DimLSTM,
        'optimizer': args.Optimizer,
        'save_file_path': path_save,
        'log_file': file_log,
        'args': dict_args,
        'coef_l2': args.CoefL2,
        'what_to_track': args.WhatTrack,
        'loss_type': args.LossType,
        'learn_rate': args.LearnRate,
        'partial_predict': args.PartialPredict,
        'prune_stream': args.PruneStream,
        'di_setting': args.DevIncludedSetting,
        'predict_lambda': args.PredictLambda
    }
    #
    if '_so' in args.FileData or '_mimic' in args.FileData or '_bookorder' in args.FileData:
        input_train['predict_first'] = False
    else:
        if args.PredictFirst:
            input_train['predict_first'] = True
        else:
            input_train['predict_first'] = False
    #
    #
    flag_multiple_1 = (
        args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
    )
    flag_multiple_2 = (
        args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
    )
    #
    if (flag_multiple_1 or flag_multiple_2):
        input_train['multiple_sample_for_train'] = numpy.int32(
            args.MultipleTrain
        )
        input_train['multiple_sample_for_dev'] = numpy.int32(
            args.MultipleDev
        )
    #
    if args.Model == 'hawkes':
        run_models.train_hawkes_ctsm(input_train)
    elif args.Model == 'hawkesinhib' or args.Model == 'hawkesinhibscale':
        run_models.train_hawkesinhib_ctsm(input_train)
    elif args.Model == 'neural':
        run_models.train_neural_hawkes_ctsm(input_train)
    elif args.Model == 'neuralgeneral':
        run_models.train_generalized_neural_hawkes_ctsm(
            input_train, tag_neural_type='general'
        )
    elif args.Model == 'neuraladapt':
        run_models.train_generalized_neural_hawkes_ctsm(
            input_train, tag_neural_type='adaptive'
        )
    elif args.Model == 'neuralsimple':
        run_models.train_generalized_neural_hawkes_ctsm(
            input_train, tag_neural_type='simple'
        )
    elif args.Model == 'neuraltime':
        run_models.train_neural_hawkes_ctsm_time(
            input_train
        )
    elif args.Model == 'neuralgeneraltime':
        run_models.train_generalized_neural_hawkes_ctsm_time(
            input_train, tag_neural_type='general'
        )
    elif args.Model == 'neuraladapttime' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime':
        if args.DevIncludedSetting:
            run_models.train_generalized_neural_hawkes_ctsm_time_DevIncludedSetting(
                input_train, tag_neural_type='adaptive'
            )
        else:
            run_models.train_generalized_neural_hawkes_ctsm_time(
                input_train, tag_neural_type='adaptive'
            )
    else:
        print("Model not implemented yet !!! ")
    #
if __name__ == "__main__": main()
|
{"hexsha": "7379ab15419c309db01afec181a8134fe0e09b35", "size": 14698, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_models.py", "max_stars_repo_name": "ZhaozhiQIAN/neurawkes", "max_stars_repo_head_hexsha": "1a3caa837b34f77ac9d078bc9bf10ff10a3bf959", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_models.py", "max_issues_repo_name": "ZhaozhiQIAN/neurawkes", "max_issues_repo_head_hexsha": "1a3caa837b34f77ac9d078bc9bf10ff10a3bf959", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_models.py", "max_forks_repo_name": "ZhaozhiQIAN/neurawkes", "max_forks_repo_head_hexsha": "1a3caa837b34f77ac9d078bc9bf10ff10a3bf959", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9952380952, "max_line_length": 259, "alphanum_fraction": 0.6140291196, "include": true, "reason": "import numpy,import scipy,import theano,from theano", "num_tokens": 3914}
|
import os
import numpy as np
import requests
import boto3
import semver
import json
from requests.auth import HTTPBasicAuth
from requests_toolbelt.multipart.encoder import MultipartEncoder
from requests_toolbelt.utils import dump
from zipfile import ZipFile
from model import train
from generate_datanpz import download_img_match_labels, make_datanpz
from generate_tfrecords import create_tfr
s3 = boto3.client('s3')
auth = os.getenv('MACHINE_AUTH')
stack = os.getenv('StackName')
model_id = os.getenv('MODEL_ID')
prediction_id = os.getenv('PREDICTION_ID')
bucket = os.getenv('ASSET_BUCKET')
api = os.getenv('API_URL')
imagery = os.getenv('TILE_ENDPOINT')
assert(stack)
assert(auth)
assert(model_id)
assert(prediction_id)
assert(api)
assert(imagery)
def get_pred(model_id, prediction_id):
    r = requests.get(api + '/v1/model/' + str(model_id) + '/prediction/' + str(prediction_id), auth=HTTPBasicAuth('machine', auth))
    r.raise_for_status()
    pred = r.json()
    return pred

def get_asset(bucket, key):
    print('ok - downloading: ' + bucket + '/' + key)
    parsed = key.split('/')
    obj = s3.download_file(
        Filename='/tmp/' + parsed[len(parsed) - 1],
        Bucket=bucket,
        Key=key
    )
    dirr = parsed[len(parsed) - 1].replace('.zip', '')
    with ZipFile('/tmp/' + parsed[len(parsed) - 1], 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
        zipObj.extractall('/tmp/' + dirr)
    return '/tmp/' + dirr

def get_label_npz(model_id, prediction_id):
    payload = {'format': 'npz', 'inferences': 'all', 'threshold': 0}
    r = requests.get(api + '/v1/model/' + model_id + '/prediction/' + prediction_id + '/export', params=payload,
                     auth=HTTPBasicAuth('machine', auth))
    r.raise_for_status()
    with open('/tmp/labels.npz', 'wb') as f:
        f.write(r.content)
    return f
def increment_versions(version):
    v = semver.VersionInfo.parse(version)
    return v.bump_minor()
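# e.g. increment_versions('1.2.3') yields version 1.3.0 -- bump_minor raises
# the minor number and resets the patch number (illustrative note, not in the
# original file).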
def get_versions(model_id):
    r = requests.get(api + '/v1/model/' + model_id + '/prediction/all', auth=HTTPBasicAuth('machine', auth))
    r.raise_for_status()
    preds = r.json()
    version_lst = []
    for pred_dict in preds:
        version_lst.append(pred_dict['version'])
    version_highest = str(max(map(semver.VersionInfo.parse, version_lst)))
    return version_highest

def post_pred(pred, version):
    data_pred = {
        'modelId': pred['modelId'],
        'version': version,
        'tileZoom': pred['tileZoom'],
        'infList': pred['infList'],
        'infType': pred['infType'],
        'infBinary': pred['infBinary'],
        'infSupertile': pred['infSupertile']
    }
    r = requests.post(api + '/v1/model/' + model_id + '/prediction', json=data_pred, auth=HTTPBasicAuth('machine', auth))
    r.raise_for_status()
    print(r.status_code)
    pred = r.json()
    return pred['prediction_id']
def update_link(pred, link_type, zip_path):
    payload = {'type': link_type}
    print(payload)
    model_id = pred['modelId']
    print(model_id)
    prediction_id = pred['predictionsId']
    print(prediction_id)
    encoder = MultipartEncoder(fields={'file': ('filename', open(zip_path, 'rb'), 'application/zip')})
    print('/v1/model/' + str(model_id) + '/prediction/' + str(prediction_id) + '/upload')
    r = requests.post(api + '/v1/model/' + str(model_id) + '/prediction/' + str(prediction_id) + '/upload', params=payload,
                      data=encoder, headers={'Content-Type': encoder.content_type}, auth=HTTPBasicAuth('machine', auth))
    r.raise_for_status()

pred = get_pred(model_id, prediction_id)
if pred['modelLink'] is None:
raise Exception("Cannot retrain without modelLink")
if pred['checkpointLink'] is None:
raise Exception("Cannot retrain without checkpointLink")
zoom = pred['tileZoom']
supertile = pred['infSupertile']
version = pred['version']
inflist = pred['infList'].split(',')
if supertile:
x_feature_shape = [-1, 512, 512, 3]
else:
x_feature_shape = [-1, 256, 256, 3]
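# (illustrative note, not in the original file: the 512x512 input shape for
# supertiles vs 256x256 for plain tiles suggests a supertile is a 2x2 mosaic
# of standard 256px map tiles; treat this as an assumption, not a documented
# fact of the API.)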
v = get_versions(model_id)
model = get_asset(bucket, pred['modelLink'].replace(bucket + '/', ''))
checkpoint = get_asset(bucket, pred['checkpointLink'].replace(bucket + '/', ''))
print(model)
print(checkpoint)
get_label_npz(model_id, prediction_id)
# download image tiles that match validated labels.npz file
download_img_match_labels(labels_folder='/tmp', imagery=imagery, folder='/tmp/tiles', zoom=zoom, supertile=supertile)
# create data.npz file that matches up images and labels
make_datanpz(dest_folder='/tmp', imagery=imagery)
# get train and val sample counts
d = np.load('/tmp/data.npz')
n_train_samps = d['y_train'].shape[0]
n_val_samps = d['y_val'].shape[0]
# convert data.npz into tf-records
create_tfr(npz_path='/tmp/data.npz', city='city')
# conduct re-training
train(tf_train_steps=200, tf_dir='/tmp/tfrecords.zip',
      retraining_weights='/tmp/checkpoint.zip',
      n_classes=len(inflist), class_names=inflist, x_feature_shape=x_feature_shape,
      n_train_samps=n_train_samps, n_val_samps=n_val_samps)
# increment model version
updated_version = str(increment_versions(version=v))
print(updated_version)
# post new pred
newpred_id = post_pred(pred=pred, version=updated_version)
newpred = get_pred(model_id, newpred_id)
# update tf-records zip
update_link(newpred, link_type='tfrecord', zip_path = '/tmp/tfrecords.zip')
print("tfrecords link updated")
# update model link
update_link(newpred, link_type='model', zip_path ='/ml/models.zip')
print("models link updated")
# update checkpoint
update_link(newpred, link_type='checkpoint', zip_path = '/ml/checkpoint.zip')
print("checkpoint link updated")
|
{"hexsha": "8259a1b003dc4e57315a4ed5dff631dd86bd44a2", "size": 5693, "ext": "py", "lang": "Python", "max_stars_repo_path": "task-retrain/task.py", "max_stars_repo_name": "ingalls/ml-enabler", "max_stars_repo_head_hexsha": "efda973cb3fa9954cbe24cd0963a7b8f5be5ad6f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "task-retrain/task.py", "max_issues_repo_name": "ingalls/ml-enabler", "max_issues_repo_head_hexsha": "efda973cb3fa9954cbe24cd0963a7b8f5be5ad6f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:14:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:45:58.000Z", "max_forks_repo_path": "task-retrain/task.py", "max_forks_repo_name": "ingalls/ml-enabler", "max_forks_repo_head_hexsha": "efda973cb3fa9954cbe24cd0963a7b8f5be5ad6f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9831460674, "max_line_length": 131, "alphanum_fraction": 0.6929562621, "include": true, "reason": "import numpy", "num_tokens": 1463}
|
import sys,os,re,time,cPickle
import numpy as np
from networkx import bidirectional_dijkstra,shortest_path_length
import networkx as nx
from scipy.cluster.vq import kmeans2
import scipy.stats as stats
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist,cdist,squareform
#from SpectralMix import SilValueGenerator
#from mpl_toolkits.mplot3d import Axes3D
EPS = np.finfo(float).eps
## the base cluster class for other spectral clustering methods
class ClusterBase:
## constructor
# this class takes as input a raw matrix consisting of observations and features
# the observations occupy the rows and the features the rows
# the class also takes as input a similarity matrix or a networkx graph
# @param mat is a raw matrix (numpp.array((n,d))) or a networkx graph
# @param k is the number of components in the mixture
# @param dataHeader is a list or numpy.array() of length n consisting of labels for the data
# @param labels are an optional vector corresponding to dataHeader that is used for evalutaion purposes
# @param dtype is the data type that may be 'raw', 'similarity' or 'graph'
# @param weighted defines whether the input graph is of type weighted or not (True or False)
# @param verbose generally used for debugging mode
# @param refine used to specify the method for noise refinement 'kmeans'
# @param classify step used to carry out the clustering of the normalized stacked and ranked eigenvectors
# Note on distance matrics:
# \li chebyshev - the Chebyshev distance.
# \li cityblock - the Manhattan distance.
# \li correlation - the Correlation distance.
# \li cosine - the Cosine distance.
# \li euclidean - the Euclidean distance.
# \li hamming - the Hamming distance (boolean).
# \li mahalanobis - the Mahalanobis distance.
# \li minkowski - the Minkowski distance.
# \li seuclidean - the normalized Euclidean distance.
# \li sqeuclidean - the squared Euclidean distance.
def __init__(self,mat,k=None,dataHeader=None,labels=None,dtype='raw',weighted=False,verbose=False,classifyStep='kmeans',dmatPath=None,projID='generic'):
## error check input
if dtype not in ['raw','graph','distance']:
raise ValueError, "matrix input type not valid", dtype
## class-wide variables
self.k = k
self.dtype = dtype
self.weighted = weighted
self.verbose = verbose
self.noiseValue = 999
self.projID = projID
self.dmatPath = dmatPath
self.unusedGenes = None
self.unusedIndices = None
usedIndices = None
if dtype == 'graph':
self.G = mat
self.n = len(self.G.nodes())
else:
self.mat = mat
self.n ,self.d = np.shape(mat)
## handle header and labels
if dataHeader != None:
self.dataHeader = [dat for dat in dataHeader]
self.origDataHeader = [odat for odat in dataHeader]
else:
self.dataHeader = None
self.origDataHeader = None
        if labels is not None:
self.origLabels = np.array([float(l) for l in labels])
self.labels = np.array([float(l) for l in labels])
else:
self.labels = None
self.origLabels = None
#################
### methods ###
#################
def graph_to_distance_mat(self,G,dataHeader,weighted=False,reweighting=True,verbose=False):
nodeList = dataHeader
n = len(nodeList)
dMat = np.zeros((n,n))
if verbose == True:
print "\tINFO: making graph from distance matrix... reweighting is %s"%reweighting
### get all pairwise shortest paths and add distance to matrix
total = (n * (n-1)) / 2.0
count = 0
for i in range(n):
nodeI = nodeList[i]
for j in range(n):
nodeJ = nodeList[j]
if j >= i:
continue
if reweighting == True:
if weighted == True:
bdResults = bidirectional_dijkstra(G,nodeI,nodeJ)
if bdResults == False:
distance = 1e08
else:
distance, dijkPath = bdResults
else:
distance = shortest_path_length(G,nodeI,nodeJ)
dMat[i,j] = distance
dMat[j,i] = distance
else:
if G.has_edge(nodeI,nodeJ) == True or G.has_edge(nodeJ,nodeI) == True:
weight = G[nodeI][nodeJ]['weight']
dMat[i,j] = weight
dMat[j,i] = weight
count+=1
#if verbose == True:
# if count%100.0 == 0.0:
# print "\t\tpercent complete",round(float(count) / float(total) * 100.0,2), '%'
#print "\t\tpercent complete 100", '%'
return dMat
# mat is a matrix of type numpy.array(n,d) where n are the observations and d are features
def raw_to_distance_mat(self,mat):
values = pdist(mat,'sqeuclidean') # sqeuclidean, euclidean
dMat = squareform(values)
return dMat
    # dMat is a symmetric positive distance matrix of type numpy.array(n,n) where n are the observations
    # sigma is the bandwidth parameter that controls how quickly the affinity drops off
    # the negative sign in the exponent makes the affinity decay as distance grows
    def distance_to_affinity_mat(self,dMat,sigma,reshape=True):
        if dMat is None:
            print "ERROR: distance matrix is None cannot find affinity"
            return None
        ## Gaussian kernel: A_ij = exp(-d_ij^2 / (2 * sigma^2))
        aMat = np.exp(-1.0 * (dMat**2.0) / (2.0 * (sigma**2.0)))
if reshape == True:
aMat = self._reshape_affinity_matrix_to_original_header(aMat)
return aMat
    # @param sigma is the bandwidth parameter that controls how quickly the affinity drops off
def get_affinity_matrix(self,sigma,reshape=True,reweighting=True,verbose=False):
self._error_check_input_data()
dmatPickle = 'NotAFile'
if self.dtype == 'raw':
self.dMat = self.raw_to_distance_mat(self.mat)
elif self.dtype == 'graph':
print 'dtype is ', self.dtype
            if self.dmatPath is not None and not os.path.isfile(self.dmatPath):
if verbose == True:
print '\t...............creating new dMat to be pickled...'
self.dMat = self.graph_to_distance_mat(self.G,self.dataHeader,weighted=self.weighted,reweighting=reweighting,verbose=verbose)
cPickle.dump(self.dMat,open(self.dmatPath,'w'))
            elif self.dmatPath is not None and os.path.isfile(self.dmatPath):
if verbose== True:
print '\t...............using pickled dmat'
self.dMat = cPickle.load(open(self.dmatPath,'r'))
else:
self.dMat = self.graph_to_distance_mat(self.G,self.dataHeader,weighted=self.weighted,reweighting=reweighting,verbose=verbose)
elif self.dtype == 'distance':
self.dMat = self.mat
        if self.dMat is None:
print "ERROR: did not find dMat"
return None
aMat = self.distance_to_affinity_mat(self.dMat,sigma,reshape=reshape)
        if aMat is None:
print "ERROR: could not find aMat"
return None
return aMat
def affinity_to_diagonal_mat(self,aMat):
diaMat = np.diag(aMat.sum(axis=1)**-0.5)
return diaMat
def affinity_to_nx(self,aMat,header):
G = nx.Graph()
distances = []
n,m = np.shape(aMat)
if n != m or n != np.size(header):
print "INPUT ERROR: for affinity to nx - sizes must be the same"
return None
for i in range(n):
nodeI = header[i]
for j in range(n):
nodeJ = header[j]
if j >= i:
continue
G.add_edge(nodeI, nodeJ, weight=aMat[i,j])
distances.append(aMat[i,j])
return G, distances
def get_silhouette_values(self,rawMat,dMat=None,labels=None):
        if labels is None:
            centroids, labels = kmeans2(rawMat,self.k,iter=25,minit='points')
        svg = SilValueGenerator(rawMat,labels) # requires the SpectralMix import above to be re-enabled
return svg.silValues
    def _generate_heatmap(self,mat):
        cMap = plt.cm.spectral # jet, hot, gist_stern
        plt.imshow(mat,aspect='auto',interpolation='nearest',cmap=cMap)
        #plt.colorbar()
    def _plot_scatter_data(self,mat,color='blue',labels=None,buffer=0.2,use3D=False):
        colors = ['blue','orange','red','green','yellow','magenta','cyan','black']
        ## error checking
        if isinstance(labels,list):
            labels = np.array(labels)
        if use3D == False:
            if labels is None:
                print 'labels are none'
                plt.plot([mat[:,0]],[mat[:,1]], marker='o',color=color,markersize=8.0)
            else:
                for l in np.unique(labels): # plot each cluster once
                    x = mat[:,0][np.where(labels==l)]
                    y = mat[:,1][np.where(labels==l)]
                    if l == self.noiseValue:
                        plt.plot([x],[y],marker='o',markersize=10.0,color='gray')
                    else:
                        plt.plot([x],[y],marker='o',markersize=10.0,color=colors[int(l)])
        plt.xlim([mat[:,0].min()-buffer,mat[:,0].max()+buffer])
        plt.ylim([mat[:,1].min()-buffer,mat[:,1].max()+buffer])
def calculate_distortion_measure(self,clustResults):
clusteredData = {}
totalJ = 0
errorCk = 0
for k in range(self.k):
clusteredData[k] = clustResults['yMat'][np.where(clustResults['labels']==k)[0],:]
for k in range(self.k):
sumOfSquares = (clusteredData[k] - clusteredData[k].mean(axis=0))**2.0
totalJ = totalJ + sumOfSquares.sum()
errorCk = errorCk + len(sumOfSquares)
if errorCk != len(clustResults['labels']):
print "ERROR: Did not pass error check in distortion measure calc"
return totalJ
def _error_check_input_data(self):
## check gene list for genes not in G
newLabels = []
self.unusedGenes = []
if self.dtype == 'graph':
if type(self.dataHeader)==type([]):
self.dataHeader = np.array(self.dataHeader)
for g1 in range(len(self.dataHeader)):
gene = self.dataHeader[g1]
geneIndex = np.where(np.array(self.G.nodes())==gene)
if len(geneIndex[0]) == 0:
self.unusedGenes.append(gene)
## save original labels and orig data header
self.unusedGenes = np.array(self.unusedGenes)
            if self.labels is not None:
self.origLabels = self.labels.copy()
self.origDataHeader = self.dataHeader.copy()
self.unusedIndices = np.array([np.where(self.origDataHeader==gene)[0][0] for gene in self.unusedGenes])
usedIndices = []
            for ind in range(len(self.origDataHeader)): #origLabels
                if ind not in self.unusedIndices:
                    usedIndices.append(ind)
self.usedIndices = np.array(usedIndices)
self.dataHeader = self.origDataHeader[self.usedIndices]
            if self.labels is not None:
self.labels = self.origLabels[self.usedIndices]
## error check for genes in G that are not in header
for g2 in range(len(self.G.nodes())):
node = self.G.nodes()[g2]
nodeIndex = np.where(self.dataHeader==node)
if len(nodeIndex[0]) == 0:
print "WARNING: a gene was found in the graph that was not listed in the data header", node
continue
self.n = len(self.dataHeader)
if self.verbose == True:
print "\tINFO: out of %s genes possible genes only %s appear in the graph"%(len(self.origDataHeader),len(self.dataHeader))
## error checking input
        if self.dtype not in ['raw','distance','affinity','graph']:
            raise ValueError("matrix input type not valid")
        if self.labels is not None:
            if len(self.labels) != self.n:
                raise ValueError("labels length not matching number observations")
def _reshape_affinity_matrix_to_original_header(self,aMat):
origLength = len(self.origDataHeader)
newAMat = np.zeros((origLength,origLength),)
newAMat = newAMat + EPS
for i in range(origLength):
obj = self.origDataHeader[i]
if i in self.usedIndices:
newRow = np.zeros((origLength),) + EPS
aMatInd = np.where(self.dataHeader==obj)[0][0]
newRow[self.usedIndices] = aMat[aMatInd,:]
newAMat[i,:] = newRow
return newAMat
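## ---------------------------------------------------------------------------
## Hedged usage sketch (an illustrative addition, not part of the original
## module): a minimal run of the raw-matrix -> distance -> Gaussian-affinity
## pipeline above. The toy data and the k and sigma values are assumptions,
## and reshape=False is used because header reshaping only applies to graph input.
## ---------------------------------------------------------------------------
if __name__ == '__main__':
    X = np.random.randn(20,3)                                # 20 observations, 3 features
    cb = ClusterBase(X,k=2,dataHeader=range(20),dtype='raw') # raw-matrix mode
    aMat = cb.get_affinity_matrix(sigma=1.0,reshape=False)   # A_ij = exp(-d_ij^2/(2*sigma^2))
    print "affinity matrix shape:", np.shape(aMat)           # (20, 20)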
|
{"hexsha": "60c436453a683ba7f2a065145461bffda87be825", "size": 13442, "ext": "py", "lang": "Python", "max_stars_repo_path": "spectralmix/ClusterBase.py", "max_stars_repo_name": "ksiomelo/cubix", "max_stars_repo_head_hexsha": "cd9e6dda6696b302a7c0d383259a9d60b15b0d55", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-09-07T00:16:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-11T20:27:56.000Z", "max_issues_repo_path": "spectralmix/ClusterBase.py", "max_issues_repo_name": "ksiomelo/cubix", "max_issues_repo_head_hexsha": "cd9e6dda6696b302a7c0d383259a9d60b15b0d55", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spectralmix/ClusterBase.py", "max_forks_repo_name": "ksiomelo/cubix", "max_forks_repo_head_hexsha": "cd9e6dda6696b302a7c0d383259a9d60b15b0d55", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4879518072, "max_line_length": 156, "alphanum_fraction": 0.5710459753, "include": true, "reason": "import numpy,import scipy,from scipy,import networkx,from networkx", "num_tokens": 3109}
|
# NB taken from the skimage docs
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, rescale
image = shepp_logan_phantom()
image = rescale(image, scale=0.4, mode='reflect')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
print(image.shape)
sinogram = radon(image, theta=theta)
print(sinogram.shape)
dx, dy = 0.5 * 180.0 / max(image.shape), 0.5 / sinogram.shape[0]
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(-dx, 180.0 + dx, -dy, sinogram.shape[0] + dy),
aspect='auto')
fig.tight_layout()
plt.show()
from skimage.transform import iradon
reconstruction_fbp = iradon(sinogram, theta=theta, filter_name='ramp')
error = reconstruction_fbp - image
print(f"FBP rms reconstruction error: {np.sqrt(np.mean(error ** 2)):.3g}")
imkwargs = dict(vmin=-0.2, vmax=0.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5),
sharex=True, sharey=True)
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
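# A hedged follow-up sketch (an addition in the spirit of the same skimage
# docs): iterative reconstruction with SART, which typically lowers the RMS
# error relative to plain FBP on this phantom.
from skimage.transform import iradon_sart

reconstruction_sart = iradon_sart(sinogram, theta=theta)
error_sart = reconstruction_sart - image
print(f"SART (1 iteration) rms reconstruction error: "
      f"{np.sqrt(np.mean(error_sart ** 2)):.3g}")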
|
{"hexsha": "dd13e4fe27614b2cd78078725bf3f3925ac79dd4", "size": 1510, "ext": "py", "lang": "Python", "max_stars_repo_path": "shearlet_admm/radon_demo.py", "max_stars_repo_name": "AndiBraimllari/SDLX", "max_stars_repo_head_hexsha": "f4a7280d261c00970fd8ab427174fba871f18a6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-18T02:28:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T02:28:10.000Z", "max_issues_repo_path": "shearlet_admm/radon_demo.py", "max_issues_repo_name": "AndiBraimllari/SDLX", "max_issues_repo_head_hexsha": "f4a7280d261c00970fd8ab427174fba871f18a6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-22T06:09:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T13:57:26.000Z", "max_forks_repo_path": "shearlet_admm/radon_demo.py", "max_forks_repo_name": "AndiBraimllari/SDLX", "max_forks_repo_head_hexsha": "f4a7280d261c00970fd8ab427174fba871f18a6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3181818182, "max_line_length": 74, "alphanum_fraction": 0.7178807947, "include": true, "reason": "import numpy", "num_tokens": 440}
|
program t
200 parameter(a=1)
implicit integer(y)
parameter(b=2)
100 format (f4.2)
implicit real(kind=8)(i-k,r)
j=3.14
print 100,j
end program t
|
{"hexsha": "21bd828cfe97b6b858b188bcf48f56b9c4b9fff0", "size": 190, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/t0136x/t.f", "max_stars_repo_name": "maddenp/ppp", "max_stars_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-13T16:32:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T12:37:58.000Z", "max_issues_repo_path": "tests/t0136x/t.f", "max_issues_repo_name": "maddenp/ppp", "max_issues_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/t0136x/t.f", "max_forks_repo_name": "maddenp/ppp", "max_forks_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-07-30T17:02:27.000Z", "max_forks_repo_forks_event_max_datetime": "2015-08-03T16:29:41.000Z", "avg_line_length": 19.0, "max_line_length": 34, "alphanum_fraction": 0.5526315789, "num_tokens": 62}
|
@testset "DQNLearner" begin
env = CartPoleEnv(; T = Float32, seed = 11)
ns, na = length(rand(get_observation_space(env))), length(get_action_space(env))
agent = Agent(
policy = QBasedPolicy(
learner = DQNLearner(
approximator = NeuralNetworkApproximator(
model = Chain(
Dense(ns, 128, relu; initW = seed_glorot_uniform(seed = 17)),
Dense(128, 128, relu; initW = seed_glorot_uniform(seed = 23)),
Dense(128, na; initW = seed_glorot_uniform(seed = 39)),
) |> gpu,
optimizer = ADAM(),
),
target_approximator = NeuralNetworkApproximator(
model = Chain(
Dense(ns, 128, relu; initW = seed_glorot_uniform(seed = 17)),
Dense(128, 128, relu; initW = seed_glorot_uniform(seed = 23)),
Dense(128, na; initW = seed_glorot_uniform(seed = 39)),
) |> gpu,
optimizer = ADAM(),
),
loss_func = huber_loss,
stack_size = nothing,
batch_size = 32,
update_horizon = 1,
min_replay_history = 100,
update_freq = 1,
target_update_freq = 100,
seed = 22,
),
explorer = EpsilonGreedyExplorer(
kind = :exp,
ϵ_stable = 0.01,
decay_steps = 500,
seed = 33,
),
),
trajectory = CircularCompactSARTSATrajectory(
capacity = 1000,
state_type = Float32,
state_size = (ns,),
),
)
hook = ComposedHook(TotalRewardPerEpisode(), TimePerStep())
run(agent, env, StopAfterStep(10000), hook)
@info "stats for DQNLearner" avg_reward = mean(hook[1].rewards) avg_fps =
1 / mean(hook[2].times)
end
|
{"hexsha": "481c2344b5c02bf3b673ad968942808cfd4d8927", "size": 2017, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/dqn.jl", "max_stars_repo_name": "findmyway/ReinforcementLearningZoo.jl", "max_stars_repo_head_hexsha": "8868ed5e2f2c4dfe725bec82f2bd4b0d08f365f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-23T08:50:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-01T18:28:59.000Z", "max_issues_repo_path": "test/dqn.jl", "max_issues_repo_name": "findmyway/ReinforcementLearningZoo.jl", "max_issues_repo_head_hexsha": "8868ed5e2f2c4dfe725bec82f2bd4b0d08f365f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/dqn.jl", "max_forks_repo_name": "findmyway/ReinforcementLearningZoo.jl", "max_forks_repo_head_hexsha": "8868ed5e2f2c4dfe725bec82f2bd4b0d08f365f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5490196078, "max_line_length": 86, "alphanum_fraction": 0.483391175, "num_tokens": 466}
|
import settings
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import copy
import os, glob
import cv2
import random
import argparse
import bcolz
import pandas as pd
from PIL import Image
#from inception import inception_v3
from vgg import vgg19_bn, vgg16_bn
#from inceptionresv2 import inceptionresnetv2
MODEL_DIR = settings.MODEL_DIR
C = settings.NUM_CLASSES
w_files_training = []
def get_acc_from_w_filename(filename):
try:
stracc = filename.split('_')[-2]
return float(stracc)
except:
return 0.
def load_best_weights(model):
w_files = glob.glob(os.path.join(MODEL_DIR, model.name) + '_*.pth')
max_acc = 0
best_file = None
saved_epoch = -1
for w_file in w_files:
try:
stracc = w_file.split('_')[-2]
epoch = w_file.split('_')[-3]
acc = float(stracc)
if acc > max_acc:
best_file = w_file
max_acc = acc
saved_epoch = int(epoch)
w_files_training.append((acc, w_file))
except:
continue
if max_acc > 0:
print('loading weight: {}'.format(best_file))
model.load_state_dict(torch.load(best_file))
return saved_epoch
def save_weights(acc, model, epoch, max_num=2):
f_name = '{}_{}_{:.5f}_.pth'.format(model.name, epoch, acc)
w_file_path = os.path.join(MODEL_DIR, f_name)
if len(w_files_training) < max_num:
w_files_training.append((acc, w_file_path))
torch.save(model.state_dict(), w_file_path)
return
    min_acc = 10.0
    index_min = -1
    for i, item in enumerate(w_files_training):
        val_acc, fp = item
        if min_acc > val_acc:
            index_min = i
            min_acc = val_acc
    #print(min_acc)
    if acc > min_acc:
torch.save(model.state_dict(), w_file_path)
try:
os.remove(w_files_training[index_min][1])
except:
print('Failed to delete file: {}'.format(w_files_training[index_min][1]))
w_files_training[index_min] = (acc, w_file_path)
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
def load_weights_file(model, w_file):
model.load_state_dict(torch.load(w_file))
def create_res18(load_weights=False, freeze=False):
model_ft = models.resnet18(pretrained=True)
if freeze:
for param in model_ft.parameters():
param.requires_grad = False
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C)) #, nn.Softmax())
model_ft = model_ft.cuda()
model_ft.name = 'res18'
model_ft.batch_size = 256
return model_ft
def create_res34(load_weights=False, freeze=False):
model_ft = models.resnet34(pretrained=True)
if freeze:
for param in model_ft.parameters():
param.requires_grad = False
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C)) #, nn.Softmax())
model_ft = model_ft.cuda()
model_ft.name = 'res34'
model_ft.batch_size = 128
return model_ft
def create_res50(load_weights=False, freeze=False):
model_ft = models.resnet50(pretrained=True)
if freeze:
for param in model_ft.parameters():
param.requires_grad = False
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C)) #, nn.Softmax())
model_ft = model_ft.cuda()
model_ft.name = 'res50'
model_ft.batch_size = 32
return model_ft
def create_res101(load_weights=False, freeze=False):
model_ft = models.resnet101(pretrained=True)
if freeze:
for param in model_ft.parameters():
param.requires_grad = False
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C))
model_ft = model_ft.cuda()
model_ft.name = 'res101'
model_ft.batch_size = 32
return model_ft
def create_res152(load_weights=False, freeze=False):
res152 = models.resnet152(pretrained=True)
if freeze:
for param in res152.parameters():
param.requires_grad = False
num_ftrs = res152.fc.in_features
res152.fc = nn.Sequential(nn.Linear(num_ftrs, C))
res152 = res152.cuda()
res152.name = 'res152'
return res152
def create_dense161(load_weights=False, freeze=False):
desnet_ft = models.densenet161(pretrained=True)
if freeze:
for param in desnet_ft.parameters():
param.requires_grad = False
num_ftrs = desnet_ft.classifier.in_features
desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))
desnet_ft = desnet_ft.cuda()
desnet_ft.name = 'dense161'
#desnet_ft.batch_size = 32
return desnet_ft
def create_dense169(load_weights=False, freeze=False):
desnet_ft = models.densenet169(pretrained=True)
if freeze:
for param in desnet_ft.parameters():
param.requires_grad = False
num_ftrs = desnet_ft.classifier.in_features
desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))
desnet_ft = desnet_ft.cuda()
desnet_ft.name = 'dense169'
#desnet_ft.batch_size = 32
return desnet_ft
def create_dense121(load_weights=False, freeze=False):
desnet_ft = models.densenet121(pretrained=True)
if freeze:
for param in desnet_ft.parameters():
param.requires_grad = False
num_ftrs = desnet_ft.classifier.in_features
desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))
desnet_ft = desnet_ft.cuda()
desnet_ft.name = 'dense121'
desnet_ft.batch_size = 32
return desnet_ft
def create_dense201(load_weights=False, freeze=False):
desnet_ft = models.densenet201(pretrained=True)
if freeze:
for param in desnet_ft.parameters():
param.requires_grad = False
num_ftrs = desnet_ft.classifier.in_features
desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))
desnet_ft = desnet_ft.cuda()
desnet_ft.name = 'dense201'
#desnet_ft.batch_size = 32
return desnet_ft
def create_vgg19bn(load_weights=False, freeze=False):
vgg19_bn_ft = vgg19_bn(pretrained=True)
if freeze:
for param in vgg19_bn_ft.parameters():
param.requires_grad = False
#vgg19_bn_ft.classifier = nn.Linear(25088, 3)
vgg19_bn_ft.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, C))
vgg19_bn_ft = vgg19_bn_ft.cuda()
vgg19_bn_ft.name = 'vgg19bn'
vgg19_bn_ft.max_num = 1
#vgg19_bn_ft.batch_size = 32
return vgg19_bn_ft
def create_vgg16bn(load_weights=False, freeze=False):
vgg16_bn_ft = vgg16_bn(pretrained=True)
if freeze:
for param in vgg16_bn_ft.parameters():
param.requires_grad = False
#vgg16_bn_ft.classifier = nn.Linear(25088, 3)
vgg16_bn_ft.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, C))
vgg16_bn_ft = vgg16_bn_ft.cuda()
vgg16_bn_ft.name = 'vgg16bn'
vgg16_bn_ft.max_num = 1
#vgg16_bn_ft.batch_size = 32
return vgg16_bn_ft
def create_inceptionv3(load_weights=False, freeze=False):
incept_ft = models.inception_v3(pretrained=True)
if freeze:
for param in incept_ft.parameters():
param.requires_grad = False
num_ftrs = incept_ft.fc.in_features
incept_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C))
incept_ft.aux_logits=False
incept_ft = incept_ft.cuda()
incept_ft.name = 'inceptionv3'
incept_ft.batch_size = 32
return incept_ft
def create_inceptionresv2(load_weights=False, freeze=False):
    model_ft = inceptionresnetv2(pretrained=True)  # NOTE: requires the inceptionresv2 import above to be re-enabled
num_ftrs = model_ft.classif.in_features
model_ft.classif = nn.Sequential(nn.Linear(num_ftrs, C))
model_ft = model_ft.cuda()
model_ft.name = 'inceptionresv2'
model_ft.batch_size = 8
return model_ft
def create_model(model_name, freeze=False):
create_func = 'create_' + model_name
model = eval(create_func)(freeze=freeze)
if not hasattr(model, 'batch_size'):
model.batch_size = 16
return model
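# Hedged usage sketch (illustrative addition, not part of the original script):
# how the factory and checkpoint helpers above compose. Assumes a CUDA device,
# a valid settings.MODEL_DIR, and optionally some '<name>_<epoch>_<acc>_.pth'
# checkpoints previously written by save_weights().
if __name__ == '__main__':
    model = create_model('res18', freeze=True)  # resnet18 with a fresh C-way head
    start_epoch = load_best_weights(model)      # resume from best checkpoint, if any
    print('resuming from epoch: {}'.format(start_epoch))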
|
{"hexsha": "a73309591cc35128dd3f3cf9b1fc5e2a8d65c03b", "size": 8619, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "chicm/scene", "max_stars_repo_head_hexsha": "1a18ed92b45ab21a9ca40f2f8030df7a4849b956", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "chicm/scene", "max_issues_repo_head_hexsha": "1a18ed92b45ab21a9ca40f2f8030df7a4849b956", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "chicm/scene", "max_forks_repo_head_hexsha": "1a18ed92b45ab21a9ca40f2f8030df7a4849b956", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2421052632, "max_line_length": 85, "alphanum_fraction": 0.6686390533, "include": true, "reason": "import numpy", "num_tokens": 2229}
|
// Boost.Geometry (aka GGL, Generic Geometry Library)
// Copyright (c) 2012 Barend Gehrels, Amsterdam, the Netherlands.
// Copyright (c) 2012 Bruno Lalande, Paris, France.
// Copyright (c) 2012 Mateusz Loskot, London, UK.
// This file was modified by Oracle on 2018, 2020.
// Modifications copyright (c) 2018, 2020, Oracle and/or its affiliates.
// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_UTIL_CALCULATION_TYPE_HPP
#define BOOST_GEOMETRY_UTIL_CALCULATION_TYPE_HPP
#include <boost/config.hpp>
#include <boost/mpl/if.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/is_floating_point.hpp>
#include <boost/type_traits/is_fundamental.hpp>
#include <boost/type_traits/is_void.hpp>
#include <boost/geometry/util/select_coordinate_type.hpp>
#include <boost/geometry/util/select_most_precise.hpp>
#include <boost/multiprecision/cpp_int.hpp>
namespace boost { namespace geometry
{
namespace util
{
namespace detail
{
struct default_integral
{
#ifdef BOOST_HAS_LONG_LONG
typedef boost::long_long_type type;
#else
typedef int type;
#endif
};
template <typename Type>
struct is_multiprecision_integral
: boost::false_type
{};
template
<
unsigned MinBits, unsigned MaxBits,
boost::multiprecision::cpp_integer_type SignType,
boost::multiprecision::cpp_int_check_type Checked,
class Allocator
>
struct is_multiprecision_integral
<
boost::multiprecision::number
<
boost::multiprecision::cpp_int_backend
<
MinBits, MaxBits,
SignType,
Checked,
Allocator
>
>
>
: boost::true_type
{};
/*!
\details Selects the most appropriate:
- if calculation type is specified (not void), that one is used
- else if type is non-fundamental (user defined e.g. ttmath), that one
- else if type is floating point, the specified default FP is used
- else it is integral and the specified default integral is used
*/
template
<
typename Type,
typename CalculationType,
typename DefaultFloatingPointCalculationType,
typename DefaultIntegralCalculationType
>
struct calculation_type
{
BOOST_STATIC_ASSERT((
boost::is_fundamental
<
DefaultFloatingPointCalculationType
>::type::value
));
BOOST_STATIC_ASSERT((
boost::is_fundamental
<
DefaultIntegralCalculationType
>::type::value
));
typedef typename boost::mpl::if_
<
boost::is_void<CalculationType>,
typename boost::mpl::if_
<
boost::is_floating_point<Type>,
typename select_most_precise
<
DefaultFloatingPointCalculationType,
Type
>::type,
typename boost::mpl::if_c
<
is_multiprecision_integral<Type>::value,
// TODO: This is not fully correct since Multiprecision type
// will most likely be more precise than the DefaultIntegralCalculationType
// but the problem is that DefaultIntegralCalculationType is not always
// integral. This is a workaround for comparable_distance() calculation_type
                    // implementation passing double here. E.g. checking a static_assert above
// boost::is_integral<DefaultIntegralCalculationType>::value would be a start.
DefaultIntegralCalculationType,
typename select_most_precise
<
DefaultIntegralCalculationType,
Type
>::type
>::type
>::type,
CalculationType
>::type type;
};
} // namespace detail
namespace calculation_type
{
namespace geometric
{
template
<
typename Geometry,
typename CalculationType,
typename DefaultFloatingPointCalculationType = double,
typename DefaultIntegralCalculationType = detail::default_integral::type
>
struct unary
{
typedef typename detail::calculation_type
<
typename geometry::coordinate_type<Geometry>::type,
CalculationType,
DefaultFloatingPointCalculationType,
DefaultIntegralCalculationType
>::type type;
};
template
<
typename Geometry1,
typename Geometry2,
typename CalculationType,
typename DefaultFloatingPointCalculationType = double,
typename DefaultIntegralCalculationType = detail::default_integral::type
>
struct binary
{
typedef typename detail::calculation_type
<
typename select_coordinate_type<Geometry1, Geometry2>::type,
CalculationType,
DefaultFloatingPointCalculationType,
DefaultIntegralCalculationType
>::type type;
};
/*!
\brief calculation type (ternary, for three geometry types)
*/
template
<
typename Geometry1,
typename Geometry2,
typename Geometry3,
typename CalculationType,
typename DefaultFloatingPointCalculationType = double,
typename DefaultIntegralCalculationType = detail::default_integral::type
>
struct ternary
{
typedef typename detail::calculation_type
<
typename select_most_precise
<
typename coordinate_type<Geometry1>::type,
typename select_coordinate_type
<
Geometry2,
Geometry3
>::type
>::type,
CalculationType,
DefaultFloatingPointCalculationType,
DefaultIntegralCalculationType
>::type type;
};
}} // namespace calculation_type::geometric
} // namespace util
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_UTIL_CALCULATION_TYPE_HPP
|
{"hexsha": "5b399d235a67c69d8e1a8b5b520df24ffbd31696", "size": 6462, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "mysql-server/include/boost_1_73_0/patches/boost/geometry/util/calculation_type.hpp", "max_stars_repo_name": "silenc3502/MYSQL-Arch-Doc-Summary", "max_stars_repo_head_hexsha": "fcc6bb65f72a385b9f56debc9b2c00cee5914bae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mysql-server/include/boost_1_73_0/patches/boost/geometry/util/calculation_type.hpp", "max_issues_repo_name": "silenc3502/MYSQL-Arch-Doc-Summary", "max_issues_repo_head_hexsha": "fcc6bb65f72a385b9f56debc9b2c00cee5914bae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mysql-server/include/boost_1_73_0/patches/boost/geometry/util/calculation_type.hpp", "max_forks_repo_name": "silenc3502/MYSQL-Arch-Doc-Summary", "max_forks_repo_head_hexsha": "fcc6bb65f72a385b9f56debc9b2c00cee5914bae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.592920354, "max_line_length": 106, "alphanum_fraction": 0.6182296503, "num_tokens": 1229}
|
"""
Visualize the transformations
Matplotlib:
quiver plot
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# Function to plot a single transformation
def plot_transformation(transformation):
"""
Plot Transformation matrix
...
Parameters
---
transformation: 4x4 transformation matrix
Returns
---
None
Notes
---
RGB -> XYZ
"""
fig = plt.figure()
ax = fig.gca(projection='3d')
# x, y, z of 6 arrows in a quiver plot
x = np.array([0, 0, 0, transformation[0, 3], transformation[0, 3], transformation[0, 3]])
y = np.array([0, 0, 0, transformation[1, 3], transformation[1, 3], transformation[1, 3]])
z = np.array([0, 0, 0, transformation[2, 3], transformation[2, 3], transformation[2, 3]])
# u, v, w of 6 arrows in a quiver plot
u = np.concatenate([np.array([1, 0, 0]), transformation[:3, 0]])
v = np.concatenate([np.array([0, 1, 0]), transformation[:3, 1]])
w = np.concatenate([np.array([0, 0, 1]), transformation[:3, 2]])
# Color(RGB) for 6 arrows, original X, Y, Z and then transformed X, Y, Z
red = np.array([1, 0, 0])
green = np.array([0, 1, 0])
blue = np.array([0, 0, 1])
colors = np.array([red, green, blue, red, green, blue])
q = ax.quiver(x, y, z, u, v, w, length=0.05, colors = colors, lw=1)
plt.plot([x[0], x[3]], [y[0], y[3]], [z[0], z[3]], '--', color = 'black')
plt.show()
# Function to plot a list of transformations
def plot_transformations(transformations):
"""
Plot Transformation matrix
...
Parameters
---
transformation: list of 4x4 transformation matrix
Returns
---
None
Notes
---
RGB -> XYZ
"""
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.array([])
y = np.array([])
z = np.array([])
u = np.array([])
v = np.array([])
w = np.array([])
red = np.array([1, 0, 0])
green = np.array([0, 1, 0])
blue = np.array([0, 0, 1])
colors = []
for transformation in transformations:
x = np.concatenate([x, [transformation[0, 3], transformation[0, 3], transformation[0, 3]]])
y = np.concatenate([y, [transformation[1, 3], transformation[1, 3], transformation[1, 3]]])
z = np.concatenate([z, [transformation[2, 3], transformation[2, 3], transformation[2, 3]]])
u = np.concatenate([u, transformation[:3, 0]])
v = np.concatenate([v, transformation[:3, 1]])
w = np.concatenate([w, transformation[:3, 2]])
colors.append(red)
colors.append(green)
colors.append(blue)
q = ax.quiver(x, y, z, u, v, w, length=0.05, colors = colors, lw=1)
for i in range(x.shape[0] - 3):
plt.plot([x[i], x[i+3]], [y[i], y[i+3]], [z[i], z[i+3]], '--', color = 'black')
plt.show()
def plot_joint_trajectory(q, qd, qdd):
"""
Function to plot joint trajectories
...
Parameters
---
q : Joint Position (Dof x m)
qd : Joint Velocity (Dof x m)
qdd : Joint Acceleration (Dof x m)
Returns
---
None
"""
m = q.shape[1]
timesteps = np.linspace(0, 1, num = m)
n = q.shape[0]
fig, axis = plt.subplots(3)
fig.suptitle("Joint Trajectories")
# Joint Position Plot
axis[0].set_title("Position")
axis[0].set(xlabel = "Time", ylabel = "Position")
for i in range(n):
axis[0].plot(timesteps, q[i])
# Joint Velocity Plot
axis[1].set_title("Velocity")
axis[1].set(xlabel = "Time", ylabel = "Velocity")
for i in range(n):
axis[1].plot(timesteps, qd[i])
# Joint Acceleration Plot
axis[2].set_title("Acceleration")
axis[2].set(xlabel = "Time", ylabel = "Acceleration")
for i in range(n):
axis[2].plot(timesteps, qdd[i])
# Legends
legends = [f"Joint_{i + 1}" for i in range(n)]
axis[0].legend(legends)
axis[1].legend(legends)
axis[2].legend(legends)
fig.tight_layout()
plt.show()
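# Hedged usage sketch (illustrative addition): plot the identity frame next to
# a hypothetical frame translated along x and rotated 90 degrees about z. The
# numbers are arbitrary and chosen only to make the two frames easy to tell apart.
if __name__ == "__main__":
    T = np.array([[0., -1., 0., 0.1],
                  [1.,  0., 0., 0. ],
                  [0.,  0., 1., 0. ],
                  [0.,  0., 0., 1. ]])
    plot_transformation(T)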
|
{"hexsha": "b2238dea446d005761b3bf93a06b262e8a264024", "size": 4032, "ext": "py", "lang": "Python", "max_stars_repo_path": "Part-15-QuinticInterpolation/tools/visualize.py", "max_stars_repo_name": "SakshayMahna/Robotics-Mechanics", "max_stars_repo_head_hexsha": "3fa4b5860c4c9b4e22bd8799c0edc08237707aef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Part-15-QuinticInterpolation/tools/visualize.py", "max_issues_repo_name": "SakshayMahna/Robotics-Mechanics", "max_issues_repo_head_hexsha": "3fa4b5860c4c9b4e22bd8799c0edc08237707aef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Part-15-QuinticInterpolation/tools/visualize.py", "max_forks_repo_name": "SakshayMahna/Robotics-Mechanics", "max_forks_repo_head_hexsha": "3fa4b5860c4c9b4e22bd8799c0edc08237707aef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-16T08:18:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T08:18:52.000Z", "avg_line_length": 24.736196319, "max_line_length": 99, "alphanum_fraction": 0.5724206349, "include": true, "reason": "import numpy", "num_tokens": 1266}
|
#!/usr/bin/python
## ### ### ##
# ### ### #
# #
# ### ### ### #
# ### ### ### #
# # # # #
## # # # ##
import pandas as pd
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
def get_residuals(model,fit_parameter,original_x,original_y):
    # NOTE: unused below; expects a PolyModel class and a fit object exposing a
    # `pardict` of coefficients, both provided elsewhere.
    if model == "PolyModel":
        m = PolyModel()
        res = (original_y - m.f(original_x,a1=fit_parameter.pardict['a1'],a2=fit_parameter.pardict['a2'],a3=fit_parameter.pardict['a3'],a4=fit_parameter.pardict['a4'],a5=fit_parameter.pardict['a5']))**2
        res = np.mean(res)
        return res
def integrate(qm2,df3,ft):
t = np.arange(0,df3.t.tolist()[-1],0.5)
ys = np.poly1d(qm2[0])(t)
# ys -= qm2[0][4]
ii=0
tau_600 = 0
tau_300 = 0
while (ii<len(ys)-1):
if(ys[ii]<ft) & (ys[ii+1]>=ft):
tau_300 = t[ii]
if(ys[ii]<2*ft) & (ys[ii+1]>=2*ft):
tau_600 = t[ii]
break
ii+=1
return tau_600-tau_300,tau_600
def m_plot(qm2,df2,l):
plt.figure(l.split('/')[-1])
plt.plot(df2.t,np.poly1d(qm2[0])(df2.t),'--',label="model")
plt.plot(df2.t,df2.Lesion,'.',label="Lesion raw")
plt.legend()
show()
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("path_in", help="the path to the file containing temporal data computed by INFEST")
parser.add_argument("path_out", help="the path to the file containing LDT and Latency",default='')
parser.add_argument("-ft","--first", help="the first time to consider for the computation of the LDT",type=int,default=300,)
parser.add_argument("-g","--graph", action="store_true",help="monitoring the fit of the curve")
args = parser.parse_args()
print("Open "+args.path_in)
df=pd.read_csv(args.path_in,sep="\t")
df['t'] = (df['time'])*10
leaf = np.unique(df.Id)
out = "Id\ta1\ta2\ta3\ta4\ta5\tresiduals\tLDT\tLatency\n"
ii = 0
for l in leaf:
# df2 = df[(df.Id == l) & (df.t<1500) & (df.t>600)]
df2 = df[(df.Id == l)]
if size(df2.t[df2.Lesion>300]) > 10 :
qm2 = np.polyfit(df2.t,df2.Lesion,4,full=True)
if args.graph:
m_plot(qm2,df2,args.path_in+l)
res = qm2[1][0]
puissance63,puissance60 = integrate(qm2,df2,args.first)
new_out = l+"\t"+str(qm2[0][0])+"\t"+str(qm2[0][1])+"\t"+str(qm2[0][2])+"\t"+str(qm2[0][3])+"\t"+str(qm2[0][4])+"\t"+str(res)+"\t"+str(puissance63)+"\t"+str(puissance60)+"\n"
out+= new_out
else:
fig = plt.figure(l.split('/')[-1])
new_out = l+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\n"
print("Bad Data: Lesion size < 30 pxl")
print("save as "+args.path_out)
f = open(args.path_out,"w")
f.write(out)
f.close()
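# Hedged usage example (hypothetical file names, matching the CLI defined above):
#   python fit_INFEST.py analyse.txt ldt.txt -ft 300 -g
# fits a quartic to each leaf's lesion curve, writes one tab-separated row per
# leaf (polynomial coefficients, residual, LDT, Latency), and, with -g, shows
# each fit for visual inspection.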
|
{"hexsha": "649e65338a799159c1d4ebb006e6793f25c74f91", "size": 2721, "ext": "py", "lang": "Python", "max_stars_repo_path": "fit_INFEST.py", "max_stars_repo_name": "A02l01/INFEST", "max_stars_repo_head_hexsha": "6cb201a745ea8c780d2b00f68124f2ae892d3ef4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fit_INFEST.py", "max_issues_repo_name": "A02l01/INFEST", "max_issues_repo_head_hexsha": "6cb201a745ea8c780d2b00f68124f2ae892d3ef4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fit_INFEST.py", "max_forks_repo_name": "A02l01/INFEST", "max_forks_repo_head_hexsha": "6cb201a745ea8c780d2b00f68124f2ae892d3ef4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.275862069, "max_line_length": 177, "alphanum_fraction": 0.5711135612, "include": true, "reason": "import numpy", "num_tokens": 972}
|
module AwesomeQuantumStates
using Yao
# GHZ
"""
GHZ state
"""
GHZ(n) = register(bit"0"^n) + register(bit"1"^n)
end # module
|
{"hexsha": "25b868350b72ecf6a827860b39ede5fb58ef1a18", "size": 132, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/AwesomeQuantumStates.jl", "max_stars_repo_name": "Roger-luo/AwesomeQuantumStates.jl", "max_stars_repo_head_hexsha": "5b98b70cc2d7da445995e61670f73e0ff605e58a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/AwesomeQuantumStates.jl", "max_issues_repo_name": "Roger-luo/AwesomeQuantumStates.jl", "max_issues_repo_head_hexsha": "5b98b70cc2d7da445995e61670f73e0ff605e58a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/AwesomeQuantumStates.jl", "max_forks_repo_name": "Roger-luo/AwesomeQuantumStates.jl", "max_forks_repo_head_hexsha": "5b98b70cc2d7da445995e61670f73e0ff605e58a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.1538461538, "max_line_length": 48, "alphanum_fraction": 0.6363636364, "num_tokens": 44}
|
from __future__ import print_function, division, absolute_import
import numpy as np
from keras.preprocessing.image import Iterator
from scipy import linalg
from scipy.signal import resample
import keras.backend as K
import warnings
from scipy.ndimage.interpolation import shift
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data.
y: Numpy array of targets data.
audio_data_generator: Instance of `AudioDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the audio
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
audio (if `save_to_dir` is set).
save_format: Format to use for saving sample audio
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in AudioDataGenerator.
"""
def __init__(self, x, y, audio_data_generator,
batch_size=32, shuffle=False, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if y is not None and len(x) != len(y):
raise ValueError('`x` (audio tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * audio_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = 'channels_last'
self.x = np.asarray(x, dtype=K.floatx())
if self.x.ndim != 3:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 3. You passed an array '
'with shape', self.x.shape)
channels_axis = 2 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(
channels_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' + str(
self.x.shape) +
' (' + str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.audio_data_generator = audio_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.audio_data_generator.random_transform(x.astype(K.floatx()))
x = self.audio_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
raise NotImplementedError
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
class AudioDataGenerator(object):
"""Generate batches of tensor audio data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean. Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean. Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
        roll_range: Float (fraction of total sample length). Range of horizontal circular shifts.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
zoom_range: Float (fraction of zoom) or [lower, upper].
noise: [mean,std,'Normal'] or [lower,upper,'Uniform']
Add Random Additive noise. Noise is added to the data with a .5 probability.
noiseSNR: Float required SNR in dB. Noise is added to the data with a .5 probability(NotImplemented)
shift: Float (fraction of total sample). Range of horizontal shifts
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'.
Points outside the boundaries of the input are filled according to the given mode:
'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
'nearest': aaaaaaaa|abcd|dddddddd
'reflect': abcddcba|abcd|dcbaabcd
'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int. Value used for points outside the boundaries when `fill_mode = "constant"`.
rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
preprocessing_function: function that will be implied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: One of {"channels_first", "channels_last"}.
"channels_last" mode means that the images should have shape `(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape `(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation (strictly between 0 and 1).
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
roll_range=0.,
brightness_range=None,
zoom_range=0.,
shift=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
noise=None,
validation_split=0.0):
if data_format is None:
data_format = 'channels_last'
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.roll_range = roll_range
self.brightness_range = brightness_range
self.zoom_range = zoom_range
self.horizontal_flip = horizontal_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.fill_mode = fill_mode
self.cval = cval
self.shift = shift
self.noise = noise
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('`data_format` should be `"channels_last"` (channel after row and '
'column) or `"channels_first"` (channel before row and column). '
'Received arg: ', data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
if data_format == 'channels_last':
self.channel_axis = 2
self.row_axis = 1
if validation_split and not 0 < validation_split < 1:
raise ValueError('`validation_split` must be strictly between 0 and 1. '
' Received arg: ', validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
                warnings.warn('This AudioDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
                warnings.warn('This AudioDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
                warnings.warn('This AudioDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This AudioDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
if noise:
if len(noise) != 3:
raise ValueError('`noise` should be a list of format'
'[mean,std,`Normal`] or [lower,upper,`Uniform`]'
'Received arg: ', noise)
if noise[-1] not in {'Uniform', 'Normal'}:
raise ValueError('Distribution not recognised', noise[-1])
def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of
augmented/normalized data.
# Arguments
            x: data. Should have rank 3,
                `(samples, length, channels)`. In case of
                single-channel audio, the channels axis
                should have value 1.
y: labels.
batch_size: int (default: 32).
shuffle: boolean (default: True).
seed: int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: str (default: `''`). Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg" (only relevant if `save_to_dir` is set). Default: "png".
# Returns
An Iterator yielding tuples of `(x, y)` where `x` is a numpy array of image data and
`y` is a numpy array of corresponding labels."""
if self.noise:
shuffle = True
warnings.warn('This AudioDataGenerator specifies '
'`noise`, which overrides the setting of'
'`shuffle` as True'
)
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory, and generates batches of augmented/normalized data.
# Arguments
directory: path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images inside each of the subdirectories directory tree will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d) for more details.
target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: one of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to have 1 or 3 color channels.
classes: optional list of class subdirectories (e.g. `['dogs', 'cats']`).
Default: None. If not provided, the list of classes will
be automatically inferred from the subdirectory names/structure under `directory`,
where each subdirectory will be treated as a different class
(and the order of the classes, which will map to the label indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: one of "categorical", "binary", "sparse", "input" or None.
Default: "categorical". Determines the type of label arrays that are
returned: "categorical" will be 2D one-hot encoded labels, "binary" will be 1D binary labels,
"sparse" will be 1D integer labels, "input" will be images identical to input images (mainly used to work with autoencoders).
If None, no labels are returned (the generator will only yield batches of image data, which is useful to use
`model.predict_generator()`, `model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory of `directory` for it to work correctly.
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally specify a directory to which to save
the augmented pictures being generated (useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures (only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg" (only relevant if `save_to_dir` is set). Default: "png".
follow_links: whether to follow symlinks inside class subdirectories (default: False).
# Returns
A DirectoryIterator yielding tuples of `(x, y)` where `x` is a numpy array of image data and
`y` is a numpy array of corresponding labels.
"""
raise NotImplementedError
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
# Arguments
x: batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This AudioDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This AudioDataGenerator specifies '
'`featurewise_std_normalization`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-2:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This AudioDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augment a single image tensor.
# Arguments
x: 2D tensor.
seed: random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
        # x is a single audio clip, so it has no batch axis at index 0
img_row_axis = self.row_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
if not (self.zoom_range[0] == 1 and self.zoom_range[1] == 1):
zx = np.random.uniform(self.zoom_range[0], self.zoom_range[1])
input_length = x.shape[img_row_axis]
x = resample(x, num=int(zx * x.shape[img_row_axis]), axis=img_row_axis)
if x.shape[img_row_axis] >= input_length:
x = x[:input_length]
else:
x = np.pad(x, ((0, input_length - x.shape[img_row_axis]), (0, 0)),
'constant', constant_values=(0, np.mean(x)))
        if self.shift:
hx = np.random.uniform(-self.shift, self.shift)
x = shift(x, (int(hx * x.shape[img_row_axis]), 0), mode=self.fill_mode, cval=self.cval)
if self.roll_range:
tx = np.random.uniform(-self.roll_range, self.roll_range)
if self.roll_range < 1:
tx *= x.shape[img_row_axis]
x = np.roll(x, int(tx), axis=(img_row_axis))
if self.horizontal_flip:
if np.random.random() < 0.5:
x = np.flip(x, axis=img_row_axis)
if (self.noise):
if np.random.random() < 0.5:
if self.noise[-1] == 'Uniform':
x = x + np.random.uniform(self.noise[0], self.noise[1], size=x.shape)
elif self.noise[-1] == 'Normal':
x = x + np.random.normal(self.noise[0], self.noise[1], size=x.shape)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Compute the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if featurewise_center or featurewise_std_normalization or zca_whitening.
# Arguments
x: sample data.
augment: Boolean (default: False). Whether to fit on randomly augmented samples.
rounds: int (default: 1). If augment, how many augmentation passes over the data to use.
seed: int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Input to `.fit()` should have rank 3. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
                'Expected input to be audio data (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(
self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(
x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
raise NotImplementedError
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis))
broadcast_shape = [1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis))
broadcast_shape = [1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
def random_brightness(x, brightness_range):
"""Perform a random brightness shift.
# Arguments
x: Input tensor. Must be 2D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy audio tensor.
# Raises
        ValueError if `brightness_range` isn't a tuple or list of two floats.
"""
if len(brightness_range) != 2:
        raise ValueError('`brightness_range` should be a tuple or list of two floats. '
                         'Received arg: ', brightness_range)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = u * x
return x
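# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). The constructor flags below are assumptions that mirror Keras'
# ImageDataGenerator, and the audio shape (samples, timesteps, channels) is
# hypothetical.
if __name__ == '__main__':
    datagen = AudioDataGenerator(featurewise_center=True,
                                 featurewise_std_normalization=True)
    x_train = np.random.randn(64, 16000, 1).astype('float32')  # fake audio batch
    datagen.fit(x_train)                       # estimates self.mean / self.std
    x = datagen.random_transform(x_train[0])   # augment a single clip
    x = datagen.standardize(x)                 # then apply the normalization config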
|
{"hexsha": "922d24206d67762a4d2612c778fb18988cb5ffeb", "size": 25575, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/AudioDataGenerator.py", "max_stars_repo_name": "sushmit0109/ASSC", "max_stars_repo_head_hexsha": "8beda6f3d055a35fff9ae2ff417b38a38e2a7fa5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-04-28T04:36:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T10:24:22.000Z", "max_issues_repo_path": "codes/AudioDataGenerator.py", "max_issues_repo_name": "sushmit0109/ASSC", "max_issues_repo_head_hexsha": "8beda6f3d055a35fff9ae2ff417b38a38e2a7fa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-01-21T06:13:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-15T11:32:49.000Z", "max_forks_repo_path": "codes/AudioDataGenerator.py", "max_forks_repo_name": "sushmit0109/ASSC", "max_forks_repo_head_hexsha": "8beda6f3d055a35fff9ae2ff417b38a38e2a7fa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-28T07:35:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-18T00:20:21.000Z", "avg_line_length": 49.7568093385, "max_line_length": 157, "alphanum_fraction": 0.5660606061, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5302}
|
from time import sleep
import numpy as np
from keras.callbacks import Callback
class RussianRoulette(Callback):
"""Play a game of russian roulette.
# Arguments
rounds: int, number of bullets that will be loaded.
chambers: int, number of bullet chambers.
firings: int, number of times the trigger will be pulled.
"""
def __init__(self, rounds=1, chambers=6, firings=1, **kwargs):
super(RussianRoulette, self).__init__(**kwargs)
if chambers < 5 or chambers > 12:
raise ValueError('A revolver has 5-12 chambers.')
if rounds < 1:
raise ValueError('No cheating... you have to put at least'
' one round in the revolver.')
if firings < 1:
raise ValueError('No cheating... you have to fire at least'
' once.')
if chambers - rounds < firings:
raise ValueError('Someone has a deathwish... give yourself'
' a chance to live.')
self.rounds = rounds
self.chambers = [False]*chambers
self.firings = firings
def on_train_begin(self, logs=None):
        # store the initial weights so the trained network can be destroyed later
self.starting_weights = self.model.get_weights()
def on_train_end(self, logs=None):
print(' ______________________________')
print('| |')
        print("| LET'S PLAY RUSSIAN ROULETTE! |")
print('|______________________________|')
# sabotage seed cheaters!
seed_state = np.random.get_state()
np.random.seed(None)
        # insert the rounds into consecutive chambers
        chamber = np.random.randint(0, len(self.chambers))
        for _ in range(self.rounds):
            print('\nInserting round')
            self.chambers[chamber % len(self.chambers)] = True
            sleep(1)
            chamber += 1
# spin until it lands on chamber
print('\nSpinning cylinder\n')
for _ in range(5):
sleep(1)
print('.')
        chamber = np.random.randint(0, len(self.chambers))
# restore the seed
np.random.set_state(seed_state)
# fire the revolver and see if chamber is loaded
for _ in range(self.firings):
sleep(1)
print('\nSqueezing trigger')
sleep(2)
if self.chambers[chamber % len(self.chambers)]:
                # destroy the trained weights by resetting them to the initial values
                self.model.set_weights(self.starting_weights)
raise RuntimeError('You died... Thank you for playing!')
else:
print('\nCLICK!')
chamber += 1
print('\nYou survived! Make it matter.')
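# ----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file). The
# tiny model and random data below are hypothetical stand-ins; any compiled
# Keras model works.
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential([Dense(1, input_shape=(4,))])
    model.compile(optimizer='sgd', loss='mse')
    x, y = np.random.randn(32, 4), np.random.randn(32, 1)
    # one round in six chambers, one trigger pull: roughly a 1/6 chance of losing the run
    model.fit(x, y, epochs=1, verbose=0,
              callbacks=[RussianRoulette(rounds=1, chambers=6, firings=1)])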
|
{"hexsha": "ebb6ace2ac0366d0f7009c3582195ff983a6c114", "size": 2936, "ext": "py", "lang": "Python", "max_stars_repo_path": "masochism/callbacks.py", "max_stars_repo_name": "simon-larsson/keras-masochism", "max_stars_repo_head_hexsha": "1d869d1a092b6c324f1183e292f95ccf235b3e4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-06T00:09:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-06T00:09:50.000Z", "max_issues_repo_path": "masochism/callbacks.py", "max_issues_repo_name": "simon-larsson/keras-masochism", "max_issues_repo_head_hexsha": "1d869d1a092b6c324f1183e292f95ccf235b3e4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "masochism/callbacks.py", "max_forks_repo_name": "simon-larsson/keras-masochism", "max_forks_repo_head_hexsha": "1d869d1a092b6c324f1183e292f95ccf235b3e4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7471264368, "max_line_length": 72, "alphanum_fraction": 0.5303133515, "include": true, "reason": "import numpy", "num_tokens": 626}
|
import sys  # used by sys.stdout.flush() below
import cvxpy as cvx
import numpy as np
from scipy.optimize import root, minimize
from numpy.linalg import norm, inv, slogdet
import scipy.linalg as sla
from numpy import exp
import numpy.random as ra
import numpy.linalg as la
import scipy.stats
import ipdb
from Functions.objective_functions import LinearModel
from scipy.special import logsumexp
from myutils3_v2 import *
import calcsubset
'''Placeholder base classes that all GLM bandit algorithms must inherit from.'''
def gloc_solve_by_cvx(d,S,theta_prime,At):
th = cvx.Variable(d)
obj = cvx.Minimize(cvx.quad_form(th - theta_prime, At))
cons = [cvx.norm(th) <= S]
prob = cvx.Problem(obj, cons)
prob.solve()
return np.array(th.value).flatten()
def calc_sqrt_beta_det2(d,t,R,ridge,delta,S,logdetV):
    return R * np.sqrt( logdetV - d*log(ridge) + log(1/(delta**2)) ) + sqrt(ridge) * S
def calc_sqrt_beta_det3(t,R,ridge,delta,S,logdetV,logdetV0):
""" to allow diagonal regularization
"""
    return R * np.sqrt( logdetV - logdetV0 + log(1/(delta**2)) ) + sqrt(ridge) * S
def calc_sqrt_beta_det4(t,R,ridge,delta,Sp,logdetV,logdetV0):
    """ to allow diagonal regularization; a better version in which `Sp`
    generalizes the `sqrt(ridge) * S` term of the previous versions.
    """
    return R * np.sqrt( logdetV - logdetV0 + log(1/(delta**2)) ) + Sp
def calc_sqrt_beta_thompson(d,t,R,delta):
return R * np.sqrt(9 * d * np.log(t/delta))
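#- Illustrative example (added; not in the original file): with
#- `logdetV0 = d*log(ridge)` and `Sp = sqrt(ridge)*S`, `calc_sqrt_beta_det4`
#- reduces to the standard OFUL radius computed by `calc_sqrt_beta_det2`.
#- All numbers below are hypothetical.
def _demo_sqrt_beta(d=4, t=10, R=0.5, ridge=1.0, delta=0.1, S=1.0, logdetV=2.0):
    logdetV0 = d * np.log(ridge)
    Sp = np.sqrt(ridge) * S
    b2 = calc_sqrt_beta_det2(d, t, R, ridge, delta, S, logdetV)
    b4 = calc_sqrt_beta_det4(t, R, ridge, delta, Sp, logdetV, logdetV0)
    return b2, b4  # the two radii agree up to floating point error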
def mu_logistic(z):
return 1.0/(1.0+np.exp(-z))
def mu_probit(z):
return scipy.stats.norm.cdf(z)
def logistic_loss_pm1(w, x, y):
y = float(y)
assert (y in [+1.0, -1.0])
z = y *np.dot(x,w)
return np.log(1 + np.exp(-z))
def logistic_loss_01(w, x, y):
assert (y in [0.0, 1.0])
yy = y * 2 - 1
return logistic_loss_pm1(w,x,yy)
def logistic_loss_pm1_grad(w, x, y):
y = float(y)
assert (y in [+1.0, -1.0])
z = y * np.dot(x,w)
return - 1.0 / (1 + np.exp(z)) * (y*x)
def logistic_loss_01_grad(w, x, y):
assert (y in [0.0, 1.0])
yy = y * 2 - 1
return logistic_loss_pm1_grad(w,x,yy)
def mfunc_logistic(z):
return np.log(1.0 + np.exp(z))
def mfunc_logistic_der(z):
return 1.0/(1.0 + np.exp(-z))
from sklearn import metrics
def evalAuc(banditObj, dataObj, nTry=1):
"""
evaluates deployment performance by AUC.
repeats are important when the data is synthetic and generated by p(y|x)
"""
aucList = []
trainAucList = []
testAucList = []
N = dataObj.N
for iTry in range(nTry):
#- 1. generate the labels
y = np.array([dataObj.get_reward(i) for i in range(dataObj.N)])
#- 2. let the bandit compute the scores (0.0-1.0)
pred = banditObj.predict(dataObj.X)
#- 3. normalize the pred to be in (0.0-1.0)
mini = pred.min()
maxi = pred.max()
yhat = (pred - mini) / (maxi - mini)
#- 4. measure auc
auc = metrics.roc_auc_score(y, yhat)
assert(np.all(dataObj.X == banditObj.X))
do_not_ask = banditObj.getDoNotAsk()
trainAuc = metrics.roc_auc_score(y[do_not_ask], yhat[do_not_ask])
testIdx = np.setdiff1d(np.arange(N), do_not_ask)
testAuc = metrics.roc_auc_score(y[testIdx], yhat[testIdx])
aucList.append(auc)
trainAucList.append(trainAuc)
testAucList.append(testAuc)
return np.mean(aucList), np.mean(trainAucList), np.mean(testAucList)
################################################################################
# bandit classes
################################################################################
class Bandit(object):
def __init__(self, X, theta):
raise NotImplementedError()
def next_arm(self):
raise NotImplementedError()
def update(self):
raise NotImplementedError()
def get_debug_dict(self):
raise NotImplementedError()
class Glm(object):
def __init__(self):
raise NotImplementedError()
class GlmLogistic(Glm):
def __init__(self):
raise NotImplementedError()
@staticmethod
def negloglik(z, y):
assert (float(y) == 1.0 or float(y) == 0.0)
return -z*y + GlmLogistic.m(z)
@staticmethod
def negloglik_derivative(z, y):
assert (float(y) == 1.0 or float(y) == 0.0)
return -y + GlmLogistic.mu(z)
@staticmethod
def mu(z): # link function
return 1.0/(1.0+np.exp(-z))
@staticmethod
def m(z): # integral of mu
return logsumexp([np.zeros(z.shape), z],axis=0) #np.log(1.0 + np.exp(z))
# return np.log(1.0 + np.exp(z))
@staticmethod
def getKappa(S):
return 1.0 / ((1 + exp(S)) * (1 + exp(-S)))
class GlmProbit(Glm):
def __init__(self):
raise NotImplementedError()
@staticmethod
def negloglik(z, y):
assert (float(y) == 1.0 or float(y) == 0.0)
return -z*y + GlmProbit.m(z)
@staticmethod
def negloglik_derivative(z, y):
assert (float(y) == 1.0 or float(y) == 0.0)
return -y + GlmProbit.mu(z)
@staticmethod
def mu(z): # link function
return scipy.stats.norm.cdf(z)
@staticmethod
def m(z): # integral of mu
        return z*scipy.stats.norm.cdf(z) + scipy.stats.norm.pdf(z)
@staticmethod
def getKappa(S):
return scipy.stats.norm.pdf(S)
pass
class GlmGaussian(Glm):
    def __init__(self):
        raise NotImplementedError()
    @staticmethod
    def negloglik(z, y):
        return -z*y + GlmGaussian.m(z)
    @staticmethod
    def negloglik_derivative(z, y):
        return -y + GlmGaussian.mu(z)
@staticmethod
def mu(z): # link function
return z
@staticmethod
def m(z): # integral of mu
return .5*z**2
@staticmethod
def getKappa(S):
return 1.0
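#- Quick consistency check (added for illustration; not in the original file):
#- for every GLM above, d/dz negloglik(z, y) = -y + mu(z), since m'(z) = mu(z).
#- The values of z and y below are hypothetical.
def _demo_glm_gradient(z=0.3, y=1.0):
    assert abs(GlmLogistic.negloglik_derivative(z, y) - (-y + mu_logistic(z))) < 1e-12
    assert abs(GlmGaussian.negloglik_derivative(z, y) - (-y + z)) < 1e-12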
########################################
class BilinearGlocNuclear(Bandit):
########################################
"""
note that this is just for the squared loss (1/2)*(theta^T * x - y)^2
R: subgaussian parameter.
S: norm upper bound of theta_star
r: range parameter for qgloc
    kappa: lower bound on the derivative of \mu, always 1 for squared loss.
"""
def __init__(self, X, Z, lam, R, S_star, glm=GlmGaussian, flags={},
multiplier=1.0, calc_radius_version=3, bArmRemoval=False):
self.X = X
self.Z = Z
self.lam = lam
self.R = R
self.S_star = S_star
self.glm = glm
self.kappa = glm.getKappa(self.S_star)
assert self.kappa == 1.0
self.multiplier = multiplier
self.bArmRemoval = bArmRemoval
self.bNaive = flags.get('bNaive', False)
#- more instance variables
self.t = 1
self.N1, self.d1 = self.X.shape
self.N2, self.d2 = self.Z.shape
# super arms
self.N = self.N1 * self.N2
self.d = self.d1 * self.d2
self.W = np.zeros( (self.N, self.d) )
for i in range(self.W.shape[0]):
i1, i2 = np.unravel_index(i, (self.N1, self.N2))
self.W[i,:] = np.outer(self.X[i1,:], self.Z[i2,:]).ravel()
self.At = np.eye(self.d)*self.lam
self.invAt = np.eye(self.d)/self.lam
self.theta_s = np.zeros(self.d);
# print "Note that I am setting theta_s as zeros and this is warned to cause an error in cvx."
self.theta_hat = self.theta_s.copy()
#- theta_hat = np.zeros(self.d); # this causes an error in cvx
#- alternative: I could randomize with just a bit of noise
#-- TODO I could use the following to speed up
# self.W_invVt_norm_sq = np.sum(self.W * self.W, axis=1) / self.lam
self.WTq = np.zeros(self.d)
self.sum_q_t_sq = 0
self.radius_problem_constant = 0.
self.cvx_th = cvx.Variable(self.d)
#- WARNING: it is important to reshape by (d2,d1) not (d1,d2)
self.cvx_cons = [cvx.norm(cvx.reshape(self.cvx_th, (self.d2,self.d1)), 'nuc') <= self.S_star]
#- do_not_ask_list
assert bArmRemoval == False
self.do_not_ask = []
self.dbg_dict = {
'multiplier':float(multiplier),
'calc_radius_version': calc_radius_version,
}
self.time_obj = 0.0
self.time_cvx = 0.0
self.cvx_th_p = cvx.Parameter(shape=self.d)
self.cvx_A = cvx.Parameter(shape=(self.d,self.d), PSD=True)
self.cvx_obj = cvx.Minimize(cvx.quad_form(self.cvx_th - self.cvx_th_p, self.cvx_A))
self.cvx_prob = cvx.Problem(self.cvx_obj, self.cvx_cons)
def _calc_radius_sq_v1(self): # the best at around 1.0
if (self.t == 1):
radius_sq = 0
else:
radius_sq = self.radius_problem_constant + (self.S_star**2) * self.lam
return radius_sq
def _calc_radius_sq_v2(self): # the best at around 1.0
if (self.t == 1):
radius_sq = 0
else:
radius_sq = self.radius_problem_constant
return radius_sq
def _calc_radius_sq_v3(self): # the best at around 0.01
dt = 0.2; R = self.R
if (self.t == 1):
radius_sq = 0
else:
#- still, omitting some terms.
B = (0.5/self.kappa)*self.radius_problem_constant + 2*self.kappa*(self.S_star**2) * self.lam
inner = 1 + (2/self.kappa)*B + 4*R**4/(self.kappa**4 * dt**2)
extra = (self.sum_q_t_sq - self.theta_hat.dot(self.WTq))
assert (extra > -1e-8)
alpha = 1 + (4.0/self.kappa)*B \
+ (8*R**2/self.kappa**2)*np.log((2/dt)*np.sqrt( inner ))
radius_sq = alpha\
+ self.lam*self.S_star**2 \
- extra
return radius_sq
def _calc_radius_sq(self):
v = self.dbg_dict['calc_radius_version']
if (v == 1):
return self._calc_radius_sq_v1()
elif (v == 2):
return self._calc_radius_sq_v2()
elif (v == 3):
return self._calc_radius_sq_v3()
else:
raise ValueError()
def next_arm(self):
        valid_idx = np.setdiff1d(np.arange(self.N), self.do_not_ask)
if (self.t == 1):
return (ra.randint(self.N1), ra.randint(self.N2)), np.nan
invAt = self.invAt
radius_sq = self.multiplier * self._calc_radius_sq()
tt = tic() # this is only 0.168 while time_cvx is 17.x seconds!!
obj_func = np.dot(self.W, self.theta_hat) \
+ np.sqrt(radius_sq) * np.sqrt(mahalanobis_norm_sq_batch(self.W, self.invAt))
self.time_obj += toc(tt)
arm_inner = np.argmax(obj_func[valid_idx])
arm = valid_idx[arm_inner]
chosenPair = np.unravel_index(arm, (self.N1,self.N2))
return chosenPair, radius_sq
def _solve_by_cvx(self, theta_prime, At):
"""
if you want to improve the speed, might want to use quadprog. see
https://scaron.info/blog/quadratic-programming-in-python.html
"""
d = self.d
th = self.cvx_th
cons = self.cvx_cons
#- new method
obj = self.cvx_obj
prob = self.cvx_prob
self.cvx_th_p.value = theta_prime
self.cvx_A.value = At
if self.bNaive:
return theta_prime
else:
#- previous method
# obj = cvx.Minimize(cvx.quad_form(th - theta_prime, At))
# prob = cvx.Problem(obj, cons)
try:
tt = tic()
# prob.solve(warm_start=True, solver="MOSEK")
#- eps=1e-5 is the default
prob.solve(warm_start=True, solver="SCS", eps=1e-2)
self.time_cvx += toc(tt)
except Exception as inst:
print('#'*40); print('# ' + str(inst)); print('#'*40)
print('try again, with a different solver')
try:
prob.solve(solver=cvx.SCS)
except Exception as inst2:
ipdb.set_trace()
pass
sol = np.array(th.value).flatten()
assert sol[0] is not None
return sol
def update(self, pulled_idx_pair, y_t):
pulled_idx = np.ravel_multi_index(pulled_idx_pair, (self.N1,self.N2))
wt = self.W[pulled_idx, :]
At = self.At
invAt = self.invAt
theta_s = self.theta_s
kappa = self.kappa
d = self.d
At_new = At + np.outer(wt, wt)
At_new = .5*(At_new + At_new.T)
# eigvalsh = sla.eigvalsh(At_new)
# assert min(eigvalsh) >= 0 and eigvalsh.dtype != complex
invAt_new = inv(At_new)
z = np.dot(theta_s, wt)
grad = self.glm.negloglik_derivative(z, y_t); # (1 / (1 + np.exp(-np.dot(theta_s, wt))) - y_t)
theta_prime = theta_s - grad * np.dot(invAt_new, wt) / kappa
try:
theta_s = self._solve_by_cvx(theta_prime, At_new)
except Exception as inst:
eps = 1e-7
print('#'*40); print('# '+ str(inst)); print('#'*40)
print('try again... by adding eps=%g'%eps)
try:
theta_s = self._solve_by_cvx(theta_prime, At_new + eps*np.eye(len(theta_prime)))
except Exception as inst2:
ipdb.set_trace()
assert theta_s[0] is not None
self.At = At_new
self.invAt = invAt_new
self.theta_s = theta_s
#- extra
qt = np.dot(theta_s, wt)
self.WTq += qt * wt
self.sum_q_t_sq += qt**2
invAt = self.invAt
self.radius_problem_constant += (grad ** 2) * np.dot(wt, np.dot(invAt, wt)); # no need for this, but let's keep it this way.
self.theta_hat = np.dot(self.invAt, self.WTq)
if (self.bArmRemoval):
self.do_not_ask.append( pulled_idx_pair )
self.t += 1
def getDoNotAsk(self):
return self.do_not_ask
def predict(self, X=None):
raise NotImplementedError()
if X is None:
X = self.X
return X.dot(self.theta_hat)
def get_debug_dict(self):
return self.dbg_dict
pass
########################################
class BilinearOful(Bandit):
########################################
""" this is now heavily modified for graph bandits.. for the original, please use the one in 'expr-nips17-post'
Sp: 'S prime', for oful, should be \sqrt(lam) * (2-norm bound on theta)
For spectral bandit, should be a bound on ||\\theta||_{V_0}
"""
def __init__(self, X, Z, lam, R, Sp, D=None, flags={}, subsample_func=None, subsample_rate=1.0, multiplier=1.0, binaryRewards=False, bArmRemoval=False):
"""
D allows diagonal regularization.
Warning: Sp must be set to `sqrt(lam) * S` for OFUL.
"""
self.X = X
self.Z = Z
self.R = R
self.lam = lam
self.delta = .2
# self.S_frobnorm = S_frobnorm
# self.Sp = np.sqrt(self.lam) * self.S_frobnorm
self.Sp = Sp
self.flags = flags
self.multiplier = float(multiplier)
self.binaryRewards = binaryRewards
self.bArmRemoval = bArmRemoval
# more instance variables
self.t = 1
self.N1, self.d1 = self.X.shape
self.N2, self.d2 = self.Z.shape
# super arms
self.N = self.N1 * self.N2
self.d = self.d1 * self.d2
self.W = np.zeros( (self.N, self.d) )
for i in range(self.W.shape[0]):
i1, i2 = np.unravel_index(i, (self.N1, self.N2))
self.W[i,:] = np.outer(self.X[i1,:], self.Z[i2,:]).ravel()
#- subsampling aspect (disabled for now)
assert subsample_func == None
self.subsample_func = None
self.WTy = np.zeros(self.d)
self.D = D
if (self.D is None):
self.Vt = self.lam * np.eye(self.d)
self.invVt = np.eye(self.d) / self.lam
self.W_invVt_norm_sq = np.sum(self.W * self.W, axis=1) / self.lam
self.logdetV = self.d*log(self.lam)
else:
assert self.lam is None
self.Vt = D
self.invVt = np.diag(1/np.diag(self.Vt))
self.W_invVt_norm_sq = np.sum((self.W*np.diag(self.invVt)) * self.W, axis=1)
self.logdetV = np.sum(np.log(np.diag(self.Vt)))
self.logdetV0 = self.logdetV
self.sqrt_beta = calc_sqrt_beta_det4(self.t,self.R,self.lam,self.delta,self.Sp,self.logdetV, self.logdetV0)
self.theta_hat = np.zeros(self.d)
assert bArmRemoval == False # let's implement this later on.
self.do_not_ask = []
self.dbg_dict = {'multiplier':float(multiplier)}
self.cache_valid_idx = np.arange(self.N)
#@profile
def next_arm(self):
if (len(self.do_not_ask) == 0):
valid_idx = self.cache_valid_idx
else:
            valid_idx = np.setdiff1d(np.arange(self.N), self.do_not_ask)
if (self.t == 1):
return (ra.randint(self.N1), ra.randint(self.N2)), np.nan
radius_sq = self.multiplier * (self.sqrt_beta)**2
if (self.subsample_func == None):
# obj_func = np.dot(self.W, self.theta_hat) + np.sqrt(radius_sq) * np.sqrt(self.W_invVt_norm_sq)
# A = np.dot(self.W, self.theta_hat)
A = (self.X @ self.theta_hat.reshape(self.d1,self.d2) @ self.Z.T).ravel()
B = np.sqrt(radius_sq) * np.sqrt(self.W_invVt_norm_sq)
obj_func = A + B
if (len(self.do_not_ask) == 0):
chosen = np.argmax(obj_func)
else:
chosen_inner = np.argmax(obj_func[valid_idx])
chosen = valid_idx[chosen_inner]
else:
raise NotImplementedError("use valid_idx")
chosenPair = np.unravel_index(chosen, (self.N1,self.N2))
return chosenPair, radius_sq
def calc_index(self):
""" newly written for `SpectralUCB`
"""
radius_sq = self.multiplier * (self.sqrt_beta)**2
obj_func = np.dot(self.W, self.theta_hat) + np.sqrt(radius_sq) * np.sqrt(self.W_invVt_norm_sq)
if (self.bArmRemoval):
obj_func[self.do_not_ask] = -np.inf
return obj_func
def update(self, pulled_idx_pair, y_t):
pulled_idx = np.ravel_multi_index(pulled_idx_pair, (self.N1,self.N2))
wt = self.W[pulled_idx, :]
if (self.binaryRewards):
assert (y_t >= 0.0 and y_t <= 1.0)
self.WTy += (2*y_t - 1) * wt
else:
self.WTy += y_t*wt
self.Vt += np.outer(wt,wt)
tempval1 = np.dot(self.invVt, wt) # d by 1, O(d^2)
tempval2 = np.dot(tempval1, wt) # scalar, O(d)
self.logdetV += log(1 + tempval2)
if (self.t % 100 == 0):
self.invVt = la.inv(self.Vt)
else:
# self.invVt -= np.outer(tempval1, tempval1) / (1 + tempval2)
aVec = tempval1 / np.sqrt(1 + tempval2)
self.invVt -= np.outer(aVec, aVec)
if (self.subsample_func == None):
# self.W_invVt_norm_sq = mahalanobis_norm_sq_batch(self.W, self.invVt) # O(Nd^2)
# self.W_invVt_norm_sq -= (np.dot(self.W, tempval1) ** 2) / (1 + tempval2) # efficient update, O(Nd)
v = (np.dot(self.X, tempval1.reshape(self.d1,self.d2)) @ self.Z.T).ravel()
self.W_invVt_norm_sq -= (v ** 2) / (1 + tempval2) # efficient update, O(Nd)
pass
self.theta_hat = np.dot(self.invVt, self.WTy)
if (self.bArmRemoval):
self.do_not_ask.append( pulled_idx_pair )
my_t = self.t + 1
self.sqrt_beta = calc_sqrt_beta_det4(my_t,self.R,self.lam,self.delta,self.Sp,self.logdetV,self.logdetV0)
self.t += 1
def getDoNotAsk(self):
return self.do_not_ask
def predict(self, X=None):
raise NotImplementedError()
if X is None:
X = self.X
return X.dot(self.theta_hat)
def get_debug_dict(self):
return self.dbg_dict
#- some functions necessary for the next class
def averageMatrixEntries(armPairs, rewards):
matDict = {}; cntDict = {}
for i in range(armPairs.shape[0]):
[r,c] = armPairs[i,:]
matDict[(r,c)] = matDict.get((r,c), 0.0) + rewards[i]
cntDict[(r,c)] = cntDict.get((r,c), 0) + 1
for ((r,c),v) in matDict.items():
matDict[(r,c)] = v / cntDict[(r,c)]
return matDict
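#- Tiny example (added for illustration; not in the original file): repeated
#- observations of the same (row, col) pair are averaged into a single entry.
def _demo_averageMatrixEntries():
    armPairs = np.array([[0, 1], [0, 1], [2, 0]])
    rewards = np.array([1.0, 3.0, 5.0])
    matDict = averageMatrixEntries(armPairs, rewards)
    assert matDict[(0, 1)] == 2.0 and matDict[(2, 0)] == 5.0
    return matDict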
########################################
class BilinearTwoStage(Bandit):
########################################
""" this is now heavily modified for graph bandits.. for the original, please use the one in 'expr-nips17-post'
"""
def __init__(self, X, Z, lam, R, S_F, sval_max, sval_min, r, C_T1, T, flags={}, subsample_func=None, subsample_rate=1.0, multiplier=1.0, binaryRewards=False, bArmRemoval=False, SpType=None, algoMatrixCompletion='optspace'):
"""
Two stage: internally uses BilinearOful object.
"""
self.X = X
self.Z = Z
self.R = R
self.lam = lam
self.delta = .2
self.S_F = S_F
self.sval_max = sval_max
self.sval_min = sval_min
self.r = r
self.C_T1 = C_T1
self.T = T
self.flags = flags
self.multiplier = float(multiplier)
self.binaryRewards = binaryRewards
self.bArmRemoval = bArmRemoval
self.SpType = SpType
self.algoMatrixCompletion = algoMatrixCompletion
#- to be set in the first stage
self.stage1arms = None
self.stage1rewards = []
self.hatUFull = None; self.hatVFull = None
self.lamp = None
self.Sp = None
        self.oful = None
        self.subsetX = None
        self.subsetZ = None
        self.do_not_ask = []  # arm removal is not used here, but getDoNotAsk() expects this
# more instance variables
self.t = 1
self.N1, self.d1 = self.X.shape
self.N2, self.d2 = self.Z.shape
assert (self.d1 == self.d2)
d = self.d1 # FIXME I should change this
#- old scheme; saved for reference..
# myT1 = int(np.ceil(C_T1 * self.R * d**(3/2) * r**(1/2) * np.sqrt(self.T)))
# self.T1 = np.maximum( myT1, self.r*(self.d1 + self.d2) - self.r**2 )
#- new scheme
minT1 = self.r*(self.d1 + self.d2) - self.r**2
self.T1 = int(np.ceil(C_T1 * minT1))
self.dbg_dict = {}
def next_arm(self):
if (self.t == 1):
# prepare representative arms.
self.subsetX, self.invX_norm = calcsubset.hybrid(self.X, 20)
self.subsetZ, self.invZ_norm = calcsubset.hybrid(self.Z, 20)
self.subsetXInv = dict(zip(self.subsetX, range(len(self.subsetX))))
self.subsetZInv = dict(zip(self.subsetZ, range(len(self.subsetZ))))
mat = dstack_product(self.subsetX, self.subsetZ) # num of super arms (d^2) by 2
idxAry = ra.permutation(mat.shape[0])
nRepeat = int((self.T1 - 1) // len(idxAry) + 1)
idxAry = np.squeeze(np.tile(idxAry, (1,nRepeat)))
self.stage1arms = mat[idxAry[:self.T1],:]
self.stage1rewards = nans(self.T1)
            #- if some row or column of the subset grid would go unobserved, reshuffle so that every row and column is covered
sa = self.stage1arms
if len(np.unique(sa[:,0])) != self.d1 or len(np.unique(sa[:,1])) != self.d2:
idxDiagonals = np.arange(0,len(mat),self.d2+1)
idxRemainders = np.setdiff1d(range(len(mat)), idxDiagonals)
idxRemainders = ra.permutation(idxRemainders)
oneTile = np.concatenate( (idxDiagonals,idxRemainders) )
nRepeat = int((self.T1 - 1) // len(idxAry) + 1)
idxAry = np.squeeze(np.tile(oneTile, (1,nRepeat)))
self.stage1arms = mat[idxAry[:self.T1],:]
#- save necessary stats
self.dbg_dict['T1'] = self.T1
printExpr("self.T1")
pass
if (self.t <= self.T1):
armPairToPull = tuple(self.stage1arms[self.t-1,:])
radius_sq = np.nan
else:
# at the beginning of the second stage
if (self.t == self.T1+1):
#----- invoke matrix completion
#- average out entries observed more than once.; stage1arms: list of (lArmIdx,rArmIdx).
matDict = averageMatrixEntries(self.stage1arms, self.stage1rewards)
#- translate index
smat = [(self.subsetXInv[k1],self.subsetZInv[k2],v) for ((k1,k2),v) in matDict.items()]
#- run optspace and but catch the stdout
if self.algoMatrixCompletion == 'optspace':
import optspace
[U,S,V,out_niter] = optspace.optspace(smat, rank_n=self.r,
num_iter=1000,
tol=1e-4,
verbosity=0,
outfile="")
printExpr('out_niter')
hatK = (U @ S @ V.T)
self.dbg_dict['out_niter'] = out_niter
assert np.all(np.logical_and(~np.isnan(hatK),~np.isinf(hatK)))
elif self.algoMatrixCompletion == 'bm':
myX = []; myZ = []; myRewards = []
for ((k1,k2),v) in matDict.items():
myX.append( indicator(self.subsetXInv[k1],self.d1) )
myZ.append( indicator(self.subsetZInv[k2],self.d2) )
myRewards.append( v )
myX = np.array(myX)
myZ = np.array(myZ)
myRewards = np.array(myRewards)
from matrixrecovery import rankone
U,V,out_nIter,stat = rankone(myX,myZ,myRewards,self.r,self.R)
printExpr('out_nIter')
hatK = U@V.T
self.dbg_dict['out_nIter'] = out_nIter
assert np.all(np.logical_and(~np.isnan(hatK),~np.isinf(hatK)))
else:
raise ValueError()
#- Instead of the following, we do a robust version of the same operation
#- hatTh = la.inv(self.X[self.subsetX,:]) @ hatK @ la.inv(self.Z[self.subsetZ,:].T)
#- note lstsq is like solve(), but solves approximately when ill-conditioned
# tmp = la.solve(self.X[self.subsetX,:], hatK)
# self.hatThStage1 = la.solve(self.Z[self.subsetZ,:], tmp.T).T
tmp, _,_,_ = la.lstsq(self.X[self.subsetX,:], hatK, rcond=None)
tmp2, _,_,__ = la.lstsq(self.Z[self.subsetZ,:], tmp.T, rcond=None)
self.hatThStage1 = tmp2.T
#- get the subspaces
[self.hatUFull, self.hatSFull, VT] = la.svd(self.hatThStage1)
self.hatVFull = VT.T
#- rotate the arms
self.newX = self.X @ self.hatUFull
self.newZ = self.Z @ self.hatVFull
#- prepare oful
d = self.d1 # FIXME just an impromptu
T2 = self.T - self.T1
self.lamp = T2/d/self.r/np.log(1+T2/self.lam) # FIXME I think I should use T rather than T2...
term1 = np.sqrt(self.lam) * self.S_F #2 * np.sqrt(d*self.r)
kappa = self.sval_max / self.sval_min
Cp_Cpp = 1
term2 = np.sqrt(self.lamp) \
* Cp_Cpp**2 * kappa**4 * self.R**2 * d**3 * self.r / self.sval_min**2 / self.T1 \
* self.invX_norm**2 * self.invZ_norm**2
if self.SpType == None:
self.Sp = term1 + self.sval_max * term2
elif self.SpType == 'simple':
term2p = np.sqrt(self.lamp) \
* self.R**2 * d**3 * self.r / self.T1 \
* self.invX_norm**2 * self.invZ_norm**2
self.Sp = term1 + self.sval_max * term2p
elif self.SpType == 'simple2':
term2pp = np.sqrt(self.lamp) \
* self.R**2 * d**3 * self.r / self.T1
self.Sp = term1 + self.sval_max * term2pp
elif self.SpType == 'simple3':
self.Sp = term1
else:
raise ValueError()
k = self.r*(self.d1 + self.d2) - self.r**2
p = self.d1*self.d2
diagvec = [self.lam]*(self.r * self.d2)
row = [self.lam]*(self.r) + [self.lamp]*(self.d2 - self.r)
diagvec += row*(self.d1 - self.r)
self.D = np.diag(diagvec)
# self.D = np.diag([self.lam]*k + [self.lamp]*(p-k))
#- initialize oful
self.oful = BilinearOful(X=self.newX, Z=self.newZ,
lam=None, R=self.R, Sp=self.Sp, D=self.D,
flags={}, multiplier=self.multiplier)
#- pseudo play oful, so it is up to date!
for myt in range(self.T1):
self.oful.update( tuple(self.stage1arms[myt,:]), self.stage1rewards[myt] )
#- get the next arm from oful
armPairToPull, radius_sq = self.oful.next_arm()
return armPairToPull, radius_sq
def update(self, pulled_arm_pair, y_t):
if (self.t <= self.T1):
assert(pulled_arm_pair == tuple(self.stage1arms[self.t-1,:]))
self.stage1rewards[self.t-1] = y_t
else:
self.oful.update(pulled_arm_pair, y_t)
self.t += 1
def getDoNotAsk(self):
return self.do_not_ask
def predict(self, X=None):
raise NotImplementedError()
if X is None:
X = self.X
return X.dot(self.theta_hat)
def get_debug_dict(self):
return self.dbg_dict
####################
class BilinearOneStage(Bandit):
########################################
""" a heuristic method that keeps updating the subspace in every exponentially-space time steps
SpType: 'simple2' or 'simple3'
"""
def __init__(self, X, Z, lam, R, S_F, sval_max, sval_min, r, T, flags={}, subsample_func=None, subsample_rate=1.0, multiplier=1.0, binaryRewards=False, bArmRemoval=False, SpType='simple2'):
self.X = X.astype(float)
self.Z = Z.astype(float)
self.R = R
self.lam = lam
self.delta = .2
self.S_F = S_F
self.sval_max = sval_max
self.sval_min = sval_min
self.r = r
self.T = T
self.flags = flags
self.multiplier = float(multiplier)
self.binaryRewards = binaryRewards # perhaps not being used
self.bArmRemoval = bArmRemoval
self.SpType = SpType # how to form Sp..?
self.subspaceUpdateBase = np.sqrt(2)
#- to be set in the first stage
self.arms = []
self.rewards = []
self.hatUFull = None; self.hatVFull = None
        self.lamp = None
        self.Sp = None
        self.oful = None
        self.do_not_ask = []  # arm removal is not used here, but getDoNotAsk() expects this
# more instance variables
self.t = 1
self.N1, self.d1 = self.X.shape
self.N2, self.d2 = self.Z.shape
assert (self.d1 == self.d2)
d = self.d1 # FIXME I should change this
#- we update subspace after every t=knotList[i]
T1 = self.r*(self.d1 + self.d2) - self.r**2
base = self.subspaceUpdateBase
L = np.ceil(np.log(T/T1) / np.log(base))
self.knotList = np.ceil(L*base ** np.arange(0, L)).astype(int)
#- initialize oful
self.oful = BilinearOful(X=self.X, Z=self.Z,
lam=self.lam, R=self.R, Sp=np.sqrt(self.lam) * self.S_F,
flags={}, multiplier=self.multiplier)
self.dbg_dict = {'knotList': self.knotList,
'out_nIter_list': [] }
def next_arm(self):
return self.oful.next_arm()
def update(self, pulled_arm_pair, y_t):
self.arms.append(pulled_arm_pair)
self.rewards.append(y_t)
self.oful.update(pulled_arm_pair, y_t)
if (self.t in self.knotList):
#- estimate subspace
myX = self.X[[i[0] for i in self.arms],:]
myZ = self.Z[[i[1] for i in self.arms],:]
from matrixrecovery import rankone
U,V,out_nIter,stat = rankone(myX,myZ,np.array(self.rewards),self.r,self.R)
Th = U@V.T
U,S,VT = la.svd(Th)
V = VT.T
self.dbg_dict['out_nIter_list'].append( out_nIter )
#- rotate the arms
newX = self.X @ U
newZ = self.Z @ V
#- restart oful
d = self.d1 # FIXME just an impromptu
# T2 = self.T - self.t
self.lamp = self.T/d/self.r/np.log(1+self.T/self.lam) # different from TwoStage; I am using T instead of T2
term1 = np.sqrt(self.lam) * self.S_F #2 * np.sqrt(d*self.r)
if self.SpType == 'simple2':
term2pp = np.sqrt(self.lamp) \
* self.R**2 * d**3 * self.r / self.t
self.Sp = term1 + self.sval_max * term2pp
elif self.SpType == 'simple3':
self.Sp = term1
else:
raise ValueError()
k = self.r*(self.d1 + self.d2) - self.r**2
p = self.d1*self.d2
diagvec = [self.lam]*(self.r * self.d2)
row = [self.lam]*(self.r) + [self.lamp]*(self.d2 - self.r)
diagvec += row*(self.d1 - self.r)
self.D = np.diag(diagvec)
#- initialize oful
self.oful = BilinearOful(X=newX, Z=newZ,
lam=None, R=self.R, Sp=self.Sp, D=self.D,
flags={}, multiplier=self.multiplier)
#- pseudo play oful, so it is up to date!
for myt in range(self.t):
self.oful.update(self.arms[myt], self.rewards[myt])
pass
self.t += 1
def getDoNotAsk(self):
return self.do_not_ask
def predict(self, X=None):
raise NotImplementedError()
if X is None:
X = self.X
return X.dot(self.theta_hat)
def get_debug_dict(self):
return self.dbg_dict
################################################################################
# for experiments
################################################################################
class DataForBilinearBandit(object):
def __init__(self):
raise NotImplementedError()
def gen_data(self):
raise NotImplementedError()
def get_reward(self, idx_pair):
raise NotImplementedError()
def genRandomFeatures(A, r, d):
"""
A: N × N matrix. the rank is r.
extract features of rows/cols of A so that
A = F @ Th @ G.T, where F and G are N × d, and Th is d × d (and rank r)
"""
U,S,VH = la.svd(A)
S = S[:r]
r = len(S)
U = U[:,:r] * np.sqrt(S)
V = VH.T
V = V[:,:r] * np.sqrt(S)
B = ra.randn(d,r)
F = U @ la.pinv(B)
D = ra.randn(d,r)
G = V @ la.pinv(D)
Th = B@D.T
return F, Th, G
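#- Illustrative check (added; not in the original file): for a rank-r matrix A
#- and d >= r, the returned features reproduce A, i.e. A = F @ Th @ G.T up to
#- numerical error. The sizes below are hypothetical.
def _demo_genRandomFeatures(N=6, r=2, d=4):
    A = ra.randn(N, r) @ ra.randn(r, N)  # random rank-r matrix
    F, Th, G = genRandomFeatures(A, r, d)
    assert la.norm(A - F @ Th @ G.T) < 1e-8 * max(1.0, la.norm(A))
    return F, Th, G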
class MovieLense(DataForBilinearBandit):
def __init__(self, filename, R): #, d=16, r=5):
self.R = R
self.filename = filename
self.rawdata = LoadPickle(self.filename)
self.M = self.rawdata['M']
self.N1, self.N2 = self.M.shape
def gen_features(self, d=16, r=5):
self.d = d
self.r = r
self.X, self.Th, self.Z = genRandomFeatures(self.M, r,d)
self.S_F = la.norm(self.Th, 'fro')
self._save_expected_rewards()
def _save_expected_rewards(self):
self.expt_reward = (self.X @ self.Th) @ self.Z.T
self.best_arm_pair = tuple(np.unravel_index(np.argmax(self.expt_reward),
self.expt_reward.shape))
def get_reward(self, idx_pair):
x = self.X[idx_pair[0],:]
z = self.Z[idx_pair[1],:]
return x @ self.Th @ z + self.R * ra.normal(0,1)
def get_best_reward(self):
return self.expt_reward[self.best_arm_pair]
def get_expected_reward(self, idx_pair):
""" can also take idx_pair as a list of index pairs (list of tuples)
"""
        return [self.expt_reward[row[0], row[1]] for row in idx_pair]
def get_expected_regret(self, idx_pair):
""" can also take idx_pair as a list of index pairs (list of tuples)
"""
        x = self.best_arm_pair[0]
        z = self.best_arm_pair[1]
        if type(idx_pair) is list:
            return self.expt_reward[x,z] - np.array(self.get_expected_reward(idx_pair))
        return self.expt_reward[x,z] - self.expt_reward[idx_pair[0], idx_pair[1]]
def __str__(self):
return str(self.__dict__)
pass
class SphericalGaussian(DataForBilinearBandit):
def __init__(self, R, r):
self.R = R
self.r = r
def set_X_Z(self, X, Z):
self.X = X
self.Z = Z
[self.N1, self.d1] = X.shape
[self.N2, self.d2] = Z.shape
        self.N = self.N1 * self.N2
        self.d = self.d1 * self.d2
def gen_theta_star(self, S_2norm=1.0):
self._gen_theta_star(S_2norm)
self._save_expected_rewards_()
def _gen_theta_star(self, S_2norm=1.0):
self.S_2norm = S_2norm
#- generate Th
v = ra.normal(0,1,self.d1*self.d2)
Th0 = np.reshape(v, (self.d1, self.d2));
if (self.r != np.min([self.d1, self.d2])):
            #- FIXME this part is buggy: the `V` here is actually V.T (numpy's svd returns V^H);
            #- but I will keep this for reproducibility
U,s,V = la.svd(Th0)
Th0 = (U[:,:self.r] * s[:self.r]) @ V[:,:self.r].T
Th0 = Th0 / la.norm(Th0,2) # normalize by its two norm
self.Th = Th0 * self.S_2norm
self.S_F = la.norm(self.Th, 'fro')
def _save_expected_rewards(self):
self.expt_reward = (self.X @ self.Th) @ self.Z.T
self.best_arm_pair = tuple(np.unravel_index(np.argmax(self.expt_reward),
self.expt_reward.shape))
@staticmethod
def _genRademacher(N,d):
return 2 * ra.randint(2,size=(N,d)) - 1
def gen_data(self, d1, d2, N1, N2, S=1.0, armtype="gaussian"):
""" type could be 'gaussian' or 'rademacher'
"""
[self.d1, self.d2] = [d1, d2]
[self.N1, self.N2] = [N1, N2]
self.S = S
if (armtype == "gaussian"):
#- generate X
X = ra.normal(0,1,(self.N1, self.d1))
norms = la.norm(X, axis=1)
X /= norms.reshape(-1,1)
#- generate X
Z = ra.normal(0,1,(self.N2, self.d2))
norms = la.norm(Z, axis=1)
Z /= norms.reshape(-1,1)
#- save expected rewards
self._gen_theta_star(self.S)
elif armtype == "rademacher":
X = self.__class__._genRademacher(self.N1, self.d1)
Z = self.__class__._genRademacher(self.N2, self.d2)
#- save expected rewards
self._gen_theta_star(self.S) # this is being repeated, but I keep this for replicability
elif armtype == "rademacher2":
#- ensure that there exists the arm with the largest reward!
self._gen_theta_star(self.S)
X = self.__class__._genRademacher(self.N1, self.d1)
Z = self.__class__._genRademacher(self.N2, self.d2)
U,S,VT = la.svd(self.Th)
V = VT.T
#- implant (nearly) best arm
i1 = ra.randint(self.N1)
X[i1,:] = np.sign(U[:,0])
i2 = ra.randint(self.N2)
Z[i2,:] = np.sign(V[:,0])
else:
raise ValueError()
self.X = X; self.Z = Z
self._save_expected_rewards()
def get_reward(self, idx_pair):
x = self.X[idx_pair[0],:]
z = self.Z[idx_pair[1],:]
return x @ self.Th @ z + self.R * ra.normal(0,1)
def get_best_reward(self):
return self.expt_reward[self.best_arm_pair]
def get_expected_reward(self, idx_pair):
""" can also take idx_pair as a list of index pairs (list of tuples)
"""
        return [self.expt_reward[row[0], row[1]] for row in idx_pair]
def get_expected_regret(self, idx_pair):
""" can also take idx_pair as a list of index pairs (list of tuples)
"""
        x = self.best_arm_pair[0]
        z = self.best_arm_pair[1]
        if type(idx_pair) is list:
            return self.expt_reward[x,z] - np.array(self.get_expected_reward(idx_pair))
        return self.expt_reward[x,z] - self.expt_reward[idx_pair[0], idx_pair[1]]
def __str__(self):
return str(self.__dict__)
#@profile
def run_bilinear_bandit(learner, data_obj, T, initIdx=-1,timeList=[]):
reward_ary = np.zeros(T)
    arm_pair_ary = np.zeros((T,2), dtype=np.int16)
inst_regret = np.zeros(T)
cum_regret = np.zeros(T)
#- initial point, if given
if initIdx != -1:
learner.update(initIdx, 1)
my_tt = tic()
for t in range(1,T+1):
#- choose the next arm
next_arm_pair, radius_sq = learner.next_arm()
#- get reward and update the model
reward = data_obj.get_reward(next_arm_pair)
learner.update(next_arm_pair, reward)
#- save stats
reward_ary[t-1] = reward
arm_pair_ary[t-1,:] = next_arm_pair
inst_regret[t-1] = data_obj.get_expected_regret(next_arm_pair)
if (t == 1):
cum_regret[t-1] = inst_regret[t-1]
else:
cum_regret[t-1] = cum_regret[t-2] + inst_regret[t-1]
# if (t % 300 == 0):
# print('%.4g' % toc(my_tt))
# print('%.4g' % learner.time_cvx)
# ipdb.set_trace()
# pass
#- print out stats
if (t % 1000 == 0):
timeSoFar = toc(my_tt)
print(('t=%d, time=%.1f, radius_sq= %.4f, inst_reg=%.4f, cum_reg=%.4f' % \
(t, timeSoFar, radius_sq, inst_regret[t-1], cum_regret[t-1])))
timeList.append( [t,timeSoFar] )
sys.stdout.flush()
return reward_ary, arm_pair_ary, learner.get_debug_dict()
def run_bilinear_bandit_time(bandit,data_obj,T,initIdx=-1):
timeList = []
reward_ary, arm_pair_ary, dbg_dict = run_bilinear_bandit(bandit,data_obj,T,initIdx, timeList=timeList)
return reward_ary, arm_pair_ary, dbg_dict, timeList
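#- End-to-end sketch (added for illustration; not in the original file). All
#- sizes and constants are hypothetical, and the helpers from myutils3_v2
#- (tic/toc/log/etc.) are assumed to be available as in the rest of the file.
def _demo_run_bilinear(T=50, R=0.1):
    data = SphericalGaussian(R=R, r=2)
    data.gen_data(d1=4, d2=4, N1=8, N2=8, S=1.0, armtype="gaussian")
    learner = BilinearOful(X=data.X, Z=data.Z, lam=1.0, R=R,
                           Sp=np.sqrt(1.0) * data.S_F)
    return run_bilinear_bandit(learner, data, T)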
|
{"hexsha": "9c83acd0a0813249c8ccb8160f28ceff9ecc00b1", "size": 42543, "ext": "py", "lang": "Python", "max_stars_repo_path": "blbandits3.py", "max_stars_repo_name": "kwangsungjun/lrbandit", "max_stars_repo_head_hexsha": "2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-08-11T22:50:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T17:48:20.000Z", "max_issues_repo_path": "blbandits3.py", "max_issues_repo_name": "kwangsungjun/lrbandit", "max_issues_repo_head_hexsha": "2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "blbandits3.py", "max_forks_repo_name": "kwangsungjun/lrbandit", "max_forks_repo_head_hexsha": "2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-11T22:52:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T05:31:32.000Z", "avg_line_length": 35.4525, "max_line_length": 227, "alphanum_fraction": 0.5448134828, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy,import cvxpy", "num_tokens": 11649}
|
""" Change the reference of an EEG signal
"""
import numpy
import warnings
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.dataset_defs.stream import StreamDataset
class InvalidWindowException(Exception):
pass
class LaplacianReferenceNode(BaseNode):
""" Apply the Laplacian spatial filter
It derives from the need of improving the spatial resolution of EEG.
The signal recorded at each electrode is a combination of the brain activity
immediately underneath it and of brain activity of neighboring areas.
The idea is to filter from each electrode the contribution coming from
its neighbors. It can be applied using the nearest neighboring electrodes
(small Laplacian: 4 channels) or nearest and next nearest neighboring
electrodes (big Laplacian: 8 channels).
    The number of electrodes in *the returned time series is reduced*:
    each electrode that has fewer than 4 (or 8, when the big Laplacian is applied)
    neighbors is excluded.
**References**
======== ====================================================================================
main source: original article
======== ====================================================================================
author Hjorth, Bo
title An on-line transformation of EEG scalp potentials into orthogonal source derivations
journal Electroencephalography and Clinical Neurophysiology
year 1975
volume 39
number 5
pages 526--530
doi 10.1016/0013-4694(75)90056-5
======== ====================================================================================
**Parameters**
:l_type:
type of Laplacian applied, e.g. 'small' or 'big'
(*optional, default: 'small'*)
:selected_channels:
A list of channel names for which the filter should be applied.
If None, all channels are considered.
(*optional, default: None*)
**Exemplary Call**
.. code-block:: yaml
-
node: LaplacianReference
parameters:
l_type: 'big'
:Author: Laura Manca (laura.manca89@gmail.com)
    :Created: 2013/09/24
"""
def __init__(self, selected_channels=None, l_type='small', **kwargs):
super(LaplacianReferenceNode, self).__init__(**kwargs)
self.set_permanent_attributes(selected_channels=selected_channels,
l_type=l_type,
dist_max=0.28,
dist=None
)
def calc_distance_matrix(self, data, distFunc=lambda deltaPoint: \
numpy.sqrt(sum(deltaPoint[d]**2 \
for d in xrange(len(deltaPoint))))):
""" Compute the distance matrix from the dictionary StreamDataset.ec
StreamDataset.ec maps the coordinates of each electrode and to the
respective electrode name.
"""
# Rearrange the coordinates according to the order in data
nDimPoints = numpy.zeros((len(self.selected_channels),3))
for ind, name in enumerate(self.selected_channels):
nDimPoints[ind,:] = StreamDataset.ec[name][:]
# compute the distances between all selected channels
dim = len(nDimPoints[0]) # dim=3 (coordinates)
delta = [None]*dim
for d in xrange(dim):
position = nDimPoints[:,d] #all x,y or z values
delta[d] = position - numpy.reshape(position,(len(position),1))
# matrix of distances from one electrode to any other
self.dist = distFunc(delta)
return self.dist
def compute_laplacian(self,data):
"""Compute the Laplacian
.. math::
            \\text{filtered data}_{i} =
            n_i \\, \\text{data}_{i} -
            \\sum_{j \\in \\mathcal{N}(i)} \\text{data}_{j},
            \\quad \\text{with } i = \\text{EEG channel},\\; n_i = \\text{its number of neighbours}
The channels that are at the borders or close to Ref are excluded
"""
idx = numpy.argsort(self.dist)
#compute the Laplacian
filt_data = data * self.l_type - data[:,idx[:,1]] - \
data[:,idx[:,2]] - data[:,idx[:,3]] - data[:,idx[:,4]]
if self.l_type == 8:
filt_data = filt_data - data[:,idx[:,5]] - data[:,idx[:,6]] - \
data[:,idx[:,7]] - data[:,idx[:,8]]
        #remove unbalanced channels (either borders or electrodes close to Ref)
        #(the handling is identical for the small and big Laplacian; l_type is 4 or 8)
        nearest = idx[:, 0:(self.l_type + 1)]
        unbalanced = []
        balanced = []
        balanced_ch_names = []
        for ch in range(len(nearest)):
            x = [data.channel_names[ch] for i in
                 self.dist[ch, nearest[ch, :]] if i > self.dist_max]
            if x != []:
                unbalanced.append(x[:1])
            else:
                balanced.append(ch)
                balanced_ch_names.append(filt_data.channel_names[ch])
# list of channels left after the Laplacian filter being applied
data_noborder = filt_data[:,balanced]
data_noborder.channel_names = balanced_ch_names
filtered_time_series = TimeSeries(data_noborder,
data_noborder.channel_names,
data.sampling_frequency,
data.start_time,
data.end_time,
data.name,
data.marker_name,
data.tag)
self._log("These channels are unbalanced (border or close to reference) "
"they will be removed from the data: %s" % str(unbalanced))
return filtered_time_series
def _execute(self, data):
        if self.selected_channels is None:
self.selected_channels = data.channel_names
# set dist_max according to the kind of chosen filter (big or small)
if self.l_type == 'small':
self.l_type = 4
self.dist_max = 0.28
elif self.l_type == 'big':
self.l_type = 8
self.dist_max = 0.4
# check if the distance matrix has been already computed,
# if not compute it
if self.dist is None:
self.calc_distance_matrix(data)
# compute the Laplacian
filtered_time_series = self.compute_laplacian(data)
return filtered_time_series
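#- Illustrative numeric sketch (added; not part of pySPACE): for one channel
#- with four neighbours, the small Laplacian above amounts to
#- 4 * center - sum(neighbours). All values below are hypothetical.
def _demo_small_laplacian():
    center = numpy.array([1.0, 2.0])             # two samples of one channel
    neighbours = numpy.array([[0.5, 1.0],
                              [1.5, 2.5],
                              [0.0, 1.0],
                              [2.0, 3.0]])       # its 4 nearest electrodes
    return 4 * center - neighbours.sum(axis=0)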
class AverageReferenceNode(BaseNode):
""" Rereference EEG signal against the average of a selected set of electrodes
This node computes for every time step separately the average of a selected
set of electrodes (*avg_channels*) and subtracts this average from each
channel. It thus implements a kind of average rereferencing.
**Parameters**
:avg_channels:
the channels over which the average is computed
(*optional, default: all available channels*)
:keep_average:
Whether the average should be added as separate channel.
(*optional, default: False*)
:inverse:
Determine whether *avg_channels* are the channels over which
the average is computed (inverse=False) or the channels
that are ignored when calculating the average.
(*optional, default: False*)
:old_ref:
This is the old reference channel name usually used during
recording as a reference. After re-referencing and if keep_average
is set to true, this name will be used for the appended channel.
If keep_average is true, but old_ref is not specified, name of the
appended channel will be "avg".
.. todo:: use different version from keeping the average values
**Exemplary call**
.. code-block:: yaml
-
node : Average_Reference
parameters :
avg_channels : ["C3","C4"]
keep_average : False
inverse : True
old_ref : "Fcz"
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
:Created: 2009/09/28
:Revised: 2013/03/25 Foad Ghaderi (foad.ghaderi@dfki.de)
:For more details see: http://sccn.ucsd.edu/wiki/Chapter_04:_Preprocessing_Tools
"""
def __init__(self, avg_channels = None, keep_average = False, old_ref = None,
inverse=False, **kwargs):
        super(AverageReferenceNode, self).__init__(**kwargs)
self.set_permanent_attributes(avg_channels = avg_channels,
keep_average = keep_average,
old_ref = old_ref,
inverse=inverse)
def _execute(self, data):
# First check if all channels actually appear in the data
# Determine the indices of the channels that are the basis for the
# average reference.
if not self.inverse:
            if self.avg_channels is None:
self.avg_channels = data.channel_names
channel_indices = [data.channel_names.index(channel_name)
for channel_name in self.avg_channels]
else:
channel_indices = [data.channel_names.index(channel_name)
for channel_name in data.channel_names
if channel_name not in self.avg_channels]
not_found_channels = \
[channel_name for channel_name in self.avg_channels
if channel_name not in data.channel_names]
        if not_found_channels:
warnings.warn("Couldn't find selected channel(s): %s. Ignoring." %
not_found_channels, Warning)
if self.old_ref is None:
self.old_ref = 'avg'
# Compute the actual data of the reference channel. This is the sum of all
# channels divided by (the number of channels +1).
ref_chen = -numpy.sum(data[:, channel_indices], axis=1)/(data.shape[1]+1)
ref_chen = numpy.atleast_2d(ref_chen).T
# Reference all electrodes against average
avg_referenced_data = data + ref_chen
# Add average as new channel to the signal if enabled
if self.keep_average:
avg_referenced_data = numpy.hstack((avg_referenced_data, ref_chen))
channel_names = data.channel_names + [self.old_ref]
result_time_series = TimeSeries(avg_referenced_data,
channel_names,
data.sampling_frequency,
data.start_time, data.end_time,
data.name, data.marker_name)
else:
result_time_series = TimeSeries.replace_data(data,
avg_referenced_data)
return result_time_series
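#- Illustrative numeric sketch (added; not part of pySPACE): the node's
#- re-referencing is equivalent to subtracting sum(channels)/(n+1) from every
#- channel, matching the comment above. The data below are hypothetical.
def _demo_average_reference():
    data = numpy.array([[1.0, 3.0], [2.0, 6.0]])  # 2 samples x 2 channels
    ref = numpy.sum(data, axis=1, keepdims=True) / (data.shape[1] + 1)
    return data - ref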
_NODE_MAPPING = {"Average_Reference": AverageReferenceNode,
"Laplacian_Reference": LaplacianReferenceNode}
|
{"hexsha": "5f7960417b1616d605a883827ac39d7a32e37304", "size": 12334, "ext": "py", "lang": "Python", "max_stars_repo_path": "pySPACE/missions/nodes/spatial_filtering/rereferencing.py", "max_stars_repo_name": "pyspace/pyspace", "max_stars_repo_head_hexsha": "763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2015-02-20T09:03:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T22:32:52.000Z", "max_issues_repo_path": "pySPACE/missions/nodes/spatial_filtering/rereferencing.py", "max_issues_repo_name": "pyspace/pyspace", "max_issues_repo_head_hexsha": "763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2015-05-18T15:08:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-05T19:18:01.000Z", "max_forks_repo_path": "pySPACE/missions/nodes/spatial_filtering/rereferencing.py", "max_forks_repo_name": "pyspace/pyspace", "max_forks_repo_head_hexsha": "763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2015-09-28T07:16:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-20T13:52:19.000Z", "avg_line_length": 40.976744186, "max_line_length": 101, "alphanum_fraction": 0.5444300308, "include": true, "reason": "import numpy", "num_tokens": 2427}
|
import evi
import pandas as pd
import numpy as np
import scipy
import harmonypy as hm
from sklearn.preprocessing import MinMaxScaler
def compute_lisi(adata, basis, batch_key, perplexity):
X = adata.obsm[basis]
metadata = pd.DataFrame(adata.obs[batch_key].values, columns = [batch_key])
lisi = hm.compute_lisi(X, metadata, [batch_key], perplexity)
return lisi
def corr_dist(adata_batch, adata, batch_label, batch_key):
spliced_b = pd.DataFrame(adata_batch.layers['spliced'].todense(), index = adata_batch.obs_names, columns = adata_batch.var_names)
unspliced_b = pd.DataFrame(adata_batch.layers['unspliced'].todense(), index = adata_batch.obs_names, columns = adata_batch.var_names)
spliced_i = pd.DataFrame(adata.layers['spliced'].todense(), index = adata.obs_names, columns = adata.var_names)
unspliced_i = pd.DataFrame(adata.layers['unspliced'].todense(), index = adata.obs_names, columns = adata.var_names)
b = np.where(adata_batch.obs[batch_key] == batch_label)[0]
corr_list = []
for i in range(0, len(adata_batch.var_names)):
df_b = pd.concat([spliced_b.iloc[b, i], unspliced_b.iloc[b, i]], axis = 1)
cellind = df_b.iloc[np.where(df_b.sum(axis = 1) != 0)[0], :].index
df_b = df_b.loc[cellind]
mat_b = np.array(df_b.values)
df_i = pd.concat([spliced_i.iloc[:, i], unspliced_i.iloc[:, i]], axis = 1)
df_i = df_i.loc[cellind]
mat_i = np.array(df_i.values)
rho, pval = scipy.stats.spearmanr(scipy.spatial.distance.pdist(mat_b), scipy.spatial.distance.pdist(mat_i))
corr_list.append(rho)
return corr_list
def average_dataset_metric(df = None, m_order = None, metric = None, palette = None, figsize = None, save = False, filename = None):
#computes ranked aggregate scores by min-max scaling, then taking the mean across datasets
m = df[np.isin(df.index, m_order)]
scaler = MinMaxScaler()
m_ranked = pd.DataFrame(scaler.fit_transform(m), index = m.index, columns = m.columns)
m_ranked = m_ranked.reindex(m_order)
mean_metrics = pd.DataFrame(m_ranked.mean(1), columns = [metric])
nplots = len(m_ranked.columns)
evi.pl.ranked_barplot(df = m_ranked, figsize = figsize, y = m_ranked.index, save = save, palette = palette, filename = filename, nplots = nplots)
return mean_metrics
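# Usage sketch (added for illustration; not part of the original module). The
# AnnData construction and field names below are assumptions; in practice the
# embedding would come from a real integration pipeline.
def _demo_compute_lisi(n_cells=100):
    import anndata
    adata = anndata.AnnData(X=np.random.randn(n_cells, 10))
    adata.obsm['X_pca'] = np.random.randn(n_cells, 2)
    adata.obs['batch'] = np.random.choice(['a', 'b'], n_cells)
    return compute_lisi(adata, 'X_pca', 'batch', perplexity=30)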
|
{"hexsha": "6d7333631c2ee477380b3b9cb688d69fdb48f97b", "size": 2385, "ext": "py", "lang": "Python", "max_stars_repo_path": "evi/tools/evaluate.py", "max_stars_repo_name": "jranek/EVI", "max_stars_repo_head_hexsha": "7a4ec37dc847d02268241b464b296f00826c327d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evi/tools/evaluate.py", "max_issues_repo_name": "jranek/EVI", "max_issues_repo_head_hexsha": "7a4ec37dc847d02268241b464b296f00826c327d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evi/tools/evaluate.py", "max_forks_repo_name": "jranek/EVI", "max_forks_repo_head_hexsha": "7a4ec37dc847d02268241b464b296f00826c327d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1666666667, "max_line_length": 149, "alphanum_fraction": 0.6893081761, "include": true, "reason": "import numpy,import scipy", "num_tokens": 644}
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../../common")
import test_util as tu
import tritonclient.http as httpclient
from tritonclient.utils import *
import numpy as np
import unittest
class LifecycleTest(tu.TestResultCollector):
def test_batch_error(self):
# The execute_error model returns an error for the first request and
# successfully processes the second request. This is making sure that
# an error in a single request does not completely fail the batch.
model_name = "execute_error"
shape = [2, 2]
request_parallelism = 2
with httpclient.InferenceServerClient(
"localhost:8000", concurrency=request_parallelism) as client:
input_datas = []
requests = []
for i in range(request_parallelism):
input_data = np.random.randn(*shape).astype(np.float32)
input_datas.append(input_data)
inputs = [
httpclient.InferInput("IN", input_data.shape,
np_to_triton_dtype(input_data.dtype))
]
inputs[0].set_data_from_numpy(input_data)
requests.append(client.async_infer(model_name, inputs))
for i in range(request_parallelism):
results = None
if i == 0:
with self.assertRaises(InferenceServerException):
results = requests[i].get_result()
continue
else:
results = requests[i].get_result()
print(results)
output_data = results.as_numpy("OUT")
self.assertIsNotNone(output_data, "error: expected 'OUT'")
self.assertTrue(
np.array_equal(output_data, input_datas[i]),
"error: expected output {} to match input {}".format(
output_data, input_datas[i]))
def test_infer_pymodel_error(self):
model_name = "wrong_model"
shape = [2, 2]
with httpclient.InferenceServerClient("localhost:8000") as client:
input_data = (16384 * np.random.randn(*shape)).astype(np.uint32)
inputs = [
httpclient.InferInput("IN", input_data.shape,
np_to_triton_dtype(input_data.dtype))
]
inputs[0].set_data_from_numpy(input_data)
try:
client.infer(model_name, inputs)
except InferenceServerException as e:
print(e.message())
self.assertTrue(
e.message().startswith(
"Failed to process the request(s) for model instance"),
"Exception message is not correct")
else:
self.assertTrue(
False,
"Wrong exception raised or did not raise an exception")
def test_incorrect_execute_return(self):
model_name = 'execute_return_error'
shape = [1, 1]
with httpclient.InferenceServerClient("localhost:8000") as client:
input_data = (5 * np.random.randn(*shape)).astype(np.float32)
inputs = [
httpclient.InferInput("INPUT", input_data.shape,
np_to_triton_dtype(input_data.dtype))
]
inputs[0].set_data_from_numpy(input_data)
# The first request to this model will return None.
with self.assertRaises(InferenceServerException) as e:
client.infer(model_name, inputs)
self.assertTrue(
str(e.exception).startswith(
"Failed to process the request(s) for model instance "
"'execute_return_error_0', message: Expected a list in the "
"execute return"), "Exception message is not correct.")
# The second inference request will return a list of None object
# instead of Python InferenceResponse objects.
with self.assertRaises(InferenceServerException) as e:
client.infer(model_name, inputs)
self.assertTrue(
str(e.exception).startswith(
"Failed to process the request(s) for model instance "
"'execute_return_error_0', message: Expected an "
"'InferenceResponse' object in the execute function return"
" list"), "Exception message is not correct.")
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "587237e5188dfdebffe4f878986c44d1d3c1db62", "size": 6164, "ext": "py", "lang": "Python", "max_stars_repo_path": "qa/L0_backend_python/lifecycle/lifecycle_test.py", "max_stars_repo_name": "Mu-L/triton-inference-server", "max_stars_repo_head_hexsha": "ec1881d491cc2d2bd89ad9383724118e7121280c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qa/L0_backend_python/lifecycle/lifecycle_test.py", "max_issues_repo_name": "Mu-L/triton-inference-server", "max_issues_repo_head_hexsha": "ec1881d491cc2d2bd89ad9383724118e7121280c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qa/L0_backend_python/lifecycle/lifecycle_test.py", "max_forks_repo_name": "Mu-L/triton-inference-server", "max_forks_repo_head_hexsha": "ec1881d491cc2d2bd89ad9383724118e7121280c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9927007299, "max_line_length": 80, "alphanum_fraction": 0.6182673589, "include": true, "reason": "import numpy", "num_tokens": 1167}
|
from __future__ import print_function
try:
import h5py
from h5py import defs, utils, h5ac, _proxy # for py2app
except ImportError:
print ('Missing the h5py library (hdf5 support)...')
import gzip
import scipy.io
from scipy import sparse, stats, io
import numpy as np
import sys, string, os, csv, math
import time
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
def index_items(universe, itemset):
"""
Returns a list of indices to the items in universe that match items in itemset
"""
return [ idx for idx, item in enumerate(universe) if item in itemset ]
class CellCollection:
"""
Encapsulates a cohort of cells, ie from a CellRanger run
Expression values are stored in a sparse matrix, and barcodes/gene identifiers are
maintained in parallel arrays. Construct by calling CellCollection.from_file(), or one
of the other specialized static constructors
"""
@staticmethod
def from_cellranger_h5(h5_filename, genome=None, returnGenes=False):
"""
Creates a CellCollection from the contents of an H5 file created by CellRanger.
The meaning of the genome parameter differs depending on the version of CellRanger that created the h5.
For CellRanger version 2, the genome parameters specifies the matrix to load. If genome is None, the
single matrix present will be loaded (using genome==None when multiple genomes are present in the file
is an error and will cause an exception).
For CellRanger version 3, genome is now specified as an attribute of the features (typically genes).
In this version, specifying a genome will filter the matrix to only include features from that genome.
Whether a genome is specified or not, non-gene features will be removed
"""
start = time.time()
coll = CellCollection()
f = h5py.File(h5_filename, 'r')
if 'matrix' in f:
# CellRanger v3
coll._barcodes = f['matrix']['barcodes']
coll._gene_ids = f['matrix']['features']['id']
coll._gene_names = f['matrix']['features']['name']
if returnGenes:
""" Do not import the matrix at this point """
return list(coll._gene_names)
coll._matrix = sparse.csc_matrix((f['matrix']['data'], f['matrix']['indices'], f['matrix']['indptr']), shape=f['matrix']['shape'])
indices = np.flatnonzero(np.array(f['matrix']['features']['genome']) != '') if \
genome == None else \
np.flatnonzero(np.array(f['matrix']['features']['genome']) == genome)
coll._filter_genes_by_index(indices.tolist())
else:
# CellRanger v2
if genome == None:
possible_genomes = f.keys()
if len(possible_genomes) != 1:
raise Exception("{} contains multiple genomes ({}). Explicitly select one".format(h5_filename, ", ".join(possible_genomes)))
genome = possible_genomes[0]
#print("Auto-selecting genome {}".format(genome), file=sys.stderr)
coll._gene_names = f[genome]['gene_names']
if returnGenes:
""" Do not import the matrix at this point """
return list(coll._gene_names)
coll._matrix = sparse.csc_matrix((f[genome]['data'], f[genome]['indices'], f[genome]['indptr']))
coll._barcodes = f[genome]['barcodes']
coll._gene_ids = f[genome]['genes']
print('sparse matrix data imported from h5 file in %s seconds' % str(time.time()-start))
return coll
@staticmethod
def from_cellranger_mtx(mtx_directory, genome=None, returnGenes=False):
"""
Creates a CellCollection from a sparse matrix (.mtx and associated files) exported by CellRanger
Recognize directories from CellRanger version 2 (files: matrix.mtx, genes.tsv, barcodes.tsv) and
CellRanger v3 (files: matrix.mtx.gz, features.tsv.gz, barcodes.tsv.gz)
"""
start = time.time()
coll = CellCollection()
cellranger_version = 2
if '.mtx' in mtx_directory:
mtx_file = mtx_directory ### Hence an mtx file was directly supplied
mtx_directory = os.path.abspath(os.path.join(mtx_file, os.pardir))
else:
mtx_file = os.path.join(mtx_directory, "matrix.mtx")
if not os.path.exists(mtx_file):
cellranger_version = 3
mtx_file = mtx_file + ".gz"
if not os.path.exists(mtx_file):
raise Exception("Directory {} does not contain a recognizable matrix file".format(mtx_directory))
if '.gz' in mtx_file:
cellranger_version = 3
sparse_matrix = io.mmread(mtx_file)
coll._matrix = sparse_matrix.tocsc()
coll._gene_ids = np.empty((coll._matrix.shape[0], ), np.object)
coll._gene_names = np.empty((coll._matrix.shape[0], ), np.object)
if cellranger_version == 2:
with open(os.path.join(mtx_directory, "genes.tsv"), "rU") as f:
idx = 0
for line in f:
i, n = line.rstrip().split("\t")
coll._gene_ids[idx] = i
coll._gene_names[idx] = n
idx += 1
with open(os.path.join(mtx_directory, "barcodes.tsv"), "rU") as f:
coll._barcodes = np.array( [ line.rstrip() for line in f ] )
else:
with gzip.open(os.path.join(mtx_directory, "features.tsv.gz"), "rt") as f:
idx = 0
indices = []
for line in f:
i, n, t = line.rstrip().split("\t")
coll._gene_ids[idx] = i
coll._gene_names[idx] = n
if t == 'Gene Expression':
indices.append(idx)
idx += 1
coll._filter_genes_by_index(indices)
with gzip.open(os.path.join(mtx_directory, "barcodes.tsv.gz"), "rt") as f:
coll._barcodes = np.array( [ line.rstrip() for line in f ] )
if returnGenes:
""" Do not import the matrix at this point """
return list(coll._gene_names)
print('sparse matrix data imported from mtx file in %s seconds' % str(time.time()-start))
return coll
@staticmethod
def from_tsvfile_alt(tsv_file, genome=None, returnGenes=False, gene_list=None):
"""
Creates a CellCollection from the contents of a tab-separated text file.
"""
startT = time.time()
coll = CellCollection()
UseDense=False
header=True
skip=False
for line in open(tsv_file,'rU').xreadlines():
if header:
delimiter = ',' # CSV file
start = 1
if 'row_clusters' in line:
start=2 # An extra column and row are present from the ICGS file
skip=True
if '\t' in line:
delimiter = '\t' # TSV file
barcodes = string.split(line.rstrip(),delimiter)[start:]
if ':' in line:
barcodes = map(lambda x:x.split(':')[1],barcodes)
coll._barcodes=barcodes
coll._gene_names=[]
data_array=[]
header=False
elif skip:
skip=False # Ignore the second row in the file that has cluster info
else:
values = line.rstrip().split(delimiter)
gene = values[0]
if ' ' in gene:
gene = string.split(gene,' ')[0]
if ':' in gene:
gene = (gene.rstrip().split(':'))[1]
if gene_list!=None:
if gene not in gene_list:
continue
coll._gene_names.append(gene)
""" If the data (always log2) is a float, increment by 0.5 to round up """
if returnGenes==False:
if UseDense:
data_array.append(map(float,values[start:]))
else:
#data_array.append(map(lambda x: round(math.pow(2,float(x))),values[start:]))
data_array.append(map(float,values[start:]))
if returnGenes:
""" Do not import the matrix at this point """
return list(coll._gene_names)
if UseDense:
coll._matrix = np.array(data_array)
else:
""" Convert to a sparse matrix """
coll._matrix = sparse.csc_matrix(np.array(data_array))
coll._barcodes = np.array(coll._barcodes)
coll._gene_names = np.array(coll._gene_names)
coll._gene_ids = coll._gene_names
print('sparse matrix data imported from TSV file in %s seconds' % str(time.time()-startT))
#print (len(coll._gene_ids),len(coll._barcodes))
return coll
@staticmethod
def from_tsvfile(tsv_filename, genome=None, returnGenes=False, gene_list=None):
"""
Generates a CellCollection from a (dense) tab-separated file, where cells are in
columns and genes are in rows.
"""
start = time.time()
coll = CellCollection()
with open(tsv_filename, "rU") as f:
try:
line = next(f)
except StopIteration:
raise Exception("TSV file {} is empty".format(tsv_filename))
### Check formatting
skip=False
if '\t' in line:
delimiter = '\t' # TSV file
else:
delimiter = ','
col_start = 1
if 'row_clusters' in line:
col_start=2 # An extra column and row are present from the ICGS file
skip=True
### Check formatting end
coll._barcodes = np.array(line.rstrip().split(delimiter)[col_start:])
sparse_matrix = sparse.lil_matrix((50000, len(coll._barcodes)), dtype=np.float_)
coll._gene_names = np.empty((sparse_matrix.shape[0], ), np.object)
row = 0
for line in f:
if row==0 and skip:
skip = False
continue
vals = line.rstrip().split(delimiter)
coll._gene_names[row] = vals[0]
if returnGenes==False:
for i in range(col_start, len(vals)):
if vals[i] != "0":
sparse_matrix[row, i-col_start] = float(vals[i])
if row == sparse_matrix.shape[0]-1:
# grow by 10000 rows; note that '+' on shape tuples concatenates them, it does not add
sparse_matrix.resize((sparse_matrix.shape[0] + 10000, sparse_matrix.shape[1]))
coll._gene_names.resize((coll._gene_names.shape[0] + 10000, ))
row += 1
coll._gene_names.resize((row, ))
if returnGenes:
""" Do not import the matrix at this point """
return list(coll._gene_names)
sparse_matrix.resize((row, len(coll._barcodes)))
coll._matrix = sparse_matrix.tocsc()
coll._gene_ids = coll._gene_names
#print('matrix shape: {}'.format(coll._matrix.shape))
print('sparse matrix data imported from TSV file in %s seconds' % str(time.time()-start))
return coll
def __init__(self):
self._matrix = sparse.csc_matrix((0,0), dtype=np.int8)
self._barcodes = ()
self._gene_names = ()
self._gene_ids = ()
def __getattr__(self, name):
"""
Methods/attributes not explicitly defined in the CellCollection are passed down
to the matrix
"""
return getattr(self._matrix, name)
def num_genes(self):
return len(self._gene_ids)
def num_cells(self):
return len(self._barcodes)
def get_barcode(self, cell_index):
return self._barcodes[cell_index]
def get_cell_expression_vector(self, cell_index):
"""
Returns a (standard, non-sparse) sequence of expression values for a given cell
"""
#try:
return self._matrix.getcol(cell_index).todense()
#except:
# return self._matrix[:,cell_index] # ith column for existing dense matrix
def centroid(self):
"""
Returns the centroid of this collection as a (standard, non-sparse) sequence.
The centroid is defined as the mean expression of each gene
"""
return self._matrix.mean(axis=1)
def partition(self, partition_dict):
"""
Returns a dictionary of CellCollections, each a distinct subset (by cell) of self.
partition_dict is a dictionary of cell index => set id, as generated by
the python-louvain methods
"""
partitions = {}
for k, v in partition_dict.items():
if v not in partitions: partitions[v] = []
partitions[v].append(k)
result = {}
for part_id in partitions.keys():
result[part_id] = self.subset_by_cell_index(partitions[part_id])
return result
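# Illustrative usage (assumed, not from the original file): partition() takes
# a python-louvain style mapping of cell index -> community id, e.g.
# coll.partition({0: 0, 1: 0, 2: 1}) returns {0: <2-cell subset>, 1: <1-cell subset>}.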
def find_best_correlated(self, query):
"""
Identifies the cell in this collection that has the highest Pearson's correlation
with query (a sequence of expression values in the same order as in this collection)
Returns the pair (barcode, correlation coefficient) for the best match in ref
"""
best_cor = -2
best_bc = "<None>"
for idx in range(self.num_cells()):
r = self.get_cell_expression_vector(idx)
cor = stats.pearsonr(query, r)[0][0] # pearsonr returns the pair (r, p-val); the extra [0] is needed because the column-vector inputs make r array-like
if cor > best_cor:
best_cor = cor
best_bc = self.get_barcode(idx)
return best_bc, best_cor
def filter_by_cell_index(self, cell_index):
self._matrix = self._matrix[:, cell_index]
self._barcodes = self._barcodes[cell_index]
def subset_by_cell_index(self, cell_index):
"""
Returns a new CellCollection containing only chosen cells from self
"""
cc = CellCollection()
cc._gene_ids = self._gene_ids
cc._gene_names = self._gene_names
cc._matrix = self._matrix[:, cell_index]
cc._barcodes = self._barcodes[cell_index]
return cc
def filter_barcodes(self, barcode_list):
"""
Reduces the CellCollection in-place to only contain the barcodes requested
"""
barcode_subset = set(barcode_list)
#print("Selecting {} barcodes".format(len(barcode_subset)), file=sys.stderr)
barcode_index = index_items(self._barcodes, barcode_subset)
self.filter_by_cell_index(barcode_index)
def subset_barcodes(self, barcode_list):
barcode_subset = set(barcode_list)
barcode_index = index_items(self._barcodes, barcode_subset)
return self.subset_by_cell_index(barcode_index)
def _filter_genes_by_index(self, gene_index):
#print(gene_index);sys.exit()
self._matrix = self._matrix[gene_index, :]
self._gene_ids = self._gene_ids[gene_index]
self._gene_names = self._gene_names[gene_index]
#mat_array_original = self._matrix.toarray()
#print(len(mat_array_original))
def filter_genes_by_symbol(self, symbol_list, data_type):
"""
Reduces the CellCollection in-place to only contain the genes requested.
Note that gene symbols could be non-unique, and thus more genes may remain in the
filtered collection than were requested. The order of the genes in the h5 may also
differ and the same genes may not be present in the different sets
"""
gene_subset = set(symbol_list)
#print("Selecting {} genes".format(len(gene_subset)), file=sys.stderr)
gene_index=[]
gene_names = list(self._gene_names)
if data_type == 'txt':
### below code is problematic for h5 and probably sparse matrix files
for gene in gene_subset:
if gene in gene_names:
gene_index.append(gene_names.index(gene))
else:
gene_index = index_items(self._gene_names, gene_subset) # will output genes in the full dataset order
self._filter_genes_by_index(gene_index)
def filter_genes_by_id(self, id_list):
"""
Reduces the CellCollection in-place to only contain the genes requested.
"""
gene_subset = set(id_list)
#print("Selecting {} genes".format(len(gene_subset)), file=sys.stderr)
gene_index = index_items(self._gene_ids, gene_subset)
self._filter_genes_by_index(gene_index)
|
{"hexsha": "f4244789d8fb3cf8a7178ed0e563cf1f011b73e5", "size": 17039, "ext": "py", "lang": "Python", "max_stars_repo_path": "stats_scripts/cell_collection.py", "max_stars_repo_name": "michalkouril/altanalyze", "max_stars_repo_head_hexsha": "e721c79c56f7b0022516ff5456ebaa14104c933b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2015-08-17T03:21:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T17:37:56.000Z", "max_issues_repo_path": "stats_scripts/cell_collection.py", "max_issues_repo_name": "michalkouril/altanalyze", "max_issues_repo_head_hexsha": "e721c79c56f7b0022516ff5456ebaa14104c933b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2017-07-30T03:30:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T04:46:05.000Z", "max_forks_repo_path": "stats_scripts/cell_collection.py", "max_forks_repo_name": "michalkouril/altanalyze", "max_forks_repo_head_hexsha": "e721c79c56f7b0022516ff5456ebaa14104c933b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2015-08-26T07:21:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T02:53:25.000Z", "avg_line_length": 40.9591346154, "max_line_length": 145, "alphanum_fraction": 0.5787311462, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3728}
|
import logging
from typing import Tuple
from numpy.random import uniform
from problems.test_case import TestCase, TestCaseTypeEnum
from problems.solutions.rock_star_climate import rock_temperature
logger = logging.getLogger(__name__)
FUNCTION_NAME = "rock_temperature"
INPUT_VARS = ['solar_constant', 'albedo', 'emissivity']
OUTPUT_VARS = ['T_rock']
STATIC_RESOURCES = []
PHYSICAL_CONSTANTS = {
# Earth
'S_Earth': 1361, # Solar constant [W/m^2] from Kopp & Lean (2011).
'a_Earth': 0.306, # Bond albedo from NASA Earth fact sheet: https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html
'ε_Earth': 0.612, # Effective emissivity.
# Mars
'S_Mars': 586, # Assuming S falls off as 1/r^2 from Kopp & Lean (2011) and r = 1.524 AU.
'a_Mars': 0.24, # Bond albedo from NASA Mars fact sheet: https://nssdc.gsfc.nasa.gov/planetary/factsheet/marsfact.html
'ε_Mars': 0.9, # Can't find anything so picking a 0.9 which is close to limestone and brick.
# Pluto
'S_Pluto': 0.87, # Assuming S falls off as 1/r^2 from Kopp & Lean (2011) and r = 39.48 AU (semi-major axis).
'a_Pluto': 0.72, # Bond albedo from NASA Pluto fact sheet: https://nssdc.gsfc.nasa.gov/planetary/factsheet/plutofact.html
'ε_Pluto': 0.9
}
ATOL = {}
RTOL = {
'T_rock': 1e-6
}
class TestCaseType(TestCaseTypeEnum):
EARTH = ("Earth", 1)
BLACKBODY_EARTH = ("Blackbody Earth", 1)
REFLECTIVE_EARTH = ("Reflective Earth", 1)
MARS = ("Mars", 1)
PLUTO = ("Pluto", 1)
RANDOM = ("Random", 1)
class ProblemTestCase(TestCase):
def input_tuple(self) -> tuple:
return self.input['solar_constant'], self.input['albedo'], self.input['emissivity'],
def output_tuple(self) -> tuple:
return self.output['T_rock'],
def generate_test_case(test_type: TestCaseType) -> ProblemTestCase:
test_case = ProblemTestCase(test_type)
if test_type is TestCaseType.EARTH:
S = PHYSICAL_CONSTANTS['S_Earth']
a = PHYSICAL_CONSTANTS['a_Earth']
ε = PHYSICAL_CONSTANTS['ε_Earth']
elif test_type is TestCaseType.BLACKBODY_EARTH:
S = PHYSICAL_CONSTANTS['S_Earth']
a = PHYSICAL_CONSTANTS['a_Earth']
ε = 1.0
elif test_type is TestCaseType.REFLECTIVE_EARTH:
S = PHYSICAL_CONSTANTS['S_Earth']
a = 1
ε = PHYSICAL_CONSTANTS['ε_Earth']
elif test_type is TestCaseType.MARS:
S = PHYSICAL_CONSTANTS['S_Mars']
a = PHYSICAL_CONSTANTS['a_Mars']
ε = PHYSICAL_CONSTANTS['ε_Mars']
elif test_type is TestCaseType.PLUTO:
S = PHYSICAL_CONSTANTS['S_Pluto']
a = PHYSICAL_CONSTANTS['a_Pluto']
ε = PHYSICAL_CONSTANTS['ε_Pluto']
elif test_type is TestCaseType.RANDOM:
S = uniform(1000, 10000)
a = uniform(0, 1)
ε = uniform(0, 1)
else:
raise ValueError(f"Unrecognized test case: {test_type}")
test_case.input = {'solar_constant': S, 'albedo': a, 'emissivity': ε}
test_case.output['T_rock'] = rock_temperature(S, a, ε)
return test_case
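# The reference solution is imported from problems.solutions.rock_star_climate
# and is not shown here. A minimal sketch under the usual zero-dimensional
# radiative-equilibrium assumption (absorbed shortwave S(1-a)/4 balances
# emitted longwave εσT^4); the helper name and σ value are my own:
def _rock_temperature_sketch(S, a, eps, sigma=5.670374419e-8):
    return (S * (1 - a) / (4 * eps * sigma)) ** 0.25
# With the Earth constants above this yields roughly 288 K.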
|
{"hexsha": "1ebb11702d95311dd1117b0e662087e0dbb25417", "size": 3059, "ext": "py", "lang": "Python", "max_stars_repo_path": "problems/rock_star_climate.py", "max_stars_repo_name": "benallan/lovelace-problems", "max_stars_repo_head_hexsha": "3780d2bfc58fe0531d60a92ae0a6c45e9814f58f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2019-07-23T16:51:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T21:42:05.000Z", "max_issues_repo_path": "problems/rock_star_climate.py", "max_issues_repo_name": "benallan/lovelace-problems", "max_issues_repo_head_hexsha": "3780d2bfc58fe0531d60a92ae0a6c45e9814f58f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2019-03-22T00:05:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-04T13:25:12.000Z", "max_forks_repo_path": "problems/rock_star_climate.py", "max_forks_repo_name": "benallan/lovelace-problems", "max_forks_repo_head_hexsha": "3780d2bfc58fe0531d60a92ae0a6c45e9814f58f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-04T13:06:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-20T07:41:42.000Z", "avg_line_length": 31.5360824742, "max_line_length": 127, "alphanum_fraction": 0.6665576986, "include": true, "reason": "from numpy", "num_tokens": 908}
|
[STATEMENT]
lemma less_multiset\<^sub>H\<^sub>O:
"M < N \<longleftrightarrow> M \<noteq> N \<and> (\<forall>y. count N y < count M y \<longrightarrow> (\<exists>x>y. count M x < count N x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (M < N) = (M \<noteq> N \<and> (\<forall>y. count N y < count M y \<longrightarrow> (\<exists>x>y. count M x < count N x)))
[PROOF STEP]
by (rule mult\<^sub>H\<^sub>O[folded multp_def less_multiset_def])
|
{"llama_tokens": 177, "file": null, "length": 1}
|
# -*- coding: utf-8 -*-
# -*- mode: python -*-
""" Python reference implementations of model code
CODE ORIGINALLY FROM
https://github.com/melizalab/mat-neuron/blob/master/mat_neuron/_pymodel.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
#from mat_neuron.core import impulse_matrix
from numba import jit  # the bare @jit decorators below require this name
from numpy import exp
@jit
def impulse_matrix_direct(params, dt):
Aexp = np.zeros((6, 6), dtype='d')
a1, a2, b, w, R, tm, t1, t2, tv, tref = params
Aexp[0, 0] = exp(-dt / tm)
Aexp[0, 1] = tm - tm * exp(-dt / tm)
Aexp[1, 1] = 1
Aexp[2, 2] = exp(-dt / t1)
Aexp[3, 3] = exp(-dt / t2)
Aexp[4, 0] = b*tv*(dt*tm*exp(dt/tm) - dt*tv*exp(dt/tm) + tm*tv*exp(dt/tm) - tm*tv*exp(dt/tv))*exp(-dt/tv - dt/tm)/(pow(tm, 2) - 2*tm*tv + pow(tv, 2))
Aexp[4, 1] = b*tm*tv*(-dt*(tm - tv)*exp(dt*(tm + tv)/(tm*tv)) + tm*tv*exp(2*dt/tv) - tm*tv*exp(dt*(tm + tv)/(tm*tv)))*exp(-dt*(2*tm + tv)/(tm*tv))/pow(tm - tv, 2)
Aexp[4, 4] = exp(-dt / tv)
Aexp[4, 5] = dt * exp(-dt / tv)
Aexp[5, 0] = b*tv*exp(-dt/tv)/(tm - tv) - b*tv*exp(-dt/tm)/(tm - tv)
Aexp[5, 1] = -b*tm*tv*exp(-dt/tv)/(tm - tv) + b*tm*tv*exp(-dt/tm)/(tm - tv)
Aexp[5, 5] = exp(-dt / tv)
return Aexp
@jit
def impulse_matrix(params, dt, reduced=False):
"""Calculate the matrix exponential for integration of MAT model"""
from scipy import linalg
a1, a2, b, w, R, tm, t1, t2, tv, tref = params
if not reduced:
A = - np.matrix([[1 / tm, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1 / t1, 0, 0, 0],
[0, 0, 0, 1 / t2, 0, 0],
[0, 0, 0, 0, 1 / tv, -1],
[b / tm, -b, 0, 0, 0, 1 / tv]])
else:
A = - np.matrix([[1 / tm, -1, 0, 0],
[0, 0, 0, 0],
[0, 0, 1 / tv, -1],
[b / tm, -b, 0, 1 / tv]])
return linalg.expm(A * dt)
@jit
def predict(state, params, current, dt):
"""Integrate model to predict spiking response
This method uses the exact integration method of Rotter and Diesmann (1999).
Note that this implementation implicitly represents the driving current as a
series of pulses, which may or may not be appropriate.
params: 10-element sequence (α1, α2, β, ω, R, τm, τ1, τ2, τV, tref)
state: 6-element sequence (V, φ, θ1, θ2, θV, dθV) [all zeros works fine]
current: a 1-D array of N current values
dt: time step of forcing current, in ms
Returns an Nx5 array of the model state variables and a list of spike times
"""
D = 6
a1, a2, b, w, R, tm, t1, t2, tv, tref = params
v, phi, h1, h2, hv, dhv = state
Aexp = impulse_matrix(params, dt)
N = current.size
Y = np.zeros((N, D))
y = np.asarray(state)
spikes = []
iref = 0
last_I = 0
for i in range(N):
y = np.dot(Aexp, y)
y[1] += R / tm * (current[i] - last_I)
last_I = current[i]
# check for spike
h = y[2] + y[3] + y[4] + w
if i > iref and y[0] > h:
y[2] += a1
y[3] += a2
iref = i + int(tref * dt)
spikes.append(i * dt)
Y[i] = y
return Y, spikes
@jit
def predict_voltage(state, params, current, dt):
"""Integrate just the current-dependent variables.
This function is usually called as a first step when evaluating the
log-likelihood of a spike train. Usually there are several trials for each
stimulus, so it's more efficient to predict the voltage and its derivative
from the current separately.
See predict() for specification of params and state arguments
"""
D = 4
a1, a2, b, w, R, tm, t1, t2, tv, tref = params
Aexp = impulse_matrix(params, dt, reduced=True)
v, phi, _, _, hv, dhv = state
y = np.asarray([v, phi, hv, dhv], dtype='d')
N = current.size
Y = np.zeros((N, D), dtype='d')
x = np.zeros(D, dtype='d')
last_I = 0
for i in range(N):
x[1] = R / tm * (current[i] - last_I)
last_I = current[i]
y = np.dot(Aexp, y) + x
Y[i] = y
return Y
@jit
def predict_adaptation(params, state, spikes, dt, N):
"""Predict the voltage-independent adaptation variables from known spike times.
This function is usually called as a second step when evaluating the
log-likelihood of a spike train.
See predict() for specification of params and state arguments
"""
D = 2
a1, a2, b, w, R, tm, t1, t2, tv, tref = params  # same ordering as predict()
_, _, h1, h2, _, _ = state  # 6-element state vector, as in predict()
# the system matrix is purely diagonal, so these are exact solutions
A1 = np.exp(-dt / t1)
A2 = np.exp(-dt / t2)
y = np.asarray([h1, h2], dtype='d')
Y = np.zeros((N, D), dtype='d')
idx = (np.asarray(spikes) / dt).astype('i')
spk = np.zeros(N)
spk[idx] = 1
for i in range(N):
y[0] = A1 * y[0] + a1 * spk[i]
y[1] = A2 * y[1] + a2 * spk[i]
Y[i] = y
return Y
@jit
def log_intensity(V, H, params):
"""Evaluate the log likelihood of spiking with an exponential link function.
V: 2D array with voltage and θV in the first two columns
H: 2D array with θ1 and θ2 in the first two columns
params: list of parameters (see predict() for specification)
"""
return V[:, 0] - H[:, 0] - H[:, 1] - V[:, 1] - params[3]
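# --- Illustrative usage (assumed values, not from the original file) ---
# Drive the model with a constant current step; params follow the ordering
# unpacked above: (a1, a2, b, w, R, tm, t1, t2, tv, tref). On recent numba
# versions the bare @jit decorators may need to be dropped, since these
# functions rely on object-mode features such as np.matrix.
if __name__ == "__main__":
    demo_params = (10.0, 2.0, 0.3, 5.0, 50.0, 10.0, 10.0, 200.0, 5.0, 2.0)
    demo_state = [0.0] * 6
    demo_current = np.zeros(500)
    demo_current[50:] = 0.5  # step input after 50 time steps
    Y, spikes = predict(demo_state, demo_params, demo_current, dt=1.0)
    print("spike times (ms):", spikes)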
|
{"hexsha": "9ef4e7abf472f34c8d6941b6451c46b92445cfd5", "size": 5377, "ext": "py", "lang": "Python", "max_stars_repo_path": "jithub/models/mat.py", "max_stars_repo_name": "russelljjarvis/numba_reduced_neuronal_models", "max_stars_repo_head_hexsha": "bc500aefab267a1a1eaf2a1d8dac83da676d7ee6", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-17T07:39:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T07:39:19.000Z", "max_issues_repo_path": "jithub/models/mat.py", "max_issues_repo_name": "russelljjarvis/numba_reduced_neuronal_models", "max_issues_repo_head_hexsha": "bc500aefab267a1a1eaf2a1d8dac83da676d7ee6", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-25T06:36:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-17T21:12:24.000Z", "max_forks_repo_path": "jithub/models/mat.py", "max_forks_repo_name": "russelljjarvis/numba_reduced_neuronal_models", "max_forks_repo_head_hexsha": "bc500aefab267a1a1eaf2a1d8dac83da676d7ee6", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-01T01:40:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T01:40:39.000Z", "avg_line_length": 36.0872483221, "max_line_length": 166, "alphanum_fraction": 0.5538404315, "include": true, "reason": "import numpy,from numpy,from scipy,import numba", "num_tokens": 1873}
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
open import Cubical.Core.Everything
open import Cubical.Relation.Binary.Raw
module Cubical.Relation.Binary.Reasoning.PartialOrder
{c ℓ} {A : Type c} (P : PartialOrder A ℓ) where
open PartialOrder P
import Cubical.Relation.Binary.Raw.Construct.NonStrictToStrict _≤_ as Strict
------------------------------------------------------------------------
-- Re-export contents of base module
open import Cubical.Relation.Binary.Reasoning.Base.Double
isPreorder
(Strict.<-transitive isPartialOrder)
Strict.<⇒≤
(Strict.<-≤-trans transitive antisym)
(Strict.≤-<-trans transitive antisym)
public
|
{"hexsha": "32ed96c8a648cd46fe2d8901e41ceb4a466dfa34", "size": 655, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Cubical/Relation/Binary/Reasoning/PartialOrder.agda", "max_stars_repo_name": "bijan2005/univalent-foundations", "max_stars_repo_head_hexsha": "737f922d925da0cd9a875cb0c97786179f1f4f61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Cubical/Relation/Binary/Reasoning/PartialOrder.agda", "max_issues_repo_name": "bijan2005/univalent-foundations", "max_issues_repo_head_hexsha": "737f922d925da0cd9a875cb0c97786179f1f4f61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Cubical/Relation/Binary/Reasoning/PartialOrder.agda", "max_forks_repo_name": "bijan2005/univalent-foundations", "max_forks_repo_head_hexsha": "737f922d925da0cd9a875cb0c97786179f1f4f61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7727272727, "max_line_length": 76, "alphanum_fraction": 0.6732824427, "num_tokens": 172}
|
function [nu, g] = orderedNoiseUpdateParams(noise, mu, varsigma, y, index)
% ORDEREDNOISEUPDATEPARAMS Update parameters for ordered categorical noise model.
% NOISE
[g, dlnZ_dvs] = orderedNoiseGradVals(noise, mu(index, :), ...
varsigma(index, :), ...
y(index, :));
nu = g.*g - 2*dlnZ_dvs;
|
{"author": "SheffieldML", "repo": "GPmat", "sha": "4b5914a38ecbad9fb7a13a3392970bfc28c9d911", "save_path": "github-repos/MATLAB/SheffieldML-GPmat", "path": "github-repos/MATLAB/SheffieldML-GPmat/GPmat-4b5914a38ecbad9fb7a13a3392970bfc28c9d911/noise/orderedNoiseUpdateParams.m"}
|
# os, numpy, pylab and ttest_ind are used throughout but were not imported
import os
from scipy.io import loadmat
from scipy.stats import ttest_ind
import h5py
import numpy as np
import pandas as pd
import matplotlib.pylab as pl
import seaborn as sns
diag_kws = {'bins': 50, 'color': 'teal', 'alpha': 0.4, 'edgecolor':None}
plot_kws = {'color': 'teal', 'edgecolor': None, 'alpha': 0.1}
path = "/media/robbis/DATA/meg/reftep/derivatives/phastimate/"
columns = ['phases32', 'hjort', 'predictedyule32',
'amplitudes200', 'amplitudes32', 'mep1']
def read_channels(mat, key='chlabels'):
test = mat[key]
channels = list()
for st in test[0]:
obj = mat[st]
str1 = ''.join(chr(i) for i in obj[:])
channels.append(str1)
return np.array(channels)
for i in range(9):
sub = "sub-%03d" % (i+1)
filename = os.path.join(path, sub, sub+"_space-sensor_window-500_atlas-subject_band-mu_phastimate.mat")
mat = h5py.File(filename, 'r')
vector = []
channels = read_channels(mat)
idx = np.nonzero(channels == 'C3')[0]
idx = int(idx)
for c in columns:
if 'mep' in c:
idx = int(c[-1]) - 1
data = np.log(mat['AmpsMclean'][()][idx])
#elif 'amplitude' in c:
# data = np.log(mat[c][()][idx])
else:
data = mat[c][()][idx]
vector.append(data)
vector = np.vstack(vector)
df = pd.DataFrame(vector.T, columns=columns)
sns.pairplot(df, diag_kws=diag_kws, plot_kws=plot_kws)
mat.close()
##############################################################################
task = 'phastimate'
threshold_key = 'phases32'
full_dataset = list()
for i in range(9):
sub = "sub-%03d" % (i+1)
filename = os.path.join(path, sub, sub+"_space-sensor_window-500_atlas-subject_band-mu_%s.mat" %(task))
mat = h5py.File(filename, 'r')
vector = []
channels = read_channels(mat)
idx = np.nonzero(channels == 'C3')[0]
idx = int(idx)
for c in columns:
if 'mep' in c:
idx = int(c[-1]) - 1
data = np.log(mat['AmpsMclean'][()][idx])
data /= np.mean(data) * 0.01
elif 'amplitude' in c:
data = np.log(mat[c][()][idx])
else:
data = mat[c][()][idx]
vector.append(data)
vector = np.vstack(vector)
df = pd.DataFrame(vector.T, columns=columns)
#threshold = np.mean(df[threshold_key].values) + .5*np.std(df[threshold_key].values)
threshold = np.pi * .5
mask_upper = np.abs(df[threshold_key].values) < (threshold + .2)
mask_lower = np.abs(df[threshold_key].values) > (threshold - .2)
mask_amplitude = df['amplitudes32'] > np.median(df['amplitudes32'])
mask = np.logical_and(mask_upper, mask_lower)
mask = np.logical_and(mask, mask_amplitude)
df = df.loc[mask_amplitude]
#sns.pairplot(df, diag_kws=diag_kws, plot_kws=plot_kws)
mat.close()
peak_sign = np.sign(df[threshold_key].values)
df['peak_sign'] = peak_sign
df['subject'] = (i+1) * np.ones_like(df[threshold_key].values)
negative = df[threshold_key].values < 0
positive = df[threshold_key].values > 0
t, p = ttest_ind(df.loc[negative]['mep1'], df.loc[positive]['mep1'])
print(t, p)
full_dataset.append(df)
df_full = pd.concat(full_dataset)
pl.figure()
sns.barplot(data=df_full, x='subject', y='mep1', hue='peak_sign')
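# The trial-selection recipe used repeatedly below keeps trials whose phase
# magnitude falls in a window around pi/2 and whose mu amplitude is above the
# median. A hypothetical helper capturing that pattern (names and the default
# width are my own, not from the original script):
def phase_amplitude_mask(df, phase_key='phases32', amp_key='amplitudes32', width=.2):
    threshold = np.pi * .5
    near_phase = np.logical_and(np.abs(df[phase_key].values) < (threshold + width),
                                np.abs(df[phase_key].values) > (threshold - width))
    high_amp = df[amp_key].values > np.median(df[amp_key])
    return np.logical_and(near_phase, high_amp)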
################# seedstimate #####################################
path = "/media/robbis/DATA/meg/reftep/derivatives/seedstimate/"
threshold_key = 'phases32'
full_dataset = list()
columns = ['phases32',
'amplitudes200', 'amplitudes32', 'mep1']
for i in range(9):
sub = "sub-%03d" % (i+1)
filename = os.path.join(path, sub, sub+"_space-sensor_window-500_atlas-subject_band-mu_seedstimate.mat")
mat = h5py.File(filename, 'r')
vector = []
#channels = read_channels(mat)
#idx = np.nonzero(channels == 'C3')[0]
idx = 0
for c in columns:
if 'mep' in c:
idx = int(c[-1]) - 1
data = np.log(mat['AmpsMclean'][()][idx])
data /= np.mean(data) * 0.01
elif 'amplitude' in c:
data = np.log(mat[c][()][idx])
else:
data = mat[c][()][idx]
vector.append(data)
vector = np.vstack(vector)
df = pd.DataFrame(vector.T, columns=columns)
#threshold = np.mean(df[threshold_key].values) + .5*np.std(df[threshold_key].values)
threshold = np.pi * .5
mask_upper = np.abs(df[threshold_key].values) < (threshold + .35)
mask_lower = np.abs(df[threshold_key].values) > (threshold - .35)
mask_amplitude = df['amplitudes32'] > np.median(df['amplitudes32'])
mask = np.logical_and(mask_upper, mask_lower)
mask = np.logical_and(mask, mask_amplitude)
df = df.loc[mask]
sns.pairplot(df, diag_kws=diag_kws, plot_kws=plot_kws)
mat.close()
peak_sign = np.sign(df[threshold_key].values)
df['peak_sign'] = peak_sign
df['subject'] = (i+1) * np.ones_like(df[threshold_key].values)
negative = df[threshold_key].values < 0
positive = df[threshold_key].values > 0
t, p = ttest_ind(df.loc[negative]['mep1'], df.loc[positive]['mep1'])
print(t, p)
full_dataset.append(df)
df_full = pd.concat(full_dataset)
pl.figure()
sns.barplot(data=df_full, x='subject', y='mep1', hue='peak_sign')
##################### neighbours #####################
path = "/media/robbis/DATA/meg/reftep/derivatives/phastimate/"
neighbours = ['FCC3h', 'CCP5h', 'CCP3h', 'FCC5h']
for n in neighbours:
task = 'phastimate'
threshold_key = 'phases32'
full_dataset = list()
for i in range(9):
sub = "sub-%03d" % (i+1)
filename = os.path.join(path, sub, sub+"_space-sensor_window-500_atlas-subject_band-mu_%s.mat" %(task))
mat = h5py.File(filename, 'r')
vector = []
channels = read_channels(mat)
idx = np.nonzero(channels == n)[0]
if len(idx) == 0:
continue
idx = int(idx)
for c in columns:
if 'mep' in c:
idx = int(c[-1]) - 1
data = np.log(mat['AmpsMclean'][()][idx])
data /= np.mean(data) * 0.01
elif 'amplitude' in c:
data = np.log(mat[c][()][idx])
else:
data = mat[c][()][idx]
vector.append(data)
vector = np.vstack(vector)
df = pd.DataFrame(vector.T, columns=columns)
#threshold = np.mean(df[threshold_key].values) + .5*np.std(df[threshold_key].values)
"""
threshold = np.pi * .5
mask_upper = np.abs(df[threshold_key].values) < (threshold + .2)
mask_lower = np.abs(df[threshold_key].values) > (threshold - .2)
mask_amplitude = df['amplitudes32'] > np.median(df['amplitudes32'])
mask = np.logical_and(mask_upper, mask_lower)
mask = np.logical_and(mask, mask_amplitude)
df = df.loc[mask_amplitude]
"""
#sns.pairplot(df, diag_kws=diag_kws, plot_kws=plot_kws)
mat.close()
peak_sign = np.sign(df[threshold_key].values)
df['peak_sign'] = peak_sign
df['subject'] = (i+1) * np.ones_like(df[threshold_key].values)
negative = df[threshold_key].values < 0
positive = df[threshold_key].values > 0
t, p = ttest_ind(df.loc[negative]['mep1'], df.loc[positive]['mep1'])
print(t, p)
full_dataset.append(df)
df_full = pd.concat(full_dataset)
pl.figure()
sns.barplot(data=df_full, x='subject', y='mep1', hue='peak_sign')
pl.ylim((85, 105))
########################## Selected trials ############################
path = "/media/robbis/DATA/meg/reftep/derivatives/phastimate/"
selection_fname = "/media/robbis/DATA/meg/reftep/derivatives/trial_selection.mat"
columns = ['phases32', 'hjort', 'predictedyule32',
'amplitudes200', 'amplitudes32', 'mep1']
sel_mat = loadmat(selection_fname, squeeze_me=True)
selection = [[], []]
for t in sel_mat.keys():
if t[0] != 't':
continue
trials = sel_mat[t]
subj_idx = int(t[3])
criterion = int(t[-1])
if criterion != 0:
criterion = 1
selection[criterion].append(trials)
def read_channels(mat, key='chlabels'):
test = mat[key]
channels = list()
for st in test[0]:
obj = mat[st]
str1 = ''.join(chr(i) for i in obj[:])
channels.append(str1)
return np.array(channels)
threshold_key = 'phases32'
full_dataset = list()
for i in range(9):
sub = "sub-%03d" % (i+1)
filename = os.path.join(path, sub, sub+"_space-sensor_window-500_atlas-subject_band-mu_phastimate.mat")
mat = h5py.File(filename, 'r')
vector = []
channels = read_channels(mat)
idx = np.nonzero(channels == 'C3')[0]
idx = int(idx)
for c in columns:
if 'mep' in c:
idx = int(c[-1]) - 1
data = np.log(mat['AmpsMclean'][()][idx])
data /= np.mean(data) * 0.01
#elif 'amplitude' in c:
# data = np.log(mat[c][()][idx])
else:
data = mat[c][()][idx]
vector.append(data)
vector = np.vstack(vector)
df = pd.DataFrame(vector.T, columns=columns)
# sns.pairplot(df, diag_kws=diag_kws, plot_kws=plot_kws)
mat.close()
for j, trials in enumerate(selection):
subj_trials = trials[i]
# Work on a per-criterion copy: reassigning df here would leave the second
# selection criterion filtering an already-subset, already-masked frame.
df_j = df.loc[subj_trials]
assert df_j.shape[0] == len(subj_trials)
threshold = np.pi * .5
mask_upper = np.abs(df_j[threshold_key].values) < (threshold + .35)
mask_lower = np.abs(df_j[threshold_key].values) > (threshold - .35)
mask_amplitude = df_j['amplitudes32'] > np.median(df_j['amplitudes32'])
mask = np.logical_and(mask_upper, mask_lower)
mask = np.logical_and(mask, mask_amplitude)
df_j = df_j.loc[mask]
peak_sign = np.sign(df_j[threshold_key].values)
df_j['peak_sign'] = peak_sign
df_j['subject'] = (i+1) * np.ones_like(df_j[threshold_key].values)
df_j['selection'] = (j+1) * np.ones_like(df_j[threshold_key].values)
negative = df_j[threshold_key].values < 0
positive = df_j[threshold_key].values > 0
t, p = ttest_ind(df_j.loc[negative]['mep1'], df_j.loc[positive]['mep1'])
print(t, p)
full_dataset.append(df_j)
df_full = pd.concat(full_dataset)
pl.figure()
g = sns.catplot(x="subject", y="mep1",
hue="peak_sign", row="selection",
data=df_full, kind="bar")
sns.barplot(data=df_full, x='subject', y='mep1', hue='peak_sign')
pl.ylim((85, 105))
|
{"hexsha": "98e8177c3c678cc4d5e19d96398ec72468f06d8c", "size": 10490, "ext": "py", "lang": "Python", "max_stars_repo_path": "mvpa_itab/script/mambo/reftep/reftep_replica_results.py", "max_stars_repo_name": "robbisg/mvpa_itab_wu", "max_stars_repo_head_hexsha": "e3cdb198a21349672f601cd34381e0895fa6484c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T08:59:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T08:59:22.000Z", "max_issues_repo_path": "mvpa_itab/script/mambo/reftep/reftep_replica_results.py", "max_issues_repo_name": "robbisg/mvpa_itab_wu", "max_issues_repo_head_hexsha": "e3cdb198a21349672f601cd34381e0895fa6484c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2016-08-04T14:49:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T08:47:48.000Z", "max_forks_repo_path": "mvpa_itab/script/mambo/reftep/reftep_replica_results.py", "max_forks_repo_name": "robbisg/mvpa_itab_wu", "max_forks_repo_head_hexsha": "e3cdb198a21349672f601cd34381e0895fa6484c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9779005525, "max_line_length": 111, "alphanum_fraction": 0.5792183031, "include": true, "reason": "from scipy", "num_tokens": 2984}
|
# coding: utf-8
# Script to demo scikit for tweet popular/unpopular classification.
# In[1]:
from __future__ import division
from __future__ import print_function
import csv
import datetime as dt
import os
import platform
import sys
import numpy as np
import pandas
from sklearn import preprocessing
from sklearn import svm
from sklearn import tree
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import classification_report
# In[2]:
def csv_to_dict_cesar(csv_filename):
# Let's say we are interested in only the count features
count_features = ['_char_count', '_hashtag_count', '_word_count', '_url_count']
with open(csv_filename) as f:
features = [({k: int(v) for k, v in row.items() if k in count_features}, row['_popular'])
for row in csv.DictReader(f, skipinitialspace=True)]
X = [f[0] for f in features]
Y = [f[1] for f in features]
return (X, Y)
# In[3]:
def csv_to_dict(csv_filename):
"""Open feature table with csv library.
Task: Run with '_rt_count'. See the good results!
"""
non_numeric_features = ['', '_text', '_urls', '_mentions', '_hashtags',
'_tweet_datetime', '_popular', '_rt_count']
with open(csv_filename, 'rU') as f:
rows = csv.DictReader(f, skipinitialspace=True, delimiter='|')
labels = [row['_popular'] for row in rows]
features = []
with open(csv_filename, 'rU') as f:
rows = csv.DictReader(f, skipinitialspace=True, delimiter='|')
for row in rows:
#print(row)
row_dict = {}
for k, v in row.items():
if k not in non_numeric_features:
try:
row_dict[k] = int(v)
# these tries catch a few junk entries
except TypeError:
row_dict[k] = 0
except ValueError:
row_dict[k] = 0
#row_dict = {k: int(v) for k, v in row.items() if k not in non_numeric_features}
features.append(row_dict)
return features, labels
# In[4]:
def csv_to_df(csv_file):
"""Open csv with Pandas DataFrame, then convert to dict
and return.
TODO: Fix this.
"""
dataframe = pandas.read_csv(csv_file,
encoding='utf-8',
engine='python',
sep='|',
delimiter='|',
index_col=0)
return dataframe
# In[5]:
def train(csv_filename):
print('Loading CSV into dict ...')
t0 = dt.datetime.utcnow()
data, target = csv_to_dict(csv_filename)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
print('Loading dict into vectorizer')
t0 = dt.datetime.utcnow()
vec = DictVectorizer()
X = vec.fit_transform(data).toarray() # change to numpy array
Y = np.array(target) # change to numpy array
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
'''
-In case we need to know the features
'''
feature_names = vec.get_feature_names()
'''
-Dividing the data into train and test
-random_state is pseudo-random number generator state used for
random sampling
'''
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)
# write models dir if not present
models_dir = 'models'
if not os.path.isdir(models_dir):
os.mkdir(models_dir)
'''
-PREPOCESSING
-Here, scaled data has zero mean and unit varience
-We save the scaler to later use with testing/prediction data
'''
print('Scaling data ...')
t0 = dt.datetime.utcnow()
scaler = preprocessing.StandardScaler().fit(X_train)
joblib.dump(scaler, 'models/scaler.pickle')
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
'''
-This is where we define the models
-Here, I use SVM and Decision tree with pre-defined parameters
-We can learn these parameters given our data
'''
print('Defining and fitting models ...')
t0 = dt.datetime.utcnow()
clf0 = svm.LinearSVC(C=100.)
clf1 = tree.DecisionTreeClassifier()
clf0.fit(X_train_scaled, Y_train)
clf1.fit(X_train_scaled, Y_train)
joblib.dump(clf0, 'models/svc.pickle')
joblib.dump(clf1, 'models/tree.pickle')
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
Y_prediction_svc = clf0.predict(X_test_scaled)
print('svc_predictions ', Y_prediction_svc)
Y_prediction_tree = clf1.predict(X_test_scaled)
print('tree_predictions ', Y_prediction_tree)
expected = Y_test
print('actual_values ', expected)
print()
'''
Classifiation metrics
(Case 1): SVMs
'''
print()
print('----Linear SVC_report--------------------------')
print(classification_report(expected, Y_prediction_svc))
'''
Classification metrics
(case 2): Decision tree
'''
print()
print('----Tree_report--------------------------------')
print(classification_report(expected, Y_prediction_tree))
# In[ ]:
train("feature_tables/all.csv")
# In[ ]:
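# Hypothetical prediction-time counterpart (not in the original notebook).
# Reusing the persisted scaler/classifier also requires saving the fitted
# DictVectorizer during training (e.g. joblib.dump(vec, 'models/vec.pickle')),
# since a freshly fitted vectorizer would not reproduce the column order.
def predict_new(feature_dicts):
    vec = joblib.load('models/vec.pickle')       # assumed saved in train()
    scaler = joblib.load('models/scaler.pickle')
    clf = joblib.load('models/svc.pickle')
    X = vec.transform(feature_dicts).toarray()
    return clf.predict(scaler.transform(X))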
|
{"hexsha": "44b8f61c42d74976e78f5ee63affd7071b113a41", "size": 5547, "ext": "py", "lang": "Python", "max_stars_repo_path": "public_talks/2016_02_26_columbia/do_ml_on_feature_tables (all.csv).py", "max_stars_repo_name": "kylepjohnson/ipython_notebooks", "max_stars_repo_head_hexsha": "7f77ec06a70169cc479a6f912b4888789bf28ac4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-08-10T09:03:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-06T21:34:20.000Z", "max_issues_repo_path": "public_talks/2016_02_26_columbia/do_ml_on_feature_tables (all.csv).py", "max_issues_repo_name": "kylepjohnson/ipython", "max_issues_repo_head_hexsha": "7f77ec06a70169cc479a6f912b4888789bf28ac4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "public_talks/2016_02_26_columbia/do_ml_on_feature_tables (all.csv).py", "max_forks_repo_name": "kylepjohnson/ipython", "max_forks_repo_head_hexsha": "7f77ec06a70169cc479a6f912b4888789bf28ac4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-10-07T01:56:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-06T21:33:28.000Z", "avg_line_length": 27.8743718593, "max_line_length": 97, "alphanum_fraction": 0.6041103299, "include": true, "reason": "import numpy", "num_tokens": 1287}
|
#!/usr/bin/env python3
# plotCsv - Create simple plots from a CSV file.
# Dave McEwan 2020-04-29
#
# Run like:
# plotCsv mydata.csv
# OR
# cat mydata.csv | plotCsv -o myplot
import argparse
import functools
import matplotlib
matplotlib.use("Agg") # Don't require X11.
import matplotlib.pyplot as plt
import numpy as np
import sys
from dmppl.base import fnameAppendExt, run, verb, rdLines, \
argparse_nonNegativeInteger
__version__ = "0.1.0"
# {{{ argparser
argparser = argparse.ArgumentParser(
description = "plotCsv - Wrapper around np.loadtxt() for quick plotting.",
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
argparser.add_argument("-o", "--output",
type=str,
default="plot",
help="Output filepath, without extension.")
argparser.add_argument("input",
type=str,
help="CSV file, or STDIN if None.")
argparser.add_argument("--pdf",
action="store_true",
help="Create PDF instead of PNG.")
argparser.add_argument("--skiprows",
type=functools.partial(argparse_nonNegativeInteger, "skiprows"),
default=0,
help="Skip this many lines, excluding comments.")
argparser.add_argument("--delimiter",
type=str,
default=',',
help="Column delimiter.")
argparser.add_argument("--figsize",
type=str,
default="16,10",
help="Horizontal,vertical (inches).")
argparser.add_argument("--markers",
type=str,
default=".ox^s*",
help="Markers in matplotlib notation.")
argparser.add_argument("--labels",
type=str,
default="1,2,3,4,5,6",
help="Comma-separated list of labels")
argparser.add_argument("--title",
type=str,
default=None)
argparser.add_argument("--xlabel",
type=str,
default=None)
argparser.add_argument("--ylabel",
type=str,
default=None)
argparser.add_argument("--xlim",
type=str,
default=None,
help="Limits for X-axis like '0.1,5.5'.")
argparser.add_argument("--ylim",
type=str,
default=None,
help="Limits for Y-axis like '0.1,5.5'.")
argparser.add_argument("--vlines",
type=str,
default=None,
help="Vertical lines like '0,1.8'.")
argparser.add_argument("--hlines",
type=str,
default=None,
help="Horizontal lines like '0,1.8'.")
argparser.add_argument("--baseX",
action="store_true",
help="Set --addX to negative top value of leftmost column.")
argparser.add_argument("--baseY",
action="store_true",
help="Set --addY to negative top value of right columns.")
argparser.add_argument("--addX",
type=float,
default=None,
help="Add constant to left column.")
argparser.add_argument("--addY",
type=float,
default=None,
help="Add constant to right column(s).")
argparser.add_argument("--mulX",
type=float,
default=None,
help="Multiply left column.")
argparser.add_argument("--mulY",
type=float,
default=None,
help="Multiply right column(s).")
argparser.add_argument("--intX",
action="store_true",
help="Treat left column as integers rather than reals.")
argparser.add_argument("--intY",
action="store_true",
help="Treat right column as integers rather than reals.")
argparser.add_argument("--product",
action="store_true",
help="Plot product of x and y, after manipulation, on Y-axis.")
argparser.add_argument("--diffX",
action="store_true",
help="Difference x for plotting product.")
argparser.add_argument("--diffY",
action="store_true",
help="Difference y for plotting product.")
argparser.add_argument("--invX",
action="store_true",
help="Inverse x for plotting product.")
argparser.add_argument("--invY",
action="store_true",
help="Inverse y for plotting product.")
# }}} argparser
def main(args) -> int: # {{{
'''
'''
###########################################################################
# 1. Setup plot
###########################################################################
fignum = 0
# figsize used to set dimensions in inches.
# ax.set_aspect() doesn't work for KDE where Y-axis is scaled.
figsize = tuple(int(a) for a in args.figsize.split(','))
assert 2 == len(figsize)
assert all(0 < i for i in figsize)
fig = plt.figure(fignum, figsize=figsize)
if args.title:
plt.title(args.title)
if args.xlabel:
plt.xlabel(args.xlabel)
if args.ylabel:
plt.ylabel(args.ylabel)
if args.xlim:
xLo, xHi = args.xlim.split(',')
plt.xlim(float(xLo), float(xHi))
if args.ylim:
yLo, yHi = args.ylim.split(',')
plt.ylim(float(yLo), float(yHi))
markers = list(args.markers)
labels = list(l for l in args.labels.split(',') if 0 < len(l))
###########################################################################
# 2. Populate data
###########################################################################
a = np.loadtxt(rdLines(args.input),
skiprows=args.skiprows,
delimiter=args.delimiter,
unpack=True)
x = a[0]
if args.baseX:
args.addX = x[0] * -1
if args.addX:
verb("Add constant to X axis. (+ %0.05f)" % args.addX)
x += args.addX
if args.mulX:
verb("Multiply X axis by constant. (* %0.05f)" % args.mulX)
x *= args.mulX
if args.intX:
verb("Reduce X axis to integers.")
x = x.astype(np.int)
if args.product:
prdX = np.copy(x)
if args.diffX:
verb("Product difference X axis.")
tmpX = np.zeros(prdX.shape)
tmpX[1:] = np.diff(prdX)
prdX = tmpX
if args.invX:
verb("Product X**-1 axis.")
prdX = prdX.astype(np.float)
prdX **= -1
ys = a[1:]
for i,y in enumerate(ys):
if args.baseY:
args.addY = y[0] * -1
if args.addY:
verb("Add constant to Y axis[%d]. (+ %0.05f)" % (i, args.addY))
y += args.addY
if args.mulY:
verb("Multiply Y axis (%d) by constant. (%0.05f)" % (i, args.mulY))
y *= args.mulY
if args.intY:
verb("Reduce Y axis (%d) to integers." % i)
y = y.astype(np.int)
if args.product:
prdY = np.copy(y)
if args.diffY:
verb("Product difference Y axis.")
tmpY = np.zeros(prdY.shape)
tmpY[1:] = np.diff(prdY)
prdY = tmpY
if args.invY:
verb("Product Y**-1 axis.")
prdY = prdY.astype(np.float)
prdY **= -1
y = prdX * prdY
# At loop-body level, persist the result: in-place ops ('+=', '*=') already
# mutate ys, but astype() and the product rebind y locally and would otherwise
# be lost before the plotting loop below.
ys[i] = y
###########################################################################
# 3. Draw plot
###########################################################################
for i,y in enumerate(ys):
marker = markers[i] if i < len(markers) else ''
label = labels[i] if i < len(labels) else None
kwargsPlot = {"marker": marker}
if label is not None:
kwargsPlot.update({"label": label})
plt.plot(x, y, **kwargsPlot)
if 0 < len(labels):
plt.legend()
if args.vlines:
for line in args.vlines.split(','):
plt.axvline(y=float(line), color="green", linestyle='-', linewidth=1)
if args.hlines:
for line in args.hlines.split(','):
plt.axhline(y=float(line), color="green", linestyle='-', linewidth=1)
###########################################################################
# 4. Save plot to file
###########################################################################
if args.pdf:
plt.savefig(fnameAppendExt(args.output, "pdf"), bbox_inches="tight")
else:
plt.savefig(fnameAppendExt(args.output, "png"), bbox_inches="tight")
plt.close()
return 0
# }}} def main
def entryPoint(argv=sys.argv):
return run(__name__, argv=argv)
if __name__ == "__main__":
sys.exit(entryPoint())
|
{"hexsha": "9be02203822770b513434c26b0b0e33ba95da0a4", "size": 8032, "ext": "py", "lang": "Python", "max_stars_repo_path": "dmppl/scripts/plotCsv.py", "max_stars_repo_name": "DaveMcEwan/dmppl", "max_stars_repo_head_hexsha": "68e8a121d4591360080cd40121add1796ae48a1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-05T19:46:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T19:46:43.000Z", "max_issues_repo_path": "dmppl/scripts/plotCsv.py", "max_issues_repo_name": "DaveMcEwan/dmppl", "max_issues_repo_head_hexsha": "68e8a121d4591360080cd40121add1796ae48a1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dmppl/scripts/plotCsv.py", "max_forks_repo_name": "DaveMcEwan/dmppl", "max_forks_repo_head_hexsha": "68e8a121d4591360080cd40121add1796ae48a1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1, "max_line_length": 81, "alphanum_fraction": 0.5502988048, "include": true, "reason": "import numpy", "num_tokens": 1916}
|
using EzXML
using DataStructures
using LightGraphs
using Vulkan_Headers_jll:vk_xml
xdoc = readxml(vk_xml)
xroot = xdoc.root
include("utils.jl")
include("handles.jl")
include("graph.jl")
base_types_exceptions = Dict(
"CAMetalLayer" => "void",
"ANativeWindow" => "void",
"AHardwareBuffer" => "void",
)
vk_base_types_mapping = Dict(
("uint$(size)_t" => "UInt$size" for size ∈ (8, 16, 32, 64))...,
("int$(size)_t" => "Int$size" for size ∈ (8, 16, 32, 64))...,
"float" => "Float32",
"double" => "Float64",
"void" => "Cvoid",
)
function translate_base_type_c(base_type)
base_type ∉ keys(vk_base_types_mapping) && error("Unknown base type $base_type")
vk_base_types_mapping[base_type]
end
function fetch_base_types(xroot)
vk_base_types_nodes = findall("//type[@category='basetype']", xroot)
names = member_attr.(vk_base_types_nodes, "name")
println.(vk_base_types_nodes)
res = Dict()
for (i, name) ∈ enumerate(names)
res[name] = translate_base_type_c(name ∈ keys(base_types_exceptions) ? base_types_exceptions[name] : member_attr(vk_base_types_nodes[i], "type"))
end
res
end
base_types_vk = fetch_base_types(xroot)
function translate_base_type_vk(base_type)
base_type ∉ keys(base_types_vk) && error("Unknown vulkan type $base_type")
base_types_vk[base_type]
end
"""
translate_type(base_type)
Translates a type from available C-based or Vulkan-based definitions.
# Examples
```
julia> translate_type("uint_32_t")
"UInt32"
julia> translate_type("VkBool32")
"UInt32"
```
"""
function translate_type(base_type)
base_type ∈ keys(base_types_vk) && return translate_base_type_vk(base_type)
base_type ∈ keys(vk_base_types_mapping) && return translate_base_type_c(base_type)
error("Unknown type $base_type")
end
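# Worked examples (sketch): translate_type("uint32_t") returns "UInt32" via the
# C mapping above, and, assuming vk.xml lists CAMetalLayer under the basetype
# category, fetch_base_types routes it through base_types_exceptions so that
# translate_type("CAMetalLayer") returns "Cvoid".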
|
{"hexsha": "bd1e24b81c8e0e0720e990572c779790d7e89e9b", "size": 1808, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/spec/dev2.jl", "max_stars_repo_name": "serenity4/VulkanGen.jl", "max_stars_repo_head_hexsha": "cc876405ac0158d288062ed1b96fa586a633be89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/spec/dev2.jl", "max_issues_repo_name": "serenity4/VulkanGen.jl", "max_issues_repo_head_hexsha": "cc876405ac0158d288062ed1b96fa586a633be89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/spec/dev2.jl", "max_forks_repo_name": "serenity4/VulkanGen.jl", "max_forks_repo_head_hexsha": "cc876405ac0158d288062ed1b96fa586a633be89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2028985507, "max_line_length": 153, "alphanum_fraction": 0.7096238938, "num_tokens": 501}
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = "Richa Bharti"
__copyright__ = "Copyright 2019-2022"
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "Richa Bharti, Dominik Grimm"
__email__ = "richabharti74@gmail.com"
__status__ = "Dev"
import pandas as pd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
import os
parser = argparse.ArgumentParser(description='occurrence based ranking and visualization. ')
parser.add_argument('final_output_file', type=str,
help='a final combined input file containing only both (translational and transcriptional) classified genomes')
parser.add_argument('path_plots' , type=str,
help='path for all plots generated')
parser.add_argument('path_outputfiles', type=str,
help='path for all output files and plots')
args = parser.parse_args()
final_output_file = args.final_output_file
path_plots = args.path_plots
path_outputfiles = args.path_outputfiles
final_output = pd.read_csv(final_output_file, sep='\t')
def count_to_dict(it, dic):
    if it in ('na', 'NA', 'hypothetical protein', 'conserved hypothetical protein', '-'):
return
if it in dic.keys():
dic[it] += 1
else:
dic[it] = 1
return
def add_to_dict(it, dic, val):
    if it in ('na', 'NA', 'hypothetical protein', 'conserved hypothetical protein', '-'):
return
if it in dic.keys():
dic[it] = dic[it] + ',' + val.rstrip()
else:
dic[it] = val.rstrip()
return
def count_val_to_dict(source_dic, target_dic, split_ch):
for k in source_dic.keys():
target_dic[k] = len(source_dic[k].split(split_ch))
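# Illustrative sketch of the three helpers above on made-up data:
#   d_cnt, d_names, d_name_cnt = dict(), dict(), dict()
#   for cog, genome in [("COG1", "g1"), ("COG1", "g2"), ("COG2", "g1")]:
#       count_to_dict(cog, d_cnt)            # d_cnt -> {"COG1": 2, "COG2": 1}
#       add_to_dict(cog, d_names, genome)    # d_names["COG1"] -> "g1,g2"
#   count_val_to_dict(d_names, d_name_cnt, ',')  # d_name_cnt["COG1"] -> 2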
func_dict = dict()
func_dict_name = dict()
func_dict_name_cnt = dict()
gene_dict = dict()
gene_dict_name = dict()
gene_dict_name_cnt = dict()
cognum_dict = dict()
cognum_dict_name = dict()
cognum_dict_name_cnt = dict()
for i in range(0,len(final_output)):
func_lst = final_output.iloc[i, 9].split(';')[0:-1] # function
gene_lst = final_output.iloc[i, 7].split(',')# gene
cognum_lst = final_output.iloc[i, 8].split(',') # cog number
for itm in func_lst:
count_to_dict(itm, func_dict)
add_to_dict(itm, func_dict_name, final_output.iloc[i,1])
for itm in gene_lst:
count_to_dict(itm.lower(), gene_dict)
add_to_dict(itm.lower(), gene_dict_name, final_output.iloc[i,1])
for itm in cognum_lst:
count_to_dict(itm, cognum_dict)
add_to_dict(itm, cognum_dict_name, final_output.iloc[i,1])
count_val_to_dict(func_dict_name, func_dict_name_cnt, ',')
count_val_to_dict(gene_dict_name, gene_dict_name_cnt, ',')
count_val_to_dict(cognum_dict_name, cognum_dict_name_cnt, ',')
func_ana_output = pd.DataFrame(
{#'start': operon_start,
#'stop': operon_stop,
'genome':list(func_dict_name.keys()),
#'name': list(func_dict_name.values()),
'count': list(func_dict_name_cnt.values()),
'genome_count': list(func_dict.values())
})
func_output_text = os.path.join(args.path_outputfiles, 'functional_occurrence_based_ranking_output.txt')
func_ana_output.to_csv(func_output_text, sep='\t')
func_ana_output_sort = func_ana_output.sort_values(ascending=False, by=['count'])
gsum = np.sum(func_ana_output_sort['count'])
top_cutoff_num = 20
top_func_ana = func_ana_output_sort[0:top_cutoff_num]
perc_func_ana = list(top_func_ana['count']/gsum * 100)
labels_fun = top_func_ana['genome']
sizes_fun = perc_func_ana
fig1, ax1 = plt.subplots()
fig1.set_size_inches(10, 10)
clrs = sns.color_palette('husl', n_colors=top_cutoff_num)
ax1.pie(sizes_fun, labels=labels_fun, autopct='%1.1f%%', startangle=90,colors=clrs)
centre_circle = plt.Circle((0,0),0.70,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ax1.tick_params(labelsize=10)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
func_output_fig1 = os.path.join(args.path_plots, 'occurrence_functional_pie_chart.png')
fig1.savefig(func_output_fig1, dpi=300,bbox_inches='tight')
plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
xax = range(0,len(top_func_ana))
plt.bar(xax, perc_func_ana)
xax = range(0,len(top_func_ana))
plt.xticks(xax, top_func_ana['genome'])
plt.xlabel("Funtions",fontsize=14)
plt.ylabel("Percentage occurrence",fontsize=14)
plt.tick_params(labelsize=8,rotation=90)
func_output_fig2 = os.path.join(args.path_plots, 'occurrence_functional_bar_graph.png')
fig.savefig(func_output_fig2,dpi=300,bbox_inches='tight')
plt.show()
gene_ana_output = pd.DataFrame(
{#'start': operon_start,
#'stop': operon_stop,
'genome':list(gene_dict_name.keys()),
#'name': list(gene_dict_name.values()),
'count': list(gene_dict_name_cnt.values()),
'genome_count': list(gene_dict.values())
})
gene_output_text = os.path.join(args.path_outputfiles, 'gene_occurrence_based_ranking_output.txt')
gene_ana_output.to_csv(gene_output_text, sep='\t')
gene_ana_output_sort = gene_ana_output.sort_values(ascending=False, by=['count'])
gsum = np.sum(gene_ana_output_sort['count'])
top_cutoff_num = 20
top_gene_ana = gene_ana_output_sort[0:top_cutoff_num]
#print (top_gene_ana['count'])
perc_gene_ana = list(top_gene_ana['count']/gsum * 100)
labels_gene = top_gene_ana['genome']
sizes_gene = perc_gene_ana
#print (sizes)
fig1, ax1 = plt.subplots()
fig1.set_size_inches(10, 10)
clrs = sns.color_palette('husl', n_colors=top_cutoff_num)
ax1.pie(sizes_gene, labels=labels_gene, autopct='%1.1f%%', startangle=90,colors=clrs)
centre_circle = plt.Circle((0,0),0.70,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ax1.tick_params(labelsize=10)
ax1.axis('equal')
gene_output_fig1 = os.path.join(args.path_plots, 'occurrence_gene_pie_chart.png')
fig1.savefig(gene_output_fig1,dpi=300)
plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
xax = range(0,len(top_gene_ana))
plt.bar(xax, perc_gene_ana)
xax = range(0,len(top_gene_ana))
plt.xticks(xax, top_gene_ana['genome'])
plt.xlabel("Genes",fontsize=14)
plt.ylabel("Percentage occurrence",fontsize=14)
plt.tick_params(labelsize=12,rotation=90)
gene_output_fig2 = os.path.join(args.path_plots, 'occurrence_gene_bar_graph.png')
fig.savefig(gene_output_fig2,dpi=300)
plt.show()
cognum_ana_output = pd.DataFrame(
{#'start': operon_start,
#'stop': operon_stop,
'genome':list(cognum_dict_name.keys()),
#'name': list(cognum_dict_name.values()),
'count': list(cognum_dict_name_cnt.values()),
'genome_count': list(cognum_dict.values())
})
cog_output_text = os.path.join(args.path_outputfiles, 'cog_occurrence_based_ranking_output.txt')
cognum_ana_output.to_csv(cog_output_text, sep='\t')
cognum_ana_output_sort = cognum_ana_output.sort_values(ascending=False, by=['count'])
gsum = np.sum(cognum_ana_output_sort['count'])
top_cutoff_num = 20
top_cog_ana = cognum_ana_output_sort[0:top_cutoff_num]
perc_cog_ana = list(top_cog_ana['count']/gsum * 100)
labels_cog = top_cog_ana['genome']
sizes_cog = perc_cog_ana
fig1, ax1 = plt.subplots()
fig1.set_size_inches(10, 10)
clrs = sns.color_palette('husl', n_colors=top_cutoff_num)
ax1.pie(sizes_cog, labels=labels_cog, autopct='%1.1f%%', startangle=90,colors=clrs)
centre_circle = plt.Circle((0,0),0.70,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ax1.tick_params(labelsize=10)
ax1.axis('equal')
cog_output_fig1 = os.path.join(args.path_plots, 'occurrence_cog_pie_chart.png')
fig1.savefig(cog_output_fig1,dpi=300,bbox_inches='tight')
plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
xax = range(0,len(top_cog_ana))
plt.bar(xax, perc_cog_ana)
xax = range(0,len(top_cog_ana))
plt.xticks(xax, top_cog_ana['genome'])
plt.xlabel("COG ID",fontsize=14)
plt.ylabel("Percentage occurrence",fontsize=14)
plt.tick_params(labelsize=12,rotation=90)
cog_output_fig2 = os.path.join(args.path_plots, 'occurrence_cog_bar_graph.png')
fig.savefig(cog_output_fig2,dpi=300,bbox_inches='tight')
plt.show()
|
{"hexsha": "690539516871c8798af906595436c01cd5386c98", "size": 8161, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/occurrence_based_ranking_analysis.py", "max_stars_repo_name": "grimmlab/transcriptional-translational-coupling", "max_stars_repo_head_hexsha": "3dec2d7c25973b4c37f1810468d4778726f96f0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/occurrence_based_ranking_analysis.py", "max_issues_repo_name": "grimmlab/transcriptional-translational-coupling", "max_issues_repo_head_hexsha": "3dec2d7c25973b4c37f1810468d4778726f96f0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/occurrence_based_ranking_analysis.py", "max_forks_repo_name": "grimmlab/transcriptional-translational-coupling", "max_forks_repo_head_hexsha": "3dec2d7c25973b4c37f1810468d4778726f96f0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1299212598, "max_line_length": 131, "alphanum_fraction": 0.7225830168, "include": true, "reason": "import numpy", "num_tokens": 2297}
|
import numpy as np
np.sin.nin + "foo" # E: Unsupported operand types
np.sin(1, foo="bar") # E: Unexpected keyword argument
np.sin(1, extobj=["foo", "foo", "foo"]) # E: incompatible type
np.abs(None) # E: incompatible type
|
{"hexsha": "ae7833de6c1a5d004bc18964317bf3e8941c198d", "size": 235, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/fail/ufuncs.py", "max_stars_repo_name": "JE-Chen/je_old_repo", "max_stars_repo_head_hexsha": "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/fail/ufuncs.py", "max_issues_repo_name": "JE-Chen/je_old_repo", "max_issues_repo_head_hexsha": "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/fail/ufuncs.py", "max_forks_repo_name": "JE-Chen/je_old_repo", "max_forks_repo_head_hexsha": "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-26T22:41:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-26T22:41:56.000Z", "avg_line_length": 29.375, "max_line_length": 64, "alphanum_fraction": 0.6468085106, "include": true, "reason": "import numpy", "num_tokens": 72}
|
/**********************************************************\
Original Author: Dan Weatherford
Imported with permission by: Richard Bateman (taxilian)
Imported: Aug 7, 2010
License: Dual license model; choose one of two:
New BSD License
http://www.opensource.org/licenses/bsd-license.php
- or -
GNU Lesser General Public License, version 2.1
http://www.gnu.org/licenses/lgpl-2.1.html
Copyright 2009 Dan Weatherford, Facebook inc
\**********************************************************/
#ifdef _WIN32
#include <windows.h>
#else
#include "../3rdParty/utf8/utf8.h"
#include <xlocale.h>
#include <wctype.h>
#endif
#include <stdexcept>
#include <boost/scoped_array.hpp>
#include "precompiled_headers.h" // On windows, everything above this line in PCH
#include <limits.h>
#include <boost/algorithm/string/case_conv.hpp>
#include "utf8_tools.h"
namespace FB {
std::string wstring_to_utf8(const std::wstring& src) {
std::string out_str;
#ifdef _WIN32
utf8::utf16to8(src.begin(), src.end(), std::back_inserter(out_str));
#else
utf8::utf32to8(src.begin(), src.end(), std::back_inserter(out_str));
#endif
return out_str;
}
std::wstring utf8_to_wstring(const std::string& src) {
std::wstring out_str;
#ifdef _WIN32
utf8::utf8to16(src.begin(), src.end(), std::back_inserter(out_str));
#else
utf8::utf8to32(src.begin(), src.end(), std::back_inserter(out_str));
#endif
return out_str;
}
    std::wstring wstring_tolower(const std::wstring& src) {
        return boost::algorithm::to_lower_copy(src);
    }
}
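// Usage sketch: round-tripping text through the helpers above.
//   std::wstring w = FB::utf8_to_wstring("\xc3\xa9"); // UTF-8 bytes for U+00E9
//   std::string s = FB::wstring_to_utf8(w);           // same bytes back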
|
{"hexsha": "e43b60a13b9b62b49f60a559a402853d8a10368f", "size": 1657, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "chrome/plugin/src/ScriptingCore/utf8_tools.cpp", "max_stars_repo_name": "Faham/bric-n-brac", "max_stars_repo_head_hexsha": "c886e0855869a794700eb385171bbf5bfd595aed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2017-05-08T08:23:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-29T04:41:47.000Z", "max_issues_repo_path": "chrome/plugin/src/ScriptingCore/utf8_tools.cpp", "max_issues_repo_name": "Faham/bric-n-brac", "max_issues_repo_head_hexsha": "c886e0855869a794700eb385171bbf5bfd595aed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chrome/plugin/src/ScriptingCore/utf8_tools.cpp", "max_forks_repo_name": "Faham/bric-n-brac", "max_forks_repo_head_hexsha": "c886e0855869a794700eb385171bbf5bfd595aed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3015873016, "max_line_length": 81, "alphanum_fraction": 0.6185878093, "num_tokens": 403}
|
{"mathlib_filename": "Mathlib.Tactic.GuardGoalNums", "llama_tokens": 0}
|
|
#!/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: check_latex_label_ref.py
# @brief:
# @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com
# @version: 0.0.1
# @creation date: 23-01-2021
# @last modified: Mon 25 Jan 2021 06:07:03 PM EST
import numpy as np
#from PIL import Image
import glob
import os
from os import listdir
from os.path import isfile, join
from pathlib import Path
import re # In Python, we can implement wildcards using the regex (regular expressions) library.
from collections import defaultdict
def find_substring_in_files(file_list, substr_target = "\\label", verbose = False):
labels_dict = defaultdict(int)
lab_idx = 0
for f in file_list:
with open(f, "r") as myfile:
# Types of wildcards: the asterisk (*)
# The ".+" symbol is used in place of "*" symbol
#if re.search("\label{.+}", fread.read():
i = 0
for line in myfile:
i += 1
line = line.rstrip("\n")
idx = line.find(substr_target)
if idx != -1:
#find "{"
beg_idx = idx
while (True):
beg_idx+=1
if line[beg_idx] == '{' or beg_idx >= len(line)-1:
break
end_idx = beg_idx
while (True):
end_idx+=1
if line[end_idx] == '}' or end_idx >= len(line)-1:
break
my_key = line[beg_idx + 1 : end_idx]
if labels_dict[my_key] == 0:
lab_idx += 1
labels_dict[my_key] += 1
if verbose and labels_dict[my_key] >= 1:
print ("idx %d, key = %s, in file %s, line %d" %(lab_idx, my_key, f, i))
return labels_dict
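# For simple, non-nested arguments the manual brace scan above is equivalent to
# a regex such as re.findall(r"\\label\{([^}]*)\}", line); the hand-rolled scan
# additionally tolerates an unterminated brace at the end of a line.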
if __name__ == "__main__":
""" for PhD thesis proposal """
if 1:
#mypath = '/media/ccjData3_HDD/Downloads2/proposal_phd_ccj_2021-Jan23'
mypath = '/media/ccjData3_HDD/Downloads2/proposal_phd_ccj_2021_Jan25'
#onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
#result = [y for x in os.walk(mypath) for y in glob(os.path.join(x[0], '*.tex'), recursive=True)]
result = list(Path(mypath).rglob("*.[tT][eE][xX]"))
#for i,f in enumerate(result):
# print ("i = ", f)
"""
If your file is not too large, you can read it into a string,
and just use that (easier and often faster than reading and checking line per line)
"""
#find all the labels
        lab_str = "\\label"
labels_dict = find_substring_in_files(result, lab_str, verbose = True)
print ("find %d labels" %len(labels_dict.keys()))
#find all the refs
ref_str = "\\ref"
print ("find ref")
ref_dict = find_substring_in_files(result, ref_str, verbose = True)
print ("find %d refs" %len(ref_dict.keys()))
j = 0
for lab in labels_dict.keys():
#print ("checking label %s" %lab)
if lab not in ref_dict.keys():
j += 1
print ("j = %d, label %s NOT used by \\ref{}" %(j, lab))
|
{"hexsha": "853c5f481ff671d995eef1701488912636ae6942", "size": 3338, "ext": "py", "lang": "Python", "max_stars_repo_path": "check_latex_label_ref.py", "max_stars_repo_name": "ccj5351/func_utility", "max_stars_repo_head_hexsha": "95a7ab515433cd012c6ae34bb4f970f4ad66e3f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "check_latex_label_ref.py", "max_issues_repo_name": "ccj5351/func_utility", "max_issues_repo_head_hexsha": "95a7ab515433cd012c6ae34bb4f970f4ad66e3f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "check_latex_label_ref.py", "max_forks_repo_name": "ccj5351/func_utility", "max_forks_repo_head_hexsha": "95a7ab515433cd012c6ae34bb4f970f4ad66e3f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6813186813, "max_line_length": 105, "alphanum_fraction": 0.5242660276, "include": true, "reason": "import numpy", "num_tokens": 858}
|
"""canonical_test.py"""
import numpy as np
import pytest
import scipy.linalg
from control.tests.conftest import slycotonly
from control import ss, tf, tf2ss
from control.canonical import canonical_form, reachable_form, \
observable_form, modal_form, similarity_transform, bdschur
from control.exception import ControlNotImplemented
class TestCanonical:
"""Tests for the canonical forms class"""
def test_reachable_form(self):
"""Test the reachable canonical form"""
# Create a system in the reachable canonical form
coeffs = [1.0, 2.0, 3.0, 4.0, 1.0]
A_true = np.polynomial.polynomial.polycompanion(coeffs)
A_true = np.fliplr(np.rot90(A_true))
B_true = np.array([[1.0, 0.0, 0.0, 0.0]]).T
C_true = np.array([[1.0, 1.0, 1.0, 1.0]])
D_true = 42.0
# Perform a coordinate transform with a random invertible matrix
T_true = np.array([[-0.27144004, -0.39933167, 0.75634684, 0.44135471],
[-0.74855725, -0.39136285, -0.18142339, -0.50356997],
[-0.40688007, 0.81416369, 0.38002113, -0.16483334],
[-0.44769516, 0.15654653, -0.50060858, 0.72419146]])
A = np.linalg.solve(T_true, A_true).dot(T_true)
B = np.linalg.solve(T_true, B_true)
C = C_true.dot(T_true)
D = D_true
# Create a state space system and convert it to the reachable canonical form
sys_check, T_check = canonical_form(ss(A, B, C, D), "reachable")
# Check against the true values
np.testing.assert_array_almost_equal(sys_check.A, A_true)
np.testing.assert_array_almost_equal(sys_check.B, B_true)
np.testing.assert_array_almost_equal(sys_check.C, C_true)
np.testing.assert_array_almost_equal(sys_check.D, D_true)
np.testing.assert_array_almost_equal(T_check, T_true)
# Reachable form only supports SISO
sys = tf([[ [1], [1] ]], [[ [1, 2, 1], [1, 2, 1] ]])
np.testing.assert_raises(ControlNotImplemented, reachable_form, sys)
def test_unreachable_system(self):
"""Test reachable canonical form with an unreachable system"""
# Create an unreachable system
A = np.array([[1., 2., 2.],
[4., 5., 5.],
[7., 8., 8.]])
B = np.array([[1.], [1.],[1.]])
C = np.array([[1., 1.,1.]])
D = np.array([[42.0]])
sys = ss(A, B, C, D)
# Check if an exception is raised
np.testing.assert_raises(ValueError, canonical_form, sys, "reachable")
def test_observable_form(self):
"""Test the observable canonical form"""
# Create a system in the observable canonical form
coeffs = [1.0, 2.0, 3.0, 4.0, 1.0]
A_true = np.polynomial.polynomial.polycompanion(coeffs)
A_true = np.fliplr(np.flipud(A_true))
B_true = np.array([[1.0, 1.0, 1.0, 1.0]]).T
C_true = np.array([[1.0, 0.0, 0.0, 0.0]])
D_true = 42.0
# Perform a coordinate transform with a random invertible matrix
T_true = np.array([[-0.27144004, -0.39933167, 0.75634684, 0.44135471],
[-0.74855725, -0.39136285, -0.18142339, -0.50356997],
[-0.40688007, 0.81416369, 0.38002113, -0.16483334],
[-0.44769516, 0.15654653, -0.50060858, 0.72419146]])
A = np.linalg.solve(T_true, A_true).dot(T_true)
B = np.linalg.solve(T_true, B_true)
C = C_true.dot(T_true)
D = D_true
# Create a state space system and convert it to the observable canonical form
sys_check, T_check = canonical_form(ss(A, B, C, D), "observable")
# Check against the true values
np.testing.assert_array_almost_equal(sys_check.A, A_true)
np.testing.assert_array_almost_equal(sys_check.B, B_true)
np.testing.assert_array_almost_equal(sys_check.C, C_true)
np.testing.assert_array_almost_equal(sys_check.D, D_true)
np.testing.assert_array_almost_equal(T_check, T_true)
def test_observable_form_MIMO(self):
"""Test error as Observable form only supports SISO"""
sys = tf([[[1], [1] ]], [[[1, 2, 1], [1, 2, 1]]])
with pytest.raises(ControlNotImplemented):
observable_form(sys)
def test_unobservable_system(self):
"""Test observable canonical form with an unobservable system"""
# Create an unobservable system
A = np.array([[1., 2., 2.],
[4., 5., 5.],
[7., 8., 8.]])
B = np.array([[1.], [1.], [1.]])
C = np.array([[1., 1., 1.]])
D = 42.0
sys = ss(A, B, C, D)
# Check if an exception is raised
with pytest.raises(ValueError):
canonical_form(sys, "observable")
def test_arguments(self):
# Additional unit tests added on 25 May 2019 to increase coverage
# Unknown canonical forms should generate exception
sys = tf([1], [1, 2, 1])
with pytest.raises(ControlNotImplemented):
canonical_form(sys, 'unknown')
def test_similarity(self):
"""Test similarty transform"""
# Single input, single output systems
siso_ini = tf2ss(tf([1, 1], [1, 1, 1]))
for form in 'reachable', 'observable':
# Convert the system to one of the canonical forms
siso_can, T_can = canonical_form(siso_ini, form)
# Use a similarity transformation to transform it back
siso_sim = similarity_transform(siso_can, np.linalg.inv(T_can))
# Make sure everything goes back to the original form
np.testing.assert_array_almost_equal(siso_sim.A, siso_ini.A)
np.testing.assert_array_almost_equal(siso_sim.B, siso_ini.B)
np.testing.assert_array_almost_equal(siso_sim.C, siso_ini.C)
np.testing.assert_array_almost_equal(siso_sim.D, siso_ini.D)
# Multi-input, multi-output systems
mimo_ini = ss(
[[-1, 1, 0, 0], [0, -2, 1, 0], [0, 0, -3, 1], [0, 0, 0, -4]],
[[1, 0], [0, 0], [0, 1], [1, 1]],
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
np.zeros((3, 2)))
# Simple transformation: row/col flips + scaling
mimo_txf = np.array(
[[0, 1, 0, 0], [2, 0, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
# Transform the system and transform it back
mimo_sim = similarity_transform(mimo_ini, mimo_txf)
mimo_new = similarity_transform(mimo_sim, np.linalg.inv(mimo_txf))
np.testing.assert_array_almost_equal(mimo_new.A, mimo_ini.A)
np.testing.assert_array_almost_equal(mimo_new.B, mimo_ini.B)
np.testing.assert_array_almost_equal(mimo_new.C, mimo_ini.C)
np.testing.assert_array_almost_equal(mimo_new.D, mimo_ini.D)
        # Make sure rescaling by identity does nothing
mimo_new = similarity_transform(mimo_ini, np.eye(4))
np.testing.assert_array_almost_equal(mimo_new.A, mimo_ini.A)
np.testing.assert_array_almost_equal(mimo_new.B, mimo_ini.B)
np.testing.assert_array_almost_equal(mimo_new.C, mimo_ini.C)
np.testing.assert_array_almost_equal(mimo_new.D, mimo_ini.D)
# Time rescaling
mimo_tim = similarity_transform(mimo_ini, np.eye(4), timescale=0.3)
mimo_new = similarity_transform(mimo_tim, np.eye(4), timescale=1/0.3)
np.testing.assert_array_almost_equal(mimo_new.A, mimo_ini.A)
np.testing.assert_array_almost_equal(mimo_new.B, mimo_ini.B)
np.testing.assert_array_almost_equal(mimo_new.C, mimo_ini.C)
np.testing.assert_array_almost_equal(mimo_new.D, mimo_ini.D)
# Time + transformation, in one step
mimo_sim = similarity_transform(mimo_ini, mimo_txf, timescale=0.3)
mimo_new = similarity_transform(mimo_sim, np.linalg.inv(mimo_txf),
timescale=1/0.3)
np.testing.assert_array_almost_equal(mimo_new.A, mimo_ini.A)
np.testing.assert_array_almost_equal(mimo_new.B, mimo_ini.B)
np.testing.assert_array_almost_equal(mimo_new.C, mimo_ini.C)
np.testing.assert_array_almost_equal(mimo_new.D, mimo_ini.D)
# Time + transformation, in two steps
mimo_sim = similarity_transform(mimo_ini, mimo_txf, timescale=0.3)
mimo_tim = similarity_transform(mimo_sim, np.eye(4), timescale=1/0.3)
mimo_new = similarity_transform(mimo_tim, np.linalg.inv(mimo_txf))
np.testing.assert_array_almost_equal(mimo_new.A, mimo_ini.A)
np.testing.assert_array_almost_equal(mimo_new.B, mimo_ini.B)
np.testing.assert_array_almost_equal(mimo_new.C, mimo_ini.C)
np.testing.assert_array_almost_equal(mimo_new.D, mimo_ini.D)
def extract_bdiag(a, blksizes):
"""
Extract block diagonals
Parameters
----------
a - matrix to get blocks from
blksizes - sequence of block diagonal sizes
Returns
-------
Block diagonals
Notes
-----
Conceptually, inverse of scipy.linalg.block_diag
"""
idx0s = np.hstack([0, np.cumsum(blksizes[:-1], dtype=int)])
return tuple(a[idx0:idx0+blksize,idx0:idx0+blksize]
for idx0, blksize in zip(idx0s, blksizes))
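# Example sketch: extract_bdiag recovers what scipy.linalg.block_diag assembled,
#   a = scipy.linalg.block_diag(np.eye(2), 3 * np.eye(1))
#   extract_bdiag(a, [2, 1])  # -> (2x2 identity, [[3.]])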
def companion_from_eig(eigvals):
"""
Find companion matrix for given eigenvalue sequence.
"""
from numpy.polynomial.polynomial import polyfromroots, polycompanion
return polycompanion(polyfromroots(eigvals)).real
def block_diag_from_eig(eigvals):
"""
Find block-diagonal matrix for given eigenvalue sequence
Returns ideal, non-defective, schur block-diagonal form.
"""
blocks = []
i = 0
while i < len(eigvals):
e = eigvals[i]
if e.imag == 0:
blocks.append(e.real)
i += 1
else:
assert e == eigvals[i+1].conjugate()
blocks.append([[e.real, e.imag],
[-e.imag, e.real]])
i += 2
return scipy.linalg.block_diag(*blocks)
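# Example sketch: block_diag_from_eig([-1, -2+1j, -2-1j]) gives
#   [[-1,  0,  0],
#    [ 0, -2,  1],
#    [ 0, -1, -2]]
# one 1x1 block for the real eigenvalue, one 2x2 block per conjugate pair.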
@slycotonly
@pytest.mark.parametrize(
"eigvals, condmax, blksizes",
[
([-1,-2,-3,-4,-5], None, [1,1,1,1,1]),
([-1,-2,-3,-4,-5], 1.01, [5]),
([-1,-1,-2,-2,-2], None, [2,3]),
([-1+1j,-1-1j,-2+2j,-2-2j,-2], None, [2,2,1]),
])
def test_bdschur_ref(eigvals, condmax, blksizes):
# "reference" check
# uses companion form to introduce numerical complications
from numpy.linalg import solve
a = companion_from_eig(eigvals)
b, t, test_blksizes = bdschur(a, condmax=condmax)
np.testing.assert_array_equal(np.sort(test_blksizes), np.sort(blksizes))
bdiag_b = scipy.linalg.block_diag(*extract_bdiag(b, test_blksizes))
np.testing.assert_array_almost_equal(bdiag_b, b)
np.testing.assert_array_almost_equal(solve(t, a).dot(t), b)
@slycotonly
@pytest.mark.parametrize(
"eigvals, sorted_blk_eigvals, sort",
[
([-2,-1,0,1,2], [2,1,0,-1,-2], 'continuous'),
([-2,-2+2j,-2-2j,-2-3j,-2+3j], [-2+3j,-2+2j,-2], 'continuous'),
(np.exp([-0.2,-0.1,0,0.1,0.2]), np.exp([0.2,0.1,0,-0.1,-0.2]), 'discrete'),
(np.exp([-0.2+0.2j,-0.2-0.2j, -0.01, -0.03-0.3j,-0.03+0.3j,]),
np.exp([-0.01, -0.03+0.3j, -0.2+0.2j]),
'discrete'),
])
def test_bdschur_sort(eigvals, sorted_blk_eigvals, sort):
# use block diagonal form to prevent numerical complications
    # for discrete case, exp and log introduce round-off, can't test as completely
a = block_diag_from_eig(eigvals)
b, t, blksizes = bdschur(a, sort=sort)
assert len(blksizes) == len(sorted_blk_eigvals)
blocks = extract_bdiag(b, blksizes)
for block, blk_eigval in zip(blocks, sorted_blk_eigvals):
test_eigvals = np.linalg.eigvals(block)
np.testing.assert_allclose(test_eigvals.real,
blk_eigval.real)
np.testing.assert_allclose(abs(test_eigvals.imag),
blk_eigval.imag)
@slycotonly
def test_bdschur_defective():
# the eigenvalues of this simple defective matrix cannot be separated
# a previous version of the bdschur would fail on this
a = companion_from_eig([-1, -1])
amodal, tmodal, blksizes = bdschur(a, condmax=1e200)
def test_bdschur_empty():
# empty matrix in gives empty matrix out
a = np.empty(shape=(0,0))
b, t, blksizes = bdschur(a)
np.testing.assert_array_equal(b, a)
np.testing.assert_array_equal(t, a)
np.testing.assert_array_equal(blksizes, np.array([]))
def test_bdschur_condmax_lt_1():
# require condmax >= 1.0
with pytest.raises(ValueError):
bdschur(1, condmax=np.nextafter(1, 0))
@slycotonly
def test_bdschur_invalid_sort():
# sort must be in ('continuous', 'discrete')
with pytest.raises(ValueError):
bdschur(1, sort='no-such-sort')
@slycotonly
@pytest.mark.parametrize(
"A_true, B_true, C_true, D_true",
[(np.diag([4.0, 3.0, 2.0, 1.0]), # order from largest to smallest
np.array([[1.1, 2.2, 3.3, 4.4]]).T,
np.array([[1.3, 1.4, 1.5, 1.6]]),
np.array([[42.0]])),
(np.array([[-1, 1, 0, 0],
[-1, -1, 0, 0],
[ 0, 0, -2, 1],
[ 0, 0, 0, -3]]),
np.array([[0, 1, 0, 0],
[0, 0, 0, 1]]).T,
np.array([[1, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]]),
np.array([[0, 1],
[1, 0],
[0, 0]])),
],
ids=["sys1", "sys2"])
def test_modal_form(A_true, B_true, C_true, D_true):
# Check modal_canonical corresponds to bdschur
# Perform a coordinate transform with a random invertible matrix
T_true = np.array([[-0.27144004, -0.39933167, 0.75634684, 0.44135471],
[-0.74855725, -0.39136285, -0.18142339, -0.50356997],
[-0.40688007, 0.81416369, 0.38002113, -0.16483334],
[-0.44769516, 0.15654653, -0.50060858, 0.72419146]])
A = np.linalg.solve(T_true, A_true).dot(T_true)
B = np.linalg.solve(T_true, B_true)
C = C_true.dot(T_true)
D = D_true
# Create a state space system and convert it to modal canonical form
sys_check, T_check = modal_form(ss(A, B, C, D))
a_bds, t_bds, _ = bdschur(A)
np.testing.assert_array_almost_equal(sys_check.A, a_bds)
np.testing.assert_array_almost_equal(T_check, t_bds)
np.testing.assert_array_almost_equal(sys_check.B, np.linalg.solve(t_bds, B))
np.testing.assert_array_almost_equal(sys_check.C, C.dot(t_bds))
np.testing.assert_array_almost_equal(sys_check.D, D)
# canonical_form(...,'modal') is the same as modal_form with default parameters
cf_sys, T_cf = canonical_form(ss(A, B, C, D), 'modal')
np.testing.assert_array_almost_equal(cf_sys.A, sys_check.A)
np.testing.assert_array_almost_equal(cf_sys.B, sys_check.B)
np.testing.assert_array_almost_equal(cf_sys.C, sys_check.C)
np.testing.assert_array_almost_equal(cf_sys.D, sys_check.D)
np.testing.assert_array_almost_equal(T_check, T_cf)
# Make sure Hankel coefficients are OK
for i in range(A.shape[0]):
np.testing.assert_almost_equal(
np.dot(np.dot(C_true, np.linalg.matrix_power(A_true, i)),
B_true),
np.dot(np.dot(C, np.linalg.matrix_power(A, i)), B))
@slycotonly
@pytest.mark.parametrize(
"condmax, len_blksizes",
[(1.1, 1),
(None, 5)])
def test_modal_form_condmax(condmax, len_blksizes):
# condmax passed through as expected
a = companion_from_eig([-1, -2, -3, -4, -5])
amodal, tmodal, blksizes = bdschur(a, condmax=condmax)
assert len(blksizes) == len_blksizes
xsys = ss(a, [[1],[0],[0],[0],[0]], [0,0,0,0,1], 0)
zsys, t = modal_form(xsys, condmax=condmax)
np.testing.assert_array_almost_equal(zsys.A, amodal)
np.testing.assert_array_almost_equal(t, tmodal)
np.testing.assert_array_almost_equal(zsys.B, np.linalg.solve(tmodal, xsys.B))
np.testing.assert_array_almost_equal(zsys.C, xsys.C.dot(tmodal))
np.testing.assert_array_almost_equal(zsys.D, xsys.D)
@slycotonly
@pytest.mark.parametrize(
"sys_type",
['continuous',
'discrete'])
def test_modal_form_sort(sys_type):
a = companion_from_eig([0.1+0.9j,0.1-0.9j, 0.2+0.8j, 0.2-0.8j])
amodal, tmodal, blksizes = bdschur(a, sort=sys_type)
dt = 0 if sys_type == 'continuous' else True
    xsys = ss(a, [[1],[0],[0],[0]], [0,0,0,1], 0, dt)
zsys, t = modal_form(xsys, sort=True)
my_amodal = np.linalg.solve(tmodal, a).dot(tmodal)
np.testing.assert_array_almost_equal(amodal, my_amodal)
np.testing.assert_array_almost_equal(t, tmodal)
np.testing.assert_array_almost_equal(zsys.A, amodal)
np.testing.assert_array_almost_equal(zsys.B, np.linalg.solve(tmodal, xsys.B))
np.testing.assert_array_almost_equal(zsys.C, xsys.C.dot(tmodal))
np.testing.assert_array_almost_equal(zsys.D, xsys.D)
def test_modal_form_empty():
# empty system should be returned as-is
# t empty matrix
insys = ss([], [], [], 123)
outsys, t = modal_form(insys)
np.testing.assert_array_equal(outsys.A, insys.A)
np.testing.assert_array_equal(outsys.B, insys.B)
np.testing.assert_array_equal(outsys.C, insys.C)
np.testing.assert_array_equal(outsys.D, insys.D)
assert t.shape == (0,0)
|
{"hexsha": "0db6b924c0d8d2ee1795e290155fbf9f820dd0ce", "size": 17425, "ext": "py", "lang": "Python", "max_stars_repo_path": "control/tests/canonical_test.py", "max_stars_repo_name": "AI-App/Python-Control", "max_stars_repo_head_hexsha": "c2f6f8ab94bbc8b5ef1deb33c3d2df39e00d22bf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-02T06:06:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-01T23:47:52.000Z", "max_issues_repo_path": "control/tests/canonical_test.py", "max_issues_repo_name": "AI-App/Python-Control", "max_issues_repo_head_hexsha": "c2f6f8ab94bbc8b5ef1deb33c3d2df39e00d22bf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2021-01-14T05:33:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-06T15:35:11.000Z", "max_forks_repo_path": "control/tests/canonical_test.py", "max_forks_repo_name": "AI-App/Python-Control", "max_forks_repo_head_hexsha": "c2f6f8ab94bbc8b5ef1deb33c3d2df39e00d22bf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2015-10-19T06:39:56.000Z", "max_forks_repo_forks_event_max_datetime": "2015-10-19T06:39:56.000Z", "avg_line_length": 39.1573033708, "max_line_length": 85, "alphanum_fraction": 0.6227833572, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 5140}
|
[STATEMENT]
lemma interval_integral_eq_integral':
fixes f :: "real \<Rightarrow> 'a::euclidean_space"
shows "a \<le> b \<Longrightarrow> set_integrable lborel (einterval a b) f \<Longrightarrow> LBINT x=a..b. f x = integral (einterval a b) f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a \<le> b; set_integrable lborel (einterval a b) f\<rbrakk> \<Longrightarrow> interval_lebesgue_integral lborel a b f = integral (einterval a b) f
[PROOF STEP]
by (subst interval_lebesgue_integral_le_eq, simp) (rule set_borel_integral_eq_integral)
|
{"llama_tokens": 208, "file": null, "length": 1}
|
\section{Models}
\label{sec:models}
%A description of the models that you'll be using as baselines, and a preliminary description of the model or models that will be the focus of your investigation. At this early stage, some aspects of these models might not yet be worked out, so preliminary descriptions are fine.
In this section we give a brief summary of the different models we consider in this study.
\subsection{Random}
\label{subsec:randommodel}
This is the simplest case, where the model randomly selects one of the three classes: \{\texttt{contradiction}, \texttt{neutral}, \texttt{entailment}\}.
\subsection{Baseline}
\label{subsec:baselinemodel}
Our baseline model is a hypothesis-only simple RNN classifier. Hypothesis-only baselines for NLI tasks can be remarkably robust, which motivates this choice. For the embedding layer, we use 50-dimensional GloVe \cite{pennington2014glove} embeddings, and we use a unidirectional LSTM with a hidden dimension of 50.
\subsection{BERT}
\label{subsec:bertmodel}
BERT \cite{devlin-etal-2019-bert} is one of the Transformer-based models that we include in our study. We use \texttt{bert-base-uncased} which is a 12-layer, 768-hidden, 12-heads, 110M parameters model.
\subsection{RoBERTa}
\label{subsec:robertamodel}
RoBERTa \cite{liu2019roberta} is the second Transformer-based model that we include in our study. We use \texttt{roberta-base}, which is a 12-layer, 768-hidden, 12-heads, 125M parameters model.
|
{"hexsha": "58b731be699a7d9fa410a354dce67dd95d389d81", "size": 1469, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "writeup_cs224u_expprotocol/models.tex", "max_stars_repo_name": "abgoswam/cs224u", "max_stars_repo_head_hexsha": "33e1a22d1c9586b473f43b388163a74264e9258a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "writeup_cs224u_expprotocol/models.tex", "max_issues_repo_name": "abgoswam/cs224u", "max_issues_repo_head_hexsha": "33e1a22d1c9586b473f43b388163a74264e9258a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "writeup_cs224u_expprotocol/models.tex", "max_forks_repo_name": "abgoswam/cs224u", "max_forks_repo_head_hexsha": "33e1a22d1c9586b473f43b388163a74264e9258a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.5, "max_line_length": 326, "alphanum_fraction": 0.7923757658, "num_tokens": 378}
|
import numpy as np
import src.coding.codelength as codelength
import random
import collections
from infomap import Infomap
def merge_modules(trajectories, module_assignments, scheme="Huffman", init_module=True, init_node=True, deterministic=False, n_trials=10, n_itr=1000):
def smallest_module_pair(module_assignments_opt):
module_histogram = collections.Counter(module_assignments_opt).most_common()
if len(module_histogram) < 2:
multiple_modules = False
module1 = None
module2 = None
else:
k_smallest = module_histogram.pop()
k_next_smallest = module_histogram.pop()
multiple_modules = True
module1 = k_smallest[0]
module2 = k_next_smallest[0]
return module1, module2, multiple_modules
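    # e.g. module_assignments_opt = [0, 0, 1, 2, 2, 2] yields module1 = 1 (the
    # least populated module) and module2 = 0 (the next least), the merge pair.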
MCL = codelength.AverageCodeLength(module_assignments, trajectories=trajectories, scheme=scheme, init_module=init_module, init_node=init_node)
module_assignments_opt = module_assignments.copy()
    if deterministic:
# Deterministically merge smallest module pair
merged_assignments = module_assignments.copy()
while True:
module1, module2, multiple_modules = smallest_module_pair(merged_assignments)
if multiple_modules == False:
break
merged_assignments = [module1 if v_module == module2 else v_module for v_module in merged_assignments]
ACL = codelength.AverageCodeLength(merged_assignments, trajectories=trajectories, scheme=scheme, init_module=init_module, init_node=init_node)
if ACL < MCL:
MCL = ACL
module_assignments_opt = merged_assignments
else:
# Randomly merge module pairs
for trial in range(n_trials):
merged_assignments = module_assignments.copy()
for t in range(n_itr):
modules = list(set(merged_assignments))
if len(modules) == 1:
break
else:
ids = random.sample(modules, 2)
merged_assignments = [ids[0] if v_module == ids[1] else v_module for v_module in merged_assignments]
ACL = codelength.AverageCodeLength(merged_assignments, trajectories=trajectories, scheme=scheme, init_module=init_module, init_node=init_node)
if ACL < MCL:
MCL = ACL
module_assignments_opt = merged_assignments
# Rename module labels to a set of labels from zero
module_labels = list(set(module_assignments_opt))
module_assignments_opt_ = [module_labels.index(k) for k in module_assignments_opt]
return MCL, module_assignments_opt_
def trajectories_to_edgelist(trajectories):
edgelist = []
for trajectory in trajectories:
for i in range(len(trajectory)-1):
edgelist.append([trajectory[i],trajectory[i+1]])
return edgelist
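# e.g. trajectories_to_edgelist([[0, 1, 2], [2, 0]]) == [[0, 1], [1, 2], [2, 0]]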
def Infomap_rawdir(edgelist, vertices):
im = Infomap("--two-level -f rawdir", num_trials=100)
if vertices is not None:
im.add_nodes(vertices)
for edge in edgelist:
im.add_link(edge[0], edge[1])
im.run()
d = {}
for node in im.tree:
if node.is_leaf:
d[node.node_id] = node.module_id-1
module_assignments_ = dict(sorted(d.items()))
module_assignments = list(module_assignments_.values())
return module_assignments
def Infomap_st(trajectories, vertices=None, scheme="Huffman", init_module=True, init_node=True, deterministic=False, n_trials=10, n_itr=1000):
# Initial partition by Infomap
edgelist = trajectories_to_edgelist(trajectories)
module_assignments = Infomap_rawdir(edgelist, vertices)
# Correction by the single-trajectory map equation
    MCL_stMapEqn, module_assignments_stMapEqn = merge_modules(trajectories, module_assignments, \
scheme=scheme, init_module=init_module, init_node=init_node, \
deterministic=deterministic, n_trials=n_trials, n_itr=n_itr)
return MCL_stMapEqn, module_assignments_stMapEqn
|
{"hexsha": "021188656a78573a0c336b8b2035abd577b281a9", "size": 3629, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/stMapEqn_Infomap.py", "max_stars_repo_name": "tatsuro-kawamoto/single-trajectory_map_equation", "max_stars_repo_head_hexsha": "5dbb4c564e9563a6f8f669319dce4842ce531736", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/stMapEqn_Infomap.py", "max_issues_repo_name": "tatsuro-kawamoto/single-trajectory_map_equation", "max_issues_repo_head_hexsha": "5dbb4c564e9563a6f8f669319dce4842ce531736", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/stMapEqn_Infomap.py", "max_forks_repo_name": "tatsuro-kawamoto/single-trajectory_map_equation", "max_forks_repo_head_hexsha": "5dbb4c564e9563a6f8f669319dce4842ce531736", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2, "max_line_length": 150, "alphanum_fraction": 0.777073574, "include": true, "reason": "import numpy", "num_tokens": 930}
|
import numpy as np
import nanotune as nt
from nanotune.tests.mock_classifier import MockClassifer
from nanotune.tuningstages.gatecharacterization1d import GateCharacterization1D
atol = 1e-05
def test_gatecharacterizaton1D_run(gatecharacterization1D_settings, experiment):
pinchoff = GateCharacterization1D(
classifier=MockClassifer("pinchoff"),
**gatecharacterization1D_settings, # readout_s., setpoint_s., data_s.
)
tuning_result = pinchoff.run_stage(plot_result=False)
assert tuning_result.success
assert not tuning_result.termination_reasons
assert tuning_result.ml_result
|
{"hexsha": "ffcb77f4df4f112771b8bf09270d245e0464452b", "size": 622, "ext": "py", "lang": "Python", "max_stars_repo_path": "nanotune/tests/tuningstages/test_gatecharacterization1d.py", "max_stars_repo_name": "jenshnielsen/nanotune", "max_stars_repo_head_hexsha": "0f2a252d1986f9a5ff155fad626658f85aec3f3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-02-24T14:32:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T16:37:26.000Z", "max_issues_repo_path": "nanotune/tests/tuningstages/test_gatecharacterization1d.py", "max_issues_repo_name": "jenshnielsen/nanotune", "max_issues_repo_head_hexsha": "0f2a252d1986f9a5ff155fad626658f85aec3f3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 149, "max_issues_repo_issues_event_min_datetime": "2021-03-23T14:44:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T06:09:07.000Z", "max_forks_repo_path": "nanotune/tests/tuningstages/test_gatecharacterization1d.py", "max_forks_repo_name": "jenshnielsen/nanotune", "max_forks_repo_head_hexsha": "0f2a252d1986f9a5ff155fad626658f85aec3f3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-03-29T13:36:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T23:06:35.000Z", "avg_line_length": 31.1, "max_line_length": 80, "alphanum_fraction": 0.8006430868, "include": true, "reason": "import numpy", "num_tokens": 155}
|
// Copyright (c) 2014-2020 The Gridcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "main.h"
#include "gridcoin/support/block_finder.h"
#include <boost/test/unit_test.hpp>
#include <array>
#include <cstdint>
namespace
{
template<size_t Size>
class BlockChain
{
public:
BlockChain()
{
// Initialize block link.
for(CBlockIndex* block = blocks.begin(); block != blocks.end(); ++block)
{
block->SetNull();
CBlockIndex* prev = std::prev(block);
CBlockIndex* next = std::next(block);
if(block != &blocks.front())
{
block->pprev = prev;
block->nHeight = prev->nHeight + 1;
block->nTime = prev->nTime + 10;
}
if(block != &blocks.back())
block->pnext = next;
}
// Setup global variables.
pindexBest = &blocks.back();
pindexGenesisBlock = &blocks.front();
nBestHeight = blocks.back().nHeight;
}
std::array<CBlockIndex, Size> blocks;
};
}
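// The fixture above links Size blocks with heights 0..Size-1 and timestamps
// 0, 10, 20, ... (SetNull() zeroes the genesis block's nTime), which the
// time-based tests below rely on.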
BOOST_AUTO_TEST_SUITE(block_finder_tests);
BOOST_AUTO_TEST_CASE(FindBlockInNormalChainShouldWork)
{
BlockChain<100> chain;
GRC::BlockFinder finder;
for(auto& block : chain.blocks)
BOOST_CHECK_EQUAL(&block, finder.FindByHeight(block.nHeight));
}
BOOST_AUTO_TEST_CASE(FindBlockAboveHighestHeightShouldReturnHighestBlock)
{
BlockChain<100> chain;
GRC::BlockFinder finder;
CBlockIndex& last = chain.blocks.back();
BOOST_CHECK_EQUAL(&last, finder.FindByHeight(101));
}
BOOST_AUTO_TEST_CASE(FindBlockByHeightShouldWorkOnChainsWithJustOneBlock)
{
BlockChain<1> chain;
GRC::BlockFinder finder;
BOOST_CHECK_EQUAL(&chain.blocks.front(), finder.FindByHeight(0));
BOOST_CHECK_EQUAL(&chain.blocks.front(), finder.FindByHeight(1));
BOOST_CHECK_EQUAL(&chain.blocks.front(), finder.FindByHeight(-1));
}
BOOST_AUTO_TEST_CASE(FindBlockByTimeShouldReturnNextYoungestBlock)
{
// Chain with block times 0, 10, 20, 30, 40 etc.
BlockChain<10> chain;
GRC::BlockFinder finder;
// Finding the block older than time 10 should return block #2
// which has time 20.
BOOST_CHECK_EQUAL(&chain.blocks[2], finder.FindByMinTime(11));
BOOST_CHECK_EQUAL(&chain.blocks[1], finder.FindByMinTime(10));
BOOST_CHECK_EQUAL(&chain.blocks[1], finder.FindByMinTime(9));
}
BOOST_AUTO_TEST_CASE(FindBlockByTimeShouldReturnLastBlockIfOlderThanTime)
{
BlockChain<10> chain;
GRC::BlockFinder finder;
BOOST_CHECK_EQUAL(&chain.blocks.back(), finder.FindByMinTime(999999));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "40feac136eb68a3375688c8933f8fe3938dec18a", "size": 2850, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/gridcoin/block_finder_tests.cpp", "max_stars_repo_name": "sweede-se/Gridcoin-Research", "max_stars_repo_head_hexsha": "48eb9482bd978b67cf9e8a795048438acab980c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test/gridcoin/block_finder_tests.cpp", "max_issues_repo_name": "sweede-se/Gridcoin-Research", "max_issues_repo_head_hexsha": "48eb9482bd978b67cf9e8a795048438acab980c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/gridcoin/block_finder_tests.cpp", "max_forks_repo_name": "sweede-se/Gridcoin-Research", "max_forks_repo_head_hexsha": "48eb9482bd978b67cf9e8a795048438acab980c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9782608696, "max_line_length": 84, "alphanum_fraction": 0.6522807018, "num_tokens": 676}
|
import cv2
import numpy as np
# compare_ssim was removed from skimage.measure in newer scikit-image releases;
# structural_similarity is its replacement, aliased so the call sites below work.
from skimage.metrics import structural_similarity as compare_ssim
def noiseCalibrate(cap,rob,bbLC,bbRC):
diffPercent=0
for i in range(30):
ret,frame=cap.read()
roi=frame[bbLC[0]:bbRC[0], bbLC[1]:bbRC[1]]
(score,diff)=compare_ssim(rob,roi,full=True,multichannel=True)
diffPercent+=score
diffPercent/=30
return diffPercent-.03
cap = cv2.VideoCapture(0)
bbLC=(0,0)
bbRC=(300,300)
textOrg=(20,50)
kernel=np.ones((5,5),np.uint8)
dontcare,temp=cap.read()
rob=temp[bbLC[0]:bbRC[0], bbLC[1]:bbRC[1]]
diffPercent=noiseCalibrate(cap,rob,bbLC,bbRC)
fingerCount=0
while True:
ret,frame=cap.read()
roi=frame[bbLC[0]:bbRC[0], bbLC[1]:bbRC[1]]
(score,diff)=compare_ssim(rob,roi,full=True,multichannel=True)
cv2.rectangle(frame,bbLC,bbRC,(0,255,0),0)
if(score<diffPercent):
diff = (diff * 255).astype("uint8")
diff = cv2.morphologyEx(diff,cv2.MORPH_OPEN,kernel)
diff = cv2.cvtColor(diff,cv2.COLOR_BGR2GRAY)
diff = cv2.GaussianBlur(diff,(5,5),100)
th= cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV| cv2.THRESH_OTSU)[1]
cnt, hierarchy = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = max(cnt, key=lambda x: cv2.contourArea(x))
cv2.drawContours(frame, [cnt], -1, (255,255,0), 2)
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
if defects is not None:
count=0
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
cv2.line(frame,start,end,[0,255,0],2)
a = np.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = np.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
c = np.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))
if angle <= np.pi/2: # angle less than 90 degree, treat as fingers
count += 1
cv2.line(frame,start,far,[255,0,0],2)
cv2.line(frame,far,end,[255,0,0],2)
cv2.circle(frame, far, 4, [0, 0, 255], -1)
if count > 0:
count = count+1
fingerCount=count
cv2.putText(frame, str(fingerCount), textOrg,cv2.FONT_HERSHEY_SIMPLEX,1,[255,255,255])
else:
mask = np.zeros(roi.shape, dtype='uint8')
th = np.zeros(roi.shape, dtype='uint8')
cv2.imshow('diff',diff)
cv2.imshow('sanitized',th)
cv2.imshow('Frame',frame)
if(cv2.waitKey(1) & 0xFF == ord('q')):
break
if (cv2.waitKey(1) & 0xFF == ord('r')):
dontcare,temp=cap.read()
rob=temp[bbLC[0]:bbRC[0], bbLC[1]:bbRC[1]]
diffPercent=noiseCalibrate(cap,rob,bbLC,bbRC)
bkgrem=cv2.bgsegm.createBackgroundSubtractorGSOC(replaceRate=0,propagationRate=0)
cap.release()
cv2.destroyAllWindows()
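# Note on the finger-count heuristic above: for each convexity defect with
# points start, end and far, the angle at `far` follows the law of cosines,
#   angle = arccos((b**2 + c**2 - a**2) / (2*b*c))
# where a = |start - end|, b = |start - far|, c = |end - far|; defects sharper
# than 90 degrees are counted as valleys between extended fingers, and one is
# added to convert valley count to finger count.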
|
{"hexsha": "6a6e2bdb0f4fc5f6e81c3dcc0e91cacdde69f31f", "size": 3097, "ext": "py", "lang": "Python", "max_stars_repo_path": "Cam Code/oldCam.py", "max_stars_repo_name": "MackQian/Robotics1Project", "max_stars_repo_head_hexsha": "ca611bbd71dcca397a46941d4d40d720c8faa58c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Cam Code/oldCam.py", "max_issues_repo_name": "MackQian/Robotics1Project", "max_issues_repo_head_hexsha": "ca611bbd71dcca397a46941d4d40d720c8faa58c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Cam Code/oldCam.py", "max_forks_repo_name": "MackQian/Robotics1Project", "max_forks_repo_head_hexsha": "ca611bbd71dcca397a46941d4d40d720c8faa58c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7051282051, "max_line_length": 98, "alphanum_fraction": 0.572489506, "include": true, "reason": "import numpy", "num_tokens": 1006}
|
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
@doc raw"""a metric value for some object
IoK8sApiCustomMetricsV1beta1MetricValue(;
apiVersion=nothing,
kind=nothing,
describedObject=nothing,
metricName=nothing,
timestamp=nothing,
windowSeconds=nothing,
value=nothing,
)
- apiVersion::String : APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
- kind::String : Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- describedObject::IoK8sApiCoreV1ObjectReference : a reference to the described object
- metricName::String : the name of the metric
- timestamp::IoK8sApimachineryPkgApisMetaV1Time : indicates the time at which the metrics were produced
- windowSeconds::Int64 : indicates the window ([Timestamp-Window, Timestamp]) from which these metrics were calculated, when returning rate metrics calculated from cumulative metrics (or zero for non-calculated instantaneous metrics).
- value::IoK8sApimachineryPkgApiResourceQuantity : the value of the metric for this
"""
mutable struct IoK8sApiCustomMetricsV1beta1MetricValue <: SwaggerModel
apiVersion::Any # spec type: Union{ Nothing, String } # spec name: apiVersion
kind::Any # spec type: Union{ Nothing, String } # spec name: kind
describedObject::Any # spec type: Union{ Nothing, IoK8sApiCoreV1ObjectReference } # spec name: describedObject
metricName::Any # spec type: Union{ Nothing, String } # spec name: metricName
timestamp::Any # spec type: Union{ Nothing, IoK8sApimachineryPkgApisMetaV1Time } # spec name: timestamp
windowSeconds::Any # spec type: Union{ Nothing, Int64 } # spec name: windowSeconds
value::Any # spec type: Union{ Nothing, IoK8sApimachineryPkgApiResourceQuantity } # spec name: value
function IoK8sApiCustomMetricsV1beta1MetricValue(;apiVersion=nothing, kind=nothing, describedObject=nothing, metricName=nothing, timestamp=nothing, windowSeconds=nothing, value=nothing)
o = new()
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("apiVersion"), apiVersion)
setfield!(o, Symbol("apiVersion"), apiVersion)
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("kind"), kind)
setfield!(o, Symbol("kind"), kind)
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("describedObject"), describedObject)
setfield!(o, Symbol("describedObject"), describedObject)
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("metricName"), metricName)
setfield!(o, Symbol("metricName"), metricName)
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("timestamp"), timestamp)
setfield!(o, Symbol("timestamp"), timestamp)
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("windowSeconds"), windowSeconds)
setfield!(o, Symbol("windowSeconds"), windowSeconds)
validate_property(IoK8sApiCustomMetricsV1beta1MetricValue, Symbol("value"), value)
setfield!(o, Symbol("value"), value)
o
end
end # type IoK8sApiCustomMetricsV1beta1MetricValue
const _property_map_IoK8sApiCustomMetricsV1beta1MetricValue = Dict{Symbol,Symbol}(Symbol("apiVersion")=>Symbol("apiVersion"), Symbol("kind")=>Symbol("kind"), Symbol("describedObject")=>Symbol("describedObject"), Symbol("metricName")=>Symbol("metricName"), Symbol("timestamp")=>Symbol("timestamp"), Symbol("windowSeconds")=>Symbol("windowSeconds"), Symbol("value")=>Symbol("value"))
const _property_types_IoK8sApiCustomMetricsV1beta1MetricValue = Dict{Symbol,String}(Symbol("apiVersion")=>"String", Symbol("kind")=>"String", Symbol("describedObject")=>"IoK8sApiCoreV1ObjectReference", Symbol("metricName")=>"String", Symbol("timestamp")=>"IoK8sApimachineryPkgApisMetaV1Time", Symbol("windowSeconds")=>"Int64", Symbol("value")=>"IoK8sApimachineryPkgApiResourceQuantity")
Base.propertynames(::Type{ IoK8sApiCustomMetricsV1beta1MetricValue }) = collect(keys(_property_map_IoK8sApiCustomMetricsV1beta1MetricValue))
Swagger.property_type(::Type{ IoK8sApiCustomMetricsV1beta1MetricValue }, name::Symbol) = Union{Nothing,eval(Base.Meta.parse(_property_types_IoK8sApiCustomMetricsV1beta1MetricValue[name]))}
Swagger.field_name(::Type{ IoK8sApiCustomMetricsV1beta1MetricValue }, property_name::Symbol) = _property_map_IoK8sApiCustomMetricsV1beta1MetricValue[property_name]
function check_required(o::IoK8sApiCustomMetricsV1beta1MetricValue)
true
end
function validate_property(::Type{ IoK8sApiCustomMetricsV1beta1MetricValue }, name::Symbol, val)
end
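# A hedged usage sketch (not part of the generated model): construct a value
# via keyword arguments; any field left unset stays `nothing`.
#
#     mv = IoK8sApiCustomMetricsV1beta1MetricValue(
#         metricName="http_requests", windowSeconds=Int64(60))
#     getfield(mv, :metricName)   # => "http_requests"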
|
{"hexsha": "63aaa23ac4bbb02d2dac85cc4f86046b51c2fa4d", "size": 5162, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ApiImpl/api/model_IoK8sApiCustomMetricsV1beta1MetricValue.jl", "max_stars_repo_name": "memetics19/Kuber.jl", "max_stars_repo_head_hexsha": "0834cab05d2b5733cb365594000be16f54345ddb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2018-12-13T13:17:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T20:19:11.000Z", "max_issues_repo_path": "src/ApiImpl/api/model_IoK8sApiCustomMetricsV1beta1MetricValue.jl", "max_issues_repo_name": "memetics19/Kuber.jl", "max_issues_repo_head_hexsha": "0834cab05d2b5733cb365594000be16f54345ddb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-03-14T03:51:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T11:15:26.000Z", "max_forks_repo_path": "src/ApiImpl/api/model_IoK8sApiCustomMetricsV1beta1MetricValue.jl", "max_forks_repo_name": "memetics19/Kuber.jl", "max_forks_repo_head_hexsha": "0834cab05d2b5733cb365594000be16f54345ddb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-12-19T12:02:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T17:43:32.000Z", "avg_line_length": 78.2121212121, "max_line_length": 386, "alphanum_fraction": 0.7754746222, "num_tokens": 1306}
|
import torch
import torchvision
import torchvision.transforms as transforms
import json
# import matplotlib.pyplot as plt
import numpy as np
import time
import argparse
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from probprec import Preconditioner
torch.set_default_dtype(torch.float32)
parser = argparse.ArgumentParser(
description="Run SGD on a tfobs test problem.")
# parser.add_argument("test_problem",
# help="Name of the test_problem (e.g. 'cifar10.cifar10_3c3d'")
# parser.add_argument("--data_dir",
# help="Path to the base data dir. If not set, tfobs uses its default.")
parser.add_argument("-bs", "-batch_size", required=True, type=int,
help="The batch size (positive integer).")
parser.add_argument("-wd", "-weight_decay", type=float,default=0.0,
help="Factor used for the weight_deacy.")
parser.add_argument("-nw", "-number_of_workers", type=int,default=2,
help="Number of Workers.")
parser.add_argument("-N", "-num_epochs", required=True, type=int,
help="Total number of training epochs.")
parser.add_argument("-ei", "-evaluation_iteration", type=int,default=100,
help="Total number of training epochs.")
parser.add_argument("-po", "-prior_observations", type=int,default=10,
help="Number of observations to estimate prior hyperparameters.")
parser.add_argument("-nl", "-likelihoods", type=int,default=5,
help="Number of observations to estimate posterior.")
parser.add_argument("-pr", "-preconditioner_rank", type=int,default=2,
help="Rank of preconditioner.")
parser.add_argument("-rs", "-random_seed", type=int, default=42,
help="Rank of preconditioner.")
args = parser.parse_args()
print(args)
BATCH_SIZE=args.bs#64
NUM_WORKERS=args.nw#2
EVALUATION_ITERATION=args.ei#200
NUM_EPOCHS=args.N#5
WEIGHT_DECAY=args.wd#2e-3
est_rank=args.pr
est_prior=args.po
gather_obs=args.nl
RANDOM_SEED=args.rs
LEARNING_RATE=10 # learning rate handed to the Preconditioner below; also recorded in the logs
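# Example invocation (hypothetical values; only -bs and -N are required):
#   python cifar10_psgd.py -bs 64 -N 5 -wd 2e-3 -ei 200 -pr 2 -rs 42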
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device=torch.device('cpu')
torch.set_default_dtype(torch.float)
torch.manual_seed(RANDOM_SEED)
# device=torch.device('cpu')
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print(device)
if __name__ == '__main__':
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data_deepobs/pytorch', train=True,
download=False, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
shuffle=True, num_workers=NUM_WORKERS)
testset = torchvision.datasets.CIFAR10(root='./data_deepobs/pytorch', train=False,
download=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def name(self):
return "2conv_3dense"
class Net3c3d(nn.Module):
def __init__(self, num_classes=10):
super(Net3c3d, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5,padding=0),#, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2,padding=1),
nn.Conv2d(64, 96, kernel_size=3,padding=0),#, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2,padding=1),
nn.Conv2d(96, 128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2,padding=1)
)
self.classifier = nn.Sequential(
nn.Linear(128 * 3 * 3, 512),
nn.ReLU(inplace=True),
nn.Linear(512, 256),
nn.ReLU(inplace=True),
nn.Linear(256, num_classes),
)
# init the layers
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.constant_(module.bias, 0.0)
nn.init.xavier_normal_(module.weight)
if isinstance(module, nn.Linear):
nn.init.constant_(module.bias, 0.0)
nn.init.xavier_uniform_(module.weight)
def forward(self, x):
x = self.features(x)
# print(x.size())
x = x.view(x.size(0), 128 * 3 * 3)
x = self.classifier(x)
return x
def name(self):
return "3conv_3dense"
model = Net3c3d()
model.to(device)
# EVALUATION_ITERATION=500
criterion = nn.CrossEntropyLoss()
alphas=[]
train_loss=[]
test_loss=[]
test_acc=[]
alphas.append(LEARNING_RATE)
# specify the optimizer class
optimizer_class = optim.SGD
# and its hyperparameters
hyperparams = {} #'lr': 0.1} #'momentum': 0.99}
Poptimizer = Preconditioner([{"params": model.features.parameters()},
{"params": model.classifier.parameters()}],
                    lr = LEARNING_RATE,
est_rank=est_rank, num_observations=gather_obs, prior_iterations=est_prior,
optim_class=optimizer_class, **hyperparams)
# Optimizer = optim.SGD([{"params": model.features.parameters()},
# {"params": model.classifier.parameters()}],
# lr=0.01)
for epoch in range(NUM_EPOCHS): # loop over the dataset multiple times
if epoch > 0:
Poptimizer.start_estimate()
start_time_epoch=time.perf_counter()
running_loss = 0.0
for i, data in enumerate(trainloader):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
Poptimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward(create_graph = True)
Poptimizer.step()
Poptimizer.get_log()
# print statistics
running_loss += loss.item()
if i % EVALUATION_ITERATION == EVALUATION_ITERATION-1: # print every [ei] mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / EVALUATION_ITERATION))
train_loss.append(running_loss / EVALUATION_ITERATION)
running_loss = 0.0
epoch_time=time.perf_counter()-start_time_epoch
#Evaluate on test set
running_test_loss=0.0
correct = 0
total = 0
with torch.no_grad():
for j, data in enumerate(testloader,0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
running_test_loss += loss.item()
# print('epoch $d time %1.3e testloss %1.4e testacc %1.3e'%(epoch+1,time_epoch,))
# print(total,correct,running_test_loss)
test_acc.append(100.0*correct/total)
        test_loss.append(running_test_loss/(j+1))
        print('epoch %d: testloss %1.4e testacc %1.3e'%(epoch+1, test_loss[-1],test_acc[-1]))
print('Finished Training')
# print(train_loss)
print(alphas)
save_name='results/'
save_name+='%s_psgd_%d_%d_%.4f_%.4f'%(model.name(),NUM_EPOCHS,BATCH_SIZE,LEARNING_RATE,WEIGHT_DECAY)
save_name += '_' + time.strftime("%Y-%m-%d-%H-%M-%S")
save_name += '_sqrt'
save_data_train=[np.asarray(train_loss)]
save_data_test=[np.asarray(test_loss),np.asarray(test_acc)]
# np.savetxt(save_name+'_train.txt', save_data_train, fmt='%1.5e', delimiter=' ')
# np.savetxt(save_name+'_test.txt', save_data_test, fmt='%1.5e', delimiter=' ')
# np.savetxt(save_name+'_alphas.txt',alphas, fmt='%1.5e', delimiter=' ')
|
{"hexsha": "44ed6b4782d2bb4d9f7dd653370009da2c01b0e9", "size": 9295, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/cifar10_psgd.py", "max_stars_repo_name": "ludwigbald/probprec", "max_stars_repo_head_hexsha": "227a924a725551f4531cbe682da4830305f55277", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/cifar10_psgd.py", "max_issues_repo_name": "ludwigbald/probprec", "max_issues_repo_head_hexsha": "227a924a725551f4531cbe682da4830305f55277", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/cifar10_psgd.py", "max_forks_repo_name": "ludwigbald/probprec", "max_forks_repo_head_hexsha": "227a924a725551f4531cbe682da4830305f55277", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6828358209, "max_line_length": 107, "alphanum_fraction": 0.5798816568, "include": true, "reason": "import numpy", "num_tokens": 2206}
|
import sys
import os
import json
import time
import cantera as ct
import shutil
import copy
import numpy as np
from PyQt4 import uic
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from src.core.def_tools import *
from src.ct.def_ct_tools import Xstr
from src.ct.senkin import senkin
from src.ct.psr import S_curve
from src.ck.def_cheminp import skeletal
from src.ct.ck2cti_GPS import ck2cti
from dialog_GPS import dialog_GPS
#from dialog_PFA import dialog_PFA
from dialog_database import dialog_database
from dialog_mech import dialog_mech
from dialog_view_mech import dialog_view_mech
from find_tau_ign import find_tau_ign
from src.ct.def_ct_tools import load_raw
from src.core.def_GPS import GPS_algo
from src.core.def_build_graph import build_flux_graph
from networkx.readwrite import json_graph
from src.core.def_tools import st2name
from src.core.def_GPSA import find_GPSA
""" >>>>>>>>>>>>>------------------------------------------------
0.3. run
called by: window_main
"""
class dialog_progress(object):
def set_value(self, task, value):
tasks = ['train','GPS','test','GPSA']
bars = [self.w.bar_train, self.w.bar_GPS, self.w.bar_test, self.w.bar_GPSA]
bar = bars[tasks.index(task)]
bar.setValue(int(value))
self.parent.app.processEvents()
def set_info(self, new_info):
str_time = '[' + time.strftime("%H:%M:%S") + '] '
new_info = str_time + new_info.replace(self.dir_public,'').strip('/') + '\n'
#self.f.write(str(new_info))
old_info = self.w.txt_info.toPlainText()
self.w.txt_info.setText(old_info + new_info)
self.w.txt_info.moveCursor(QTextCursor.End)
self.parent.app.processEvents()
def act_verbose(self):
if self.verbose:
self.w.btn_verbose.setText('verbose')
self.verbose = False
else:
self.w.btn_verbose.setText('concise')
self.verbose = True
def act_stop(self):
self.stop = True
msg = 'will stop after finishing current sub-task'
QMessageBox.information(QWidget(),'',msg)
def close(self):
self.w.accept()
def __init__(self, parent):
ui_name = 'progress.ui'
self.parent = parent
self.stop = False
self.verbose = False
self.dir_public = self.parent.project['dir_public']
self.f = open(os.path.join(self.dir_public,'log.txt'),'w')
self.w = uic.loadUi(os.path.join(parent.dir_ui, ui_name))
self.w.btn_stop.clicked.connect(self.act_stop)
self.w.btn_verbose.clicked.connect(self.act_verbose)
tasks = ['train','GPS','test']
for task in tasks:
self.set_value(task,0)
self.parent.app.processEvents()
self.w.show()
""" >>>>>>>>>>>>>------------------------------------------------
"""
def write_sk_inp(species_kept, dir_mech_de, dir_mech_sk, notes):
species_kept = list(species_kept)
n_sp = len(species_kept)
print 'total: '+str(n_sp)
notes.append('! number of species = '+str(n_sp))
skeletal(dir_mech_de, dir_mech_sk, species_kept, notes=notes)
ck2cti(dir_mech_sk)
f = open(os.path.join(dir_mech_sk,'ns.txt'),'w')
f.write(str(n_sp))
f.close()
""" >>>>>>>>>>>>>------------------------------------------------
"""
def find_raw(soln, soln_in, dir_desk, fuel, \
oxid, phi, atm, T0, reactor, n_digit):
dir_raw = cond2dir(dir_desk, fuel['name'], oxid['name'], phi, atm, T0, reactor, n_digit)
if not os.path.exists(dir_raw):
os.makedirs(dir_raw)
X0 = Xstr(soln, fuel['composition'], phi, oxid['composition'])
if reactor == 'autoignition':
senkin(soln, atm, T0, X0, if_half=True, dir_raw=dir_raw, if_fine=False)
elif reactor == 'autoignition fine':
senkin(soln, atm, T0, X0, if_half=True, dir_raw=dir_raw, if_fine=True)
elif reactor == 'autoignition full':
senkin(soln, atm, T0, X0, if_half=False, dir_raw=dir_raw, if_fine=False)
elif reactor == 'PSR extinction':
S_curve(soln_in, soln, atm, T0, X0, dir_raw=dir_raw)
""" ------------------------------------------------------
training
dP oo
88
d8888P 88d888b. .d8888b. dP 88d888b.
88 88' `88 88' `88 88 88' `88
88 88 88. .88 88 88 88
dP dP `88888P8 dP dP dP
"""
def run_train(parent, progress):
dir_de = os.path.join(parent.project['dir_public'],'detailed')
soln = parent.soln['detailed']
soln_in = parent.soln_in['detailed']
list_train = []
for db_name in parent.project['database'].keys():
if parent.project['database'][db_name]['train']:
list_train.append(db_name)
list_train = sorted(list_train)
v = 0
progress.set_value('train', v)
raw_single_mech(progress, list_train, parent, 100, v, dir_de, soln, soln_in)
progress.set_value('train', 100)
return True
""" >>>>>>>>>>>>>------------------------------------------------
"""
def raw_single_mech(progress, list_db, parent, dv_mech, v, dir_desk, soln, soln_in, bar='train'):
dv_db = 1.0 * dv_mech /len(list_db)
for db_name in list_db:
database = parent.project['database'][db_name]
phi_list = database['phi']
T0_list = database['T0']
atm_list = database['atm']
fuel_list = database['fuel']
oxid_list = database['oxid']
reactor = database['reactor']
dv_raw = 1.0 * dv_db / (len(phi_list) * len(atm_list) * len(T0_list) *\
len(fuel_list) * len(oxid_list))
progress.set_info('\n' + '-'*10 + ' database: ' + db_name + ' ' + '-'*10)
for fuel_name in fuel_list:
for oxid_name in oxid_list:
for phi in phi_list:
for atm in atm_list:
for T0 in T0_list:
if progress.stop:
progress.close()
return False
fuel = parent.project['fuel'][fuel_name]
oxid = parent.project['oxid'][oxid_name]
dir_raw = cond2dir(dir_desk, fuel_name, oxid_name, phi, atm, T0, \
reactor, parent.n_digit)
path_raw = os.path.join(dir_raw,'raw.npz')
if os.path.exists(path_raw):
progress.set_info('<already exists, skipped> '+dir_raw)
else:
progress.set_info(dir_raw)
find_raw(soln, soln_in,\
dir_desk, fuel, oxid, phi, atm, T0, reactor, parent.n_digit)
v += dv_raw
progress.set_value(bar, v)
#print '@'*20
if 'autoignition' in reactor:
fld = os.path.join(dir_desk,'raw',
'['+fuel_name.strip('[').strip(']')+'] + ['+oxid_name.strip('[').strip(']')+']')
find_tau_ign(fld)
print
print 'find_tau_ign'
print 'fld = '+str(fld)
print
#print dir_desk
#print '@'*20
#sys.exit()
#fld = os.path.join(dir_desk,)
return v
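# run_graph: for every training condition and traced element, builds (or
# reuses) per-point element flux graphs under <raw dir>/graph as JSON
# node-link files; returns False when stopped or when no element is traced.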
def run_graph(parent, progress, task):
if task == 'GPS':
obj = progress.w.label_GPS
elif task == 'GPSA':
obj = progress.w.label_GPSA
obj.setText('building graph')
dir_public = parent.project['dir_public']
soln = parent.soln['detailed']
list_train = []
train_name = parent.train_name
for db_name in parent.project['database'].keys():
if parent.project['database'][db_name]['train']:
list_train.append(db_name)
traced_list = []
for GPS_name in parent.project['GPS'].keys():
if parent.project['GPS'][GPS_name]['checked']:
GPS = parent.project['GPS'][GPS_name]
es_name = GPS['es']
es = parent.project['es'][es_name]
for e in es['element'].keys():
if es['element'][e]['traced']:
if e not in traced_list:
traced_list.append(e)
if not bool(traced_list):
msg = 'no traced element in selected GPS'
QMessageBox.information(QWidget(),'',msg)
return False
v = 0.0
dv_db = 100.0/len(list_train)
for db_name in list_train:
database = parent.project['database'][db_name]
phi_list = database['phi']
T0_list = database['T0']
atm_list = database['atm']
fuel_list = database['fuel']
oxid_list = database['oxid']
reactor = database['reactor']
dv_raw = dv_db / (len(phi_list) * len(atm_list) * len(T0_list) *\
len(fuel_list) * len(oxid_list))
for fuel_name in fuel_list:
for oxid_name in oxid_list:
for phi in phi_list:
for atm in atm_list:
for T0 in T0_list:
if progress.stop:
progress.close()
return False
dir_de = os.path.join(dir_public,'detailed')
dir_raw = cond2dir(dir_de, fuel_name, \
oxid_name, phi, atm, T0, \
reactor, parent.n_digit)
if 'DNS' in reactor:
dir_raw = os.path.join(dir_raw, database['case'][0])
raw = load_raw(os.path.join(dir_raw,'raw.npz'))
dir_graph = os.path.join(dir_raw,'graph')
if not os.path.exists(dir_graph):
os.makedirs(dir_graph)
n_pnt = len(raw['axis0'])
dv_pnt = 1.0 * dv_raw / n_pnt
print 'dir_raw = '+str(dir_raw)
print 'n_point = '+str(len(raw['axis0']))
for i_pnt in range(len(raw['axis0'])):
if 'active reactions' in raw.keys() and len(raw['active reactions'])>0:
if raw['active reactions'][i_pnt] == 0:
print 'skipped pnt '+str(i_pnt)+' as no active reaction'
continue
for e in traced_list:
path_graph = os.path.join(dir_graph, e+'_'+str(i_pnt)+'.json')
if not os.path.exists(path_graph):
info = 'building '+e+'-graph for pnt'+str(i_pnt)+' of '+\
str(dir_raw.replace(dir_de,''))
if i_pnt%10==0:
print info
if n_pnt<100:
progress.set_info(info)
flux_graph = build_flux_graph(soln, raw, e, \
path_save=path_graph, overwrite=False, \
i0=i_pnt, i1=i_pnt, constV=False)
else:
print 'already exists '+str(path_graph)
v += dv_pnt
if n_pnt<100:
progress.set_value(task,v)
obj.setText(task)
progress.set_value(task,0.0)
return True
""" ------------------------------------------------------
.88888. 888888ba .d88888b
d8' `88 88 `8b 88. "'
88 a88aaaa8P' `Y88888b.
88 YP88 88 `8b
Y8. .88 88 d8' .8P
`88888' dP Y88888P
"""
def run_GPS(parent, progress):
if not run_graph(parent, progress,'GPS'):
return False
min_dT = parent.min_dT
dir_public = parent.project['dir_public']
soln = parent.soln['detailed']
list_train = []
train_name = parent.train_name
for db_name in parent.project['database'].keys():
if parent.project['database'][db_name]['train']:
list_train.append(db_name)
list_GPS = []
for GPS_name in parent.project['GPS'].keys():
if parent.project['GPS'][GPS_name]['checked']:
list_GPS.append(GPS_name)
v = 0
if bool(list_GPS) == False:
return False
dv_GPS = 100.0/len(list_GPS)
# for different GPS settings ============================
for GPS_name in list_GPS:
GPS = parent.project['GPS'][GPS_name]
alpha_list = GPS['alpha']
beta_list = GPS['beta']
K_list = GPS['K']
must_keep = GPS['must_keep']
es_name = GPS['es']
es = parent.project['es'][es_name]
traced_list = []
for e in es['element'].keys():
if es['element'][e]['traced']:
traced_list.append(e)
if GPS['iso_enable']:
iso_name = GPS['iso']
iso = parent.project['iso'][iso_name]
gamma_list = GPS['gamma']
else:
iso_name = None
iso = None
gamma_list = [None]
dv_kab = 1.0 * dv_GPS/ (len(K_list) * len(beta_list) * len(alpha_list) * len(gamma_list))
for K in K_list:
for beta in beta_list:
for alpha in alpha_list:
for gamma in gamma_list:
dir_sk = para2dir_GPS(dir_public, train_name, \
alpha=alpha, K=K, beta=beta, \
es_name=es_name, iso_name=iso_name, \
d=parent.n_digit, gamma=gamma)
progress.set_info('\n' + '-'*10 + dir_sk + ' ' + '-'*10)
dir_mech_sk = os.path.join(dir_sk,'mech')
path_cti_sk = os.path.join(dir_mech_sk,'chem.cti')
if os.path.exists(path_cti_sk):
progress.set_info('<already exists, skipped> '+path_cti_sk)
v += dv_kab
progress.set_value('GPS',v)
continue
dir_de = os.path.join(dir_public,'detailed')
dir_mech_de = os.path.join(dir_de,'mech')
path_cti_de = os.path.join(dir_mech_de,'chem.cti')
species_kept = set(must_keep)
notes = ['! generated by global pathway selection, Gao et al,'+\
' Combustion and flame, 167 (2016) 238-247']
notes.append('! alpha = ' + str(alpha) + ', K = ' + str(K) + \
', beta = ' + str(beta))
notes.append('! training database: '+train_name)
# for different training database ============================
dv_db = 1.0 * dv_kab / len(list_train)
for db_name in list_train:
database = parent.project['database'][db_name]
phi_list = database['phi']
T0_list = database['T0']
atm_list = database['atm']
fuel_list = database['fuel']
oxid_list = database['oxid']
reactor = database['reactor']
dv_raw = 1.0 * dv_db / (len(phi_list) * len(atm_list) * len(T0_list) *\
len(fuel_list) * len(oxid_list))
for fuel_name in fuel_list:
for oxid_name in oxid_list:
for phi in phi_list:
for atm in atm_list:
for T0 in T0_list:
if progress.stop:
progress.close()
return False
fuel = parent.project['fuel'][fuel_name]
oxid = parent.project['oxid'][oxid_name]
species_kept |= set(fuel['composition'].keys())
species_kept |= set(oxid['composition'].keys())
e_available = set()
for sp in fuel['composition'].keys():
e_available |= set(soln.species(sp).composition.keys())
for sp in oxid['composition'].keys():
e_available |= set(soln.species(sp).composition.keys())
dir_raw = cond2dir(dir_de, fuel_name, \
oxid_name, phi, atm, T0, \
reactor, parent.n_digit)
if 'DNS' in reactor:
dir_raw = os.path.join(dir_raw, database['case'][0])
progress.set_info('raw = '+\
dir_raw.replace(os.path.join(dir_de,'raw'),'').strip('/'))
dir_graph = os.path.join(dir_raw,'graph')
#raw_name = dir_raw.replace(\
# os.path.join(dir_de,'raw'),'').strip('/')
raw_name = cond2dir('', fuel_name, \
oxid_name, phi, atm, T0, \
reactor, parent.n_digit)
if 'DNS' in reactor:
raw_name = os.path.join(dir_raw, database['case'][0])
dir_how = os.path.join(dir_sk,raw_name.replace('raw','how'))
if not os.path.exists(dir_how):
os.makedirs(dir_how)
raw = load_raw(os.path.join(dir_raw,'raw.npz'))
T = raw['temperature']
axis0 = raw['axis0']
# for different time instance ================
flag = False
# -----------------------------------------------
# I only consider points where T differs from T0 by
# more than min_dT, which means reactions are active;
# performing GPS on a chemically frozen state, or an
# equilibrium state (for PSR), does not give much
# useful information
#
# once a point is sampled, flag = True
# -----------------------------------------------
dv_pnt = 1.0 * dv_raw / len(T)
for i_pnt in range(len(T)):
"""
if 'active reactions' in raw.keys():
if raw['active reactions'][i_pnt] == 0:
print 'skipped pnt '+str(i_pnt)+' as no active reaction'
continue
"""
if flag == False:
if abs(T[i_pnt]-T[0])>min_dT:
flag = True
# for different source->target ==============
for e in traced_list:
if e not in e_available:
continue
sources = copy.copy(es['element'][e]['source'])
if bool(sources) == False:
sources = [None]
if parent.alias_fuel in sources:
del sources[sources.index(parent.alias_fuel)]
for sp in fuel['composition'].keys():
atoms = soln.species(sp).composition.keys()
#print 'atms of ' + sp + ' = ' +str(atoms)
if e in atoms:
sources += [sp]
targets = es['element'][e]['target']
if bool(targets) == False:
targets = [None]
for target in targets:
for source in sources:
name_how = st2name(i_pnt, e, source, target)
if progress.verbose:
progress.set_info(' '*5 + name_how)
path_gps = os.path.join(dir_how, name_how+'.json')
path_graph = os.path.join(dir_graph, e+'_'+str(i_pnt)+'.json')
if os.path.exists(path_graph):
data = json.load(\
open(path_graph, 'r'))
flux_graph = json_graph.node_link_graph(data)
else:
if progress.verbose:
progress.set_info(' '*10 + 'building graph...')
dir_graph = os.path.join(dir_raw,'graph')
if not os.path.exists(dir_graph):
os.makedirs(dir_graph)
flux_graph = build_flux_graph(soln, raw, e, \
path_save=path_graph, overwrite=False, \
i0=i_pnt, i1=i_pnt, constV=False)
if flag == False:
continue
GPS_notes = 'T = '+str(T[i_pnt])+', axis0 = '+str(axis0[i_pnt])+\
' ('+str(min(axis0))+' ~ '+str(max(axis0))+')'
GPS_results = GPS_algo(soln, flux_graph, source, target, \
path_save=path_gps, K=K, alpha=alpha, beta=beta, \
normal='max', iso=iso, overwrite=True, raw=dir_raw, \
notes=GPS_notes, gamma=gamma)
new_kept = set(GPS_results['species'].keys())
species_kept |= new_kept
v += dv_pnt
progress.set_value('GPS', v)
# generate chem.inp ***************
write_sk_inp(species_kept, dir_mech_de, dir_mech_sk, notes)
#"""
progress.set_value('GPS', 100)
return True
"""
---------------------------------------------
.88888. 888888ba .d88888b .d888888
d8' `88 88 `8b 88. "' d8' 88
88 a88aaaa8P' `Y88888b. 88aaaaa88a
88 YP88 88 `8b 88 88
Y8. .88 88 d8' .8P 88 88
`88888' dP Y88888P 88 88
---------------------------------------------
"""
def load_dR(path_save, soln, overwrite=False):
if overwrite == False:
if os.path.exists(path_save):
npz_file = np.load(open(path_save, 'rb'))
npz = dict()
for key in npz_file.keys():
npz[key] = npz_file[key]
return npz['dnR']
R = []
sp_list = soln.species_names
R_cand = ['O','H','OH']
for R_cand_i in R_cand:
if R_cand_i in sp_list:
R.append(R_cand_i)
elif R_cand_i.lower() in sp_list:
R.append(R_cand_i.lower())
rxn = soln.reaction
dnR = []
for id_rxn in range(soln.n_reactions):
reactants = rxn(id_rxn).reactants.keys()
products = rxn(id_rxn).products.keys()
dnR_i = 0
for R_i in R:
if R_i in reactants:
dnR_i -= rxn(id_rxn).reactants[R_i]
if R_i in products:
dnR_i += rxn(id_rxn).products[R_i]
dnR.append(dnR_i)
np.savez(path_save,dnR=dnR)
return dnR
def run_GPSA(parent, progress):
if not run_graph(parent, progress, 'GPSA'):
return False
min_dT = parent.min_dT
dir_public = parent.project['dir_public']
soln = parent.soln['detailed']
list_train = []
train_name = parent.train_name
for db_name in parent.project['database'].keys():
if parent.project['database'][db_name]['train']:
list_train.append(db_name)
list_GPS = []
for GPS_name in parent.project['GPS'].keys():
if parent.project['GPS'][GPS_name]['checked']:
list_GPS.append(GPS_name)
if bool(list_GPS):
# for different GPS settings ============================
# add new GP
print 'run_GPSA: here0'
for GPS_name in list_GPS:
GPS = parent.project['GPS'][GPS_name]
alpha_list = GPS['alpha']
beta_list = GPS['beta']
K_list = GPS['K']
es_name = GPS['es']
es = parent.project['es'][es_name]
traced_list = []
for e in es['element'].keys():
if es['element'][e]['traced']:
traced_list.append(e)
if GPS['iso_enable']:
iso_name = GPS['iso']
iso = parent.project['iso'][iso_name]
gamma_list = GPS['gamma']
else:
iso_name = None
iso = None
gamma_list = [None]
for K in K_list:
for beta in beta_list:
for alpha in alpha_list:
for gamma in gamma_list:
dir_sk = para2dir_GPS(dir_public, train_name, \
alpha=alpha, K=K, beta=beta, \
es_name=es_name, iso_name=iso_name, \
d=parent.n_digit, gamma=gamma)
dir_how = os.path.join(dir_sk,'how')
if not os.path.exists(dir_how):
continue
for fo in os.listdir(dir_how):
dir_fo = os.path.join(dir_how,fo)
if ('+' not in fo) or (not os.path.isdir(dir_fo)): continue
for reactor in os.listdir(dir_fo):
dir_reactor = os.path.join(dir_fo,reactor)
if (not os.path.isdir(dir_reactor)): continue
for fat in os.listdir(dir_reactor):
dir_fat = os.path.join(dir_reactor, fat)
if ('phi' not in fat) or (not os.path.isdir(dir_fat)): continue
for file in os.listdir(dir_fat):
if 'graph' not in file: continue
file_GP = os.path.join(dir_fat,file)
traced = file.split(',')[1].replace('graph','').strip().upper()
GP_traced = 'GP_' + traced
if GP_traced not in parent.project.keys():
parent.project[GP_traced] = dict()
GPS_results = json.load(open(file_GP,'r'))
for GP_name in GPS_results['global_path'].keys():
if GP_name not in parent.project[GP_traced].keys():
GP_dict = dict()
GP_dict['alias'] = GP_name
GP_dict['name'] = GP_name
GP_dict['member'] = GPS_results['global_path'][GP_name]['member']
GP_dict['traced'] = traced
parent.project[GP_traced][GP_name] = GP_dict
progress.set_info('added '+traced+'-traced global pathway: '+str(GP_name))
print 'run_GPSA: here1'
# find all GP ==============================
GP_list = []
filter_traced = str(parent.w.cb_GPSA_traced.currentText())
print 'filter_traced = '+str(filter_traced)
if filter_traced == 'no filter':
ee = parent.soln['detailed'].element_names
else:
ee = [filter_traced]
alias_only = (str(parent.w.cb_GPSA_alias.currentText()) == 'with alias only')
source_str = str(parent.w.cb_GPSA_source.currentText())
if source_str == 'no filter':
sources = parent.soln['detailed'].species_names
else:
sources = [source_str]
for traced in ee:
traced = traced.upper()
if 'GP_'+traced in parent.project.keys():
for GP_name in parent.project['GP_'+traced].keys():
GP_dir = parent.project['GP_'+traced][GP_name]
if GP_dir['member'][0] not in sources:
continue
if (not alias_only) or (alias_only and (GP_dir['alias'] != GP_name)):
GP_list.append((traced, GP_name))
if not bool(GP_list):
        msg = 'GP_list is empty!\nTry running GPS first or loosening the GPSA settings'
QMessageBox.information(QWidget(),'',msg)
return False
print 'len(GP_list) = '+str(len(GP_list))
# for different training set ============================
# to compute GPSA quantities
dir_desk = parent.project['mech']['detailed']['desk']
path_R_npz = os.path.join(dir_desk,'mech','radical.npz')
dnR = load_dR(path_R_npz, soln)
soln = parent.soln['detailed']
v = 0.0
dv_db = 100.0 / len(list_train)
for db_name in list_train:
database = parent.project['database'][db_name]
phi_list = database['phi']
T0_list = database['T0']
atm_list = database['atm']
fuel_list = database['fuel']
oxid_list = database['oxid']
reactor = database['reactor']
dv_raw = 1.0 * dv_db / (len(phi_list) * len(atm_list) * len(T0_list) * len(fuel_list) * len(oxid_list))
dv_GP = 1.0 * dv_raw / len(GP_list)
for fuel_name in fuel_list:
for oxid_name in oxid_list:
for phi in phi_list:
for atm in atm_list:
for T0 in T0_list:
if progress.stop:
progress.close()
return False
fuel = parent.project['fuel'][fuel_name]
oxid = parent.project['oxid'][oxid_name]
fuel_comp = parent.project['fuel'][fuel_name]['composition']
dir_de = os.path.join(dir_public,'detailed')
dir_raw = cond2dir(dir_de, fuel_name, \
oxid_name, phi, atm, T0, \
reactor, parent.n_digit)
if 'DNS' in reactor:
dir_raw = os.path.join(dir_raw, database['case'][0])
raw = load_raw(os.path.join(dir_raw,'raw.npz'))
n_break = len(raw['axis0'])
else:
n_break = 0
dir_graph = os.path.join(dir_raw,'graph')
no_graph = True
if os.path.exists(dir_graph):
for file in os.listdir(dir_graph):
if file.endswith('.json'):
no_graph = False
break
if no_graph:
msg = 'no graph file found for: \n\n'+str(dir_raw.replace(dir_public,'[working dir]'))
QMessageBox.information(QWidget(),'',msg)
return False
progress.set_info(str(dir_raw))
for traced, GP_name in GP_list:
msg = ' '*4+'computing GPSA for '+str(GP_name)
print msg
progress.set_info(msg)
GP_dir = parent.project['GP_'+traced][GP_name]
find_GPSA(dir_raw, GP_dir, soln, dnR, fuel_comp, n_break)
v += dv_GP
progress.set_value('GPSA', v)
return True
""" ------------------------------------------------------
dP dP
88 88
d8888P .d8888b. .d8888b. d8888P
88 88ooood8 Y8ooooo. 88
88 88. ... 88 88
dP `88888P' `88888P' dP
"""
def run_test(parent, progress):
dir_public = parent.project['dir_public']
list_test = []
for db_name in parent.project['database'].keys():
if parent.project['database'][db_name]['test']:
list_test.append(db_name)
list_train = []
train_name = parent.train_name
for db_name in parent.project['database'].keys():
if parent.project['database'][db_name]['train']:
list_train.append(db_name)
# ============================
# dP dP oo dP dP
# 88 88 88 88
# .d888b88 .d8888b. d8888P .d8888b. dP 88 .d8888b. .d888b88
# 88' `88 88ooood8 88 88' `88 88 88 88ooood8 88' `88
# 88. .88 88. ... 88 88. .88 88 88 88. ... 88. .88
# `88888P8 `88888P' dP `88888P8 dP dP `88888P' `88888P8
progress.w.label_test.setText('calculating detailed...')
dir_de = os.path.join(dir_public,'detailed')
soln = parent.soln['detailed']
soln_in = parent.soln_in['detailed']
v = 0
progress.set_value('test', v)
raw_single_mech(progress, list_test, parent, 100, v, dir_de, soln, soln_in, 'test')
# ============================
# .88888. 888888ba .d88888b
# d8' `88 88 `8b 88. "'
# 88 a88aaaa8P' `Y88888b.
# 88 YP88 88 `8b
# Y8. .88 88 d8' .8P
# `88888' dP Y88888P
progress.w.label_test.setText('testing GPS...')
list_GPS = []
for GPS_name in parent.project['GPS'].keys():
if parent.project['GPS'][GPS_name]['checked']:
list_GPS.append(GPS_name)
v = 0
progress.set_value('test', v)
if bool(list_GPS):
dv_GPS = 100.0/len(list_GPS)
for GPS_name in list_GPS:
GPS = parent.project['GPS'][GPS_name]
alpha_list = GPS['alpha']
beta_list = GPS['beta']
K_list = GPS['K']
es_name = GPS['es']
if GPS['iso_enable']:
iso_name = GPS['iso']
gamma_list = GPS['gamma']
else:
iso_name = None
gamma_list = [None]
dv_kab = 1.0 * dv_GPS/ (len(K_list) * len(beta_list) * len(alpha_list) * len(gamma_list))
for K in K_list:
for beta in beta_list:
for alpha in alpha_list:
for gamma in gamma_list:
dir_sk = para2dir_GPS(dir_public, train_name, \
alpha=alpha, K=K, beta=beta, \
es_name=es_name, iso_name=iso_name, \
d=parent.n_digit, gamma=gamma)
progress.set_info('\n' + '-'*10 + dir_sk + ' ' + '-'*10)
path_cti = os.path.join(dir_sk,'mech','chem.cti')
soln = ct.Solution(path_cti)
soln_in = ct.Solution(path_cti)
v = raw_single_mech(progress, list_test, parent, dv_kab, v, dir_sk, soln, soln_in,'test')
# ============================
# dP dP
# 88 88
# .d8888b. d8888P 88d888b. .d8888b. 88d888b.
# 88' `88 88 88' `88 88ooood8 88' `88
# 88. .88 88 88 88 88. ... 88
# `88888P' dP dP dP `88888P' dP
for name in parent.project['mech'].keys():
sk = parent.project['mech'][name]
if name != 'detailed' and sk['checked']:
dir_sk = sk['desk']
path_cti = os.path.join(dir_sk,'mech','chem.cti')
if name not in parent.soln.keys():
parent.soln[name] = ct.Solution(path_cti)
parent.soln_in[name] = ct.Solution(path_cti)
soln = parent.soln[name]
soln_in = parent.soln_in[name]
v = 0
progress.w.label_test.setText('testing '+name)
progress.set_value('test', v)
raw_single_mech(progress, list_test, parent, 100, v, dir_sk, soln, soln_in, 'test')
|
{"hexsha": "a3b81690fc4f19d6c3d20683c01eb8bbfdb27217", "size": 30165, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gui/def_run.py", "max_stars_repo_name": "haoxy97/GPS", "max_stars_repo_head_hexsha": "3da6d3a7410b7b7e5340373f206a1833759d5acf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-12T00:01:57.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-12T00:01:57.000Z", "max_issues_repo_path": "src/gui/def_run.py", "max_issues_repo_name": "haoxy97/GPS", "max_issues_repo_head_hexsha": "3da6d3a7410b7b7e5340373f206a1833759d5acf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gui/def_run.py", "max_forks_repo_name": "haoxy97/GPS", "max_forks_repo_head_hexsha": "3da6d3a7410b7b7e5340373f206a1833759d5acf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1375, "max_line_length": 105, "alphanum_fraction": 0.5683739433, "include": true, "reason": "from networkx", "num_tokens": 8302}
|
C TEST SUBROUTINE testos
C
INCLUDE 'VICMAIN_FOR'
SUBROUTINE MAIN44
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
C THIS IS A TEST FOR MODULE testos C
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
CALL TESTOS(IOS)
IF (IOS .EQ. 0) CALL XVMESSAGE('THE OS IS VMS',' ')
IF (IOS .EQ. 1) CALL XVMESSAGE('THE OS IS UNIX',' ')
IF (IOS .EQ. 2) CALL XVMESSAGE('THE OS IS other',' ')
RETURN
END
|
{"hexsha": "6b14e8edc8a147132fbe5098db2decb3cb7a3c95", "size": 433, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p2/sub/testos/test/ttestos.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p2/sub/testos/test/ttestos.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p2/sub/testos/test/ttestos.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 27.0625, "max_line_length": 59, "alphanum_fraction": 0.6605080831, "num_tokens": 133}
|
!
! This is the test program for EXTRAP
!
INCLUDE 'VICMAIN_FOR'
SUBROUTINE MAIN44
C-----THIS IS A TEST PROGRAM FOR MODULE EXTRAP
C-----EXTRAP WILL CALCULATE VALUES FOR THE DNS OF A LINE SEGMENT
C-----BASED ON THE VALUES OF OTHER POINTS IN THE PICTURE.
C-----THESE OTHER POINTS ARE STORED IN ARRAY PTS.
C-----THIS PROGRAM ASSUMES THAT AN 8 X 8 IMAGE IS INPUT.
C-----EXTRAP IS BEING TOLD TO INTERPOLATE OVER THE AREA
C-----(4,4,3,3)
INTEGER*4 IUNIT,OUNIT1,OUNIT2,NL,NS
INTEGER*2 LINE(20)
INTEGER*2 PTS(48)/3,3,5,4,3, 6,5,3, 7,6,3, 8,7,3, 9,
. 3,4,6, 7,4,10,
. 3,5,7, 7,5,11,
. 3,6,8, 7,6,12,
. 3,7,9,4,7,10,5,7,11,6,7,12,7,7,13/
C
MAX1 = 25
MAX2 = 10000000
C OPEN INPUT DATA SET
CALL XVUNIT(IUNIT,'INP',1,STAT,' ')
CALL XVOPEN(IUNIT,STAT,'U_FORMAT','HALF',' ')
CALL XVGET(IUNIT,STAT,'NL',NL,'NS',NS,' ')
C OPEN OUTPUT DATA SETS
CALL XVUNIT(OUNIT1,'OUT',1,STAT,' ')
CALL XVOPEN(OUNIT1,STAT,'OP','WRITE','U_FORMAT','HALF',
& 'O_FORMAT','BYTE',' ')
CALL XVUNIT(OUNIT2,'OUT',2,STAT,' ')
CALL XVOPEN(OUNIT2,STAT,'OP','WRITE','U_FORMAT','HALF',
& 'O_FORMAT','BYTE',' ')
DO L=1,NL
CALL XVREAD(IUNIT,LINE,STAT,'LINE',L,' ')
CALL XVWRIT(OUNIT1,LINE,STAT,'LINE',L,'NSAMPS',NS,' ')
CALL XVWRIT(OUNIT2,LINE,STAT,'LINE',L,'NSAMPS',NS,' ')
END DO
C CLOSE OUTPUT DATA SETS AND RE-OPEN FOR UPDATE
CALL XVCLOSE(OUNIT1,STAT,' ')
CALL XVCLOSE(OUNIT2,STAT,' ')
CALL XVOPEN(OUNIT1,STAT,'OP','UPDATE','U_FORMAT','HALF',
& 'O_FORMAT','BYTE',' ')
CALL XVOPEN(OUNIT2,STAT,'OP','UPDATE','U_FORMAT','HALF',
& 'O_FORMAT','BYTE',' ')
C
DO 20 L=4,6
CALL EXTRAP(16,L,4,6,PTS,LINE,MAX1)
CALL XVWRIT(OUNIT1,LINE,STAT,'LINE',L,
& 'SAMP',4,'NSAMPS',3,' ')
CALL EXTRAP(16,L,4,6,PTS,LINE,MAX2)
CALL XVWRIT(OUNIT2,LINE,STAT,'LINE',L,
& 'SAMP',4,'NSAMPS',3,' ')
20 CONTINUE
CALL XVCLOSE(IUNIT,STAT,' ')
CALL XVCLOSE(OUNIT1,STAT,' ')
CALL XVCLOSE(OUNIT2,STAT,' ')
STOP
END
|
{"hexsha": "bac76c94a2b40cb00d12f0e6434835bd78704b64", "size": 2267, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p2/sub/extrap/test/textrap.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p2/sub/extrap/test/textrap.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p2/sub/extrap/test/textrap.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 34.8769230769, "max_line_length": 64, "alphanum_fraction": 0.5403617115, "num_tokens": 819}
|
MODULE esn_I
INTERFACE
!...Generated by Pacific-Sierra Research 77to90 4.4G 10:47:13 03/09/06
SUBROUTINE esn ( AL, A, ESPI, OVL, CESPM2, CESPML, CESP, POTPT, ES&
, ESPC, WORK1D, NORBS, NUMAT)
USE vast_kind_param,ONLY: DOUBLE
integer, INTENT(IN) :: NORBS
integer, INTENT(IN) :: NUMAT
real(DOUBLE), DIMENSION((NUMAT + 1)**2) :: AL
real(DOUBLE), DIMENSION(NORBS**2) :: CESPM2
real(DOUBLE), DIMENSION(NORBS**2) :: CESPML
real(DOUBLE), DIMENSION(NORBS**2) :: CESP
real(DOUBLE), DIMENSION(3,*) :: POTPT
real(DOUBLE), DIMENSION(*) :: ES
real(DOUBLE), DIMENSION(*) :: ESPC
real(DOUBLE), DIMENSION(*) :: WORK1D
END SUBROUTINE
END INTERFACE
END MODULE
|
{"hexsha": "692d41dbf67bb298ba1c78a41e0ff8303dedf9a8", "size": 799, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "2006_MOPAC7.1/src_interfaces/esn_I.f90", "max_stars_repo_name": "openmopac/MOPAC-archive", "max_stars_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-16T20:53:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T20:54:11.000Z", "max_issues_repo_path": "2006_MOPAC7.1/src_interfaces/esn_I.f90", "max_issues_repo_name": "openmopac/MOPAC-archive", "max_issues_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2006_MOPAC7.1/src_interfaces/esn_I.f90", "max_forks_repo_name": "openmopac/MOPAC-archive", "max_forks_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.95, "max_line_length": 76, "alphanum_fraction": 0.5682102628, "num_tokens": 270}
|
-- Enumerated Types
inductive weekday : Type
| sunday : weekday
| monday : weekday
| tuesday : weekday
| wednesday : weekday
| thursday : weekday
| friday : weekday
| saturday : weekday
#check weekday
#print weekday
#check weekday.sunday
#check weekday.monday
open weekday
#check sunday
#check monday
#check weekday.rec
#check @weekday.rec
#check weekday.rec_on
#check @weekday.rec_on
def number_of_day (d : weekday) : ℕ :=
weekday.rec_on d 1 2 3 4 5 6 7
#reduce number_of_day sunday
#reduce number_of_day monday
#reduce number_of_day tuesday
def number_of_day' (d : weekday) : ℕ :=
weekday.cases_on d 1 2 3 4 5 6 7
#reduce number_of_day' wednesday
namespace weekday
@[reducible]
private def cases_on := @weekday.cases_on
def number_of_day₁ (d : weekday) : nat :=
cases_on d 1 2 3 4 5 6 7
end weekday
#reduce weekday.number_of_day₁ weekday.sunday
open weekday (renaming cases_on → cases_on)
#reduce number_of_day₁ sunday
#check cases_on
namespace weekday
def next (d : weekday) : weekday :=
weekday.cases_on d monday tuesday wednesday thursday friday saturday sunday
def previous (d : weekday) : weekday :=
weekday.cases_on d saturday sunday monday tuesday wednesday thursday friday
#reduce next (next tuesday)
#reduce next (previous tuesday)
example : next (previous tuesday) = tuesday := rfl
theorem next_previous (d: weekday) : next (previous d) = d :=
weekday.cases_on d
(show next (previous sunday) = sunday, from rfl)
(show next (previous monday) = monday, from rfl)
(show next (previous tuesday) = tuesday, from rfl)
(show next (previous wednesday) = wednesday, from rfl)
(show next (previous thursday) = thursday, from rfl)
(show next (previous friday) = friday, from rfl)
(show next (previous saturday) = saturday, from rfl)
theorem next_previous' (d: weekday) : next (previous d) = d :=
weekday.cases_on d rfl rfl rfl rfl rfl rfl rfl
theorem next_previous'' (d: weekday) : next (previous d) = d :=
by apply weekday.cases_on d; refl
theorem next_previous₁ (d: weekday) : next (previous d) = d :=
by apply weekday.rec_on d; refl
end weekday
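-- A hedged companion example (not part of the original file): the
-- symmetric round trip also holds, by the same case analysis.
theorem previous_next (d : weekday) : weekday.previous (weekday.next d) = d :=
by apply weekday.rec_on d; refl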
|
{"author": "agryman", "repo": "theorem-proving-in-lean", "sha": "cf5a3a19d0d9d9c0a4f178f79e9b0fa67c5cddb9", "save_path": "github-repos/lean/agryman-theorem-proving-in-lean", "path": "github-repos/lean/agryman-theorem-proving-in-lean/theorem-proving-in-lean-cf5a3a19d0d9d9c0a4f178f79e9b0fa67c5cddb9/src/07-Inductive-Types/example-7.1-1.lean"}
|
c !! This is used to get the error
double precision function qexact(blockno,xc,yc,t)
implicit none
integer blockno
double precision xc,yc,t
double precision x0, y0, u0, v0
double precision q0,qc
double precision u0_comm,v0_comm,revs_comm
common /comm_velocity/ u0_comm,v0_comm, revs_comm
u0 = revs_comm*u0_comm
v0 = revs_comm*v0_comm
c # Assume velocity is horizontal; unit speed.
qc = q0(blockno, xc - u0*t,yc - v0*t)
qexact = qc
end
double precision function q0(blockno,xc1,yc1)
implicit none
double precision xc,yc, xp, yp, zp, rp
double precision xc1, yc1
integer blockno
integer*8 cont, get_context
double precision r,r0
double precision Hsmooth
cont = get_context()
xc = xc1
yc = yc1
call fclaw2d_map_c2m(cont,
& blockno,xc,yc,xp,yp,zp)
c # Sphere centered at (1,0,r0) on torus
r0 = 0.4d0
r = sqrt((xp - 1.0)**2 + yp**2 + (zp-r0)**2)
q0 = Hsmooth(r + r0) - Hsmooth(r - r0)
end
double precision function Hsmooth(r)
implicit none
double precision r
Hsmooth = (tanh(r/0.02d0) + 1)/2.d0
end
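c
c     Note: Hsmooth is a smoothed Heaviside step; the tanh with width
c     0.02 rises from ~0 to ~1 near r = 0, so q0 = H(r+r0) - H(r-r0)
c     is close to 1 inside the sphere of radius r0 and 0 outside.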
|
{"hexsha": "72cee8dca071ca8c13f780646187e2714c96f8ba", "size": 1241, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "applications/clawpack/advection/2d/torus/qexact.f", "max_stars_repo_name": "MelodyShih/forestclaw", "max_stars_repo_head_hexsha": "2abaab636e6e93f5507a6f231490144a3f805b59", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-09T23:06:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T23:06:42.000Z", "max_issues_repo_path": "applications/clawpack/advection/2d/torus/qexact.f", "max_issues_repo_name": "scottaiton/forestclaw", "max_issues_repo_head_hexsha": "2abaab636e6e93f5507a6f231490144a3f805b59", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "applications/clawpack/advection/2d/torus/qexact.f", "max_forks_repo_name": "scottaiton/forestclaw", "max_forks_repo_head_hexsha": "2abaab636e6e93f5507a6f231490144a3f805b59", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0338983051, "max_line_length": 55, "alphanum_fraction": 0.5938759065, "num_tokens": 396}
|
from typing import Any, Dict, List, Optional
import ConfigSpace as CS
import numpy as np
from tpe.optimizer.base_optimizer import BaseOptimizer, ObjectiveFunc
class RandomSearch(BaseOptimizer):
def __init__(
self,
obj_func: ObjectiveFunc,
config_space: CS.ConfigurationSpace,
resultfile: str,
n_init: int = 10,
max_evals: int = 100,
seed: Optional[int] = None,
metric_name: str = "loss",
runtime_name: str = "iter_time",
only_requirements: bool = False,
result_keys: List[str] = ["loss"],
):
super().__init__(
obj_func=obj_func,
config_space=config_space,
resultfile=resultfile,
n_init=n_init,
max_evals=max_evals,
seed=seed,
metric_name=metric_name,
runtime_name=runtime_name,
only_requirements=only_requirements,
result_keys=result_keys,
)
self._observations = {hp_name: np.array([]) for hp_name in self._hp_names}
self._observations[metric_name] = np.array([])
self._observations[runtime_name] = np.array([])
def update(self, eval_config: Dict[str, Any], results: Dict[str, float], runtime: float) -> None:
for hp_name, val in eval_config.items():
self._observations[hp_name] = np.append(self._observations[hp_name], val)
for key, val in results.items():
self._observations[key] = np.append(self._observations[key], val)
self._observations[self._runtime_name] = np.append(self._observations[self._runtime_name], runtime)
def fetch_observations(self) -> Dict[str, np.ndarray]:
return {hp_name: vals.copy() for hp_name, vals in self._observations.items()}
def sample(self) -> Dict[str, Any]:
return self.initial_sample()
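# A hedged usage sketch (the objective below and its return convention are
# assumptions for illustration, not part of this module):
#
#     cs = CS.ConfigurationSpace()
#     cs.add_hyperparameter(CS.UniformFloatHyperparameter("x", -5.0, 5.0))
#
#     def obj_func(eval_config):
#         return {"loss": eval_config["x"] ** 2}, 0.0  # (results, runtime)
#
#     opt = RandomSearch(obj_func, cs, resultfile="rs_demo", max_evals=50)
#
# Note that sample() simply delegates to initial_sample(), so every
# evaluation draws a fresh configuration uniformly at random.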
|
{"hexsha": "bedd81337be87225c18fd37ba2d2219523caa731", "size": 1866, "ext": "py", "lang": "Python", "max_stars_repo_path": "tpe/optimizer/random_search.py", "max_stars_repo_name": "nabenabe0928/AIST_TPE", "max_stars_repo_head_hexsha": "0094043aed9e148ea817bcdbd5c61c7659f779e0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tpe/optimizer/random_search.py", "max_issues_repo_name": "nabenabe0928/AIST_TPE", "max_issues_repo_head_hexsha": "0094043aed9e148ea817bcdbd5c61c7659f779e0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tpe/optimizer/random_search.py", "max_forks_repo_name": "nabenabe0928/AIST_TPE", "max_forks_repo_head_hexsha": "0094043aed9e148ea817bcdbd5c61c7659f779e0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3214285714, "max_line_length": 107, "alphanum_fraction": 0.6334405145, "include": true, "reason": "import numpy", "num_tokens": 413}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 09:58:57 2020
@author: gao
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib as mpl
import os
from matplotlib.colors import LinearSegmentedColormap
import pickle
import seaborn as sns
import pandas as pd
import ptitprince as pt
#----read lc list--------------------------------------------------
with open("../simulation/LC.txt", "r") as file:
lcs = eval(file.readline()) # read lc list
##============================================
"""Due to the long time for abstract the data for 3+1, so we run only one times and then save the data local.
The following gray code are used for the data abstracting for 3+1 or any other data."""
##----10000 line data -------------
#with open('%s/size_tsn_line10000_v0.txt'%data_pathway, 'rb') as fp:
# t_data0 = pickle.load(fp)
#
#test_number=10000 # test the results under 1000 lines
#t_data=t_data0[0:test_number]
#
#binary_lc=np.array([1,3,6,7,12,13,20,22,23,34,36,37,52,56,57,58])-1
#
##-------------------------------------------------------------------------------
#
##---read data-----------------------
#'''proved that all optimal lc are binary splitting lcs '''
#"collect the line and its corresponding optimal lcs"
#
#end_lc=58 # 13[5], 23[6], 37[7]; 58[8]; 87[9]; 128[10] # how many lcs we care
#
#line_opLCs=[] # collect the lines and its optimal lcs
#
#for T_cluster in range(0,test_number): # how many figures or cell divisions
# all_list=[]
# for i_th in range(0,end_lc): # read all lcs data = growth rate
#
# with open('./data/data_general_size_effect/%d_%d.txt'%(T_cluster,i_th), "r") as file:
# nan=float(np.nan)
# inf=np.inf
# grate = eval(file.readline()) # read growth rate
# all_list.append(np.array([i_th, grate]))
#
# all_list_f = np.array(all_list, dtype=np.float128)
# max_value=np.amax(all_list_f[:,1])
# lc_ith=int(all_list_f[np.where(all_list_f[:,1]==max_value)][0][0])
# "check if all optimal lcs are the binary splitting lcs"
## if lc_ith not in binary_lc:
## print('Wrong!!!! \n')
## print('The line is %s'%T_cluster)
# "save the line and optimal lcs"
# line_opLCs.append(np.array([T_cluster,lc_ith]))
#
#line_opLCs=np.array(line_opLCs) # each line-id and its optimal lc
#oplc_list=set(line_opLCs[:,1])
#
##--------np.array_t-data-------------------------------------------------
###%%
#neutral_list=np.array([np.log((i+1)/(i)) for i in range(1,8)]) #neutral cases
#
#t_data_arr=np.array([np.array(i) for i in t_data]) # 10000,7
#t_data_bac=t_data_arr*neutral_list # sample dots
#
## sample to panda.data---
#
#
#
#df_list=[]
#
#for i in range(7):
# data_sample={}
# df=pd.DataFrame(data_sample)
#
# posi=[str(i+1) for j in range(test_number)]
# dots=t_data_bac[:,i]
# chi_ratio=t_data_arr[:,i]
#
# df['posi']=posi
# df['dots']=dots
# df['chi_ratio']=chi_ratio
# df_list.append(df)
#
#frames=[df_list[0],df_list[1],df_list[2],df_list[3],df_list[4], df_list[5],df_list[6]]
#sample=pd.concat(frames)
#
#line_prop=[str('Sample') for i in range(7*test_number)]
#sample['Lines']=line_prop
#
###------target lc--========-------------
#blc=5
#target_lc=line_opLCs[np.where(line_opLCs[:,1]==blc)]
#target_lines=[]
#target_lines_ratio=[]
#for i in target_lc[:,0]:
# target_lines.append(t_data_bac[i]) # original t_sn data
# target_lines_ratio.append(t_data_arr[i]) # ratio t_sn data
#target_lines=np.array(target_lines) # target dots
#target_lines_ratio=np.array(target_lines_ratio) # ratio t_sn data
#
## target to panda.data---
#df_list0=[]
#
#for i in range(7):
# data_sample={}
# df=pd.DataFrame(data_sample)
#
# posi=[str(i+1) for j in range(np.shape(target_lines)[0])]
# dots=target_lines[:,i]
# chi_ratio=target_lines_ratio[:,i]
#
# df['posi']=posi
# df['dots']=dots
# df['chi_ratio']=chi_ratio
# df_list0.append(df)
#
#frames0=[df_list0[0],df_list0[1],df_list0[2],df_list0[3],df_list0[4], df_list0[5],df_list0[6]]
#target=pd.concat(frames0)
#
#line_prop0=[str('Promoted') for i in range(7*(np.shape(target_lines)[0]))]
#target['Lines']=line_prop0
#
##======who data-===============
#combined_data=pd.concat([sample,target])
#
#sample.to_pickle('sample_data.pkl')
#combined_data.to_pickle('origin_combined_data.pkl') # dave data
#target.to_pickle('origin_target_13.pkl') # blue dots
##============================================
"""These are data that we saved which can be generated by runing the above codes."""
blc=5
sample_data=pd.read_pickle('/Users/gao/Desktop/life-cycles-with-multiplayer-game/SimulationCode/v6/v2_VD_V0/v12_sizeti/code_general_size_effect/test_lines10000/sample_data.pkl') # read data
combined_data=pd.read_pickle('origin_combined_data.pkl') # read data
target=pd.read_pickle('/Users/gao/Desktop/life-cycles-with-multiplayer-game/SimulationCode/v6/v2_VD_V0/v12_sizeti/code_general_size_effect/test_lines10000/origin_target_13.pkl')
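# NOTE: the two absolute paths above are machine-specific; point them at the
# pickles produced by the commented-out pipeline above when regenerating the data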
#-----------------draw figures--------------------------
#------raincloud plot----------
f,ax = plt.subplots( figsize=(12, 4))
df0=sample_data
df=target
dy="chi_ratio"; dx="posi"; ort="v"; pal={""}
pal = sns.color_palette(n_colors=1)
pal0 = sns.color_palette(n_colors=2)
dodge_value=1
jitter_value=0.12
ax=sns.stripplot( x = dx, y = dy, data = df0,
palette={ "darkgrey"},dodge=dodge_value,
edgecolor = "darkgrey",size = 1, jitter = jitter_value, zorder = 0,
orient = ort,alpha=0.5)
#-- blue color
ax=pt.half_violinplot( x = dx, y = dy, data = df,
palette = { "#377eb8"},
linewidth=0.5,dodge=dodge_value,
bw = .2, cut = 0.,scale = "area", width = 1., inner = None,
orient = ort,alpha=0.8)
ax=sns.stripplot( x = dx, y = dy, data = df, palette = { "#377eb8"},
linewidth=0.5,dodge=dodge_value,
edgecolor = "#377eb8",size = 2, jitter = jitter_value, zorder = 0,
orient = ort,alpha=0.8)
ax.set_xlabel(r"Organism size $n$",fontsize=16)
ax.set_ylabel("Normalised cell increment" "\n" r"component $\chi_{n}$",fontsize=16)
#------remove ticks and the top/right/left frames
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.yaxis.set_ticks_position('none')
plt.xlim(-.7,6.3)
#---artifical legend--------------
import matplotlib.patches as mpatches
legend_dict = { 'Sample' : 'silver', r'Promoting $3+1$' : '#377eb8' }
patchList = []
for key in legend_dict:
data_key = mpatches.Patch(color=legend_dict[key], label=key)
patchList.append(data_key)
ax.legend(handles=patchList,frameon=False,loc='upper center', bbox_to_anchor=(0.45, 1.13),
shadow=None, ncol=1)
plt.ylim(0.35,1.6)
plt.show()
#f.savefig('./figure/figure_2C.pdf', bbox_inches='tight') # save the figure
|
{"hexsha": "0427d648c870d7a9d365daf12a45f9e8f97edf96", "size": 7326, "ext": "py", "lang": "Python", "max_stars_repo_path": "figure/Figure2C_general_size_effect.py", "max_stars_repo_name": "YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity", "max_stars_repo_head_hexsha": "13eb51639fcee630a76e197b50ef321e3a94ce0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figure/Figure2C_general_size_effect.py", "max_issues_repo_name": "YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity", "max_issues_repo_head_hexsha": "13eb51639fcee630a76e197b50ef321e3a94ce0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figure/Figure2C_general_size_effect.py", "max_forks_repo_name": "YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity", "max_forks_repo_head_hexsha": "13eb51639fcee630a76e197b50ef321e3a94ce0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5566037736, "max_line_length": 194, "alphanum_fraction": 0.616025116, "include": true, "reason": "import numpy", "num_tokens": 2069}
|
import pyvisa
import feeltech
import time
import numpy
import matplotlib.pyplot as plt
import math
fichero = open('config.txt')
########################
timeDelay = 0.7 #Adjust the time delay between frequency increments (in seconds)
########################
startFreq = float(fichero.readline().split(',')[1]) #Read the start frequency
endFreq = float(fichero.readline().split(',')[1]) #Read the end frequency
if startFreq < 0 or endFreq < 0:
    print('ERROR. Frequency must be positive')
print('Please press Enter to exit :-(')
input()
exit()
if startFreq > endFreq:
print('ERROR. Start Frequency must be less than End Frequency')
print('Please press Enter to exit:-(')
input()
exit()
freqSteps = int(fichero.readline().split(',')[1]) #Read the frequency steps
if freqSteps <= 0:
print('ERROR. Frequency steps must be greater than zero')
print('Please press Enter to exit :-(')
input()
exit()
waveVMax = float(fichero.readline().split(',')[1]) #Read the max voltage of sine wave
if waveVMax <= 0:
print('ERROR. Max Voltage must be greater than zero')
print('Please press Enter to exit :-(')
input()
exit()
print(startFreq)
print(endFreq)
print(freqSteps)
print(waveVMax)
freqInc = ((endFreq-startFreq)/freqSteps) #Compute the frequency increments in function of steps, start and end frequencies
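#Worked example (illustrative): startFreq=100, endFreq=1000, freqSteps=9 gives
#freqInc=100, so the sweep measures freqSteps+1 = 10 points: 100, 200, ..., 1000 Hz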
rm = pyvisa.ResourceManager() #PyVISA Resource Manager
print("List of connected instruments:")
print(rm.list_resources()) #Show the list of detected instruments
instrument = input('Please enter the oscilloscope ID: ') #Read the ID of oscilloscope
scope = rm.open_resource(instrument) #Identify oscilloscope with "scope"
scope.write("MEASure:CLEar ALL") #Clear all measurement items
scope.write("MEASure:ITEM VMAX,CHANnel1") #Create the VMax measurement item for CH1
scope.write("MEASure:ITEM VMAX,CHANnel2") #Create the VMax measurement item for CH2
port_gen = fichero.readline().split(',')[1]
ft = feeltech.FeelTech(port_gen) #Connect the FY3224s generator
c1 = feeltech.Channel(1,ft) #Init the CH1 of generator
CH1VMax = numpy.zeros(freqSteps+1) #Create an array for CH1 measurements
CH2VMax = numpy.zeros(freqSteps+1) #Create an array for CH2 measurements
db = numpy.zeros(freqSteps+1) #Create an array for the result in db
freqValues = numpy.zeros(freqSteps+1) #Create an array for the frequency values
c1.waveform(feeltech.SINE) #CH1 will generate a sine wave
c1.amplitude(waveVMax*2) #Set CH1 peak to peak voltage
freq = startFreq
c1.frequency(freq) #Set CH1 frequency
scope.write("TIMebase:MAIN:SCAle " + str(1/(3*freq))) #Set horizontal scale of oscilloscope
if waveVMax <= 3.5: #Set vertical scale of oscilloscope
scope.write("CHANnel1:SCALe 1")
scope.write("CHANnel2:SCALe 1")
elif waveVMax > 3.5 and waveVMax <= 7:
scope.write("CHANnel1:SCALe 2")
scope.write("CHANnel2:SCALe 2")
elif waveVMax > 7:
scope.write("CHANnel1:SCALe 5")
scope.write("CHANnel2:SCALe 5")
time.sleep(2*timeDelay) #Time delay
i = 0
while i <= freqSteps:
c1.frequency(freq) #Set CH1 (gen) frequency
scope.write("TIMebase:MAIN:SCAle "+ str(1/(3*freq))) #Set the horizontal scale of oscilloscope
time.sleep(timeDelay) #Time delay
CH1VMax[i] = scope.query("MEASure:ITEM? VMAX,CHANnel1") #Read and save CH1 VMax
CH2VMax[i] = scope.query("MEASure:ITEM? VMAX,CHANnel2") #Read and save CH2 Vmax
freqValues[i] = freq; #Save actual frequency
freq = freq + freqInc #Increment frequency
i = i + 1 #Increment index
db = (CH2VMax/CH1VMax) #Quotient of CH2VMax and CH1VMax (used to compute dB)
db = 20*numpy.log10(db) #Compute dB
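#Sanity check (illustrative): CH2VMax/CH1VMax = 1 -> 0 dB, 0.5 -> about -6.02 dB,
#and 1/sqrt(2) -> about -3 dB, the usual cutoff criterion on a Bode plot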
plt.plot(freqValues,db) #Graph data
plt.xlabel('f')
plt.ylabel('dB')
plt.title('Bode Plot')
plt.grid()
plt.show()
scope.close() #Stop communication with oscilloscope
|
{"hexsha": "8efa3dde410aff4aef7f0496146f4120ec706211", "size": 3817, "ext": "py", "lang": "Python", "max_stars_repo_path": "BodePlot.py", "max_stars_repo_name": "ailr16/BodePlot-DS1054Z", "max_stars_repo_head_hexsha": "1bb77b92b0c7249499e3d0748fff551210f5b81c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-01T05:11:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T01:58:08.000Z", "max_issues_repo_path": "BodePlot.py", "max_issues_repo_name": "ailr16/BodePlot-DS1054Z", "max_issues_repo_head_hexsha": "1bb77b92b0c7249499e3d0748fff551210f5b81c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BodePlot.py", "max_forks_repo_name": "ailr16/BodePlot-DS1054Z", "max_forks_repo_head_hexsha": "1bb77b92b0c7249499e3d0748fff551210f5b81c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-24T16:31:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T16:31:14.000Z", "avg_line_length": 35.0183486239, "max_line_length": 125, "alphanum_fraction": 0.7123395337, "include": true, "reason": "import numpy", "num_tokens": 1137}
|
#Author : Dhaval Harish Sharma
#Red ID : 824654344
#Assignment 3, Question A and B, Using user defined edge detection
"""Finding the edges in an image using user defined edge detection and changing the colors
of edges of different objects. After that, adding salt and pepper noise to the image,
again applying edge detection algorithm and then removing the noise using median filter."""
#Importing the required libraries
import skimage.io as io
import math
import numpy as np
import matplotlib.pyplot as plt
import colorsys
#Initializing the input image
in_img = io.imread("pepper.jpg")
height = in_img.shape[0]
width = in_img.shape[1]
#Question A begins!
#Defining the convolution function
def convolution(h, w, window, in_img, out_img):
sum_of_elem = 0
for i in range(3):
for j in range(3):
sum_of_elem = sum_of_elem + (np.average(in_img[h - 1 + i][w - 1 + j]) * window[i][j])
out_img[h][w] = sum_of_elem
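#Illustrative example: applying the Sobel x-kernel [[-1,0,1],[-2,0,2],[-1,0,1]]
#to a patch whose left column is 0 and right column is 255 gives a raw response
#of 0*(-1-2-1) + 255*(1+2+1) = 1020 (values above 255 wrap in the uint8 output)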
def sobel_edge_detection(in_img):
grad_x = np.zeros(shape = (height, width), dtype = np.uint8)
grad_y = np.zeros(shape = (height, width), dtype = np.uint8)
magnitude = np.zeros(shape = (height, width), dtype = np.uint8)
edge_img_1 = np.zeros(shape = (height, width, 3), dtype = np.uint8)
#Output image with x gradient
for i in range(1, height - 1):
for j in range(1, width - 1):
convolution(i, j, [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], in_img, grad_x)
#Thresholding the image
for i in range(height):
for j in range(width):
if grad_x[i][j] < 64:
grad_x[i][j] = 255
# Output image with y gradient
for i in range(1, height - 1):
for j in range(1, width - 1):
convolution(i, j, [[-1, -2, -1], [0, 0, 0], [1, 2, 1]], in_img, grad_y)
#Thresholding the image
for i in range(height):
for j in range(width):
if grad_y[i][j] < 64:
grad_y[i][j] = 255
#Output image with magnitude
for i in range(1, height - 1):
for j in range(1, width - 1):
magnitude[i][j] = math.sqrt((grad_x[i][j]) ** 2 + (grad_y[i][j]) ** 2)
#Thresholding the image
for i in range(height):
for j in range(width):
if magnitude[i][j] < 128:
magnitude[i][j] = 0
#Adding colors to the image
edges = []
for i in range(height):
for j in range(width):
if magnitude[i][j] != 0:
edge_img_1[i][j] = in_img[i][j]
edges.append(edge_img_1[i][j])
#Finding the mean and standard deviation of all the rgb channels in the edges
edges = np.array(edges)
mean = np.mean(edges, axis = 0)
std_dev = np.std(edges, axis = 0)
#Changing the color of the found edges to the respective colors in the question
for i in range(height):
for j in range(width):
if magnitude[i][j] != 0:
if edge_img_1[i][j][0] > (mean[0] - std_dev[0]) and edge_img_1[i][j][1] < mean[1] and edge_img_1[i][j][2] < mean[2]:
edge_img_1[i][j][0] = 0
edge_img_1[i][j][1] = 255
edge_img_1[i][j][2] = 0
elif edge_img_1[i][j][1] > (mean[1] - std_dev[1]) and edge_img_1[i][j][0] < mean[0] and edge_img_1[i][j][2] < mean[2]:
edge_img_1[i][j][0] = 0
edge_img_1[i][j][1] = 0
edge_img_1[i][j][2] = 255
elif edge_img_1[i][j][2] > (mean[2] - std_dev[2]) and edge_img_1[i][j][0] < mean[0] and edge_img_1[i][j][1] < mean[1]:
edge_img_1[i][j][0] = 255
edge_img_1[i][j][1] = 0
edge_img_1[i][j][2] = 0
elif edge_img_1[i][j][0] > (mean[0] - std_dev[0]) and edge_img_1[i][j][1] > (mean[1] - std_dev[1]) and edge_img_1[i][j][2] < mean[2]:
edge_img_1[i][j][0] = 0
edge_img_1[i][j][1] = 255
edge_img_1[i][j][2] = 255
elif edge_img_1[i][j][0] < mean[0] and edge_img_1[i][j][1] > (mean[1] - std_dev[1]) and edge_img_1[i][j][2] > (mean[2] - std_dev[2]):
edge_img_1[i][j][0] = 255
edge_img_1[i][j][1] = 0
edge_img_1[i][j][2] = 255
elif edge_img_1[i][j][0] > (mean[0] - std_dev[0]) and edge_img_1[i][j][1] < mean[1] and edge_img_1[i][j][2] > (mean[2] - std_dev[2]):
edge_img_1[i][j][0] = 255
edge_img_1[i][j][1] = 255
edge_img_1[i][j][2] = 0
else:
edge_img_1[i][j][0] = 255
edge_img_1[i][j][1] = 255
edge_img_1[i][j][2] = 255
return edge_img_1
#Finding edges using sobel_edge_detecton
edge_img_1 = sobel_edge_detection(in_img)
#Question A ends!
#Question B begins!
#Adding salt and pepper noise in the image
def salt_pepper(no_of_sp):
for iteration in range(no_of_sp):
x_coord = np.random.randint(0, height)
y_coord = np.random.randint(0, width)
s_p_img[x_coord][y_coord] = np.random.choice([0, 255])
s_p_img = np.copy(in_img)
no_of_sp = int(0.2 * height * width)
salt_pepper(no_of_sp)
#Detecting the edges in the noisy image, again with the Sobel edge detection
edge_img_2 = sobel_edge_detection(s_p_img)
#Initializing the output image and applying median filter to the image
filt_img = np.zeros(shape = (height, width, 3), dtype = np.uint8)
def med_filt(h, w):
win_elem = []
for i in range(5):
for j in range(5):
            win_elem.append(s_p_img[h - 2 + i][w - 2 + j]) #center the 5x5 window on (h, w)
win_elem.sort(key=lambda rgb: colorsys.rgb_to_hsv(*rgb))
filt_img[h][w] = win_elem[12]
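#The 25 window pixels are sorted by their HSV representation (via colorsys) and
#the middle element (index 12 of 25) is taken as the median, which suppresses
#salt-and-pepper outliers while preserving edges better than a mean filter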
#Loop for traversing through the input image
for i in range(3, height - 3):
for j in range(3, width - 3):
med_filt(i, j)
#Question B ends!
#Printing the output image
fig, ax = plt.subplots(nrows = 2, ncols = 2)
ax[0][0].imshow(in_img)
ax[0][1].imshow(edge_img_1)
ax[1][0].imshow(s_p_img)
ax[1][1].imshow(edge_img_2, cmap = 'gray')
plt.show()
|
{"hexsha": "47519676574cf6ac5b0b1998bc854e9fc0373fee", "size": 6250, "ext": "py", "lang": "Python", "max_stars_repo_path": "Assignment 3/QuestionAB_Sobel.py", "max_stars_repo_name": "dhavalsharma97/Computer-Vision", "max_stars_repo_head_hexsha": "75b39c9e5adb2a1a54854ed487fe750ad316a8fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment 3/QuestionAB_Sobel.py", "max_issues_repo_name": "dhavalsharma97/Computer-Vision", "max_issues_repo_head_hexsha": "75b39c9e5adb2a1a54854ed487fe750ad316a8fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment 3/QuestionAB_Sobel.py", "max_forks_repo_name": "dhavalsharma97/Computer-Vision", "max_forks_repo_head_hexsha": "75b39c9e5adb2a1a54854ed487fe750ad316a8fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6506024096, "max_line_length": 149, "alphanum_fraction": 0.55184, "include": true, "reason": "import numpy", "num_tokens": 1916}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize']=12,9 # make the chart wider
import pycountry
df=pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-01-05/transit_cost.csv')
df.head()
df.info()
df.dropna(inplace=True) # Drop Rows with NaN Values
df.reset_index(drop=True) # After dropping rows you can Reset the Index
# Let's add full country names using the pycountry package
# pycountry.countries.lookup('in').name
def f(row):
try:
return pycountry.countries.lookup(row.country).name
except:
return 'Not found'
def ff(row):
try:
return pycountry.countries.lookup(row.country).alpha_3
except:
return 'Not found'
df.loc[(df.country=='UK'),'country']='GB' # map UK to GB, since 'UK' is not recognized as a country code by pycountry
df['country_full'] = df.apply(f, axis=1) # call the function to get country using pycountry
df['country_alpha_3'] = df.apply(ff, axis=1) # call the function to get country using pycountry
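# Illustrative lookups (relying on pycountry's bundled ISO 3166 data):
# pycountry.countries.lookup('IN').name -> 'India'
# pycountry.countries.lookup('IN').alpha_3 -> 'IND'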
# now lets summarize the data
# view 0: number of train lines by country
df.groupby('country_full').count()[['line']].plot(kind='bar')
# view 1: number of train lines by country - excluding china
df[~df.country.isin(['CN'])].groupby('country').count()[['line']].plot(kind='bar')
# view 2: number of train lines by country, bar chart will be stacked with cities of the countries
df[~df.country.isin(['CN','IN'])].groupby(['country','city']).count()[['line']].unstack('city').plot(kind='bar',stacked=True)
# view 3: pivot by country x start year showing number of lines
# df[df.country.isin(['CN','IN'])]\
df\
.groupby(['country','start_year']).count()[['line']]\
.reset_index()\
.pivot(index='start_year', columns='country', values='line')\
.plot()
df2 = df[df.country=='IN']\
[['city','line','start_year','end_year']]\
.dropna()\
.set_index(['city','line'])
def fff(row, yr):
if ( yr >= int(row[0]) and yr <= int(row[1]) ):
return 1
else:
return 0
# new = s.apply(lambda num : num + 5)
#df2['2009'] = df2[['start_year','end_year']].apply(fff, axis=1)
for i in range( ( int(df2.start_year.min()) - 2 ) , ( int(df2.end_year.max()) + 2 ) ):
df2[str(i)] = df2.apply(fff, yr=i, axis=1) # we can pass additional values to the function by specifying them after the function name
df2.drop(['start_year','end_year'], axis=1, inplace=True) # drop start and end year
df2.reset_index(inplace=True)
years = list(range(2004,2028))
years = list(map(str, years))
df2 = pd.melt(df2, id_vars=['city','line'], value_vars=years, var_name='year', value_name='track_exists')
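# melt reshapes wide to long: the year columns '2004'..'2027' collapse into
# (year, track_exists) pairs, one row per (city, line, year) combination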
def ffff(row):
return row[0]+" - "+row[1]
df2['city_line'] = df2.apply(ffff, axis=1)
df2.drop(['city','line'],axis=1, inplace=True)
import altair as alt
# Table Bubble Plot (Github Punch Card)
c1=alt.Chart(df2).mark_circle().encode(
y=alt.Y('city_line:O', axis=alt.Axis(title='India: City + Line (Project) Name')),
x=alt.X('year:O', axis=alt.Axis(title='Project Duration (Start year to End year)')),
size='track_exists:Q'
).properties(
height=600,
width=350
)
# chart for hcat
df3 = df[df.country=='IN']\
[['city','line','cost_km_millions','stations','length']]\
.dropna()
def ffff(row):
return row[0]+" - "+row[1]
df3['city_line'] = df3.apply(ffff, axis=1)
df3.drop(['city','line'],axis=1, inplace=True)
chart2 = alt.Chart(df3).mark_bar().encode(
y=alt.Y('city_line:O', axis=alt.Axis(labels=False)),
x=alt.X('cost_km_millions:Q', axis=alt.Axis(format='$~s', title='Cost/Km (USD Mn)'))
# y=alt.Y('petalWidth:Q', bin=alt.Bin(maxbins=30)),
# color='species:N'
).properties(
height=600,
width=100
)
chart3 = alt.Chart(df3).mark_bar().encode(
y=alt.Y('city_line:O', axis=alt.Axis(labels=False)),
x=alt.X('stations:Q', axis=alt.Axis(format='~s', title='Number of Stations'))
# y=alt.Y('petalWidth:Q', bin=alt.Bin(maxbins=30)),
# color='species:N'
).properties(
height=600,
width=100
)
chart4 = alt.Chart(df3).mark_bar().encode(
y=alt.Y('city_line:O', axis=alt.Axis(labels=False)),
x=alt.X('length:Q', axis=alt.Axis(format='~s', title='Line Length (Km)'))
# y=alt.Y('petalWidth:Q', bin=alt.Bin(maxbins=30)),
# color='species:N'
).properties(
height=600,
width=100
)
c1 | chart2 | chart3 | chart4
# length - Length of proposed line in km
# tunnel_per - Percent of line length completed
# tunnel - Tunnel length of line completed in km
# stations - Number of stations where passengers can board/leave
# cost_km_millions - Cost/km in millions of USD
def fffff(row):
return row[2]+" - "+row[3]
df['city_line'] = df.apply(fffff, axis=1)
df[df.country=='IN'][['city_line','start_year','length', 'tunnel_per', 'tunnel', 'stations', 'cost_km_millions']]
# by year -> cost per km for india, china and rest of the world
# segment the countries into india, china and rest-of-the-world
def in_cn_row(row):
if row[1]=='IN' or row[1]=='CN':
return row[1]
else:
return 'RoW'
df['in_cn_row']=df.apply(in_cn_row, axis=1)
# create a flag which shows whether the line has any tunnel (tunnel_per > 0)
def tunnel_or_not(row):
if float(row[8].strip('%')) > 0:
return 'Yes'
else:
return 'No'
df['tunnel_or_not'] = df.apply(tunnel_or_not, axis=1)
source = df.groupby(['in_cn_row','start_year','tunnel_or_not']).mean()[['cost_km_millions']].reset_index()
c2=alt.Chart(source).mark_line().encode(
x='start_year:O',
y='cost_km_millions:Q',
color='in_cn_row:O',
strokeDash='tunnel_or_not',
)
# Seems like the cost of lines with tunnels is slightly higher in India and China but significantly higher for rest of the world.
# by number of stations -> cost per km for india, china and rest of the world
source = df.groupby(['in_cn_row','stations','tunnel_or_not']).mean()[['cost_km_millions']].reset_index()
c3=alt.Chart(source).mark_line().encode(
x='stations:O',
y='cost_km_millions:Q',
color='in_cn_row:O',
strokeDash='tunnel_or_not',
)
c1 & c2
|
{"hexsha": "2e9d48ccedb8f178c4e72deaeb6cac316f6594db", "size": 6117, "ext": "py", "lang": "Python", "max_stars_repo_path": "TidyTuesday/20210105-transit-cost.py", "max_stars_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_stars_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-01-11T20:12:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T04:53:45.000Z", "max_issues_repo_path": "TidyTuesday/20210105-transit-cost.py", "max_issues_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_issues_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TidyTuesday/20210105-transit-cost.py", "max_forks_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_forks_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-30T19:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T19:15:46.000Z", "avg_line_length": 35.9823529412, "max_line_length": 137, "alphanum_fraction": 0.6665031878, "include": true, "reason": "import numpy", "num_tokens": 1800}
|
#! /usr/bin/env python3
import rospy
from geometry_msgs.msg import Point
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from tf import transformations
from std_srvs.srv import *
import math
import numpy as np
import matplotlib.pyplot as plt
room_center_found_ = True
active_ = False
current_position_ = Point()
yaw_ = 0
room_center_ = Point()
def clbk_odom(msg):
global current_position_, yaw_
# position
current_position_ = msg.pose.pose.position
# yaw
quaternion = (
msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w)
euler = transformations.euler_from_quaternion(quaternion)
yaw_ = euler[2]
def clbk_laser(msg):
# global regions_, laserPoints_
global laserPoints_
laserPoints_ = []
corners = []
# regions = {
# 'right': [],
# 'fright': [],
# 'front': [],
# 'fleft': [],
# 'left': [],
# }
i =0
while (i<len(msg.ranges)):
laserPoints_.append([msg.ranges[i], msg.ranges[i+1]])
# ang = math.atan2(msg.ranges[i+1], msg.ranges[i])
# if ang<=-1.256:
# regions["right"].append(findRange([msg.ranges[i], msg.ranges[i+1]]))
# elif ang>-1.256 and ang<=-0.4186:
# regions["fright"].append(findRange([msg.ranges[i], msg.ranges[i+1]]))
# elif ang>-0.4186 and ang<=0.4186:
# regions["front"].append(findRange([msg.ranges[i], msg.ranges[i+1]]))
# elif ang>0.4186 and ang<=1.256:
# regions["fleft"].append(findRange([msg.ranges[i], msg.ranges[i+1]]))
# elif ang>=1.256:
# regions["left"].append(findRange([msg.ranges[i], msg.ranges[i+1]]))
i+=3
ang_thresh = (60/180)*math.pi
sampling_ratio = 10
indexes = np.linspace(0, len(laserPoints_),int(len(laserPoints_)/sampling_ratio))
# print(indexes[0])
for i in range(1, len(indexes)-2):
if pointDist(laserPoints_[int(indexes[i])])<4:
prev_idx = math.ceil(indexes[i-1])
idx = int(indexes[i])
next_idx = math.floor(indexes[i+1])
# print(prev_idx, next_idx, len(laserPoints_))
# dist1 = findDist(laserPoints[i-1], laserPoints_[i])
ang1 = findSlope(laserPoints_[prev_idx], laserPoints_[idx])
# dist2 = findDist(laserPoints[i], laserPoints_[i+1])
ang2 = findSlope(laserPoints_[idx], laserPoints_[next_idx])
ang_diff = ang2-ang1
# print(ang1, ang2, ang_diff, ang_thresh)
if math.fabs(ang_diff)>=ang_thresh:
# corners.append(laserPoints_[prev_idx])
corners.append(laserPoints_[idx])
# corners.append(laserPoints_[next_idx])
# print(len(corners))
corners = np.array(corners)
laserPoints_ = np.array(laserPoints_)
plt.plot(0,0,"bo")
plt.plot(laserPoints_[:,0], laserPoints_[:,1], "yo")
plt.plot(corners[:,0], corners[:,1], "ro")
# plt.show()
# regions_ = {
    # 'right': 5 if len(regions["right"])==0 else min(min(regions["right"]), 5),
# 'fright': 5 if len(regions["fright"])==0 else min(min(regions["fright"]), 5),
# 'front': 5 if len(regions["front"])==0 else min(min(regions["front"]), 5),
# 'fleft': 5 if len(regions["fleft"])==0 else min(min(regions["fleft"]), 5),
# 'left': 5 if len(regions["left"])==0 else min(min(regions["left"]), 5),
# }
# print(regions_)
def findSlope(pt1, pt2):
return math.atan2(pt2[1]-pt1[1], pt2[0]-pt1[0])
def pointDist(pt):
return math.sqrt(pt[1]**2 + pt[0]**2)
def find_room_center(req):
global active_
active_ = req.data
res = SetBoolResponse()
res.success = True
res.message = 'Done'
return res
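# Example call (assuming a running roscore and this node; the service type is
# std_srvs/SetBool):
#   rosservice call /find_room_center "data: true"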
def main():
global room_center_, room_center_found_, active_
rospy.init_node('find_room_center')
# sub_laser = rospy.Subscriber('sim_ros_interface/scan', LaserScan, clbk_laser)
sub_odom = rospy.Subscriber('sim_ros_interface/odom', Odometry, clbk_odom)
desiredPosePub = rospy.Publisher('sim_ros_interface/desired_pose', Point, queue_size=1)
srv = rospy.Service('find_room_center', SetBool, find_room_center)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
        if not active_:
            rate.sleep()  # yield the CPU while the behaviour is inactive
            continue
else:
if room_center_found_:
room_center_.x = 0
room_center_.y = -0.5
room_center_.z = 0
desiredPosePub.publish(room_center_)
rate.sleep()
if __name__ == "__main__":
main()
|
{"hexsha": "d057e37e22c0a686d2a8145693dac71817586889", "size": 4681, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros_pkg/robot_function/scripts/find_room_center.py", "max_stars_repo_name": "Pallav1299/coppeliasim_ros", "max_stars_repo_head_hexsha": "3c4db53be7ea7d64c53c1d56066bb93dd212a476", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ros_pkg/robot_function/scripts/find_room_center.py", "max_issues_repo_name": "Pallav1299/coppeliasim_ros", "max_issues_repo_head_hexsha": "3c4db53be7ea7d64c53c1d56066bb93dd212a476", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ros_pkg/robot_function/scripts/find_room_center.py", "max_forks_repo_name": "Pallav1299/coppeliasim_ros", "max_forks_repo_head_hexsha": "3c4db53be7ea7d64c53c1d56066bb93dd212a476", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0616438356, "max_line_length": 91, "alphanum_fraction": 0.6015808588, "include": true, "reason": "import numpy", "num_tokens": 1297}
|
[STATEMENT]
lemma var_assign_eval [intro!]: "(X x, s(x:=n)) -|-> (n, s(x:=n))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (X x, s(x := n)) -|-> (n, s(x := n))
[PROOF STEP]
by (rule fun_upd_same [THEN subst]) fast
|
{"llama_tokens": 108, "file": null, "length": 1}
|
# Modified from https://github.com/MIC-DKFZ/nnunet
import pickle
import torch
import tensorboardX
import numpy as np
from collections import OrderedDict
import SimpleITK as sitk
def pickle_load(in_file):
with open(in_file, "rb") as opened_file:
return pickle.load(opened_file)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class Logger(object):
def __init__(self, model_name, header):
self.header = header
self.writer = tensorboardX.SummaryWriter("./runs/"+model_name.split("/")[-1].split(".h5")[0])
    def __del__(self):
self.writer.close()
def log(self, phase, values):
epoch = values['epoch']
for col in self.header[1:]:
self.writer.add_scalar(phase+"/"+col, float(values[col]), int(epoch))
def load_value_file(file_path):
with open(file_path, 'r') as input_file:
value = float(input_file.read().rstrip('\n\r'))
return value
def combine_labels(labels):
"""
Combine wt, tc, et into WT; tc, et into TC; et into ET
:param labels: torch.Tensor of size (bs, 3, ?,?,?); ? is the crop size
:return:
"""
whole_tumor = labels[:, :3, :, :, :].sum(1) # could have 2 or 3
tumor_core = labels[:, 1:3, :, :, :].sum(1)
enhanced_tumor = labels[:, 2:3, :, :, :].sum(1)
whole_tumor[whole_tumor != 0] = 1
tumor_core[tumor_core != 0] = 1
enhanced_tumor[enhanced_tumor != 0] = 1
return whole_tumor, tumor_core, enhanced_tumor # (bs, ?, ?, ?)
def calculate_accuracy(outputs, targets):
return dice_coefficient(outputs, targets)
def dice_coefficient(outputs, targets, threshold=0.5, eps=1e-8):  # compute three Dice scores, one per label; do not use soft Dice
# batch_size = targets.size(0)
y_pred = outputs[:, :3, :, :, :] # targets[0,:3,:,:,:]
y_truth = targets[:, :3, :, :, :]
y_pred = y_pred > threshold
y_pred = y_pred.type(torch.FloatTensor)
wt_pred, tc_pred, et_pred = combine_labels(y_pred)
wt_truth, tc_truth, et_truth = combine_labels(y_truth)
res = dict()
res["dice_wt"] = dice_coefficient_single_label(wt_pred, wt_truth, eps)
res["dice_tc"] = dice_coefficient_single_label(tc_pred, tc_truth, eps)
res["dice_et"] = dice_coefficient_single_label(et_pred, et_truth, eps)
return res
def calculate_accuracy_singleLabel(outputs, targets, threshold=0.5, eps=1e-8):
y_pred = outputs[:, 0, :, :, :] # targets[0,:3,:,:,:]
y_truth = targets[:, 0, :, :, :]
y_pred = y_pred > threshold
y_pred = y_pred.type(torch.FloatTensor)
res = dice_coefficient_single_label(y_pred, y_truth, eps)
return res
def dice_coefficient_single_label(y_pred, y_truth, eps):
# batch_size = y_pred.size(0)
    intersection = torch.sum(torch.mul(y_pred, y_truth), dim=(-3, -2, -1)) + eps / 2  # sum over the spatial dims -> (bs,)
    union = torch.sum(y_pred, dim=(-3, -2, -1)) + torch.sum(y_truth, dim=(-3, -2, -1)) + eps  # (bs,)
dice = 2 * intersection / union
return dice.mean()
# return dice / batch_size
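# Worked example: if y_pred equals y_truth exactly, intersection = |A| + eps/2
# and union = 2|A| + eps, so dice = (2|A| + eps) / (2|A| + eps) = 1; with no
# overlap at all, dice = eps / (|A| + |B| + eps), which is close to 0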
def load_old_model(model, optimizer, saved_model_path, data_paralell=True):
print("Constructing model from saved file... ")
checkpoint = torch.load(saved_model_path, map_location='cpu')
epoch = checkpoint["epoch"]
if data_paralell:
state_dict = OrderedDict()
for k, v in checkpoint["state_dict"].items(): # remove "module."
if "module." in k:
node_name = k[7:]
else:
node_name = k
state_dict[node_name] = v
model.load_state_dict(state_dict)
else:
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
return model, epoch, optimizer
def combine_labels_predicting(output_array):
"""
# (1, 3, 240, 240, 155)
:param output_array: output of the model containing 3 seperated labels (3 channels)
:return: res_array: conbined labels (1 channel)
"""
shape = output_array.shape[-3:]
if len(output_array.shape) == 5:
bs = output_array.shape[0]
res_array = np.zeros((bs, ) + shape)
        res_array[output_array[:, 0, :, :, :] == 1] = 2  # whole-tumor channel -> label 2
        res_array[output_array[:, 1, :, :, :] == 1] = 1  # tumor-core channel -> label 1
        res_array[output_array[:, 2, :, :, :] == 1] = 4  # enhancing-tumor channel -> label 4
elif len(output_array.shape) == 4:
res_array = np.zeros(shape)
res_array[output_array[0, :, :, :] == 1] = 2
res_array[output_array[1, :, :, :] == 1] = 1
res_array[output_array[2, :, :, :] == 1] = 4
return res_array
def dim_recovery(img_array, orig_shape=(155, 240, 240)):
"""
used when doing inference
:param img_array:
:param orig_shape:
:return:
"""
crop_shape = np.array(img_array.shape[-3:])
center = np.array(orig_shape) // 2
lower_limits = center - crop_shape // 2
upper_limits = center + crop_shape // 2
if len(img_array.shape) == 5:
bs, num_labels = img_array.shape[:2]
res_array = np.zeros((bs, num_labels) + orig_shape)
res_array[:, :, lower_limits[0]: upper_limits[0],
lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]] = img_array
if len(img_array.shape) == 4:
num_labels = img_array.shape[0]
res_array = np.zeros((num_labels, ) + orig_shape)
res_array[:, lower_limits[0]: upper_limits[0],
lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]] = img_array
if len(img_array.shape) == 3:
res_array = np.zeros(orig_shape)
res_array[lower_limits[0]: upper_limits[0],
lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]] = img_array
return res_array
def convert_stik_to_nparray(gz_path):
sitkImage = sitk.ReadImage(gz_path)
nparray = sitk.GetArrayFromImage(sitkImage)
return nparray
def poly_lr_scheduler(epoch, num_epochs=300, power=0.9):
return (1 - epoch/num_epochs)**power
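# Usage sketch (an assumption, not part of this codebase): the signature matches
# torch.optim.lr_scheduler.LambdaLR's lr_lambda argument, e.g.
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=poly_lr_scheduler)
# which multiplies the base LR by (1 - epoch/num_epochs)**0.9 every epoch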
|
{"hexsha": "9d1f0ca447a3da5ce6653365b13029d9c7eb054d", "size": 6262, "ext": "py", "lang": "Python", "max_stars_repo_path": "postprocess/utils.py", "max_stars_repo_name": "BruceResearch/BiTr-Unet", "max_stars_repo_head_hexsha": "d1f5ad5df7ff5e65c7797bfafd51a782f6114af3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-01T16:00:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T04:05:05.000Z", "max_issues_repo_path": "postprocess/utils.py", "max_issues_repo_name": "JustaTinyDot/BiTr-Unet", "max_issues_repo_head_hexsha": "52c1a68a9fd1cc7968e43d3f89ef700bcd71d60d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-23T05:05:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T05:43:10.000Z", "max_forks_repo_path": "postprocess/utils.py", "max_forks_repo_name": "BruceResearch/BiTr-Unet", "max_forks_repo_head_hexsha": "d1f5ad5df7ff5e65c7797bfafd51a782f6114af3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7853403141, "max_line_length": 103, "alphanum_fraction": 0.6223251357, "include": true, "reason": "import numpy", "num_tokens": 1749}
|
#!/usr/bin/env python
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import copy
import random
import sift
class Calibrate():
    def main(self):
# get image from webcam for now just read it in
img = cv2.imread("../images/saved.jpg", 1)
# Select crop region
crop_region = User_ROI_Selection(img)
crop_region.user_selection()
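# User_ROI_Selection is not defined in this file or its imports; a minimal
# sketch of what it might look like (hypothetical, built on cv2.selectROI):
#
# class User_ROI_Selection:
#     def __init__(self, img):
#         self.img = img
#         self.roi = None  # (x, y, w, h) once selected
#     def user_selection(self):
#         # opens an interactive window; drag a box, press ENTER/SPACE to confirm
#         self.roi = cv2.selectROI("crop", self.img)
#         cv2.destroyWindow("crop")
#         return self.roi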
if __name__ == '__main__':
cal = Calibrate()
cal.main()
|
{"hexsha": "44b885f1b95edea64068ada3c7ba02614e68ea05", "size": 431, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pong_vision/src/calibrate.py", "max_stars_repo_name": "kekraft/golden_eye", "max_stars_repo_head_hexsha": "a857d9c31645451b09f68a148e996dfa213322ec", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pong_vision/src/calibrate.py", "max_issues_repo_name": "kekraft/golden_eye", "max_issues_repo_head_hexsha": "a857d9c31645451b09f68a148e996dfa213322ec", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pong_vision/src/calibrate.py", "max_forks_repo_name": "kekraft/golden_eye", "max_forks_repo_head_hexsha": "a857d9c31645451b09f68a148e996dfa213322ec", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.3666666667, "max_line_length": 49, "alphanum_fraction": 0.6983758701, "include": true, "reason": "import numpy", "num_tokens": 111}
|
import cv2
import numpy as np
class DrawingClass(object):
def __init__(self):
self.draw_command ='None'
self.frame_count = 0
def drawing(self, frame, fps, num_egg, htc_egg, state):
cv2.putText(frame, 'FPS: {:.2f}'.format(fps),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), thickness=2)
cv2.putText(frame, 'Possessed EGG: {}'.format(num_egg),
(10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
cv2.putText(frame, 'Hatched EGG: {}'.format(htc_egg),
(10, 130), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
cv2.putText(frame, 'State: {}'.format(state),
(250, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
return frame
def draw_controler(self, frame, command):
#print('draw',command)
        valid_commands = ('LX MIN', 'LX MAX', 'LY MIN', 'LY MAX',
                          'Button A', 'Button B', 'Button X', 'Button Y',
                          'HAT TOP', 'HAT RIGHT', 'HAT BOTTOM', 'HAT LEFT',
                          'Button START', 'STOP')
        if command in valid_commands:
            self.draw_command = command
#stick
if self.draw_command =='LX MIN' or self.draw_command =='HAT LEFT':
cv2.circle(frame, (970, 490), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LX MAX' or self.draw_command =='HAT RIGHT':
cv2.circle(frame, (1030, 490), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LY MIN' or self.draw_command =='HAT TOP':
cv2.circle(frame, (1000, 460), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LY MAX' or self.draw_command =='HAT BOTTOM':
cv2.circle(frame, (1000, 520), 20, (0, 0, 255), thickness=-1)
else:
cv2.circle(frame, (1000, 490), 20, (0, 0, 255), thickness=-1)
cv2.circle(frame, (1000, 490), 50, (0, 0, 255), thickness=2)
#button
if self.draw_command =='Button X':
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button B':
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button Y':
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button A':
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button START':
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=-1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
return frame
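# A minimal usage sketch (illustrative; the frame source and values are
# assumptions, and the fixed overlay coordinates assume a frame of at least
# 1280x720):
#   drawer = DrawingClass()
#   frame = drawer.drawing(frame, fps=30.0, num_egg=3, htc_egg=1, state='RUN')
#   frame = drawer.draw_controler(frame, 'Button A')
#   cv2.imshow('overlay', frame)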
|
{"hexsha": "1cd41a80f04199f3be841ce38a8ac4428c343606", "size": 6620, "ext": "py", "lang": "Python", "max_stars_repo_path": "show/drawing.py", "max_stars_repo_name": "nohamanona/poke-auto-fuka", "max_stars_repo_head_hexsha": "9d355694efa0168738795afb403fc89264dcaeae", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-12-31T18:38:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-07T08:57:17.000Z", "max_issues_repo_path": "show/drawing.py", "max_issues_repo_name": "nohamanona/poke-auto-fuka", "max_issues_repo_head_hexsha": "9d355694efa0168738795afb403fc89264dcaeae", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "show/drawing.py", "max_forks_repo_name": "nohamanona/poke-auto-fuka", "max_forks_repo_head_hexsha": "9d355694efa0168738795afb403fc89264dcaeae", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-03T08:14:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-03T08:14:47.000Z", "avg_line_length": 51.3178294574, "max_line_length": 109, "alphanum_fraction": 0.5404833837, "include": true, "reason": "import numpy", "num_tokens": 2019}
|
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct DedicatedHostAvailableCapacity <: SwaggerModel
allocatableVMs::Any # spec type: Union{ Nothing, Vector{DedicatedHostAllocatableVM} } # spec name: allocatableVMs
function DedicatedHostAvailableCapacity(;allocatableVMs=nothing)
o = new()
validate_property(DedicatedHostAvailableCapacity, Symbol("allocatableVMs"), allocatableVMs)
setfield!(o, Symbol("allocatableVMs"), allocatableVMs)
o
end
end # type DedicatedHostAvailableCapacity
const _property_map_DedicatedHostAvailableCapacity = Dict{Symbol,Symbol}(Symbol("allocatableVMs")=>Symbol("allocatableVMs"))
const _property_types_DedicatedHostAvailableCapacity = Dict{Symbol,String}(Symbol("allocatableVMs")=>"Vector{DedicatedHostAllocatableVM}")
Base.propertynames(::Type{ DedicatedHostAvailableCapacity }) = collect(keys(_property_map_DedicatedHostAvailableCapacity))
Swagger.property_type(::Type{ DedicatedHostAvailableCapacity }, name::Symbol) = Union{Nothing,eval(Base.Meta.parse(_property_types_DedicatedHostAvailableCapacity[name]))}
Swagger.field_name(::Type{ DedicatedHostAvailableCapacity }, property_name::Symbol) = _property_map_DedicatedHostAvailableCapacity[property_name]
function check_required(o::DedicatedHostAvailableCapacity)
true
end
function validate_property(::Type{ DedicatedHostAvailableCapacity }, name::Symbol, val)
end
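# Usage sketch (hypothetical values; the fields of DedicatedHostAllocatableVM are
# defined elsewhere in this package, so only the default and empty forms are shown):
#   cap = DedicatedHostAvailableCapacity()  # allocatableVMs == nothing
#   cap = DedicatedHostAvailableCapacity(; allocatableVMs=DedicatedHostAllocatableVM[])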
|
{"hexsha": "59490f850bf8b8c0988efb1cd6fa2103bb5f291e", "size": 1505, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Compute/ComputeManagementClient/model_DedicatedHostAvailableCapacity.jl", "max_stars_repo_name": "JuliaComputing/Azure.jl", "max_stars_repo_head_hexsha": "0e2b55e7602352d86bdf3579e547a74a9b5f44f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-12-18T16:23:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T07:39:13.000Z", "max_issues_repo_path": "src/Compute/ComputeManagementClient/model_DedicatedHostAvailableCapacity.jl", "max_issues_repo_name": "JuliaComputing/Azure.jl", "max_issues_repo_head_hexsha": "0e2b55e7602352d86bdf3579e547a74a9b5f44f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-05-08T19:57:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-11T11:20:41.000Z", "max_forks_repo_path": "src/Compute/ComputeManagementClient/model_DedicatedHostAvailableCapacity.jl", "max_forks_repo_name": "JuliaComputing/Azure.jl", "max_forks_repo_head_hexsha": "0e2b55e7602352d86bdf3579e547a74a9b5f44f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-07T10:26:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T13:04:47.000Z", "avg_line_length": 53.75, "max_line_length": 170, "alphanum_fraction": 0.8119601329, "num_tokens": 336}
|
import scrapy
import numpy
import pandas as pd
import csv
from arania_noticias.items import ComercioNew
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst
class SpiderNews(scrapy.Spider):
name = 'news'
urls = []
with open('urls.csv', 'r', encoding='utf-8') as urls_csv:
csv_reader = csv.reader(urls_csv, delimiter=',')
for row in csv_reader:
urls.append(row[1])
def start_requests(self):
for url in self.urls:
yield scrapy.Request(url = url)
def parse(self, response):
date_selector = response.css('div.date')
info_loader = ItemLoader(
item = ComercioNew(),
selector = date_selector
)
info_loader.default_output_processor = TakeFirst()
info_loader.add_css(
'Date',
'div::text'
)
title_selector = response.css('div.title')
info_loader.selector = title_selector
info_loader.add_css(
'Title',
'h1::text'
)
views_selector = response.css('div.social-nav')
info_loader.selector = views_selector
info_loader.add_css(
'Views',
'div.pageviews::text'
)
reactions_selector = response.css('div.rating>div.score')
reactions_names = ['Indignado', 'Triste', 'Indiferente', 'Sorprendido', 'Contento']
for i in range(0,5):
info_loader.selector = reactions_selector[i]
info_loader.add_css(
reactions_names[i],
'.number::text'
)
editor_selector = response.css('div.right-col>div.info')
info_loader.selector = editor_selector
info_loader.add_css(
'Editor',
'div.signature>div::text'
)
info_selector = response.css('div.breadcrumbs')
info_loader.selector = info_selector
info_loader.add_css(
'Category',
'a::text'
)
info_loader.add_css(
'Tag',
'a.highlighted::text'
)
yield info_loader.load_item()
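# To run the spider (assuming a standard Scrapy project layout with urls.csv in
# the working directory):
#   scrapy crawl news -o news.json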
|
{"hexsha": "5508d6a5b28d0544913a0f2bae7ca4dfc01d08a2", "size": 2204, "ext": "py", "lang": "Python", "max_stars_repo_path": "proyecto-2b/ScrapyDataset/scrapy/arania_noticias/arania_noticias/spiders/spider_news.py", "max_stars_repo_name": "2020-A-JS-GR1/py-velasquez-revelo-jefferson-david", "max_stars_repo_head_hexsha": "21733aef68c234f40e966c43ee3bf815b369a76f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "proyecto-2b/ScrapyDataset/scrapy/arania_noticias/arania_noticias/spiders/spider_news.py", "max_issues_repo_name": "2020-A-JS-GR1/py-velasquez-revelo-jefferson-david", "max_issues_repo_head_hexsha": "21733aef68c234f40e966c43ee3bf815b369a76f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proyecto-2b/ScrapyDataset/scrapy/arania_noticias/arania_noticias/spiders/spider_news.py", "max_forks_repo_name": "2020-A-JS-GR1/py-velasquez-revelo-jefferson-david", "max_forks_repo_head_hexsha": "21733aef68c234f40e966c43ee3bf815b369a76f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6078431373, "max_line_length": 91, "alphanum_fraction": 0.5617059891, "include": true, "reason": "import numpy", "num_tokens": 464}
|
#!/usr/bin/python3
import flask
from flask import Flask, jsonify, request
from waitress import serve
import datetime
import os
import json
import io
import cld_steiner as process_cld
from PIL import Image
from pathutils import remove_consecutive_duplicates, resample_path, smooth_path
import numpy as np
import sys
def log(*args):
print(*args)
sys.stdout.flush()
def save_to_disk(data, directory, extension):
now = datetime.datetime.now()
os.makedirs(directory, exist_ok=True)
fn = now.replace(microsecond=0).isoformat() + extension
fn = fn.replace(':', '-')
fn = os.path.join(directory, fn)
with open(fn, 'wb') as f:
f.write(data)
app = Flask(__name__)
@app.route('/', methods=['POST'])
def index():
log('received request')
data = request.get_data()
log('saving image', len(data), 'bytes')
save_to_disk(data, 'images', '.jpg')
log('sending response')
# bypass
# with open('sample.json', 'r') as f:
# ret = json.load(f)
# return jsonify(ret)
img_bytes = io.BytesIO(request.get_data())
img = Image.open(img_bytes)
try:
lines = process_cld.rgb2line_steiner(img)
path = np.asarray(lines['coordinates'])
path = remove_consecutive_duplicates(path)
path = resample_path(path, 0.2) # 1. resample
path = smooth_path(path, 3) # 2. smooth
path = path[::10] # 3. decimate
lines['coordinates'] = path.tolist()
# save to disk
with open('result.json', 'w') as f:
json.dump(lines, f)
return jsonify(lines)
    except Exception as exc:
        log('error', exc)
with open('error.json') as f:
return jsonify(json.load(f))
serve(app, listen='*:8080')
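# Example request (assuming the server is reachable on port 8080):
#   curl -X POST --data-binary @photo.jpg http://localhost:8080/
# The response is the processed path as JSON, or the contents of error.json on failure.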
|
{"hexsha": "4a74035cce8a8c3ab658141ba60372210bfce4f7", "size": 1741, "ext": "py", "lang": "Python", "max_stars_repo_path": "gce/server.py", "max_stars_repo_name": "kylemcdonald/bsp", "max_stars_repo_head_hexsha": "e33c71f5924bef61a15e2b87230ac27b8f8261aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-01T18:57:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-01T18:57:31.000Z", "max_issues_repo_path": "gce/server.py", "max_issues_repo_name": "kylemcdonald/bsp", "max_issues_repo_head_hexsha": "e33c71f5924bef61a15e2b87230ac27b8f8261aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-10T01:38:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-21T17:15:25.000Z", "max_forks_repo_path": "gce/server.py", "max_forks_repo_name": "kylemcdonald/bsp", "max_forks_repo_head_hexsha": "e33c71f5924bef61a15e2b87230ac27b8f8261aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-04T19:21:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T08:45:33.000Z", "avg_line_length": 24.8714285714, "max_line_length": 79, "alphanum_fraction": 0.6283744974, "include": true, "reason": "import numpy", "num_tokens": 438}
|
///////////////////////////////////////////////////////////////////////////////
//
// http://protoc.sourceforge.net/
//
// Copyright (C) 2013 Bjorn Reese <breese@users.sourceforge.net>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
// MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE AUTHORS AND
// CONTRIBUTORS ACCEPT NO RESPONSIBILITY IN ANY CONCEIVABLE MANNER.
//
///////////////////////////////////////////////////////////////////////////////
#include <boost/test/unit_test.hpp>
#include <sstream>
#include <boost/serialization/split_member.hpp>
#include <protoc/exceptions.hpp>
#include <protoc/transenc/detail/codes.hpp>
#include <protoc/transenc/stream_oarchive.hpp>
#include <protoc/transenc/vector_oarchive.hpp>
#include <protoc/transenc/string.hpp>
#include <protoc/transenc/vector.hpp>
#include <protoc/transenc/set.hpp>
#include <protoc/transenc/map.hpp>
#include <protoc/transenc/optional.hpp>
#include <protoc/serialization/nvp.hpp>
namespace format = protoc::transenc;
namespace detail = format::detail;
BOOST_AUTO_TEST_SUITE(transenc_oarchive_suite)
//-----------------------------------------------------------------------------
// Archive types
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_vector_oarchive)
{
std::vector<unsigned char> result;
format::vector_oarchive ar(result);
bool value = false;
ar << value;
unsigned char expected[] = { detail::code_false };
BOOST_REQUIRE_EQUAL_COLLECTIONS(result.begin(), result.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Basic types
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
char expected[] = { };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_false)
{
std::ostringstream result;
format::stream_oarchive ar(result);
bool value = false;
ar << value;
char expected[] = { detail::code_false };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_true)
{
std::ostringstream result;
format::stream_oarchive ar(result);
bool value = true;
ar << value;
char expected[] = { detail::code_true };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_false)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const bool value = false;
ar << value;
char expected[] = { detail::code_false };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_true)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const bool value = true;
ar << value;
char expected[] = { detail::code_true };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Integers
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_int_zero)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int value = 0;
ar << value;
char expected[] = { 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_int_zero)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const int value = 0;
ar << value;
char expected[] = { 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int value = 1;
ar << value;
char expected[] = { 0x01 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int_minus_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int value = -1;
ar << value;
char expected[] = { 0xFF };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int_minus_128)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int value = -128;
ar << value;
char expected[] = { detail::code_int8, 0x80 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int16)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int value = 1 << 8;
ar << value;
char expected[] = { detail::code_int16, 0x00, 0x01 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_int16)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const int value = 1 << 8;
ar << value;
char expected[] = { detail::code_int16, 0x00, 0x01 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int32)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int value = 1 << 16;
ar << value;
char expected[] = { detail::code_int32, 0x00, 0x00, 0x01, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_int32)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const int value = 1 << 16;
ar << value;
char expected[] = { detail::code_int32, 0x00, 0x00, 0x01, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int64)
{
std::ostringstream result;
format::stream_oarchive ar(result);
long long value = 1LL << 32;
ar << value;
char expected[] = { detail::code_int64, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_int64)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const long long value = 1LL << 32;
ar << value;
char expected[] = { detail::code_int64, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_int_all_types)
{
std::ostringstream result;
format::stream_oarchive ar(result);
int alpha = 1;
int bravo = 0x0100;
int charlie = 0x010000;
long long delta = 0x0100000000LL;
ar << alpha << bravo << charlie << delta;
char expected[] = { 0x01,
detail::code_int16, 0x00, 0x01,
detail::code_int32, 0x00, 0x00, 0x01, 0x00,
detail::code_int64, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Floating-point
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_float32_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
protoc::float32_t value = 1.0f;
ar << value;
char expected[] = { detail::code_float32, 0x00, 0x00, 0x80, 0x3F };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_float32_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const protoc::float32_t value = 1.0f;
ar << value;
char expected[] = { detail::code_float32, 0x00, 0x00, 0x80, 0x3F };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_float64_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
protoc::float64_t value = 1.0;
ar << value;
char expected[] = { detail::code_float64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_float64_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const protoc::float64_t value = 1.0;
ar << value;
char expected[] = { detail::code_float64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// String
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_string_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::string value("");
ar << value;
char expected[] = { detail::code_string_int8, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_string_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const std::string value("");
ar << value;
char expected[] = { detail::code_string_int8, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_string_a)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::string value("A");
ar << value;
char expected[] = { detail::code_string_int8, 0x01, 0x41 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_string_alpha)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::string value("ALPHA");
ar << value;
char expected[] = { detail::code_string_int8, 0x05, 0x41, 0x4C, 0x50, 0x48, 0x41 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_literal_alpha)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const char *value = "ALPHA";
ar << value;
char expected[] = { detail::code_string_int8, 0x05, 0x41, 0x4C, 0x50, 0x48, 0x41 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_literal_alpha_2)
{
std::ostringstream result;
format::stream_oarchive ar(result);
ar << "ALPHA";
char expected[] = { detail::code_string_int8, 0x05, 0x41, 0x4C, 0x50, 0x48, 0x41 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Pair
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_pair)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::pair<std::string, bool> value("A", true);
ar << value;
char expected[] = { detail::code_record_begin, detail::code_string_int8, 0x01, 0x41, detail::code_true, detail::code_record_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_pair)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const std::pair<std::string, bool> value("A", true);
ar << value;
char expected[] = { detail::code_record_begin, detail::code_string_int8, 0x01, 0x41, detail::code_true, detail::code_record_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Optional
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_optional)
{
std::ostringstream result;
format::stream_oarchive ar(result);
boost::optional<std::string> value("A");
ar << value;
char expected[] = { detail::code_string_int8, 0x01, 0x41 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_optional_null)
{
std::ostringstream result;
format::stream_oarchive ar(result);
boost::optional<std::string> value;
ar << value;
char expected[] = { detail::code_null };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_optional)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const boost::optional<std::string> value("A");
ar << value;
char expected[] = { detail::code_string_int8, 0x01, 0x41 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_const_optional_null)
{
std::ostringstream result;
format::stream_oarchive ar(result);
const boost::optional<std::string> value;
ar << value;
char expected[] = { detail::code_null };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Named value pair
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_nvp)
{
std::ostringstream result;
format::stream_oarchive out(result);
bool value = false;
out << boost::serialization::make_nvp("value", value);
char expected[] = { detail::code_false };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Container
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_vector_bool_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<bool> value;
ar << value;
char expected[] = { detail::code_array_begin, 0x00, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_vector_bool_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<bool> value;
value.push_back(true);
ar << value;
char expected[] = { detail::code_array_begin, 0x01, detail::code_true, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_vector_bool_two)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<bool> value;
value.push_back(true);
value.push_back(false);
ar << value;
char expected[] = { detail::code_array_begin, 0x02, detail::code_true, detail::code_false, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_set_int_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::set<int> value;
ar << value;
char expected[] = { detail::code_array_begin, detail::code_null, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_set_int_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::set<int> value;
value.insert(1);
ar << value;
char expected[] = { detail::code_array_begin, detail::code_null, 0x01, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_set_int_two)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::set<int> value;
value.insert(1);
value.insert(2);
ar << value;
char expected[] = { detail::code_array_begin, detail::code_null, 0x01, 0x02, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_map_bool_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::map<std::string, bool> value;
ar << value;
char expected[] = { detail::code_map_begin, detail::code_null, detail::code_map_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_map_bool_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::map<std::string, bool> value;
value["A"] = true;
ar << value;
char expected[] = { detail::code_map_begin, detail::code_null, detail::code_record_begin, detail::code_string_int8, 0x01, 0x41, detail::code_true, detail::code_record_end, detail::code_map_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_map_bool_two)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::map<std::string, bool> value;
value["A"] = true;
value["B"] = false;
ar << value;
char expected[] = { detail::code_map_begin, detail::code_null, detail::code_record_begin, detail::code_string_int8, 0x01, 0x41, detail::code_true, detail::code_record_end, detail::code_record_begin, detail::code_string_int8, 0x01, 0x42, detail::code_false, detail::code_record_end, detail::code_map_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Enum
//-----------------------------------------------------------------------------
#if 0 // FIXME
enum Number
{
one = 1
};
BOOST_AUTO_TEST_CASE(test_enum_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
enum Number value = one;
ar << value;
BOOST_REQUIRE_EQUAL(result.str().data(), "\xA3\x01");
}
#endif
//-----------------------------------------------------------------------------
// Struct
//-----------------------------------------------------------------------------
struct person
{
person(const std::string& name, int age)
: name(name),
age(age)
{}
template<typename T>
void serialize(T& archive, const unsigned int)
{
archive & name;
archive & age;
}
std::string name;
int age;
};
struct split_person
{
split_person(const std::string& name, int age)
: name(name),
age(age)
{}
template<typename T>
void load(T& archive, const unsigned int)
{
archive >> name;
archive >> age;
}
template<typename T>
void save(T& archive, const unsigned int) const
{
archive << name;
archive << age;
}
std::string name;
int age;
BOOST_SERIALIZATION_SPLIT_MEMBER()
};
BOOST_AUTO_TEST_CASE(test_struct_person)
{
std::ostringstream result;
format::stream_oarchive ar(result);
person value("KANT", 127);
ar << value;
char expected[] = { detail::code_record_begin, detail::code_string_int8, 0x04, 0x4B, 0x41, 0x4E, 0x54, 0x7F, detail::code_record_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_struct_split_person)
{
std::ostringstream result;
format::stream_oarchive ar(result);
split_person value("KANT", 127);
ar << value;
char expected[] = { detail::code_record_begin, detail::code_string_int8, 0x04, 0x4B, 0x41, 0x4E, 0x54, 0x7F, detail::code_record_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_vector_of_struct_person)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<person> persons;
persons.push_back(person("KANT", 127));
ar << persons;
char expected[] = { detail::code_array_begin, 0x01, detail::code_record_begin, detail::code_string_int8, 0x04, 0x4B, 0x41, 0x4E, 0x54, 0x7F, detail::code_record_end, detail::code_array_end };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
//-----------------------------------------------------------------------------
// Binary
//-----------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(test_binary_empty)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<unsigned char> value;
ar << value;
char expected[] = { detail::code_binary_int8, 0x00 };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_binary_one)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<unsigned char> value(1, 0xFF);
ar << value;
char expected[] = { detail::code_binary_int8, 0x01, 0xFF };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_CASE(test_binary_two)
{
std::ostringstream result;
format::stream_oarchive ar(result);
std::vector<unsigned char> value(2, 0xFF);
ar << value;
char expected[] = { detail::code_binary_int8, 0x02, 0xFF, 0xFF };
std::string got = result.str();
BOOST_REQUIRE_EQUAL_COLLECTIONS(got.begin(), got.end(),
expected, expected + sizeof(expected));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "07b435e24882607e8f7fe6ab6090e38231cefb3f", "size": 26099, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/transenc/oarchive_suite.cpp", "max_stars_repo_name": "skyformat99/protoc", "max_stars_repo_head_hexsha": "f0a72275c92bedc8492524cb98cc24c5821c4f11", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/transenc/oarchive_suite.cpp", "max_issues_repo_name": "skyformat99/protoc", "max_issues_repo_head_hexsha": "f0a72275c92bedc8492524cb98cc24c5821c4f11", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/transenc/oarchive_suite.cpp", "max_forks_repo_name": "skyformat99/protoc", "max_forks_repo_head_hexsha": "f0a72275c92bedc8492524cb98cc24c5821c4f11", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1812577065, "max_line_length": 309, "alphanum_fraction": 0.5862676731, "num_tokens": 5793}
|
(* Author: Dmitriy Traytel *)
header {* Normalization of WS1S Formulas *}
(*<*)
theory WS1S_Normalization
imports WS1S
begin
(*>*)
fun nNot where
"nNot (FNot \<phi>) = \<phi>"
| "nNot (FAnd \<phi>1 \<phi>2) = FOr (nNot \<phi>1) (nNot \<phi>2)"
| "nNot (FOr \<phi>1 \<phi>2) = FAnd (nNot \<phi>1) (nNot \<phi>2)"
| "nNot \<phi> = FNot \<phi>"
primrec norm where
"norm (FQ a m) = FQ a m"
| "norm (FLess m n) = FLess m n"
| "norm (FIn m M) = FIn m M"
| "norm (FOr \<phi> \<psi>) = FOr (norm \<phi>) (norm \<psi>)"
| "norm (FAnd \<phi> \<psi>) = FAnd (norm \<phi>) (norm \<psi>)"
| "norm (FNot \<phi>) = nNot (norm \<phi>)"
| "norm (FExists \<phi>) = FExists (norm \<phi>)"
| "norm (FEXISTS \<phi>) = FEXISTS (norm \<phi>)"
context formula
begin
lemma satisfies_nNot[simp]: "(w, I) \<Turnstile> nNot \<phi> \<longleftrightarrow> (w, I) \<Turnstile> FNot \<phi>"
by (induct \<phi> rule: nNot.induct) auto
lemma FOV_nNot[simp]: "FOV (nNot \<phi>) = FOV (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma SOV_nNot[simp]: "SOV (nNot \<phi>) = SOV (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma pre_wf_formula_nNot[simp]: "pre_wf_formula n (nNot \<phi>) = pre_wf_formula n (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma FOV_norm[simp]: "FOV (norm \<phi>) = FOV \<phi>"
by (induct \<phi>) auto
lemma SOV_norm[simp]: "SOV (norm \<phi>) = SOV \<phi>"
by (induct \<phi>) auto
lemma pre_wf_formula_norm[simp]: "pre_wf_formula n (norm \<phi>) = pre_wf_formula n \<phi>"
by (induct \<phi> arbitrary: n) auto
lemma satisfies_norm[simp]: "wI \<Turnstile> norm \<phi> \<longleftrightarrow> wI \<Turnstile> \<phi>"
by (induct \<phi> arbitrary: wI) auto
lemma lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S_norm[simp]: "lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S n (norm \<phi>) = lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S n \<phi>"
unfolding lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S_def by auto
end
(*<*)
end
(*>*)
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/MSO_Regex_Equivalence/WS1S_Normalization.thy"}
|
import sys
import json
import time
import torch
import pickle
import socket
import logging
import numbers
import functools
import subprocess
import unicodedata
from typing import List, Union
from pathlib import Path
import yaml
import numpy as np
import hickle
import scipy.io as spio
import msgpack_numpy as msgpack_np
import zsvision.zs_data_structures
from beartype import beartype
from mergedeep import Strategy, merge
from typeguard import typechecked
from beartype.cave import AnyType, NoneTypeOr
@functools.lru_cache(maxsize=64, typed=False)
@beartype
def memcache(path: Union[Path, str], verbose: bool = True):
path = Path(path)
suffix = path.suffix
if verbose:
print(f"loading data from {path} ({socket.gethostname()})", end=" ", flush=True)
tic = time.time()
if suffix in {".pkl", ".pickle", ".pckl", ".pk"}:
res = pickle_loader(pkl_path=path, verbose=verbose)
elif suffix in {".hkl", ".hickle"}:
res = hickle.load(path)
elif suffix == ".npy":
res = np_loader(path, verbose=verbose)
elif suffix == ".mp":
res = msgpack_loader(path, verbose=verbose)
elif suffix == ".json":
with open(path, "r") as f:
res = json.load(f)
elif suffix in {".yaml", ".yml"}:
with open(path, "r") as f:
res = yaml.safe_load(f)
elif suffix == ".mat":
res = loadmat(path)
elif suffix == ".pth":
res = torch.load(path)
else:
raise ValueError(f"unknown suffix: {suffix} for path {path}")
if verbose:
print(f"[Total: {time.time() - tic:.1f}s]")
return res
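# Usage sketch (the path is illustrative, not part of the module):
#
#     feats = memcache("features/resnet.pkl")  # parsed from disk on first call
#     feats = memcache("features/resnet.pkl")  # second call served by lru_cache
#
# Because of functools.lru_cache, repeated calls return the same cached object,
# so mutating the result also mutates the cache entry.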
@beartype
def support_old_pickles(buffer: bytes) -> object:
    try:
        data = pickle.loads(buffer, encoding="latin1")
    except ModuleNotFoundError as exception:
        # Old pickles reference the removed `datastructures` module: alias it
        # to its new home and retry; anything else is a genuine failure.
        if "datastructures" in str(exception.msg):
            sys.modules['datastructures'] = zsvision.zs_data_structures
            data = pickle.loads(buffer, encoding="latin1")
        else:
            raise
    return data
@beartype
def pickle_loader(
pkl_path: Path,
verbose: bool,
backwards_compatible: bool = True,
) -> object:
"""Deserialise object from pickle.
Args:
        pkl_path: the location of the pickle file to load
        backwards_compatible: if true, support old pickle formats used with the
            ExpertStore format
Return:
The deserialised object.
"""
tic = time.time()
with open(pkl_path, "rb") as f:
buffer = f.read()
if verbose:
print(f"[I/O: {time.time() - tic:.1f}s]", end=" ")
tic = time.time()
if backwards_compatible:
data = support_old_pickles(buffer)
else:
data = pickle.loads(buffer, encoding="latin1")
if verbose:
print(f"[deserialisation: {time.time() - tic:.1f}s]", end=" ")
return data
@beartype
def msgpack_loader(mp_path: Path, verbose: bool):
"""Msgpack provides a faster serialisation routine than pickle, so is preferable
for loading and deserialising large feature sets from disk."""
tic = time.time()
with open(mp_path, "rb") as f:
buffer = f.read()
if verbose:
print(f"[I/O: {time.time() - tic:.1f}s]", end=" ")
tic = time.time()
data = msgpack_np.unpackb(buffer, raw=False)
if verbose:
print(f"[deserialisation: {time.time() - tic:.1f}s]", end=" ")
return data
@beartype
def np_loader(np_path: Path, verbose: bool, l2norm: bool = False):
with open(np_path, "rb") as f:
data = np.load(f, encoding="latin1", allow_pickle=True)
if isinstance(data, np.ndarray) and data.size == 1:
        data = data[()]  # handle numpy dict storage convention
if l2norm:
if verbose:
print("L2 normalizing features")
if isinstance(data, dict):
for key in data:
feats_ = data[key]
feats_ = feats_ / max(np.linalg.norm(feats_), 1E-6)
data[key] = feats_
elif data.ndim == 2:
data_norm = np.linalg.norm(data, axis=1)
data = data / np.maximum(data_norm.reshape(-1, 1), 1E-6)
else:
raise ValueError("unexpected data format {}".format(type(data)))
return data
@beartype
def set_nested_key_val(key: str, val: AnyType, target: dict):
"""Use a prefix key (e.g. key1.key2.key3) to set a value in a nested dict"""
    # a literal period inside a key name is written as "_."; escape it before
    # splitting on the "." separators
key = key.replace("_.", "&&")
subkeys = key.split(".")
subkeys = [x.replace("&&", ".") for x in subkeys]
nested = target
print("subkeys", subkeys)
for subkey in subkeys[:-1]:
try:
nested = nested.__getitem__(subkey)
except Exception as exception:
print(subkey)
raise exception
orig = nested[subkeys[-1]]
if orig == "":
if val == "":
val = 0
else:
val = str(val)
elif isinstance(orig, bool):
        if val.lower() in {"0", "false"}:
val = False
else:
val = bool(val)
elif isinstance(orig, list):
if isinstance(val, str) and "," in val:
val = val.split(",")
# we use the convention that a trailing comma indicates a single item list
if len(val) == 2 and val[1] == "":
val.pop()
if val and not orig:
raise ValueError("Could not infer correct type from empty original list")
else:
val = [type(orig[0])(x) for x in val]
assert isinstance(val, list), "Failed to pass a list where expected"
elif isinstance(orig, int):
val = int(val)
elif isinstance(orig, float):
val = float(val)
elif isinstance(orig, str):
val = str(val)
else:
raise ValueError(f"unrecognised type: {type(val)}")
nested[subkeys[-1]] = val
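# Usage sketch for the prefix-key convention (values are illustrative):
#
#     target = {"train": {"lr": 0.1}}
#     set_nested_key_val("train.lr", "0.01", target)
#     # target == {"train": {"lr": 0.01}}  (the string is cast to float to
#     # match the type of the original value)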
@beartype
def loadmat(src_path: Path) -> dict:
"""This function should be called instead of direct spio.loadmat as it addresses the
problem of not properly recovering python dictionaries from mat files. It calls the
function check keys to cure all entries which are still mat-objects.
The function is heavily based on this reference:
https://stackoverflow.com/a/29126361
Args:
src_path: the location of the .mat file to load
Returns:
a parsed .mat file in the form of a python dictionary.
"""
def _check_keys(d):
"""Checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
"""
for key in d:
if isinstance(d[key], spio.matlab.mio5_params.mat_struct):
d[key] = _todict(d[key])
elif isinstance(d[key], np.ndarray):
d[key] = _tolist(d[key])
else:
pass
return d
def _todict(matobj):
"""A recursive function which constructs from matobjects nested dictionaries
"""
d = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
d[strg] = _todict(elem)
elif isinstance(elem, np.ndarray):
d[strg] = _tolist(elem)
else:
d[strg] = elem
return d
def _tolist(ndarray):
"""A recursive function which constructs lists from cellarrays
(which are loaded as numpy ndarrays), recursing into the elements
if they contain matobjects or are non-numeric.
"""
if np.issubdtype(ndarray.dtype, np.number):
return ndarray
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, spio.matlab.mio5_params.mat_struct):
elem_list.append(_todict(sub_elem))
elif isinstance(sub_elem, np.ndarray):
elem_list.append(_tolist(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
data = spio.loadmat(src_path, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
@functools.lru_cache(maxsize=64, typed=False)
def concat_features(feat_paths, axis):
aggregates = [memcache(x) for x in feat_paths]
tic = time.time()
msg = "expected to concatenate datastructures of a single type"
assert len(set(type(x) for x in aggregates)) == 1, msg
if isinstance(aggregates[0], dict):
keys = aggregates[0] # for now, we assume that all aggregates share keys
merged = {}
for key in keys:
merged[key] = np.concatenate([x[key] for x in aggregates], axis=axis)
elif isinstance(aggregates[0], zsvision.zs_data_structures.ExpertStore):
dims, stores = [], []
keys = aggregates[0].keys
for x in aggregates:
dims.append(x.dim)
stores.append(x.store)
assert x.keys == keys, "all aggregates must share identical keys"
msg = "expected to concatenate ExpertStores with a common dimension"
assert len(set(dims)) == 1, msg
dim = dims[0]
merged = zsvision.zs_data_structures.ExpertStore(keys, dim=dim)
merged.store = np.concatenate(stores, axis=axis)
else:
raise ValueError(f"Unknown datastructure: {type(aggregates[0])}")
# Force memory clearance
for aggregate in aggregates:
del aggregate
print("done in {:.3f}s".format(time.time() - tic))
return merged
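# Usage sketch (hypothetical paths): because concat_features is wrapped in
# functools.lru_cache, feat_paths must be hashable, e.g. a tuple of strings:
#
#     merged = concat_features(("feats/audio.pkl", "feats/rgb.pkl"), axis=1)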
class BlockTimer:
"""A minimal inline codeblock timer
Args:
msg: A string to be printed together with timing information
mute (default: False): whether to disable all reporting
precise: if true, provide timing information as a total number of seconds to
six decimal places, rather than as a formatted timestring (e.g. HhMmSs)
logger: if given, use the supplied logger, rather than printing messages to screen
"""
@beartype
def __init__(
self,
msg: str,
mute: bool = False,
precise: bool = False,
        logger: NoneTypeOr[logging.Logger] = None,
):
self.msg = msg
self.mute = mute
self.precise = precise
self.logger = logger
self.start = None
def __enter__(self):
self.start = time.time()
if not self.mute:
msg = f"{self.msg}..."
if self.logger:
self.logger.info(msg)
else:
print(msg, end="", flush=True)
return self
def __exit__(self, *args):
if self.precise:
total = f"{time.time() - self.start:.6f}s"
else:
total = time.strftime('%Hh%Mm%Ss', time.gmtime(time.time() - self.start))
if not self.mute:
msg = f" took {total}"
if self.logger:
self.logger.info(msg)
else:
print(msg)
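# Usage sketch:
#
#     with BlockTimer("loading features"):
#         feats = memcache("features/resnet.pkl")  # hypothetical workload
#
# This prints "loading features..." on entry and, on exit, a duration such as
# " took 00h00m02s" (or a six-decimal seconds figure when precise=True).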
@beartype
def find_ancestors(cfg_fname: (Path, str)) -> list:
"""Search the hierarchy specified by the `inherit_from` attribute of a json config
via post-order traversal.
Args:
cfg_fname: the location of the json config file
Returns:
a list of loaded configs in the order specified by the inheritance.
"""
# Cannot use memcache here without risk of recursion
if Path(cfg_fname).suffix == ".json":
with open(cfg_fname, "r") as f:
config = json.load(f)
elif Path(cfg_fname).suffix in {".yaml", ".yml"}:
with open(cfg_fname, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unknown config path type: {cfg_fname}")
ancestors = []
if "inherit_from" in config:
immediate_ancestors = config["inherit_from"].split(",")
for immediate_ancestor in immediate_ancestors:
ancestors.extend(find_ancestors(Path(immediate_ancestor)))
ancestors.append(config)
return ancestors
@beartype
def load_json_or_yaml_config(cfg_fname: (Path, str)) -> dict:
"""Load a configuration file into memory.
Args:
cfg_fname: the location of the config file (yaml or json)
Returns:
the loaded configuration
"""
ancestors = find_ancestors(cfg_fname)
config = ancestors.pop()
ancestors = reversed(ancestors)
for ancestor in ancestors:
merge(ancestor, config, strategy=Strategy.REPLACE)
config = ancestor
return config
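# Worked sketch of the inheritance semantics (file contents are illustrative):
#
#     # base.yaml:                # child.yaml:
#     # lr: 0.1                   # inherit_from: "base.yaml"
#     # batch_size: 32            # batch_size: 64
#
#     load_json_or_yaml_config("child.yaml")
#     # -> {"inherit_from": "base.yaml", "lr": 0.1, "batch_size": 64}
#
# i.e. descendants override their ancestors key-by-key (Strategy.REPLACE).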
@beartype
def load_json_config(cfg_fname: (Path, str)) -> dict:
"""Load a json configuration file into memory.
Args:
cfg_fname: the location of the json config file
Returns:
the loaded configuration
NOTES: A json file may include an `inherit_from`: "<path>" key, value pair which
points to a list of templates from which to inherit default values. Inheritance
specifiers are traversed in increasing order of importance, from left to right.
E.g. given
"inherit_from": "path-to-A,path-to-B",
the values of B will override the values of A.
"""
return load_json_or_yaml_config(cfg_fname)
@beartype
def load_yaml_config(cfg_fname: (Path, str)) -> dict:
"""Load a yaml configuration file into memory.
Args:
cfg_fname: the location of the yaml config file
Returns:
the loaded configuration
NOTES: A yaml file may include an `inherit_from`: "<path>" key, value pair which
points to a list of templates from which to inherit default values. Inheritance
specifiers are traversed in increasing order of importance, from left to right.
E.g. given
"inherit_from": "path-to-A,path-to-B",
the values of B will override the values of A.
"""
return load_json_or_yaml_config(cfg_fname)
@beartype
def seconds_to_timestr(secs: numbers.Number) -> str:
"""Convert a total number of seconds into a formatted time string.
Arguments:
secs: the total number of seconds
Returns:
a formatted time (HH:MM:SS.mmm)
NOTE: Probably this function is not needed. But I refuse to spend more of my life
looking at datetime/time/strftime combinations.
"""
assert secs >= 0, f"Expected a non-negative number of seconds, but requested {secs}"
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
ms = secs - int(secs)
return f"{int(hours):02d}:{int(mins):02d}:{int(secs):02d}.{int(ms * 1000):03d}"
@typechecked
def list_visible_gpu_types() -> List[str]:
"""Provide a list of the NVIDIA GPUs that are visible on the current machine.
Returns:
a list of GPU device types.
"""
cmd = ["nvidia-smi", "-L"]
try:
res = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
check=True)
device_strs = res.stdout.decode("utf-8").splitlines()
devices = [x.split(":")[1].split("(")[0].strip() for x in device_strs]
except FileNotFoundError:
devices = []
return devices
@beartype
def quote_and_escape_ffmpeg_path(path: (str, Path)) -> str:
"""Quote and escape paths for use with ffmpeg/ffprobe.
Args:
path: the location of a file to be processed by ffmpeg
Returns:
a quoted, dollar-escaped path
Example usage:
`os.system("ffprobe {quote_and_escape_ffmpeg_path(path)}")`
NOTE: This function is useful for processing file paths that may contain:
1. spaces
2. dollar characters ($)
3. percent sign characters (%)
when invoking ffmpeg or ffprobe from python.
"""
# Dollar signs need to be escaped when used in paths
escaped = str(path).replace("$", r"\$").replace("%", r"\%")
if "'" in escaped:
quoted = f'"{escaped}"'
else:
quoted = f"'{escaped}'"
return quoted
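# Example: quote_and_escape_ffmpeg_path("my video$1.mp4") returns the
# single-quoted, dollar-escaped string 'my video\$1.mp4'.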
@beartype
def parse_tree_layout(
tree_layout_path: Path,
prefix_token: str = "── ",
) -> set:
"""Given a text dump of the output of the linux `tree` command, this function will
reconstruct the relative paths of the files in the tree.
Args:
tree_layout_path: the location of the text file containing the `tree` output
prefix_token: the token used by the `tree` command to denote a new file.
Returns:
the collection of parsed paths.
NOTES:
1. This function assumes that it is parsing the output of the tree command that
has been run in the directory of the structure it is displaying (i.e. `tree` is run)
without arguments.
2. The output of each row in the `tree` command is prefixed by a T-bar or an L-bar
(see example formats (1) and (2) resp. below).
3. If the file at `tree_layout_path` contains any rows that are not part of the tree
output, they are ignored.
Example:
Given tree outputs of the forms (1) or (2) shown below:
(1)
├── Conversation
│ ├── Belfast
│ │ ├── 11+12
(2)
└── Conversation
└── Belfast
└── 11+12
in both cases, this function will return a set of pathlib paths of the form:
{
"."
"Conversation",
"Conversation/Belfast",
"Conversation/Belfast/11+12",
}
"""
with open(tree_layout_path, "r") as f:
rows = f.read().splitlines()
# filter the input to only contain the file tree structure by searching for the
# presence of the tree prefix token
rows = [x for x in rows if prefix_token in x]
# convert nbsp escape codes into spaces
rows = [unicodedata.normalize("NFKD", x) for x in rows]
current_path = Path(".")
paths = {current_path}
known_prefix_heads = {"├", "└"}
for row in rows:
prefix, name = row.split(prefix_token)
prefix, prefix_head = prefix[:-1], list(prefix).pop(-1)
msg = f"Expected prefix head to be in {known_prefix_heads} found {prefix_head}"
assert prefix_head in known_prefix_heads, msg
assert len(prefix) % 4 == 0, "Expected prefix string length to be a multiple of 4"
depth = int(len(prefix) / 4)
current_path = Path(*current_path.parts[:depth]) / name
paths.add(current_path)
return paths
if __name__ == "__main__":
print(list_visible_gpu_types())
|
{"hexsha": "bd35eaf696d09a59f434fc7415fea2223583a2d5", "size": 18359, "ext": "py", "lang": "Python", "max_stars_repo_path": "zsvision/zs_utils.py", "max_stars_repo_name": "bjuncek/zsvision", "max_stars_repo_head_hexsha": "a84ecf93f334ecbdd99a8be7150fc767e732e6af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-01-18T13:47:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T00:00:43.000Z", "max_issues_repo_path": "zsvision/zs_utils.py", "max_issues_repo_name": "bjuncek/zsvision", "max_issues_repo_head_hexsha": "a84ecf93f334ecbdd99a8be7150fc767e732e6af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-16T18:24:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-16T18:24:10.000Z", "max_forks_repo_path": "zsvision/zs_utils.py", "max_forks_repo_name": "bjuncek/zsvision", "max_forks_repo_head_hexsha": "a84ecf93f334ecbdd99a8be7150fc767e732e6af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-21T09:34:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-26T16:17:04.000Z", "avg_line_length": 32.7839285714, "max_line_length": 90, "alphanum_fraction": 0.613105289, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4429}
|
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
from sklearn.metrics import confusion_matrix
from ninolearn.learn.skillMeasures import seasonal_correlation
seismic = plt.cm.get_cmap('seismic', 256)
newcolors = seismic(np.linspace(0, 1, 256))
grey = np.array([192/256, 192/256, 192/256, 1])
newcolors[:1, :] = grey
newcmp = ListedColormap(newcolors)
seas_ticks = ['DJF', 'JFM', 'FMA', 'MAM', 'AMJ', 'MJJ',
'JJA', 'JAS', 'ASO', 'SON', 'OND', 'NDJ']
mon_ticks = ['J', 'F', 'M', 'A', 'M', 'J',
'J', 'A', 'S', 'O', 'N', 'D']
def plot_correlation(y, pred, time, title=None):
"""
make a bar plot of the correlation coeficent between y and the prediction
"""
m = np.arange(1, 13)
fig, ax = plt.subplots(figsize=(5,2.5))
r, p = seasonal_correlation(y, pred, time)
ax.set_ylim(0, 1)
ax.bar(m, r)
ax.set_xticks(m)
ax.set_xticklabels(seas_ticks)
ax.set_xlabel("Season")
ax.set_ylabel(f"Correlation coefficient")
if title is None:
ax.set_title(f"$r =$ {round(np.corrcoef(y,pred)[0,1], 2)}")
else:
ax.set_title(title)
plt.tight_layout()
def plot_confMat(y, pred, labels):
"""
Plot a confusion matrix. Here, the recall is on the diagonal!
:param y: The baseline.
:param pred: The prediction.
:param labels: The names of the classes.
"""
cm = confusion_matrix(y, pred)#.T
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues,
vmin = 1/len(labels), vmax = 0.8)
ax.figure.colorbar(im, ax=ax,extend='max')
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
xticklabels=labels,
yticklabels=labels,
title='Confusion Matrix',
           xlabel='Predicted label',
           ylabel='True label')
fmt = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="black" if cm[i, j] > thresh else "black")
fig.tight_layout()
def plot_seasonal_skill(lead_time, data, vmin=-1, vmax=1, nlevels=20, cmap=newcmp, extend='min'):
fig, ax = plt.subplots(figsize=(5,3.5))
m = np.arange(1,13)
levels = np.linspace(vmin, vmax, nlevels+1)
C = ax.contourf(m,lead_time, data, levels=levels,
vmin=vmin, vmax=vmax,
cmap=cmap, extend=extend)
ax.set_xticks(m)
ax.set_xticklabels(seas_ticks, rotation='vertical')
ax.set_xlabel('Target Season')
ax.set_yticks(lead_time)
ax.set_yticklabels(lead_time)
ax.set_ylabel('Lead Time [Months]')
plt.colorbar(C, ticks=np.arange(vmin,vmax+0.1,0.2))
plt.tight_layout()
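# Usage sketch (the shapes are the assumption here): `data` must have shape
# (len(lead_time), 12) -- one row per lead time, one column per target season:
#
#     lead_time = np.arange(0, 24, 3)
#     data = np.random.uniform(-1, 1, size=(len(lead_time), 12))  # dummy skill
#     plot_seasonal_skill(lead_time, data)
#     plt.show()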
|
{"hexsha": "aa098c068b9c1cdb7be91afecaedef6abc399600", "size": 2924, "ext": "py", "lang": "Python", "max_stars_repo_path": "ninolearn/plot/evaluation.py", "max_stars_repo_name": "pjpetersik/ninolearn", "max_stars_repo_head_hexsha": "2a6912bbaaf3c5737f6dcda89e4d7d1fd885a35e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-12-01T14:58:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T23:45:09.000Z", "max_issues_repo_path": "ninolearn/plot/evaluation.py", "max_issues_repo_name": "MiriamSterl/ninolearn", "max_issues_repo_head_hexsha": "7cb85a0d51ef5cc7e1edad6d51330f2e0a17dda4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ninolearn/plot/evaluation.py", "max_forks_repo_name": "MiriamSterl/ninolearn", "max_forks_repo_head_hexsha": "7cb85a0d51ef5cc7e1edad6d51330f2e0a17dda4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-10-15T11:23:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T01:41:44.000Z", "avg_line_length": 32.1318681319, "max_line_length": 97, "alphanum_fraction": 0.6002051984, "include": true, "reason": "import numpy", "num_tokens": 843}
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
from geometry_tools.projective import ProjectivePlane
plane = ProjectivePlane()
plane.set_hyperplane_coordinates(np.array([1.0, 1.0, 1.0]))
plane.set_affine_origin([1.0, 1.0, 1.0])
plane.set_affine_direction([1.0, 0.0, 0.0], [0.0, 1.0])
pts = np.array([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]
])
affine_triangle_pts = plane.affine_coordinates(pts)
affine_triangle = Polygon(affine_triangle_pts, fill=False,
edgecolor="black")
basepoint = np.array([1.0, 1.0, 1.0])
scale_factor = 1.5
triangle_automorphism = np.matrix([
[scale_factor, 0.0, 0.0 ],
[0.0, 1.0, 0.0 ],
[0.0, 0.0, 1 / scale_factor]
])
num_pts = 10
auts = np.array([
triangle_automorphism**k for k in range(-1 * num_pts, num_pts)
])
point_sequence = basepoint @ auts
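# basepoint has shape (3,) and auts has shape (2 * num_pts, 3, 3), so the
# broadcasted matmul treats basepoint as a row vector acting on the left of
# each power of the automorphism, yielding one point per power: shape (N, 3).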
xs, ys = plane.xy_coords(point_sequence)
fig, ax = plt.subplots()
ax.add_patch(affine_triangle)
plt.plot(xs, ys, 'bo')
plt.show()
|
{"hexsha": "e21b7ee54a59334d85ace1a810cb104fc480f05b", "size": 1068, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/examples.py", "max_stars_repo_name": "tjweisman/geometry_tools", "max_stars_repo_head_hexsha": "9523dd86f68606d5297b228e874020d62663d3db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-18T03:23:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T03:23:07.000Z", "max_issues_repo_path": "examples/examples.py", "max_issues_repo_name": "tjweisman/geometry_tools", "max_issues_repo_head_hexsha": "9523dd86f68606d5297b228e874020d62663d3db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/examples.py", "max_forks_repo_name": "tjweisman/geometry_tools", "max_forks_repo_head_hexsha": "9523dd86f68606d5297b228e874020d62663d3db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.25, "max_line_length": 66, "alphanum_fraction": 0.638576779, "include": true, "reason": "import numpy", "num_tokens": 360}
|
[STATEMENT]
lemma bin_rsplit_len_le: "n \<noteq> 0 \<longrightarrow> ws = bin_rsplit n (nw, w) \<longrightarrow> length ws \<le> m \<longleftrightarrow> nw \<le> m * n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n \<noteq> 0 \<longrightarrow> ws = bin_rsplit n (nw, w) \<longrightarrow> (length ws \<le> m) = (nw \<le> m * n)
[PROOF STEP]
by (auto simp: bin_rsplit_def bin_rsplit_aux_len_le)
|
{"llama_tokens": 162, "file": "Word_Lib_Bits_Int", "length": 1}
|
import torch.nn as nn
import numpy as np
import torch
from functools import reduce
class MSSSIM(nn.Module):
def __init__(self, width, batch_size, n_channel,
cuda, c1=.01**2, c2=.02**2, n_sigmas=5):
super(MSSSIM, self).__init__()
self.c1 = c1
self.c2 = c2
        sigmas = [0.5 * 2 ** i for i in range(n_sigmas)]
self.weights = np.zeros(shape=(n_sigmas,n_channel,n_channel,width,width),
dtype=np.float32)
def _get_kernel(sigma):
            # 1-D Gaussian profile for the requested sigma
            w = np.exp(-1. * np.arange(-(width / 2), width / 2) ** 2 / (2 * sigma ** 2))
w = np.outer(w, w.reshape((width, 1)))
w = w/np.sum(w)
out = np.zeros(shape=(n_channel, n_channel, width, width),
dtype=np.float32)
out[range(n_channel),range(n_channel)] = w
return out
        for n_layer in range(n_sigmas):
self.weights[n_layer] = _get_kernel(sigmas[n_layer])
self.weights = torch.Tensor(self.weights)
if cuda:
self.weights = self.weights.cuda()
def forward(self, input, target):
def _forward(kernel):
mux = nn.functional.conv2d(input, kernel)
muy = nn.functional.conv2d(target, kernel)
sigmax2 = nn.functional.conv2d(input**2,kernel) - mux**2
sigmay2 = nn.functional.conv2d(target**2,kernel) - muy**2
sigmaxy = nn.functional.conv2d(input*target,kernel) - mux*muy
return mux,muy,sigmax2,sigmay2,sigmaxy
nb, nc = input.shape[0], input.shape[1]
cs = []
for weight in self.weights:
mux,muy,sigmax2,sigmay2,sigmaxy = _forward(weight)
_cs = (2 * sigmaxy + self.c2) / (sigmax2 + sigmay2 + self.c2)
cs.append(_cs)
pcs = reduce(lambda x,y:x*y, cs)
l = (2 * mux * muy + self.c1)/(mux ** 2 + muy **2 + self.c1)
out = 1 - torch.sum((l*pcs) / (nb*nc))
return out
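# Usage sketch (all sizes illustrative). When the inputs have spatial size
# equal to `width`, each Gaussian convolution reduces the maps to 1x1 before
# the final reduction:
#
#     criterion = MSSSIM(width=32, batch_size=4, n_channel=3, cuda=False)
#     loss = criterion(torch.rand(4, 3, 32, 32), torch.rand(4, 3, 32, 32))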
|
{"hexsha": "31f60078abc639ac4a9f90241a7a0d131521ba8f", "size": 2042, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/loss/msssim.py", "max_stars_repo_name": "muneebaadil/sisr-irl", "max_stars_repo_head_hexsha": "29ccf9ad970ade22fc8e158b83f952504db71a7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-09-03T11:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-13T08:50:25.000Z", "max_issues_repo_path": "code/loss/msssim.py", "max_issues_repo_name": "muneebaadil/sisr-irl", "max_issues_repo_head_hexsha": "29ccf9ad970ade22fc8e158b83f952504db71a7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-16T06:46:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-16T06:46:31.000Z", "max_forks_repo_path": "code/loss/msssim.py", "max_forks_repo_name": "muneebaadil/sisr-irl", "max_forks_repo_head_hexsha": "29ccf9ad970ade22fc8e158b83f952504db71a7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-09-03T02:00:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-13T08:50:28.000Z", "avg_line_length": 35.8245614035, "max_line_length": 84, "alphanum_fraction": 0.5401567091, "include": true, "reason": "import numpy", "num_tokens": 555}
|
// Copyright 2013-2015 Stanford University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <map>
#include <string>
#include <vector>
#include <cassert>
#include "src/ext/cpputil/include/command_line/command_line.h"
#include "src/ext/cpputil/include/signal/debug_handler.h"
#include "src/ext/cpputil/include/io/filterstream.h"
#include "src/ext/cpputil/include/io/column.h"
#include "src/ext/cpputil/include/io/console.h"
#include "src/ext/x64asm/src/reg_set.h"
#include "src/state/cpu_states.h"
#include "src/stategen/stategen.h"
#include "src/tunit/tunit.h"
#include "src/symstate/simplify.h"
#include "src/validator/bounded.h"
#include "src/validator/handler.h"
#include "src/validator/handlers/combo_handler.h"
#include "tools/gadgets/functions.h"
#include "tools/gadgets/solver.h"
#include "tools/gadgets/seed.h"
#include "tools/gadgets/validator.h"
#include "tools/gadgets/sandbox.h"
#include "src/specgen/specgen.h"
#include "src/specgen/support.h"
#define BOOST_NO_CXX11_SCOPED_ENUMS
#include <boost/filesystem.hpp>
using namespace cpputil;
using namespace std;
using namespace stoke;
using namespace x64asm;
using namespace std::chrono;
using namespace boost;
Heading& functions_heading =
cpputil::Heading::create("Auxiliary Function Options:");
auto& circuits_arg =
ValueArg<string>::create("circuit_dir")
.usage("<path/to/dir>")
.description("Directory containing the strata circuits")
.default_val("/home/sheule/dev/strata-data/circuits");
auto& two =
FlagArg::create("two")
.description("Analyse imm8 circuits");
int main(int argc, char** argv) {
// not actually required here
target_arg.required(false);
CommandLineConfig::strict_with_convenience(argc, argv);
SeedGadget seed;
FunctionsGadget aux_fxns;
SandboxGadget sb({}, aux_fxns);
// setup the stategen class
StateGen sg(&sb, 30);
sg.set_max_attempts(10)
.set_max_memory(30)
.set_allow_unaligned(false)
.set_seed(seed);
SolverGadget solver;
default_random_engine gen((size_t)seed);
auto strata_path = circuits_arg.value();
auto strata_handler = StrataHandler(strata_path, false);
auto strata_handler_simple = StrataHandler(strata_path, true);
auto stoke_handler = ComboHandler();
auto validator = BoundedValidator(solver);
auto get_strata_circuits = true;
auto sep = ",";
x64asm::RegSet supported =
(x64asm::RegSet::all_gps() | x64asm::RegSet::all_ymms()) +
x64asm::eflags_cf + x64asm::eflags_of + x64asm::eflags_pf +
x64asm::eflags_zf + x64asm::eflags_sf;
size_t nodes = 0;
size_t uifs = 0;
size_t muls = 0;
for (auto i = 0; i < X64ASM_NUM_OPCODES; ++i) {
for (auto j = 0; j < (two ? 256 : 1); j++) {
auto opcode = (Opcode)i;
auto reason = strata_handler.support_reason(opcode);
auto is_base = specgen_is_base(opcode);
if (is_base) {
reason = SupportReason::BASESET;
}
if (two && (!specgen_is_imm8(opcode) || specgen_is_duplicate(opcode))) continue;
auto strata_support = strata_handler.is_supported(opcode) || is_base;
auto stoke_support = validator.is_supported(opcode);
auto could_support = !specgen_is_system(opcode) &&
!specgen_is_float(opcode) &&
!specgen_is_jump(opcode) &&
!specgen_is_mm(opcode) &&
!specgen_is_crypto(opcode) &&
!specgen_is_sandbox_unsupported(opcode);
if (!strata_support) {
if (!specgen_is_system(opcode) &&
!specgen_is_float(opcode) &&
!specgen_is_jump(opcode) &&
!specgen_is_mm(opcode) &&
!specgen_is_crypto(opcode) &&
!specgen_is_sandbox_unsupported(opcode)) {
// cout << opcode << endl;
}
}
if (!could_support) continue;
Instruction instr(XOR_R8_R8);
RegSet rs;
if (two) {
instr = get_instruction(opcode, j);
rs = supported & instr.maybe_write_set();
strata_support = strata_handler.get_support(instr);
} else if (strata_support || stoke_support) {
instr = get_random_instruction(opcode, gen);
rs = supported & instr.maybe_write_set();
}
SymState stoke_state("", true);
if (stoke_support) {
stoke_handler.build_circuit(instr, stoke_state);
if (stoke_handler.has_error()) {
// this is necessary because stoke lies about support
stoke_support = false;
}
}
auto used_for = 0;
auto is_learned = reason == SupportReason::LEARNED;
if (is_learned || is_base || two) {
used_for = strata_handler.used_for(opcode);
}
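      // The statements below emit one JSON object per instruction
      // (JSON-lines style). A representative line, pieced together from the
      // cout calls that follow (all values illustrative), looks like:
      //   { "instr":"XOR_R8_R8", "is_base":true, "strata_support":true,
      //     "strata_reason":1, "used_for":0, "stoke_support":true, "delim": 0 }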
cout << "{ ";
cout << " \"instr\":\"" << opcode;
if (two) {
cout << "_" << dec << j;
}
cout << "\"" << sep;
cout << " \"is_base\":" << (specgen_is_base(opcode)?"true":"false") << sep;
cout << " \"strata_support\":" << (strata_support?"true":"false") << sep;
cout << " \"strata_reason\":" << (two?SupportReason::IMM8:((int32_t)reason)) << sep;
cout << " \"used_for\":" << used_for << sep;
cout << " \"stoke_support\":" << (stoke_support?"true":"false") << sep;
if (strata_support && get_strata_circuits && (is_learned || two)) {
SymState state("", true);
strata_handler.build_circuit(instr, state);
if (strata_handler.has_error()) {
cout << instr << endl;
cout << "strata handler produced an error: " << strata_handler.error() << endl;
exit(1);
}
SymState state_simple("", true);
strata_handler_simple.build_circuit(instr, state_simple);
if (strata_handler_simple.has_error()) {
cout << instr << endl;
cout << "strata handler produced an error: " << strata_handler_simple.error() << endl;
exit(1);
}
measure_complexity(state, rs, &nodes, &uifs, &muls);
cout << "\"strata_long\":{";
cout << "\"uif\":" << uifs << sep;
cout << "\"mult\":" << muls << sep;
cout << "\"nodes\":" << nodes;
cout << "},";
measure_complexity(state_simple, rs, &nodes, &uifs, &muls, true);
cout << "\"strata\":{";
cout << "\"uif\":" << uifs << sep;
cout << "\"mult\":" << muls << sep;
cout << "\"nodes\":" << nodes;
cout << "},";
}
if (stoke_support && strata_support) {
measure_complexity(stoke_state, rs, &nodes, &uifs, &muls);
cout << "\"stoke\":{";
cout << "\"uif\":" << uifs << sep;
cout << "\"mult\":" << muls << sep;
cout << "\"nodes\":" << nodes;
cout << "},";
}
cout << "\"delim\": 0";
cout << " }";
cout << endl;
}
}
}
|
{"hexsha": "0866636090c1323793aefb50b3fbad5f11c7650d", "size": 7359, "ext": "cc", "lang": "C++", "max_stars_repo_path": "tools/apps/specgen_statistics.cc", "max_stars_repo_name": "sdasgup3/strata-stoke", "max_stars_repo_head_hexsha": "b9981a48a82a72069896d29863649cfad1b4d98c", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-02-18T04:18:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-26T06:42:46.000Z", "max_issues_repo_path": "tools/apps/specgen_statistics.cc", "max_issues_repo_name": "sdasgup3/strata-stoke", "max_issues_repo_head_hexsha": "b9981a48a82a72069896d29863649cfad1b4d98c", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/apps/specgen_statistics.cc", "max_forks_repo_name": "sdasgup3/strata-stoke", "max_forks_repo_head_hexsha": "b9981a48a82a72069896d29863649cfad1b4d98c", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7568807339, "max_line_length": 96, "alphanum_fraction": 0.620328849, "num_tokens": 1879}
|
"""
Helper Classes and Functions for docking fingerprint computation.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar and Jacob Durrant"
__license__ = "GNU General Public License"
import logging
import math
import os
import subprocess
import numpy as np
import deepchem.utils.rdkit_util as rdkit_util
def force_partial_charge_computation(mol):
"""Force computation of partial charges for molecule.
Parameters
----------
mol: Rdkit Mol
Molecule on which we compute partial charges.
"""
rdkit_util.compute_charges(mol)
def pdbqt_to_pdb(input_file, output_directory):
"""Convert pdbqt file to pdb file.
Parameters
----------
input_file: String
Path to input file.
output_directory: String
Path to desired output directory.
"""
  logging.info("%s, %s", input_file, output_directory)
raise ValueError("Not yet implemented")
def hydrogenate_and_compute_partial_charges(input_file,
input_format,
hyd_output=None,
pdbqt_output=None,
protein=True,
verbose=True):
"""Outputs a hydrogenated pdb and a pdbqt with partial charges.
Takes an input file in specified format. Generates two outputs:
-) A pdb file that contains a hydrogenated (at pH 7.4) version of
original compound.
-) A pdbqt file that has computed Gasteiger partial charges. This pdbqt
     file is built from the hydrogenated pdb.
TODO(rbharath): Can do a bit of refactoring between this function and
pdbqt_to_pdb.
Parameters
----------
input_file: String
Path to input file.
  input_format: String
    Name of input format.
  hyd_output: String
    Path for the hydrogenated pdb output.
  pdbqt_output: String
    Path for the pdbqt output with partial charges.
  protein: bool
    Whether the input is a protein (receptor) rather than a ligand.
  verbose: bool
    Whether to log progress messages.
  """
mol = rdkit_util.load_molecule(
input_file, add_hydrogens=True, calc_charges=True)[1]
if verbose:
logging.info("Create pdb with hydrogens added")
rdkit_util.write_molecule(mol, str(hyd_output), is_protein=protein)
if verbose:
logging.info("Create a pdbqt file from the hydrogenated pdb above.")
rdkit_util.write_molecule(mol, str(pdbqt_output), is_protein=protein)
if protein:
logging.info("Removing ROOT/ENDROOT/TORSDOF")
with open(pdbqt_output) as f:
pdbqt_lines = f.readlines()
    filtered_lines = []
    for line in pdbqt_lines:
      # Drop the rigid-body/torsion markers announced above.
      if "ROOT" in line or "ENDROOT" in line or "TORSDOF" in line:
        continue
      filtered_lines.append(line)
with open(pdbqt_output, "w") as f:
f.writelines(filtered_lines)
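# Usage sketch (hypothetical file names):
#
#     hydrogenate_and_compute_partial_charges(
#         "receptor.pdb", "pdb",
#         hyd_output="receptor_hyd.pdb",
#         pdbqt_output="receptor_hyd.pdbqt",
#         protein=True)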
class AromaticRing(object):
"""Holds information about an aromatic ring."""
def __init__(self, center, indices, plane_coeff, radius):
"""
Initializes an aromatic.
Parameters
----------
center: float
Center of the ring.
indices: list
List of the atom indices for ring atoms.
plane_coeff: list
A list of elements [a, b, c, d] that define a plane by equation
a x + b y + c z = d.
radius: float
Ring radius from center.
"""
self.center = center
self.indices = indices
    # a*x + b*y + c*z = d
self.plane_coeff = plane_coeff
self.radius = radius
def average_point(points):
"""Returns the point with averaged coordinates of arguments.
Parameters
----------
points: list
List of point objects.
Returns
-------
  pavg: Point object
    Has coordinates equal to the arithmetic average of the input points'
    coordinates.
  """
  # Accumulate in a float array; an integer accumulator would silently
  # truncate fractional coordinates.
  coords = np.zeros(3)
  for point in points:
    coords += point.as_array()
if len(points) > 0:
return Point(coords=coords / len(points))
else:
return Point(coords=coords)
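# Quick sanity example for average_point (commented so importing this module
# stays side-effect free):
#
#   p1 = Point(coords=np.array([0.0, 0.0, 0.0]))
#   p2 = Point(coords=np.array([2.0, 4.0, 6.0]))
#   average_point([p1, p2]).as_array()  # -> array([1., 2., 3.])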
class Point(object):
"""
Simple implementation for a point in 3-space.
"""
def __init__(self, x=None, y=None, z=None, coords=None):
"""
Inputs can be specified either by explicitly providing x, y, z coords
or by providing a numpy array of length 3.
Parameters
----------
x: float
X-coord.
y: float
Y-coord.
z: float
Z-coord.
coords: np.ndarray
Should be of length 3 in format np.array([x, y, z])
Raises
------
ValueError: If no arguments are provided.
"""
    # Compare against None explicitly: a coordinate of 0 or 0.0 is falsy and
    # would be wrongly rejected by a bare truth test.
    if x is not None and y is not None and z is not None:
#self.x, self.y, self.z = x, y, z
self.coords = np.array([x, y, z])
elif coords is not None: # Implicit eval doesn't work on numpy arrays.
#self.x, self.y, self.z = coords[0], coords[1], coords[2]
self.coords = coords
else:
raise ValueError("Must specify coordinates for Point!")
# TODO(bramsundar): Should this be __copy__?
def copy_of(self):
"""Return a copy of this point."""
return Point(coords=np.copy(self.coords))
def dist_to(self, point):
"""Distance (in 2-norm) from this point to another."""
return np.linalg.norm(self.coords - point.coords)
def magnitude(self):
"""Magnitude of this point (in 2-norm)."""
return np.linalg.norm(self.coords)
#return self.dist_to(Point(coords=np.array([0, 0, 0])))
def as_array(self):
"""Return the coordinates of this point as array."""
#return np.array([self.x, self.y, self.z])
return self.coords
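# Example of the Point API (commented out, shown for clarity; note the
# constructor above accepts explicit zero coordinates):
#
#   p = Point(x=3.0, y=4.0, z=0.0)
#   p.magnitude()                                    # -> 5.0
#   p.dist_to(Point(coords=np.array([0., 0., 0.])))  # -> 5.0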
class Atom(object):
"""
Implements a container class for atoms. This class contains useful
annotations about the atom.
"""
def __init__(self,
atomname="",
residue="",
               coordinates=None,
element="",
pdb_index="",
line="",
atomtype="",
indices_of_atoms_connecting=None,
charge=0,
resid=0,
chain="",
structure="",
comment=""):
"""
Initializes an atom.
Assumes that atom is loaded from a PDB file.
Parameters
----------
atomname: string
Name of atom. Note that atomname is not the same as residue since
atomnames often have extra annotations (e.g., CG, NZ, etc).
    residue: string
      Name of protein residue this atom belongs to.
    element: string
      Name of atom's element.
    coordinates: Point, optional
      A point object (x, y, z are in Angstroms). Defaults to a far-away
      sentinel point.
pdb_index: string
Index of the atom in source PDB file.
line: string
The line in the PDB file which specifies this atom.
atomtype: string
Element of atom. This differs from atomname which typically has extra
annotations (e.g. CA, OA, HD, etc)
    indices_of_atoms_connecting: list
      The indices (in a PDB object) of all atoms bonded to this one.
charge: float
Associated electrostatic charge.
resid: int
The residue number in the receptor (listing the protein as a chain from
N-Terminus to C-Terminus). Assumes this is a protein atom.
chain: string
Chain identifier for molecule. See PDB spec.
structure: string
One of ALPHA, BETA, or OTHER for the type of protein secondary
structure this atom resides in (assuming this is a receptor atom).
comment: string
Either LIGAND or RECEPTOR depending on whether this is a ligand or
receptor atom.
"""
self.atomname = atomname
self.residue = residue
    if coordinates is None:
      # Sentinel far outside any real structure, constructed per-instance so
      # Atom objects never share one mutable default Point.
      coordinates = Point(coords=np.array([99999, 99999, 99999]))
    self.coordinates = coordinates
self.element = element
self.pdb_index = pdb_index
self.line = line
self.atomtype = atomtype
if indices_of_atoms_connecting is not None:
self.indices_of_atoms_connecting = indices_of_atoms_connecting
else:
self.indices_of_atoms_connecting = []
self.charge = charge
self.resid = resid
self.chain = chain
self.structure = structure
self.comment = comment
def copy_of(self):
"""Make a copy of this atom."""
theatom = Atom()
theatom.atomname = self.atomname
theatom.residue = self.residue
theatom.coordinates = self.coordinates.copy_of()
theatom.element = self.element
theatom.pdb_index = self.pdb_index
theatom.line = self.line
theatom.atomtype = self.atomtype
theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]
theatom.charge = self.charge
theatom.resid = self.resid
theatom.chain = self.chain
theatom.structure = self.structure
theatom.comment = self.comment
return theatom
def create_pdb_line(self, index):
"""
Generates appropriate ATOM line for pdb file.
Parameters
----------
index: int
Index in associated PDB file.
"""
output = "ATOM "
output = (
output + str(index).rjust(6) + self.atomname.rjust(5) +
self.residue.rjust(4) + self.chain.rjust(2) + str(self.resid).rjust(4))
coords = self.coordinates.as_array() # [x, y, z]
output = output + ("%.3f" % coords[0]).rjust(12)
output = output + ("%.3f" % coords[1]).rjust(8)
output = output + ("%.3f" % coords[2]).rjust(8)
output = output + self.element.rjust(24)
return output
def number_of_neighbors(self):
"""Reports number of neighboring atoms."""
return len(self.indices_of_atoms_connecting)
def add_neighbor_atom_indices(self, indices):
"""
Adds atoms with provided PDB indices as neighbors.
Parameters
----------
    indices: list
List of indices of neighbors in PDB object.
"""
for index in indices:
if index not in self.indices_of_atoms_connecting:
self.indices_of_atoms_connecting.append(index)
def side_chain_or_backbone(self):
"""Determine whether receptor atom belongs to residue sidechain or backbone.
"""
# TODO(rbharath): Should this be an atom function?
if (self.atomname.strip() == "CA" or self.atomname.strip() == "C" or
self.atomname.strip() == "O" or self.atomname.strip() == "N"):
return "BACKBONE"
else:
return "SIDECHAIN"
def read_atom_pdb_line(self, line):
"""
TODO(rbharath): This method probably belongs in the PDB class, and not
in the Atom class.
Reads an ATOM or HETATM line from PDB and instantiates fields.
Atoms in PDBs are represented by ATOM or HETATM statements. ATOM and
HETATM statements follow the following record format:
(see ftp://ftp.wwpdb.org/pub/pdb/doc/format_descriptions/Format_v33_Letter.pdf)
COLUMNS DATA TYPE FIELD DEFINITION
-------------------------------------------------------------------------------------
1 - 6 Record name "ATOM "/"HETATM"
7 - 11 Integer serial Atom serial number.
13 - 16 Atom name Atom name.
17 Character altLoc Alternate location indicator.
18 - 20 Residue name resName Residue name.
22 Character chainID Chain identifier.
23 - 26 Integer resSeq Residue sequence number.
27 AChar iCode Code for insertion of residues.
31 - 38 Real(8.3) x Orthogonal coordinates for X in Angstroms.
39 - 46 Real(8.3) y Orthogonal coordinates for Y in Angstroms.
47 - 54 Real(8.3) z Orthogonal coordinates for Z in Angstroms.
55 - 60 Real(6.2) occupancy Occupancy.
61 - 66 Real(6.2) tempFactor Temperature factor.
77 - 78 LString(2) element Element symbol, right-justified.
79 - 80 LString(2) charge Charge on the atom.
"""
self.line = line
self.atomname = line[11:16].strip()
if len(self.atomname) == 1:
self.atomname = self.atomname + " "
elif len(self.atomname) == 2:
self.atomname = self.atomname + " "
elif len(self.atomname) == 3:
      # This padding is necessary for babel to work, though many files in
      # the Protein Data Bank leave the name unpadded.
      self.atomname = self.atomname + " "
self.coordinates = Point(
coords=np.array(
[float(line[30:38]),
float(line[38:46]),
float(line[46:54])]))
# now atom type (for pdbqt)
if line[77:79].strip():
self.atomtype = line[77:79].strip().upper()
elif self.atomname:
# If atomtype is not specified, but atomname is, set atomtype to the
# first letter of atomname. This heuristic suffices for proteins,
# since no two-letter elements appear in standard amino acids.
self.atomtype = self.atomname[:1]
else:
self.atomtype = ""
if line[69:76].strip() != "":
self.charge = float(line[69:76])
else:
self.charge = 0.0
if self.element == "": # try to guess at element from name
two_letters = self.atomname[0:2].strip().upper()
valid_two_letters = [
"BR", "CL", "BI", "AS", "AG", "LI", "HG", "MG", "MN", "RH", "ZN", "FE"
]
if two_letters in valid_two_letters:
self.element = two_letters
else: #So, just assume it's the first letter.
# Any number needs to be removed from the element name
        self.element = self.atomname
        for char in "0123456789@":
          self.element = self.element.replace(char, "")
        self.element = self.element[0:1].strip().upper()
self.pdb_index = line[6:12].strip()
self.residue = line[16:20]
# this only uses the rightmost three characters, essentially
# removing unique rotamer identification
self.residue = " " + self.residue[-3:]
if line[23:26].strip() != "":
self.resid = int(line[23:26])
else:
self.resid = 1
self.chain = line[21:22]
if self.residue.strip() == "":
self.residue = " MOL"
class Charged(object):
"""
  A class that represents a charged atom.
"""
def __init__(self, coordinates, indices, positive):
"""
Parameters
----------
coordinates: point
Coordinates of atom.
    indices: list
      Indices of the atoms belonging to this charged group.
    positive: bool
      Whether the charge is positive (True) or negative (False).
"""
self.coordinates = coordinates
self.indices = indices
self.positive = positive
def vector_subtraction(point1, point2): # point1 - point2
"""Subtracts the coordinates of the provided points."""
return Point(coords=point1.as_array() - point2.as_array())
def cross_product(point1, point2): # never tested
"""Calculates the cross-product of provided points."""
return Point(coords=np.cross(point1.as_array(), point2.as_array()))
def vector_scalar_multiply(point, scalar):
"""Multiplies the provided point by scalar."""
return Point(coords=scalar * point.as_array())
def dot_product(point1, point2):
"""Dot product of points."""
return np.dot(point1.as_array(), point2.as_array())
def dihedral(point1, point2, point3, point4): # never tested
"""Compute dihedral angle between 4 points.
TODO(rbharath): Write a nontrivial test for this.
"""
b1 = vector_subtraction(point2, point1)
b2 = vector_subtraction(point3, point2)
b3 = vector_subtraction(point4, point3)
b2Xb3 = cross_product(b2, b3)
b1Xb2 = cross_product(b1, b2)
b1XMagb2 = vector_scalar_multiply(b1, b2.magnitude())
radians = math.atan2(dot_product(b1XMagb2, b2Xb3), dot_product(b1Xb2, b2Xb3))
return radians
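# Commented check for dihedral(), which is marked "never tested" above: these
# four points form a right-angle torsion, so the call should return pi/2:
#
#   p1 = Point(coords=np.array([0., 1., 0.]))
#   p2 = Point(coords=np.array([0., 0., 0.]))
#   p3 = Point(coords=np.array([1., 0., 0.]))
#   p4 = Point(coords=np.array([1., 0., 1.]))
#   dihedral(p1, p2, p3, p4)  # -> ~1.5708 (math.pi / 2)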
def angle_between_three_points(point1, point2, point3):
"""Computes the angle (in radians) between the three provided points."""
return angle_between_points(
vector_subtraction(point1, point2), vector_subtraction(point3, point2))
def angle_between_points(point1, point2):
"""Computes the angle (in radians) between two points."""
return math.acos(
dot_product(point1, point2) / (point1.magnitude() * point2.magnitude()))
def normalized_vector(point):
"""Normalize provided point."""
return Point(coords=point.as_array() / np.linalg.norm(point.as_array()))
def distance(point1, point2):
"""Computes distance between two points."""
return point1.dist_to(point2)
def project_point_onto_plane(point, plane_coefficients):
"""Finds nearest point on specified plane to given point.
Parameters
----------
point: Point
Given point
plane_coefficients: list
    [a, b, c, d] where the plane equation is ax + by + cz = d
"""
# The normal vector to plane is n = [a, b, c]
offset = plane_coefficients[3]
normal = np.array(plane_coefficients[:3])
# We first shift by basepoint (a point on given plane) to make math
# simpler. basepoint is given by d/||n||^2 * n
basepoint = (offset / np.linalg.norm(normal)**2) * normal
diff = point.as_array() - basepoint
# The perpendicular component of diff to plane is
# (n^T diff / ||n||^2) * n
perp = (np.dot(normal, diff) / np.linalg.norm(normal)**2) * normal
closest = basepoint + (diff - perp)
return Point(coords=np.array(closest))
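# Minimal self-check sketch for project_point_onto_plane, guarded so the
# module stays import-safe. The plane [0, 0, 1, 1] is z = 1, so (1, 2, 5)
# should project straight down to (1, 2, 1):
if __name__ == "__main__":
  projected = project_point_onto_plane(
      Point(coords=np.array([1.0, 2.0, 5.0])), [0.0, 0.0, 1.0, 1.0])
  print(projected.as_array())  # expected: [1. 2. 1.]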
|
{"hexsha": "656aeae02526ae400d8b9f12fa0a8c0a5f15147c", "size": 17258, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepchem/feat/nnscore_utils.py", "max_stars_repo_name": "n3011/deepchem", "max_stars_repo_head_hexsha": "c316d998c462ce01032f0dae883856b400ea4765", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-06T09:13:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T15:03:35.000Z", "max_issues_repo_path": "deepchem/feat/nnscore_utils.py", "max_issues_repo_name": "n3011/deepchem", "max_issues_repo_head_hexsha": "c316d998c462ce01032f0dae883856b400ea4765", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-14T23:16:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T23:16:27.000Z", "max_forks_repo_path": "deepchem/feat/nnscore_utils.py", "max_forks_repo_name": "n3011/deepchem", "max_forks_repo_head_hexsha": "c316d998c462ce01032f0dae883856b400ea4765", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-06T20:32:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-06T20:32:02.000Z", "avg_line_length": 32.0185528757, "max_line_length": 90, "alphanum_fraction": 0.6347780739, "include": true, "reason": "import numpy", "num_tokens": 4315}
|
# In this example we do a few things to detect edges:
# 1. We define two filters (a.k.a. Sobel filters) called Hx, Hy
# 2. We convolve the image with Hx and Hy to get Gx, Gy
# 3. From there we combine Gx and Gy into the edge-detection output G
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.signal import convolve2d
# Import the image
im = Image.open('Lenna.png')
Hx = np.array([[1,0,-1],[2,0,-2],[1,0,-1]]) # Filter 1
Hy = np.array([[1,2,1],[0,0,0],[-1,-2,-1]]) # Filter 2
# Convert image to grayscale by averaging over the color channel
gray = np.mean(im, axis =2)
# Apply convolution function
Gx = convolve2d(gray, Hx)
Gy = convolve2d(gray, Hy)
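# Note: convolve2d defaults to mode='full', so Gx and Gy are slightly larger
# than the input; passing mode='same', e.g. convolve2d(gray, Hx, mode='same'),
# would keep the output the same size as the grayscale image.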
# Combine the two gradients into the edge magnitude G
G = np.sqrt((Gx**2)+(Gy**2))
# Plot the original image and the edge map side-by-side
# First of two subplots: original image
plt.subplot(1,2,1)
plt.imshow(im)
# Second subplot: edge magnitude
plt.subplot(1,2,2)
plt.imshow(G, cmap='gray')
plt.show()
|
{"hexsha": "710ca87a479c8025b0291ed50568ec979bb5dfb8", "size": 959, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/edge-detection.py", "max_stars_repo_name": "0w8States/numpy-stack-samples", "max_stars_repo_head_hexsha": "2fca4ee45cb532cc12d5646276ee53a7e4f0d0f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scipy/edge-detection.py", "max_issues_repo_name": "0w8States/numpy-stack-samples", "max_issues_repo_head_hexsha": "2fca4ee45cb532cc12d5646276ee53a7e4f0d0f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scipy/edge-detection.py", "max_forks_repo_name": "0w8States/numpy-stack-samples", "max_forks_repo_head_hexsha": "2fca4ee45cb532cc12d5646276ee53a7e4f0d0f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2368421053, "max_line_length": 70, "alphanum_fraction": 0.6923879041, "include": true, "reason": "import numpy,from scipy", "num_tokens": 304}
|
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from torchid.statespace.module.ssmodels_ct import NeuralStateSpaceModel
from torchid.statespace.module.ss_simulator_ct import ForwardEulerSimulator
import gpytorch
import finite_ntk
import loader
from torchid import metrics
class StateSpaceWrapper(torch.nn.Module):
def __init__(self, model):
super(StateSpaceWrapper, self).__init__()
self.model = model
def forward(self, u_in):
x_0 = torch.zeros(2) # np.zeros(2).astype(np.float32)
x_sim_torch = self.model(x_0, u_in)
y_out = x_sim_torch[:, [0]]
return y_out
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, model, use_linearstrategy=False):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = finite_ntk.lazy.NTK(model=model, use_linearstrategy=use_linearstrategy)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
if __name__ == '__main__':
# In[Set seed for reproducibility]
np.random.seed(0)
torch.manual_seed(0)
# In[Settings]
use_linearstrategy = False
sigma = 0.1
model_type = "256step_noise_V"
# In[Load dataset]
t, u, y, x = loader.rlc_loader("transfer", noise_std=sigma, n_data=2000)
seq_len = t.size
# In[Second-order dynamical system custom defined]
# Setup neural model structure and load fitted model parameters
ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=50)
nn_solution = ForwardEulerSimulator(ss_model)
model_filename = f"model_SS_{model_type}.pt"
nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
# In[Model wrapping]
input_size = 1
output_size = 1
model_wrapped = StateSpaceWrapper(nn_solution)
u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
y_torch = torch.tensor(y[None, ...], dtype=torch.float)
u_torch_f = torch.clone(u_torch.view((1 * seq_len, input_size))) # [bsize*seq_len, n_in]
y_torch_f = torch.clone(y_torch.view(1 * seq_len, output_size)) # [bsize*seq_len, ]
gp_lh = gpytorch.likelihoods.GaussianLikelihood()
gp_lh.noise = sigma**2
gp_model = ExactGPModel(u_torch_f, y_torch_f.squeeze(), gp_lh, model_wrapped, use_linearstrategy=use_linearstrategy)
    # No GP training (we consider the kernel (hyper)parameters fixed).
    # We may think of training the measurement noise by marginal log-likelihood (MLL) optimization...
gp_model.eval()
gp_lh.eval()
# In[Evaluate the GP-like model on new data]
t_new, u_new, y_new, x_new = loader.rlc_loader("eval", noise_std=0.0, n_data=2000)
u_torch_new = torch.tensor(u_new[None, :, :])
u_torch_new_f = torch.clone(u_torch_new.view((1 * seq_len, input_size))) # [bsize*seq_len, n_in]
with gpytorch.settings.fast_pred_var(): #, gpytorch.settings.max_cg_iterations(4000), gpytorch.settings.cg_tolerance(0.1):
predictive_dist = gp_model(u_torch_new_f)
y_lin_new_f = predictive_dist.mean.data
y_lin_new = y_lin_new_f.reshape(seq_len, output_size).detach().numpy()
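    # The predictive distribution also carries uncertainty. A commented sketch
    # of extracting it via the standard GPyTorch MultivariateNormal API:
    #   y_std_new_f = predictive_dist.stddev.detach()
    #   lower, upper = predictive_dist.confidence_region()  # mean +/- 2 sigma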
# In[Nominal model output]
with torch.no_grad():
y_sim_new_f = model_wrapped(u_torch_new_f)
y_sim_new = y_sim_new_f.reshape(seq_len, output_size).detach().numpy()
# In[Plot]
plt.plot(t_new, y_new, 'k', label="True")
plt.plot(t_new, y_sim_new, 'r', label="Sim")
plt.plot(t_new, y_lin_new, 'b', label="Lin")
plt.legend()
# R-squared metrics
R_sq = metrics.r_squared(y_new, y_lin_new)
print(f"R-squared linear model: {R_sq}")
R_sq = metrics.r_squared(y_new, y_sim_new)
print(f"R-squared nominal model: {R_sq}")
    # y_lin_new is already a numpy array (see .detach().numpy() above), so it
    # can be saved directly:
    #if use_linearstrategy:
    #    np.save("y_lin_gp_parspace.npy", y_lin_new)
    #else:
    #    np.save("y_lin_gp_funspace.npy", y_lin_new)
|
{"hexsha": "cc9faf53f9cdae76f19a17386125ec6a3c986f1b", "size": 4080, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/RLC/RLC_SS_transfer_gp.py", "max_stars_repo_name": "forgi86/RNN-adaptation", "max_stars_repo_head_hexsha": "d32e8185c6a746060dd726a0f5080231e0c9439b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-13T10:50:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T10:50:02.000Z", "max_issues_repo_path": "examples/RLC/RLC_SS_transfer_gp.py", "max_issues_repo_name": "forgi86/RNN-adaptation", "max_issues_repo_head_hexsha": "d32e8185c6a746060dd726a0f5080231e0c9439b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/RLC/RLC_SS_transfer_gp.py", "max_forks_repo_name": "forgi86/RNN-adaptation", "max_forks_repo_head_hexsha": "d32e8185c6a746060dd726a0f5080231e0c9439b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4311926606, "max_line_length": 126, "alphanum_fraction": 0.7034313725, "include": true, "reason": "import numpy", "num_tokens": 1131}
|