index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
26,303,958
|
Rhaall/sparetime-backend
|
refs/heads/master
|
/Models/Event.py
|
from sqlalchemy import Column, Integer, String, ForeignKey, Text
from database.database import Base
from sqlalchemy.orm import relationship
from Models.Keyword import Keyword
class Event(Base):
    """Declarative model for the `event` table.

    Price, date, duration and zipcode are stored as plain strings, so any
    parsing/validation must happen in application code.
    """

    __tablename__ = 'event'
    id = Column(Integer, primary_key=True)
    # Unique short identifier, also used by __repr__.
    label = Column(String(100), unique=True, nullable=False)
    title = Column(Text(), nullable=False)
    # NOTE(review): attribute name shadows the builtin `type` inside the class body.
    type = Column(Text(), nullable=False)
    description = Column(Text(), nullable=False)
    company_name = Column(String(100), nullable=False)
    price = Column(String(100), nullable=False)
    date = Column(String(100), nullable=False)
    duration = Column(String(100), nullable=False)
    address = Column(String(100), nullable=False)
    zipcode = Column(String(100), nullable=False)
    # Exactly three required picture slots (URLs/paths — presumably; confirm against callers).
    picture1 = Column(String(255), nullable=False)
    picture2 = Column(String(255), nullable=False)
    picture3 = Column(String(255), nullable=False)
    # FK to location.id (see the Location model).
    location_id = Column(Integer, ForeignKey('location.id'), nullable=False)
    # One-to-many to Keyword (Keyword.event_id points back at event.id).
    # NOTE(review): attribute name shadows the imported Keyword class and is
    # singular/PascalCase; a lowercase plural name would be clearer.
    Keyword = relationship("Keyword")

    def __repr__(self):
        return '<Event %r>' % self.label
|
{"/Models/Event.py": ["/Models/Keyword.py"], "/Models/User.py": ["/Models/KeywordByUser.py"], "/app.py": ["/Models/User.py", "/Models/Event.py", "/Models/Keyword.py", "/Models/KeywordByUser.py", "/Models/Location.py"], "/Models/Location.py": ["/Models/Event.py"], "/Models/Keyword.py": ["/Models/KeywordByUser.py"]}
|
26,303,959
|
Rhaall/sparetime-backend
|
refs/heads/master
|
/Models/Location.py
|
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from database.database import Base
from Models.Event import Event
class Location(Base):
    """Declarative model for the `location` table (one unique city per row)."""

    __tablename__ = 'location'
    id = Column(Integer, primary_key=True)
    city = Column(String(100), unique=True, nullable=False)
    # One-to-many: the events whose location_id points at this row.
    Events = relationship("Event")

    def __repr__(self):
        return '<Location %r>' % self.city
|
{"/Models/Event.py": ["/Models/Keyword.py"], "/Models/User.py": ["/Models/KeywordByUser.py"], "/app.py": ["/Models/User.py", "/Models/Event.py", "/Models/Keyword.py", "/Models/KeywordByUser.py", "/Models/Location.py"], "/Models/Location.py": ["/Models/Event.py"], "/Models/Keyword.py": ["/Models/KeywordByUser.py"]}
|
26,303,960
|
Rhaall/sparetime-backend
|
refs/heads/master
|
/Models/Keyword.py
|
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from database.database import Base
from Models.KeywordByUser import KeywordByUser
class Keyword(Base):
    """Declarative model for the `keyword` table; each keyword tags one event."""

    __tablename__ = 'keyword'
    id = Column(Integer, primary_key=True)
    label = Column(String(100), unique=True, nullable=False)
    # FK to event.id — a keyword row belongs to exactly one event.
    event_id = Column(Integer, ForeignKey('event.id'), nullable=False)
    # Rows of the user<->keyword association (see KeywordByUser model).
    keyword_by_user = relationship("KeywordByUser")

    def __repr__(self):
        return '<Keyword %r>' % self.label
|
{"/Models/Event.py": ["/Models/Keyword.py"], "/Models/User.py": ["/Models/KeywordByUser.py"], "/app.py": ["/Models/User.py", "/Models/Event.py", "/Models/Keyword.py", "/Models/KeywordByUser.py", "/Models/Location.py"], "/Models/Location.py": ["/Models/Event.py"], "/Models/Keyword.py": ["/Models/KeywordByUser.py"]}
|
26,402,088
|
d-giles/F-Engine_Search
|
refs/heads/main
|
/test_bench/test.py
|
# Benchmark script: generate synthetic "false" (noise-only) and "true"
# (injected-signal) cadences and run the trained VAE-based search on each.
import sys

sys.path.insert(1, '../GBT_pipeline')
from synthetic import create_true, create_full_cadence, create_false, create_true_single_shot
import matplotlib.pyplot as plt
import numpy as np
from single_search import search
from execute_model import model_load
import tensorflow as tf

tf.get_logger().setLevel('INFO')

# Number of synthetic cadences generated per class.
NUM_SAMPLES = 10000

print("Loading in plate")
# Background "plate" of real filtered data that synthetic signals are built from.
plate = np.load('../../filtered.npy')

print("Creating False")
false_data = create_full_cadence(create_false, plate = plate, samples = NUM_SAMPLES, snr_base=300, snr_range=20)
print("Creating True")
true_data = create_full_cadence(create_true, plate = plate, samples = NUM_SAMPLES, snr_base=300, snr_range=20, factor =0.1)
# print("Creating Single Shot True")
# true_single_shot = create_full_cadence(create_true_single_shot, plate = plate, samples = 10000, snr_base=300, snr_range=20, factor=10)

print("Load Model")
# Trained VAE encoder; presumably the same checkpoint used by the pipeline — confirm.
model = model_load("VAE-ENCODERv9.h5")

print("Search False")
# Third argument flags whether the data set contains injected signals.
search(false_data, model, False)
print("Search True")
search(true_data, model, True)
# print("Search True Single Shot")
# search(true_single_shot, model, True)
|
{"/test.py": ["/preprocess.py"], "/GBT_pipeline/decorated_search_multicore.py": ["/preprocess.py"]}
|
26,402,089
|
d-giles/F-Engine_Search
|
refs/heads/main
|
/ML_Training/preprocess.py
|
import numpy as np
import matplotlib.pyplot as plt
from numba import jit, prange, njit
from blimpy import Waterfall
import time
import random
import warnings
from tqdm import tqdm
warnings.filterwarnings("ignore")
# data preprocessing operations
# Goal is to take a full cadence and shape it into something usable
# for a wide range of ML pipelines
# We get the data for a strict shape of freq 256, and time 16 and we stack them together.
# returns the stack of all the slices in order and log normalized and scaled between 1 and 0.
def get_data(cadence, start, end):
    """Load the six observations of a cadence between two frequencies.

    Each file in `cadence` (order: A1, B, A2, C, A3, D) is read with blimpy,
    cut into (samples, 16, 256, 1) snippets, and the six observations are
    interleaved into one (samples, 6, 16, 256, 1) array, log-normalized
    to [0, 1] per sample by combine_cadence.
    """
    warnings.filterwarnings("ignore")
    print("Getting Data")
    # Read all six observations of the cadence in file order.
    loaded = [
        Waterfall(path, f_start=start, f_stop=end, max_load=10).data
        for path in cadence[:6]
    ]
    start_pre = time.time()
    # Reshape each observation into fixed-size snippets.
    a1, b, a2, c, a3, d = [shaping_data(obs) for obs in loaded]
    data = combine_cadence(a1, a2, a3, b, c, d)
    print("Execution Time: "+str(time.time()-start_pre))
    return data
# shaping the data by stacking them together.
@jit(parallel=True)
def shaping_data(data):
    """Cut one observation into (n, 16, 256, 1) snippets along frequency.

    Assumes data is shaped (time, 1, freq) with 16 time steps — TODO confirm
    against the blimpy Waterfall output used by get_data.
    """
    n_snippets = data.shape[2] // 256
    out = np.zeros((n_snippets, 16, 256, 1))
    for k in prange(n_snippets):
        # k-th 256-channel frequency window, channel axis dropped.
        out[k, :, :, 0] = data[:, 0, k * 256:(k + 1) * 256]
    return out
# preprocess the data with the following operations acclerated via numba
# njit already compiles in nopython mode; the original @njit(nopython=True)
# passed a redundant kwarg that numba warns about and ignores.
@njit
def pre_proc(data):
    """Log-scale `data` and min-max normalize it into [0, 1].

    NOTE(review): np.log assumes all values are strictly positive (a
    previously commented-out shift suggests this was once enforced) —
    confirm upstream data is positive.
    """
    data = np.log(data)
    data = data - data.min()
    data = data / data.max()
    return data
#combing all the data together
# Combine all six observations into one array instead of separate cadence samples.
@jit(parallel=True, nopython=True)
def combine_cadence(A1, A2, A3, B, C, D):
    """Interleave the six shaped observations into (samples, 6, 16, 256, 1).

    The cadence axis is ordered A1, B, A2, C, A3, D (on/off alternation),
    and each sample is log-normalized in place via pre_proc.
    """
    samples = A1.shape[0]
    print(samples)
    data = np.zeros((samples, 6, 16, 256, 1))
    for i in prange(samples):
        # On/off observation order within the cadence axis.
        data[i, 0, :, :, :] = A1[i, :, :, :]
        data[i, 1, :, :, :] = B[i, :, :, :]
        data[i, 2, :, :, :] = A2[i, :, :, :]
        data[i, 3, :, :, :] = C[i, :, :, :]
        data[i, 4, :, :, :] = A3[i, :, :, :]
        data[i, 5, :, :, :] = D[i, :, :, :]
        # Normalize across the whole cadence of a sample at once.
        data[i, :, :, :, :] = pre_proc(data[i, :, :, :, :])
    return data
|
{"/test.py": ["/preprocess.py"], "/GBT_pipeline/decorated_search_multicore.py": ["/preprocess.py"]}
|
26,402,090
|
d-giles/F-Engine_Search
|
refs/heads/main
|
/GBT_pipeline/decorated_search_multicore.py
|
# ============================================================
# Author: Peter Xiangyuan Ma
# Date: May 19 2021
# Purpose: split the search functionality into smaller chuncks
# to be called by the full_search.py pipeline. This code, loops
# through chunks of the cadence and preprocesses it,
# feed into neural network and then runs the clustering algorithm
# in parallel using multiple CPU cores.
# ============================================================
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
sys.path.insert(1, '../ML_Training')
from execute_model import model_predict_distribute
from preprocess import get_data
from numba import jit, prange, njit
from blimpy import Waterfall
import time
import random
from sklearn.cluster import SpectralClustering
import pandas as pd
import tensorflow as tf
from multiprocessing import Pool
import functools
import warnings
from tqdm import tqdm
from sklearn.metrics import silhouette_score
def sizeof_fmt(num, suffix='B'):
    ''' by Fred Cirera, https://stackoverflow.com/a/1094933/1870254, modified'''
    # Walk up the binary prefixes until the value fits below 1024.
    value = num
    for prefix in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(value) < 1024.0:
            return "%3.1f %s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Anything beyond Zi is reported in yobibytes.
    return "%.1f %s%s" % (value, 'Yi', suffix)
def screening(data, labels, index):
    """Score a clustered window; accept it only above the per-chunk threshold.

    Returns (accepted, fit) where fit is the silhouette score of `labels`
    over `data` and `index` selects the frequency chunk's threshold.
    """
    # One threshold slot per frequency chunk; currently all 0.9.
    thresholds = [0.9] * 14
    fit = silhouette_score(data, labels)
    return fit >= thresholds[index], fit
# Function takes in small distributed chunks of data and runs spectral clustering on the data set
# returns a list of candidates with the frequency range.
def compute_parallel(result, cadence_length, WINDOW_SIZE, index, freq_ranges, n):
    """Cluster the n-th cadence window and return [start_freq, end_freq, fit] on a hit.

    Runs 2-way spectral clustering on one cadence slice of `result`; a hit
    requires the strict on/off/on/off/on/off label pattern plus a silhouette
    score above the per-chunk threshold (see screening). Returns None
    implicitly when the window is rejected.
    """
    # Slice the n-th cadence once and reuse it for clustering and screening.
    window = result[n * cadence_length:(n + 1) * cadence_length, :]
    labels = SpectralClustering(
        n_clusters=2, assign_labels="discretize", random_state=0
    ).fit_predict(window)
    if strong_cadence_pattern(labels):
        # Fix: the original screened result[n*6:(n+1)*6] (hard-coded 6, vs
        # cadence_length for clustering) and called screening() twice,
        # doubling the silhouette computation. cadence_length is 6 in this
        # pipeline, so reuse the same window and score it a single time.
        accepted, fit = screening(window, labels, index)
        if accepted:
            # WINDOW_SIZE is the width of one snippet in Hz; convert the
            # window index into an absolute frequency range.
            hit_start = freq_ranges[index][0] + n * WINDOW_SIZE
            hit_end = hit_start + WINDOW_SIZE
            return [hit_start, hit_end, fit]
# Weakest cadence pattern where anything with a on, and adjacent off pattern is accepted
def weak_cadence_pattern(labels):
    """Loosest cadence test: accept any on-with-adjacent-off transition pattern.

    NOTE(review): Python's `and` binds tighter than `or`, so the original
    expression grouped exactly as written below. The explicit parentheses
    preserve that behavior — confirm the grouping matches the intended
    "on, adjacent off" rule.
    """
    return (
        labels[0] != labels[1]
        or (labels[1] != labels[2] and labels[2] != labels[3])
        or (labels[3] != labels[4] and labels[4] != labels[5])
    )
# Strongest cadence pattern where only on,off,on,off,on,off patterns are accepeted.
def strong_cadence_pattern(labels):
    """Strictest cadence test: the six labels must alternate at every step."""
    return all(labels[k] != labels[k + 1] for k in range(5))
# Combines all the data together into one chunkc of data instead of in separate cadence samples.
@jit(parallel=True)
def combine(data):
    """Flatten (n_cadences, cadence_len, ...) into (n_cadences*cadence_len, ...).

    The cadence axis is collapsed while preserving order both within and
    across cadences, so row c*cadence_len + k is observation k of cadence c.
    """
    n_cad = data.shape[0]
    per_cad = data.shape[1]
    flat = np.zeros((n_cad * per_cad, data.shape[2], data.shape[3], data.shape[4]))
    for c in prange(n_cad):
        flat[c * per_cad:(c + 1) * per_cad, :, :, :] = data[c, :, :, :, :]
    return flat
# computes the statistical sampling from the two layers of mean and variance
def sample_creation(inputs):
    """Draw a latent sample z = mean + exp(0.5 * log_var) * eps, eps ~ N(0, I).

    `inputs` is a pair (z_mean, z_log_var) of 2-D tensors (batch, dim);
    this is the VAE reparameterization step — sampling stays differentiable
    w.r.t. the mean/variance layers.
    """
    z_mean = inputs[0]
    z_log_var = inputs[1]
    batch = tf.shape(z_mean)[0]
    dim = tf.shape(z_mean)[1]
    # Standard-normal noise with the same (batch, dim) shape as z_mean.
    epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon
# Classification function
def classification_data(target_name, cadence, model, out_dir, iterations=6):
    """Search one cadence for candidate hits and write them to <target_name>.csv.

    target_name: label used in log lines and as the output CSV filename stem.
    cadence: list of six filterbank file paths; cadence[0] supplies the header.
    model: trained network; predict() output index [2] is used for clustering.
    out_dir: NOTE(review) currently unused — confirm whether the CSV was meant
        to be written there instead of the working directory.
    iterations: number of frequency chunks the observed band is split into.
    """
    # NOTE(review): f_hit_start / f_hit_end are never used below.
    f_hit_start = []
    f_hit_end = []
    # Get the header information from the first observation.
    header = Waterfall(cadence[0]).header
    # fch1 is the maximum frequency in MHz (foff is negative).
    end = header['fch1']
    # Start = max frequency + nchans * (negative channel width).
    start = header['fch1'] + header['nchans'] * header['foff']
    interval = (end - start) / iterations
    # Window size in MHz: 256 channels per snippet.
    WINDOW_SIZE = abs(256 * header['foff'])
    # Break the band into `iterations` chunks of equal width.
    freq_ranges = []
    for i in range(iterations):
        f_start = start + i * interval
        f_stop = start + (i + 1) * (interval)
        freq_ranges.append([f_start, f_stop])
    print(freq_ranges)
    all_candidates = []
    # Execution loop through each of the individual chunks of data.
    # NOTE(review): range(1) only searches the first chunk — looks like a
    # debugging leftover; confirm range(iterations) was intended.
    for index in range(1):
        print(target_name + " Iteration: " + str(index) + " Range: " + str(freq_ranges[index]))
        # Load and preprocess this chunk of the cadence.
        data = get_data(cadence, start=freq_ranges[index][0], end=freq_ranges[index][1])
        num_samples = data.shape[0]
        cadence_length = data.shape[1]
        # Collapse the cadence axis while keeping cadence order.
        data = combine(data)
        # Feed through the neural network; output [2] is the embedding used below.
        net = time.time()
        result = model.predict(data, batch_size=8000, use_multiprocessing=True)[2]
        print("Push Through Neural Net: " + str(time.time() - net))
        # Run spectral clustering in parallel with one idle core (39 of 40).
        cluster = time.time()
        with Pool(39) as p:
            candidates = p.map(functools.partial(compute_parallel, result, cadence_length, WINDOW_SIZE, index, freq_ranges), range(num_samples))
        print("Parallel Spectral Clustering: " + str(time.time() - cluster))
        # compute_parallel returns None for rejected windows; keep only hits.
        final_can = [i for i in candidates if i]
        print(len(final_can))
        all_candidates.append(final_can)
    # Flatten the per-chunk candidate lists into one table.
    final_set = []
    for k in range(len(all_candidates)):
        for el in all_candidates[k]:
            final_set.append(el)
    print("Number of Final Candidates " + str(len(final_set)))
    df = pd.DataFrame(final_set, columns=['start_freq', 'end_freq', 'Confidence'], dtype=float)
    df.to_csv(target_name + ".csv")
|
{"/test.py": ["/preprocess.py"], "/GBT_pipeline/decorated_search_multicore.py": ["/preprocess.py"]}
|
26,424,545
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/eda/missing/compute/univariate.py
|
"""This module implements the plot_missing(df) function's
calculating intermediate part
"""
from typing import Any, Generator, List, Optional
import numpy as np
import pandas as pd
from ...data_array import DataArray
from ...dtypes import (
Continuous,
DTypeDef,
Nominal,
detect_dtype,
is_dtype,
)
from ...intermediate import ColumnsMetadata, Intermediate
from ...staged import staged
from .common import LABELS, histogram
def _compute_missing_univariate(  # pylint: disable=too-many-locals
    df: DataArray, x: str, bins: int, dtype: Optional[DTypeDef] = None,
) -> Generator[Any, Any, Intermediate]:
    """Calculate the distribution change on other columns when
    the missing values in x is dropped.

    Staged generator: the lazy region builds delayed histograms, yields them
    for computation, and the eager region turns the concrete results into
    per-column DataFrames for rendering.
    """
    j = df.columns.get_loc(x)
    hists = {}
    for i in range(len(df.columns)):
        if i == j:
            continue
        col_name = df.columns[i]
        # col0: non-null values of the current column (baseline distribution).
        col0 = df.values[~df.nulls[:, i], i].astype(df.dtypes[col_name])
        # col1: same column restricted to rows where x is also present,
        # i.e. the distribution after dropping rows with x missing.
        col1 = df.values[~(df.nulls[:, j] | df.nulls[:, i]), i].astype(
            df.dtypes[col_name]
        )
        hist_range = None  # pylint: disable=redefined-builtin
        # Share one bin range across both histograms so they are comparable.
        if is_dtype(detect_dtype(col0, dtype), Continuous()):
            hist_range = (col0.min(axis=0), col0.max(axis=0))
        hists[col_name] = [
            histogram(col, dtype=dtype, bins=bins, return_edges=True, range=hist_range)
            for col in [col0, col1]
        ]
    ### Lazy Region End
    # The staged protocol computes the delayed histograms and sends the
    # concrete results back in through this yield.
    hists = yield hists
    ### Eager Region Begin
    dfs = {}
    meta = ColumnsMetadata()
    for col_name, hists_ in hists.items():
        counts, xs, *edges = zip(*hists_)
        # Tag each histogram row with its origin (before/after drop).
        labels = np.repeat(LABELS, [len(x) for x in xs])
        data = {
            "x": np.concatenate(xs),
            "count": np.concatenate(counts),
            "label": labels,
        }
        if edges:
            # Continuous columns carry bin edges; expose them as bounds.
            lower_bound: List[float] = []
            upper_bound: List[float] = []
            for edge in edges[0]:
                lower_bound.extend(edge[:-1])
                upper_bound.extend(edge[1:])
            data["lower_bound"] = lower_bound
            data["upper_bound"] = upper_bound
        ret_df = pd.DataFrame(data)
        # If the cardinality of a categorical column is too large,
        # we show the top `bins` values, sorted by their count before drop.
        if len(counts[0]) > bins and is_dtype(
            detect_dtype(df.frame[col_name], dtype), Nominal()
        ):
            sortidx = np.argsort(-counts[0])
            selected_xs = xs[0][sortidx[:bins]]
            ret_df = ret_df[ret_df["x"].isin(selected_xs)]
            meta[col_name, "partial"] = (bins, len(counts[0]))
        else:
            meta[col_name, "partial"] = (len(counts[0]), len(counts[0]))
        meta[col_name, "dtype"] = detect_dtype(df.frame[col_name], dtype)
        dfs[col_name] = ret_df
    return Intermediate(data=dfs, x=x, meta=meta, visual_type="missing_impact_1vn")


# Not using decorator here because jupyter autoreload does not support it.
compute_missing_univariate = staged(  # pylint: disable=invalid-name
    _compute_missing_univariate
)
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,546
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/connector/__init__.py
|
"""
DataConnector
"""
from .connector import Connector
__all__ = ["Connector"]
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,547
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/eda/correlation/compute/common.py
|
"""Common components for compute correlation."""
from enum import Enum, auto
import dask
import numpy as np
from bottleneck import rankdata as rankdata_, nanrankdata as nanrankdata_
from scipy.stats import kendalltau as kendalltau_
class CorrelationMethod(Enum):
    """Supported correlation methods."""

    # auto() assigns sequential integer values; only member identity matters.
    Pearson = auto()
    Spearman = auto()
    KendallTau = auto()
@dask.delayed(  # pylint: disable=no-value-for-parameter
    name="rankdata-bottleneck", pure=True
)
def rankdata(data: np.ndarray, axis: int = 0) -> np.ndarray:
    """Delayed wrapper around bottleneck's rankdata (ranks along `axis`)."""
    return rankdata_(data, axis=axis)
# Fix: the task name and docstring were copy-pasted from rankdata above,
# making the two distinct delayed functions indistinguishable in dask graphs.
@dask.delayed(  # pylint: disable=no-value-for-parameter
    name="nanrankdata-bottleneck", pure=True
)
def nanrankdata(data: np.ndarray, axis: int = 0) -> np.ndarray:
    """Delayed wrapper around bottleneck's nanrankdata (NaN-aware ranks)."""
    return nanrankdata_(data, axis=axis)
@dask.delayed(  # pylint: disable=no-value-for-parameter
    name="kendalltau-scipy", pure=True
)
def kendalltau(  # pylint: disable=invalid-name
    a: np.ndarray, b: np.ndarray
) -> np.ndarray:
    """Delayed wrapper around scipy's kendalltau; returns only the correlation."""
    corr = kendalltau_(a, b).correlation
    # Sometimes corr is a plain float, which causes a dask error; force float64.
    return np.float64(corr)
# Fix: the task name was copy-pasted from kendalltau above; label this
# delayed wrapper with what it actually computes.
@dask.delayed(  # pylint: disable=no-value-for-parameter
    name="corrcoef-numpy", pure=True
)
def corrcoef(arr: np.ndarray) -> np.ndarray:
    """Delayed version of np.corrcoef for a 2-column array.

    Unpacks the 2x2 correlation matrix and returns the off-diagonal
    entry (the correlation between the two columns).
    """
    _, (corr, _) = np.corrcoef(arr, rowvar=False)
    return corr
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,548
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/connector/generator/__init__.py
|
"""ConfigGenerator"""
from .generator import ConfigGenerator
__all__ = ["ConfigGenerator"]
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,549
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/utils.py
|
"""Utility functions used by the whole library."""
from typing import Any
def is_notebook() -> Any:
    """
    :return: whether it is running in jupyter notebook
    """
    try:
        # pytype: disable=import-error
        from IPython import get_ipython  # pylint: disable=import-outside-toplevel

        # pytype: enable=import-error
    except ImportError:
        return False
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        return False
    # Only the ZMQ-based kernel shell counts as a notebook environment.
    return shell_name == "ZMQInteractiveShell"
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,550
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/eda/correlation/compute/bivariate.py
|
"""This module implements the intermediates computation
for plot_correlation(df) function."""
from operator import itruediv
from typing import Optional, Tuple
import dask
import dask.array as da
import numpy as np
import pandas as pd
from ...data_array import DataArray
from ...intermediate import Intermediate
def _calc_bivariate(
    df: DataArray,
    x: Optional[str] = None,
    y: Optional[str] = None,
    *,
    k: Optional[int] = None,
) -> Intermediate:
    """Compute the scatter-with-regression data for plot_correlation(df, x, y).

    Parameters
    ----------
    df
        The data containing both columns.
    x, y
        Column names to correlate; both must exist in ``df.columns``.
    k
        If given, label the k points that influence the pearson
        correlation the most in each direction.

    Raises
    ------
    ValueError
        If ``x`` or ``y`` is not a column of ``df``.
    """
    if x not in df.columns:
        raise ValueError(f"{x} not in columns names")
    if y not in df.columns:
        raise ValueError(f"{y} not in columns names")
    # Remember the column names; x and y are rebound to column data below.
    xname, yname = x, y
    df.compute()
    xloc = df.columns.get_loc(x)
    yloc = df.columns.get_loc(y)
    x = df.values[:, xloc]
    y = df.values[:, yloc]
    # Points are down-sampled to at most 1000 for plotting.
    coeffs, (x, y), influences = scatter_with_regression(x, y, k=k, sample_size=1000,)
    coeffs, (x, y), influences = dask.compute(coeffs, (x, y), influences)
    # lazy/eager border line
    result = {
        "coeffs": coeffs,
        "data": pd.DataFrame({xname: x, yname: y}),
    }
    # scatter_with_regression returns influences exactly when k was given.
    if (influences is None) != (k is None):
        raise RuntimeError("Not possible")
    if influences is not None and k is not None:
        infidx = np.argsort(influences)
        # Points with the k largest influences get "-", the k smallest get
        # "+", everything else "=".
        labels = np.full(len(influences), "=")
        # pylint: disable=invalid-unary-operand-type
        labels[infidx[-k:]] = "-"  # type: ignore
        # pylint: enable=invalid-unary-operand-type
        labels[infidx[:k]] = "+"
        result["data"]["influence"] = labels
    return Intermediate(**result, visual_type="correlation_scatter")
def scatter_with_regression(
    x: da.Array, y: da.Array, sample_size: int, k: Optional[int] = None
) -> Tuple[Tuple[da.Array, da.Array], Tuple[da.Array, da.Array], Optional[da.Array]]:
    """Calculate pearson correlation on 2 given arrays.

    Parameters
    ----------
    x : da.Array
    y : da.Array
    sample_size : int
        At most this many points are randomly sampled for plotting.
    k : Optional[int] = None
        Highlight k points which influence pearson correlation most

    Returns
    -------
    The regression coefficients ``(slope, intercept)``, the (possibly
    sampled) points ``(x, y)``, and the per-point influences
    (``None`` when ``k`` is ``None``).  All results are lazy dask objects.
    """
    if k == 0:
        raise ValueError("k should be larger than 0")
    # Design matrix [x, 1] for a least-squares fit of y = a*x + b.
    xp1 = da.vstack([x, da.ones_like(x)]).T
    xp1 = xp1.rechunk((xp1.chunks[0], -1))
    # Drop rows where either coordinate is NaN before fitting.
    mask = ~(da.isnan(x) | da.isnan(y))
    # if chunk size in the first dimension is 1, lstsq will use sfqr instead of tsqr,
    # where the former does not support nan in shape.
    if len(xp1.chunks[0]) == 1:
        xp1 = xp1.rechunk((2, -1))
        y = y.rechunk((2, -1))
        mask = mask.rechunk((2, -1))
    (coeffa, coeffb), _, _, _ = da.linalg.lstsq(xp1[mask], y[mask])
    if sample_size < x.shape[0]:
        # NOTE(review): da.random.choice samples with replacement by default —
        # confirm that is intended for the scatter sample.
        samplesel = da.random.choice(x.shape[0], int(sample_size), chunks=x.chunksize)
        x = x[samplesel]
        y = y[samplesel]
    if k is None:
        return (coeffa, coeffb), (x, y), None
    influences = pearson_influence(x, y)
    return (coeffa, coeffb), (x, y), influences
def pearson_influence(xarr: da.Array, yarr: da.Array) -> da.Array:
    """Influence of deleting each single point on the pearson correlation.

    Returns a lazy array with one influence value per input point.
    """
    if xarr.shape != yarr.shape:
        raise ValueError(
            f"The shape of xarr and yarr should be same, got {xarr.shape}, {yarr.shape}"
        )
    # Vectorized leave-one-out: for every point compute the correlation terms
    # of the remaining n-1 points by subtracting that point from the totals.
    npts = xarr.shape[0]
    sq_x = da.square(xarr)
    sq_y = da.square(yarr)
    prod_xy = xarr * yarr
    # Broadcast each scalar total to a length-n vector so the
    # per-point subtraction below is elementwise.
    tot_x = da.ones(npts) * da.sum(xarr)
    tot_y = da.ones(npts) * da.sum(yarr)
    tot_xy = da.ones(npts) * da.sum(prod_xy)
    tot_sq_x = da.ones(npts) * da.sum(sq_x)
    tot_sq_y = da.ones(npts) * da.sum(sq_y)
    # Numerator and variance terms are both scaled by (n-1)^2 so no
    # division is needed until the final ratio.
    num = (npts - 1) * (tot_xy - prod_xy) - (tot_x - xarr) * (tot_y - yarr)
    var_x = (npts - 1) * (tot_sq_x - sq_x) - da.square(tot_x - xarr)
    var_y = (npts - 1) * (tot_sq_y - sq_y) - da.square(tot_y - yarr)
    denom = da.sqrt(var_x * var_y)
    return da.map_blocks(itruediv, num, denom, dtype=num.dtype)
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,551
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/eda/correlation/compute/nullivariate.py
|
"""Implementations of correlations.
Currently this boils down to pandas' implementation."""
from functools import partial
from typing import Dict, Optional, Tuple
import dask
import dask.array as da
import numpy as np
import pandas as pd
from ...data_array import DataArray
from ...intermediate import Intermediate
from .common import CorrelationMethod
def _calc_nullivariate(
    df: DataArray,
    *,
    value_range: Optional[Tuple[float, float]] = None,
    k: Optional[int] = None,
) -> Intermediate:
    """Compute the n x n correlation heatmaps for plot_correlation(df).

    Parameters
    ----------
    df
        The data whose columns are correlated pair-wise.
    value_range
        Keep only correlations within ``[value_range[0], value_range[1]]``.
    k
        Keep only the k strongest correlations by absolute value.
        Mutually exclusive with ``value_range``.

    Raises
    ------
    ValueError
        If both ``value_range`` and ``k`` are supplied.
    """
    if value_range is not None and k is not None:
        raise ValueError("value_range and k cannot be present in both")
    cordx, cordy, corrs = correlation_nxn(df)
    # The computations below is not expensive (scales with # of columns)
    # So we do them in pandas
    (corrs,) = dask.compute(corrs)
    dfs = {}
    for method, corr in corrs.items():
        # Long-format frame: one row per (x, y) column pair.
        ndf = pd.DataFrame(
            {
                "x": df.columns[cordx],
                "y": df.columns[cordy],
                "correlation": corr.ravel(),
            }
        )
        ndf = ndf[cordy > cordx]  # Retain only lower triangle (w/o diag)
        if k is not None:
            # Threshold at the k-th largest absolute correlation, then keep
            # everything at least that strong in either direction.
            thresh = ndf["correlation"].abs().nlargest(k).iloc[-1]
            ndf = ndf[(ndf["correlation"] >= thresh) | (ndf["correlation"] <= -thresh)]
        elif value_range is not None:
            mask = (value_range[0] <= ndf["correlation"]) & (
                ndf["correlation"] <= value_range[1]
            )
            ndf = ndf[mask]
        dfs[method.name] = ndf
    return Intermediate(
        data=dfs,
        axis_range=list(df.columns.unique()),
        visual_type="correlation_heatmaps",
    )
def correlation_nxn(
    df: DataArray,
) -> Tuple[np.ndarray, np.ndarray, Dict[CorrelationMethod, da.Array]]:
    """
    Calculation of a n x n correlation matrix for n columns

    Returns
    -------
    The long format of the correlations
    """
    ncols = len(df.columns)
    # Row-major enumeration of every (row, col) index pair of the matrix:
    # cordx = 0,0,...,0,1,1,...  cordy = 0,1,...,n-1,0,1,...
    idx = np.arange(ncols)
    cordx = np.repeat(idx, ncols)
    cordy = np.tile(idx, ncols)
    corrs = {
        CorrelationMethod.Pearson: _pearson_nxn(df),
        CorrelationMethod.Spearman: _spearman_nxn(df),
        CorrelationMethod.KendallTau: _kendall_tau_nxn(df),
    }
    return cordx, cordy, corrs
def _pearson_nxn(df: DataArray) -> da.Array:
    """Calculate column-wise pearson correlation."""
    # Correlation needs every row at once, so collapse to one partition
    # before delegating to pandas' C implementation.
    single = df.frame.repartition(npartitions=1)
    corr = single.map_partitions(partial(pd.DataFrame.corr, method="pearson"))
    return corr.to_dask_array()
def _spearman_nxn(df: DataArray) -> da.Array:
    """Calculate column-wise spearman correlation."""
    # Collapse to a single partition so pandas sees all rows at once.
    single = df.frame.repartition(npartitions=1)
    corr = single.map_partitions(partial(pd.DataFrame.corr, method="spearman"))
    return corr.to_dask_array()
def _kendall_tau_nxn(df: DataArray) -> da.Array:
    """Calculate column-wise kendalltau correlation."""
    # Collapse to a single partition so pandas sees all rows at once.
    single = df.frame.repartition(npartitions=1)
    corr = single.map_partitions(partial(pd.DataFrame.corr, method="kendall"))
    return corr.to_dask_array()
## The code below is the correlation algorithms for array. Since we don't have
## block-wise algorithms for spearman and kendalltal, it might be more suitable
## to just use the pandas version of correlation.
## The correlations from pandas use double for-loops but they write them in cython
## and they are super fast already.
#
# def _pearson_nxn(data: da.Array) -> da.Array:
# """Calculate column-wise pearson correlation."""
# mean = data.mean(axis=0)[None, :]
# dem = data - mean
# num = dem.T @ dem
# std = data.std(axis=0, keepdims=True)
# dom = data.shape[0] * (std * std.T)
# correl = num / dom
# return correl
# def _spearman_nxn(array: da.Array) -> da.Array:
# rank_array = (
# array.rechunk((-1, None)) #! TODO: avoid this
# .map_blocks(partial(rankdata, axis=0))
# .rechunk("auto")
# )
# return _pearson_nxn(rank_array)
# def _kendall_tau_nxn(array: da.Array) -> da.Array:
# """Kendal Tau correlation outputs an n x n correlation matrix for n columns."""
# _, ncols = array.shape
# corrmat = []
# for _ in range(ncols):
# corrmat.append([float("nan")] * ncols)
# for i in range(ncols):
# corrmat[i][i] = 1.0
# for i in range(ncols):
# for j in range(i + 1, ncols):
# tmp = kendalltau(array[:, i], array[:, j])
# corrmat[j][i] = corrmat[i][j] = da.from_delayed(
# tmp, shape=(), dtype=np.float
# )
# return da.stack(corrmat)
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,552
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/eda/missing/compute/nullivariate.py
|
"""This module implements the plot_missing(df) function's
calculating intermediate part
"""
from typing import Any, Callable, Dict, Generator, Optional, Tuple
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from dask import delayed
from scipy.cluster import hierarchy
from ...data_array import DataArray
from ...intermediate import Intermediate
from ...staged import staged
def _compute_missing_nullivariate(
    df: DataArray, bins: int
) -> Generator[Any, Any, Intermediate]:
    """Calculate the data for visualizing the plot_missing(df).
    This contains the missing spectrum, missing bar chart and missing heatmap.

    Written as a generator for the ``staged`` wrapper below: the lazy task
    tuple is yielded once, and the computed results are sent back in.
    """
    df.compute()
    nullity = df.nulls
    null_cnts = nullity.sum(axis=0)
    nrows = df.shape[0]
    null_perc = null_cnts / nrows
    tasks = (
        missing_spectrum(df, bins=bins),
        null_perc,
        missing_bars(null_cnts, df.columns.values, nrows),
        missing_heatmap(df),
        missing_dendrogram(df),
    )
    ### Lazy Region End
    spectrum, null_perc, bars, heatmap, dendrogram = yield tasks
    ### Eager Region Begin
    # Columns that are fully present or fully missing have no nullity
    # variance, so they are dropped from the heatmap.
    sel = ~((null_perc == 0) | (null_perc == 1))
    heatmap = pd.DataFrame(
        data=heatmap[:, sel][sel, :], columns=df.columns[sel], index=df.columns[sel]
    )
    return Intermediate(
        data_total_missing={col: null_perc[idx] for idx, col in enumerate(df.columns)},
        data_spectrum=pd.DataFrame(spectrum),
        data_bars=bars,
        data_heatmap=heatmap,
        data_dendrogram=dendrogram,
        visual_type="missing_impact",
    )
# Not using decorator here because jupyter autoreload does not support it.
# The staged() wrapper turns the generator above into the public entry point.
compute_missing_nullivariate = staged(  # pylint: disable=invalid-name
    _compute_missing_nullivariate
)
def missing_perc_blockwise(bin_size: int) -> Callable[[np.ndarray], np.ndarray]:
    """Compute the missing percentage in a block.

    Returns a closure suitable for ``da.Array.map_blocks``: it folds every
    ``bin_size`` rows of a block into one averaged row.
    """

    def block_perc(block: np.ndarray) -> np.ndarray:
        nrows = block.shape[0]
        full_bins = nrows // bin_size
        cutoff = full_bins * bin_size
        # Rows that fill whole bins: reshape then average over the bin axis.
        binned = block[:cutoff].reshape((bin_size, full_bins, *block.shape[1:]))
        percs = binned.sum(axis=0) / bin_size
        # Any leftover rows form one final, smaller bin.
        if cutoff != nrows:
            tail = block[cutoff:].sum(axis=0, keepdims=True) / (nrows - cutoff)
            percs = np.concatenate([percs, tail], axis=0)
        return percs

    return block_perc
def missing_spectrum(  # pylint: disable=too-many-locals
    df: DataArray, bins: int
) -> Dict[str, da.Array]:
    """Calculate a missing spectrum for each column.

    Rows are grouped into at most ``bins`` bins and the missing-value rate
    is computed per (bin, column).  All returned values are lazy dask arrays.
    """
    nrows, ncols = df.shape
    data = df.nulls
    num_bins = min(bins, nrows - 1)
    bin_size = nrows // num_bins
    chunk_size = min(
        1024 * 1024 * 128, nrows * ncols
    )  # max 1024 x 1024 x 128 Bytes bool values
    nbins_per_chunk = max(chunk_size // (bin_size * data.shape[1]), 1)
    # Re-align the chunk size to a whole number of bins so no bin straddles
    # two chunks when map_blocks runs.
    chunk_size = nbins_per_chunk * bin_size
    data = data.rechunk((chunk_size, None))
    sep = nrows // chunk_size * chunk_size
    spectrum_missing_percs = data[:sep].map_blocks(
        missing_perc_blockwise(bin_size),
        chunks=(nbins_per_chunk, *data.chunksize[1:]),
        dtype=float,
    )
    # calculation for the last chunk
    if sep != nrows:
        spectrum_missing_percs_remain = data[sep:].map_blocks(
            missing_perc_blockwise(bin_size),
            chunks=(int(np.ceil((nrows - sep) / bin_size)), *data.shape[1:]),
            dtype=float,
        )
        spectrum_missing_percs = da.concatenate(
            [spectrum_missing_percs, spectrum_missing_percs_remain], axis=0
        )
    num_bins = spectrum_missing_percs.shape[0]
    # Start / middle / end row locations of each bin, used as plot coordinates.
    locs0 = da.arange(num_bins) * bin_size
    locs1 = da.minimum(locs0 + bin_size, nrows)
    locs_middle = locs0 + bin_size / 2
    return {
        "column": da.repeat(da.from_array(df.columns.values, (1,)), num_bins),
        "location": da.tile(locs_middle, ncols),
        "missing_rate": spectrum_missing_percs.T.ravel().rechunk(locs_middle.shape[0]),
        "loc_start": da.tile(locs0, ncols),
        "loc_end": da.tile(locs1, ncols),
    }
def missing_bars(
    null_cnts: da.Array, cols: np.ndarray, nrows: dd.core.Scalar
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Calculate a bar chart visualization of nullity correlation
    in the given DataFrame."""
    # Present counts are simply the complement of the null counts.
    present_cnts = nrows - null_cnts
    return present_cnts, null_cnts, cols
def missing_heatmap(df: DataArray) -> Optional[pd.DataFrame]:
    """Calculate a heatmap visualization of nullity correlation
    in the given DataFrame.

    NOTE(review): this returns a lazy ``da.Array`` (the caller converts it
    to a DataFrame after computing) — the ``Optional[pd.DataFrame]`` return
    annotation looks wrong; confirm before changing it.
    """
    return da.corrcoef(df.nulls, rowvar=False)
def missing_dendrogram(df: DataArray) -> Any:
    """Calculate a missing values dendrogram."""
    # Cluster columns by their nullity patterns (average linkage); both steps
    # are wrapped in dask.delayed so they compute with the other tasks.
    linkage = delayed(hierarchy.linkage)(df.nulls.T, "average")
    return delayed(hierarchy.dendrogram)(
        Z=linkage,
        orientation="bottom",
        labels=df.columns,
        distance_sort="descending",
        no_plot=True,
    )
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,553
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/clean/__init__.py
|
"""
dataprep.clean
==============
"""
from .clean_lat_long import clean_lat_long, validate_lat_long
from .clean_email import clean_email, validate_email
__all__ = ["clean_lat_long", "validate_lat_long", "clean_email", "validate_email"]
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,424,554
|
juandavidospina/dataprep
|
refs/heads/master
|
/dataprep/eda/report.py
|
"""
This module implements the Report class.
"""
import sys
import webbrowser
from pathlib import Path
from tempfile import NamedTemporaryFile
from bokeh.io import save
from bokeh.io.notebook import load_notebook
from bokeh.embed.notebook import notebook_content
from bokeh.models import LayoutDOM
from bokeh.resources import CDN
from jinja2 import Template
from ..utils import is_notebook
# Jinja template that embeds every bokeh document root inline; passed to
# bokeh's save() by Report._repr_html_ when rendering inside a notebook.
INLINE_TEMPLATE = Template(
    """
{% from macros import embed %}
{% block inner_body %}
{% block contents %}
{% for doc in docs %}
{{ embed(doc) if doc.elementid }}
{% for root in doc.roots %}
{% block root scoped %}
{{ embed(root) | indent(10) }}
{% endblock %}
{% endfor %}
{% endfor %}
{% endblock %}
{{ plot_script | indent(8) }}
{% endblock %}
"""
)
class Report:
    """
    This class creates a customized Report object for the plot* functions.

    It wraps a bokeh layout and knows how to save it to an HTML file,
    render it inline in a notebook, or open it in a browser tab.
    """

    # The bokeh layout to be rendered/saved.
    to_render: LayoutDOM

    def __init__(self, to_render: LayoutDOM) -> None:
        self.to_render = to_render

    def save(self, filename: str) -> None:
        """
        Save the report as an HTML file using bokeh's CDN resources.
        """
        save(
            self.to_render,
            filename=filename,
            resources=CDN,
            title="DataPrep.EDA Report",
        )

    def _repr_html_(self) -> str:
        """
        Display itself inside a notebook
        """
        # Special case inside Google Colab
        if "google.colab" in sys.modules:
            load_notebook(hide_banner=True)
            script, div, _ = notebook_content(self.to_render)
            return f"{div}<script>{script}</script>"

        # Windows forbids us open the file twice as the result bokeh cannot
        # write to the opened temporary file.
        with NamedTemporaryFile(suffix=".html", delete=False) as tmpf:
            pass
        save(
            self.to_render,
            filename=tmpf.name,
            resources=CDN,
            template=INLINE_TEMPLATE,
            title="DataPrep.EDA Report",
        )
        with open(tmpf.name, "r") as f:
            output_html = f.read()

        # Delete the temporary file
        Path(tmpf.name).unlink()

        # Fix the bokeh: bokeh wrongly call the "waiting for bokeh to load" function
        # inside "Bokeh.safely", which causes Bokeh not found because
        # Bokeh is even not loaded!
        patched_html = output_html.replace(
            "Bokeh.safely",
            "var __dataprep_bokeh_fix = (f) => document.Bokeh === undefined ? setTimeout(f, 1000) : f(); __dataprep_bokeh_fix",  # pylint: disable=line-too-long
        )

        # embed into report template created by us here
        return patched_html

    def show(self) -> None:
        """
        Render the report. This is useful when calling plot in a for loop.
        """
        # if not call from notebook environment, ref to show_browser function.
        if not is_notebook():
            print(
                "The report will not show in a notebook environment, "
                "please try 'show_browser' if you want to open it in browser",
                file=sys.stderr,
            )
        try:
            from IPython.display import (  # pylint: disable=import-outside-toplevel
                HTML,
                display,
            )

            display(HTML(self._repr_html_()))
        except ImportError:
            pass

    def show_browser(self) -> None:
        """
        Open the report in the browser. This is useful when plotting
        from terminal or when the fig is very large in notebook.
        """
        # set delete = False to avoid early delete when user open multiple plots.
        with NamedTemporaryFile(suffix=".html", delete=False) as tmpf:
            save(
                self.to_render,
                filename=tmpf.name,
                resources=CDN,
                title="DataPrep.EDA Report",
            )
            webbrowser.open_new_tab(f"file://{tmpf.name}")
|
{"/dataprep/eda/missing/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/missing/compute/univariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/distribution/compute/trivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_correlation.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/missing/__init__.py": ["/dataprep/eda/missing/compute/__init__.py"], "/dataprep/eda/distribution/compute/overview.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/eda/test_plot_missing.py": ["/dataprep/eda/missing/__init__.py", "/dataprep/eda/utils.py"], "/dataprep/tests/clean/test_clean_phone.py": ["/dataprep/clean/__init__.py"], "/dataprep/eda/distribution/compute/__init__.py": ["/dataprep/eda/utils.py", "/dataprep/eda/distribution/compute/bivariate.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/trivariate.py", "/dataprep/eda/distribution/compute/univariate.py"], "/dataprep/clean/__init__.py": ["/dataprep/clean/clean_lat_long.py", "/dataprep/clean/clean_email.py", "/dataprep/clean/clean_country.py", "/dataprep/clean/clean_url.py", "/dataprep/clean/clean_phone.py", "/dataprep/clean/clean_ip.py"], "/dataprep/eda/create_report/formatter.py": ["/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/data_array.py", "/dataprep/eda/distribution/__init__.py", "/dataprep/eda/distribution/compute/common.py", "/dataprep/eda/distribution/compute/overview.py", "/dataprep/eda/distribution/compute/univariate.py", "/dataprep/eda/distribution/render.py", "/dataprep/eda/missing/__init__.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/distribution/compute/bivariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/eda/create_report/__init__.py": ["/dataprep/eda/create_report/formatter.py"], 
"/dataprep/eda/distribution/compute/univariate.py": ["/dataprep/eda/distribution/compute/common.py"], "/dataprep/tests/connector/test_integration.py": ["/dataprep/connector/__init__.py", "/dataprep/utils.py"], "/dataprep/eda/distribution/__init__.py": ["/dataprep/eda/distribution/compute/__init__.py", "/dataprep/eda/distribution/render.py"], "/dataprep/eda/missing/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/missing/compute/bivariate.py", "/dataprep/eda/missing/compute/nullivariate.py", "/dataprep/eda/missing/compute/univariate.py"], "/dataprep/tests/eda/test_plot.py": ["/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/__init__.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/bivariate.py", "/dataprep/eda/correlation/compute/nullivariate.py", "/dataprep/eda/utils.py"], "/dataprep/eda/correlation/compute/bivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/correlation/compute/nullivariate.py": ["/dataprep/eda/data_array.py", "/dataprep/eda/correlation/compute/common.py"], "/dataprep/eda/missing/compute/nullivariate.py": ["/dataprep/eda/data_array.py"], "/dataprep/eda/report.py": ["/dataprep/utils.py"]}
|
26,450,419
|
eL-Squaz/commonplace
|
refs/heads/master
|
/commonplace/db_bootstrap.py
|
from __future__ import annotations
from typing import *
import asyncio
import contextlib
import logging
import datetime
import random
import click
import edgedb
from . import app
from .convenience import lorem_ipsum, nowtz, random_string, sha1
# Module logger; verbosity follows the app-wide debug flag.
# (sic: "boostrap" typo kept — renaming the logger would change log routing)
logger = logging.getLogger("commonplace.db_boostrap")
logger.setLevel(logging.DEBUG if app.debug else logging.INFO)
# Candidate DSNs, most to least specific: the target database as the app user,
# the default "edgedb" database as the app user, and the default database as
# the built-in "edgedb" role (used before bootstrap has created anything).
full_dsn = f"edgedb://{app.db_user}:{app.db_password}@{app.db_host}/{app.db_db}"
no_db_dsn = f"edgedb://{app.db_user}:{app.db_password}@{app.db_host}/edgedb"
no_user_dsn = f"edgedb://edgedb@{app.db_host}/edgedb"
async def try_connect() -> Tuple[edgedb.AsyncIOConnection, str]:
    """Open a connection with the most specific DSN that authenticates.

    Tries ``full_dsn``, then ``no_db_dsn``, then ``no_user_dsn`` and returns
    ``(connection, dsn)`` for the first that works.

    Raises:
        LookupError: if none of the candidate DSNs authenticates.
    """
    candidates = [
        ("Full DSN", full_dsn),
        ("No DB DSN", no_db_dsn),
        ("No user DSN", no_user_dsn),
    ]
    for label, dsn in candidates:
        try:
            connection = await edgedb.async_connect(dsn)
        except edgedb.AuthenticationError as ae:
            logger.warning(f"{label} doesn't work: {ae}")
        else:
            return connection, dsn
    raise LookupError("No way to connect to the given database found.")
@contextlib.asynccontextmanager
async def ensure_connection(
    *,
    db: Optional[edgedb.AsyncIOConnection] = None,
    pool: Optional[edgedb.AsyncIOPool] = None,
) -> AsyncGenerator[edgedb.AsyncIOConnection, None]:
    """Yield a usable connection: the given one, one from *pool*, or a new one.

    A caller-supplied ``db`` is yielded as-is and not closed here.  A pooled
    connection is released back to the pool.  Otherwise a fresh connection is
    opened via try_connect() and closed on exit.

    Raises:
        ValueError: when a fresh connection cannot open the target database.
    """
    if db is not None:
        yield db
    elif pool is not None:
        conn = await pool.acquire()
        try:
            yield conn
        finally:
            await pool.release(conn)
    else:
        conn, dsn = await try_connect()
        if dsn != full_dsn:
            raise ValueError(
                f"Opening database {app.db_db} does not work. Bootstrap first?"
            )
        try:
            yield conn
        finally:
            await conn.aclose()
async def update_schema(
    *,
    db: Optional[edgedb.AsyncIOConnection] = None,
    pool: Optional[edgedb.AsyncIOPool] = None,
) -> AsyncGenerator[str, None]:
    """Apply the schema in ``database.esdl`` to the database as one migration.

    Yields human-readable progress messages.

    Raises:
        LookupError: if ``database.esdl`` is not found next to the package.
    """
    yield "Updating DB schema\n"
    async with ensure_connection(db=db, pool=pool) as conn:
        esdl = app.current_dir.parent / "database.esdl"
        if not esdl.is_file():
            raise LookupError("database.esdl not found")
        schema = esdl.read_text()
        # Both DDL statements run in one transaction so a failed migration
        # leaves the schema untouched.
        async with conn.transaction():
            yield "Creating migration\n"
            await conn.execute(f"CREATE MIGRATION setupdb TO {{ {schema} }};")
            yield "Committing migration\n"
            # Fixed: the original used a stray f-string with no placeholders.
            await conn.execute("COMMIT MIGRATION setupdb;")
    yield "Done updating schema\n"
async def bootstrap() -> AsyncGenerator[str, None]:
    """Create the app role, the app database, and the schema as needed.

    Connects with progressively less specific credentials (see try_connect)
    and fills in whatever is missing, yielding progress messages.

    Raises:
        ValueError: if role or database creation does not take effect.
    """
    conn, dsn = await try_connect()
    if dsn == no_user_dsn:
        yield f"User {app.db_user} does not exist, creating\n"
        # Fixed: the original interpolated the password bare, which is not
        # valid EdgeQL for arbitrary passwords — it must be a quoted string
        # literal.  Escape backslashes and single quotes before quoting.
        password = str(app.db_password).replace("\\", "\\\\").replace("'", "\\'")
        await conn.execute(
            f"""
            CREATE SUPERUSER ROLE {app.db_user} {{
                SET password := '{password}'
            }}
            """
        )
        await conn.aclose()
        conn, dsn = await try_connect()
        if dsn == no_user_dsn:
            raise ValueError(f"Logging with created role {app.db_user} does not work")
    if dsn == no_db_dsn:
        yield f"Connected to EdgeDB as {app.db_user}, creating database {app.db_db}\n"
        # TODO: uncomment when passwords work with RDS
        # await conn.execute("CONFIGURE SYSTEM RESET Auth FILTER Auth.method IS Trust;")
        await conn.execute(f"CREATE DATABASE {app.db_db}")
        await conn.aclose()
        conn, dsn = await try_connect()
        if dsn != full_dsn:
            raise ValueError(f"Opening database {app.db_db} does not work")
    yield (
        f"Connected to database {app.db_db} as {app.db_user},"
        f" migrating schema to latest ESDL\n"
    )
    async for message in update_schema(db=conn):
        yield message
    yield "Done bootstrapping\n"
async def drop_test_data(
    *,
    db: Optional[edgedb.AsyncIOConnection] = None,
    pool: Optional[edgedb.AsyncIOPool] = None,
) -> AsyncGenerator[str, None]:
    """Delete every object of every concrete commonplace type, with progress."""
    yield f"Dropping all data from {app.db_db}\n"
    async with ensure_connection(db=db, pool=pool) as conn:
        typenames = await conn.fetchall(
            """
            SELECT schema::ObjectType { name }
            FILTER .name LIKE 'commonplace::%'
            AND .is_abstract = false;
            """
        )
        # Objects may reference each other, so a DELETE can fail with a
        # constraint violation until its dependents are gone; keep sweeping
        # until a full pass succeeds.
        done = False
        while not done:
            done = True
            for typeobj in typenames:
                typename = typeobj.name
                yield f"Deleting {typename} objects\n"
                try:
                    await conn.execute(f"DELETE {typename}")
                except edgedb.ConstraintViolationError:
                    done = False
    yield "Done dropping test data\n"
async def make_test_data(
    *,
    db: Optional[edgedb.AsyncIOConnection] = None,
    pool: Optional[edgedb.AsyncIOPool] = None,
) -> AsyncGenerator[str, None]:
    """Insert three users and 100 random Note/Content pairs, with progress.

    Yields human-readable progress strings; intended for test databases only.
    """
    yield "Populating test data\n"
    async with ensure_connection(db=db, pool=pool) as conn:
        usernames = ["ambv", "1st1", "elprans"]
        tags = [
            "articles",
            "bookmarks",
            "fiction",
            "guitar",
            "inspirations",
            "journal",
            "learning",
            "philosophy",
            "python",
            "quotes",
        ]
        seconds_in_3_years = 60 * 60 * 24 * 365 * 3
        for user in usernames:
            yield f"Inserting user {user}\n"
            await conn.fetchall(
                """
                WITH MODULE commonplace
                INSERT User { name := <Slug>$name };
                """,
                name=user,
            )
        content_count = 100
        for i in range(1, content_count + 1):
            yield f"{i}/{content_count}: inserting note\n"
            text = " ".join(lorem_ipsum(random.randint(3, 20)))
            editor_name = random.choice(usernames)
            # Re-rolling the offset twice biases it toward smaller values,
            # i.e. toward more recent timestamps.
            seconds = random.randint(0, seconds_in_3_years)
            seconds = random.randint(0, seconds)
            seconds = random.randint(0, seconds)
            ts = nowtz() - datetime.timedelta(seconds=seconds)
            note = await conn.fetchone(
                """
                WITH MODULE commonplace
                INSERT Note {
                    text := <str>$text,
                    sha1 := <bytes>$hash,
                    ts := <datetime>$ts,
                    editor := (
                        SELECT User FILTER User.name = <Slug>$name
                    )
                }
                """,
                text=text,
                hash=sha1(text),
                ts=ts,
                name=editor_name,
            )
            # Roughly one in five entries gets a title.
            # NOTE(review): title may be None here yet is bound to <str>$title
            # below — confirm the driver/schema accept that.
            content_title = None
            if random.random() > 0.8:
                content_title = " ".join(lorem_ipsum(5))
            content_name = "-".join(random_string(8) for _ in range(3)).lower()
            # ~25% already public (window ended now), ~25% public in the
            # future, ~50% never public.
            public_toss = random.random()
            public_since: Optional[datetime.datetime]
            public_until: Optional[datetime.datetime]
            if public_toss < 0.25:
                public_since = nowtz() - datetime.timedelta(seconds=seconds)
                public_until = nowtz()
            elif public_toss < 0.5:
                public_since = nowtz() + datetime.timedelta(seconds=seconds)
                public_until = None
            elif public_toss >= 0.5:
                public_since = None
                public_until = None
            yield f"{i}/{content_count}: inserting content\n"
            await conn.fetchone(
                """
                WITH MODULE commonplace
                INSERT Content {
                    latest := (
                        SELECT Note FILTER Note.id = <uuid>$noteid
                    ),
                    title := <str>$title,
                    name := <Slug>$name,
                    tags := array_unpack(<array<Tag>>$tags),
                    public_since := <datetime>$public_since,
                    public_until := <datetime>$public_until,
                    deleted := <bool>$deleted
                }
                """,
                noteid=note.id,
                title=content_title,
                name=content_name,
                tags=random.sample(tags, random.randint(0, len(tags))),
                public_since=public_since,
                public_until=public_until,
                deleted=False if random.random() > 0.1 else True,
            )
    yield "Done making test data\n"
async def log(agen: AsyncGenerator[str, None]) -> None:
    """Drain *agen*, logging each yielded message at INFO level."""
    async for line in agen:
        stripped = line.rstrip()
        logger.info(stripped)
@click.command()
@click.option(
    "--bootstrap",
    "operation",
    flag_value="bootstrap",
    default=True,
    help=(
        f"Bootstrap EdgeDB at {app.db_host} to include user {app.db_user},"
        f" database {app.db_db}, and an up-to-date schema from database.esdl"
    ),
)
@click.option(
    "--make-test-data",
    "operation",
    flag_value="make-test-data",
    help=f"Writes random test data to edgedb://{app.db_host}/{app.db_db}",
)
@click.option(
    "--drop-test-data",
    "operation",
    flag_value="drop-test-data",
    help=f"Deletes all data from edgedb://{app.db_host}/{app.db_db}",
)
def main(operation: str) -> None:
    """Command-line entry point: run the selected maintenance operation."""
    logging.basicConfig()
    # Dispatch table instead of an if/elif chain; each value is an async
    # generator factory whose messages are drained by log().
    factories = {
        "bootstrap": bootstrap,
        "make-test-data": make_test_data,
        "drop-test-data": drop_test_data,
    }
    factory = factories.get(operation)
    if factory is None:
        raise click.UsageError(f"Unknown action {operation}")
    asyncio.run(log(factory()))
if __name__ == "__main__":
    main()
|
{"/commonplace/app.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"], "/commonplace/db_bootstrap.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"]}
|
26,450,420
|
eL-Squaz/commonplace
|
refs/heads/master
|
/commonplace/convenience.py
|
"""Almost one liners. But handy."""
from __future__ import annotations
from typing import *
import datetime
import hashlib
from pathlib import Path
import random
import string
import dateutil.tz
def find_dot_env(directory: Path) -> Optional[Path]:
    """Search *directory* and its ancestors for a ``.env`` file.

    Returns the first ``.env`` found walking upward, or None if *directory*
    is not a directory or no ancestor contains one.
    """
    if not directory.is_dir():
        return None
    while True:
        maybe_result = directory / ".env"
        if maybe_result.is_file():
            return maybe_result
        # BUG FIX: the original compared `directory.root` (a string such as
        # "/") to `directory` (a Path), which is never equal, so the loop
        # never detected the filesystem root and spun forever when no .env
        # exists.  At the root, a path is its own parent.
        if directory.parent == directory:
            # we are at the root, we failed
            return None
        directory = directory.parent
lorem_ipsum_words = [
"ad",
"adipiscing",
"aliqua",
"aliquip",
"amet",
"anim",
"aute",
"cillum",
"commodo",
"consectetur",
"consequat",
"culpa",
"cupidatat",
"deserunt",
"do",
"dolor",
"dolore",
"duis",
"ea",
"eiusmod",
"elit",
"enim",
"esse",
"est",
"et",
"eu",
"ex",
"excepteur",
"exercitation",
"fugiat",
"id",
"in",
"incididunt",
"ipsum",
"irure",
"labore",
"laboris",
"laborum",
"lorem",
"magna",
"minim",
"mollit",
"nisi",
"non",
"nostrud",
"nulla",
"occaecat",
"officia",
"pariatur",
"proident",
"qui",
"quis",
"reprehenderit",
"sed",
"sint",
"sit",
"sunt",
"tempor",
"ullamco",
"ut",
"velit",
"veniam",
"voluptate",
]
def lorem_ipsum(count: int) -> Iterator[str]:
sentence_start = True
for _ in range(count):
word = random.choice(lorem_ipsum_words)
if sentence_start:
sentence_start = False
word = word.capitalize()
if random.random() > 0.8:
word += random.choice(".....!!?")
sentence_start = True
yield word
def random_string(length: int) -> str:
return "".join(
random.choice(string.ascii_letters + string.digits)
for _ in range(length)
)
def nowtz() -> datetime.datetime:
    """Return the current local time as a timezone-aware datetime."""
    local_zone = dateutil.tz.tzlocal()
    return datetime.datetime.now(tz=local_zone)
def sha1(arg: Union[str, bytes]) -> bytes:
    """Return the 20-byte SHA-1 digest of *arg* (str is encoded as UTF-8).

    Used for content fingerprinting, not for security.
    """
    if isinstance(arg, str):
        arg = arg.encode("utf8")
    hasher = hashlib.sha1()  # renamed from `hash`, which shadowed the builtin
    hasher.update(arg)
    return hasher.digest()
def get_english_timedelta_description(delta: datetime.timedelta) -> str:
    """Describe *delta* in English using its single most significant unit.

    Microseconds are ignored, and ``delta.days`` only contributes to the
    "days" figure (matching timedelta's days/seconds normalization).
    """
    def fmt(amount: float, singular: str) -> str:
        # Nudge 0.9x values down so "%.0f" never prints "1" with a plural
        # unit; show one decimal only when the amount is fractional.
        if 0.9 < amount < 1:
            amount = 0.9
        unit = singular if amount == 1 else singular + "s"
        if amount == int(amount):
            return f"{amount:.0f} {unit}"
        return f"{amount:.1f} {unit}"

    day_count = delta.days + delta.seconds / 3600 / 24
    hour_count = delta.seconds / 3600
    minute_count = delta.seconds % 3600 / 60
    second_count = delta.seconds % 3600 % 60
    if day_count > 0.5:
        return fmt(day_count, "day")
    if hour_count > 0.5:
        return fmt(hour_count, "hour")
    if minute_count > 0.5:
        return fmt(minute_count, "minute")
    unit = "second" if second_count == 1 else "seconds"
    return f"{second_count} {unit}"
def get_english_dt_description_from_now(ts: datetime.datetime) -> str:
    """Describe how long ago *ts* was, relative to the current local time."""
    return get_english_timedelta_description(nowtz() - ts)
# Those are mapped to uil-* classes in https://github.com/iconscout/unicons/
_icon_classes = {
    "anger": "angry",
    "award": "medal",
    "article": "notes",
    "book": "book-open",
    "bookmark": "bookmark",
    "car": "car",
    "challenger": "car",
    "company": "chart-line",
    "conference": "meeting-board",
    "database": "database",
    "design": "ruler",
    "diary": "diary",
    "drawing": "pen",
    "edgedb": "database",
    "experiment": "flask-potion",
    "favorite": "star",
    "favourite": "star",
    "fiction": "pen",
    "film": "film",
    "finance": "money-stack",
    "free-will": "wind",
    "future": "mountains-sun",
    "game": "table-tennis",
    "growth": "arrow-growth",
    "guitar": "music",
    "haiku": "pen",
    "happiness": "smile-dizzy",
    "health": "medkit",
    "home": "home",
    "house": "home",
    "humor": "smile-beam",
    "inspiration": "lightbulb-alt",
    "interpretation": "comment-question",
    "joy": "smile-squint-wink",
    "learning": "graduation-cap",
    "lightbulb": "lightbulb-alt",
    "lyric": "microphone",
    "medal": "medal",
    "megaphone": "megaphone",
    "money": "money-stack",
    "montypython": "smile-wink-alt",
    "monty-python": "smile-wink-alt",
    "motivation": "game-structure",
    "movie": "film",
    "music": "music",
    "negotiation": "ninja",
    "ninja": "ninja",
    "notes": "notes",
    "journal": "diary",
    "opensource": "code-branch",
    "open-source": "code-branch",
    "paiting": "brush-alt",
    "philosophy": "map-marker-question",
    "podcast": "microphone",
    "poetry": "pen",
    "presentation": "presentation-play",
    "procrastination": "squint",
    "programming": "bug",
    "project": "code-branch",
    "publishing": "megaphone",
    "python": "parking-circle",
    "reason": "comment-question",
    "receipt": "receipt",
    "reflection": "thunderstorm-sun",
    "question": "question-circle",
    "quote": "align-left-justify",
    "recognition": "medal",
    "singing": "microphone",
    "speaking": "megaphone",
    "talk": "megaphone",
    "tracking": "monitor-heart-rate",
    "travel": "desert",
    "weltschmerz": "thunderstorm",
    "work": "constructor",
    "writing": "pen",
}
def icon_class(tag: str) -> str:
    """Return the Unicons CSS class ("uil-...") for *tag*.

    Lookup is case-insensitive; an unknown plural falls back to its singular
    form, and anything still unknown maps to the generic "angle-right" icon.
    """
    # Lowercase once instead of per lookup.  Fixed: the plural fallback now
    # checks the lowercased tag (the original tested `tag.endswith("s")` on
    # the raw tag, so e.g. "BOOKMARKS" skipped the singular fallback even
    # though the primary lookup was case-insensitive).
    lowered = tag.lower()
    cls = _icon_classes.get(lowered, "")
    if not cls and lowered.endswith("s"):
        cls = _icon_classes.get(lowered[:-1], "")
    return "uil-" + (cls or "angle-right")
|
{"/commonplace/app.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"], "/commonplace/db_bootstrap.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"]}
|
26,450,421
|
eL-Squaz/commonplace
|
refs/heads/master
|
/commonplace/app.py
|
from __future__ import annotations
from typing import *
import asyncio
import logging
from pathlib import Path
import edgedb
from starlette.applications import Starlette
from starlette.config import Config
from starlette.datastructures import URL, Secret
from starlette.requests import Request
from starlette.responses import Response, RedirectResponse, StreamingResponse
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates
from commonplace import queries
from commonplace.convenience import (
find_dot_env,
get_english_dt_description_from_now,
icon_class,
)
# Package directory; used to locate templates/, static/, and the .env file.
current_dir = Path(__file__).parent
config = Config(find_dot_env(current_dir))
debug = config("COMMONPLACE_DEBUG", cast=bool, default=False)
# EdgeDB connection settings; EDGEDB_HOST/USER/PASSWORD are required
# (Config raises if they are absent), EDGEDB_DB defaults to "commonplace".
db_host = config("EDGEDB_HOST")
db_user = config("EDGEDB_USER")
db_password = config("EDGEDB_PASSWORD", cast=Secret)
db_db = config("EDGEDB_DB", default="commonplace")
db_dsn = URL(f"edgedb://{db_user}:{db_password}@{db_host}/{db_db}")
# Connection pool; created in startup(), closed in shutdown().
db_pool: edgedb.AsyncIOPool
templates = Jinja2Templates(directory=str(current_dir / "templates"))
logger = logging.getLogger("commonplace.app")
logger.setLevel(logging.DEBUG if debug else logging.INFO)
app = Starlette(debug=debug)
app.mount("/static", StaticFiles(directory=str(current_dir / "static")), name="static")
@app.on_event("startup")
async def startup() -> None:
    """Create the module-level EdgeDB connection pool when the app starts."""
    global db_pool
    logger.info("Creating an async connection pool to EdgeDB")
    db_pool = await edgedb.create_async_pool(dsn=str(db_dsn), min_size=1, max_size=16)
@app.on_event("shutdown")
async def shutdown() -> None:
    """Close the EdgeDB connection pool when the app stops."""
    await db_pool.aclose()
@app.route("/")
async def homepage(request: Request) -> Response:
    """Render the index page, filtered by the ``t`` query-string tags."""
    selected: FrozenSet[str] = frozenset(request.query_params.getlist("t"))
    # Fetch the (possibly filtered) content and the full tag list in parallel.
    items, every_tag = await asyncio.gather(
        queries.get_all_content(db_pool, selected),
        queries.get_all_tags(db_pool),
    )
    # A tag is "available" when at least one displayed item carries it.
    present = {tag for entry in items for tag in entry.tags}
    tag_pairs = sorted((tag, tag in present) for tag in every_tag)
    context = {
        "request": request,
        "title": "Łukasz Langa",
        "domain": "lukasz.langa.pl",
        "tags": tag_pairs,
        "query_tags": selected,
        "content": items,
        "make_tags_query": make_tags_query,
        "humanize_dt": get_english_dt_description_from_now,
        "icon_class": icon_class,
    }
    return templates.TemplateResponse(name="index.html", context=context)
@app.route("/favicon.ico")
async def favicon(request: Request) -> Response:
    """Redirect the conventional favicon path to the static PNG."""
    target = "/static/favicon32.png"
    return RedirectResponse(url=target)
@app.route("/error")
async def error(request: Request) -> Response:
    """
    An example error. Switch the `debug` setting to see either tracebacks or 500 pages.
    """
    # Deliberately unhandled so the framework's error handling is exercised.
    raise RuntimeError("Oh no")
@app.exception_handler(404)
async def not_found(request: Request, exc: Exception) -> Response:
    """Render the 404 template with an HTTP 404 status."""
    context = {"request": request}
    return templates.TemplateResponse(
        name="404.html", context=context, status_code=404
    )
@app.exception_handler(500)
async def server_error(request: Request, exc: Exception) -> Response:
    """Render the 500 template (with the exception) and an HTTP 500 status."""
    context = {"request": request, "exception": exc}
    return templates.TemplateResponse(
        name="500.html", context=context, status_code=500
    )
def make_tags_query(needle: str, haystack: AbstractSet[str]) -> str:
    """Return a URL query string for tags.

    `haystack` is the previous set of tags. If `needle` was in it, remove it.
    If not, add it.
    """
    # Toggling membership is exactly a symmetric difference with {needle}.
    toggled = set(haystack) ^ {needle}
    return "&".join(f"t={tag}" for tag in toggled)
# Destructive maintenance endpoints, registered only when the app is pointed
# at the "cptest" database so they can never run against production data.
if db_db == "cptest":
    @app.route("/update-schema")
    async def update_schema(request: Request) -> StreamingResponse:
        """Stream progress of applying database.esdl to the test database."""
        from commonplace import db_bootstrap
        return StreamingResponse(
            db_bootstrap.update_schema(pool=db_pool), media_type="text/plain"
        )
    @app.route("/drop-test-data")
    async def drop_test_data(request: Request) -> StreamingResponse:
        """Stream progress of deleting all objects from the test database."""
        from commonplace import db_bootstrap
        return StreamingResponse(
            db_bootstrap.drop_test_data(pool=db_pool), media_type="text/plain"
        )
    @app.route("/make-test-data")
    async def make_test_data(request: Request) -> StreamingResponse:
        """Stream progress of inserting random test data."""
        from commonplace import db_bootstrap
        return StreamingResponse(
            db_bootstrap.make_test_data(pool=db_pool), media_type="text/plain"
        )
# Development entry point; in production the app is served by an ASGI server.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
{"/commonplace/app.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"], "/commonplace/db_bootstrap.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"]}
|
26,450,422
|
eL-Squaz/commonplace
|
refs/heads/master
|
/commonplace/queries.py
|
from __future__ import annotations
from typing import *
import datetime
import logging
import edgedb
# Module logger for the query layer.
logger = logging.getLogger("commonplace.queries")
class ContentItem(Protocol):
    """Structural type of the rows returned by the content queries below."""
    ts: datetime.datetime
    name: str
    title: Optional[str]
    text: str
    tags: List[str]
async def get_all_tags(pool: edgedb.AsyncIOPool) -> List[str]:
    """Return a sorted list of all distinct tag strings in use.

    NOTE(review): the original annotated List[Tuple[str, bool]], but
    _get_all_tags returns plain tag strings and homepage() consumes them as
    such; the annotation is corrected here.
    """
    async with pool.acquire() as db:
        return await _get_all_tags(db)
async def get_all_content(
    pool: edgedb.AsyncIOPool, tags: AbstractSet[str] = frozenset()
) -> List[ContentItem]:
    """Fetch content matching *tags*, using a connection from *pool*."""
    async with pool.acquire() as conn:
        return await _get_all_content(conn, tags)
async def _get_all_tags(db: edgedb.AsyncIOConnection) -> List[str]:
    """Return a sorted list of all distinct tag strings used by any Content.

    NOTE(review): the original docstring described 2-tuples and the
    annotation said List[Tuple[str, bool]], but the query yields individual
    tag values; both are corrected here to match the code.
    """
    return sorted(await db.fetchall("SELECT DISTINCT commonplace::Content.tags;"))
async def _get_all_content(
    db: edgedb.AsyncIOConnection, tags: AbstractSet[str] = frozenset()
) -> List[ContentItem]:
    """Return a sorted list of ContentItem-like objects that contain `tags`.

    Results are sorted newest-first by the latest note's timestamp.
    """
    base_query = """
        WITH MODULE commonplace
        SELECT Content {
            ts := .latest.ts,
            name,
            title,
            text := .latest[IS Note].text,
            tags
        }
    """
    # The 1- and 2-tag cases get dedicated queries with named arguments;
    # only 3+ tags fall through to the generic array_unpack form.
    if len(tags) == 0:
        content = await db.fetchall(base_query + ";")
    elif len(tags) == 1:
        taglist = list(tags)
        content = await db.fetchall(
            base_query + "FILTER <Tag>$t0 IN .tags;", t0=taglist[0],
        )
    elif len(tags) == 2:
        taglist = list(tags)
        content = await db.fetchall(
            base_query + "FILTER <Tag>$t0 IN .tags AND <Tag>$t1 IN .tags;",
            t0=taglist[0],
            t1=taglist[1],
        )
    else:  # len(tags) > 2
        content = await db.fetchall(
            base_query + "FILTER all(array_unpack(<array<Tag>>$tags) IN .tags);",
            tags=list(tags),
        )
    # Newest first.
    return sorted(content, key=lambda o: o.ts, reverse=True)
|
{"/commonplace/app.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"], "/commonplace/db_bootstrap.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"]}
|
26,450,423
|
eL-Squaz/commonplace
|
refs/heads/master
|
/commonplace/__init__.py
|
# Package version string.
__version__ = '20.4.0'
|
{"/commonplace/app.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"], "/commonplace/db_bootstrap.py": ["/commonplace/__init__.py", "/commonplace/convenience.py"]}
|
26,510,798
|
jeonjw25/Pygame
|
refs/heads/master
|
/f_screen.py
|
import pygame, sys
# from pygame.rect import Rect
import start, stage, sound
from start import *
from stage import *
# Module-level game instance.  Its only use in this module (in complete())
# is commented out — TODO(review): consider removing this side-effectful
# construction at import time.
g = start.game()
def menu(screen):
    """Show the title screen; return 1 when the player clicks Start.

    Help opens the help screen, Close (or the window's quit button)
    terminates the program.
    """
    sound.intro_music(0.3)
    image = pygame.transform.scale(pygame.image.load("resources/images/screen_images/backscreen.png"), (1000, 736)).convert_alpha()
    screen.blit(image, (0, 0))
    start_button = pygame.transform.scale(pygame.image.load("resources/images/button_images/start_button.png"), (200, 45)).convert_alpha()
    start_rect = start_button.get_rect(x = 145, y = 580)
    help_button = pygame.transform.scale(pygame.image.load("resources/images/button_images/help_button.png"), (143, 50)).convert_alpha()
    help_rect = help_button.get_rect(x = 428, y = 577)
    close_button = pygame.transform.scale(pygame.image.load("resources/images/button_images/close_button.png"), (175, 45)).convert_alpha()
    close_rect = close_button.get_rect(x = 670, y = 582)
    screen.blit(start_button, start_rect)
    screen.blit(help_button, help_rect)
    screen.blit(close_button, close_rect)
    pygame.display.flip()
    running = True
    # Event loop: a click is detected as "mouse over button + button-down".
    while running:
        for event in pygame.event.get():
            if start_rect.collidepoint(pygame.mouse.get_pos()):
                if event.type == pygame.MOUSEBUTTONDOWN:
                    return 1
            if help_rect.collidepoint(pygame.mouse.get_pos()):
                if event.type == pygame.MOUSEBUTTONDOWN:
                    help_screen(screen)
            if close_rect.collidepoint(pygame.mouse.get_pos()):
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pygame.quit()
                    sys.exit()
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
    # NOTE(review): `running` is never set to False, so this is unreachable;
    # the function only exits via the returns / sys.exit() above.
    return 0
def help_screen(screen):
    """Show the help screen until the player clicks Go Back or quits.

    NOTE(review): "Go Back" calls menu() recursively instead of returning,
    so repeated menu<->help navigation grows the call stack.
    """
    image = pygame.transform.scale(pygame.image.load("resources/images/screen_images/helpscreen.png"), (1000, 736)).convert_alpha()
    screen.blit(image, (0, 0))
    goback_button = pygame.transform.scale(pygame.image.load("resources/images/button_images/goback_button.png"), (175, 45)).convert_alpha()
    goback_rect = goback_button.get_rect(x = 100, y = 600)
    screen.blit(goback_button, goback_rect)
    pygame.display.flip()
    running = True
    while running:
        for event in pygame.event.get():
            if goback_rect.collidepoint(pygame.mouse.get_pos()):
                if event.type == pygame.MOUSEBUTTONDOWN:
                    menu(screen)
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
def complete(screen):
    """Show the game-complete screen; return 1 when Continue is clicked."""
    sound.complete_music(0.3)
    background = pygame.image.load("resources/images/screen_images/complete.png")
    image = pygame.transform.scale(background, (1000, 736)).convert_alpha()  # was 1000x667
    screen.blit(image, (0, 0))
    continue_button = pygame.transform.scale(pygame.image.load("resources/images/button_images/continue_button.png"), (210, 48)).convert_alpha()
    continue_rect = continue_button.get_rect(x = 730, y = 50)
    screen.blit(continue_button, continue_rect)
    pygame.display.flip()
    while True:
        for event in pygame.event.get():
            clicked = event.type == pygame.MOUSEBUTTONDOWN
            if clicked and continue_rect.collidepoint(pygame.mouse.get_pos()):
                return 1
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
def gameover(screen):
    """Show the game-over screen; return 1 when the player clicks Continue.

    The window's quit button terminates the program.
    """
    sound.gameover_music(0.3)  # fixed: the original called this twice
    image = pygame.image.load("resources/images/screen_images/gameover.png")
    screen.blit(image, (0, 0))
    continue_button = pygame.transform.scale(pygame.image.load("resources/images/button_images/continue_button.png"), (210, 48)).convert_alpha()
    continue_rect = continue_button.get_rect(x = 730, y = 520)
    screen.blit(continue_button, continue_rect)
    pygame.display.flip()
    running = True
    while running:
        for event in pygame.event.get():
            if continue_rect.collidepoint(pygame.mouse.get_pos()):
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # restart option
                    return 1
            elif event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,799
|
jeonjw25/Pygame
|
refs/heads/master
|
/main.py
|
#import screen #screen(image, option) option 0:menu, 1:help, 2:complete, 3:gameover
import pygame
from stage import *
from start import *
from f_screen import *
from sound import *
import marco
if __name__ == "__main__":
    # Game entry point: show the menu, then loop stage 1 -> boss stage,
    # restarting on game over / completion when the player clicks Continue.
    g = start.game()
    screen = pygame.display.set_mode((1000,736))
    pygame.display.set_caption("METAL SLUG")
    menu(screen)
    while g.running:
        if g.play(screen, stage1()):
            # Stage 1 cleared: reset sprite groups but carry the player's
            # weapon mode, score and HP into a fresh player object.
            g.all_sprites.empty()
            g.enemy_bullets.empty()
            g.enemys.empty()
            g.stage_no = 1
            shootmode = g.player.shootMode
            score = g.player.score
            hp = g.player.hp
            g.player = marco.rossi(shootmode, score, hp)
            g.all_sprites.add(g.player)
            if g.play(screen, boss_stage()):
                if complete(screen) == 1: # restart the game
                    sound.intro_music(0.3)
                    g = start.game()
                    screen = pygame.display.set_mode((1000, 736))
                    pygame.display.set_caption("METAL SLUG")
            else:
                # Died on the boss stage.
                # NOTE(review): this restart block is duplicated below for
                # the stage-1 death path; consider extracting a helper.
                screen = pygame.display.set_mode((1000,600))
                marco_die(0.3)
                if gameover(screen)== 1: # restart the game
                    sound.intro_music(0.3)
                    g = start.game()
                    screen = pygame.display.set_mode((1000, 736))
                    pygame.display.set_caption("METAL SLUG")
        else:
            # Died on stage 1.
            screen = pygame.display.set_mode((1000,600))
            marco_die(0.3)
            if gameover(screen)== 1:
                sound.intro_music(0.3)
                g = start.game()
                screen = pygame.display.set_mode((1000, 736))
                pygame.display.set_caption("METAL SLUG")
    pygame.quit()
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,800
|
jeonjw25/Pygame
|
refs/heads/master
|
/start.py
|
import pygame
import marco, enemy
import stage
from sound import *
class game:
    """Game session: owns the player, sprite groups, and the main play loop."""
    def __init__(self, con = None):
        # `con` is forwarded to marco.rossi() as its first argument
        # (the weapon/shoot-mode flag — see marco.rossi.__init__).
        pygame.init()
        self.clock = pygame.time.Clock()
        self.running = True
        # event buffer: [right, left, up] key-held flags
        self.keys = [False, False, False]
        # sprite groups
        self.all_sprites = pygame.sprite.Group()
        self.enemys = pygame.sprite.Group()
        self.bullets = pygame.sprite.Group()
        self.enemy_bullets = pygame.sprite.Group()
        self.player = marco.rossi(con)
        self.items = pygame.sprite.Group()
        self.all_sprites.add(self.player)
        self.FPS = 30
        # 0 = regular stage, 1 = boss stage (set by main.py between stages)
        self.stage_no = 0
        # health bar images
        self.health_img = pygame.image.load("resources/images/health/health.png")
        self.healthbar_img = pygame.image.load("resources/images/health/healthbar.png")
        self.healthbar_img = pygame.transform.scale(self.healthbar_img, (195, 20))
        # NOTE(review): `self.a` is never read in this class — dead state?
        self.a = 0
        # score font
        self.font = pygame.font.SysFont("resources/font/metal1.ttf", 35)
        self.stage_info = None
    def events(self): # keyboard input
        """Drain the event queue, updating player flags and the key buffer."""
        for event in pygame.event.get():
            # NOTE(review): `key_event` is never used.
            key_event = pygame.key.get_pressed()
            if event.type == pygame.QUIT:
                # NOTE(review): quits pygame without exiting the process;
                # subsequent pygame calls in play() will then fail.
                pygame.quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    self.player.isup = True
                    self.keys[2] = True
                elif event.key == pygame.K_RIGHT:
                    self.player.iswalk = True
                    self.keys[0] = True
                elif event.key == pygame.K_a:
                    self.player.isshoot = True
                elif event.key == pygame.K_LEFT:
                    self.player.iswalk = True
                    self.keys[1] = True
                elif event.key == pygame.K_SPACE:
                    self.player.isjump = True
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_RIGHT:
                    self.player.iswalk = False
                    self.keys[0] = False
                elif event.key == pygame.K_LEFT:
                    self.player.iswalk = False
                    self.keys[1] = False
                elif event.key == pygame.K_UP:
                    self.player.isup = False
                    self.keys[2] = False
                elif event.key == pygame.K_a:
                    self.player.isshoot = False
    def play(self, screen, stage):
        """Run one stage's main loop.

        Returns 1 when the stage is cleared, 0 when the player dies.
        """
        # load stage info
        self.stage_info = stage
        buf = 0
        self.player.posx, self.player.posy = 50, 400 # player start position
        if self.stage_no == 1: # when on the boss stage
            self.stage_info.generation_boss(self.all_sprites, self.enemys)
        while self.running:
            if self.player.hp == 0:
                return 0
            self.clock.tick(self.FPS)
            self.events()
            self.player.update()
            # spawn enemies (regular stage) / watch for boss defeat
            if self.stage_no == 0:
                self.stage_info.generation_soldier(self.all_sprites, self.enemys)
                self.stage_info.generation_movesoldier(self.all_sprites, self.enemys)
                self.stage_info.generation_shootsoldier(self.all_sprites, self.enemys, self.enemy_bullets)
                self.stage_info.generation_ufo(self.all_sprites, self.enemys, self.enemy_bullets)
            else:
                if self.stage_info.end() == 1:
                    self.stage_info.bossmon.endsig = True
                if self.stage_info.bossmon.posy > 400:
                    return 1
            # player shoot: fire every 4th frame while the key is held
            if self.player.isshoot == True:
                buf = (buf + 1) % 4
                if self.keys[2] is True:
                    self.player.motion(3)
                else:
                    self.player.motion(2)
                if buf == 0:
                    self.player.shoot(screen, self.all_sprites, self.bullets)
            # player move: when the stage scrolls (movement == 0) the world
            # moves left instead of the player moving right
            if self.keys[0] == True:
                movement = self.stage_info.move(self.player)
                if movement == 0:
                    self.enemys.update(-self.player.speed)
                    self.enemy_bullets.update(-self.player.speed)
                    self.items.update(-self.player.speed)
                elif movement == -1:
                    break
                if self.keys[0] and self.keys[2] == True:
                    self.player.motion(3)
                #elif self.stage_no == 1:
                #    self.player.motion()
                else:
                    self.player.motion()
                self.player.update(movement,0)
            elif self.keys[1] == True:
                if self.keys[1] == True and self.keys[2] == True:
                    self.player.motion(3)
                else:
                    self.player.motion()
                if self.player.posx > 30:
                    self.player.update(-self.stage_info.speed,0)
            elif self.keys[2] == True:
                self.player.motion(3)
            elif self.player.isshoot == False:
                self.player.motion(1)
            # enemy fire: let enemies flagged as shooting emit bullets
            for i in self.enemys:
                if i.isshoot == True:
                    i.shoot(self.all_sprites, self.enemy_bullets)
            # drop player bullets that left the screen
            for i in self.bullets:
                if i.posx > 1000:
                    self.all_sprites.remove(i)
                    self.bullets.remove(i)
            # collision handling
            for i in self.enemys:
                if self.stage_no == 0:
                    i.hit(self.all_sprites, self.bullets, self.player)
                else:
                    i.hit(self.all_sprites, self.bullets, screen)
                i.motion(self.all_sprites, self.enemys, self.items)
            self.player.hit(self.all_sprites, self.items, self.enemy_bullets)
            # image draw
            self.draw(self.stage_info, screen)
        return 1
    def draw(self, stage, screen): # draw objects to the screen
        """Render the stage, sprites, health bar, and score, then flip."""
        self.all_sprites.update()
        stage.draw(screen)
        self.all_sprites.draw(screen)
        # health bar: 19 px of fill per remaining HP point
        screen.blit(self.healthbar_img, (25, 25))
        for i in range(self.player.hp*19):
            screen.blit(self.health_img, (i+28, 28))
        text = self.font.render(("SCORE : %d") % (self.player.score), True, (0, 0, 0))
        screen.blit(text, (30, 50))
        pygame.display.update()
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,801
|
jeonjw25/Pygame
|
refs/heads/master
|
/marco.py
|
import pygame
from sound import *
class rossi(pygame.sprite.Sprite): #주인공
    def __init__(self, bullet = None, score = 0, hp = 10):
        """Set up the hero sprite, starting position, and state flags.

        NOTE(review): `bullet` is used as the shoot-mode flag (callers pass
        shootMode when re-creating the player between stages) — the name is
        misleading.
        """
        super(rossi, self).__init__()
        self.image = pygame.image.load('resources/images/movement/walk/1.gif')
        self.org_size = self.image.get_size()
        self.mask = pygame.mask.from_surface(self.image)
        self.size = (120, 230)
        self.image = pygame.transform.scale(self.image,self.size)
        # starting position; the rect tracks it for drawing/collision
        self.posx = 50
        self.posy = 400
        self.rect = pygame.Rect(self.image.get_rect())
        self.rect.move_ip(self.posx,self.posy)
        # action flags driven by the keyboard handler in start.game.events()
        self.isjump = False
        self.isshoot = False
        self.iswalk = False
        # jump physics state: v = velocity steps, m = mass sign (see update())
        self.v = 8
        self.m = 2
        # animation frame index
        self.index = 0
        self.hp = hp
        self.speed = 10
        if bullet == True:
            self.shootMode = True
        else:
            self.shootMode = False
        self.score = score
        self.isup = False
        # frame-skip counter for the walk animation
        self.delay = 0
    def update(self, x=0, y=0): # hero position update
        """Move the hero by (x, y) and advance the jump arc if jumping.

        The jump uses F = mv^2/2 as a per-frame vertical offset: v counts
        down from 8, m flips to -2 when v goes negative (descent), and the
        jump ends when v reaches -9, restoring v=8, m=2.
        """
        self.posx +=x
        self.posy +=y
        self.rect.move_ip(x, y)
        if self.isjump:
            F = (1/2) *self.m*(self.v**2)
            self.rect.move_ip(0, -F)
            self.posy -= F
            self.v -= 1
            if self.v<0:
                self.m = -2
            if self.v == -9:
                self.isjump = False
                self.v, self.m = 8,2
def motion(self,option = 0): #주인공 행동모션함수
walk_image = ['resources/images/movement/walk/1.gif', 'resources/images/movement/walk/2.gif','resources/images/movement/walk/3.gif', 'resources/images/movement/walk/4.gif',
'resources/images/movement/walk/5.gif','resources/images/movement/walk/6.gif', 'resources/images/movement/walk/7.gif',
'resources/images/movement/walk/8.gif', 'resources/images/movement/walk/9.gif','resources/images/movement/walk/10.gif', 'resources/images/movement/walk/11.gif']
if self.shootMode == False:
shoot_image_stand = ['resources/images/movement/shoot/1.png','resources/images/movement/shoot/2.png','resources/images/movement/shoot/3.png']
shoot_image_walk = ['resources/images/movement/shoot/walk2/1.png','resources/images/movement/shoot/walk2/2.png','resources/images/movement/shoot/walk2/3.png','resources/images/movement/shoot/walk2/4.png',
'resources/images/movement/shoot/walk2/5.png','resources/images/movement/shoot/walk2/6.png','resources/images/movement/shoot/walk2/7.png','resources/images/movement/shoot/walk2/8.png',
'resources/images/movement/shoot/walk2/9.png']
shoot_up = ['resources/images/movement/shoot/walk_up/1.gif','resources/images/movement/shoot/walk_up/2.gif','resources/images/movement/shoot/walk_up/3.gif','resources/images/movement/shoot/walk_up/4.gif',
'resources/images/movement/shoot/walk_up/5.gif','resources/images/movement/shoot/walk_up/6.gif','resources/images/movement/shoot/walk_up/7.gif']
up_image = ['resources/images/movement/shoot/4.gif','resources/images/movement/shoot/5.gif','resources/images/movement/shoot/6.gif']
if option == 0 and self.isshoot != True:
if self.isjump == False:
if self.delay % 2 == 0:
self.index += 1
self.delay += 1
self.index = self.index%len(walk_image)
self.image = pygame.image.load(walk_image[self.index])
self.image = pygame.transform.scale(self.image,self.size)
else:
self.image = pygame.image.load(walk_image[3])
self.image = pygame.transform.scale(self.image,self.size)
elif option == 2:
if self.iswalk == True:
self.index = (self.index+1) % len(shoot_image_walk)
self.image = pygame.image.load(shoot_image_walk[self.index])
else:
self.index = (self.index+1)%len(shoot_image_stand)
self.image = pygame.image.load(shoot_image_stand[self.index])
size = self.imagesize(self.image)
self.image = pygame.transform.scale(self.image, size)
elif option == 1:
self.image = pygame.image.load(walk_image[0])
self.image = pygame.transform.scale(self.image,self.size)
elif option == 3:
if self.isshoot == True and self.iswalk == True:
self.index = (self.index + 1) % len(shoot_up)
self.image = pygame.image.load(shoot_up[self.index])
self.image = pygame.transform.scale(self.image, self.size)
elif self.isshoot == True:
self.index = (self.index + 1) % len(up_image)
self.image = pygame.image.load(up_image[self.index])
self.image = pygame.transform.scale(self.image, self.size)
else:
self.image = pygame.image.load('resources/images/movement/shoot/13.gif')
self.image = pygame.transform.scale(self.image, self.size)
else:
shoot_image_stand = ['resources/images/movement/shoot/7.png','resources/images/movement/shoot/8.png']
shoot_image_walk = ['resources/images/movement/shoot/walk1/1.png','resources/images/movement/shoot/walk1/2.png','resources/images/movement/shoot/walk1/3.png','resources/images/movement/shoot/walk1/4.png',
'resources/images/movement/shoot/walk1/5.png','resources/images/movement/shoot/walk1/6.png','resources/images/movement/shoot/walk1/7.png','resources/images/movement/shoot/walk1/8.png']
shoot_up = ['resources/images/movement/shoot/walk_up2/1.png','resources/images/movement/shoot/walk_up2/2.png','resources/images/movement/shoot/walk_up2/3.png','resources/images/movement/shoot/walk_up2/4.png',
'resources/images/movement/shoot/walk_up2/5.png','resources/images/movement/shoot/walk_up2/6.png','resources/images/movement/shoot/walk_up2/7.png']
up_image = ['resources/images/movement/shoot/9.gif','resources/images/movement/shoot/10.gif','resources/images/movement/shoot/11.gif','resources/images/movement/shoot/12.gif']
if option == 0 and self.isshoot != True:
if self.isjump == False:
if self.delay % 2 == 0:
self.index += 1
self.delay += 1
self.index = self.index%len(walk_image)
self.image = pygame.image.load(walk_image[self.index])
self.image = pygame.transform.scale(self.image,self.size)
else:
self.image = pygame.image.load(walk_image[3])
self.image = pygame.transform.scale(self.image,self.size)
elif option == 2:
if self.iswalk == True:
self.index = (self.index+1) % len(shoot_image_walk)
self.image = pygame.image.load(shoot_image_walk[self.index])
else:
self.index = (self.index+1)%len(shoot_image_stand)
self.image = pygame.image.load(shoot_image_stand[self.index])
size = self.imagesize(self.image)
self.image = pygame.transform.scale(self.image, size)
elif option == 1:
self.image = pygame.image.load(walk_image[0])
self.image = pygame.transform.scale(self.image,self.size)
elif option == 3:
if self.isshoot == True and self.iswalk == True:
self.index = (self.index + 1) % len(shoot_up)
self.image = pygame.image.load(shoot_up[self.index])
size = self.imagesize(self.image)
self.image = pygame.transform.scale(self.image, size)
elif self.isshoot == True:
self.index = (self.index + 1) % len(up_image)
self.image = pygame.image.load(up_image[self.index])
self.image = pygame.transform.scale(self.image, self.size)
else:
self.image = pygame.image.load('resources/images/movement/shoot/9.gif')
self.image = pygame.transform.scale(self.image, self.size)
def imagesize(self, change_img): #주인공 이미지 바뀔때 사이즈조정함수
change_size = change_img.get_size()
ratio = self.size[0] / self.org_size[0]
return (int(change_size[0]*ratio), self.size[1])
def hit(self, all_sprites, items, enemy_bullets): #주인공 다른객체와 충돌시 충돌객체 없애는 함수
hits = pygame.sprite.spritecollide(self, items, True)
if hits:
if hits[0].itemno == 0:
self.shootMode = True
elif hits[0].itemno ==1:
if self.hp<10:
self.hp += 1
elif hits[0].itemno ==2:
self.score += 5
elif hits[0].itemno ==3:
self.score += 10
items.remove(hits[0])
all_sprites.remove(hits[0])
hits = pygame.sprite.spritecollide(self, enemy_bullets, False)
if hits:
self.hp -= 1
all_sprites.remove(hits[0])
enemy_bullets.remove(hits[0])
# print(self.hp)
return True
def shoot(self, screen, all_sprites, bullets): #주인공 총발사시 총알궤적 함수
gun = bullet()
marco_shoot(0.3)
if self.shootMode == True:
gun.gun_change()
if self.isup == True:
gun.isup = True
gun.change_dir()
gun.posx = self.posx + 30
gun.posy = self.posy
else:
gun.posx = self.posx + self.size[0]
gun.posy = self.posy + 150
# print(self.posx, gun.posx)
gun.rect.move_ip(gun.posx, gun.posy)
all_sprites.add(gun)
bullets.add(gun)
# def islive(self):
# if self.hp == 0:
# marco_die(0.3)
# return 0
# return 1
class bullet(pygame.sprite.Sprite):
    """Projectile fired by the player.

    The default gun is a small bullet flying right.  ``gun_change`` upgrades
    it (bigger sprite, double damage, faster); ``change_dir`` swaps in the
    vertical sprite for upward shots.
    """
    def __init__(self):
        super(bullet, self).__init__()
        self.image = pygame.image.load('resources/images/bullet/bullet3.png')
        self.size = (100,20)
        self.image = pygame.transform.scale(self.image,self.size)
        self.posx = 0   # logical position, kept in sync with self.rect
        self.posy = 0
        self.damage = 0.5
        self.rect = pygame.Rect(self.image.get_rect())
        self.speed = 30
        self.isup = False  # True when the bullet travels upwards
    def update(self):
        """Advance one step in the flight direction."""
        if self.isup == False:
            self.posx += self.speed
            self.rect.move_ip(self.speed,0)
        else:
            # Bugfix: posy previously *increased* while the rect moved up
            # (-speed), so the logical position drifted away from the sprite.
            # Decrement posy to match the rect, as the horizontal branch does.
            self.posy -= self.speed
            self.rect.move_ip(0, -self.speed)
    def gun_change(self):
        """Upgrade to the heavy gun: larger sprite, damage 1, speed 40."""
        self.image = pygame.image.load('resources/images/bullet/bullet2.png')
        self.size = (130, 40)
        self.image = pygame.transform.scale(self.image, self.size)
        self.damage = 1
        self.rect = pygame.Rect(self.image.get_rect())
        self.speed = 40
    def change_dir(self):
        """Swap to the vertical sprite for upward fire (per gun type)."""
        if self.speed == 40:
            # upgraded gun: tall heavy bullet
            self.image = pygame.image.load('resources/images/bullet/bullet8.png')
            self.size = (40, 130)
            self.speed = 40
        else:
            # default gun: tall small bullet
            self.image = pygame.image.load('resources/images/bullet/bullet7.png')
            self.size = (20, 100)
        self.image = pygame.transform.scale(self.image, self.size)
        self.rect = pygame.Rect(self.image.get_rect())
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,802
|
jeonjw25/Pygame
|
refs/heads/master
|
/stage.py
|
import pygame
import enemy
class stage1: # first (side-scrolling) stage
    """Scrolling stage: draws the map and spawns enemies at fixed scroll offsets."""
    def __init__(self):
        # (w1, h1) is the map's blit offset; (w2, h2) the viewport size.
        self.w1, self.h1, self.w2, self.h2 = 0, 0, 1000, 736
        self.rect = pygame.Rect(((self.w1,self.h1),(self.w2,self.h2)))
        self.image = pygame.image.load("resources/images/map2.png").convert_alpha()
        self.image = pygame.transform.scale(self.image,(3072,736))
        self.speed = 10 # map scroll speed
    def draw(self, screen): # blit the stage background at the current offset
        self.rect = pygame.Rect(((self.w1,self.h1),(self.w2,self.h2)))
        screen.blit(self.image,self.rect)
    def generation_soldier(self, all_sprites, enemys): # spawn standing soldiers
        # (scroll offset, y) pairs: a soldier appears when w1 reaches the offset
        gen_position = [(-500,500), (-850,265) ,(-1000,500) ,(-1700,500)]
        for i in gen_position:
            if self.w1 == i[0]:
                enm = enemy.soldier()
                enm.gen(i[1])
                all_sprites.add(enm)
                enemys.add(enm)
    def generation_movesoldier(self, all_sprites, enemys): # spawn walking soldiers
        gen_position = [(-400,500),(-800,500), (-1200,500) ,(-1600,500)]
        for i in gen_position:
            if self.w1 == i[0]:
                enm = enemy.soldier()
                enm.iswalk = True
                enm.gen(i[1])
                all_sprites.add(enm)
                enemys.add(enm)
    def generation_shootsoldier(self, all_sprites, enemys,enemy_bullets): # spawn shooting soldiers
        gen_position = [(-1400,500),(-1800,257)]
        for i in gen_position:
            if self.w1 == i[0]:
                enm = enemy.soldier()
                enm.isshoot = True
                enm.gen(i[1])
                all_sprites.add(enm)
                enemys.add(enm)
                enm.shoot(all_sprites, enemy_bullets)
    def generation_ufo(self, all_sprites, enemys, enemy_bullets): # spawn UFO enemies
        gen_position = [(-1000, 100), (-1500, 120),(-2000, 110)]
        for i in gen_position:
            if self.w1 == i[0]:
                enm = enemy.UFO()
                enm.gen(i[1])
                all_sprites.add(enm)
                enemys.add(enm)
                enm.shoot(all_sprites, enemy_bullets)
    def move(self, player): # scroll the map according to the player's position
        # Returns the x-speed the player may move at this frame:
        # 0 while the map itself scrolls, -1 once the stage end is reached
        # and the player has walked past x=1000, self.speed otherwise.
        if player.posx >= 300 and self.w2 < -1000:
            if player.posx > 1000:
                return -1
            return self.speed
        elif player.posx >= 300:
            # player pinned at x>=300: scroll the world left instead
            self.w1 -= player.speed
            self.w2 -= player.speed
            return 0
        else:
            return self.speed
    def end(self):
        # no end condition for the scrolling stage (boss_stage.end reports the win)
        pass
class boss_stage(): # boss arena
    """Static boss arena: a fixed background plus a single boss enemy."""
    def __init__(self):
        self.image = pygame.image.load("resources/images/boss/bossmap.png")
        self.w1, self.h1, self.w2, self.h2 = -150, -0, 1000, 657
        self.rect = pygame.Rect(((self.w1, self.h1), (self.w2, self.h2)))
        self.image = pygame.transform.scale(self.image, (1344, 736))
        self.speed = 10
        self.bossmon = enemy.boss()
    def draw(self, screen):
        """Blit the arena background at its current offset."""
        area = pygame.Rect(((self.w1, self.h1), (self.w2, self.h2)))
        self.rect = area
        screen.blit(self.image, area)
    def move(self, player):
        """The arena does not scroll; the player keeps full speed."""
        return player.speed
    def generation_boss(self, all_sprites, enemys):
        """Register the boss with both sprite groups."""
        for group in (all_sprites, enemys):
            group.add(self.bossmon)
    def end(self):
        """Return 1 once the boss is dead, None otherwise."""
        return 1 if self.bossmon.hp == 0 else None
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,803
|
jeonjw25/Pygame
|
refs/heads/master
|
/enemy.py
|
import pygame
import item
import random
class soldier(pygame.sprite.Sprite):
    """Ground enemy: may walk and/or shoot; dies after two hits and drops an item."""
    def __init__(self):
        super(soldier, self).__init__()
        self.image = pygame.image.load('resources/images/enemy/enemy2/soldier1.png')
        self.size = 120  # square sprite, pixels per side
        self.image = pygame.transform.scale(self.image,(self.size,self.size))
        self.posx, self.posy = 0, 0
        self.rect = pygame.Rect((self.image.get_rect()))
        self.rect.move_ip(self.posx, self.posy)
        self.ishit = False
        self.hitno = 0        # hits taken so far; 2 kills the soldier
        self.iswalk = False
        self.speed = 5
        self.index = 0
        self.walk_index = 0
        self.shoot_index = 0
        self.isshoot = False
        self.delay = 0        # frame counter throttling the fire rate
    def gen(self, y): # spawn at the right screen edge (x=1000) at height y
        self.update(1000,y)
    def hit(self, all_sprites, bullets, player): # consume one colliding player bullet
        hits = pygame.sprite.spritecollide(self ,bullets ,False)
        if hits:
            self.ishit = True
            all_sprites.remove(hits[0])
            bullets.remove(hits[0])
            player.score += 5
        return True
    def update(self, x=0, y=0): # walk left (if enabled) and apply the scroll offset
        if self.iswalk == True:
            self.posx -= self.speed
            self.rect.move_ip(-self.speed, 0)
        self.posx +=x
        self.posy +=y
        self.rect.move_ip(x, y)
    def shoot(self, all_sprites, enemy_bullets): # fire one bullet every 25 frames
        self.delay += 1
        if self.isshoot == True and self.delay%25 == 0:
            gun = enemy_bullet()
            gun.rect.move_ip(self.posx, self.posy+20)
            enemy_bullets.add(gun)
            all_sprites.add(gun)
    def motion(self, all_sprites, enemys, items): # pick animation frame; handle death + item drop
        hit_image = ['resources/images/enemy/enemy2/soldier10.png','resources/images/enemy/enemy2/soldier11.png']
        walk_image = ['resources/images/enemy/enemy2/soldier1.png','resources/images/enemy/enemy2/soldier2.png','resources/images/enemy/enemy2/soldier3.png','resources/images/enemy/enemy2/soldier4.png']
        shoot_image = ['resources/images/enemy/enemy2/soldier8.png','resources/images/enemy/enemy2/soldier9.png']
        if self.hitno == 2:
            # second hit registered: drop a random item and remove the soldier
            new_item = item.ITEM()
            new_item.rect.move_ip(self.posx, self.posy+60)
            items.add(new_item)
            all_sprites.add(new_item)
            all_sprites.remove(self)
            enemys.remove(self)
        if self.ishit and self.hitno<2:
            # play the damage frame for the current hit count
            self.index = (self.hitno)%len(hit_image)
            self.image = pygame.image.load(hit_image[self.hitno])
            self.image = pygame.transform.scale(self.image,(self.size,self.size))
            self.hitno += 1
        elif self.iswalk == True:
            self.walk_index = (self.walk_index+1) % len(walk_image)
            self.image = pygame.image.load(walk_image[self.walk_index])
            self.image = pygame.transform.scale(self.image, (self.size, self.size))
        else:
            self.image = pygame.image.load('resources/images/enemy/enemy2/soldier1.png')
            self.image = pygame.transform.scale(self.image,(self.size,self.size))
        if self.isshoot ==True and self.delay%25 == 0:
            # muzzle-flash frame on the same cadence as shoot()
            self.shoot_index = (self.shoot_index + 1) % len(shoot_image)
            self.image = pygame.image.load(shoot_image[self.shoot_index])
            self.image = pygame.transform.scale(self.image, (self.size, self.size))
        elif self.isshoot ==True:
            self.image = pygame.image.load(shoot_image[0])
            self.image = pygame.transform.scale(self.image, (self.size, self.size))
class enemy_bullet(pygame.sprite.Sprite): # projectile fired by enemies
    """Enemy projectile: mode 0 flies left; mode 1 (after gun_change) falls down."""
    def __init__(self):
        super(enemy_bullet, self).__init__()
        self.image = pygame.image.load('resources/images/bullet/bullet6.png')
        self.size = (40,40)
        self.image = pygame.transform.scale(self.image,self.size)
        self.posx = 0
        self.posy = 0
        self.damage = 0.5
        self.rect = pygame.Rect(self.image.get_rect())
        self.speed = 30
        self.mode = 0
    def update(self, x=0, y=0):
        """Advance one step in the current mode, then apply the scroll shift (x, y)."""
        if self.mode == 0:
            dx, dy = -self.speed, 0   # horizontal shot, travelling left
        elif self.mode == 1:
            dx, dy = 0, self.speed    # bomb, falling down
        else:
            dx, dy = 0, 0
        self.posx += dx
        self.posy += dy
        self.rect.move_ip(dx, dy)
        self.posx += x
        self.posy += y
        self.rect.move_ip(x, y)
    def gun_change(self):
        """Switch to the downward-falling bomb used by UFOs and the boss."""
        self.mode = 1
        self.image = pygame.image.load('resources/images/bullet/bullet9.png')
        self.size = (100, 120)
        self.image = pygame.transform.scale(self.image, self.size)
        self.damage = 1
        self.speed = 15
class UFO(pygame.sprite.Sprite):
    """Flying enemy that drifts left and drops bombs; destroyed after three hits."""
    def __init__(self):
        super(UFO, self).__init__()
        self.image = pygame.image.load('resources/images/ufo/ufo1.png')
        self.size = 120
        self.image = pygame.transform.scale(self.image, (self.size, self.size))
        self.posx, self.posy = 0,0
        self.rect = pygame.Rect((self.image.get_rect()))
        self.rect.move_ip(self.posx, self.posy)
        self.ishit = False
        self.hitno = 0    # hits taken; 3 destroys the UFO
        self.delay = 0    # frame counter throttling bomb drops
        self.isshoot = True
        self.speed = 2
    def motion(self, all_sprites, enemys, items):
        """Show the damage animation; remove the sprite after the third hit."""
        hit_image = ['resources/images/ufo/ufo4.png', 'resources/images/ufo/ufo5.png', 'resources/images/ufo/ufo6.png']
        if self.hitno == 3:
            all_sprites.remove(self)
            enemys.remove(self)
        if self.ishit and self.hitno < 3:
            self.index = self.hitno % len(hit_image)
            frame = pygame.image.load(hit_image[self.hitno])
            self.image = pygame.transform.scale(frame, (self.size, self.size))
            self.hitno += 1
        else:
            frame = pygame.image.load('resources/images/ufo/ufo1.png')
            self.image = pygame.transform.scale(frame, (self.size, self.size))
    def hit(self, all_sprites, bullets, player):
        """Consume one colliding player bullet; mark damage and award 10 points."""
        struck = pygame.sprite.spritecollide(self, bullets, False)
        if struck:
            first = struck[0]
            self.ishit = True
            all_sprites.remove(first)
            bullets.remove(first)
            player.score += 10
        return True
    def gen(self, y):
        """Place the UFO just past the right screen edge at height y."""
        self.posx, self.posy = 1000, y
        self.rect.move_ip(1000, y)
    def update(self, x=0, y=0):
        """Drift left by `speed`, then apply the scroll offset (x, y)."""
        self.posx += x - self.speed
        self.posy += y
        self.rect.move_ip(-self.speed, 0)
        self.rect.move_ip(x, y)
    def shoot(self, all_sprites, enemy_bullets):
        """Drop one bomb every 25 frames."""
        self.delay += 1
        if self.delay % 25 == 0:
            bomb = enemy_bullet()
            bomb.gun_change()
            bomb.rect.move_ip(self.posx, self.posy)
            enemy_bullets.add(bomb)
            all_sprites.add(bomb)
class boss(pygame.sprite.Sprite):
    """Boss enemy: bounces around the arena, shoots bombs, tracks its own hp."""
    def __init__(self):
        super(boss, self).__init__()
        self.image = pygame.image.load('resources/images/boss/boss1.png')
        self.size = (300,300)
        self.image = pygame.transform.scale(self.image, self.size)
        self.rect = self.image.get_rect()
        self.posx = 400
        self.posy = -400  # starts above the screen and descends into view
        self.hp = 30
        self.speed = 7
        self.xdirection = 0  # 0 = moving right, 1 = moving left
        self.ydirection = 0  # 0 = moving up, 1 = moving down
        self.hit_size = (30,30)
        self.isshoot = True
        self.delay = 0       # frame counter throttling shots
        self.endsig = False  # True once the death sequence starts
        self.hit_effect = pygame.image.load('resources/images/ufo/ufo7.png')
    def motion(self, all_sprites =None, enemys = None, items = None):
        # the boss has no per-frame animation; present for interface parity
        # with soldier/UFO so the stage can call motion() uniformly
        pass
    def hit(self, all_sprites, bullets, screen):
        # Subtract the bullet's damage and swap the sprite at hp thresholds.
        hits = pygame.sprite.spritecollide(self, bullets, False)
        if hits:
            self.hp -= hits[0].damage
            all_sprites.remove(hits[0])
            bullets.remove(hits[0])
        if self.hp < 4:
            # near death: damaged sprite, stop shooting
            self.image = pygame.image.load('resources/images/boss/boss3.png')
            self.image = pygame.transform.scale(self.image, self.size)
            self.isshoot = False
        elif self.hp < 15:
            self.image = pygame.image.load('resources/images/boss/boss2.png')
            self.image = pygame.transform.scale(self.image, self.size)
    def shoot(self, all_sprites, enemy_bullets):
        # Drop a boss bomb every 25 frames from roughly the mouth position.
        self.delay += 1
        if self.delay %25 == 0:
            gun = enemy_bullet()
            gun.gun_change()
            gun.image = pygame.image.load('resources/images/boss/boss_bullet.png')
            gun.size = (40,40)
            gun.rect.move_ip(self.posx+150, self.posy+200)
            enemy_bullets.add(gun)
            all_sprites.add(gun)
    def update(self, x=0, y=0):
        if self.endsig == True:
            # death sequence: sink downward off-screen
            # NOTE(review): the inner `if self.ydirection<0` can only trigger if
            # ydirection was negative on entry -- looks like dead code; confirm.
            self.ydirection += self.speed
            self.rect.move_ip(0,self.speed)
            if self.ydirection<0:
                self.ydirection += self.speed
                self.rect.move_ip(0,self.speed)
        else:
            # bounce horizontally between x=0 and x=700
            if self.xdirection == 0: # moving right
                self.posx += self.speed
                if self.posx > 700:
                    self.xdirection = 1
            else: # moving left
                self.posx -= self.speed
                if self.posx < 0:
                    self.xdirection = 0
            # bounce vertically between y=0 and y=150
            if self.ydirection == 0: # moving up
                self.posy -= self.speed
                if self.posy < 0:
                    self.ydirection = 1
            else: # moving down
                self.posy += self.speed
                if self.posy > 150:
                    self.ydirection = 0
        self.posx += x
        self.posy += y
        self.rect = pygame.Rect((self.posx,self.posy),self.size)
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,804
|
jeonjw25/Pygame
|
refs/heads/master
|
/sound.py
|
import pygame
import time
def intro_music(sound):
    """Start the intro theme on an infinite loop (the ``sound`` arg is unused)."""
    track = "resources/sound/intro_music.mp3"
    pygame.mixer.init()
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(-1)
def complete_music(sound):
    """Loop the victory theme (the ``sound`` arg is unused)."""
    track = "resources/sound/Victory.ogg"
    pygame.mixer.init()
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(-1)
def mission_complete(sound):
    # NOTE(review): pygame.mixer.music.load() requires a filename argument, so
    # calling this raises TypeError. The intended sound file is not recoverable
    # from this code -- confirm and supply the correct path. No visible caller
    # in this file uses it.
    pygame.mixer.music.load()
def gameover_music(sound):
    """Loop the game-over theme (the ``sound`` arg is unused)."""
    track = "resources/sound/GameOver.ogg"
    pygame.mixer.init()
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(-1)
def marco_shoot(sound):
    """Play the player's attack sound effect once (``sound`` arg is unused)."""
    #pygame.mixer.init()
    effect = pygame.mixer.Sound("resources/sound/marco_attack.ogg")
    effect.play()
def marco_die(sound):
    """Play the player's death sound effect once (``sound`` arg is unused)."""
    #pygame.mixer.init()
    effect = pygame.mixer.Sound("resources/sound/marco_die.ogg")
    effect.play()
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,805
|
jeonjw25/Pygame
|
refs/heads/master
|
/item.py
|
import pygame
import random
class ITEM(pygame.sprite.Sprite):
    """Random pickup dropped by enemies.

    itemno (drawn with weights [1, 2, 5, 3]) selects the kind; consumers in
    this codebase treat 0 as gun upgrade, 1 as heal, 2/3 as score bonuses.
    """
    def __init__(self):
        super(ITEM, self).__init__()
        self.itemno = random.choices(range(0,4), weights=[1,2,5,3])[0]
        img = ['resources/images/items/item4.png','resources/images/items/item1.png','resources/images/items/item2.png','resources/images/items/item3.png']
        self.image = pygame.image.load(img[self.itemno])
        self.size = (60,60)
        self.image = pygame.transform.scale(self.image,self.size)
        self.rect = pygame.Rect(self.image.get_rect())
        self.posx = 0
        self.posy = 0
    def update(self, x=0, y=0):
        """Shift the item by (x, y), keeping logical position and rect in sync."""
        self.posx, self.posy = self.posx + x, self.posy + y
        self.rect.move_ip(x, y)
    def draw(self, screen):
        """Blit the item at its logical position."""
        self.rect = pygame.Rect((self.posx, self.posy), self.size)
        screen.blit(self.image, self.rect)
|
{"/enemy.py": ["/item.py"], "/start.py": ["/marco.py", "/enemy.py", "/stage.py", "/sound.py"], "/stage.py": ["/enemy.py"], "/main.py": ["/stage.py", "/start.py", "/f_screen.py", "/sound.py", "/marco.py"], "/f_screen.py": ["/start.py", "/stage.py", "/sound.py"], "/marco.py": ["/sound.py"]}
|
26,510,956
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/__init__.py
|
from src.model.encoder.encoder_resnet import ResnetPointnet, Encoder_Resnet_after_se3ACN, Encoder_Resnet_feat_geom_se3ACN, Encoder_Resnet_geom_se3ACN, Encoder_Resnet
from src.model.encoder.pointnet_e3nn import PointNet_Geo_AllNetwork
# from encoder.se3cnn import Encoder_se3ACN, Encoder_se3ACN_Fast
from src.model.encoder.e3nn import Network, ResNetwork, OutputScalarNetwork, OutputMLPNetwork, Bio_Network
from src.model.encoder.e3nn_vis import Network_Vis
from src.model.encoder.binding_e3nn import Binding_Network
from src.model.encoder.e3nn_res import ResNetwork
from src.model.encoder.pointnet_e3nn import PointNetAllNetwork
from src.model.encoder.e3nn_att import AttentionE3nn
from src.model.encoder.bio_e3nn import Bio_All_Network, Bio_All_Network_no_batch, Bio_Vis_All_Network, Bio_Local_Network, ResNet_Bio_ALL_Network, ResNet_Bio_Local_Network, Concat_Bio_Local_Network
from src.model.encoder.bio_e3nn_res import ResNet_Out_Local_Network
# Registry mapping configuration encoder names to their implementing classes;
# trainers look the encoder up here by the name given in the YAML config.
encoder_dict = dict(
    network1=Network,
    network1_vis=Network_Vis,
    OutputScalarNetwork=OutputScalarNetwork,
    OutputMLPNetwork=OutputMLPNetwork,
    binding_e3nn=Binding_Network,
    e3nn_res=ResNetwork,
    pointnetall=PointNetAllNetwork,
    att_e3nn=AttentionE3nn,
    se3cnn_resnet_after_se3cnn=Encoder_Resnet_after_se3ACN,
    se3cnn_geo_feat_resnet=Encoder_Resnet_feat_geom_se3ACN,
    pointnet_geo=PointNet_Geo_AllNetwork,
    bio_net=Bio_All_Network,
    bio_net_no_bn=Bio_All_Network_no_batch,
    bio_vis_net=Bio_Vis_All_Network,
    bio_local_net=Bio_Local_Network,
    resnet_bio_net=ResNet_Bio_ALL_Network,
    resnet_bio_local_net=ResNet_Bio_Local_Network,
    concat_bio_local_net=Concat_Bio_Local_Network,
    res_out_local_net=ResNet_Out_Local_Network,
)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,957
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/visualisation/pca.py
|
import argparse
import config
from sampling.sampler import Sampler
import argparse
import config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils import Utils
import argparse
import sys
import config
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from build_vocab import Vocabulary
from data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from sampling.sampler import Sampler
def main():
    """Save latent encodings for the test and train splits of one fold."""
    parser = argparse.ArgumentParser(
        description='sample from trained model'
    )
    parser.add_argument('config', type=str, help='Path to config file.')
    cli_args = parser.parse_args()
    cfg = config.load_config(cli_args.config, 'configurations/config_lab/default.yaml')
    savedir = cfg['output_parameters']['savedir']
    fold_id = cfg['splitting']['id_fold']
    # Checkpoints of the best model for this fold.
    encoder_path = os.path.join(savedir, "models", "encoder_best_" + str(fold_id) + '.ckpt')
    decoder_path = os.path.join(savedir, "models", "decoder_best_" + str(fold_id) + '.ckpt')
    sampler = Sampler(cfg, 'max')
    # Encode the test split first, then the train split, collecting after each.
    for split_name in ('test', 'train'):
        sampler.save_encodings_all(split_name, fold_id, encoder_path, decoder_path)
        sampler.collect_all_encodings()
# Script entry point: run the sampling pipeline when executed directly.
if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,958
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/decoder/decoder_beam_search.py
|
from functools import partial
import numpy as np
import torch
import pickle
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch import nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.autograd import Variable
from se3cnn.non_linearities.rescaled_act import Softplus
from se3cnn.point.kernel import Kernel
from se3cnn.point.operations import NeighborsConvolution
from se3cnn.point.radial import CosineBasisModel
# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Maximum number of tokens generated per SMILES during sampling.
MAX_Length = 245
class DecoderRNN(nn.Module):
    """LSTM decoder that turns a feature vector into a SMILES token sequence."""
    def __init__(self, embed_size, hidden_size, vocab_size, vocab_path, num_layers, beam_size):
        """Set the hyper-parameters and build the layers.
        """
        super(DecoderRNN, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)  # hidden state -> vocab logits
        self.max_seg_length = MAX_Length
        self.init_weights()
        self.beam_size = beam_size
        self.vocab_path = vocab_path
        self.device = DEVICE
        # vocabulary is loaded from a pickled Vocabulary object
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
    def init_weights(self):
        # small uniform init for embedding and output projection; zero bias
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.linear.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)
    def forward(self, features, captions, lengths):
        """Decodes shapes feature vectors and generates SMILES."""
        # print("captions shape initial", captions.shape)
        embeddings = self.embed(
            captions
        ) # shape [batch_size, padded_length, embed_size]
        # print("shape emb", embeddings.shape)
        # print("features emb", features.shape)
        # prepend the encoder feature vector as the first "token" of each sequence
        embeddings = torch.cat(
            (features.unsqueeze(1), embeddings), 1
        ) # shape [batch_size, padded_length + 1, embed_size]
        # print("shape embeddings", embeddings.shape)
        packed = pack_padded_sequence(
            embeddings, lengths, batch_first=True
        ) # shape [packed_length, embed_size]
        # print("packed shape", packed.data.shape)
        hiddens, _ = self.lstm(packed)
        outputs = self.linear(hiddens[0]) # shape [packed_length, vocab_size]
        # print("shape outputs", outputs.shape)
        return outputs
    def sample(self, features, states=None):
        """Samples SMILES tockens for given features (Greedy search).
        """
        sampled_ids = []
        inputs = features.unsqueeze(1)
        for i in range(self.max_seg_length):
            # one LSTM step; pick the argmax token and feed it back in
            hiddens, states = self.lstm(inputs, states)
            outputs = self.linear(hiddens.squeeze(1))
            predicted = outputs.max(1)[1]
            sampled_ids.append(predicted)
            inputs = self.embed(predicted)
            inputs = inputs.unsqueeze(1)
        sampled_ids = torch.stack(sampled_ids, 1)
        return sampled_ids
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,959
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/preprocessing/preprocessing_all.py
|
import argparse
import itertools as IT
import os
import pickle
import time
from distutils.dir_util import copy_tree
import shutil
from shutil import copyfile
from multiprocessing import Pool
from shutil import copyfile
import numpy as np
import scipy.spatial.distance as dist
from moleculekit.molecule import Molecule
from moleculekit.smallmol.smallmol import SmallMol
from openbabel import openbabel
from scipy import spatial as spatial
from src.utils import config
# from e3nn import e3nn
class Preprocessor:
    """
    Class for preprocessing refined dataset or core dataset CASF.

    For every complex it writes, under ``cfg['preprocessing']['target_path']``,
    a per-pdb-id folder holding: a protein-only crystal structure pdb, the
    ligand mol2, the ligand SMILES, and a pocket pdb (protein atoms within
    ``presision`` angstroms of the ligand's geometric center).

    Args:
        cfg: config dictionary; paths are read from cfg['preprocessing']
            and cfg['data'].
        presision: pocket selection radius in angstroms (parameter name keeps
            the original misspelling so existing keyword callers still work).
        flag: dataset kind - "refined", "core" or "core2016".
    """
    def __init__(self, cfg, presision: int, flag: str):
        self.path_root = cfg['preprocessing']['path_root']
        self.refined_path = self.path_root + "/data/new_refined/"
        # Complex folders are named by PDB id, which always starts with a digit;
        # everything else (index files, readme, ...) is filtered out.
        self.files_refined = os.listdir(self.refined_path)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.target = cfg['preprocessing']['target_path']
        self.path_data = cfg['data']['path']
        self.flag = flag # refined with 4800 or core datasets with 200 complexes
        # self.files_pdb = os.listdir(self.init)
        # self.files_pdb = self.get_files_pdb
        self.precision = presision
        # PDB ids whose ligand failed SMILES conversion (filled by mol_to_smile).
        self.exceptions_smi = []
    # parallel data processing
    def get_files_pdb(self):
        """ Creates a list of pdb_id from pdbbind dataset (refined or core)

        Raises
        ------
        ValueError
            if ``self.flag`` is not "core", "refined" or "core2016".
        """
        if self.flag == "core":
            files_proteins = os.listdir("CASF/protein/pdb")
            # File names look like "<pdbid>_protein.pdb"; keep the 4-char id.
            files = [file[0:4] for file in files_proteins]
            return files
        elif self.flag == "refined" or self.flag == "core2016":
            self.files_refined = os.listdir(self.refined_path)
            self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
            self.files_refined.sort()
            return self.files_refined
        else:
            raise ValueError("flag must be refined or core")
    def _get_path_protein_init(self, pdb_id: str):
        """ Creates a path to initial protein.pdb depending on the type of a dataset (refined or core)
        Parameters
        ----------
        pdb_id : str
        """
        if self.flag == "core":
            # NOTE(review): relative path — assumes the process cwd is the
            # directory that contains CASF/; confirm before running elsewhere.
            path_pdb = os.path.join("CASF/protein/pdb/", pdb_id + "_protein.pdb")
            return path_pdb
        elif self.flag == "refined" or self.flag == "core2016":
            path_pdb = os.path.join(self.refined_path, pdb_id, pdb_id + "_protein.pdb")
            return path_pdb
        else:
            raise ValueError("flag must be refined or core")
    def _get_path_ligand_init(self, pdb_id: str):
        """ Creates a path to initial ligand.mol2 depending on the type of a dataset (refined or core)
        Parameters
        ----------
        pdb_id : str
        """
        if self.flag == "core":
            path_pdb = os.path.join(
                # "CASF/ligand/docking/decoy_mol2/", pdb_id + "_ligand.mol2")
                "CASF/ligand/ranking_scoring/crystal_mol2/",
                pdb_id + "_ligand.mol2",
            )
            return path_pdb
        elif self.flag == "refined" or self.flag == "core2016":
            path_pdb = os.path.join(self.refined_path, pdb_id, pdb_id + "_ligand.mol2")
            return path_pdb
        else:
            raise ValueError("flag must be refined or core")
    def dataset_all(self):
        """ Builds the whole target dataset: for every pdb id returned by
        ``get_files_pdb`` runs ``refined_to_my_dataset`` (crystal pdb, ligand
        mol2/SMILES, pocket pdb). Runs serially, one complex at a time.
        """
        files_pdb = self.get_files_pdb()
        print(files_pdb)
        if not os.path.exists(self.target):
            os.makedirs(self.target)
        # files_pdb = os.listdir(self.init)
        # files_pdb.sort()
        for prot in files_pdb:
            self.refined_to_my_dataset(prot)
        # for prot in ['1r9l', '5tef', '6ce6', '2aoc', '2aod', '3fzy', '6msy', '4oct', '4gq4', '4ql1', '5kh3', '6ced', '2aog', '4o61', '5ttw', '5epl']:
        # for prot in ['1a1e']:
        #     self.pdb_to_pocket(prot)
    def test_protein(self):
        # Smoke-test pocket extraction on a single hard-coded complex.
        for i in ["1a4k"]:
            self.pdb_to_pocket(i)
    def _get_pockets_all_parallel(self, agents, chunksize):
        """ Creates new dataset with pocket.pdb.

        NOTE(review): despite the name, this currently runs serially — the
        multiprocessing Pool code below is commented out, and ``agents`` /
        ``chunksize`` are unused.

        Parameters
        ----------
        agents : int
            Number of Processes to parallize (unused)
        chunksize : int
            Number of items in one process (unused)
        """
        files_pdb = self.get_files_pdb()
        for prot in files_pdb:
            self.pdb_to_pocket(prot)
        # with Pool(processes=agents) as pool:
        #     pool.map(self.pdb_to_pocket, self.x, chunksize)
        #
    def copy_all_folder(self, pdb_id: str, name_folder_destination):
        # Collects problem complexes under ./<name_folder_destination> for
        # later inspection; only the protein pdb is copied, named by pdb id.
        path_to_exceptions = os.path.join(
            os.path.abspath(os.getcwd()), name_folder_destination
        )
        # NOTE(review): the string below is not a real docstring (it follows a
        # statement); kept in place to leave the code byte-identical.
        """copy folder of protein to the name_folder_destination
        """
        if not os.path.exists(path_to_exceptions):
            os.makedirs(path_to_exceptions)
        init_path_protein = self._get_path_protein_init(pdb_id)
        copyfile(init_path_protein, os.path.join(path_to_exceptions, pdb_id))
    # one protein processing
    def refined_to_my_dataset(self, pdb_id: str):
        """Process one complex: write the protein-only crystal structure,
        copy the ligand mol2, write the ligand SMILES and build the pocket.
        Failing complexes are copied aside; SMILES failures are also removed
        from the target tree.
        """
        if pdb_id[0].isdigit(): # just proteins
            # create folder with pdb in my folder
            if not os.path.exists(os.path.join(self.target, pdb_id)):
                os.makedirs(os.path.join(self.target, pdb_id))
            try:
                # crystall = generateCrystalPacking(i) - why not?
                # NOTE(review): Molecule(pdb_id) with a bare 4-char id
                # presumably fetches the structure from the PDB over the
                # network — confirm this is intended (vs. reading the local
                # *_protein.pdb).
                crystall = Molecule(pdb_id)
                crystall.filter("protein")
                crystall.write(
                    os.path.join(self.target, pdb_id,
                                 pdb_id + "_crystall.pdb"),
                    type="pdb",
                )
                init_path_ligand = self._get_path_ligand_init(pdb_id)
                # NOTE(review): `ligand` is never used afterwards —
                # presumably parsed only to validate the file (a bad mol2
                # raises RuntimeError, caught below); confirm.
                ligand = Molecule(init_path_ligand)
                # NOTE(review): computed but never used.
                target_path_ligand = os.path.join(self.target, pdb_id, pdb_id + "_ligand.pdb")
                copyfile(
                    init_path_ligand,
                    os.path.join(self.target, pdb_id, pdb_id + "_ligand.mol2"),
                )
            except RuntimeError:
                self.copy_all_folder(pdb_id, "run_time_Molecule_new")
            try:
                smallmol = SmallMol(
                    self._get_path_ligand_init(pdb_id),
                    removeHs=False,
                    fixHs=True,
                    force_reading=True,
                )
                sm = smallmol.toSMILES()
                #copy ligand smi
                with open(
                    os.path.join(self.target, pdb_id,
                                 pdb_id + "_ligand.smi"), "w"
                ) as txt:
                    txt.write(sm)
                #copy mol2 (second copy is a no-op if the first try succeeded)
                init_path_ligand = self._get_path_ligand_init(pdb_id)
                copyfile(
                    init_path_ligand,
                    os.path.join(self.target, pdb_id, pdb_id + "_ligand.mol2"),
                )
                #creating pocket.pdb
                self.pdb_to_pocket(pdb_id)
            except ValueError:
                self.copy_all_folder(pdb_id, "exception_core_2016")
                #delete this unlucky file
                shutil.rmtree(os.path.join(self.target, pdb_id))
    def all_to_smi(self):
        """Write a SMILES file for every refined complex, then delete the
        complexes whose ligand could not be converted."""
        for idx in range(len(self.files_refined)):
            name_protein = self.files_refined[idx]
            self.mol_to_smile(name_protein)
        print("exceptions! - ", self.exceptions_smi)
        for protein_name in self.exceptions_smi:
            print("delete, ... ", protein_name)
            self.delete_files(protein_name)
    def mol_to_smile(self, pdb_id):
        """Convert one complex's ligand mol2 to SMILES and write it to
        ``<target>/<pdb_id>/<pdb_id>_ligand.smi``; record failures in
        ``self.exceptions_smi`` instead of raising."""
        try:
            init_path_ligand = self._get_path_ligand_init(pdb_id)
            smallmol = SmallMol(
                init_path_ligand,
                removeHs=False,
                fixHs=True,
                force_reading=True,
            )
            sm = smallmol.toSMILES()
            #copy ligand smi
            with open(
                os.path.join(self.target, pdb_id,
                             pdb_id + "_ligand.smi"), "w"
            ) as txt:
                txt.write(sm)
            #copy mol2
        except ValueError:
            print("exception!!! - ", pdb_id)
            self.exceptions_smi.append(pdb_id)
            # self.copy_all_folder(pdb_id, "exception_core_2016")
            # #delete this unlucky file
            # shutil.rmtree(os.path.join(self.target, pdb_id))
    def mlkit_write_selected_atoms_to_pocket(
        self, id_pdb: str, center_lig: np.array, precision: int
    ):
        """selects atoms of "id_pdb" protein within the distance "precision" around "center_lig"
        and writes them to ``<target>/<id_pdb>/<id_pdb>_pocket.pdb``.

        Parameters
        ----------
        id_pdb : str id of a protein
            Protein to be processed
        center_lig : array
            Geometrical center of a ligand, shape (1, 3)
        precision : int
            Radius of atoms selections wrp center of ligand
        """
        path_protein_source = self._get_path_protein_init(id_pdb)
        if not os.path.exists(os.path.join(self.target, id_pdb)):
            os.makedirs(os.path.join(self.target, id_pdb))
        path_pocket = os.path.join(self.target, id_pdb, id_pdb + "_pocket.pdb")
        print(path_pocket)
        mol_protein = Molecule(path_protein_source)
        # `sel` is a moleculekit atom-selection expression: keep atoms whose
        # squared distance to the ligand center is within precision^2.
        mol_protein.write(
            path_pocket,
            # sel="(name C or name H or name O or name N or name S) and sqr(x-'{0}')+sqr(y-'{1}')+sqr(z-'{2}') <= sqr('{3}')".format(
            sel="sqr(x-'{0}')+sqr(y-'{1}')+sqr(z-'{2}') <= sqr('{3}')".format(
                str(center_lig[0][0]),
                str(center_lig[0][1]),
                str(center_lig[0][2]),
                str(precision),
            ),
            type="pdb",
        )
    def _get_ligand_center(self, path_ligand):
        # Geometric (unweighted) center of the ligand atoms.
        # NOTE(review): assumes a single-frame molecule — moleculekit coords
        # are presumably (natoms, 3, nframes), so the reshape yields (1, 3)
        # only for one frame; confirm.
        mol_ligand = Molecule(path_ligand)
        coor_lig = mol_ligand.coords
        center = np.mean(coor_lig, axis=0)
        center = center.reshape(1, -1)
        return center
    def _get_protein_coord(self, path_pocket):
        # Atom coordinates of the last frame only ([:, :, -1]).
        mol_protein = Molecule(path_pocket)
        coord_protein = mol_protein.coords
        coord_protein = coord_protein[:, :, -1]
        return coord_protein
    def pdb_to_pocket(self, id_pdb: str):
        """
        Creates pocket.pdb files for every protein. Has three regimes
        """
        if id_pdb[0].isdigit():
            path_ligand = self._get_path_ligand_init(id_pdb)
            path_protein = self._get_path_protein_init(id_pdb)
            center_ligand = self._get_ligand_center(path_ligand)
            # NOTE(review): coord_protein is computed but never used below.
            coord_protein = self._get_protein_coord(path_protein)
            print("start doing protein, '{0}'".format(id_pdb))
            self.mlkit_write_selected_atoms_to_pocket(
                id_pdb, center_ligand, self.precision
            )
            print("end doing protein, '{0}'".format(id_pdb))
    def delete_files(self, protein_name):
        """Move a complex's folder to ``<path_data>/exceptions`` and remove
        it from the refined tree."""
        path_to_exceptions = os.path.join(self.path_data, "exceptions")
        path_protein_folder = os.path.join(self.refined_path, protein_name)
        os.makedirs(path_to_exceptions, exist_ok=True)
        copy_tree(path_protein_folder, path_to_exceptions)
        shutil.rmtree(path_protein_folder)
if __name__ == "__main__":
    # Entry point: preprocess a PDBbind dataset (pockets + ligand SMILES).
    parser = argparse.ArgumentParser(
        description='Preprocess a PDBbind dataset (pockets and ligand SMILES).'
    )
    parser.add_argument('--config', type=str, help='Path to config file.')
    # Radius (in angstroms) of the sphere around the ligand center used to
    # carve the binding pocket out of the protein.
    parser.add_argument('--radious', type=int, default=8,
                        help='pocket selection radius (angstroms) around the ligand center')
    # BUGFIX: was `default=8` with `type=str` — argparse does not apply `type`
    # to defaults, so Preprocessor received the int 8 and its flag dispatch
    # ("refined"/"core"/"core2016") raised ValueError.
    parser.add_argument('--flag', type=str, default='refined',
                        help="dataset flag - 'refined', 'core' or 'core2016'")
    args = parser.parse_args()
    cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
    radious = args.radious
    flag = args.flag
    preprocessing = Preprocessor(cfg, radious, flag)
    preprocessing.all_to_smi()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,960
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/base.py
|
from torch import nn as nn
import torch
class Aggregate(nn.Module):
    """Pooling layer that reduces one axis by sum or (masked) average.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead of sum pooling.
        keepdim (bool, optional): whether the neighbor-count tensor keeps
            the reduced dim when averaging with a mask.
    """

    def __init__(self, axis, mean=False, keepdim=True):
        super(Aggregate, self).__init__()
        self.average = mean
        self.axis = axis
        self.keepdim = keepdim

    def forward(self, input, mask=None):
        r"""Pool ``input`` along ``self.axis``.

        Args:
            input (torch.Tensor): input data.
            mask (torch.Tensor, optional): mask to be applied; e.g. neighbors mask.

        Returns:
            torch.Tensor: pooled output.
        """
        # Zero out masked-off entries before reducing.
        values = input if mask is None else input * mask[..., None]
        pooled = torch.sum(values, self.axis)
        if not self.average:
            return pooled
        # Average: divide by the (masked) number of entries along the axis.
        if mask is None:
            denom = values.size(self.axis)
        else:
            # Count surviving entries; clamp at one to avoid division by zero.
            denom = torch.sum(mask, self.axis, keepdim=self.keepdim)
            denom = torch.max(denom, other=torch.ones_like(denom))
        return pooled / denom
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,961
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/utils/config.py
|
import os
import yaml
import torch
# from torchvision import transforms
from model.encoder import encoder_dict
from model.decoder import decoder_dict
# from encoder import encoder_dict
# from decoder import decoder_dict
from torch.utils.data import DataLoader
from datasets.data_loader import collate_fn, collate_fn_masks
# from training.trainer import train_loop, train_loop_mask
# General config
def load_config(path, default_path=None):
    ''' Loads a YAML config file, resolving single-level inheritance.

    The file may name a parent config via the `inherit_from` key; otherwise
    `default_path` (if given) supplies the base. Entries from the child
    recursively override/extend the base via `update_recursive`.

    Args:
        path (str): path to config file
        default_path (str, optional): path to the fallback base config

    Returns:
        dict: merged configuration
    '''
    # BUGFIX: yaml.load() without an explicit Loader is deprecated since
    # PyYAML 5.1 and a TypeError in PyYAML 6; plain config files need no
    # arbitrary-object construction, so safe_load is the right call.
    # `or {}` guards against an empty file (safe_load returns None).
    with open(path, 'r') as f:
        cfg_special = yaml.safe_load(f) or {}
    # Check if we should inherit from a config; the child's entries win.
    inherit_from = cfg_special.get('inherit_from')
    # If yes, load this config first as default
    # If no, use the default_path
    if inherit_from is not None:
        cfg = load_config(inherit_from, default_path)
    elif default_path is not None:
        with open(default_path, 'r') as f:
            cfg = yaml.safe_load(f) or {}
    else:
        cfg = dict()
    # Include main configuration on top of the base.
    update_recursive(cfg, cfg_special)
    return cfg
def update_recursive(dict1, dict2):
    ''' Update two config dictionaries recursively, in place.

    Nested dicts are merged; scalar values from `dict2` overwrite `dict1`.

    Args:
        dict1 (dict): first dictionary, updated in place
        dict2 (dict): second dictionary whose entries should be used
    '''
    for k, v in dict2.items():
        if isinstance(v, dict):
            # BUGFIX: if dict1[k] exists but is not a dict, the original
            # recursed into the scalar and crashed; replace it with a fresh
            # dict so the nested merge can proceed.
            if not isinstance(dict1.get(k), dict):
                dict1[k] = dict()
            update_recursive(dict1[k], v)
        else:
            dict1[k] = v
# Models
def get_model(cfg, device=None, dataset=None):
    ''' Returns the model instance (encoder, decoder pair).

    Thin wrapper around `get_model_captioning`; `dataset` is accepted for
    interface compatibility but unused.

    Args:
        cfg (dict): config dictionary
        device (device): pytorch device
        dataset (dataset): dataset (unused)
    '''
    return get_model_captioning(cfg, device=device)
# Trainer
def get_trainer(model, optimizer, cfg, device):
    ''' Returns a trainer instance.

    NOTE(review): dead code — this definition is shadowed by the second
    `get_trainer` defined later in this module, and `method_dict` is not
    defined or imported anywhere in the visible file, so calling this
    version would raise NameError.

    Args:
        model (nn.Module): the model which is used
        optimizer (optimizer): pytorch optimizer
        cfg (dict): config dictionary
        device (device): pytorch device
    '''
    method = cfg['method']
    trainer = method_dict[method].config.get_trainer(
        model, optimizer, cfg, device)
    return trainer
def get_shape_input(cfg):
    r''' Returns the input tensor shapes for the model.

    Args:
        cfg (yaml object): the config file

    Returns:
        list: [features shape, geometry shape, masks shape] tuples, each
        leading with the configured batch size.
    '''
    enc_kwargs = cfg['model']['encoder_kwargs']
    atoms = enc_kwargs['natoms']
    embeddings = enc_kwargs['num_embeddings']
    batch = cfg['model_params']['batch_size']
    return [
        (batch, atoms, embeddings),  # per-atom feature vectors
        (batch, atoms, 3),           # 3D coordinates
        (batch, atoms, 284),         # masks (width hard-coded to 284)
    ]
def get_model_binding(cfg, device=None, **kwargs):
    r''' Builds the encoder used for binding-affinity prediction.

    Looks up the encoder class named in the config, instantiates it with the
    configured keyword arguments, moves it to `device` and casts to float64.

    Args:
        cfg (yaml object): the config file
        device (PyTorch device): the PyTorch device
    '''
    model_cfg = cfg['model']
    encoder_cls = encoder_dict[model_cfg['encoder']]
    encoder = encoder_cls(**model_cfg['encoder_kwargs'])
    return encoder.to(device).double()
def get_model_captioning(cfg, device=None, **kwargs):
    r''' Builds the (encoder, decoder) pair for the captioning model.

    Each module is looked up by name in its registry, instantiated with the
    configured kwargs, moved to `device` and cast to float64. The decoder is
    built first, matching the original instantiation order.

    Args:
        cfg (yaml object): the config file
        device (PyTorch device): the PyTorch device
    '''
    model_cfg = cfg['model']

    def _build(registry, name_key, kwargs_key):
        # Instantiate a registered module and place it on the target device.
        module = registry[model_cfg[name_key]](**model_cfg[kwargs_key])
        return module.to(device).double()

    decoder = _build(decoder_dict, 'decoder', 'decoder_kwargs')
    encoder = _build(encoder_dict, 'encoder', 'encoder_kwargs')
    return encoder, decoder
def eval_model_captioning(cfg, encoder_path, decoder_path, device=None, **kwargs):
    r''' Builds the captioning encoder/decoder, loads trained weights and
    returns both modules in eval mode.

    Args:
        cfg (yaml object): the config file
        encoder_path: path to the saved encoder state dict
        decoder_path: path to the saved decoder state dict
        device (PyTorch device): the PyTorch device
    '''
    model_cfg = cfg['model']
    encoder = encoder_dict[model_cfg['encoder']](
        **model_cfg['encoder_kwargs']).to(device).double()
    decoder = decoder_dict[model_cfg['decoder']](
        **model_cfg['decoder_kwargs']).to(device).double()
    # Restore trained parameters; map to CPU so loading works without a GPU.
    for module, weights_path in ((encoder, encoder_path), (decoder, decoder_path)):
        module.load_state_dict(
            torch.load(weights_path, map_location=torch.device('cpu')))
        module.eval()
    return encoder, decoder
def get_trainer(model, optimizer, cfg, device, **kwargs):
    r''' Returns the trainer instance.

    NOTE(review): `training` is not imported anywhere in this module, so
    calling this function as written raises NameError. This definition also
    shadows the earlier `get_trainer` defined above in the same file.

    Args:
        model (nn.Module): PSGN model
        optimizer (PyTorch optimizer): The optimizer that should be used
        cfg (yaml object): the config file
        device (PyTorch device): the PyTorch device
    '''
    input_type = cfg['data']['input_type']
    out_dir = cfg['training']['out_dir']
    # Visualisations go to <out_dir>/vis.
    vis_dir = os.path.join(out_dir, 'vis')
    trainer = training.Trainer(
        model, optimizer, device=device, input_type=input_type,
        vis_dir=vis_dir
    )
    return trainer
def get_loader(cfg, feat_train, batch_size, num_workers):
    '''Build a shuffling DataLoader, picking the collate function from the
    `preprocessing.mask` config switch.

    Args:
        cfg (dict): config dictionary
        feat_train: dataset of pre-computed features
        batch_size (int): batch size
        num_workers (int): DataLoader worker processes
    '''
    # `== True` kept deliberately: matches the original's exact semantics
    # for non-bool config values.
    use_masks = cfg['preprocessing']['mask'] == True
    collate = collate_fn_masks if use_masks else collate_fn
    return DataLoader(
        feat_train,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=collate,
    )
def get_train_loop(cfg, loader_train, encoder, decoder,caption_optimizer, split_no, epoch, total_step):
    '''Dispatch one training epoch to the mask-aware or plain train loop.

    NOTE(review): `train_loop` / `train_loop_mask` are not in scope — their
    import (`from training.trainer import ...`) is commented out at the top
    of this module, so calling this function raises NameError.
    '''
    if(cfg['preprocessing']['mask'] == True):
        train_loop_mask(loader_train, encoder, decoder,caption_optimizer, split_no, epoch, total_step)
    else:
        train_loop(loader_train, encoder, decoder,caption_optimizer, split_no, epoch, total_step)
#maybe uncomment later
# def get_collate_fn(cfg):
# if(cfg['preprocessing']['collate_fn'] == 'masks'):
# collate = data_loader.collate_fn()
# else:
# collate = data_loader.collate_fn_masks()
def get_generator(model, cfg, device, **kwargs):
    r''' Returns the generator instance.

    NOTE(review): `generation` is not imported in this module, so calling
    this raises NameError. `cfg` and `**kwargs` are unused.

    Args:
        cfg (yaml object): the config file
        device (PyTorch device): the PyTorch device
    '''
    generator = generation.Generator3D(model, device=device)
    return generator
def get_data_fields(mode, cfg, **kwargs):
    r''' Returns the data fields.

    NOTE(review): `data` (providing SubsamplePointcloud / PointCloudField)
    is not imported in this module, so calling this raises NameError.

    Args:
        mode (string): The split that is used (train/val/test)
        cfg (yaml object): the config file
    '''
    with_transforms = cfg['data']['with_transforms']
    # Subsample every point cloud to a fixed target size.
    pointcloud_transform = data.SubsamplePointcloud(
        cfg['data']['pointcloud_target_n'])
    fields = {}
    fields['pointcloud'] = data.PointCloudField(
        cfg['data']['pointcloud_file'], pointcloud_transform,
        with_transforms=with_transforms
    )
    # Eval splits optionally load an extra cloud for Chamfer evaluation.
    if mode in ('val', 'test'):
        pointcloud_chamfer_file = cfg['data']['pointcloud_chamfer_file']
        if pointcloud_chamfer_file is not None:
            fields['pointcloud_chamfer'] = data.PointCloudField(
                pointcloud_chamfer_file
            )
    return fields
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,962
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/training/binding.py
|
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torchsummary import summary
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.tensorboard import SummaryWriter
from torch.optim import Adam
from tqdm import tqdm
import argparse
import sys
import config
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from utils.build_vocab import Vocabulary
from datasets.data_loader_binding import Pdb_Dataset, Loss
from sampling.sampler import Sampler
from utils import Utils
import sys
import numpy as np
from numpy import savetxt
class Trainer_Binding_Fold():
    """Trains and evaluates the binding-affinity encoder on one CV fold.

    Everything is read from the config: the fold id, model/optimizer
    hyper-parameters and all output locations. `train_epochs` is the entry
    point; it loads pre-computed fold indices, trains for N_EPOCHS, then
    evaluates on the held-out fold and writes predictions/plots to disk.
    """
    def __init__(self, cfg):
        # model params
        self.cfg = cfg
        self.original_stdout = sys.stdout
        #folds data
        self.name_file_folds = cfg['splitting']['file_folds']
        self.fold_number = cfg['splitting']['id_fold']
        # NOTE(review): num_epochs and N_EPOCHS duplicate the same setting;
        # only N_EPOCHS is used below.
        self.num_epochs = cfg['model_params']['num_epochs']
        self.N_EPOCHS = cfg['model_params']['num_epochs']
        self.BATCH_SIZE = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.NUM_WORKERS = cfg['model_params']['num_workers']
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        self.n_splits = cfg['training_params']['n_splits']
        self.loss_best = np.inf
        #output files
        self.savedir = cfg['output_parameters']['savedir']
        self.name_plot = cfg['output_parameters']['name_plot']
        # NOTE(review): "tesnorboard" attribute-name typo kept — renaming
        # would break any external user of this attribute.
        self.tesnorboard_path = os.path.join(self.savedir, "tensorboard")
        self.model_path = os.path.join(self.savedir, "models")
        self.log_path = os.path.join(self.savedir, "logs")
        # PKD_PATH aliases the logs dir: predictions are written there too.
        self.PKD_PATH = os.path.join(self.savedir, "logs")
        self.PATH_PLOTS = os.path.join(self.savedir, "plots")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        if not os.path.exists(self.tesnorboard_path):
            os.makedirs(self.tesnorboard_path)
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)
        if not os.path.exists(self.PATH_PLOTS):
            os.makedirs(self.PATH_PLOTS)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        if not os.path.exists(self.idx_file):
            os.makedirs(self.idx_file)
        if not os.path.exists(self.save_dir_smiles):
            os.makedirs(self.save_dir_smiles)
        #log files
        # NOTE(review): these handles stay open for the object's lifetime and
        # are never closed.
        self.test_idx_file = open(os.path.join(self.idx_file, "test_idx.txt"), "w")
        self.log_file = open(os.path.join(self.log_path, "log.txt"), "w")
        self.log_file_tensor = open(os.path.join(self.log_path, "log_tensor.txt"), "w")
        self.writer = SummaryWriter(self.tesnorboard_path)
        self.Encoder = config.get_model_binding(self.cfg, device=self.device)
        # self.input = self.cfg.get_shape_input(self.cfg)
        # print(summary(self.Encoder, self.input))
        # print(summary(self.Decoder))
        print(self.Encoder)
        with open(os.path.join(self.log_path, "model.txt"), 'w') as f:
            sys.stdout = f  # Change the standard output to the file we created.
            # print(summary(self.Encoder, self.input))
            # print(summary(self.Decoder))
            print(self.Encoder)
            sys.stdout = self.original_stdout
        self.utils = Utils(self.cfg)
        #print all params
        nparameters_enc = sum(p.numel() for p in self.Encoder.parameters())
        # NOTE(review): model.txt was just written above; reopening with 'w'
        # truncates it, so only the parameter count survives — confirm this
        # is intended (appending with 'a' may have been meant).
        with open(os.path.join(self.log_path, "model.txt"), 'w') as f:
            f.write('Total number of parameters: %d' % (nparameters_enc))
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
        # NOTE(review): unused — the loss actually used is `Loss()` created
        # in train_epochs as self.loss_cl.
        self.criterion = nn.CrossEntropyLoss()
    def train_loop_mask(self, loader, model, loss_cl, opt, epoch):
        """Run one training epoch over `loader` with masked inputs.

        Returns:
            (targets, predictions, mean loss) — targets and predictions are
            concatenated over all batches of the epoch.
        """
        target_pkd_all = []
        model = model.train()
        progress = tqdm(loader)
        all_rmsd = []
        pkd_pred = []
        for idx, features, geometry, masks, target_pkd in progress:
            idx = idx.to(self.device)
            features = features.to(self.device)
            geometry = geometry.to(self.device)
            masks = masks.to(self.device)
            # num_atoms= num_atoms.to(self.device)
            target_pkd = target_pkd.to(self.device)
            target_pkd_all.append(target_pkd)
            opt.zero_grad()
            # out1 = model(features, geometry)
            out1 = model(features, geometry, masks)
            pkd_pred.append(out1.cpu())
            # print(out1.cpu())
            loss_rmsd_pkd = loss_cl(out1, target_pkd).float()
            # NOTE(review): logged once per batch under the same `epoch`
            # step — later batches overwrite earlier points on that step;
            # confirm a global step counter wasn't intended.
            self.writer.add_scalar("training_loss", loss_rmsd_pkd.item(), epoch)
            loss_rmsd_pkd.backward()
            opt.step()
            all_rmsd.append(loss_rmsd_pkd.item())
        return torch.cat(target_pkd_all), torch.cat(pkd_pred), sum(all_rmsd) / len(all_rmsd)
    def eval_loop(self, loader, model, epoch):
        """
        Evaluation loop using `model` and data from `loader`.

        NOTE(review): reads `self.loss_cl`, which is only assigned inside
        `train_epochs` — calling eval_loop before train_epochs raises
        AttributeError.

        Returns:
            (targets, predictions, mean loss) over all batches.
        """
        model = model.eval()
        progress = tqdm(loader)
        target_pkd_all = []
        pkd_pred = []
        all_rmsd = []
        for idx, features, geometry, masks, target_pkd in progress:
            with torch.no_grad():
                features = features.to(self.device)
                geometry = geometry.to(self.device)
                masks = masks.to(self.device)
                out1 = model(features, geometry, masks).to(self.device)
                target_pkd = target_pkd.to(self.device)
                target_pkd_all.append(target_pkd)
                pkd_pred.append(out1.cpu())
                loss_rmsd_pkd = self.loss_cl(out1, target_pkd).float()
                self.writer.add_scalar("test_loss", loss_rmsd_pkd.item(), epoch)
                all_rmsd.append(loss_rmsd_pkd.item())
        return torch.cat(target_pkd_all), torch.cat(pkd_pred), sum(all_rmsd) / len(all_rmsd)
    def train_epochs(self):
        """Train on the configured fold for N_EPOCHS, then evaluate on its
        held-out split; saves targets/predictions as .npy/.csv under
        PKD_PATH and renders train/test plots via `self.utils`.
        """
        featuriser = Pdb_Dataset(self.cfg, vocab=self.vocab)
        files_refined = os.listdir(self.protein_dir)
        # Fold indices were pre-computed and pickled by the splitting step.
        idx_folds = pickle.load( open(os.path.join(self.idx_file, self.name_file_folds), "rb" ) )
        split_no = self.fold_number
        # NOTE(review): unused variable.
        test_idx = []
        # GPU monitoring init; no further py3nvml calls are visible here.
        py3nvml.nvmlInit()
        train_id, test_id = idx_folds[split_no]
        train_data = train_id
        test_data = test_id
        # Persist the held-out ids for this fold for later analysis.
        with open(os.path.join(self.idx_file, 'test_idx_' + str(split_no)), 'wb') as fp:
            pickle.dump(test_data, fp)
        # Featurise every complex up front (memory-heavy but avoids
        # re-computing features per epoch).
        feat_train = [featuriser[data] for data in train_data]
        feat_test = [featuriser[data] for data in test_data]
        loader_train = DataLoader(
            feat_train, batch_size=self.BATCH_SIZE, num_workers= self.NUM_WORKERS, shuffle=True
        )
        loader_test = DataLoader(
            feat_test, batch_size=self.BATCH_SIZE, num_workers=self.NUM_WORKERS, shuffle=False
        )
        self.loss_cl = Loss()
        opt = Adam(self.Encoder.parameters(),
                   lr=self.learning_rate)
        # Decay the LR by 5% every epoch.
        scheduler = ExponentialLR(opt, gamma=0.95)
        print("Training model...")
        losses_to_write_train = []
        for i in range(self.N_EPOCHS):
            print("Epoch {}/{}...".format(i + 1, self.N_EPOCHS))
            epoch = i + 1
            target_pkd_all, pkd_pred, loss = self.train_loop_mask(
                loader_train, self.Encoder, self.loss_cl, opt, epoch
            )
            print("pkd_pred", pkd_pred)
            losses_to_write_train.append(loss)
            # Keep only the last epoch's train predictions on disk.
            if i == self.N_EPOCHS - 1:
                np.save(
                    os.path.join(self.PKD_PATH, "pkd_pred_train_{}.npy".format(str(i))),
                    arr=pkd_pred.detach().cpu().clone().numpy(),
                )
            scheduler.step()
        losses_to_write_train = np.asarray(losses_to_write_train, dtype=np.float32)
        # save losses for the train
        # np.savetxt(
        #     os.path.join(self.PATH_LOSS, "losses_train_2016.out"),
        #     losses_to_write_train,
        #     delimiter=",",
        # )
        # save true values of training target
        savetxt(
            os.path.join(self.PKD_PATH, "target_pkd_all_train.csv"),
            target_pkd_all.detach().cpu().clone().numpy(),
        )
        np.save(
            os.path.join(self.PKD_PATH, "target_pkd_all_train"),
            arr=target_pkd_all.detach().cpu().clone().numpy(),
        )
        print("Evaluating model...")
        # `epoch` still holds the last training epoch number here.
        target_pkd_all_test, pkd_pred_test, loss_test_to_write = self.eval_loop(
            loader_test, self.Encoder, epoch
        )
        print("pkd_pred", pkd_pred_test)
        loss_test_to_write = np.asarray(loss_test_to_write, dtype=np.float32)
        loss_test_to_write = np.asarray([loss_test_to_write])
        # np.savetxt(
        #     os.path.join(self.PATH_LOSS, "losses_test_2016.out"),
        #     loss_test_to_write,
        #     delimiter=",",
        # )
        os.makedirs(self.PKD_PATH, exist_ok=True)
        np.save(
            os.path.join(self.PKD_PATH, "target_pkd_all_test"),
            arr=target_pkd_all_test.detach().cpu().clone().numpy(),
        )
        np.save(
            os.path.join(self.PKD_PATH, "pkd_pred_test"),
            arr=pkd_pred_test.detach().cpu().clone().numpy(),
        )
        # with open(os.path.join(self.PKD_PATH, "split_pdbids.pt"), "wb") as handle:
        #     pickle.dump(split_pdbids, handle)
        self.utils.plot_statistics(
            self.PKD_PATH,
            self.PATH_PLOTS,
            self.N_EPOCHS,
            self.name_plot,
            "train",
            losses_to_write_train[-1],
            loss_test_to_write[0],
        )
        self.utils.plot_statistics(
            self.PKD_PATH,
            self.PATH_PLOTS,
            self.N_EPOCHS,
            self.name_plot,
            "test",
            losses_to_write_train[-1],
            loss_test_to_write[0],
        )
        # self.utils.plot_losses(
        #     self.PATH_LOSS, self.PATH_PLOTS, self.N_EPOCHS, self.name_plot
        # )
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,963
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/datasets/split_test.py
|
import itertools as IT
import json
import os
import pickle
import time
from distutils.dir_util import copy_tree
from functools import partial
from multiprocessing import Pool
from shutil import copyfile
import _pickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.spatial.distance as dist
import torch
from matplotlib import pyplot as plt
from numpy import mean, std
# from openbabel import openbabel
from scipy import spatial as spatial
from scipy.stats import pearsonr
import argparse
import sys
import config
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
from utils.build_vocab import Vocabulary
# NOTE(review): presumably the number of distinct atom types used by the
# encoders below — confirm against datasets.dictionaries before relying on it.
number_atoms = 22
class Splitter:
    """Builds K-fold cross-validation splits and pickles the fold indices.

    All paths and hyper-parameters come from the experiment config; the
    constructor also creates the output directory tree (models/, logs/,
    logs/idxs, statistics/) so later writes never fail.
    """

    # def __init__(self, path_pocket: str, path_ligand: str):
    def __init__(self, cfg):
        self.cfg = cfg
        self.name_file_folds = cfg['splitting']['file_folds']
        self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        self.n_splits = cfg['training_params']['n_splits']
        self.loss_best = np.inf
        # output files
        self.savedir = cfg['output_parameters']['savedir']
        # (sic) attribute name kept with the original typo for compatibility
        self.tesnorboard_path = self.savedir
        self.model_path = os.path.join(self.savedir, "models")
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        if not os.path.exists(self.save_dir_smiles):
            os.makedirs(self.save_dir_smiles)
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)
        if not os.path.exists(self.idx_file):
            os.makedirs(self.idx_file)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

    def _get_random_split(self):
        """Create shuffled K-fold (train, test) index arrays and pickle them
        to <log_path>/idxs/<file_folds>.
        """
        files_refined = os.listdir(self.protein_dir)
        # NOTE(review): ids are hard-coded to 20 complexes (mirrors the test
        # dataset size used elsewhere in this repo) — confirm before a full run.
        data_ids = np.array([i for i in range(20)])
        # cross validation — bug fix: honour the configured number of splits
        # instead of the hard-coded 5 that ignored cfg['training_params']['n_splits'].
        kf = KFold(n_splits=self.n_splits, shuffle=True, random_state=2)
        my_list = list(kf.split(data_ids))
        with open(os.path.join(self.idx_file, self.name_file_folds), 'wb') as fp:
            pickle.dump(my_list, fp)
def main():
    """CLI entry point: load the config file and dump the fold index file."""
    arg_parser = argparse.ArgumentParser(
        description='Get Splits File'
    )
    arg_parser.add_argument('config', type=str, help='Path to config file.')
    parsed = arg_parser.parse_args()

    settings = config.load_config(parsed.config, 'configurations/config_local/default.yaml')
    splitter = Splitter(settings)

    # Only the random splitting strategy is implemented here.
    if settings['splitting']['split'] == 'random':
        splitter._get_random_split()


if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,964
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/e3nn_simple.py
|
import torch
from e3nn.o3 import rand_rot
from e3nn.networks import (
GatedConvParityNetwork,
GatedConvNetwork,
ImageS2Network,
S2ConvNetwork,
S2ParityNetwork,
)
class SumNetwork(torch.nn.Module):
    """Wraps a GatedConvNetwork and sums its output over the atom axis."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.network = GatedConvNetwork(*args, **kwargs)

    def forward(self, *args, **kwargs):
        # Collapse the per-atom dimension (N) to get one vector per sample.
        out = self.network(*args, **kwargs)
        return out.sum(-2)  # Sum over N
class MySumNetwork(torch.nn.Module):
    # NOTE(review): this class looks like an unfinished draft — it references
    # names that are neither defined nor imported in this file (`nn`, `mlp_h`,
    # `avg_n_atoms`, `middle`, `final_out` (lowercase), `lmax`, `device`, `F`,
    # and `self.layers`), so it cannot run as written. Kept verbatim; confirm
    # intent before use.
    def __init__(self, final_out):
        super().__init__()
        self.final_out = final_out
        self.leakyrelu = nn.LeakyReLU(0.2) # Relu
        # NOTE(review): `mlp_h` and `avg_n_atoms` are undefined here — they
        # were presumably meant to come from a config or constructor args.
        self.e_out_1 = nn.Linear(mlp_h, mlp_h)
        self.bn_out_1 = nn.BatchNorm1d(avg_n_atoms)
        self.e_out_2 = nn.Linear(mlp_h, 2 * mlp_h)
        self.bn_out_2 = nn.BatchNorm1d(avg_n_atoms)
    def forward(self, features, geometry):
        # NOTE(review): `self.layers` is never created in __init__.
        embedding = self.layers[0]
        features = embedding(features)
        # Scalar-only (l=0) representations in, hidden, and out.
        Rs_in = [(1, 0)]
        Rs_hidden = [(middle, 0)]
        Rs_out = [(final_out, 0)]
        # NOTE(review): a fresh SumNetwork is built on every forward pass, so
        # its weights are not registered with this module and never trained.
        f = SumNetwork(Rs_in, Rs_hidden, Rs_out, lmax)
        f = f.to(device)
        features = torch.tensor(features).to(self.device).long()
        features = embedding(features).to(self.device)
        features = features.squeeze(2)
        features = f(features, geometry)
        # L2 pooling over the atom dimension.
        features = F.lp_pool2d(features,norm_type=2,
        kernel_size=(features.shape[1], 1),
        ceil_mode=False,)
        features = self.leakyrelu(self.bn_out_1(self.e_out_1(features)))
        features = self.leakyrelu(self.bn_out_2(self.e_out_2(features)))
        features = F.lp_pool2d(features,norm_type=2,
        kernel_size=(features.shape[1], 1),
        ceil_mode=False,)
        features = features.squeeze(1)
        print("feat final shape", features.shape)
        return features # shape ?
class MyS2convNetwork(torch.nn.Module):
    # NOTE(review): unfinished draft — `nn`, `mlp_h`, `avg_n_atoms` and
    # `self.layers` are undefined in this file, and the layers created in
    # __init__ (e_out_*, bn_out_*) are never used in forward. Kept verbatim;
    # confirm intent before use.
    def __init__(self, final_out):
        super().__init__()
        self.final_out = final_out
        self.leakyrelu = nn.LeakyReLU(0.2) # Relu
        self.e_out_1 = nn.Linear(mlp_h, mlp_h)
        self.bn_out_1 = nn.BatchNorm1d(avg_n_atoms)
        self.e_out_2 = nn.Linear(mlp_h, 2 * mlp_h)
        self.bn_out_2 = nn.BatchNorm1d(avg_n_atoms)
    def forward(self, features, geometry):
        embedding = self.layers[0] #?
        features = embedding(features)
        lmax = 3
        # One multiplicity of each parity-1 irrep up to lmax.
        Rs = [(1, l, 1) for l in range(lmax + 1)]
        # NOTE(review): the S2ConvNetwork is rebuilt (untrained) on every call.
        model = S2ConvNetwork(Rs, 4, Rs, lmax)
        features = model(features, geometry)
        return features # shape ?
def test_s2conv_network():
    """Equivariance check: rotating input features/geometry must rotate the
    S2ConvNetwork output correspondingly.

    NOTE(review): uses `rs` and `o3`, which are not imported in this file
    (only `rand_rot` is imported from e3nn.o3) — the test cannot run as is.
    """
    torch.set_default_dtype(torch.float64)
    lmax = 3
    # One multiplicity of each parity-1 irrep up to lmax.
    Rs = [(1, l, 1) for l in range(lmax + 1)]
    model = S2ConvNetwork(Rs, 4, Rs, lmax)
    features = rs.randn(1, 4, Rs)
    geometry = torch.randn(1, 4, 3)
    output = model(features, geometry)
    angles = o3.rand_angles()
    # Representation acting on the features and the (negated) rotation on coords.
    D = rs.rep(Rs, *angles, 1)
    R = -o3.rot(*angles)
    ein = torch.einsum
    # Rotate inputs, run the model, rotate the output back; must match `output`.
    output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))
    assert (output - output2).abs().max() < 1e-10 * output.abs().max()
def main():
    """Train SumNetwork on the tetris toy dataset, then report the
    equivariance error on a second draw of the dataset."""
    torch.set_default_dtype(torch.float64)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    tetris, labels = get_dataset()
    tetris = tetris.to(device)
    labels = labels.to(device)

    # Scalar input, mixed-l hidden layers, one scalar output per class.
    Rs_in = [(1, 0)]
    Rs_hidden = [(16, 0), (16, 1), (16, 2)]
    Rs_out = [(len(tetris), 0)]
    lmax = 3

    model = SumNetwork(Rs_in, Rs_hidden, Rs_out, lmax).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)

    # Constant scalar feature of 1 per atom; geometry carries the signal.
    feature = tetris.new_ones(tetris.size(0), tetris.size(1), 1)

    for step in range(50):
        out = model(feature, tetris)
        loss = torch.nn.functional.cross_entropy(out, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        acc = out.argmax(1).eq(labels).double().mean().item()
        print("step={} loss={} accuracy={}".format(step, loss.item(), acc))

    out = model(feature, tetris)

    # A fresh dataset draw should give (near-)identical outputs if the model
    # is equivariant.
    r_tetris, _ = get_dataset()
    r_tetris = r_tetris.to(device)
    r_out = model(feature, r_tetris)
    print('equivariance error={}'.format((out - r_out).pow(2).mean().sqrt().item()))


if __name__ == '__main__':
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,965
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/utils/checkpoint.py
|
import torch
import pandas as pd
import os
def save_checkpoint_feature(checkpoint_path, idx_max_length, max_length, idx_write):
    """Persist feature-extraction progress so an interrupted run can resume.

    Saves the current max-length bookkeeping and write index to
    *checkpoint_path* via torch.save.
    """
    torch.save(
        {
            'idx_max_length': idx_max_length,
            'max_length': max_length,
            'idx_write': idx_write,
        },
        checkpoint_path,
    )
def folds_checkpoint(file_folds_checkpoint_path, type_fold):
    """Return the fold index at which to resume training for *type_fold*.

    Creates the checkpoint CSV (columns: type_fold, fold_no) if missing,
    registers *type_fold* with fold 0 on first sight, and returns the
    stored fold number.
    """
    if not os.path.exists(file_folds_checkpoint_path):
        data_folds_checkpoint = pd.DataFrame(columns=['type_fold', 'fold_no'])
        data_folds_checkpoint.to_csv(file_folds_checkpoint_path, index=False)
    data_checkpoint = pd.read_csv(file_folds_checkpoint_path)
    data_selected = data_checkpoint.loc[(data_checkpoint['type_fold'] == type_fold)]
    if (data_selected.empty):
        # DataFrame.append was removed in pandas 2.0 -> use concat instead.
        new_row = pd.DataFrame([{'type_fold': type_fold, 'fold_no': 0}])
        data_checkpoint = pd.concat([data_checkpoint, new_row], ignore_index=True)
        data_checkpoint.to_csv(file_folds_checkpoint_path, index=False)
        start_idx_fold = 0
    else:
        start_idx_fold = int(data_selected['fold_no'].to_list()[0])
    # Bug fix: the original computed start_idx_fold but never returned it.
    return start_idx_fold
class Checkpoint_Fold():
    """CSV-backed checkpoint mapping a fold type to the fold index to resume at.

    The backing file has columns (type_fold, fold_no); one row per fold type.
    """

    def __init__(self, file_folds_checkpoint_path, type_fold):
        self.file_folds_checkpoint_path = file_folds_checkpoint_path
        self.type_fold = type_fold
        # Create the checkpoint file with the expected header on first use.
        if not os.path.exists(self.file_folds_checkpoint_path):
            data_folds_checkpoint = pd.DataFrame(columns=['type_fold','fold_no'])
            data_folds_checkpoint.to_csv(self.file_folds_checkpoint_path, index=False)
        self.data_checkpoint = pd.read_csv(self.file_folds_checkpoint_path)
        self.data_selected = self.data_checkpoint.loc[(self.data_checkpoint['type_fold'] == type_fold)]

    def _get_current_fold(self):
        """Return the stored fold index for this type, registering 0 if unseen."""
        if (self.data_selected.empty):
            print("yes, empty!!!")
            # DataFrame.append was removed in pandas 2.0 -> use concat instead.
            new_row = pd.DataFrame([{'type_fold': self.type_fold, 'fold_no': 0}])
            self.data_checkpoint = pd.concat([self.data_checkpoint, new_row], ignore_index=True)
            self.data_checkpoint.to_csv(self.file_folds_checkpoint_path, index=False)
            start_idx_fold = 0
        else:
            start_idx_fold = int(self.data_selected['fold_no'].to_list()[0])
        return start_idx_fold

    def write_checkpoint(self, idx_fold):
        """Persist *idx_fold* as the resume point for this fold type."""
        self.data_checkpoint.loc[(self.data_checkpoint['type_fold'] == self.type_fold), 'fold_no'] = idx_fold
        print("data_selected", self.data_checkpoint)
        self.data_checkpoint.to_csv( self.file_folds_checkpoint_path, index=False)
class Checkpoint_Eval():
    """CSV-backed checkpoint for the evaluation loop.

    One row per (type_fold, sampling) pair records where the recording and
    evaluation phases should resume: fold/epoch for each, plus a pdb index.
    """

    def __init__(self, path_checkpoint_evaluator, type_fold, sampling):
        self.path_checkpoint_evaluator = path_checkpoint_evaluator
        self.type_fold = type_fold
        self.sampling = sampling
        # Create the checkpoint file with the expected header on first use.
        if not os.path.exists(self.path_checkpoint_evaluator):
            self.data_checkpoint = pd.DataFrame(columns=['type_fold','sampling','start_rec_fold','start_rec_epoch','start_eval_fold','start_eval_epoch', 'start_pdb'])
            self.data_checkpoint.to_csv(self.path_checkpoint_evaluator, index=False)
        self.data_checkpoint = pd.read_csv(self.path_checkpoint_evaluator)
        self.data_selected = self.data_checkpoint.loc[(self.data_checkpoint['type_fold'] == self.type_fold) & (self.data_checkpoint['sampling'] == self.sampling)]

    def _get_data(self):
        """Return (start_rec_fold, start_rec_epoch, start_eval_fold,
        start_eval_epoch), registering a zeroed row for this
        (type_fold, sampling) pair if unseen."""
        if (self.data_selected.empty):
            # DataFrame.append was removed in pandas 2.0 -> use concat instead.
            new_row = pd.DataFrame([{'type_fold': self.type_fold, 'sampling': self.sampling,
                                     'start_rec_fold': 0, 'start_rec_epoch': 0, 'start_eval_fold': 0, 'start_eval_epoch': 0, 'start_pdb': 0}])
            self.data_checkpoint = pd.concat([self.data_checkpoint, new_row], ignore_index=True)
            self.data_checkpoint.to_csv(self.path_checkpoint_evaluator, index=False)
            self.start_rec_fold = 0
            self.start_rec_epoch = 0
            self.start_eval_fold = 0
            self.start_eval_epoch = 0
        else:
            self.start_rec_fold = int(self.data_selected['start_rec_fold'].to_list()[0])
            self.start_rec_epoch = int(self.data_selected['start_rec_epoch'].to_list()[0])
            self.start_eval_fold = int(self.data_selected['start_eval_fold'].to_list()[0])
            self.start_eval_epoch = int(self.data_selected['start_eval_epoch'].to_list()[0])
        return self.start_rec_fold, self.start_rec_epoch, self.start_eval_fold, self.start_eval_epoch

    def write_record_checkpoint(self, idx_fold, epoch):
        """Persist the recording-phase resume point (stored as the NEXT fold/epoch)."""
        sel = (self.data_checkpoint['type_fold'] == self.type_fold) & (self.data_checkpoint['sampling'] == self.sampling)
        self.data_checkpoint.loc[sel, 'start_rec_fold'] = idx_fold + 1
        self.data_checkpoint.loc[sel, 'start_rec_epoch'] = epoch + 1
        self.data_checkpoint.to_csv(self.path_checkpoint_evaluator, index=False)

    def write_eval_checkpoint(self, idx_fold, epoch):
        """Persist the evaluation-phase resume point (stored as the NEXT fold/epoch)."""
        sel = (self.data_checkpoint['type_fold'] == self.type_fold) & (self.data_checkpoint['sampling'] == self.sampling)
        self.data_checkpoint.loc[sel, 'start_eval_fold'] = idx_fold + 1
        self.data_checkpoint.loc[sel, 'start_eval_epoch'] = epoch + 1
        self.data_checkpoint.to_csv(self.path_checkpoint_evaluator, index=False)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,966
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/datasets/data_loader.py
|
import os
import re
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from moleculekit.molecule import Molecule
# from moleculekit.smallmol.smallmol import SmallMol
from torch import nn
from torch.utils.data import DataLoader, Dataset
# import dictionary of atoms' types and hot encoders
from datasets.dictionaries import (atom_most_common, dict_atoms_hot,
dict_atoms_simple)
# number_atoms_unique = 22
class Pdb_Dataset(Dataset):
    """PDB binding dataset.

    Yields, per protein-ligand complex: padded atom features, padded atom
    coordinates, a binary padding mask, and the tokenized SMILES caption of
    the ligand wrapped in <start>/<end> vocabulary indices.
    """
    def __init__(self, cfg, vocab):
        """uses cfg file which is given as arg in "python train_server.py"
        """
        self.path_root = cfg['preprocessing']['path_root']
        self.init_refined = self.path_root + "/data/new_refined/"
        # self.init_refined = path_root + "/data/refined_26.05/"
        self.init_casf = self.path_root + "/data/new_core_2016/"
        # self.init_casf = path_root + "/data/core_26.05/"
        # self.labels = self.read_labels(self.path_root + "/data/labels/labels.csv")
        self.vocab = vocab
        # self.labels_all = self._get_labels_refined_core(
        #     self.path_root + "/data/labels/new_labels_core_2016.csv",
        #     self.path_root + "/data/labels/new_labels_refined.csv",
        # )
        ##################refined files###################
        self.files_refined = os.listdir(self.init_refined)
        # Keep only PDB-id-style entries (names start with a digit); filters
        # out stray files such as .DS_Store.
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        # self.files_refined.remove(".DS_Store")
        ##################################################
        self.len_files = len(self.files_refined)
        ###################core files#####################
        self.files_core = os.listdir(self.init_casf)
        self.files_core.sort()
        ##################################################
        # Atom-encoding dictionaries imported from datasets.dictionaries.
        self.dict_atoms = dict_atoms_hot
        self.dict_atoms_simple = dict_atoms_simple
        self.dict_words = atom_most_common
        self.set_atoms = []
        self.encoding = {}
        self.label_protein = np.array([5.0])  # identification of pocket
        self.label_ligand = np.array([-5.0])  # identification of ligand
        self.features_complexes = []  # tensors of euclidean features
        self.affinities_complexes = []  # targets
        self.common_atoms = ["C", "H", "O", "N", "S"]
        # self.type_filtering = "filtered"
        self.type_filtering = cfg['preprocessing']['selection']  # "filtered"
        self.mask = cfg['preprocessing']['mask']
        # Fixed number of atoms every complex is padded/truncated to.
        self.len_padding = cfg['preprocessing']['natoms']
        print("filtering", self.type_filtering)

    def __len__(self):
        #!!!!!!!!!!!!!!!!
        # NOTE(review): dataset size is capped at 20 complexes, apparently for
        # quick experiments — confirm before a full training run.
        return 20
        # return len(self.files_refined) - 3 # from the lab:

    def smi_tokenizer(self, smi):
        """
        Tokenize a SMILES molecule or reaction
        """
        # The regex splits a SMILES string into chemically meaningful tokens:
        # bracket atoms, two-letter halogens, single-letter atoms, bonds,
        # ring-closure digits, etc.
        pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
        regex = re.compile(pattern)
        tokens = [token for token in regex.findall(smi)]
        return tokens

    def __getitem__(self, idx: int):
        """Return (features, geometry, mask, target) for complex *idx*.

        The target is the ligand SMILES as vocabulary indices, wrapped in
        <start>/<end> tokens.
        """
        vocab = self.vocab
        all_features, masks = self._get_features_complex(idx)
        all_geometry = self._get_geometry_complex(idx)
        # print("shape all geom", all_geometry.shape)
        caption_raw = self._get_caption(idx)
        # tokens = [token for token in caption_raw]
        tokens = self.smi_tokenizer(caption_raw)
        caption = []
        caption.append(vocab("<start>"))
        # print("caption of start", vocab('<start>'))
        caption.extend([vocab(token) for token in tokens])
        caption.append(vocab("<end>"))
        target = torch.Tensor(caption)
        return all_features, all_geometry, masks, target
        # if(self.mask == 'True'):
        #     return all_features, all_geometry, masks, target
        # else:
        #     return all_features, all_geometry, target

    def _get_name_protein(self, idx: int):
        """Return the PDB-id directory name of complex *idx*."""
        name_protein = self.files_refined[idx]
        return name_protein

    def _get_caption(self, id):
        """get caption as a row of a smile by id
        """
        protein_name = self.files_refined[id]
        # print("current protein", protein_name)
        path_to_smile = os.path.join(
            self.init_refined, protein_name, protein_name + "_ligand.smi"
        )
        with open(path_to_smile, "r") as file:
            caption = file.read()
        return caption

    # def get_caption(self, idx: int):
    #     protein_name = self.files_refined[protein_id]
    #     path_ligand_caption = os.path.join(
    #         self.init_refined, protein_name, protein_name + "_ligand.txt"
    #     )
    #     ligand_caption = loadtxt(path_ligand_caption, delimiter=",", unpack=False)
    #     return ligand_caption

    def _get_path(self, protein_id: int):
        """ get a full path to pocket/ligand
        """
        protein_name = self.files_refined[protein_id]
        # print("current protein", protein_name)
        path_pocket = os.path.join(
            self.init_refined, protein_name, protein_name + "_pocket.pdb"
        )
        return path_pocket

    def _get_elems(self, protein_id: int, type_filtering: str):
        """ gives np.array of elements for a pocket and a ligand in one complex
        Parameters
        ----------
        protein_id : str
                     id of a complex
        """
        path_pocket = self._get_path(protein_id)
        try:
            mol_pocket = Molecule(path_pocket)
            # mol_ligand = Molecule(path_ligand)
            if type_filtering == "filtered":
                # Keep only the five most common organic elements.
                mol_pocket_element = [
                    elem
                    for elem in mol_pocket.element
                    if elem in ["C", "H", "N", "O", "S"]
                ]
                # mol_ligand_element = [elem for elem in mol_ligand.element if elem in ["C", "H", "N", "O", "S"]]
            elif type_filtering == "all":
                mol_pocket_element = mol_pocket.element
                # mol_ligand_element = mol_ligand.element
        except FileNotFoundError:
            # NOTE(review): on a missing pocket file this silently falls back
            # to complex index 2 (unfiltered) — confirm this is intended.
            print(protein_id, " exception")
            path_pocket = self._get_path(2)
            mol_pocket = Molecule(path_pocket)
            mol_pocket_element = mol_pocket.element
        # print("mol_ligand_element", mol_ligand.element)
        return mol_pocket_element

    def atom_to_vector(self, elem: str):
        """ creates a hot vector of an atom
        Parameters
        ----------
        elem : str atom element
        """
        return self.dict_words[elem]
        # return self.dict_atoms[elem]
        # return self.dict_atoms[elem]

    def coords_to_tensor(self, coords: np.array):
        """ creates a tensor of coords
        Parameters
        ----------
        coords : array of coords of n atoms [n, 3]
        """
        return torch.tensor(coords)

    def _get_feature_vector_atom(self, elem: str, type_atom: str, type_filtering: str):
        """creates a tensor-feature vector concatenating label of protein/ligand and hot vector
        Parameters
        ----------
        elem : str atom element
        """
        hot_vector_atom = self.atom_to_vector(elem)
        if type_atom == "pocket":
            if type_filtering == "filtered":
                feature_vector_atom = self.dict_words[elem]
                feature_vector_atom = np.array([feature_vector_atom])
            elif type_filtering == "all":
                # NOTE(review): the concatenation result is immediately
                # overwritten on the next line, so the protein label is
                # discarded in the "all" branch — confirm which is intended.
                feature_vector_atom = np.concatenate(
                    (self.label_protein, hot_vector_atom)
                )
                feature_vector_atom = np.array([hot_vector_atom])
                # print("feat_atom", feature_vector_atom)
                # feature_vector_atom = np.concatenate((self.label_protein, hot_vector_atom))
                # feature_vector_atom = np.array([hot_vector_atom])
                # print("feat vector", feature_vector_atom)
                # print("feat_atom_lig", feature_vector_atom)
                # print("feature_lig", feature_vector_atom)
                # print("feat vector", feature_vector_atom)
        else:
            raise ValueError("type of atom should be pocket or ligand")
        # feature_tensor_atom = torch.from_numpy(feature_vector_atom)
        # return feature_tensor_atom
        # print(type(feature_vector_atom))
        return feature_vector_atom
        # return hot_vector_atom

    def _get_features_unit(
        self, elements: np.array, type_atom: str, type_filtering: str
    ):
        """creates a union of tensors-features of an atoms' array at particlular biological unit: pocket/ligand
        Parameters
        ----------
        elements : np.array
                   elements of protein/ligand
        type_atom : char
                    type of a biological unit: pocket/ligand
        Returns
        -------
        list_features_tensors : list
                                The list of features-tensors
        """
        list_features_tensors = []
        for elem in elements:
            tensor_feature = self._get_feature_vector_atom(
                elem, type_atom, type_filtering
            )
            list_features_tensors.append(tensor_feature)
        # features = torch.cat(list_features_tensors, dim=-1)
        return list_features_tensors

    def _get_features_dict(self, elements: np.array, type_atom: str):
        """creates a dictionary of atoms' features of a particular bio unit (protein/ligand)
        Parameters
        ----------
        id : str
             id of a complex
        Returns
        -------
        dict : 'O' : torch.tensor([2,2,2,2]) - tensor.size = number of 'O' in protein, 2 - positive encoding of atom 'O' in protein
               'Na': torch.tensor([5,5]) - tensor.size = number of 'Na' in protein, 5 - positive encoding of atom 'Na' in protein
               'Pb': torch.tensor([-3,-3,-3,-3]) - tensor.size = number of 'Pb' in ligand, -3 - negative encoding of atom 'Pb' in ligand
               ..................................................................................
        """
        # NOTE(review): stub — always returns an empty dict.
        dict_atoms_feat = {}
        return dict_atoms_feat

    def _get_features_complex(self, id: int):
        """creates a tensor of all features in complex (pocket AND ligand)
        Parameters
        ----------
        id : str
             id of a complex
        Returns
        -------
        type_filtering: all
        tensor : torch.tensor [1, n, 23]
                 The tensor of all n atoms' features:
                 1 | 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - pocket
        type_filtering: filtered
                 The tensor of all n atoms' features:
                 (atoms are encoded from 0 to 4 - ["C", "H", "O", "N", "S"] for pocket)
                 1 1 2 4 4 - pocket
                 n atoms are padded then till max_length with the constant 5
        """
        elem_pocket = self._get_elems(id, self.type_filtering)
        # coord_pocket, coord_ligand = self._get_coord(id)
        features_pocket_part = self._get_features_unit(
            elem_pocket, "pocket", self.type_filtering
        )
        # features_ligand_part = self._get_features_unit(elem_ligand, "ligand", self.type_filtering)
        # features_all = features_pocket_part + features_ligand_part
        tensor_all_features = torch.tensor(
            features_pocket_part, dtype=torch.long
        ).unsqueeze(0)
        # Pad the atom dimension up to len_padding with the value 5.
        length_padding = self.len_padding - tensor_all_features.shape[1]
        result = F.pad(
            input=tensor_all_features,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value=5,
        )
        # 1 for real atoms, 0 for padded positions.
        mask_binary = torch.cat([torch.ones(tensor_all_features.shape[1]), torch.zeros(length_padding)])
        # print("feature shape")
        # print(result.shape)
        # print(result)
        result = result.squeeze(0)
        return result, mask_binary
        # return result, elem_pocket, elem_ligand
        # return tensor_all_features

    def _get_geometry_complex(self, id: int):
        """creates a tensor of all geometries (coordinates) in complex (pocket AND ligand)
        Parameters
        ----------
        id : str
             id of a complex
        Returns
        -------
        tensor_all_atoms_coords : torch.tensor [1, n, 3]
                                  The tensor of coords-tensors
        """
        coords_pocket = self._get_coord(id, self.type_filtering)
        list_geom_tensors = []
        all_atoms_coords = np.asarray(coords_pocket)
        tensor_all_atoms_coords = (
            torch.from_numpy(all_atoms_coords).squeeze().unsqueeze(0)
        )
        # Pad coordinates to len_padding atoms with the sentinel value 99.
        length_padding = self.len_padding - tensor_all_atoms_coords.shape[1]
        result = F.pad(
            input=tensor_all_atoms_coords,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value=99,
        )
        # print("goemetry shape")
        # print(result.shape)
        result = result.squeeze(0)
        return result
        # return result, tensor_all_atoms_coords.shape[1]
        # return tensor_all_atoms_coords

    def _get_coord(self, protein_id: int, type_filtering: str):
        """ gives np.array of coordinates for a pocket and a ligand in one complex
        Parameters
        ----------
        protein_id : str
                     id of a complex
        """
        path_pocket = self._get_path(protein_id)
        mol_pocket = Molecule(path_pocket)
        # print("protein coords", mol_pocket.coords)
        if type_filtering == "all":
            coords_pocket = mol_pocket.coords
        elif type_filtering == "filtered":
            # Keep coordinates only for atoms whose element is in common_atoms,
            # mirroring the filtering applied in _get_elems.
            prot_idxs = [
                idx
                for idx, elem in enumerate(mol_pocket.element)
                if elem in self.common_atoms
            ]
            coords_pocket = [
                element for i, element in enumerate(mol_pocket.coords) if i in prot_idxs
            ]
        # lig_idxs = [idx for idx, elem in enumerate(mol_ligand.element) if elem in self.common_atoms]
        # coords_ligand = [element for i, element in enumerate(mol_ligand.coords) if i in lig_idxs]
        # lig_idxs = [idx for idx, elem in enumerate(mol_ligand.element) if elem in ["C", "H", "N", "O", "S"]]
        # lig_coords = [element for i, element in enumerate(mol_ligand.coords) if i in lig_idxs]
        # return mol_pocket.coords, mol_ligand.coords
        return coords_pocket
def collate_fn(data):
    """Creates mini-batch tensors from a list of (features, geometry, mask,
    caption) tuples.

    We build a custom collate_fn rather than using the default one because
    merging captions (which requires padding) is not supported by default.

    Args:
        data: list of tuples (features, geometry, mask, caption).
            - features: torch tensor of shape (n_atoms, feat_dim).
            - geometry: torch tensor of shape (n_atoms, 3).
            - mask: torch tensor of shape (n_atoms,).
            - caption: torch tensor of variable length.

    Returns:
        features: torch tensor of shape (batch_size, n_atoms, feat_dim).
        geometry: torch tensor of shape (batch_size, n_atoms, 3).
        masks: torch tensor of shape (batch_size, n_atoms).
        targets: torch tensor of shape (batch_size, padded_length).
        lengths: list; valid length for each padded caption.
    """
    # Sort by caption length, descending. The caption is element 3 of each
    # sample tuple; the original sorted on element 2 (the mask) by mistake.
    data.sort(key=lambda x: len(x[3]), reverse=True)
    # Bug fix: the original referenced `self.mask` (undefined at module
    # level), unpacked 4-tuples into 3 names, and returned an undefined
    # `mask`; unpack and return all four components instead.
    features, geometry, masks, captions = zip(*data)
    features = torch.stack(features, 0)
    geometry = torch.stack(geometry, 0)
    masks = torch.stack(masks, 0)
    # Pad the captions with zeros up to the longest caption in the batch.
    lengths = [len(cap) for cap in captions]
    targets = torch.zeros(len(captions), max(lengths)).long()
    for i, cap in enumerate(captions):
        end = lengths[i]
        targets[i, :end] = cap[:end]
    return features, geometry, masks, targets, lengths
def collate_fn_masks(data):
    """Collate (features, geometry, mask, caption) tuples into batch tensors.

    Captions vary in length, so they are zero-padded up to the batch
    maximum; `lengths` records each caption's true length.
    """
    # Longest caption first (descending order).
    data.sort(key=lambda sample: len(sample[3]), reverse=True)
    features, geometry, masks, captions = zip(*data)

    # Stack the fixed-size per-sample tensors along a new batch dimension.
    features = torch.stack(features, 0)
    geometry = torch.stack(geometry, 0)
    masks = torch.stack(masks, 0)

    # Merge variable-length captions into one zero-padded 2D tensor.
    lengths = [len(cap) for cap in captions]
    targets = torch.zeros(len(captions), max(lengths)).long()
    for row, cap in enumerate(captions):
        valid = lengths[row]
        targets[row, :valid] = cap[:valid]
    return features, geometry, masks, targets, lengths
def get_loader(cfg, vocab, batch_size, shuffle, num_workers):
    """Build a ``torch.utils.data.DataLoader`` over the PDB caption dataset.

    Each iteration yields (features, geometry, targets, lengths) as produced
    by ``collate_fn``:
      - features: (batch_size, n_atoms, hidden_dim)
      - geometry: (batch_size, n_atoms, 3)
      - targets:  (batch_size, padded_length)
      - lengths:  list of valid caption lengths, one per sample.
    """
    loader_kwargs = {
        "dataset": Pdb_Dataset(cfg=cfg, vocab=vocab),
        "batch_size": batch_size,
        "shuffle": shuffle,
        "num_workers": num_workers,
        # Custom collate: default collation cannot pad variable-length captions.
        "collate_fn": collate_fn,
    }
    return torch.utils.data.DataLoader(**loader_kwargs)
# Ad-hoc smoke test: load the dataset from a hard-coded local mount and query
# the padding lengths of the "refined" subset.
if __name__ == "__main__":
    # DATA_PATH = os.path.realpath(os.path.dirname(__file__))
    # NOTE(review): hard-coded developer path (macOS volume mount) — this
    # entry point only works on that machine; confirm before relying on it.
    DATA_PATH = "/Volumes/Ubuntu"
    featuriser = Pdb_Dataset(DATA_PATH)
    # NOTE(review): presumably returns per-sample lengths used to size the
    # padding for the "refined" split — TODO confirm against Pdb_Dataset.
    lengthes = featuriser._get_length_padding("refined")
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,967
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/e3nn_vis.py
|
from functools import partial
import torch
from torch import nn as nn
from e3nn.point.kernelconv import KernelConv
from e3nn.radial import CosineBasisModel, GaussianRadialModel, BesselRadialModel
from e3nn.non_linearities import rescaled_act
from e3nn.non_linearities.gated_block import GatedBlock
from e3nn.rsh import spherical_harmonics_xyz
from model.encoder.base import Aggregate
import torch.nn.functional as F
CUSTOM_BACKWARD = False
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def create_kernel_conv(cutoff, n_bases, n_neurons, n_layers, act, radial_model):
    """Build a ``KernelConv`` factory bound to the requested radial basis.

    Args:
        cutoff: maximum radius of the radial basis.
        n_bases: number of radial basis functions.
        n_neurons: hidden width of the radial network.
        n_layers: number of layers of the radial network.
        act: activation used inside the radial network.
        radial_model: one of "cosine", "gaussian" or "bessel".

    Returns:
        A ``functools.partial`` over ``KernelConv`` with ``RadialModel`` bound.

    Raises:
        ValueError: if ``radial_model`` is not a supported name.
    """
    # All three radial models share the same constructor signature, so only
    # the class differs per branch (deduplicates the three partial() calls).
    if radial_model == "cosine":
        basis_cls = CosineBasisModel
    elif radial_model == "gaussian":
        basis_cls = GaussianRadialModel
    elif radial_model == "bessel":
        basis_cls = BesselRadialModel
    else:
        # BUG FIX: the old message omitted the supported "bessel" option.
        raise ValueError(
            "radial_model must be one of 'cosine', 'gaussian' or 'bessel'"
        )
    RadialModel = partial(
        basis_cls,
        max_radius=cutoff,
        number_of_basis=n_bases,
        h=n_neurons,
        L=n_layers,
        act=act,
    )
    return partial(KernelConv, RadialModel=RadialModel)
def constants(geometry, mask):
    """Compute pairwise displacement vectors and distances of a point cloud.

    Args:
        geometry: atom positions, shape (batch, n_atoms, 3).
        mask: atom validity mask, passed through unchanged.

    Returns:
        Tuple (mask, diff_geo, radii) where diff_geo[b, a, j] is the
        displacement from atom a to atom j (double precision, detached) and
        radii holds its Euclidean norm.
    """
    # Broadcasting (batch, 1, n, 3) against (batch, n, 1, 3) produces the
    # full pairwise difference tensor in a single subtraction.
    pairwise_diff = (geometry.unsqueeze(1) - geometry.unsqueeze(2)).double().detach()
    distances = pairwise_diff.norm(2, dim=-1).detach()
    return mask, pairwise_diff, distances
class Network_Vis(torch.nn.Module):
    """E(3)-equivariant point-cloud encoder (visualisation variant).

    Embeds discrete atom-type features, applies ``L`` gated equivariant
    kernel convolutions, then runs a small batch-normed MLP head over the
    per-atom features.
    """
    def __init__(self, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
            embed, l0, L, scalar_act_name, gate_act_name, natoms, mlp_h, Out, aggregation_mode):
        super().__init__()
        self.natoms = natoms #286
        self.ssp = rescaled_act.ShiftedSoftplus(beta = beta)
        self.sp = rescaled_act.Softplus(beta=beta)
        self.l0 = l0
        # NOTE(review): scalar_act / gate_act are only bound when the config
        # names are exactly "sp" and "sigmoid"; any other value leaves them
        # undefined and make_layer below fails with NameError — confirm the
        # configs only ever use these two names.
        if(scalar_act_name == "sp"):
            scalar_act = self.sp
        if(gate_act_name == "sigmoid"):
            gate_act = rescaled_act.sigmoid
        # Representation plan: scalar embedding -> L intermediate layers with
        # multiplicity l0 -> Out scalar output layers of width mlp_h.
        Rs = [[(embed, 0)]]
        Rs_mid = [(mul, l) for l, mul in enumerate([l0])]
        Rs += [Rs_mid] * L
        Rs += [[(mlp_h, 0)]] * Out
        self.Rs = Rs
        self.device = DEVICE
        if aggregation_mode == "sum":
            self.atom_pool = Aggregate(axis=1, mean=False)
        elif aggregation_mode == "avg":
            self.atom_pool = Aggregate(axis=1, mean=True)
        # NOTE(review): the num_embeddings constructor argument is ignored —
        # the vocabulary size is hard-coded to 6 here; confirm intentional.
        self.num_embeddings = 6
        self.RadialModel = partial(
            CosineBasisModel,
            max_radius=max_rad,
            number_of_basis=num_basis,
            h=n_neurons,
            L=n_layers,
            act=self.ssp
        )
        # kernel_conv = create_kernel_conv(max_rad, num_basis, n_neurons, n_layers, self.ssp, rad_model)
        self.kernel_conv = partial(KernelConv, RadialModel=self.RadialModel)
        def make_layer(Rs_in, Rs_out):
            # One equivariant layer: kernel convolution followed by a gated
            # nonlinearity sized to the convolution's output representation.
            act = GatedBlock(Rs_out, scalar_act, gate_act)
            kc = self.kernel_conv(Rs_in, act.Rs_in)
            return torch.nn.ModuleList([kc, act])
        # layers[0] embeds integer atom types (index 5 is the padding id);
        # the remaining entries are (KernelConv, GatedBlock) pairs.
        self.layers = torch.nn.ModuleList([torch.nn.Embedding(self.num_embeddings, embed, padding_idx=5)])
        self.layers += [make_layer(rs_in, rs_out) for rs_in, rs_out in zip(Rs, Rs[1:])]
        self.leakyrelu = nn.LeakyReLU(0.2) # Relu
        # Output head: two Linear+BatchNorm+LeakyReLU stages over natoms rows.
        self.e_out_1 = nn.Linear(mlp_h, mlp_h)
        self.bn_out_1 = nn.BatchNorm1d(natoms)
        self.e_out_2 = nn.Linear(mlp_h, 2 * mlp_h)
        self.bn_out_2 = nn.BatchNorm1d(natoms)
        # NOTE(review): anomaly detection is expensive; presumably left on
        # for debugging — confirm before production runs.
        torch.autograd.set_detect_anomaly(True)
    def forward(self, features, geometry, mask):
        """Encode a batch of masked atom clouds into per-atom features.

        Args:
            features: integer atom-type codes per atom (converted to long).
            geometry: atom positions, shape (batch, n_atoms, 3).
            mask: validity mask over atoms.

        Returns:
            Per-atom feature tensor (printed shapes are debug output).
        """
        mask, diff_geo, radii = constants(geometry, mask)
        embedding = self.layers[0]
        features = torch.tensor(features).to(self.device).long()
        features = embedding(features).to(self.device)
        features = features.squeeze(2)
        # Spherical harmonics are recomputed only when a layer uses a
        # different set of l filters than the previous one.
        set_of_l_filters = self.layers[1][0].set_of_l_filters
        y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
        for kc, act in self.layers[1:]:
            if kc.set_of_l_filters != set_of_l_filters:
                set_of_l_filters = kc.set_of_l_filters
                y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
            # Scale down by sqrt(n_atoms) before each convolution.
            features = features.div(self.natoms ** 0.5).to(self.device)
            features = kc(
                features,
                diff_geo,
                mask,
                y=y,
                radii=radii,
                custom_backward=CUSTOM_BACKWARD
            )
            features = act(features)
            # Zero out padded atoms after every layer.
            features = features * mask.unsqueeze(-1)
        print("features shape after enc", features.shape)
        # out_net = OutputMLPNetwork(kernel_conv=kernel_conv, previous_Rs = self.Rs[-1],
        # l0 = self.l0, l1 = 0, L = 1, scalar_act=sp, gate_act=rescaled_act.sigmoid,
        # mlp_h = 128, mlp_L = 1, natoms = 286)
        # features = out_net(features, geometry, mask)
        features = self.leakyrelu(self.bn_out_1(self.e_out_1(features))) # shape [batch, 2 * cloud_dim * (self.cloud_order ** 2) * nclouds]
        features = self.leakyrelu(self.bn_out_2(self.e_out_2(features)))
        # if self.atomref is not None:
        #     features_z = self.atomref(atomic_numbers)
        #     features = features_z + features
        # features = self.atom_pool(features, mask)
        # features = F.lp_pool2d(features,norm_type=2,
        #                        kernel_size=(features.shape[1], 1),
        #                        ceil_mode=False,)
        features = features.squeeze(1)
        print("feat final shape", features.shape)
        return features # shape ?
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,968
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/decoder/beam_search_att.py
|
def sample_beam_search(self, features, beam_size=3):
    """Decode a caption from encoder features with beam search.

    Keeps the ``beam_size`` highest-scoring partial sequences at every
    decode step, retires a beam once it emits ``<end>``, and returns the
    token-id sequence with the best accumulated log-probability.

    Args:
        features: encoder output for a single item, shape (1, encoder_dim).
        beam_size: number of beams kept alive per decode step.

    Returns:
        List of vocabulary indices of the best sequence (including the
        leading <start> token).
    """
    k = beam_size
    vocab_size = len(self.vocab)
    # Treat decoding as a batch of k identical items.
    features = features.expand(k, features.shape[1])
    # Top-k previous words at each step; initially all <start>.
    k_prev_words = torch.LongTensor(
        [[self.vocab.word2idx['<start>']]] * k).to(self.device)  # (k, 1)
    seqs = k_prev_words  # (k, 1)
    top_k_scores = torch.zeros(k, 1).to(self.device)  # (k, 1)
    # Finished sequences and their scores.
    complete_seqs = list()
    complete_seqs_scores = list()
    step = 1
    h, c = self.init_hidden_state(features)
    # k shrinks as beams finish; the loop ends when every beam hit <end>
    # or the length cap is reached.
    while True:
        embeddings = self.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
        awe, alpha = self.attention(features, h)  # (s, encoder_dim), (s, num_pixels)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = self.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = F.log_softmax(self.fc(h), dim=1)  # (s, vocab_size)
        # Accumulate log-probabilities along each beam.
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)
        if step == 1:
            # All beams are identical on step 1; expand beam 0 only.
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll all beams and take the global top k.
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
        # BUG FIX: use floor division — true division of a LongTensor yields
        # floats on modern PyTorch and cannot be used for indexing.
        prev_word_inds = top_k_words // vocab_size  # (s)
        next_word_inds = top_k_words % vocab_size  # (s)
        # Extend the surviving sequences with their chosen next word.
        seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds)
                           if next_word != self.vocab.word2idx['<end>']]
        complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
        # Set aside completed sequences.
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
        k -= len(complete_inds)  # reduce beam width accordingly
        if k == 0:
            break
        # Reorder all per-beam state to follow the surviving beams.
        seqs = seqs[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        features = features[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
        # Stop runaway decoding.
        if step > MAX_Length:
            break
        step += 1
    # BUG FIX: if no beam emitted <end> before the length cap, fall back to
    # the best still-incomplete sequence instead of crashing on max([]).
    if not complete_seqs_scores:
        return seqs[top_k_scores.view(-1).argmax()].tolist()
    i = complete_seqs_scores.index(max(complete_seqs_scores))
    return complete_seqs[i]
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,969
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/train_all_folds.py
|
import argparse
import json
import multiprocessing
import os
import pickle
# from utils import Utils
import sys
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from numpy import savetxt
from py3nvml import py3nvml
from sklearn.model_selection import KFold
from torch.nn.utils.rnn import pack_padded_sequence
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import src.utils.config as config
from src.datasets.data_loader import (Pdb_Dataset, collate_fn,
collate_fn_masks, get_loader)
from src.datasets.feature import Featuring
from src.datasets.split import Splitter
from src.evaluation.analysis import plot_all
from src.evaluation.evaluator import Evaluator
from src.sampling.sampler import Sampler
from src.training.train_check_att_vis import Trainer_Attention_Check_Vis
from src.training.train_checkpoint import Trainer_Fold
# from src.training.training_feature import Trainer_Fold_Feature
# from src.training.training_feature_att import Trainer_Fold_Feature_Attention
from src.training.trainer import Trainer_Fold_Feature
from src.training.trainer_att import Trainer_Fold_Feature_Attention
from src.training.utils import save_checkpoint_sampling
from src.utils.build_vocab import Vocabulary
from src.utils.checkpoint import Checkpoint_Eval, Checkpoint_Fold
def main():
    """Run the full pipeline: feature generation, per-fold training, PCA
    encoding dumps, evaluation, and final plots.

    Fold progress and the plotting stage are checkpointed on disk so an
    interrupted run resumes at the first unfinished fold/stage.
    """
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.'
    )
    parser.add_argument('--loc', type=str, help='Location of running')
    parser.add_argument('--config', type=str, help='Path to config file.')
    parser.add_argument('--radious', type=int, default=8, help='dimension of word embedding vectors')
    parser.add_argument('--type_feature', type=str, default='mass_charge', help='type_feature')
    parser.add_argument('--type_filtering', type=str, default='all', help='type_filtering')
    parser.add_argument('--h_filterig', type=str, default='without_h', help='h')
    parser.add_argument('--type_fold', type=str, help='type_fold')
    args = parser.parse_args()
    # Pick the defaults file for the current machine.
    if args.loc == 'lab':
        config_file_path = 'configurations/config_lab/default.yaml'
    else:
        config_file_path = 'configurations/config_local/default.yaml'
    cfg = config.load_config(args.config, config_file_path)
    type_fold = args.type_fold
    savedir = cfg["output_parameters"]["savedir"]
    cfg["sampling_params"]["type_fold"] = type_fold
    # Encode the featurisation settings in the model name so runs don't collide.
    model_name = cfg["model_params"]["model_name"] + "_" + args.type_feature + "_" + str(args.radious) + "_" + args.type_filtering + "_" + args.h_filterig
    cfg["model_params"]["model_name"] = model_name
    num_epoches = cfg["model_params"]["num_epochs"]
    # Feature generation.
    print("**********Checking features**************")
    Feature_gen = Featuring(cfg, args.radious, args.type_feature, args.type_filtering, args.h_filterig)
    # Encoder input size is dictated by the longest featurised structure.
    cfg['model']['encoder_kwargs']['natoms'] = Feature_gen.max_length
    print("number of atoms: ", cfg['model']['encoder_kwargs']['natoms'])
    # Fold checkpoint: resume from the first fold not yet completed.
    file_folds_checkpoint_path = os.path.join(savedir, model_name, "checkpoints", "folds.csv")
    os.makedirs(os.path.join(savedir, model_name, "checkpoints"), exist_ok=True)
    checkpoint_fold = Checkpoint_Fold(file_folds_checkpoint_path, type_fold)
    start_idx_fold = checkpoint_fold._get_current_fold()
    pipeline_checkpoint_path = os.path.join(savedir, model_name, "checkpoints", 'pipeline.txt')
    # Create the split index files if this run hasn't produced them yet.
    file_idx_split = os.path.join(cfg['output_parameters']['savedir'], model_name, "logs", "idxs", type_fold)
    print("file_idx_split", file_idx_split)
    if not os.path.exists(file_idx_split):
        print("doing split...")
        splitter = Splitter(cfg)
        splitter.split(type_fold)
    # Training + validation + PCA, per fold.
    for idx_fold in range(start_idx_fold, 2):
        print("Doing Train/Val on the fold - ", idx_fold)
        if (cfg['training_params']['mode'] == "no_attention"):
            trainer = Trainer_Fold_Feature(cfg, idx_fold)
            trainer.train_epochs(Feature_gen)
        elif (cfg['training_params']['mode'] == "attention"):
            trainer = Trainer_Fold_Feature_Attention(cfg, idx_fold)
            trainer.train_epochs(Feature_gen)
        # PCA over the encodings produced by this fold's final model.
        encoder_path = os.path.join(savedir, model_name, "models", "encoder-" + str(idx_fold) + "-" + str(num_epoches) + "-" + str(type_fold) + '.ckpt')
        decoder_path = os.path.join(savedir, model_name, "models", "decoder-" + str(idx_fold) + "-" + str(num_epoches) + "-" + str(type_fold) + '.ckpt')
        sampler = Sampler(cfg, 'max', Feature_gen)
        print("Doing pca on the fold - ", idx_fold)
        sampler.save_encodings_all('test', idx_fold, encoder_path, decoder_path)
        sampler.save_encodings_all('train', idx_fold, encoder_path, decoder_path)
        # Record fold completion so a restart skips it.
        checkpoint_fold.write_checkpoint(idx_fold + 1)
    # Evaluation on the final epoch for each sampling regime.
    range_epochs = [num_epoches]
    regimes = ['beam_2']
    print("Evaluation starts!...")
    for regim in regimes:
        evaluator = Evaluator(cfg, regim, type_fold, range_epochs, Feature_gen)
        print("start run evaluation!...")
        evaluator.run_evaluation()
    # Plot similarities & molecule distributions exactly once per pipeline.
    # BUG FIX: the old code kept an "a+" handle open for the whole run and
    # called readlines() without seek(0); in append mode the file position
    # starts at end-of-file, so the "already plotted" check always saw an
    # empty file, and the handle was never closed.
    with open(pipeline_checkpoint_path, "a+") as pipeline_file:
        pipeline_file.seek(0)
        if "plot" not in pipeline_file.read():
            for epoch in range_epochs:
                plot = plot_all(cfg, epoch)
                plot.run()
            pipeline_file.write("plot")
            pipeline_file.flush()
# Script entry point: run the full training/evaluation pipeline.
if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,970
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/train_binding.py
|
import argparse
import config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils import Utils
import argparse
import sys
import config
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from build_vocab import Vocabulary
from data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from data_loader_binding import Pdb_Dataset
# from training.trainer import train_loop, train_loop_mask
from training.train import Trainer
from training.binding import Trainer_Binding_Fold
from utils import Utils
# from training.train import Trainer
# from utils import Utils
def main():
    """Parse the CLI, load the configuration and run binding-fold training."""
    arg_parser = argparse.ArgumentParser(description='Train a 3D reconstruction model.')
    arg_parser.add_argument('config', type=str, help='Path to config file.')
    cli_args = arg_parser.parse_args()
    # load_config(user_config, defaults) — same call pattern as the other
    # training scripts in this package.
    cfg = config.load_config(cli_args.config, 'configurations/config_lab/default.yaml')
    Trainer_Binding_Fold(cfg).train_epochs()
# Script entry point: run binding-affinity fold training.
if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,971
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/loc_resnet.py
|
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.autograd import Variable
from model.encoder.base import Aggregate
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def maxpool(x, dim=-1, keepdim=False):
    """Max-reduce ``x`` along ``dim``, discarding the argmax indices."""
    values, _indices = x.max(dim=dim, keepdim=keepdim)
    return values
# Resnet Blocks
class ResnetBlockFC(nn.Module):
    """Fully connected ResNet block: ``x + fc_1(relu(fc_0(relu(x))))``.

    Args:
        size_in (int): input dimension.
        size_out (int): output dimension (defaults to ``size_in``).
        size_h (int): hidden dimension (defaults to ``min(size_in, size_out)``).
    """

    def __init__(self, size_in, size_out=None, size_h=None):
        super().__init__()
        size_out = size_in if size_out is None else size_out
        size_h = min(size_in, size_out) if size_h is None else size_h
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Two-layer residual branch.
        self.fc_0 = nn.Linear(size_in, size_h)
        self.fc_1 = nn.Linear(size_h, size_out)
        self.actvn = nn.ReLU()
        # Project the skip path only when input/output dimensions differ.
        self.shortcut = None if size_in == size_out else nn.Linear(size_in, size_out, bias=False)
        # Zero-init the final weight so the residual branch initially
        # contributes only fc_1's bias.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        residual = self.fc_1(self.actvn(self.fc_0(self.actvn(x))))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + residual
class ResnetPointnet(nn.Module):
    """PointNet-style encoder with ResNet blocks and masked pooling.

    Each stage concatenates per-point features with a globally pooled
    summary (via ``Aggregate_Pointnet``) before the next residual block.

    Args:
        dim (int): input point feature dimension.
        hidden_dim (int): hidden dimension of the network.
    """

    def __init__(self, dim=None, hidden_dim=None):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        # Grid features: lift input, then 4 pool-concat-block stages.
        self.fc_pos = nn.Linear(dim, 2 * hidden_dim)
        self.block_0 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        # NOTE(review): block_4 is never called in forward (its stage is
        # commented out); kept so existing checkpoints still load.
        self.block_4 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        # Activation & pooling
        self.actvn = nn.ReLU()
        self.pool = maxpool
        self.device = DEVICE
        # NOTE(review): Aggregate_Pointnet is configured with axis=-1, which
        # pools over the FEATURE axis and casts to double — axis=1 (points)
        # would match the masked-average intent; confirm before changing.
        self.atom_pool = Aggregate_Pointnet(axis=-1, mean=True)

    def forward(self, p, masks):
        """Encode points ``p`` (batch, T, dim) with validity ``masks``.

        Returns per-point features of shape (batch, T, hidden_dim).
        """
        batch_size, T, D = p.size()  # implicit rank-3 check
        net = self.fc_pos(p)
        net = self.block_0(net)
        # FIX: removed the leftover debug max-pool + print from this stage.
        pooled = self.atom_pool(net, masks).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_1(net)
        pooled = self.atom_pool(net, masks).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_2(net)
        pooled = self.atom_pool(net, masks).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_3(net)
        return net
class ResnetPointnet_4(nn.Module):
    """PointNet-style encoder with 5 ResNet blocks and global max pooling.

    Each stage concatenates per-point features with a max-pooled global
    summary (over the point axis) before the next residual block.

    Args:
        dim (int): input point feature dimension.
        hidden_dim (int): hidden dimension of the network.
    """

    def __init__(self, dim=None, hidden_dim=None):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        # Grid features: lift input, then 5 pool-concat-block stages.
        self.fc_pos = nn.Linear(dim, 2 * hidden_dim)
        self.block_0 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_4 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        # Activation & pooling
        self.actvn = nn.ReLU()
        self.pool = maxpool
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, p):
        """Encode points ``p`` (batch, T, dim) -> (batch, T, hidden_dim)."""
        batch_size, T, D = p.size()  # implicit rank-3 check
        net = self.fc_pos(p)
        net = self.block_0(net)
        # BUG FIX: this stage read self.atom_pool, which is never defined on
        # this class (AttributeError at runtime); every other stage uses the
        # max pool, so this one now does too. Also dropped the debug print.
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_1(net)
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_2(net)
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_3(net)
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_4(net)  # batch_size x T x hidden_dim
        return net
class Aggregate_Pointnet(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead for sum pooling.
        keepdim (bool, optional): whether the output tensor has dim retained or not.
    """

    def __init__(self, axis, mean=False, keepdim=True):
        super(Aggregate_Pointnet, self).__init__()
        self.axis = axis
        self.average = mean
        self.keepdim = keepdim

    def forward(self, input, mask=None):
        r"""Pool ``input`` along ``self.axis``.

        Args:
            input (torch.Tensor): input data.
            mask (torch.Tensor, optional): mask to be applied; e.g. neighbors mask.

        Returns:
            torch.Tensor: pooled result, unsqueezed at dim 2 and cast to double.
        """
        # Zero out masked-out entries before pooling.
        data = input if mask is None else input * mask[..., None]
        pooled = torch.sum(data, self.axis)
        if self.average:
            # Divide by the (masked) element count; clamp at 1 to avoid
            # division by zero for fully-masked rows.
            if mask is None:
                count = data.size(self.axis)
            else:
                count = torch.sum(mask, self.axis, keepdim=self.keepdim)
                count = torch.max(count, other=torch.ones_like(count))
            pooled = pooled / count
        return pooled.unsqueeze(2).to(torch.double)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,972
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/decoder/decoder_vis_old.py
|
from functools import partial
import numpy as np
import torch
import pickle
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch import nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.autograd import Variable
from se3cnn.non_linearities.rescaled_act import Softplus
from se3cnn.point.kernel import Kernel
from se3cnn.point.operations import NeighborsConvolution
from se3cnn.point.radial import CosineBasisModel
# Default device used by the decoder's sampling tensors.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hard cap on generated sequence length (greedy/probabilistic sampling loop
# bound, and the beam-search step cutoff).
MAX_Length = 245
class My_attention(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(My_attention, self).__init__()
self.encoder_att = nn.Linear(
encoder_dim, attention_dim
) # linear layer to transform encoded pocket
self.decoder_att = nn.Linear(
decoder_dim, attention_dim
) # linear layer to transform decoder's output
self.full_att = nn.Linear(
attention_dim, 1
) # linear layer to calculate values to be softmax-ed
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1) # softmax layer to calculate weights
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(
encoder_out
) # (batch_size, num_pixels, attention_dim) or (batch_size, attention_dim) - check again!
att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)
att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(
2
) # (batch_size, num_pixels)
alpha = self.softmax(att) # (batch_size, num_pixels)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(
dim=1
) # (batch_size, encoder_dim)
return attention_weighted_encoding, alpha
# this is under construction (sampling part)
class MyDecoderWithAttention_Vis(nn.Module):
    """
    Attention-based LSTM decoder that generates SMILES token sequences
    from encoded input features (greedy, probabilistic, and beam-search
    sampling methods are provided below).
    """

    def __init__(
        self,
        attention_dim,
        embed_dim,
        decoder_dim,
        vocab_size,
        vocab_path,
        encoder_dim=512,
        dropout=0.5,
        beam_size=3,
        device=DEVICE,
    ):
        """
        :param attention_dim: size of attention network
        :param embed_dim: embedding size
        :param decoder_dim: size of decoder's RNN
        :param vocab_size: size of vocabulary
        :param vocab_path: path to a pickled vocabulary object (must expose word2idx)
        :param encoder_dim: feature size of encoded images
        :param dropout: dropout probability
        :param beam_size: beam width used by sample_beam_search
        :param device: torch device used for sampling tensors
        """
        super(MyDecoderWithAttention_Vis, self).__init__()
        self.device = device
        self.encoder_dim = encoder_dim
        self.attention_dim = attention_dim
        self.embed_dim = embed_dim
        self.decoder_dim = decoder_dim
        self.vocab_size = vocab_size
        self.dropout = dropout
        self.max_seg_length = MAX_Length
        self.attention = My_attention(
            encoder_dim, decoder_dim, attention_dim
        )  # attention network
        self.embedding = nn.Embedding(vocab_size, embed_dim)  # embedding layer
        # NOTE: self.dropout is rebound here from the float rate to the
        # Dropout module; after this line the rate is only inside the module.
        self.dropout = nn.Dropout(p=self.dropout)
        self.decode_step = nn.LSTMCell(
            embed_dim + encoder_dim, decoder_dim, bias=True
        )  # decoding LSTMCell
        self.init_h = nn.Linear(
            encoder_dim, decoder_dim
        )  # linear layer to find initial hidden state of LSTMCell
        self.init_c = nn.Linear(
            encoder_dim, decoder_dim
        )  # linear layer to find initial cell state of LSTMCell
        self.f_beta = nn.Linear(
            decoder_dim, encoder_dim
        )  # linear layer to create a sigmoid-activated gate
        self.sigmoid = nn.Sigmoid()
        self.fc = nn.Linear(
            decoder_dim, vocab_size
        )  # linear layer to find scores over vocabulary
        self.init_weights()  # initialize some layers with the uniform distribution
        self.vocab_path = vocab_path
        self.beam_size = beam_size
        # Load the vocabulary once at construction time.
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
def init_weights(self):
    """
    Initializes some parameters with values from the uniform distribution, for easier convergence.
    """
    # nn.init wrappers perform the same in-place fills as .data.uniform_/.fill_.
    nn.init.uniform_(self.embedding.weight, -0.1, 0.1)
    nn.init.constant_(self.fc.bias, 0)
    nn.init.uniform_(self.fc.weight, -0.1, 0.1)
def load_pretrained_embeddings(self, embeddings):
    """
    Loads embedding layer with pre-trained embeddings.

    :param embeddings: pre-trained embeddings tensor, (vocab_size, embed_dim)
    """
    pretrained = nn.Parameter(embeddings)
    self.embedding.weight = pretrained
def fine_tune_embeddings(self, fine_tune=True):
    """
    Allow fine-tuning of embedding layer? (Only makes sense to not-allow if using pre-trained embeddings).

    :param fine_tune: Allow?
    """
    # requires_grad_ toggles gradient tracking in place, same effect as
    # assigning .requires_grad.
    for param in self.embedding.parameters():
        param.requires_grad_(fine_tune)
def init_hidden_state(self, encoder_out):
    """
    Creates the initial hidden and cell states for the decoder's LSTM
    from the mean-pooled encoder output.

    :param encoder_out: encoded input, a tensor of dimension (batch_size, num_pixels, encoder_dim)
    :return: hidden state, cell state (each (batch_size, decoder_dim))
    """
    pooled = encoder_out.mean(dim=1)  # average over positions
    return self.init_h(pooled), self.init_c(pooled)
def forward(self, encoder_out, encoded_captions, caption_lengths, device=DEVICE):
    """
    Forward propagation (teacher forcing over a batch of captions).

    :param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)
    :param encoded_captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)
    :param caption_lengths: caption lengths, a tensor of dimension (batch_size, 1)
    :return: scores for vocabulary, sorted encoded captions, decode lengths, attention weights
    """
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(-1)
    vocab_size = self.vocab_size
    # Flatten spatial positions into one axis.
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)
    num_pixels = encoder_out.size(1)
    # Sort by decreasing caption length so that, at each time step, the
    # first batch_size_t rows are exactly the still-active sequences.
    caption_lengths = torch.tensor(caption_lengths).view(-1, 1)
    caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(
        dim=0, descending=True
    )
    encoder_out = encoder_out[sort_ind]
    encoded_captions = encoded_captions[sort_ind]
    # Embed the (sorted) ground-truth captions for teacher forcing.
    embeddings = self.embedding(
        encoded_captions
    )  # (batch_size, max_caption_length, embed_dim)
    # Initialize LSTM state from the mean encoder output.
    h, c = self.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)
    # We won't decode at the <end> position, since we've finished generating
    # as soon as we generate <end>; decoding lengths are actual lengths - 1.
    decode_lengths = (caption_lengths - 1).tolist()
    # Tensors to hold word prediction scores and attention weights.
    predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(
        device
    )
    alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)
    # At each time-step: attention-weigh the encoder output using the
    # previous decoder hidden state, then generate the next word from the
    # previous word and the attention-weighted encoding.
    for t in range(max(decode_lengths)):
        # Number of sequences still active at step t (sorted order makes
        # them a prefix of the batch).
        batch_size_t = sum([l > t for l in decode_lengths])
        attention_weighted_encoding, alpha = self.attention(
            encoder_out[:batch_size_t], h[:batch_size_t]
        )
        gate = self.sigmoid(
            self.f_beta(h[:batch_size_t])
        )  # gating scalar, (batch_size_t, encoder_dim)
        attention_weighted_encoding = gate * attention_weighted_encoding
        h, c = self.decode_step(
            torch.cat(
                [embeddings[:batch_size_t, t, :], attention_weighted_encoding],
                dim=1,
            ),
            (h[:batch_size_t], c[:batch_size_t]),
        )  # (batch_size_t, decoder_dim)
        preds = self.fc(h)  # (batch_size_t, vocab_size)
        predictions[:batch_size_t, t, :] = preds
        alphas[:batch_size_t, t, :] = alpha
    return predictions, encoded_captions, decode_lengths, alphas
def sample_max(self, features, states=None):
"""Samples SMILES tockens for given features (Greedy search)."""
k = 1
k_prev_words = torch.LongTensor([[self.vocab.word2idx['<start>']]] * k).to(self.device)
h, c = self.init_hidden_state(features)
sampled_ids = []
inputs = features.unsqueeze(1)
for i in range(self.max_seg_length):
embeddings = self.embedding(k_prev_words).squeeze(1) # (s, embed_dim) ?why should we alos use it???
awe, alpha = self.attention(features, h) # (s, encoder_dim), (s, num_pixels) - we give to Attention the same features
# alpha = alpha.view(-1, enc_image_size, enc_image_size) # (s, enc_image_size, enc_image_size)
gate = self.sigmoid(self.f_beta(h)) # gating scalar, (s, encoder_dim)
awe = gate * awe
#s is a batch_size_t since we do not have a batch of images, we have just one image
# and we want to find several words.
h, c = self.decode_step(torch.cat([embeddings, awe], dim=1), (h, c)) # (s, decoder_dim)
scores = self.fc(h) # (s, vocab_size)
scores = F.log_softmax(scores, dim=1)
predicted = scores.max(1)[1] #check that
k_prev_words = predicted #now we have predicted word and give it to the next lastm
# scores = F.log_softmax(scores, dim=1)
# h = h[i] #we have the only word - no sense to have index of h (h dim - [1, decoder_dim])
# c = c[prev_word_inds[incomplete_inds]]
# encoder_out = encoder_out[prev_word_inds[incomplete_inds]] - we give to Attention the same features
sampled_ids.append(predicted)
sampled_ids = torch.stack(sampled_ids, 1)
return sampled_ids
def sample_prob(self, features, states=None):
    """Sample SMILES token ids for *features* (probabilistic search).

    The first token after <start> is chosen greedily; subsequent tokens are
    drawn from the softmax distribution over the vocabulary using an
    inverse-CDF scan (walk the cumulative distribution until it passes a
    uniform random number, independently per row).

    :param features: encoder output, (1, num_pixels, encoder_dim)
    :param states: unused, kept for interface compatibility
    :return: LongTensor of sampled token ids, shape (1, max_seg_length)
    """
    k = 1
    k_prev_words = torch.LongTensor([[self.vocab.word2idx['<start>']]] * k).to(self.device)
    h, c = self.init_hidden_state(features)
    sampled_ids = []
    for i in range(self.max_seg_length):
        embeddings = self.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
        # Attention over the same features at every step.
        awe, alpha = self.attention(features, h)  # (s, encoder_dim), (s, num_pixels)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = self.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = self.fc(h)  # (s, vocab_size)
        if i == 0:
            # Greedy choice for the first generated token.
            predicted = scores.max(1)[1]
        else:
            probs = F.softmax(scores, dim=1)
            probs_np = probs.data.cpu().numpy() if probs.is_cuda else probs.data.numpy()
            rand_num = np.random.rand(probs_np.shape[0])
            iter_sum = np.zeros((probs_np.shape[0],))
            # BUG FIX: np.int / np.bool were removed in NumPy >= 1.24; use the
            # builtin int/bool dtypes. The inner loop variable is also renamed
            # so it no longer shadows the step index ``i``.
            tokens = np.zeros(probs_np.shape[0], dtype=int)
            for tok in range(probs_np.shape[1]):
                iter_sum += probs_np[:, tok]
                valid_token = rand_num < iter_sum
                update_indices = np.logical_and(
                    valid_token, np.logical_not(tokens.astype(bool))
                )
                tokens[update_indices] = tok
            # Back to a LongTensor on the same device as the scores.
            predicted = torch.LongTensor(tokens).to(probs.device)
        k_prev_words = predicted  # feed the sampled token to the next step
        sampled_ids.append(predicted)
    sampled_ids = torch.stack(sampled_ids, 1)
    return sampled_ids
def simple_prob(self, features, states=None):
    """Sample SMILES token ids with per-step multinomial sampling.

    Greedy for the first token; each subsequent token is drawn from the full
    softmax distribution with ``np.random.choice``.

    BUG FIXES vs. the original: it called the nonexistent ``self.embed``,
    passed a 2-D torch tensor as ``p=`` to ``np.random.choice``, never fed
    the sampled token back into ``k_prev_words``, and appended raw numpy
    ints that ``torch.stack`` cannot stack.

    :param features: encoder output, (1, num_pixels, encoder_dim)
    :param states: unused, kept for interface compatibility
    :return: LongTensor of sampled token ids, shape (1, max_seg_length)
    """
    k = 1
    k_prev_words = torch.LongTensor([[self.vocab.word2idx['<start>']]] * k).to(self.device)
    h, c = self.init_hidden_state(features)
    sampled_ids = []
    for i in range(self.max_seg_length):  # maximum sampling length
        embeddings = self.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
        awe, alpha = self.attention(features, h)  # (s, encoder_dim), (s, num_pixels)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = self.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = self.fc(h)  # (s, vocab_size)
        if i == 0:
            predicted = scores.max(1)[1]
        else:
            probs = F.softmax(scores, dim=1)
            # np.random.choice needs a 1-D, normalized probability vector.
            probs_np = probs.data.cpu().numpy()[0] if probs.is_cuda else probs.data.numpy()[0]
            probs_np = probs_np / probs_np.sum()  # guard against rounding drift
            token = np.random.choice(self.vocab_size, p=probs_np)
            predicted = torch.LongTensor([token]).to(self.device)
        k_prev_words = predicted  # feed the sampled token to the next step
        sampled_ids.append(predicted)
    sampled_ids = torch.stack(sampled_ids, 1)
    return sampled_ids
def simple_prob_topk(self, features, states=None, top_k=3):
    """Sample SMILES token ids restricted to the top-k tokens per step.

    Greedy for the first token; each subsequent token is drawn from the
    softmax distribution with all but the ``top_k`` most probable tokens
    zeroed out and the remainder renormalized.

    BUG FIXES vs. the original: it sorted/indexed a 2-D torch tensor with
    scalar indices, passed an unnormalized torch tensor as ``p=`` to
    ``np.random.choice``, called the nonexistent ``self.embed``, and never
    updated ``k_prev_words``. The hard-coded top-3 is generalized into the
    ``top_k`` parameter (default 3 preserves the original behaviour).

    :param features: encoder output, (1, num_pixels, encoder_dim)
    :param states: unused, kept for interface compatibility
    :param top_k: number of highest-probability tokens kept at each step
    :return: LongTensor of sampled token ids, shape (1, max_seg_length)
    """
    k = 1
    k_prev_words = torch.LongTensor([[self.vocab.word2idx['<start>']]] * k).to(self.device)
    h, c = self.init_hidden_state(features)
    sampled_ids = []
    for i in range(self.max_seg_length):  # maximum sampling length
        embeddings = self.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
        awe, alpha = self.attention(features, h)  # (s, encoder_dim), (s, num_pixels)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = self.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = self.fc(h)  # (s, vocab_size)
        if i == 0:
            predicted = scores.max(1)[1]
        else:
            probs = F.softmax(scores, dim=1)
            probs_np = probs.data.cpu().numpy()[0] if probs.is_cuda else probs.data.numpy()[0]
            # Keep only the top_k probabilities, then renormalize and sample.
            threshold = np.sort(probs_np)[-top_k]
            probs_np[probs_np < threshold] = 0.0
            probs_np = probs_np / probs_np.sum()
            token = np.random.choice(self.vocab_size, p=probs_np)
            predicted = torch.LongTensor([token]).to(self.device)
        k_prev_words = predicted  # feed the sampled token to the next step
        sampled_ids.append(predicted)
    sampled_ids = torch.stack(sampled_ids, 1)
    return sampled_ids
def sample_beam_search(self, features):
    """
    Decode the encoded input with beam search.

    :param features: encoder output, (1, num_atoms, encoder_dim).
        NOTE(review): alpha is reshaped to (enc_image_size, enc_image_size)
        with enc_image_size = 17, so num_atoms is assumed to be 289 — confirm.
    :return: (best sequence, its attention weights) when at least one beam
        reached <end>; otherwise (unfinished sequences on CPU, empty alpha list).
    """
    k = self.beam_size
    vocab_size = len(self.vocab)
    enc_image_size = 17
    num_atoms = features.shape[1]
    encoder_dim = features.shape[2]
    # Treat the problem as having a batch size of k (one row per beam).
    features = features.expand(k, num_atoms, encoder_dim)
    # Top k previous words at each step; initially all <start>.
    k_prev_words = torch.LongTensor([[self.vocab.word2idx['<start>']]] * k).to(self.device)  # (k, 1)
    # Top k sequences so far; initially just <start>.
    seqs = k_prev_words  # (k, 1)
    # Cumulative log-probability per beam; initially 0.
    top_k_scores = torch.zeros(k, 1).to(self.device)  # (k, 1)
    # Attention-weight history per beam; initially all ones.
    seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(self.device)
    # Completed sequences, their alphas, and their scores.
    complete_seqs = list()
    complete_seqs_alpha = list()
    complete_seqs_scores = list()
    step = 1
    h, c = self.init_hidden_state(features)
    # s below is <= k: beams are retired once they emit <end>.
    while True:
        embeddings = self.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
        awe, alpha = self.attention(features, h)  # (s, encoder_dim), (s, num_pixels)
        alpha = alpha.view(-1, enc_image_size, enc_image_size)  # (s, enc_image_size, enc_image_size)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = self.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = self.fc(h)  # (s, vocab_size)
        scores = F.log_softmax(scores, dim=1)
        # Accumulate log-probabilities along each beam.
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)
        if step == 1:
            # All k beams are identical at the first step; pick from one row.
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll and find top scores and their unrolled indices.
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
        # BUG FIX: integer floor division is required here; plain "/" yields
        # a float tensor under PyTorch true division and breaks indexing.
        prev_word_inds = top_k_words // vocab_size  # (s)
        next_word_inds = top_k_words % vocab_size  # (s)
        # Add new words to sequences and alphas.
        seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        alpha = alpha.float()
        seqs_alpha = torch.cat(
            [seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
            dim=1,
        )  # (s, step+1, enc_image_size, enc_image_size)
        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [
            ind for ind, next_word in enumerate(next_word_inds)
            if next_word != self.vocab.word2idx['<end>']
        ]
        complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
        # Set aside complete sequences.
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
            k -= len(complete_inds)  # reduce beam width accordingly
        # Proceed with incomplete sequences only.
        if k == 0:
            break
        seqs = seqs[incomplete_inds]
        seqs_alpha = seqs_alpha[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        features = features[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
        # Break if decoding has been going on too long.
        if step > MAX_Length:
            break
        step += 1
    if len(complete_seqs_scores) > 0:
        best = complete_seqs_scores.index(max(complete_seqs_scores))
        return complete_seqs[best], complete_seqs_alpha[best]
    else:
        # No beam finished within the step budget.
        return seqs.cpu(), complete_seqs_alpha
def sample_beam_search(decoder, features):
    """
    Module-level beam search driver over an attention decoder.

    Unlike the method of the same name, the success branch returns ALL
    completed sequences (not just the best one).

    :param decoder: decoder model (must expose vocab, beam_size, embedding,
        attention, f_beta, sigmoid, decode_step, fc, init_hidden_state, device)
    :param features: encoder output, (1, num_atoms, encoder_dim).
        NOTE(review): alpha is reshaped to (enc_image_size, enc_image_size)
        with enc_image_size = 17, so num_atoms is assumed to be 289 — confirm.
    :return: list of completed sequences, or the unfinished sequences on CPU
        when no beam reached <end> within the step budget.
    """
    k = decoder.beam_size
    vocab_size = len(decoder.vocab)
    enc_image_size = 17
    num_atoms = features.shape[1]
    encoder_dim = features.shape[2]
    # Treat the problem as having a batch size of k (one row per beam).
    features = features.expand(k, num_atoms, encoder_dim)
    # Top k previous words at each step; initially all <start>.
    k_prev_words = torch.LongTensor([[decoder.vocab.word2idx['<start>']]] * k).to(decoder.device)  # (k, 1)
    seqs = k_prev_words  # (k, 1)
    top_k_scores = torch.zeros(k, 1).to(decoder.device)  # (k, 1)
    seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(decoder.device)
    complete_seqs = list()
    complete_seqs_alpha = list()
    complete_seqs_scores = list()
    step = 1
    h, c = decoder.init_hidden_state(features)
    # s below is <= k: beams are retired once they emit <end>.
    while True:
        embeddings = decoder.embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
        awe, alpha = decoder.attention(features, h)  # (s, encoder_dim), (s, num_pixels)
        alpha = alpha.view(-1, enc_image_size, enc_image_size)
        gate = decoder.sigmoid(decoder.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe
        h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)
        scores = decoder.fc(h)  # (s, vocab_size)
        scores = F.log_softmax(scores, dim=1)
        # Accumulate log-probabilities along each beam.
        scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)
        if step == 1:
            # All k beams are identical at the first step; pick from one row.
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)
        else:
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)
        # BUG FIX: integer floor division is required here; plain "/" yields
        # a float tensor under PyTorch true division and breaks indexing.
        prev_word_inds = top_k_words // vocab_size  # (s)
        next_word_inds = top_k_words % vocab_size  # (s)
        seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        alpha = alpha.float()
        seqs_alpha = torch.cat(
            [seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
            dim=1,
        )  # (s, step+1, enc_image_size, enc_image_size)
        incomplete_inds = [
            ind for ind, next_word in enumerate(next_word_inds)
            if next_word != decoder.vocab.word2idx['<end>']
        ]
        complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
        # Set aside complete sequences.
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
            k -= len(complete_inds)  # reduce beam width accordingly
        if k == 0:
            break
        # Proceed with the incomplete beams (and their decoder state).
        seqs = seqs[incomplete_inds]
        seqs_alpha = seqs_alpha[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        c = c[prev_word_inds[incomplete_inds]]
        features = features[prev_word_inds[incomplete_inds]]
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
        if step > MAX_Length:
            break
        step += 1
    # (The original had unreachable statements after these returns; removed.)
    if len(complete_seqs_scores) > 0:
        return complete_seqs
    else:
        return seqs.cpu()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,973
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/training/trainer_att.py
|
import argparse
import json
import multiprocessing
import os
import pickle
import sys
import numpy as np
import torch
import torch.nn as nn
from numpy import savetxt
from py3nvml import py3nvml
from sklearn.model_selection import KFold
from torch.nn.utils.rnn import pack_padded_sequence
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils import model_zoo
# from torchsummary import summary
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import src.utils.config as config
from src.datasets.data_loader import (Pdb_Dataset, collate_fn,
collate_fn_masks, get_loader)
from src.datasets.data_loader_feature import Pdb_Dataset_Feature
from src.sampling.sampler import Sampler
from src.training.utils import save_checkpoint
from src.utils.build_vocab import Vocabulary
class Trainer_Fold_Feature_Attention():
    """Per-fold trainer for a feature-based encoder + attention decoder
    captioning model.

    All hyper-parameters come from ``cfg`` and ``split_no`` selects the
    cross-validation fold. The constructor creates every output directory
    (models, logs, tensorboard, statistics), opens the log files, builds the
    encoder/decoder via ``config.get_model`` and resumes from a per-fold
    training checkpoint when one exists. ``train_epochs`` is the entry point.
    """

    def __init__(self, cfg, split_no):
        # model params
        self.cfg = cfg
        self.split_no = split_no
        self.type_fold = cfg["sampling_params"]["type_fold"]
        # kept so stdout can be restored after redirecting prints to model.txt
        self.original_stdout = sys.stdout
        # folds data
        self.name_file_folds = cfg['splitting']['file_folds']
        self.fold_number = cfg['splitting']['id_fold']
        self.model_name = cfg['model_params']['model_name']
        self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        self.n_splits = cfg['training_params']['n_splits']
        self.loss_mode = cfg['training_params']['loss_mode']
        # NOTE(review): loss_mode == "double_stochastic" reads self.alpha_c,
        # which is never set in this class -- confirm it is configured elsewhere.
        self.loss_best = np.inf
        self.global_tensorboard_path = os.path.join(cfg['output_parameters']['savedir'], "tensorboard")
        os.makedirs(self.global_tensorboard_path, exist_ok=True)
        # output files
        self.savedir = os.path.join(cfg['output_parameters']['savedir'], self.model_name)
        self.tesnorboard_path_train = os.path.join(self.global_tensorboard_path, 'train_' + str(self.split_no) + '_' + self.model_name)
        self.tesnorboard_path_eval = os.path.join(self.global_tensorboard_path, 'eval_' + str(self.split_no) + '_' + self.model_name)
        self.model_path = os.path.join(self.savedir, "models")
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        os.makedirs(self.log_path, exist_ok=True)
        os.makedirs(self.idx_file, exist_ok=True)
        os.makedirs(self.model_path, exist_ok=True)
        os.makedirs(self.save_dir_smiles, exist_ok=True)
        os.makedirs(self.tesnorboard_path_train, exist_ok=True)
        os.makedirs(self.tesnorboard_path_eval, exist_ok=True)
        # log files (kept open for the lifetime of the trainer)
        self.test_idx_file = open(os.path.join(self.idx_file, "test_idx.txt"), "w")
        self.log_file = open(os.path.join(self.log_path, "log.txt"), "w")
        self.log_file_tensor = open(os.path.join(self.log_path, "log_tensor.txt"), "w")
        self.writer_train = SummaryWriter(self.tesnorboard_path_train)
        self.writer_eval = SummaryWriter(self.tesnorboard_path_eval)
        self.Encoder, self.Decoder = config.get_model(cfg, device=self.device)
        print(self.Encoder)
        print(self.Decoder)
        with open(os.path.join(self.log_path, "model.txt"), 'w') as f:
            sys.stdout = f  # Change the standard output to the file we created.
            print(self.Encoder)
            print(self.Decoder)
            sys.stdout = self.original_stdout
        # print all params
        nparameters_enc = sum(p.numel() for p in self.Encoder.parameters())
        nparameters_dec = sum(p.numel() for p in self.Decoder.parameters())
        print('Total number of parameters: %d' % (nparameters_enc + nparameters_dec))
        # FIX: append instead of 'w' -- a second 'w' open truncated the
        # model description written just above.
        with open(os.path.join(self.log_path, "model.txt"), 'a') as f:
            f.write('Total number of parameters: %d' % (nparameters_enc + nparameters_dec))
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
        self.criterion = nn.CrossEntropyLoss()
        self.model_name = 'e3nn'
        self.checkpoint_path = os.path.join(self.savedir, 'checkpoints')
        os.makedirs(self.checkpoint_path, exist_ok=True)
        os.makedirs(os.path.join(self.checkpoint_path, 'training'), exist_ok=True)
        self.checkpoint_path_training = os.path.join(self.savedir, 'checkpoints', 'training', str(self.split_no) + '_' + self.type_fold + '_training.pkl')
        self.eval_check_path = os.path.join(self.savedir, 'checkpoints', 'eval.txt')
        if not os.path.exists(self.eval_check_path):
            with open(self.eval_check_path, 'w') as file:
                file.write('0')
        # loading checkpoint: resume training state if a per-fold checkpoint exists
        if (os.path.exists(self.checkpoint_path_training)):
            checkpoint = torch.load(self.checkpoint_path_training)
            print("loading model...")
            self.start_epoch = checkpoint['start_epoch'] + 1
            self.Encoder, self.Decoder = config.get_model(cfg, device=self.device)
            self.Encoder.load_state_dict(checkpoint['encoder'])
            self.Decoder.load_state_dict(checkpoint['decoder'])
            self.encoder_best, self.decoder_best = self.Encoder, self.Decoder
            # NOTE(review): the optimizer object itself is stored in the
            # checkpoint (not a state_dict) -- confirm save_checkpoint does the same.
            self.caption_optimizer = checkpoint['caption_optimizer']
        else:
            print("initialising model...")
            self.start_epoch = 0
            self.Encoder, self.Decoder = config.get_model(cfg, device=self.device)
            self.encoder_best, self.decoder_best = self.Encoder, self.Decoder
            caption_params = list(self.Encoder.parameters()) + list(self.Decoder.parameters())
            self.caption_optimizer = torch.optim.Adam(caption_params, lr = self.learning_rate)

    def train_loop_mask(self, loader, caption_optimizer, split_no, epoch, total_step):
        """Run one training epoch over `loader` (features, geometry, masks,
        captions, lengths), logging loss/GPU memory and snapshotting the best
        encoder/decoder whenever the loss improves."""
        self.Encoder.train()
        self.Decoder.train()
        progress = tqdm(loader)
        for i, (features, geometry, masks, captions, lengths) in enumerate(progress):
            features = features.to(self.device)
            geometry = geometry.to(self.device)
            captions = captions.to(self.device)
            masks = masks.to(self.device)
            caption_optimizer.zero_grad()
            feature = self.Encoder(features, geometry, masks)
            scores, caps_sorted, decode_lengths, alphas = self.Decoder(feature, captions, lengths)
            # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
            targets = caps_sorted[:, 1:]
            scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
            targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
            loss = self.criterion(scores, targets)
            if (self.loss_mode == "double_stochastic"):
                # doubly-stochastic attention regularisation
                loss += self.alpha_c * ((1 - alphas.sum(dim = 1)) ** 2).mean()
            self.Decoder.zero_grad()
            self.Encoder.zero_grad()
            loss.backward()
            caption_optimizer.step()  #!!! figure out whether we should leave that
            name = "training_loss_" + str(split_no + 1)
            self.writer_train.add_scalar(name, loss.item(), epoch)
            self.log_file_tensor.write(str(loss.item()) + "\n")
            self.log_file_tensor.flush()
            handle = py3nvml.nvmlDeviceGetHandleByIndex(0)
            fb_mem_info = py3nvml.nvmlDeviceGetMemoryInfo(handle)
            mem = fb_mem_info.used >> 20
            self.writer_train.add_scalar('val/gpu_memory', mem, epoch)
            # Print log info
            if i % self.log_step == 0:
                result = "Split [{}], Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}".format(
                    split_no, epoch, self.num_epochs, i, total_step, loss.item(), np.exp(loss.item())
                )
                self.log_file.write(result + "\n")
                self.log_file.flush()
            progress.set_postfix({'epoch': epoch,
                                  'loss': loss.item(),
                                  'Perplexity': np.exp(loss.item()),
                                  'mem': mem})
            if (self.loss_best - loss > 0):
                self.log_file.write("The best loss " + str(loss.item()) + "; Split-{}-Epoch-{}-Iteration-{}_best.ckpt".format(split_no, epoch + 1, i + 1) + "\n")
                # FIX: was `self.enoder_best` (typo), so the best encoder was never tracked
                self.encoder_best = self.Encoder
                self.decoder_best = self.Decoder
                self.encoder_best_name = os.path.join(
                    self.model_path, "encoder_best_" + str(split_no) + ".ckpt"
                )
                self.decoder_best_name = os.path.join(
                    self.model_path, "decoder_best_" + str(split_no) + ".ckpt")
                torch.save(
                    self.Encoder.state_dict(),
                    self.encoder_best_name,
                )
                torch.save(
                    self.Decoder.state_dict(),
                    self.decoder_best_name,
                )
                # FIX: store a plain float; keeping the loss tensor retained
                # the whole autograd graph of that step across iterations.
                self.loss_best = loss.item()
        self.log_file_tensor.write("\n")
        self.log_file_tensor.flush()

    def eval_loop(self, loader, epoch):
        """
        Evaluation loop using `model` and data from `loader`.
        """
        self.Encoder.eval()
        self.Decoder.eval()
        progress = tqdm(loader)
        for step, (features, geometry, masks, captions, lengths) in enumerate(progress):
            with torch.no_grad():
                features = features.to(self.device)
                geometry = geometry.to(self.device)
                captions = captions.to(self.device)
                masks = masks.to(self.device)
                feature = self.Encoder(features, geometry, masks)
                scores, caps_sorted, decode_lengths, alphas = self.Decoder(feature, captions, lengths)
                targets = caps_sorted[:, 1:]
                scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
                targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
                loss = self.criterion(scores, targets)
                name = "eval_loss_" + str(self.split_no + 1)
                self.writer_eval.add_scalar(name, loss.item(), epoch)
                handle = py3nvml.nvmlDeviceGetHandleByIndex(0)
                fb_mem_info = py3nvml.nvmlDeviceGetMemoryInfo(handle)
                mem = fb_mem_info.used >> 20
                progress.set_postfix({'epoch': epoch,
                                      'l_ev': loss.item(),
                                      'Perplexity': np.exp(loss.item()),
                                      'mem': mem})

    def train_epochs(self, Feature_loader):
        """Train/evaluate over all epochs of the configured fold, saving a
        resumable checkpoint plus named encoder/decoder snapshots per epoch."""
        py3nvml.nvmlInit()  # output memory usage
        featuriser = Pdb_Dataset_Feature(self.cfg, Feature_loader)
        files_refined = os.listdir(self.protein_dir)
        # cross validation: fold indices were pickled under idx_file/<type_fold>
        with open(os.path.join(self.idx_file, self.type_fold), "rb") as f:
            idx_folds = pickle.load(f)
        train_id, test_id = idx_folds[self.split_no]
        train_data = train_id
        test_data = test_id
        feat_train = [featuriser[data] for data in train_data]
        feat_test = [featuriser[data] for data in test_data]
        loader_train = DataLoader(feat_train, batch_size=self.batch_size,
                                  shuffle=True,
                                  num_workers=self.num_workers,
                                  collate_fn=collate_fn_masks,)
        loader_test = DataLoader(feat_test, batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=self.num_workers,
                                 collate_fn=collate_fn_masks,)
        total_step = len(loader_train)
        print("total_step", total_step)
        print("current split no - ", self.split_no)
        for epoch in range(self.start_epoch, self.num_epochs):
            # if add masks everywhere call just train_loop
            self.train_loop_mask(loader_train, self.caption_optimizer, self.split_no, epoch, total_step)
            self.eval_loop(loader_test, epoch)
            save_checkpoint(self.checkpoint_path_training, epoch, self.Encoder, self.Decoder,
                            self.encoder_best, self.decoder_best, self.caption_optimizer, self.split_no)
            self.encoder_name = os.path.join(
                self.model_path, "encoder-{}-{}-{}.ckpt".format(self.split_no, epoch + 1, self.type_fold)
            )
            self.decoder_name = os.path.join(
                self.model_path, "decoder-{}-{}-{}.ckpt".format(self.split_no, epoch + 1, self.type_fold)
            )
            torch.save(
                self.Encoder.state_dict(),
                self.encoder_name,
            )
            torch.save(
                self.Decoder.state_dict(),
                self.decoder_name,
            )
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,974
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/sampling/sampler.py
|
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
import argparse
import sys
import utils.config as config
from rdkit import Chem
import json
import os
import csv
import pickle
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from src.utils.build_vocab import Vocabulary
from src.datasets.data_loader import Pdb_Dataset
from src.evaluation.Contrib.statistics import analysis_to_csv, analysis_to_csv_test
from src.training.utils import save_checkpoint_sampling
class Sampler():
    """Generates SMILES from trained encoder/decoder checkpoints.

    Supports several sampling regimes ("max", "probabilistic", "beam_<k>",
    "temp_sampling_<t>", ...), writes per-protein statistics CSVs, maintains a
    resumable sampling checkpoint, and can dump encoder feature encodings for
    whole train/test splits.
    """

    def __init__(self, cfg, sampling, Feature_Loader):
        self.cfg = cfg
        self.Feature_Loader = Feature_Loader
        self.path_root = cfg['preprocessing']['path_root']
        self.init_refined = self.path_root + "/data/new_refined/"
        self.files_refined = os.listdir(self.init_refined)
        # keep only PDB-style folders (names starting with a digit)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.attention = self.cfg['training_params']['mode']
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.sampling = sampling
        self.model_encoder = cfg['model']['encoder']
        print(self.model_encoder)
        self.model_decoder = cfg['model']['decoder']
        self.sampling_data = cfg['sampling_params']['sampling_data']
        self.protein_dir = cfg["training_params"]["image_dir"]
        # max seconds to wait for the requested number of valid SMILES
        self.time_waiting = cfg["sampling_params"]["time_waiting"]
        self.type_fold = cfg["sampling_params"]["type_fold"]
        # model params
        self.model_name = cfg['model_params']['model_name']
        self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        # output files
        self.savedir = os.path.join(cfg['output_parameters']['savedir'], self.model_name)
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        self.tesnorboard_path = self.savedir
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.save_dir_encodings = os.path.join(cfg['output_parameters']['savedir'], "encodings", self.model_name)
        # sampling params
        os.makedirs(self.save_dir_smiles, exist_ok=True)
        os.makedirs(self.save_dir_encodings, exist_ok=True)
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
        self.dataset = Pdb_Dataset(cfg, self.vocab)
        self.path_checkpoint_evaluator = os.path.join(self.savedir, "checkpoints", "checkpoint_evaluator.csv")
        if os.path.exists(self.path_checkpoint_evaluator):
            self.data_checkpoint = pd.read_csv(self.path_checkpoint_evaluator)

    def analysis_cluster(self, split_no, epoch_no, type_fold, encoder_path, decoder_path):
        """Sample SMILES for every protein of one fold/epoch, appending
        statistics to a CSV and updating the resumable sampling checkpoint."""
        self.idx_fold = split_no
        self.type_fold = type_fold
        self.epoch_no = epoch_no
        self.name_file_stat = self.sampling + "_" + str(self.type_fold) + "_" + str(self.idx_fold) + ".csv"
        self.path_to_file_stat = os.path.join(self.save_dir_smiles, self.name_file_stat)
        self.file_statistics = open(self.path_to_file_stat, "a+")
        self.checkpoint_sampling_path = os.path.join(self.savedir, "checkpoints", str(split_no) + '_sample.pkl')
        # write the header only once, when the stat file is still empty
        # (FIX: previously re-opened the file without closing it just to count lines)
        if os.path.getsize(self.path_to_file_stat) == 0:
            self.file_statistics.write("name,fold,type_fold,epoch_no,orig_smile,gen_smile,gen_NP,gen_logP,gen_sa,gen_qed,gen_weight,gen_similarity,orig_NP,orig_logP,orig_sa,orig_qed,orig_weight,frequency,sampling,encoder,decoder" + "\n")
            self.file_statistics.flush()
        print("loading start_ind_protein...")
        # NOTE(review): this .loc selection is assumed to match exactly one row
        # of the evaluator checkpoint -- confirm the CSV schema.
        start_ind_protein = self.data_checkpoint.loc[(self.data_checkpoint['type_fold'] == self.type_fold) & (self.data_checkpoint['sampling'] == self.sampling), 'start_pdb']
        self.encoder, self.decoder = config.eval_model_captioning(self.cfg, encoder_path, decoder_path, device = self.device)
        self.file_folds = os.path.join(self.idx_file, self.type_fold)
        with (open(self.file_folds, "rb")) as openfile:
            idx_proteins = pickle.load(openfile)
        train_idx, test_idx = idx_proteins[self.idx_fold]
        print("train idx , - ", train_idx)
        print("test idx , - ", test_idx)
        files_refined = os.listdir(self.protein_dir)
        idx_all = [i for i in range(len(files_refined) - 3)]
        # take indices of proteins in the training set
        if (self.sampling_data == "train"):
            idx_to_generate = train_idx
        else:
            idx_to_generate = test_idx
        # sampling checkpoint: resume from the last processed protein index
        end_idx = len(idx_to_generate)
        for idx in range(int(start_ind_protein), end_idx):
            id_abs_protein = idx_to_generate[idx]
            self.generate_smiles(id_abs_protein)
            next_idx = (idx + 1) % end_idx
            self.data_checkpoint.loc[(self.data_checkpoint['type_fold'] == self.type_fold) & (self.data_checkpoint['sampling'] == self.sampling), 'start_pdb'] = next_idx
            if (next_idx == 0):
                # finished a full pass over the fold -> advance to the next epoch
                self.data_checkpoint.loc[(self.data_checkpoint['type_fold'] == self.type_fold) & (self.data_checkpoint['sampling'] == self.sampling), 'start_rec_epoch'] = epoch_no + 1
            self.data_checkpoint.to_csv(self.path_checkpoint_evaluator, index=False)

    def _get_models(self, idx_fold):
        """Load the encoder/decoder checkpoints for `idx_fold` for evaluation."""
        encoder_path, decoder_path = self._get_model_path(idx_fold)
        # FIX: was `cfg` (undefined name) instead of `self.cfg`
        encoder, decoder = config.eval_model_captioning(self.cfg, encoder_path, decoder_path, device = self.device)
        return encoder, decoder

    def _get_model_path(self, idx_fold=None):
        """Return (encoder_path, decoder_path) for a fold.

        FIX: `_get_models` passed `idx_fold` but this method accepted no
        argument; it now takes an optional fold and falls back to
        `self.idx_fold` for backward compatibility.
        """
        fold = self.idx_fold if idx_fold is None else idx_fold
        encoder_name = "encoder-" + str(fold) + "-1-2.ckpt"
        decoder_name = "decoder-" + str(fold) + "-1-2.ckpt"
        encoder_path = os.path.join(self.savedir, "models", encoder_name)
        decoder_path = os.path.join(self.savedir, "models", decoder_name)
        return encoder_path, decoder_path

    def load_pocket(self, id_protein, transform=None):
        """Load precomputed (features, geometry, masks) for one protein and
        move them to the device with a batch dimension added."""
        print("loading data of a protein", self.dataset._get_name_protein(id_protein))
        features, masks, geometry = self.Feature_Loader._get_feat_geo_from_file(id_protein)
        features = features.to(self.device).unsqueeze(0)
        geometry = geometry.to(self.device).unsqueeze(0)
        masks = masks.to(self.device).unsqueeze(0)
        return features, geometry, masks

    def generate_encodings(self, id):
        """Run the encoder on one protein and save the feature tensor to disk."""
        protein_name = self.dataset._get_name_protein(id)
        features, geometry, masks = self.load_pocket(id)
        feature = self.encoder(features, geometry, masks)
        torch.save(feature, os.path.join(self.folder_save, protein_name + "_feature_encoding.pt"))

    def printing_smiles(self, sampled_ids, list_smiles_all):
        """Decode token ids to a SMILES string; append it to
        `list_smiles_all` when RDKit can parse it.

        NOTE(review): returns 1 even for invalid SMILES (only valid ones are
        appended) -- confirm this "count every attempt" behaviour is intended.
        """
        sampled_caption = []
        for word_id in sampled_ids:
            word = self.vocab.idx2word[word_id]
            sampled_caption.append(word)
            if word == "<end>":
                break
        sentence = "".join(sampled_caption)
        # strip the literal "<start>" / "<end>" tokens
        sentence = sentence[7:-5]
        print(sentence)
        m = Chem.MolFromSmiles(sentence)
        if m is None or sentence == '' or sentence.isspace() == True:
            print('invalid')
            return 1
        else:
            print(sentence)
            list_smiles_all.append(sentence)
            return 1

    def smiles_all_txt(self):
        """Dump the original ligand SMILES of every refined protein to one file."""
        file_all_smiles = open(os.path.join(self.save_dir_smiles, "all_smiles_lig.txt"), "w")
        files_refined = os.listdir(self.caption_path)
        files_refined.remove(".DS_Store")
        for protein_name in files_refined:
            init_path_smile = os.path.join(
                self.caption_path, protein_name, protein_name + "_ligand.smi"
            )
            with open(init_path_smile) as fp:
                initial_smile = fp.readlines()[0]
            file_all_smiles.write(initial_smile + "\n")
            file_all_smiles.flush()

    def generate_smiles(self, id):
        """Sample SMILES for one protein until `number_smiles` valid ones are
        produced (or `time_waiting` expires), then append statistics rows."""
        print("current id - ", id)
        smiles = []
        protein_name = self.dataset._get_name_protein(id)
        print("current protein ", protein_name)
        # path of the real smile
        init_path_smile = os.path.join(
            self.caption_path, protein_name, protein_name + "_ligand.smi"
        )
        with open(init_path_smile) as fp:
            initial_smile = fp.readlines()[0]  # write a true initial smile
        smiles.append(initial_smile)
        amount_val_smiles = 0
        attempts = 0  # renamed from `iter` (shadowed the builtin)
        start = time.time()
        if (self.sampling == "beam_1"):
            self.number_smiles = 1
        else:
            self.number_smiles = self.cfg["sampling_params"]["number_smiles"]
        if (self.sampling.startswith('beam') == False):
            while (amount_val_smiles < self.number_smiles):
                end = time.time()
                if((end - start) > self.time_waiting):
                    # stop generating if we wait for too long till 50 ligands
                    self.file_long_proteins = open(os.path.join(self.save_dir_smiles, "exceptions_long.txt"), "w")
                    self.file_long_proteins.write(protein_name + "\n")  # write a protein with long time of generating
                    self.file_long_proteins.flush()
                    break
                attempts += 1
                # Prepare features and geometry from pocket
                features, geometry, masks = self.load_pocket(id)
                # Generate a caption from the image
                feature = self.encoder(features, geometry, masks)
                if (self.sampling == "probabilistic"):
                    sampled_ids = self.decoder.sample_prob(feature)
                elif (self.sampling == "max"):
                    sampled_ids = self.decoder.sample_max(feature)
                    self.number_smiles = 1
                elif (self.sampling == "simple_probabilistic"):
                    sampled_ids = self.decoder.simple_prob(feature)
                elif (self.sampling.startswith("simple_probabilistic_topk") == True):
                    k = int(self.sampling.split("_")[-1])
                    sampled_ids = self.decoder.simple_prob_topk(feature, k)
                elif (self.sampling.startswith("temp_sampling")):
                    temperature = float(self.sampling.split("_")[-1])
                    sampled_ids = self.decoder.sample_temp(feature, temperature)
                sampled_ids = ( sampled_ids[0].cpu().numpy())
                if(type(sampled_ids[0]) != list):
                    idx = self.printing_smiles(sampled_ids, smiles)
                    amount_val_smiles += idx
                else:
                    amount_val_smiles = 0
        elif (self.sampling.startswith('beam') == True):
            number_beams = int(self.sampling.split("_")[1])
            features, geometry, masks = self.load_pocket(id)
            feature = self.encoder(features, geometry, masks)
            if (self.attention == "attention"):
                sampled_ids, alphas = self.decoder.sample_beam_search(feature, number_beams)
            else:
                sampled_ids = self.decoder.sample_beam_search(feature, number_beams)
            # NOTE(review): 120 appears to be a decoder failure sentinel -- confirm.
            if(sampled_ids == 120):
                amount_val_smiles = 0
            else:
                for sentence in sampled_ids:
                    print("sentence", sentence[1:])
                    attempts += 1
                    idx = self.printing_smiles(np.asarray(sentence[1:]), smiles)
                    amount_val_smiles += idx
        else:
            raise ValueError("Unknown sampling...")
        if (amount_val_smiles > 0):
            # get the list of lists of statistics
            stat_protein = analysis_to_csv(smiles, protein_name, self.idx_fold, self.type_fold, self.epoch_no)
            stat_protein.append(amount_val_smiles * [amount_val_smiles / attempts])
            stat_protein.append(amount_val_smiles * [self.sampling])
            stat_protein.append(amount_val_smiles * [self.model_encoder])
            stat_protein.append(amount_val_smiles * [self.model_decoder])
            wr = csv.writer(self.file_statistics)
            wr.writerows(list(map(list, zip(*stat_protein))))
            self.file_statistics.flush()

    def analysis_all(self):
        """For every fold take the test indices, generate SMILES and build statistics."""
        num_folds = 3
        for id_fold in range(num_folds):
            # FIX: `save_dir_smiles` / `save_dir_folds` were undefined names
            file_freq = open(os.path.join(self.save_dir_smiles, str(id_fold), str(id_fold) + "_freq.txt"), "w")
            # NOTE(review): test-idx pickles are assumed to live under
            # self.idx_file (see analysis_cluster) -- confirm.
            file_idx = os.path.join(self.idx_file, "test_idx_" + str(id_fold))
            with (open(file_idx, "rb")) as openfile:
                idx_proteins = pickle.load(openfile)
            for id_protein in idx_proteins:
                self.generate_smiles(id_protein)

    def test_analysis_all(self):
        """Smoke-test variant of analysis_all over a small hard-coded index set."""
        num_folds = 3
        all_stat = []
        idx_array = [[11], [14]]
        for id_fold in range(2):
            # FIX: `save_dir_smiles` was an undefined name
            file_freq = open(os.path.join(self.save_dir_smiles, str(id_fold), str(id_fold) + "_freq.txt"), "w")
            idx_proteins = idx_array[id_fold]
            for id_protein in idx_proteins:
                self.generate_smiles(id_protein)
        df = pd.DataFrame(all_stat, columns = ['name', 'fold', 'logP','sa','qed','weight','similarity', 'orig_logP', 'orig_sa', 'orig_qed', 'orig_weight','frequency'])
        df.to_csv(os.path.join(self.save_dir_smiles, "all_stat_new.csv"))

    def save_encodings_all(self, mode, split_no, encoder_path, decoder_path):
        r'''For every protein id in train/test generates feature and saves it
        '''
        self.mode_split = mode
        self.type_fold = self.cfg["sampling_params"]["type_fold"]
        self.folder_save = os.path.join(self.save_dir_encodings, mode)
        if not os.path.exists(self.folder_save):
            os.makedirs(self.folder_save )
        self.encoder, self.decoder = config.eval_model_captioning(self.cfg, encoder_path, decoder_path, device = self.device)
        idx_folds = pickle.load(open(os.path.join(self.idx_file, self.type_fold), "rb" ) )
        train_id, test_id = idx_folds[split_no]
        if (mode == "test"):
            idx_proteins_gen = test_id
        else:
            idx_proteins_gen = train_id
        for id_protein in idx_proteins_gen:
            self.generate_encodings(id_protein)
        # collect the per-protein encodings into one CSV
        files_encodings = os.listdir(self.folder_save)
        all_encodings = []
        for file_enc in files_encodings:
            if(file_enc[0].isdigit()):
                path_to_enc = os.path.join(self.folder_save, file_enc)
                enc_from_torch = torch.load(path_to_enc, map_location=torch.device('cpu')).view(-1).detach().numpy()
                all_encodings.append(enc_from_torch)
        all_encodings = np.asarray(all_encodings)
        name = str(self.mode_split) + "_" + str(split_no) + "_" + str(self.type_fold)+ '_' + self.model_name + "_all_encodings.csv"
        np.savetxt(os.path.join(self.save_dir_encodings, name), all_encodings, delimiter=',')

    def collect_all_encodings(self):
        r''' Writes all saved features to 1 file
        '''
        files_encodings = os.listdir(self.folder_save)
        all_encodings = []
        for file_enc in files_encodings:
            if(file_enc[0].isdigit()):
                path_to_enc = os.path.join(self.folder_save, file_enc)
                enc_from_torch = torch.load(path_to_enc, map_location=torch.device('cpu')).view(-1).detach().numpy()
                all_encodings.append(enc_from_torch)
        all_encodings = np.asarray(all_encodings)
        name = str(self.mode_split) + "_all_encodings.csv"
        np.savetxt(os.path.join(self.save_dir_encodings, name), all_encodings, delimiter=',')
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,975
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/visualisation/analysis.py
|
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
class Tree_Analysis(dict):
    """Implementation of perl's autovivification feature.

    Missing keys are created on first access as nested ``Tree_Analysis``
    instances, so arbitrarily deep paths like ``t['a']['b']['c']`` work
    without explicit initialisation.
    """

    def __getitem__(self, item):
        """Return self[item], auto-creating a nested tree on a missing key."""
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            value = self[item] = type(self)()
            return value

    def get_mean(self, sampling: str, param: str):
        """Column-wise mean over all value rows stored under
        ``self[sampling][param]``.

        FIX: the original read from an undefined global ``d`` instead of
        ``self``, and this module never imports numpy (only pandas and
        matplotlib are imported at the top), hence the local import.
        """
        import numpy as np
        mean_array = np.asarray(list(self[sampling][param].values()))
        mean = np.mean(mean_array, axis = 0)
        return mean
class plot_all():
    """Collects per-fold molecule statistics from result CSVs and renders
    similarity / property distribution plots.

    CSV files in ``path_data`` are expected to be named
    ``<method>_<fold>_<split>.csv`` (see ``build_dict``).

    NOTE(review): this class uses module-level names (``os``, ``random``,
    ``np``, ``sns``, ``pyplot``, ``mean``, ``Chem``, ``AllChem``,
    ``DataStructs``) that are not all imported at the top of this file as
    shown — confirm the real import block.
    """
    def __init__(self, path_data):
        # Directory containing one CSV per (method, fold, split) combination.
        self.path_data = path_data
        # Column names for generated-molecule properties in the CSVs.
        self.names_gen_properties = ["gen_NP", "gen_weight", "gen_logP", "gen_sa"]
        # Column names for the corresponding original-molecule properties.
        self.names_orig_properties = ['orig_NP', 'orig_weight', 'orig_logP', 'orig_sa']
        self.files = os.listdir(self.path_data)
        # Autovivifying trees: split -> property -> method -> fold -> values.
        self.dict_analysis = Tree_Analysis()
        self.dict_orig = Tree_Analysis()
        self.dict_sim = Tree_Analysis()
        # Baseline: similarity of randomly re-paired ligand SMILES.
        self.rand_sim = self.get_random_perm()
        # Maps a generated-property column to its original-property column.
        self.gen_to_orig = {"gen_NP": 'orig_NP',
                            "gen_weight": 'orig_weight',
                            "gen_logP": 'orig_logP',
                            "gen_sa": 'orig_sa'}
        self.colors = ['b', 'r', 'c', 'm', 'k', 'y', 'w']
        self.path_vis = 'plots'
        self.path_sim = os.path.join(self.path_vis, 'similarity')
        self.path_prop = os.path.join(self.path_vis, 'properties')
        os.makedirs( self.path_sim, exist_ok=True)
        os.makedirs(self.path_prop, exist_ok=True)
    def get_random_perm(self):
        """Return Dice similarities between each ligand fingerprint and the
        fingerprint of a randomly chosen other ligand (random baseline).

        Reads SMILES from ``all_smiles_lig.txt`` in the working directory.
        """
        with open("all_smiles_lig.txt") as f:
            list_smiles = f.read().splitlines()
        #random permutation
        perm = list(range(len(list_smiles)))
        random.shuffle(perm)
        perm_smiles = [list_smiles[index] for index in perm]
        mol_orig = [AllChem.GetMorganFingerprint(Chem.MolFromSmiles(smile), 2) for smile in list_smiles] #for original
        mol_perm = [AllChem.GetMorganFingerprint(Chem.MolFromSmiles(smile), 2) for smile in perm_smiles] #for permuted
        similarity = [DataStructs.DiceSimilarity(mol_orig[i],mol_perm[i]) for i in range(len(mol_orig))] #array of similarities
        return similarity
    def get_array(self, file: str, name: str):
        """Read column ``name`` from CSV ``file`` (inside path_data) as a list."""
        data = pd.read_csv(os.path.join(self.path_data, file))
        array = data[name].to_list()
        return array
    def get_dim(self):
        # Number of split strategies seen so far (e.g. rand / chain / scaffold).
        self.dim_splits = len(self.dict_analysis)
    def allign_dict(self, dict_values: list):
        """Zero-pad the lists in ``dict_values`` IN PLACE to a common length so
        they can be stacked into a rectangular numpy array."""
        dim_splits = len(dict_values)
        max_length = 0
        for i in range(dim_splits):
            if(len(dict_values[i]) > max_length):
                max_length = len(dict_values[i])
        for i in range(dim_splits):
            dict_values[i] += [0] * (max_length - len(dict_values[i]))
    def _get_average_property(self, method, name_split, property_mol):
        """Fold-averaged generated-property values for one sampling method."""
        all_l = list(self.dict_analysis[name_split][property_mol][method].values())
        self.allign_dict(all_l)
        lst = np.asarray(all_l)
        mean = np.mean(lst, axis = 0)
        return mean
    def _get_average_sim(self, method, name_split, property_mol):
        """Fold-averaged similarity values for one sampling method.

        NOTE(review): ``dict_sim`` is filled keyed only by fold in
        ``build_dict`` (no method level), so indexing it by ``method`` here
        looks inconsistent — verify against the CSV layout.
        """
        all_l = list(self.dict_sim[name_split][property_mol][method].values())
        self.allign_dict(all_l)
        lst = np.asarray(all_l)
        mean = np.mean(lst, axis = 0)
        return mean
    def _get_average_orig(self, name_split, property_mol):
        """Fold-averaged original-molecule property values."""
        all_l = list(self.dict_orig[name_split][property_mol].values())
        self.allign_dict(all_l)
        lst = np.asarray(all_l)
        mean = np.mean(lst, axis = 0)
        return mean
    def build_dict(self):
        """Parse every result CSV named ``<method>_<fold>_<split>.csv`` into
        the nested analysis dictionaries; skips known non-result files."""
        for file in self.files:
            if file != ".ipynb_checkpoints" and file != "exceptions_long.txt" and file != "stat_e3nn_prob_0.csv":
                print("file", file)
                method = file.split("_")[0]
                id_fold = file.split("_")[1]
                print("id_fold", id_fold)
                name_split = file.split("_")[2][:-4]  # strip ".csv"
                for property_name in self.names_gen_properties:
                    self.dict_analysis[name_split][property_name][method][id_fold] = self.get_array(file, property_name)
                for property_name in self.names_orig_properties:
                    self.dict_orig[name_split][property_name][id_fold] = self.get_array(file, property_name)
                self.dict_sim[name_split]["gen_similarity"][id_fold] = self.get_array(file, "gen_similarity")
        self.num_splits = len(self.dict_analysis)
        self.num_methods = len(self.dict_analysis['rand'])
    def plot_similarity(self):
        """Plot similarity distributions per split: one combined figure across
        splits plus one per-split figure with a panel per method.

        Must be called after ``build_dict`` (uses ``self.num_splits``).
        """
        num_splits = len(self.dict_analysis)
        num_methods = len(self.dict_analysis['rand'])
        # print("num_splits", num_splits)
        # print("num_methods", num_methods)
        fig, axs = plt.subplots(nrows = 1, ncols = self.num_splits)
        fig.set_figheight(15)
        fig.set_figwidth(40)
        for id_split, name_split in enumerate(list(self.dict_analysis)):
            ax_all = axs[id_split]
            fig1, axs1 = plt.subplots(nrows = 1, ncols = num_methods) #for local file for every fold type split
            # NOTE(review): this assigns a string to plt.title (shadowing the
            # function) rather than setting a title — probably unintended.
            plt.title = 'Histogram of Shear Strength'
            fig1.set_figheight(15)
            fig1.set_figwidth(40)
            fig1.suptitle(name_split, fontsize=26)
            plt.ylabel('Density')
            plt.xlabel('Similarity')
            # NOTE(review): ``pyplot`` is not defined in this file as shown —
            # presumably matplotlib.pyplot; confirm the import.
            pyplot.legend(loc='upper right')
            sns.distplot(self.rand_sim, color='yellow', hist=True, rug=False, label= 'random', ax = ax_all);
            for id_method, method_name in enumerate(list(self.dict_analysis[name_split])):
                sim_array = self._get_average_sim(method_name, name_split, "gen_similarity")
                color = self.colors[id_method]
                color_rand = self.colors[-1]
                ax1 = axs1[id_method]
                sns.distplot(sim_array, color=color, hist=True, rug=False, label= method_name, ax = ax_all);
                sns.distplot(sim_array, color='blue', hist=True, rug=False, label= method_name, ax = ax1);
                sns.distplot(self.rand_sim, color='yellow', hist=True, rug=False, label= 'random', ax = ax1);
                ax1.set_title(method_name)
                # NOTE(review): ``mean`` is not defined in this file as shown —
                # presumably statistics.mean or np.mean; confirm the import.
                mean_sim = mean(sim_array)
                mean_sim_rand = mean(self.rand_sim)
                # Dashed vertical lines mark the distribution means.
                ax_all.axvline(mean_sim, color='blue', linestyle='--')
                ax_all.axvline(mean_sim_rand, color='yellow', linestyle='--')
                ax1.axvline(mean_sim, color='blue', linestyle='--')
                ax1.axvline(mean_sim_rand, color='yellow', linestyle='--')
                ax_all.set_ylabel('Density')
                ax1.set_ylabel('Density')
                ax_all.set_xlabel('Distance Similarity')
                ax1.set_xlabel('Distance Similarity')
                ax_all.set_title(name_split)
                ax_all.legend(loc='upper right')
                ax1.legend(loc='upper right')
            name = name_split + "_sim.pdf"
            plt.savefig(os.path.join(self.path_sim, name), dpi = 600)
        name_all = "sim_all.pdf"
        fig.savefig(os.path.join(self.path_sim, name_all), dpi=600)
    def plot_properties(self):
        """Plot, per split, the distribution of each molecular property for the
        original molecules and for every sampling method."""
        num_splits = len(self.dict_analysis)
        #iterate over random/chain/scaffold split
        for id_split, name_split in enumerate(list(self.dict_analysis)):
            fig1, axs = plt.subplots(nrows = 1, ncols = 4)
            fig1.suptitle(name_split, fontsize=26)
            fig1.set_figheight(15)
            fig1.set_figwidth(40)
            #iterate over NP, weight...
            for id_property, property_name in enumerate(list(self.dict_analysis[name_split])):
                ax1 = axs[id_property]
                orig_name = self.gen_to_orig[property_name]
                print("orig name", orig_name)
                prop_array_orig = self._get_average_orig(name_split, orig_name)
                sns.distplot(prop_array_orig, hist=True, color = 'black', rug=False, label= orig_name, ax = ax1);
                mean_array_orig = mean(prop_array_orig)
                ax1.axvline(mean_array_orig, color = 'black', linestyle='--')
                #iterate over sampling (max, probabilistic)...
                for id_method, method_name in enumerate(list(self.dict_analysis[name_split][property_name])):
                    prop_array = self._get_average_property(method_name, name_split, property_name)
                    color = self.colors[id_method]
                    sns.distplot(prop_array, hist=True, color = color, rug=False, label= method_name, ax = ax1);
                    mean_prop_array = mean(prop_array)
                    ax1.axvline(mean_prop_array, color = color, linestyle='--')
                ax1.set_ylabel('Density')
                ax1.set_title(property_name)
                ax1.legend(loc=
                           'upper right')
            name = name_split + "_prop.pdf"
            plt.savefig(os.path.join(self.path_prop, name), dpi=600)
    def run(self):
        """Build the dictionaries then render both plot families."""
        self.build_dict()
        self.plot_similarity()
        self.plot_properties()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,976
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/encoder_resnet.py
|
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torchvision
from torch import nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.autograd import Variable
from se3cnn.non_linearities.rescaled_act import Softplus
from se3cnn.point.kernel import Kernel
from se3cnn.point.operations import NeighborsConvolution
from se3cnn.point.radial import CosineBasisModel
from model.encoder.resnet import ResnetPointnet, ResnetPointnet_4, ResnetBlockFC
from e3nn.rsh import spherical_harmonics_xyz
from e3nn.non_linearities.rescaled_act import Softplus
# from e3nn.point.operations import NeighborsConvolution
from e3nn.radial import CosineBasisModel
from e3nn.kernel import Kernel
# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Maximum sequence/atom length — presumably used by callers of this module; TODO confirm usage.
MAX_Length = 245
class Encoder_Resnet_after_se3ACN(nn.Module):
    """
    Architecture of molecular ACN model using se3 equivariant functions.

    SE(3)-equivariant neighbor convolutions over an atomic point cloud,
    followed by a ResNet point network (applied AFTER the clouds) and a
    pooling step that reduces atoms to a single per-molecule latent vector.
    """
    def __init__(
        self,
        device=DEVICE,
        nclouds=3, #1-3
        natoms=286,
        cloud_dim=8, # 4-96 !
        neighborradius=3,
        nffl=1,
        ffl1size=512,
        num_embeddings=6,
        emb_dim=4, #12-not so important
        cloudord=1,
        nradial=3,
        nbasis=3,
        rad_neurons = 150,
        Z=True,
        lat_out = 128
    ):
        # emb_dim=4 - experimentals
        super(Encoder_Resnet_after_se3ACN, self).__init__()
        self.num_embeddings = num_embeddings
        self.device = device
        self.natoms = natoms
        self.Z = Z  # Embedding if True, ONE-HOT if False
        self.emb_dim = emb_dim
        self.cloud_res = True
        self.leakyrelu = nn.LeakyReLU(0.2)  # Relu
        self.relu = nn.ReLU()
        self.feature_collation = "pool"  # pool or 'sum'
        self.nffl = nffl
        self.ffl1size = ffl1size
        # Cloud specifications
        self.nclouds = nclouds
        self.cloud_order = cloudord
        self.cloud_dim = cloud_dim
        self.radial_layers = nradial
        self.sp = Softplus(beta=5)
        # self.sh = spherical_harmonics_xyz
        # Embedding
        self.emb = nn.Embedding(
            num_embeddings=self.num_embeddings, embedding_dim=self.emb_dim
        )
        # Radial Model
        self.number_of_basis = nbasis
        self.neighbor_radius = neighborradius
        # NOTE(review): rad_neurons is accepted but h is hard-coded to 150 below.
        self.RadialModel = partial(
            CosineBasisModel,
            max_radius=self.neighbor_radius,  # radius
            number_of_basis=self.number_of_basis,  # basis
            h=150,  # ff neurons
            L=self.radial_layers,  # ff layers
            act=self.sp,
        )  # activation
        # Kernel
        self.K = partial(
            Kernel,
            RadialModel=self.RadialModel,
            # sh=self.sh,
            normalization="norm",
        )
        # Embedding
        self.clouds = nn.ModuleList()
        if self.Z:
            dim_in = self.emb_dim
        else:
            dim_in = 6  # ONE HOT VECTOR, 6 ATOMS HCONF AND PADDING = 6
        dim_out = self.cloud_dim
        self.lat_out = lat_out
        Rs_in = [(dim_in, o) for o in range(1)]
        Rs_out = [(dim_out, o) for o in range(self.cloud_order)]
        for c in range(self.nclouds):
            # Cloud: each convolution's output representation feeds the next.
            self.clouds.append(
                NeighborsConvolution(self.K, Rs_in, Rs_out, neighborradius)
            )
            Rs_in = Rs_out
        if self.cloud_res:
            cloud_out = self.cloud_dim * (self.cloud_order ** 2) * self.nclouds
        else:
            cloud_out = self.cloud_dim * (self.cloud_order ** 2)
        # Cloud residuals
        in_shape = cloud_out
        # passing molecular features after pooling through output layer
        # NOTE(review): e_out_1/e_out_2 and the batch norms are constructed but
        # not used in forward() as shown — confirm whether they are dead code.
        self.e_out_1 = nn.Linear(cloud_out, cloud_out)
        self.bn_out_1 = nn.BatchNorm1d(cloud_out)
        self.e_out_2 = nn.Linear(cloud_out, 2 * cloud_out)
        self.bn_out_2 = nn.BatchNorm1d(2 * cloud_out)
        # Final output activation layer
        # self.layer_to_atoms = nn.Linear(
        #     ff_in_shape, natoms
        # )  # linear output layer from ff_in_shape hidden size to the number of atoms
        self.act = (
            nn.Sigmoid()
        )  # y is scaled between 0 and 1, better than ReLu of tanh for U0
        # ResNet point network applied to the concatenated cloud features.
        self.resnet_block = ResnetPointnet_4(cloud_out, self.lat_out)
    def forward(self, features, xyz, masks):
        """Encode a batch of molecules to latent vectors.

        :param features: per-atom type indices (embedded when self.Z) or one-hot
        :param xyz: per-atom coordinates
        :param masks: accepted but unused in this forward pass
        :return: tensor of shape [batch, lat_out]
        """
        # print("xyz input shape", xyz.shape)
        # print("Z input shape", Z.shape)
        # xyz -
        # Z -
        if self.Z:
            features = self.emb(features).to(self.device)
        else:
            features = features.to(self.device)
        xyz = xyz.to(torch.double)
        features = features.to(torch.double)
        features = features.squeeze(2)
        feature_list = []
        for _, op in enumerate(self.clouds):
            features = op(features, xyz)
            feature_list.append(features)
        # self.res = nn.Linear(in_shape, in_shape)
        # features_linear = F.relu(self.res(features)) #features from linear layer operation
        # add all received features to common list
        # feature_list.append(features_linear)
        # Concatenate features from clouds
        features = (
            torch.cat(feature_list, dim=2).to(torch.double).to(self.device)
        )  # shape [batch, n_atoms, cloud_dim * cloud_order ** 2 * nclouds]
        features = self.resnet_block(features)  #shape [batch, n_atoms, lat_out]
        #!! maybe use transformer, you have n_atoms with N features. You may define H "heads"
        # and then do Q, K, V as described in the article: https://arxiv.org/pdf/2004.08692.pdf
        # Pooling: Sum/Average/pool2D
        if "sum" in self.feature_collation:  #here attention!
            features = features.sum(1)
        elif "pool" in self.feature_collation:
            features = F.lp_pool2d(
                features,
                norm_type=2,
                kernel_size=(features.shape[1], 1),
                ceil_mode=False,
            )
        features = features.squeeze(1)  # shape [batch, cloud_dim * (self.cloud_order ** 2) * nclouds
        # features = self.leakyrelu(self.bn_out_1(self.e_out_1(features)))  # shape [batch, 2 * cloud_dim * (self.cloud_order ** 2) * nclouds]
        print("shape final features", features.shape)
        return features  #shape [batch, lat_out]
class Encoder_Resnet_feat_geom_se3ACN(nn.Module):
    """
    Architecture of molecular ACN model using se3 equivariant functions.

    Variant that runs a ResNet point network on [xyz, embedded features] in
    PARALLEL with the SE(3) clouds, then concatenates both branches before
    pooling to a single per-molecule latent vector.
    """
    def __init__(
        self,
        device=DEVICE,
        nclouds=3, #1-3
        natoms=286,
        cloud_dim=8, # 4-96 !
        neighborradius=3,
        nffl=1,
        ffl1size=512,
        num_embeddings=6,
        emb_dim=4, #12-not so important
        cloudord=1,
        nradial=3,
        nbasis=3,
        rad_neurons = 150,
        Z=True,
        lat_out = 32
    ):
        # emb_dim=4 - experimentals
        super(Encoder_Resnet_feat_geom_se3ACN, self).__init__()
        self.num_embeddings = num_embeddings
        self.device = device
        self.natoms = natoms
        self.Z = Z  # Embedding if True, ONE-HOT if False
        self.emb_dim = emb_dim
        self.cloud_res = True
        self.leakyrelu = nn.LeakyReLU(0.2)  # Relu
        self.relu = nn.ReLU()
        self.feature_collation = "pool"  # pool or 'sum'
        self.nffl = nffl
        self.ffl1size = ffl1size
        # Cloud specifications
        self.nclouds = nclouds
        self.cloud_order = cloudord
        self.cloud_dim = cloud_dim
        self.radial_layers = nradial
        self.sp = Softplus(beta=5)
        # self.sh = spherical_harmonics_xyz
        # Embedding
        self.emb = nn.Embedding(
            num_embeddings=self.num_embeddings, embedding_dim=self.emb_dim
        )
        self.lat_out = lat_out
        # Radial Model
        self.number_of_basis = nbasis
        self.neighbor_radius = neighborradius
        # NOTE(review): rad_neurons is accepted but h is hard-coded to 150 below.
        self.RadialModel = partial(
            CosineBasisModel,
            max_radius=self.neighbor_radius,  # radius
            number_of_basis=self.number_of_basis,  # basis
            h=150,  # ff neurons
            L=self.radial_layers,  # ff layers
            act=self.sp,
        )  # activation
        # Kernel
        self.K = partial(
            Kernel,
            RadialModel=self.RadialModel,
            # sh=self.sh,
            normalization="norm",
        )
        # Embedding
        self.clouds = nn.ModuleList()
        if self.Z:
            dim_in = self.emb_dim
        else:
            dim_in = 6  # ONE HOT VECTOR, 6 ATOMS HCONF AND PADDING = 6
        dim_out = self.cloud_dim
        Rs_in = [(dim_in, o) for o in range(1)]
        Rs_out = [(dim_out, o) for o in range(self.cloud_order)]
        for c in range(self.nclouds):
            # Cloud: each convolution's output representation feeds the next.
            self.clouds.append(
                NeighborsConvolution(self.K, Rs_in, Rs_out, neighborradius)
            )
            Rs_in = Rs_out
        if self.cloud_res:
            cloud_out = self.cloud_dim * (self.cloud_order ** 2) * self.nclouds
        else:
            cloud_out = self.cloud_dim * (self.cloud_order ** 2)
        # Cloud residuals
        in_shape = cloud_out
        # passing molecular features after pooling through output layer
        # NOTE(review): e_out_1/e_out_2 and the batch norms are constructed but
        # not used in forward() as shown — confirm whether they are dead code.
        self.e_out_1 = nn.Linear(cloud_out, cloud_out)
        self.bn_out_1 = nn.BatchNorm1d(cloud_out)
        self.e_out_2 = nn.Linear(cloud_out, 2 * cloud_out)
        self.bn_out_2 = nn.BatchNorm1d(2 * cloud_out)
        # Final output activation layer
        # self.layer_to_atoms = nn.Linear(
        #     ff_in_shape, natoms
        # )  # linear output layer from ff_in_shape hidden size to the number of atoms
        self.act = (
            nn.Sigmoid()
        )  # y is scaled between 0 and 1, better than ReLu of tanh for U0
        # Parallel branch: ResNet over coordinates concatenated with embeddings.
        self.resnet_block = ResnetPointnet(self.emb_dim + 3, self.lat_out)
    def forward(self, features, xyz, masks):
        """Encode a batch of molecules to latent vectors.

        :param features: per-atom type indices (embedded when self.Z) or one-hot
        :param xyz: per-atom coordinates
        :param masks: accepted but unused in this forward pass
        :return: tensor of shape [batch, cloud_out + lat_out]
        """
        # print("xyz input shape", xyz.shape)
        # print("Z input shape", Z.shape)
        # xyz -
        # Z -
        if self.Z:
            features = self.emb(features).to(self.device)
        else:
            features = features.to(self.device)
        xyz = xyz.to(torch.double)
        features = features.to(torch.double)
        features = features.squeeze(2)
        # Parallel ResNet branch over [xyz, atom-features].
        features_all = torch.cat([xyz, features], dim=2)
        features_all = self.resnet_block(features_all)
        feature_list = []
        for _, op in enumerate(self.clouds):
            features = op(features, xyz)
            feature_list.append(features)
        # self.res = nn.Linear(in_shape, in_shape)
        # features_linear = F.relu(self.res(features)) #features from linear layer operation
        # add all received features to common list
        # feature_list.append(features_linear)
        # Concatenate features from clouds
        features = (
            torch.cat(feature_list, dim=2).to(torch.double).to(self.device)
        )  # shape [batch, n_atoms, cloud_dim * cloud_order ** 2 * nclouds]
        features_out = torch.cat([features, features_all], dim=2)  #shape [batch, n_atoms, cloud_dim * cloud_order ** 2 * nclouds + lat_out]
        #!! maybe use transformer, you have n_atoms with N features. You may define H "heads"
        # and then do Q, K, V as described in the article: https://arxiv.org/pdf/2004.08692.pdf
        # print("\nfeatures before pooling", features.shape) # shape [batch, ]
        # Pooling: Sum/Average/pool2D
        if "sum" in self.feature_collation:  #here attention!
            features_out = features_out.sum(1)
        elif "pool" in self.feature_collation:
            features_out = F.lp_pool2d(
                features_out,
                norm_type=2,
                kernel_size=(features_out.shape[1], 1),
                ceil_mode=False,
            )
        features_out = features_out.squeeze(1)  # shape [batch, cloud_dim * (self.cloud_order ** 2) * nclouds
        # features = self.leakyrelu(self.bn_out_1(self.e_out_1(features)))  # shape [batch, 2 * cloud_dim * (self.cloud_order ** 2) * nclouds]
        print("shape final features", features_out.shape)
        return features_out  #shape [batch, cloud_dim * cloud_order ** 2 * nclouds + lat_out]
class Encoder_Resnet_geom_se3ACN(nn.Module):
    """
    Architecture of molecular ACN model using se3 equivariant functions.

    Variant that runs a ResNet point network on the raw coordinates only
    (geometry branch) in parallel with the SE(3) clouds, concatenating both
    branches before pooling to a per-molecule latent vector.
    """
    def __init__(
        self,
        device=DEVICE,
        nclouds=3, #1-3
        natoms=286,
        cloud_dim=8, # 4-96 !
        neighborradius=3,
        nffl=1,
        ffl1size=512,
        num_embeddings=6,
        emb_dim=4, #12-not so important
        cloudord=1,
        nradial=3,
        nbasis=3,
        rad_neurons = 150,
        Z=True,
        lat_out = 32
    ):
        # emb_dim=4 - experimentals
        super(Encoder_Resnet_geom_se3ACN, self).__init__()
        self.num_embeddings = num_embeddings
        self.device = device
        self.natoms = natoms
        self.Z = Z  # Embedding if True, ONE-HOT if False
        self.emb_dim = emb_dim
        self.cloud_res = True
        self.leakyrelu = nn.LeakyReLU(0.2)  # Relu
        self.relu = nn.ReLU()
        self.feature_collation = "pool"  # pool or 'sum'
        self.nffl = nffl
        self.ffl1size = ffl1size
        # Cloud specifications
        self.nclouds = nclouds
        self.cloud_order = cloudord
        self.cloud_dim = cloud_dim
        self.radial_layers = nradial
        self.sp = Softplus(beta=5)
        # self.sh = spherical_harmonics_xyz
        # Embedding
        self.emb = nn.Embedding(
            num_embeddings=self.num_embeddings, embedding_dim=self.emb_dim
        )
        # Radial Model
        self.number_of_basis = nbasis
        self.neighbor_radius = neighborradius
        # NOTE(review): rad_neurons is accepted but h is hard-coded to 150 below.
        self.RadialModel = partial(
            CosineBasisModel,
            max_radius=self.neighbor_radius,  # radius
            number_of_basis=self.number_of_basis,  # basis
            h=150,  # ff neurons
            L=self.radial_layers,  # ff layers
            act=self.sp,
        )  # activation
        # Kernel
        self.K = partial(
            Kernel,
            RadialModel=self.RadialModel,
            # sh=self.sh,
            normalization="norm",
        )
        # Embedding
        self.clouds = nn.ModuleList()
        if self.Z:
            dim_in = self.emb_dim
        else:
            dim_in = 6  # ONE HOT VECTOR, 6 ATOMS HCONF AND PADDING = 6
        dim_out = self.cloud_dim
        self.lat_out = lat_out
        Rs_in = [(dim_in, o) for o in range(1)]
        Rs_out = [(dim_out, o) for o in range(self.cloud_order)]
        for c in range(self.nclouds):
            # Cloud: each convolution's output representation feeds the next.
            self.clouds.append(
                NeighborsConvolution(self.K, Rs_in, Rs_out, neighborradius)
            )
            Rs_in = Rs_out
        if self.cloud_res:
            cloud_out = self.cloud_dim * (self.cloud_order ** 2) * self.nclouds
        else:
            cloud_out = self.cloud_dim * (self.cloud_order ** 2)
        # Cloud residuals
        in_shape = cloud_out
        # passing molecular features after pooling through output layer
        # NOTE(review): e_out_1/e_out_2 and the batch norms are constructed but
        # not used in forward() as shown — confirm whether they are dead code.
        self.e_out_1 = nn.Linear(cloud_out, cloud_out)
        self.bn_out_1 = nn.BatchNorm1d(cloud_out)
        self.e_out_2 = nn.Linear(cloud_out, 2 * cloud_out)
        self.bn_out_2 = nn.BatchNorm1d(2 * cloud_out)
        # Final output activation layer
        # self.layer_to_atoms = nn.Linear(
        #     ff_in_shape, natoms
        # )  # linear output layer from ff_in_shape hidden size to the number of atoms
        self.act = (
            nn.Sigmoid()
        )  # y is scaled between 0 and 1, better than ReLu of tanh for U0
        # Geometry branch: ResNet over the 3D coordinates only.
        self.resnet_block = ResnetPointnet(3, self.lat_out)
    def forward(self, features, xyz, masks):
        """Encode a batch of molecules to latent vectors.

        :param features: per-atom type indices (embedded when self.Z) or one-hot
        :param xyz: per-atom coordinates
        :param masks: accepted but unused in this forward pass
        :return: tensor of shape [batch, cloud_out + lat_out]
        """
        # print("xyz input shape", xyz.shape)
        # print("Z input shape", Z.shape)
        # xyz -
        # Z -
        if self.Z:
            features = self.emb(features).to(self.device)
        else:
            features = features.to(self.device)
        xyz = xyz.to(torch.double)
        features = features.to(torch.double)
        features = features.squeeze(2)
        # Geometry branch over the raw coordinates.
        geom_resnet = self.resnet_block(xyz)
        feature_list = []
        for _, op in enumerate(self.clouds):
            features = op(features, xyz)
            feature_list.append(features)
        # self.res = nn.Linear(in_shape, in_shape)
        # features_linear = F.relu(self.res(features)) #features from linear layer operation
        # add all received features to common list
        # feature_list.append(features_linear)
        # Concatenate features from clouds
        features = (
            torch.cat(feature_list, dim=2).to(torch.double).to(self.device)
        )  # shape [batch, n_atoms, cloud_dim * cloud_order ** 2 * nclouds]
        features_out = torch.cat([features, geom_resnet], dim=2)  #shape [batch, n_atoms, cloud_dim * cloud_order ** 2 * nclouds + lat_out]
        #!! maybe use transformer, you have n_atoms with N features. You may define H "heads"
        # and then do Q, K, V as described in the article: https://arxiv.org/pdf/2004.08692.pdf
        # print("\nfeatures before pooling", features.shape) # shape [batch, ]
        # Pooling: Sum/Average/pool2D
        if "sum" in self.feature_collation:  #here attention!
            features_out = features_out.sum(1)
        elif "pool" in self.feature_collation:
            features_out = F.lp_pool2d(
                features_out,
                norm_type=2,
                kernel_size=(features_out.shape[1], 1),
                ceil_mode=False,
            )
        features_out = features_out.squeeze(1)  # shape [batch, cloud_dim * (self.cloud_order ** 2) * nclouds
        # features = self.leakyrelu(self.bn_out_1(self.e_out_1(features)))  # shape [batch, 2 * cloud_dim * (self.cloud_order ** 2) * nclouds]
        print("shape final features", features_out.shape)
        return features_out  #shape [batch, cloud_dim * cloud_order ** 2 * nclouds + lat_out]
class Encoder_Resnet(nn.Module):
    """Point-cloud encoder without SE(3) clouds.

    Embeds atom types (or takes them one-hot), concatenates them with the 3D
    coordinates, runs a ResNet point network over the atoms, and pools to a
    single latent vector per molecule.
    """

    def __init__(
        self,
        device=DEVICE,
        num_embeddings=6,
        emb_dim=4, #12-not so important
        Z=True,
        feature_collation = "pool",
        lat_out = 256
    ):
        super(Encoder_Resnet, self).__init__()
        # emb_dim=4 - experimentals
        self.Z = Z                      # Embedding if True, one-hot input if False
        self.num_embeddings = num_embeddings
        self.device = device
        self.feature_collation = feature_collation  # "pool" or "sum"
        self.emb_dim = emb_dim
        self.cloud_res = True
        self.leakyrelu = nn.LeakyReLU(0.2)  # Relu
        self.relu = nn.ReLU()
        # self.sh = spherical_harmonics_xyz
        # Atom-type embedding table.
        self.emb = nn.Embedding(
            num_embeddings=self.num_embeddings, embedding_dim=self.emb_dim
        )
        self.lat_out = lat_out
        # y is scaled between 0 and 1, better than ReLu of tanh for U0
        self.act = nn.Sigmoid()
        # ResNet over [xyz (3) | embedding (emb_dim)] per atom.
        self.resnet_block = ResnetPointnet(self.emb_dim + 3, self.lat_out)

    def forward(self, features, xyz, masks):
        """Encode a batch; ``masks`` is accepted but unused.

        :return: tensor of shape [batch, lat_out]
        """
        # Atom identities: embed integer Z values, or keep the one-hot input.
        if self.Z:
            features = self.emb(features).to(self.device)
        else:
            features = features.to(self.device)
        xyz = xyz.to(torch.double)
        features = features.to(torch.double).squeeze(2)
        # Per-atom input = coordinates concatenated with type features.
        features_all = torch.cat([xyz, features], dim=2)
        print("shape feat before resnet", features_all.shape)
        features_all = self.resnet_block(features_all)
        # Pool over the atom dimension: sum or L2 pooling.
        if "sum" in self.feature_collation: #here attention!
            features_all = features_all.sum(1)
        elif "pool" in self.feature_collation:
            features_all = F.lp_pool2d(
                features_all,
                norm_type=2,
                kernel_size=(features_all.shape[1], 1),
                ceil_mode=False,
            )
        features_all = features_all.squeeze(1)
        print("shape final features", features_all.shape)
        return features_all #shape [batch, lat_out]
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,977
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/training/utils.py
|
import os
import torch
def save_checkpoint(checkpoint_path, start_epoch, encoder, decoder,
                    encoder_best, decoder_best, caption_optimizer, split_no):
    """
    Saves a training checkpoint with torch.save.

    :param checkpoint_path: destination file path
    :param start_epoch: epoch to resume from when the checkpoint is loaded
    :param encoder: encoder model (its state_dict is stored)
    :param decoder: decoder model (its state_dict is stored)
    :param encoder_best: best encoder so far — accepted for interface
        compatibility but currently NOT stored
    :param decoder_best: best decoder so far — accepted for interface
        compatibility but currently NOT stored
    :param caption_optimizer: optimizer — stored as the whole object, not its
        state_dict (the loader must expect that)
    :param split_no: index of the cross-validation split being trained
    """
    state = {'start_epoch': start_epoch,
             'encoder': encoder.state_dict(),
             'decoder': decoder.state_dict(),
             'caption_optimizer': caption_optimizer,
             'split_no': split_no,
             }
    torch.save(state, checkpoint_path)
def save_checkpoint_sampling(checkpoint_path, idx_sampling, idx_sample_regime_start):
    """Persist sampling progress so an interrupted sampling run can resume.

    :param checkpoint_path: destination file path for torch.save
    :param idx_sampling: index of the next sample to generate
    :param idx_sample_regime_start: index of the sampling regime to resume at
    """
    torch.save(
        {
            'idx_sample_start': idx_sampling,
            'idx_sample_regime_start': idx_sample_regime_start,
        },
        checkpoint_path,
    )
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,978
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/resnet.py
|
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.autograd import Variable
def maxpool(x, dim=-1, keepdim=False):
    """Max-reduce ``x`` along ``dim``, discarding the argmax indices."""
    return x.max(dim=dim, keepdim=keepdim)[0]
# Resnet Blocks
class ResnetBlockFC(nn.Module):
    ''' Fully connected ResNet Block class.

    Computes ``shortcut(x) + fc_1(relu(fc_0(relu(x))))`` where the shortcut is
    the identity when input and output sizes match, and a bias-free linear
    projection otherwise. ``fc_1``'s weight is zero-initialised so the block
    starts out close to the (projected) identity.

    Args:
        size_in (int): input dimension
        size_out (int): output dimension (defaults to size_in)
        size_h (int): hidden dimension (defaults to min(size_in, size_out))
    '''

    def __init__(self, size_in, size_out=None, size_h=None):
        super().__init__()
        # Fill in defaults for the output and hidden sizes.
        size_out = size_in if size_out is None else size_out
        size_h = min(size_in, size_out) if size_h is None else size_h
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Two-layer residual branch.
        self.fc_0 = nn.Linear(size_in, size_h)
        self.fc_1 = nn.Linear(size_h, size_out)
        self.actvn = nn.ReLU()
        # Identity shortcut when shapes match, projection otherwise.
        self.shortcut = (
            None if size_in == size_out
            else nn.Linear(size_in, size_out, bias=False)
        )
        # Zero weight on the last layer => residual branch starts near zero.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        hidden = self.fc_0(self.actvn(x))
        residual = self.fc_1(self.actvn(hidden))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + residual
class ResnetPointnet(nn.Module):
    """PointNet-style encoder built from fully connected ResNet blocks.

    Each stage max-pools over the point axis, broadcasts the pooled vector
    back, and concatenates it to every point, so each point sees a global
    summary of the whole set.

    Args:
        dim (int): dimension of each input point (last axis of the input).
        hidden_dim (int): hidden feature dimension of the network.
    """

    def __init__(self, dim=None, hidden_dim=None):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        # Point-wise lift into feature space.
        self.fc_pos = nn.Linear(dim, 2*hidden_dim)
        self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        # NOTE(review): block_4 is unused in forward() (its stage is commented
        # out below), but it is kept so existing checkpoints and RNG-dependent
        # initialisation stay compatible.
        self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        # Activation & pooling
        self.actvn = nn.ReLU()
        self.pool = maxpool
        is_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if is_cuda else "cpu")

    def forward(self, p):
        """Encode a point set.

        Args:
            p: tensor of shape (batch, T, dim) -- T points per sample
               (assumed 3-D; TODO confirm against callers).

        Returns:
            Tensor of shape (batch, T, hidden_dim) with per-point features.
        """
        # BUGFIX: removed leftover debug `print("D", D)` and the unused
        # batch_size/T/D unpacking that only fed it.
        net = self.fc_pos(p)
        net = self.block_0(net)
        # Pool over points, broadcast back, append as global context.
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_1(net)
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_2(net)
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        net = torch.cat([net, pooled], dim=2)
        net = self.block_3(net)
        # pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        # net = torch.cat([net, pooled], dim=2)
        # net = self.block_4(net)  # batch_size x T x hidden_dim (T: number of sampled input points)
        return net
class ResnetPointnet_4(nn.Module):
    """Four-stage variant of ResnetPointnet (uses block_4 as well).

    Args:
        dim (int): dimension of each input point (last axis of the input).
        hidden_dim (int): hidden feature dimension of the network.
    """

    def __init__(self, dim=None, hidden_dim=None):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        # Point-wise lift into feature space.
        self.fc_pos = nn.Linear(dim, 2*hidden_dim)
        self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        # Activation & pooling
        self.actvn = nn.ReLU()
        self.pool = maxpool
        is_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if is_cuda else "cpu")

    def forward(self, p):
        """Encode a point set.

        Args:
            p: tensor of shape (batch, T, dim) -- T points per sample.

        Returns:
            Tensor of shape (batch, T, hidden_dim) with per-point features.
        """
        # BUGFIX: removed leftover debug `print("D", D)` and the unused
        # batch_size/T/D unpacking; the four identical copy-pasted
        # pool/concat/block stanzas are folded into one loop (same order,
        # same operations, identical results).
        net = self.fc_pos(p)
        net = self.block_0(net)
        for block in (self.block_1, self.block_2, self.block_3, self.block_4):
            # Pool over points, broadcast back, append as global context.
            pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
            net = torch.cat([net, pooled], dim=2)
            net = block(net)
        return net  # batch_size x T x hidden_dim (T: number of sampled input points)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,979
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/tests/datasets/feature.py
|
import os
import shutil
from distutils.dir_util import copy_tree
import multiprocessing
from multiprocessing import Pool
from functools import partial
import re
import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from moleculekit.molecule import Molecule
import pandas as pd
# from moleculekit.smallmol.smallmol import SmallMol
from torch import nn
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from moleculekit.tools.atomtyper import prepareProteinForAtomtyping, getFeatures
from moleculekit.tools.voxeldescriptors import getChannels
# import dictionary of atoms' types and hot encoders
from src.datasets.dictionaries import atom_most_common, dict_atoms_hot, dict_atoms_simple, dict_atoms_masses, dict_atoms_charges
from src.utils.checkpoint import save_checkpoint_feature
import src.utils.config as config
import argparse
# from dict
class Featuring():
    """Pre-computes per-atom feature and geometry tensors for PDBbind proteins.

    For every protein folder under ``<path_root>/data/new_refined/`` this class
    can (a) build a per-atom feature matrix (one-hot, mass-on-charge-index, or
    pharmacophoric channels), (b) keep only atoms inside a sphere of
    ``radious`` around the ligand's geometric centre, and (c) pad the
    surviving atoms to a common maximum length and cache feature/mask/geo
    tensors to disk next to each protein.
    """

    def __init__(self, cfg, radious, type_feature, type_filtering, h_filterig):
        """uses cfg file which is given as arg in "python train_captioning.py"

        Args:
            cfg: parsed configuration mapping (reads 'preprocessing' and 'data').
            radious: selection radius around the ligand centre.
            type_feature: 'hot_simple' | 'mass_charges' | 'bio_properties'
                | 'bio_all_properties' (see _get_features_geo).
            type_filtering: atom-selection mode; only 'all' is handled in
                _get_mask_selected_atoms_pocket.
            h_filterig: 'h' (exclude hydrogens) or '-h' (keep them).
        """
        print("feat test!!")
        self.path_root = cfg['preprocessing']['path_root']
        self.path_data = cfg['data']['path']
        # CSV log of which (radius, feature, filtering, h) combos were produced.
        self.path_checkpoint = os.path.join(self.path_data, "preprocess_checkpoint.csv")
        # NOTE(review): open(...).close() returns None, so this attribute is
        # always None -- the call merely touches/creates the file.
        self.file_checkpoint_data = open(self.path_checkpoint, "a+").close()
        # self.file_checkpoint_data.close()
        if (len(open(self.path_checkpoint).readlines()) == 0):
            print("creating the file...")
            with open(self.path_checkpoint, "a+") as f:
                f.write('radious,type_feature,type_filtering,h_filterig'+ "\n")
        self.init_refined = self.path_root + "/data/new_refined/"
        self.init_casf = self.path_root + "/data/new_core_2016/"
        # Lookup tables for the different featurisations.
        self.dict_atoms = dict_atoms_hot
        self.dict_atoms_simple = dict_atoms_simple
        self.dict_words = atom_most_common
        self.dict_atoms_masses = dict_atoms_masses
        self.dict_atoms_charges = dict_atoms_charges
        self.radious = radious
        self.type_feature = type_feature
        self.type_filtering = type_filtering
        self.h_filterig = h_filterig
        ##################refined files###################
        # Protein folders whose names start with a digit (PDB ids), sorted.
        self.files_refined = os.listdir(self.init_refined)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.idx_files_refined = list(range(0, len(self.files_refined)))
        # self.idx_files_refined = [0, 1]
        self.max_length = 0
        # NOTE(review): the commented-out block below is the only place where
        # idx_max_length / idx_write / path_checkpoint_features were set;
        # methods that still reference them will raise AttributeError
        # (see write_filtered_pad_feat_geo and _get_length_max).
        # array_names = [str(radious), self.type_feature, self.type_filtering, self.h_filterig]
        # self.name_checkpoint_features = '_'.join(array_names)
        # os.makedirs(os.path.join(self.path_data, "checkpoints"), exist_ok=True)
        # self.path_checkpoint_features = os.path.join(self.path_data, "checkpoints", self.name_checkpoint_features + ".pkl")
        # if (os.path.exists(self.path_checkpoint_features)):
        #     print("loading feature ids...")
        #     checkpoint_features = torch.load(self.path_checkpoint_features)
        #     self.idx_max_length = checkpoint_features['idx_max_length']
        #     self.max_length = checkpoint_features['max_length']
        #     self.idx_write = checkpoint_features['idx_write']
        # else:
        #     self.idx_max_length = 130
        #     self.max_length = 0
        #     self.idx_write = 0
        #     save_checkpoint_feature(self.path_checkpoint_features, self.idx_max_length, self.max_length, self.idx_write)
        #     self.max_length = 0
        #     self.write_filtered_pad_feat_geo()
        # else:
        #     f, m, g = self._get_feat_geo_from_file(0)
        #     self.max_length = f.shape[0]

    def run_parallel_write(self):
        """Write padded feature/mask/geo tensors for every protein (8 workers),
        then log the parameter combination to the checkpoint CSV."""
        with Pool(processes=8) as pool:
            pool.map(self.write_padd_feat_geo, self.idx_files_refined)
        self.write_checkpoint()

    def run_parallel_max_length(self):
        """Set self.max_length to the max filtered atom count over all proteins
        (computed in parallel with 8 workers)."""
        with Pool(processes=8) as pool:
            lengthes = pool.map(self._get_length, self.idx_files_refined)
        # lengthes = []
        # with Pool(processes=8) as pool:
        #     with tqdm(total=len(self.idx_files_refined)) as pbar:
        #         for i, res in tqdm(enumerate(pool.imap_unordered(self._get_length, self.idx_files_refined))):
        #             lengthes.append(res)
        #             pbar.update()
        # lengthes = list(tqdm.tqdm(pool.imap(self._get_length, self.idx_files_refined), total=len(self.idx_files_refined)))
        # lengthes = pool.map(self._get_length, self.idx_files_refined)
        self.max_length = max(lengthes)
        print("********max********* - ", self.max_length)

    def _get_length(self, pdb_id):
        """Return the number of atoms that survive filtering for `pdb_id`."""
        features_filt, geo_filt = self._get_features_geo_filtered(pdb_id)
        length = features_filt.shape[0]
        return length

    def write_padd_feat_geo(self, id):
        """Pad protein `id`'s tensors to self.max_length and save them to disk."""
        feat_filt_padded, masks, geo_filt_padded = self._get_features_geo_padded(id, self.max_length)
        path_feature, path_mask, path_geo = self._get_name_save(id)
        torch.save(feat_filt_padded, path_feature)
        torch.save(masks, path_mask)
        torch.save(geo_filt_padded, path_geo)

    def _get_feat_geo_from_file(self, id):
        """reads torch tensors of feature/geo from files
        Args:
            id ([int]): [pdb id of a protein]
        Returns:
            [torch.array]: [feature/geo padded filtered tensors from saved files]
        """
        path_feature, path_mask, path_geo = self._get_name_save(id)
        # Loaded on CPU so this works on machines without a GPU.
        feature_filt_padded = torch.load(path_feature, map_location=torch.device('cpu')).long()
        mask = torch.load(path_mask, map_location=torch.device('cpu'))
        geo_filt_padded = torch.load(path_geo, map_location=torch.device('cpu'))
        return feature_filt_padded, mask, geo_filt_padded

    def write_filtered_pad_feat_geo(self):
        """1. calculates max length of feat/gep tensors
        2. padds feat/geo tensors with zeros till the max length
        3. writes resulting tensor to the file

        NOTE(review): reads self.idx_write and self.path_checkpoint_features,
        which are only assigned in commented-out __init__ code -- calling this
        as written raises AttributeError; confirm before re-enabling.
        """
        length_max = self._get_length_max()
        data_list = range(self.idx_write, len(self.files_refined))
        # length_max = 150
        progress = tqdm(data_list)
        for id in progress:
            progress.set_postfix({'pdb': self.files_refined[id]})
            feat_filt_padded, masks, geo_filt_padded = self._get_features_geo_padded(id, length_max)
            path_feature, path_mask, path_geo = self._get_name_save(id)
            torch.save(feat_filt_padded, path_feature)
            torch.save(masks, path_mask)
            torch.save(geo_filt_padded, path_geo)
            save_checkpoint_feature(self.path_checkpoint_features, len(self.files_refined), self.max_length, id)
        self.write_checkpoint()

    def _get_name_save(self, id: int):
        """creates a path name for feature/geo
        Example:
            1a1e_feature_r_5_hot_simple_all_no_h.pt
            1a1e_geo_r_5_hot_simple_all_no_h.pt
        Args:
            id ([int]): [pdb id of a protein]
        Returns:
            [str]: [path name for feature and geometry]
        """
        # print("id", id)
        name_protein = self.files_refined[id]
        # File names encode every parameter of the featurisation run.
        array_feat_names = [name_protein, "feature", "r", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
        array_mask_names = [name_protein, "mask", "r", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
        array_geo_names = [name_protein, "geo", "r", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
        name_feature = "_".join(array_feat_names) + ".pt"
        name_mask = "_".join(array_mask_names) + ".pt"
        name_geo = "_".join(array_geo_names) + ".pt"
        path_feat = os.path.join(self.init_refined, name_protein, name_feature)
        path_mask = os.path.join(self.init_refined, name_protein, name_mask)
        path_geo = os.path.join(self.init_refined, name_protein, name_geo)
        return path_feat, path_mask, path_geo

    def _get_features_geo_padded(self, id: int, length_max):
        """padds filtered feature/geometry tensors till the max length
        Args:
            id ([int]): [pdb id]
        Returns:
            [torch.tensor]: [padded tensors [1 * length_max * feat_length]]
        """
        features_filt, geo_filt = self._get_features_geo_filtered(id)
        length_padding = length_max - features_filt.shape[0]
        # Binary mask: 1 for real atoms, 0 for padding rows.
        mask_binary = torch.cat([torch.ones(features_filt.shape[0]),torch.zeros(length_padding)]).squeeze()
        # feat_padd_vector = torch.zeros(features_filt.shape[2])
        feat_filt_padded = F.pad(
            input=features_filt,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value = 0,
        )
        # Geometry padding uses 99 as an out-of-range sentinel coordinate.
        geo_filt_padded = F.pad(
            input=geo_filt,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value=99,
        )
        return feat_filt_padded, mask_binary, geo_filt_padded

    def _get_length_max(self):
        """get the max length of feature array among all pdbids
        Returns:
            [int]: [maximum length]

        NOTE(review): relies on self.idx_max_length and
        self.path_checkpoint_features, which are only assigned in
        commented-out __init__ code (AttributeError as written).  Also note
        it measures shape[1] while _get_length uses shape[0], and the final
        argument passed to save_checkpoint_feature is the *builtin* `id`
        function, not a pdb index -- all look like leftovers to confirm.
        """
        # data_list = list(range(len(self.files_refined)))
        data_list = range(self.idx_max_length, len(self.files_refined))
        progress = tqdm(data_list)
        for pdb_id in progress:
            features_filt, geo_filt = self._get_features_geo_filtered(pdb_id)
            length = features_filt.shape[1]
            if (length > self.max_length):
                self.max_length = length
            progress.set_postfix({'pdb': self.files_refined[pdb_id],
                                  'length': length,
                                  'max_langth': self.max_length})
            save_checkpoint_feature(self.path_checkpoint_features, pdb_id, self.max_length, id)
        return self.max_length

    def _get_features_geo_filtered(self, pdb_id):
        """calculates features and geometry with filteing
        Args:pdb id of a protein]
        Returns:
            [torch.tensor]: [Num_atoms * Feat_dim]
        """
        features, geometry = self._get_features_geo(pdb_id)
        # Keep only the atoms selected by the radius/hydrogen mask.
        mask = self._get_mask_selected_atoms_pocket(pdb_id)
        features_filtered, geometry_filtered = features[mask, :], geometry[mask, :]
        features_filtered = torch.from_numpy(features_filtered).squeeze()
        geometry_filtered = torch.from_numpy(geometry_filtered).squeeze()
        return features_filtered, geometry_filtered

    def _get_features_geo(self, id):
        """gets features depending on the type of featuring
        Implemented: hot_simple, mass_charges, bio_properties
        Args:
            id ([str]): [id of a protein]
        Returns:
            [np.asarray]: [arrays of feature, geometry for a given pdb id]

        NOTE(review): an unrecognised self.type_feature falls through every
        branch and leaves `features` unbound (UnboundLocalError).
        """
        #creates featues/geo tensors for all atoms in protein
        if self.type_feature == "hot_simple":
            features = self.hot_enc(id)
        elif self.type_feature == "mass_charges":
            features = self.mass_charges(id)
        elif self.type_feature == "bio_properties":
            features = self.bio_prop(id)
        elif self.type_feature == "bio_all_properties":
            # Concatenate mass/charge encoding with pharmacophoric channels.
            features_1 = self.mass_charges(id)
            features_2 = self.bio_prop(id)
            features = np.concatenate((features_1, features_2), axis=1)
        geometry = self._get_geometry_protein(id)
        return features, geometry

    def hot_enc(self, id):
        """One-hot feature matrix [N_atoms x 22] for every atom of protein `id`."""
        #creates hot vector encoding for all atoms!
        elems = self._get_all_elems(id)
        features = [self.atom_to_hot_vector(elem) for elem in elems]
        features = np.asarray(features)
        return features

    def atom_to_hot_vector(self, elem: str):
        """ creates a hot vector of an atom type
        Parameters
        ----------
        elem : str atom element
        """
        hot_vector = np.zeros(22)
        # Index of the element in the simple atom dictionary.
        idx = self.dict_atoms_simple[elem]
        hot_vector[idx] = 1
        return hot_vector

    def mass_charges(self, id):
        """calculates "smart" hot vectors for the whole protein (all atoms!)
        mass of atoms on the atomic number's position
        Args:
            id ([type]): [description]
        Returns:
            [np.asarray]: [array of features [Num_elems * 80]]
        """
        elems = self._get_all_elems(id)
        features = [self.atom_to_mass_charge_hot(elem) for elem in elems]
        features = np.asarray(features)
        return features

    def atom_to_mass_charge_hot(self, elem: str):
        """80-dim vector with the atomic mass stored at the charge index."""
        atom_mass = self.dict_atoms_masses[elem]
        atom_charge_idx = self.dict_atoms_charges[elem]
        vector = np.zeros(80)
        vector[atom_charge_idx] = atom_mass
        return vector

    def bio_prop(self, id: int):
        """calculates pharmacophoric properties for the whole protein (all atoms!)
        Args:
            id ([int]): [pdb id of a protein]
        Returns:
            [np.array]: [array of pharmacophoric properties [N_atoms, dim_feature]]
        """
        #pocket
        path_protein, _ = self._get_path(id)
        protein_name = self.files_refined[id]
        mol = Molecule(path_protein)
        mol.filter('protein')
        mol = prepareProteinForAtomtyping(mol, verbose = False)
        features = getChannels(mol, version=2)
        # Binarise the channel values; drop the last channel.
        features = (features[0] > 0).astype(np.float32)
        features = np.asarray(features[:, :-1])
        # print("feat shape bio - ", features.shape)
        return features

    def _get_mask_selected_atoms_pocket(
        self, pdb_id: int,
    ):
        """selects atoms of "id_pdb" protein within the distance "precision" around "center_lig"
        Parameters
        ----------
        id_pdb : str id of a protein
            Protein to be processed
        center : array
            Geometrical center of a ligand
        radious : int
            Radius of atoms selections wrp center of ligand

        NOTE(review): only type_filtering == 'all' with h_filterig 'h'/'-h'
        assigns `sel`; any other combination raises NameError below.
        """
        path_protein, path_ligand = self._get_path(pdb_id)
        center_ligand = self._get_ligand_center(path_ligand)
        if self.type_filtering == "all" and self.h_filterig == 'h':
            # 'noh' excludes hydrogens from the spherical selection.
            sel="protein and noh and sqr(x-'{0}')+sqr(y-'{1}')+sqr(z-'{2}') <= sqr('{3}')".format(
                str(center_ligand[0][0]),
                str(center_ligand[0][1]),
                str(center_ligand[0][2]),
                str(self.radious),
            )
        elif self.type_filtering == "all" and self.h_filterig == '-h':
            sel="sqr(x-'{0}')+sqr(y-'{1}')+sqr(z-'{2}') <= sqr('{3}')".format(
                str(center_ligand[0][0]),
                str(center_ligand[0][1]),
                str(center_ligand[0][2]),
                str(self.radious),
            )
        mol_protein = Molecule(path_protein)
        mol_protein.filter('protein')
        # Atom typing changes the atom list, so the mask must be computed on
        # the same representation the features were computed on.
        if (self.type_feature == "bio_properties" or self.type_feature == "bio_all_properties"):
            mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)
        mask = mol_protein.atomselect(sel)
        return mask

    def _get_ligand_center(self, path_ligand):
        """get the geometrical center of a ligand
        Args:
            path_ligand ([str]): [path to the mol2 file]
        Returns:
            [np.asarray]: geo center of a ligand
        """
        mol_ligand = Molecule(path_ligand)
        coor_lig = mol_ligand.coords
        center = np.mean(coor_lig, axis=0)
        center = center.reshape(1, -1)
        return center

    def _get_all_elems(self, protein_id: int):
        """takes all elems in protein
        Args:
            protein_id (int): [id of a protein]
        Returns:
            [list]: [all elements]
        """
        path_protein, _ = self._get_path(protein_id)
        try:
            # mol_pocket = Molecule(path_protein)
            mol_protein = Molecule(path_protein)
            mol_protein.filter('protein')
            if (self.type_feature == "bio_properties" or self.type_feature == "bio_all_properties"):
                mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)
            mol_pocket_element = mol_protein.element
        except FileNotFoundError:
            # NOTE(review): fallback silently substitutes protein #2's
            # elements when the file is missing -- confirm this is intended.
            print(protein_id, " exception")
            path_protein, path_lig = self._get_path(2)
            mol_pocket = Molecule(path_protein)
            mol_pocket_element = mol_pocket.element
        return mol_pocket_element

    def _get_all_elem_general(self, protein_id: int):
        """Return the *unique* element symbols of protein `protein_id`
        (same fallback behaviour as _get_all_elems)."""
        path_protein, _ = self._get_path(protein_id)
        try:
            # mol_pocket = Molecule(path_protein)
            mol_protein = Molecule(path_protein)
            mol_protein.filter('protein')
            mol_pocket_element = mol_protein.element
        except FileNotFoundError:
            print(protein_id, " exception")
            path_protein, path_lig = self._get_path(2)
            mol_pocket = Molecule(path_protein)
            mol_pocket_element = mol_pocket.element
        return list(set(mol_pocket_element))

    def _get_geometry_protein(self, protein_id: int):
        """ gives np.array of coordinates for a pocket and a ligand in one complex
        Parameters
        ----------
        protein_id : str
            id of a complex
        """
        path_protein, _ = self._get_path(protein_id)
        mol_protein = Molecule(path_protein)
        mol_protein.filter("protein")
        # Keep the atom list consistent with the feature computation.
        if (self.type_feature == "bio_properties" or self.type_feature == "bio_all_properties"):
            mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)
        coords_protein = mol_protein.coords
        coords_protein = np.asarray(coords_protein)
        return coords_protein

    def _get_path(self, protein_id: int):
        """ get a full path to protein/ligand
        """
        name_protein = self.files_refined[protein_id]
        path_protein = os.path.join(
            self.init_refined, name_protein, name_protein + "_protein.pdb"
        )
        path_ligand = os.path.join(
            self.init_refined, name_protein, name_protein + "_ligand.mol2"
        )
        return path_protein, path_ligand

    def write_checkpoint(self):
        """writes inf about radious, type_feature, type_filtering, h_filterig used at extracting features/geometry of atoms
        """
        self.file_checkpoint_data = open(self.path_checkpoint, "a+")
        array_to_write = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
        self.file_checkpoint_data.write(','.join(array_to_write) + "\n")
        self.file_checkpoint_data.flush()

    def check_featuring(self):
        """check if feature generation was already done with params (mentioned in command line args)
        Returns:
            [bool]: [True if generation was done/ False if wasn't]
        """
        existing_featuring = pd.read_csv(self.path_checkpoint)
        array_to_check = [float(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
        # True if any CSV row matches the current parameter combination.
        bool_answer = (existing_featuring == array_to_check).all(1).any()
        # self.file_checkpoint_data.close()
        return bool_answer

    def delete_files(self, protein_name):
        """Move a broken protein folder into <path_data>/exceptions and delete the original."""
        path_to_exceptions = os.path.join(self.path_data, "exceptions")
        path_protein_folder = os.path.join(self.init_refined, protein_name)
        os.makedirs(path_to_exceptions, exist_ok=True)
        copy_tree(path_protein_folder, path_to_exceptions)
        shutil.rmtree(path_protein_folder)
class Batch_prep(Featuring):
    """Featuring variant that fans the length computation out over a process pool."""

    def __init__(self, cfg, radious, type_feature, type_filtering, h_filterig, n_proc=2, mp_pool=None):
        super(Batch_prep, self).__init__(cfg, radious, type_feature, type_filtering, h_filterig)
        self.mp = multiprocessing.Pool(n_proc)

    def transform_data(self):
        """Return the maximum filtered length over all proteins.

        NOTE(review): self.files_refined holds folder *names* while
        _get_length indexes by integer id -- confirm which is intended.
        """
        lengths = self.mp.map(self._get_length, self.files_refined)
        # Representation generation sometimes fails and yields None; drop those.
        valid = [length for length in lengths if length is not None]
        return max(valid)
def main():
    """CLI entry point: parse arguments, load the config, run feature generation."""
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.')
    parser.add_argument('--config', type=str, help='Path to config file.')
    parser.add_argument('--radious', type=int , default=8, help='selection radius around the ligand centre')
    # BUGFIX: default was 'mass_charge', but Featuring._get_features_geo only
    # recognises 'mass_charges' -- the old default left `features` unbound.
    parser.add_argument('--type_feature', type=str , default='mass_charges', help='type_feature')
    parser.add_argument('--type_filtering', type=str , default = 'all', help='type_filtering')
    # BUGFIX: default was 'without_h', but _get_mask_selected_atoms_pocket only
    # handles 'h' / '-h' -- the old default left the selection string unbound.
    parser.add_argument('--h_filterig', type=str , default='-h', help='h')
    parser.add_argument('--type_fold', type=str, help='type_fold')
    parser.add_argument('--idx_fold', type=str, help='idx fold')
    args = parser.parse_args()
    # Merge the user config over the local defaults.
    cfg = config.load_config(args.config, 'configurations/config_local/default.yaml')
    type_fold = args.type_fold
    idx_fold = args.idx_fold
    savedir = cfg["output_parameters"]["savedir"]
    model_name = cfg["model_params"]["model_name"]
    num_epoches = cfg["model_params"]["num_epochs"]
    # features generation
    Feature_gen = Featuring(cfg, args.radious, args.type_feature, args.type_filtering, args.h_filterig)
    print("max length", Feature_gen.max_length)


if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,980
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/tests/training/train_binding.py
|
import multiprocessing
import os
import pickle
import json
import numpy as np
from numpy import savetxt
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from network import EuclideanNet, SE3Net
from ACNE3 import se3ACN, se3ACN_residual
# from network1 import EuclideanNet, SE3Net
# from network_utils import Loss, Pdb_Dataset
from datasets.data_loader import Loss, Pdb_Dataset
from utils import Utils
import argparse
import sys
# parser = argparse.ArgumentParser()
# parser.add_argument("path_config", help="display a path to the config file",
# type=str)
# args = parser.parse_args()
# parse config file as an argument
# The config path is taken positionally from the command line.
args = str(sys.argv[1])
# args = "configs/tetris_simple.json"
print(args)
# ags = "configs/tetris_simple.json"
# DATA_PATH = os.path.realpath(os.path.dirname(__file__))
# DATA_PATH = '/Volumes/Ubuntu'
with open(args) as json_file:
    config = json.load(json_file)
# config = utils.parse_configuration(args)
utils = Utils(config)
DATA_PATH = config["preprocessing"]["path_root"]
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Use half the machine's cores for the DataLoader workers.
NUM_WORKERS = int(multiprocessing.cpu_count() / 2)
# NUM_WORKERS = 0
N_EPOCHS = config["model_params"]["N_EPOCHS"]
print(N_EPOCHS)
N_SPLITS = config["model_params"]["n_splits"]
BATCH_SIZE = config["model_params"]["batch_size"]
EVAL_MODES = ["normal"]
# Output locations for predictions, losses and plots.
RES_PATH = os.path.join(DATA_PATH, config["output_parameters"]["path_results"])
PKD_PATH = os.path.join(RES_PATH, config["output_parameters"]["pkd_path"])
PATH_LOSS = os.path.join(RES_PATH, config["output_parameters"]["path_losses_output"])
PATH_PLOTS = config["output_parameters"]["output_plots"]
# create folders for results if not exist
if not os.path.exists(RES_PATH):
    os.makedirs(RES_PATH)
if not os.path.exists(PATH_LOSS):
    os.makedirs(PATH_LOSS)
if not os.path.exists(PKD_PATH):
    os.makedirs(PKD_PATH)
if not os.path.exists(PATH_PLOTS):
    os.makedirs(PATH_PLOTS)
# writer = SummaryWriter(config["output_parameters"]["path_tesnorboard_output"])
def training_loop(loader, model, loss_cl, opt, epoch):
    """
    Training loop of `model` using data from `loader` and
    loss functions from `loss_cl` using optimizer `opt`.

    Returns (concatenated targets, concatenated predictions, mean batch loss).
    `epoch` is only used by the commented-out tensorboard logging.
    """
    target_pkd_all = []
    model = model.train()
    progress = tqdm(loader)
    all_rmsd = []
    pkd_pred = []
    # Each batch yields (idx, features, geometry, target_pkd).
    # NOTE(review): eval_loop unpacks an extra `masks` element and calls the
    # model as model(geometry, features, masks) -- confirm which calling
    # convention the dataset and model actually use.
    for idx, features, geometry, target_pkd in progress:
        idx = idx.to(DEVICE)
        features = features.to(DEVICE)
        geometry = geometry.to(DEVICE)
        # num_atoms= num_atoms.to(DEVICE)
        target_pkd = target_pkd.to(DEVICE)
        target_pkd_all.append(target_pkd)
        opt.zero_grad()
        # out1 = model(features, geometry)
        out1 = model(features, geometry)
        pkd_pred.append(out1.cpu())
        # print(out1.cpu())
        loss_rmsd_pkd = loss_cl(out1, target_pkd).float()
        # writer.add_scalar("training_loss", loss_rmsd_pkd.item(), epoch)
        loss_rmsd_pkd.backward()
        opt.step()
        # progress.set_postfix(
        #     {"loss_rmsd_pkd": loss_rmsd_pkd.item(),}
        # )
        all_rmsd.append(loss_rmsd_pkd.item())
    return torch.cat(target_pkd_all), torch.cat(pkd_pred), sum(all_rmsd) / len(all_rmsd)
def eval_loop(loader, model, epoch):
    """
    Evaluation loop using `model` and data from `loader`.

    Returns (concatenated targets, concatenated predictions, mean batch loss).
    NOTE(review): reads the module-level global `loss_cl` (unlike
    training_loop, which takes it as a parameter); `epoch` is only used by the
    commented-out tensorboard logging.
    """
    model = model.eval()
    progress = tqdm(loader)
    target_pkd_all = []
    pkd_pred = []
    all_rmsd = []
    # Each batch yields (idx, features, geometry, masks, target_pkd).
    for idx, features, geometry, masks, target_pkd in progress:
        with torch.no_grad():
            features = features.to(DEVICE)
            geometry = geometry.to(DEVICE)
            masks = masks.to(DEVICE)
            # out1 = model(features, geometry).to(DEVICE)
            out1 = model(geometry, features, masks).to(DEVICE)
            target_pkd = target_pkd.to(DEVICE)
            target_pkd_all.append(target_pkd)
            pkd_pred.append(out1.cpu())
            loss_rmsd_pkd = loss_cl(out1, target_pkd).float()
            # progress.set_postfix(
            #     {"loss_rmsd_pkd": loss_rmsd_pkd.item(),}
            # )
            # writer.add_scalar("test_loss", loss_rmsd_pkd.item(), epoch)
            all_rmsd.append(loss_rmsd_pkd.item())
    return torch.cat(target_pkd_all), torch.cat(pkd_pred), sum(all_rmsd) / len(all_rmsd)
if __name__ == "__main__":
    # get indexes of all complexes and "nick names"
    data_ids, data_names = utils._get_refined_data()
    # print("furst data names")
    # print(data_names)
    data_names = utils._get_names_refined_core()
    # print("second data names")
    # print(data_names)
    split_pdbids = {}
    print(DATA_PATH)
    featuriser = Pdb_Dataset(config)
    # os.makedirs(PKD_PATH, parents = True, exist_ok=True)
    # get indices of train and test data
    # train_data, test_data = utils._get_train_test_data(data_ids)
    # train_data, test_data = utils._get_dataset_preparation()
    if config["train_dataset_params"]["splitting"] == "casf":
        train_data, test_data = utils._get_core_train_test_casf()
        train_data = train_data[5:] #5 from lab
        test_data = test_data[6:]
        print("len train data", len(train_data))
        print("len test data", len(test_data))
    else:
        # train and test from refined set (4850 pdb)
        train_data, test_data = utils._get_train_test_data(data_ids)
        train_data = train_data[1:]
        print("train data", train_data)
    # NOTE(review): debug-sized subset of 5 complexes -- presumably to be
    # removed for real runs; confirm.
    train_data = train_data[1:6]
    test_data = test_data[1:6]
    print("test_data", test_data)
    pdbids = [
        data_names[t] for t in test_data
    ]  # names of pdb corresponding to test data indexes
    feat_train = [featuriser[data] for data in train_data]
    feat_test = [featuriser[data] for data in test_data]
    loader_train = DataLoader(
        feat_train, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True
    )
    loader_test = DataLoader(
        feat_test, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=False
    )
    loss_cl = Loss()
    # NOTE(review): `model` is never instantiated anywhere in this script
    # (presumably se3ACN / se3ACN_residual from the imports) -- this line
    # raises NameError as written; confirm intended model construction.
    opt = Adam(model.parameters(),
               lr=config["model_params"]["learning_rate"])
    # Exponential learning-rate decay, stepped once per epoch.
    scheduler = ExponentialLR(opt, gamma=0.95)
    print("Training model...")
    losses_to_write_train = []
    for i in range(N_EPOCHS):
        print("Epoch {}/{}...".format(i + 1, N_EPOCHS))
        epoch = i + 1
        # NOTE(review): `self.Encoder` cannot resolve at module level (there
        # is no `self` here); the eval call below passes `model` -- confirm.
        target_pkd_all, pkd_pred, loss = training_loop(
            loader_train, self.Encoder, loss_cl, opt, epoch
        )
        # print("pkd_pred", pkd_pred)
        losses_to_write_train.append(loss)
        if i == N_EPOCHS - 1:
            # for local debugging csv
            # savetxt(
            #     os.path.join(
            #         PKD_PATH, "pkd_pred_train_{}.csv".format(str(i))),
            #     pkd_pred.detach().cpu().clone().numpy(),
            # )
            # Persist last-epoch training predictions for later analysis.
            np.save(
                os.path.join(PKD_PATH, "pkd_pred_train_{}.npy".format(str(i))),
                arr=pkd_pred.detach().cpu().clone().numpy(),
            )
        scheduler.step()
    losses_to_write_train = np.asarray(losses_to_write_train, dtype=np.float32)
    # save losses for the train
    np.savetxt(
        os.path.join(PATH_LOSS, "losses_train_2016.out"),
        losses_to_write_train,
        delimiter=",",
    )
    # save true values of training target
    savetxt(
        os.path.join(PKD_PATH, "target_pkd_all_train.csv"),
        target_pkd_all.detach().cpu().clone().numpy(),
    )
    np.save(
        os.path.join(PKD_PATH, "target_pkd_all_train"),
        arr=target_pkd_all.detach().cpu().clone().numpy(),
    )
    print("Evaluating model...")
    target_pkd_all_test, pkd_pred_test, loss_test_to_write = eval_loop(
        loader_test, model, epoch
    )
    print("pkd_pred", pkd_pred_test)
    loss_test_to_write = np.asarray(loss_test_to_write, dtype=np.float32)
    # Wrap the scalar so np.savetxt receives a 1-D array.
    loss_test_to_write = np.asarray([loss_test_to_write])
    np.savetxt(
        os.path.join(PATH_LOSS, "losses_test_2016.out"),
        loss_test_to_write,
        delimiter=",",
    )
    os.makedirs(PKD_PATH, exist_ok=True)
    # Save results for later evaluation
    # for local debugging csv
    # savetxt(
    #     os.path.join(PKD_PATH, "target_pkd_all_test.csv"),
    #     target_pkd_all_test.detach().cpu().clone().numpy(),
    # )
    # savetxt(
    #     os.path.join(PKD_PATH, "pkd_pred_test.csv"),
    #     pkd_pred_test.detach().cpu().clone().numpy(),
    # )
    np.save(
        os.path.join(PKD_PATH, "target_pkd_all_test"),
        arr=target_pkd_all_test.detach().cpu().clone().numpy(),
    )
    np.save(
        os.path.join(PKD_PATH, "pkd_pred_test"),
        arr=pkd_pred_test.detach().cpu().clone().numpy(),
    )
    with open(os.path.join(PKD_PATH, "split_pdbids.pt"), "wb") as handle:
        pickle.dump(split_pdbids, handle)
    utils.plot_statistics(
        PKD_PATH,
        PATH_PLOTS,
        N_EPOCHS,
        config["output_parameters"]["name_plot"],
        "train",
        losses_to_write_train[-1],
        loss_test_to_write[0],
    )
    utils.plot_statistics(
        PKD_PATH,
        PATH_PLOTS,
        N_EPOCHS,
        config["output_parameters"]["name_plot"],
        "test",
        losses_to_write_train[-1],
        loss_test_to_write[0],
    )
    utils.plot_losses(
        PATH_LOSS, PATH_PLOTS, N_EPOCHS, config["output_parameters"]["name_plot"]
    )
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,981
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/datasets/split.py
|
import itertools as IT
import json
import os
import pickle
import time
from distutils.dir_util import copy_tree
from functools import partial
from multiprocessing import Pool
from shutil import copyfile
import itertools
import _pickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.spatial.distance as dist
import torch
from matplotlib import pyplot as plt
from numpy import mean, std
# from openbabel import openbabel
from scipy import spatial as spatial
from scipy.stats import pearsonr
import argparse
import sys
import utils.config
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.model_selection import KFold
from sklearn.cluster import MiniBatchKMeans
from utils.build_vocab import Vocabulary
number_atoms = 22
FP_SIZE = 1024
class Splitter:
    """Builds and pickles cross-validation fold indices for the refined set.

    Three strategies are supported (see :meth:`split`): plain random K-fold,
    ligand-scaffold folds (Morgan fingerprints clustered with k-means) and
    protein-chain folds based on the bc-95 sequence-cluster file.  Every
    strategy writes its fold list to ``<savedir>/logs/idxs/<type_fold>``.
    """
    # def __init__(self, path_pocket: str, path_ligand: str):
    def __init__(self, cfg):
        # Cache all relevant config entries and create the output folders.
        self.cfg = cfg
        self.type_fold = cfg["sampling_params"]["type_fold"]
        self.name_file_folds = cfg['splitting']['file_folds']
        self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        self.path_root = cfg['preprocessing']['path_root']
        self.init_refined = self.path_root + "/data/new_refined/"
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        # Keep only entries that look like PDB ids (names start with a digit).
        self.files_refined = os.listdir(self.protein_dir)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.n_samples = len(self.files_refined)
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        self.n_splits = cfg['training_params']['n_splits']
        self.loss_best = np.inf
        #output files
        self.savedir = os.path.join(cfg['output_parameters']['savedir'], cfg['model_params']['model_name'])
        # NOTE(review): attribute name is a typo of "tensorboard"; kept for compatibility.
        self.tesnorboard_path = self.savedir
        self.model_path = os.path.join(self.savedir, "models")
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        # bc-95: one protein sequence-identity cluster per line.
        self.file_prot_chain = "../data/bc-95.txt"
        self.random_state = 1337
        if not os.path.exists(self.save_dir_smiles):
            os.makedirs(self.save_dir_smiles)
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)
        if not os.path.exists(self.idx_file):
            os.makedirs(self.idx_file)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

    def _get_random_split(self):
        """Random 5-fold split over sample indices; pickles the fold list."""
        data_ids = np.array([i for i in range(self.n_samples)])
        # data_ids = np.array([i for i in range(20)])
        #cross validation
        # NOTE(review): hard-coded n_splits=5 / random_state=2 here although
        # self.n_splits / self.random_state exist — confirm which is intended.
        kf = KFold(n_splits=5, shuffle=True, random_state=2)
        my_list = list(kf.split(data_ids))
        with open(os.path.join(self.idx_file, self.type_fold), 'wb') as fp:
            pickle.dump(my_list, fp)

    def _ligand_scaffold_split(self):
        """
        Ligand-based scaffold split using Morgan fingerprints
        and k-means clustering.

        Each k-means cluster becomes the test set of one fold; the remaining
        samples form the train set.  The fold array is pickled and returned.
        """
        km = MiniBatchKMeans(n_clusters=self.n_splits, random_state=self.random_state)
        # One Morgan fingerprint bit-vector per ligand (1024 == FP_SIZE).
        feat = np.zeros((self.n_samples, 1024), dtype=np.uint8)
        for idx in range(self.n_samples):
            smile = self._get_caption(idx)
            mol = Chem.MolFromSmiles(smile)
            fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=FP_SIZE)
            arr = np.zeros((1,), dtype=np.uint8)
            DataStructs.ConvertToNumpyArray(fp, arr)
            feat[idx] = arr.copy()
        labels = km.fit_predict(feat)
        splits = []
        for split_no in range(self.n_splits):
            indices_train = np.where(labels != split_no)[0]
            indices_test = np.where(labels == split_no)[0]
            splits.append((indices_train, indices_test))
        splits = np.asarray(splits)
        with open(os.path.join(self.idx_file, self.type_fold), 'wb') as fp:
            pickle.dump(splits, fp)
        return splits

    def chain_split(self):
        """Protein-chain split: 5-fold KFold over bc-95 sequence clusters.

        Proteins in the same cluster always land on the same side of a fold,
        so near-identical chains cannot leak between train and test.  Pickles
        a list of [train_ids, test_ids] pairs (indices into files_refined).
        """
        # self.files_refined = self.files_refined[:-3]
        with open(self.file_prot_chain, 'r') as file:
            lines = file.read().splitlines()
        words_all = []
        for line in lines:
            # "1abc_A 2xyz_B ..." -> lower-cased pdb ids of one cluster line
            words_line = [word.split("_")[0].lower() for word in line.split()]
            words_all.append(words_line)
        # pdb id -> cluster-line index
        word_dict = {}
        for idx, line in enumerate(words_all):
            for word in line:
                word_dict[word] = idx
        refined_orig_dict = {key: value for value, key in enumerate(self.files_refined)}
        # Only refined proteins that appear in the cluster file are used.
        refined_dict_chain = {}
        for id in self.files_refined:
            if id in word_dict.keys():
                refined_dict_chain[id] = word_dict[id]
        inverted_ref_chain = {}
        for key, value in refined_dict_chain.items():
            inverted_ref_chain.setdefault(value, list()).append(key)
        values_unique = list(set(refined_dict_chain.values())) #idx of line (subgroup) from prot chain
        kf = KFold(n_splits=5, shuffle=True, random_state=2)
        my_list = list(kf.split(values_unique))
        split_refined_all = []
        for split in my_list: # folds
            split_refined = []
            for sub_split in split: #train/test
                idx_first = [values_unique[idx] for idx in sub_split] #train/test
                names_prot = list(itertools.chain.from_iterable([inverted_ref_chain[ind] for ind in idx_first])) #names of refined
                id_orig = [refined_orig_dict[name] for name in names_prot] #id of prot from original list
                split_refined.append(id_orig)
            split_refined_all.append(split_refined)
        with open(os.path.join(self.idx_file, self.type_fold), 'wb') as fp:
            pickle.dump(split_refined_all, fp)

    def _get_caption(self, id):
        """Return the raw SMILES string of the ligand for protein index *id*.

        Reads ``<init_refined>/<pdb>/<pdb>_ligand.smi`` as plain text.
        """
        protein_name = self.files_refined[id]
        # print("current protein", protein_name)
        path_to_smile = os.path.join(
            self.init_refined, protein_name, protein_name + "_ligand.smi"
        )
        with open(path_to_smile, "r") as file:
            caption = file.read()
        return caption

    def split(self, type_fold: str):
        """Dispatch to the requested strategy: 'random' | 'morgan' | 'chain'."""
        if(type_fold == 'random'):
            self._get_random_split()
        elif(type_fold == 'morgan'):
            self._ligand_scaffold_split()
        elif(type_fold == 'chain'):
            self.chain_split()
def main():
    """CLI entry point: build and pickle fold indices for one split type."""
    parser = argparse.ArgumentParser(
        description='Get Splits File'
    )
    parser.add_argument('config', type=str, help='Path to config file.')
    parser.add_argument('type_fold', type=str, help='type_fold')
    args = parser.parse_args()
    # FIX: the module is imported as ``import utils.config`` (no alias), so
    # the bare name ``config`` used before raised NameError.  The arguments
    # were also parsed and the config loaded twice; do each exactly once,
    # keeping the second (effective) default config path.
    cfg = utils.config.load_config(args.config, 'configurations/config_local/default.yaml')
    splitter = Splitter(cfg)
    # Delegate the fold-type dispatch to Splitter.split instead of
    # duplicating the if/elif chain here.
    splitter.split(args.type_fold)


if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,982
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/evaluation/Contrib/NP_Score/npscorer_my.py
|
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import sys, math, gzip, pickle
import os.path
from collections import namedtuple
def readNPModel(filename=os.path.join(os.path.dirname(__file__), 'publicnp.model.gz')):
    """Read and return the NP-likeness scoring model.

    The returned mapping (fingerprint bit -> fragment score) has to be
    passed to the scoring functions below.

    Args:
        filename: path to the gzipped, pickled model; defaults to the
            ``publicnp.model.gz`` shipped next to this module.
    """
    print("reading NP model ...", file=sys.stderr)
    # FIX: close the gzip handle deterministically instead of leaking it
    # until garbage collection.
    with gzip.open(filename) as f:
        fscore = pickle.load(f)
    print("model in", file=sys.stderr)
    return fscore
def scoreMolWConfidence(mol, fscore):
    """Next to the NP Likeness Score, this function outputs a confidence value
    between 0..1 that describes how many fragments of the tested molecule
    were found in the model data set (1: all fragments were found).

    Args:
        mol: an RDKit molecule.
        fscore: model mapping returned by :func:`readNPModel`.

    Returns:
        namedtuple NPLikeness(nplikeness, confidence)

    Raises:
        ValueError: if *mol* is None.
    """
    if mol is None:
        raise ValueError('invalid molecule')
    fp = rdMolDescriptors.GetMorganFingerprint(mol, 2)
    bits = fp.GetNonzeroElements()
    # calculating the score
    score = 0.0
    bits_found = 0
    for bit in bits:
        if bit in fscore:
            bits_found += 1
            score += fscore[bit]
    score /= float(mol.GetNumAtoms())
    # FIX: a molecule whose fingerprint has no set bits previously raised
    # ZeroDivisionError here; report zero confidence instead.
    confidence = float(bits_found / len(bits)) if bits else 0.0
    # preventing score explosion for exotic molecules
    if score > 4:
        score = 4. + math.log10(score - 4. + 1.)
    elif score < -4:
        score = -4. - math.log10(-4. - score + 1.)
    NPLikeness = namedtuple("NPLikeness", "nplikeness,confidence")
    return NPLikeness(score, confidence)
def scoreMol(mol, fscore):
    """Calculate the Natural Product Likeness of a molecule.

    Returns the score as a float in the range -5..5.
    """
    result = scoreMolWConfidence(mol, fscore)
    return result.nplikeness
# def processMols(suppl):
# fscore = readNPModel()
# print("calculating ...", file=sys.stderr)
# count = {}
# scores = []
# n = 0
# for i, m in enumerate(suppl):
# if m is None:
# continue
# n += 1
# score = "%.3f" % scoreMol(m, fscore)
# scores.append(float(score))
# return scores
def processMols(suppl):
    """Score every valid molecule in *suppl*.

    Args:
        suppl: iterable of RDKit molecules; None entries are skipped.

    Returns:
        list[float]: NP-likeness scores rounded to three decimals.
    """
    fscore = readNPModel()
    # print("calculating ...", file=sys.stderr)
    # FIX: dropped the unused ``count``/``n`` locals.  The round-trip through
    # "%.3f" is kept on purpose — it rounds every score to 3 decimals.
    return [float("%.3f" % scoreMol(m, fscore)) for m in suppl if m is not None]
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,983
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/build_vocab.py
|
import argparse
import json
import os
import pickle
import sys
from collections import Counter
import argparse
import utils.config as config
MAX_Length = 245
# args = str(sys.argv[1])
# print(args)
# with open(args) as json_file:
# config = json.load(json_file)
# Arguments
# parser = argparse.ArgumentParser(
# description='Train a 3D reconstruction model.'
# )
# parser.add_argument('config', type=str, help='Path to config file.')
# args = parser.parse_args()
# cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
class Vocabulary(object):
    """Simple vocabulary wrapper mapping tokens to integer ids."""

    def __init__(self):
        # token -> id and id -> token mirrors; ``idx`` is the next free id.
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0

    def add_word(self, word):
        """Register *word* if unseen, assigning it the next free id."""
        if word in self.word2idx:
            return
        self.word2idx[word] = self.idx
        self.idx2word[self.idx] = word
        self.idx += 1

    def __call__(self, word):
        """Return the id of *word*; unknown words fall back to ``<unk>``."""
        try:
            return self.word2idx[word]
        except KeyError:
            print("word", word)
            return self.word2idx["<unk>"]

    def __len__(self):
        return len(self.word2idx)
import re
def smi_tokenizer(smi):
    """
    Tokenize a SMILES molecule or reaction

    Splits on bracket atoms, two-letter halogens (Br/Cl), organic-subset
    atoms, bonds, ring-closure digits and punctuation, returning the
    matched tokens in order.
    """
    pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
    # findall already yields the token list; no need to re-wrap it.
    return re.compile(pattern).findall(smi)
def build_vocab(cfg):
    """Build a token vocabulary from every ``*_ligand.smi`` file.

    Scans ``cfg['data']['path_refined']`` (one sub-directory per PDB id,
    each containing ``<id>_ligand.smi``), tokenizes every SMILES string and
    returns a Vocabulary seeded with ``<start>`` / ``<end>``.

    Args:
        cfg: configuration mapping providing ``cfg['data']['path_refined']``.

    Returns:
        Vocabulary: token -> id mapping over all observed SMILES tokens.
    """
    # dir_path = config["preprocessing"]["path_proteins"]
    dir_path = cfg['data']['path_refined']
    files_pr = os.listdir(dir_path)
    # FIX: ``remove`` raised ValueError when the macOS junk file was absent;
    # drop it only if present.  Also removed the unused ``max = 0`` local
    # that shadowed the builtin.
    if ".DS_Store" in files_pr:
        files_pr.remove(".DS_Store")
    counter = Counter()
    # FIX: loop variable renamed — it used to be ``file`` and was shadowed by
    # the file handle opened inside the loop body.
    for protein in files_pr:
        print(protein)
        path_to_smile = os.path.join(dir_path, protein, protein + "_ligand.smi")
        with open(path_to_smile, "r") as smile_file:
            data = smile_file.read()
        print(data)
        counter.update(smi_tokenizer(data))
    print("counter", counter)
    vocab = Vocabulary()
    # vocab.add_word("pad>")
    # NOTE(review): Vocabulary.__call__ falls back to "<unk>", which is never
    # added here — an unknown token at lookup time raises KeyError.  Left
    # unchanged because adding it would shift every downstream token id.
    vocab.add_word("<start>")
    vocab.add_word("<end>")
    # FIX: dropped the unused enumerate index and the unused counts.
    for word in counter:
        vocab.add_word(word)
    return vocab
def main(args):
    """Build the vocabulary and pickle it to the configured path.

    Args:
        args: argparse namespace with a ``config`` attribute (path to the
            YAML configuration file).
    """
    # FIX: the old body subscripted the imported *module* ``config``
    # (``config["preprocessing"]``), which raises TypeError; load the config
    # dict from the CLI argument instead, mirroring the script body below.
    cfg = config.load_config(args.config, 'configurations/config_local/default.yaml')
    vocab = build_vocab(cfg)
    vocab_path = cfg["preprocessing"]["vocab_path"]
    with open(vocab_path, "wb") as f:
        pickle.dump(vocab, f)
    print("Total vocabulary size: {}".format(len(vocab)))
    print("Saved the vocabulary wrapper to '{}'".format(vocab_path))
if __name__ == "__main__":
    # Script entry: parse the config path, tokenize every ligand SMILES and
    # persist the resulting Vocabulary with pickle.
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.'
    )
    parser.add_argument('config', type=str, help='Path to config file.')
    args = parser.parse_args()
    cfg = config.load_config(args.config, 'configurations/config_local/default.yaml')
    # with open(vocab_path, "r") as f:
    #     v = pickle.load(f)
    # v = pickle.load( open( vocab_path, "rb" ) )
    # print("vocab", v)
    # build_vocab(cfg)
    # NOTE(review): this duplicates main() above instead of calling it; the
    # vocabulary is rebuilt and dumped directly here.
    vocab = build_vocab(cfg)
    vocab_path = cfg['preprocessing']['vocab_path']
    with open(vocab_path, "wb") as f:
        pickle.dump(vocab, f)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,984
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/datasets/feature.py
|
import argparse
import multiprocessing
import os
import re
import shutil
from distutils.dir_util import copy_tree
from functools import partial
from multiprocessing import Pool
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from moleculekit.molecule import Molecule
from moleculekit.tools.atomtyper import (getFeatures,
prepareProteinForAtomtyping)
from moleculekit.tools.voxeldescriptors import getChannels
# from moleculekit.smallmol.smallmol import SmallMol
from torch import nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import src.utils.config as config
# import dictionary of atoms' types and hot encoders
from src.datasets.dictionaries import (atom_most_common, dict_atoms_charges,
dict_atoms_hot, dict_atoms_masses,
dict_atoms_simple)
from src.tests.list_exception import list_exception
from src.utils.checkpoint import save_checkpoint_feature
# from dict
class Featuring():
    def __init__(self, cfg, radious, type_feature, type_filtering, h_filterig):
        """uses cfg file which is given as arg in "python train_captioning.py"

        Precomputes (or reloads) filtered, padded per-atom feature/geometry
        tensors for every refined protein.

        Args:
            cfg: configuration mapping (paths under 'preprocessing'/'data').
            radious: selection radius around the ligand center.
            type_feature: 'atom_number' | 'hot_simple' | 'mass_charges' |
                'bio_properties' | 'bio_all_properties'.
            type_filtering: atom-filtering mode (only 'all' is handled).
            h_filterig: 'h' or '-h' hydrogen-filtering flag.
        """
        print("begin!")
        self.path_root = cfg['preprocessing']['path_root']
        self.path_data = cfg['data']['path']
        self.path_checkpoint = os.path.join(self.path_data, "preprocess_checkpoint.csv")
        # NOTE(review): open(...).close() returns None, so this attribute is
        # always None — the call only ensures the checkpoint file exists.
        self.file_checkpoint_data = open(self.path_checkpoint, "a+").close()
        # Write the CSV header once, on first creation.
        if (len(open(self.path_checkpoint).readlines()) == 0):
            print("creating the file...")
            with open(self.path_checkpoint, "a+") as f:
                f.write('radious,type_feature,type_filtering,h_filterig'+ "\n")
        self.init_refined = cfg['data']['path_refined']
        self.init_casf = self.path_root + "/data/new_core_2016/"
        # Lookup tables for the different featurization modes.
        self.dict_atoms = dict_atoms_hot
        self.dict_atoms_simple = dict_atoms_simple
        self.dict_words = atom_most_common
        self.dict_atoms_masses = dict_atoms_masses
        self.dict_atoms_charges = dict_atoms_charges
        self.radious = radious
        self.type_feature = type_feature
        self.type_filtering = type_filtering
        self.h_filterig = h_filterig
        ##################refined files###################
        # Keep only directories that look like PDB ids (start with a digit).
        self.files_refined = os.listdir(self.init_refined)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.idx_files_refined = list(range(0, len(self.files_refined)))
        self.names_bio_exception = []
        self.max_length = 0
        # Recompute everything unless this configuration was already
        # checkpointed; otherwise recover max_length from a sample file.
        if not self.check_featuring():
            self.run_parallel_write_feat_geo()
        else:
            # Entry 1 is used as a representative — all saved tensors share
            # the same padded length.
            f, m, g = self._get_feat_geo_from_file(1)
            self.max_length = f.shape[0]
    def run_parallel_write_feat_geo(self):
        """Full preprocessing pipeline: featurize, drop failures, pad, checkpoint.

        1. compute & save filtered feature/geo tensors for all proteins
           (8 worker processes);
        2. delete the folders of proteins whose featurization failed;
        3. find the global max atom count and pad every tensor to it;
        4. record this configuration in the checkpoint CSV.

        NOTE(review): ``self.names_bio_exception`` is appended inside Pool
        worker processes, so the parent's list likely stays empty and the
        cleanup loop is a no-op — verify.
        """
        print("writing filtered features/geo...")
        with Pool(processes=8) as pool:
            pool.map(self.write_padd_feat_geo, self.idx_files_refined)
        print("exception...!", self.names_bio_exception)
        for name in self.names_bio_exception:
            self.delete_files(name)
        # Re-list the directory in case failed proteins were removed above.
        self.files_refined = os.listdir(self.init_refined)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.idx_files_refined = list(range(0, len(self.files_refined)))
        print("max length calculating...")
        with Pool(processes=8) as pool:
            lengthes = pool.map(self._get_max_length_from_files, self.idx_files_refined)
        self.max_length = max(lengthes)
        print("max length - ", self.max_length)
        print("padding...")
        with Pool(processes=8) as pool:
            pool.map(self.files_to_padded, self.idx_files_refined)
        self.write_checkpoint()
def write_padd_feat_geo(self, id):
try:
feat_filt_padded, geo_filt_padded = self._get_features_geo_filtered(id)
path_feature, path_mask, path_geo = self._get_name_save(id)
torch.save(feat_filt_padded, path_feature)
torch.save(geo_filt_padded, path_geo)
except:
protein_name = self.files_refined[id]
self.names_bio_exception.append(protein_name)
def _get_max_length_from_files(self, id):
path_feature, path_mask, path_geo = self._get_name_save(id)
feature_filt = torch.load(path_feature, map_location=torch.device('cpu')).long()
length = feature_filt.shape[0]
return length
    def files_to_padded(self, id):
        """Pad one protein's saved feature/geo tensors to ``self.max_length``.

        Overwrites the feature and geo files in place and writes the binary
        mask (1 = real atom, 0 = padding) alongside them.

        Args:
            id: index into ``self.files_refined``.
        """
        path_feature, path_mask, path_geo = self._get_name_save(id)
        feature_filt = torch.load(path_feature, map_location=torch.device('cpu')).long()
        # A 1-D tensor (single feature column after squeeze) is promoted to
        # (N, 1) so the 2-D padding below works.
        if len(list(feature_filt.size())) == 1:
            feature_filt = feature_filt.unsqueeze(1)
        geo_filt = torch.load(path_geo, map_location=torch.device('cpu')).long()
        length_padding = self.max_length - feature_filt.shape[0]
        mask_binary = torch.cat([torch.ones(feature_filt.shape[0]),torch.zeros(length_padding)]).squeeze()
        feat_filt_padded = F.pad(
            input=feature_filt,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value = 0,
        )
        # Geometry rows are padded with 99 as an out-of-range sentinel.
        geo_filt_padded = F.pad(
            input=geo_filt,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value=99,
        )
        torch.save(feat_filt_padded, path_feature)
        torch.save(mask_binary, path_mask)
        torch.save(geo_filt_padded, path_geo)
    def run_parallel_write(self):
        """Re-scan the refined folder and (re)write feature/geo files in parallel.

        Uses 8 worker processes, then records this configuration in the
        preprocessing checkpoint CSV.  Unlike run_parallel_write_feat_geo it
        performs no failure cleanup or padding.
        """
        self.files_refined = os.listdir(self.init_refined)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.idx_files_refined = list(range(0, len(self.files_refined)))
        with Pool(processes=8) as pool:
            pool.map(self.write_padd_feat_geo, self.idx_files_refined)
        self.write_checkpoint()
    def run_parallel_max_length(self):
        """Compute the max filtered-feature length over all proteins (8 workers).

        Unlike ``_get_max_length_from_files`` this recomputes features from
        the raw structures via ``_get_length``; the result is stored in
        ``self.max_length``.
        """
        with Pool(processes=8) as pool:
            lengthes = pool.map(self._get_length, self.idx_files_refined)
        # lengthes = []
        # with Pool(processes=8) as pool:
        #     with tqdm(total=len(self.idx_files_refined)) as pbar:
        #         for i, res in tqdm(enumerate(pool.imap_unordered(self._get_length, self.idx_files_refined))):
        #             lengthes.append(res)
        #             pbar.update()
        # lengthes = list(tqdm.tqdm(pool.imap(self._get_length, self.idx_files_refined), total=len(self.idx_files_refined)))
        # lengthes = pool.map(self._get_length, self.idx_files_refined)
        self.max_length = max(lengthes)
        print("********max********* - ", self.max_length)
    def run_parallel_write_filt_feat_geo(self):
        """(Dead/broken draft) write filtered feature/geo files, then lengths.

        NOTE(review): this method cannot run as written — ``id`` below is the
        *builtin* function, and ``feat_filt`` / ``masks`` / ``geo_filt_padded``
        are never defined (the helper returns ``features_filt`` /
        ``geo_filt``).  Kept byte-identical; it appears to be an abandoned
        draft of ``run_parallel_write_feat_geo``.
        """
        features_filt, geo_filt = self._get_features_geo_filtered(id)
        path_feature, path_mask, path_geo = self._get_name_save(id)
        torch.save(feat_filt, path_feature)
        torch.save(masks, path_mask)
        torch.save(geo_filt_padded, path_geo)
        with Pool(processes=8) as pool:
            lengthes = pool.map(self._get_length, self.idx_files_refined)
def _get_length(self, pdb_id):
features_filt, geo_filt = self._get_features_geo_filtered(pdb_id)
length = features_filt.shape[0]
return length
def _get_feat_geo_from_file(self, id):
"""reads torch tensors of feature/geo from files
Args:
id ([int]): [pdb id of a protein]
Returns:
[toch.array]: [feature/geo padded filtered tensors from saved files]
"""
path_feature, path_mask, path_geo = self._get_name_save(id)
feature_filt_padded = torch.load(path_feature, map_location=torch.device('cpu')).long()
mask = torch.load(path_mask, map_location=torch.device('cpu'))
geo_filt_padded = torch.load(path_geo, map_location=torch.device('cpu'))
return feature_filt_padded, mask, geo_filt_padded
    def write_filtered_pad_feat_geo(self):
        """1. calculates max length of feat/gep tensors
        2. padds feat/geo tensors with zeros till the max length
        3. writes resulting tensor to the file

        NOTE(review): ``self.idx_write`` and ``self.path_checkpoint_features``
        are not set in ``__init__`` (resume indices, presumably set elsewhere
        or legacy) — verify before calling.
        """
        length_max = self._get_length_max()
        # Resume from the last checkpointed index.
        data_list = range(self.idx_write, len(self.files_refined))
        # length_max = 150
        progress = tqdm(data_list)
        for id in progress:
            progress.set_postfix({'pdb': self.files_refined[id]})
            feat_filt_padded, masks, geo_filt_padded = self._get_features_geo_padded(id, length_max)
            path_feature, path_mask, path_geo = self._get_name_save(id)
            torch.save(feat_filt_padded, path_feature)
            torch.save(masks, path_mask)
            torch.save(geo_filt_padded, path_geo)
            # Record progress so an interrupted run can resume.
            save_checkpoint_feature(self.path_checkpoint_features, len(self.files_refined), self.max_length, id)
        self.write_checkpoint()
def _get_name_save(self, id: int):
"""creates a path name for feature/geo
Example:
1a1e_feature_r_5_hot_simple_all_no_h.pt
1a1e_geo_r_5_hot_simple_all_no_h.pt
Args:
id ([int]): [pdb id of a protein]
Returns:
[str]: [path name for feature and geometry]
"""
# print("id", id)
name_protein = self.files_refined[id]
array_feat_names = [name_protein, "feature", "r", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
array_mask_names = [name_protein, "mask", "r", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
array_geo_names = [name_protein, "geo", "r", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
name_feature = "_".join(array_feat_names) + ".pt"
name_mask = "_".join(array_mask_names) + ".pt"
name_geo = "_".join(array_geo_names) + ".pt"
path_feat = os.path.join(self.init_refined, name_protein, name_feature)
path_mask = os.path.join(self.init_refined, name_protein, name_mask)
path_geo = os.path.join(self.init_refined, name_protein, name_geo)
return path_feat, path_mask, path_geo
    def _get_features_geo_padded(self, id: int, length_max):
        """padds filtered feature/geometry tensors till the max length

        Args:
            id ([int]): [pdb id]
            length_max: target atom-dimension length.

        Returns:
            [torch.tensor]: features padded with 0, a 1/0 mask for
            real/padded rows, and geometry padded with the sentinel 99.
        """
        features_filt, geo_filt = self._get_features_geo_filtered(id)
        length_padding = length_max - features_filt.shape[0]
        mask_binary = torch.cat([torch.ones(features_filt.shape[0]),torch.zeros(length_padding)]).squeeze()
        # feat_padd_vector = torch.zeros(features_filt.shape[2])
        feat_filt_padded = F.pad(
            input=features_filt,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value = 0,
        )
        # 99 marks padded geometry rows as out-of-range coordinates.
        geo_filt_padded = F.pad(
            input=geo_filt,
            pad=(0, 0, 0, length_padding),
            mode="constant",
            value=99,
        )
        return feat_filt_padded, mask_binary, geo_filt_padded
    def _get_length_max(self):
        """get the max length of feature array among all pdbids

        Returns:
            [int]: [maximum length] (also kept in ``self.max_length``)

        NOTE(review): uses ``features_filt.shape[1]`` while every other
        length helper uses ``shape[0]`` — likely a bug for (atoms, feat)
        tensors.  ``self.idx_max_length`` is not set in ``__init__``, and the
        *builtin* ``id`` is passed to ``save_checkpoint_feature`` — verify.
        """
        # data_list = list(range(len(self.files_refined)))
        # Resume from the last checkpointed index.
        data_list = range(self.idx_max_length, len(self.files_refined))
        progress = tqdm(data_list)
        for pdb_id in progress:
            features_filt, geo_filt = self._get_features_geo_filtered(pdb_id)
            length = features_filt.shape[1]
            if (length > self.max_length):
                self.max_length = length
            progress.set_postfix({'pdb': self.files_refined[pdb_id],
                                  'length': length,
                                  'max_langth': self.max_length})
            save_checkpoint_feature(self.path_checkpoint_features, pdb_id, self.max_length, id)
        return self.max_length
    def _get_features_geo_filtered(self, pdb_id):
        """calculates features and geometry with filteing

        Args:
            pdb_id: [pdb id of a protein]

        Returns:
            [torch.tensor]: squeezed (Num_selected_atoms * Feat_dim) features
            and the matching coordinates.
        """
        features, geometry = self._get_features_geo(pdb_id)
        mask = self._get_mask_selected_atoms_pocket(pdb_id)
        # NOTE(review): on a feature/mask length mismatch the features are
        # replaced by zeros of width 3 — apparently a fallback for a failed
        # bio featurization; confirm this is intended.
        if mask.shape[0] != features.shape[0]:
            features = np.zeros((mask.shape[0], 3))
        features_filtered, geometry_filtered = features[mask, :], geometry[mask, :]
        features_filtered = torch.from_numpy(features_filtered).squeeze()
        geometry_filtered = torch.from_numpy(geometry_filtered).squeeze()
        return features_filtered, geometry_filtered
    def _get_features_geo(self, id):
        """gets features depending on the type of featuring

        Implemented: hot_simple, mass_charges, bio_properties,
        atom_number, bio_all_properties (mass/charge channels concatenated
        with the pharmacophoric ones).

        Args:
            id ([str]): [id of a protein]

        Returns:
            [np.asarray]: [arrays of feature, geometry for a given pdb id]

        NOTE(review): an unrecognized ``type_feature`` leaves ``features``
        unbound and raises NameError; the ``shape[1] == 3`` check looks like
        a marker for a failed bio featurization — verify.
        """
        #creates featues/geo tensors for all atoms in protein
        if self.type_feature == "atom_number":
            features = self.number_atom(id)
        elif self.type_feature == "hot_simple":
            features = self.hot_enc(id)
        elif self.type_feature == "mass_charges":
            features = self.mass_charges(id)
        elif self.type_feature == "bio_properties":
            features = self.bio_prop(id)
        elif self.type_feature == "bio_all_properties":
            features_1 = self.mass_charges(id)
            features_2 = self.bio_prop(id)
            if features_2.shape[1] == 3:
                features_1 = np.zeros((1, 3))
            features = np.concatenate((features_1, features_2), axis=1)
        geometry = self._get_geometry_protein(id)
        return features, geometry
def hot_enc(self, id):
#creates hot vector encoding for all atoms!
elems = self._get_all_elems(id)
features = [self.atom_to_hot_vector(elem) for elem in elems]
features = np.asarray(features)
return features
def number_atom(self, id):
elems = self._get_all_elems(id)
features = [self.dict_atoms_simple[elem] for elem in elems]
features = np.asarray(features)
features = np.expand_dims(features, axis=1)
return features
def atom_to_hot_vector(self, elem: str):
""" creates a hot vector of an atom type
Parameters
----------
elem : str atom element
"""
hot_vector = np.zeros(22)
idx = self.dict_atoms_simple[elem]
hot_vector[idx] = 1
return hot_vector
def mass_charges(self, id):
"""calculates "smart" hot vectors for the whole protein (all atoms!)
mass of atoms on the atomic number's position
Args:
id ([type]): [description]
Returns:
[np.asarray]: [array of features [Num_elems * 80]]
"""
elems = self._get_all_elems(id)
features = [self.atom_to_mass_charge_hot(elem) for elem in elems]
features = np.asarray(features)
return features
def atom_to_mass_charge_hot(self, elem: str):
atom_mass = self.dict_atoms_masses[elem]
atom_charge_idx = self.dict_atoms_charges[elem]
vector = np.zeros(80)
vector[atom_charge_idx] = atom_mass
return vector
    def bio_prop(self, id: int):
        """calculates pharmacophoric properties for the whole protein (all atoms!)

        Args:
            id ([int]): [pdb id of a protein]

        Returns:
            [np.array]: [array of pharmacophoric properties
            [N_atoms, dim_feature]] — binarized moleculekit channels with the
            last column dropped.

        NOTE(review): the try/except is commented out, so moleculekit
        failures propagate to the caller (handled by write_padd_feat_geo).
        """
        #pocket
        # try:
        path_protein, _ = self._get_path(id)
        protein_name = self.files_refined[id]
        print("processing...", protein_name)
        mol = Molecule(path_protein)
        mol.filter('protein')
        mol = prepareProteinForAtomtyping(mol, verbose = False)
        features = getChannels(mol, version=2)
        # Binarize the raw channel values to 0/1 floats.
        features = (features[0] > 0).astype(np.float32)
        features = np.asarray(features[:, :-1])
        # print("feat shape bio - ", features.shape)
        # except:
        #     self.names_bio_exception.append(protein_name)
        #     features = np.zeros((1, 3))
        return features
def _get_mask_selected_atoms_pocket(
    self, pdb_id: int,
):
    """Boolean mask over protein atoms within `self.radious` of the
    ligand's geometric center.

    Parameters
    ----------
    pdb_id : int
        Index of the complex in `self.files_refined`.

    Returns
    -------
    Boolean atom-selection mask produced by Molecule.atomselect.

    Raises
    ------
    ValueError
        For an unsupported (type_filtering, h_filterig) combination.
        Previously such a combination silently fell through and crashed
        with a NameError on `sel`.
    """
    path_protein, path_ligand = self._get_path(pdb_id)
    center_ligand = self._get_ligand_center(path_ligand)
    cx = str(center_ligand[0][0])
    cy = str(center_ligand[0][1])
    cz = str(center_ligand[0][2])
    if self.type_filtering == "all" and self.h_filterig == 'h':
        # 'noh' excludes hydrogens from the selection
        sel = "protein and noh and sqr(x-'{0}')+sqr(y-'{1}')+sqr(z-'{2}') <= sqr('{3}')".format(
            cx, cy, cz, str(self.radious),
        )
    elif self.type_filtering == "all" and self.h_filterig == '-h':
        sel = "sqr(x-'{0}')+sqr(y-'{1}')+sqr(z-'{2}') <= sqr('{3}')".format(
            cx, cy, cz, str(self.radious),
        )
    else:
        raise ValueError(
            "unsupported filtering combination: type_filtering={!r}, h_filterig={!r}".format(
                self.type_filtering, self.h_filterig
            )
        )
    mol_protein = Molecule(path_protein)
    mol_protein.filter('protein')
    if (self.type_feature == "bio_properties" or self.type_feature == "bio_all_properties"):
        # atom typing must match the preparation used when features were built
        mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)
    mask = mol_protein.atomselect(sel)
    return mask
def _get_ligand_center(self, path_ligand):
    """get the geometrical center of a ligand
    Args:
        path_ligand ([str]): [path to the mol2 file]
    Returns:
        [np.asarray]: geo center of a ligand, shape (1, 3)
    """
    ligand = Molecule(path_ligand)
    centroid = np.mean(ligand.coords, axis=0)
    return centroid.reshape(1, -1)
def _get_all_elems(self, protein_id: int):
    """takes all elems in protein
    Args:
        protein_id (int): [id of a protein]
    Returns:
        [list]: [all elements]
    """
    path_protein, _ = self._get_path(protein_id)
    # try:
    # mol_pocket = Molecule(path_protein)
    mol_protein = Molecule(path_protein)
    mol_protein.filter('protein')
    if (self.type_feature == "bio_properties" or self.type_feature == "bio_all_properties"):
        # presumably atom typing changes the atom list, so elements are
        # read only after it — TODO confirm
        mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)
    mol_pocket_element = mol_protein.element
    # except FileNotFoundError:
    #     print(protein_id, " exception")
    #     path_protein, path_lig = self._get_path(2)
    #     mol_pocket = Molecule(path_protein)
    #     mol_pocket_element = mol_pocket.element
    return mol_pocket_element
def _get_all_elem_general(self, protein_id: int):
    """Return all atom elements of protein `protein_id`, falling back to
    the elements of complex index 2 when the protein file is missing.

    NOTE(review): the fallback silently substitutes another protein's
    elements — confirm this is intended for downstream statistics.
    """
    path_protein, _ = self._get_path(protein_id)
    try:
        # mol_pocket = Molecule(path_protein)
        mol_protein = Molecule(path_protein)
        mol_protein.filter('protein')
        mol_pocket_element = mol_protein.element
    except FileNotFoundError:
        print(protein_id, " exception")
        path_protein, path_lig = self._get_path(2)
        mol_pocket = Molecule(path_protein)
        mol_pocket_element = mol_pocket.element
    return mol_pocket_element
def _get_geometry_protein(self, protein_id: int):
    """ gives np.array of coordinates of all protein atoms in one complex
    Parameters
    ----------
    protein_id : str
        id of a complex
    """
    protein_path, _ = self._get_path(protein_id)
    molecule = Molecule(protein_path)
    molecule.filter("protein")
    if (self.type_feature == "bio_properties" or self.type_feature == "bio_all_properties"):
        molecule = prepareProteinForAtomtyping(molecule, verbose = False)
    return np.asarray(molecule.coords)
def _get_path(self, protein_id: int):
""" get a full path to protein/ligand
"""
protein_name = self.files_refined[protein_id]
path_protein = os.path.join(
self.init_refined, protein_name, protein_name + "_protein.pdb"
)
path_ligand = os.path.join(
self.init_refined, protein_name, protein_name + "_ligand.mol2"
)
return path_protein, path_ligand
def write_checkpoint(self):
    """Append the current featurization parameters (radious, type_feature,
    type_filtering, h_filterig) as one CSV row to the checkpoint file.

    Uses a context manager so the handle is closed deterministically; the
    original kept the file open for the lifetime of the object (leak).
    """
    row = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
    with open(self.path_checkpoint, "a+") as checkpoint_file:
        checkpoint_file.write(','.join(row) + "\n")
def check_featuring(self):
    """check if feature generation was already done with params (mentioned in command line args)
    Returns:
        [bool]: [True if generation was done/ False if wasn't]
    """
    recorded_runs = pd.read_csv(self.path_checkpoint)
    current_params = [float(self.radious), self.type_feature, self.type_filtering, self.h_filterig]
    # a run matches only when all four columns of some row agree
    # self.file_checkpoint_data.close()
    return (recorded_runs == current_params).all(1).any()
def delete_files(self, protein_name):
    """Move a problematic complex out of the refined set: copy its folder
    into <path_data>/exceptions, then delete the original folder.

    NOTE(review): copy_tree copies the folder's *contents* directly into
    the shared 'exceptions' directory (not into a per-protein subfolder),
    so files from different proteins may collide — confirm intended.
    """
    path_to_exceptions = os.path.join(self.path_data, "exceptions")
    path_protein_folder = os.path.join(self.init_refined, protein_name)
    os.makedirs(path_to_exceptions, exist_ok=True)
    copy_tree(path_protein_folder, path_to_exceptions)
    shutil.rmtree(path_protein_folder)
class Batch_prep(Featuring):
    """Parallel wrapper around Featuring that maps per-protein work over a
    multiprocessing pool.
    """
    def __init__(self, cfg, radious, type_feature, type_filtering, h_filterig, n_proc=2, mp_pool=None):
        # NOTE(review): the mp_pool argument is accepted but never used; a
        # fresh pool of n_proc workers is always created — confirm intended.
        super(Batch_prep, self).__init__(cfg, radious, type_feature, type_filtering, h_filterig)
        self.mp = multiprocessing.Pool(n_proc)
    def transform_data(self):
        # _get_length is presumably defined on Featuring (not visible
        # here) — TODO confirm. Returns the max atom count across complexes.
        inputs = self.mp.map(self._get_length, self.files_refined)
        # Sometimes representation generation fails
        inputs = list(filter(lambda x: x is not None, inputs))
        return max(inputs)
def main():
    """CLI entry point: parse featurization parameters and run feature
    generation for the whole refined dataset."""
    arg_parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.')
    arg_parser.add_argument('--config', type=str, help='Path to config file.')
    arg_parser.add_argument('--radious', type=int, default=8, help='dimension of word embedding vectors')
    arg_parser.add_argument('--type_feature', type=str, default='mass_charge', help='type_feature')
    arg_parser.add_argument('--type_filtering', type=str, default='all', help='type_filtering')
    arg_parser.add_argument('--h_filterig', type=str, default='without_h', help='h')
    arg_parser.add_argument('--type_fold', type=str, help='type_fold')
    arg_parser.add_argument('--idx_fold', type=str, help='idx fold')
    cli_args = arg_parser.parse_args()
    cfg = config.load_config(cli_args.config, 'configurations/config_local/default.yaml')
    # these reads are kept for their KeyError side effects on a bad config,
    # although the values are not used below
    fold_kind = cli_args.type_fold
    fold_index = cli_args.idx_fold
    output_dir = cfg["output_parameters"]["savedir"]
    network_name = cfg["model_params"]["model_name"]
    epoch_count = cfg["model_params"]["num_epochs"]
    # features generation
    feature_generator = Featuring(cfg, cli_args.radious, cli_args.type_feature, cli_args.type_filtering, cli_args.h_filterig)
    print("max length", feature_generator.max_length)
if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,985
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/evaluation/evaluator.py
|
import argparse
import csv
import json
import multiprocessing
import os
import pickle
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
# from torch.utils.tensorboard import SummaryWriter
import pandas as pd
import torch
import torch.nn as nn
from numpy import savetxt
from rdkit import Chem
from sklearn.model_selection import KFold
from torch.nn.utils.rnn import pack_padded_sequence
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
import utils.config as config
from src.datasets.data_loader import Pdb_Dataset
from src.evaluation.Contrib.statistics import (analysis_to_csv,
analysis_to_csv_test)
from src.sampling.sampler import Sampler
from src.training.utils import save_checkpoint_sampling
from src.utils.build_vocab import Vocabulary
from src.utils.checkpoint import Checkpoint_Eval
class Evaluator():
    """Records SMILES sampled from every (fold, epoch) checkpoint and
    evaluates them for uniqueness, validity and novelty, saving an
    errorbar plot per sampling strategy and fold type.

    Progress is checkpointed through Checkpoint_Eval so an interrupted
    run resumes at the last completed (fold, epoch).
    """
    def __init__(self, cfg, sampling, type_fold, epochs_array, Feature_Loader):
        self.cfg = cfg
        self.Feature_Loader = Feature_Loader
        self.type_fold = type_fold
        self.path_root = cfg['preprocessing']['path_root']
        # self.init_refined = self.path_root + "/data/new_refined/"
        self.init_refined = cfg['training_params']['image_dir']
        self.files_refined = os.listdir(self.init_refined)
        # keep only PDB-style complex folders (names start with a digit)
        self.files_refined = [file for file in self.files_refined if file[0].isdigit()]
        self.files_refined.sort()
        self.attention = self.cfg['training_params']['mode']
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # self.device = torch.device("cpu")
        self.sampling = sampling
        self.epochs_array = epochs_array
        self.num_epochs = len(self.epochs_array)
        print("epoches array - ", self.epochs_array)
        print("num of epoches", self.num_epochs)
        self.model_encoder = cfg['model']['encoder']
        # print(self.model_encoder)
        self.model_decoder = cfg['model']['decoder']
        self.sampling_data = cfg['sampling_params']['sampling_data']
        self.protein_dir = cfg["training_params"]["image_dir"]
        # beam search encodes the beam width in its name ("beam_<k>")
        if not self.sampling.startswith('beam'):
            self.number_smiles = cfg["sampling_params"]["number_smiles"]
        else:
            self.number_smiles = int(self.sampling.split("_")[1])
        # if (self.sampling == "max"):
        #     self.number_smiles = 1
        self.time_waiting = cfg["sampling_params"]["time_waiting"]
        # model params
        self.model_name = cfg['model_params']['model_name']
        # self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        #output files
        self.savedir = os.path.join(cfg['output_parameters']['savedir'], self.model_name)
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        self.tesnorboard_path = self.savedir  # (sic) attribute name typo kept for compatibility
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.save_dir_encodings = os.path.join(self.savedir, "encodings", self.model_name)
        #sampling params
        os.makedirs(self.save_dir_smiles, exist_ok=True)
        os.makedirs(self.save_dir_encodings, exist_ok=True)
        os.makedirs(os.path.join(self.log_path, "checkpoints"), exist_ok=True)
        self.path_data = os.path.join(cfg["output_parameters"]["savedir"], cfg["model_params"]["model_name"], "statistics")
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
        self.dataset = Pdb_Dataset(cfg, self.vocab)
        # training-set SMILES are cached to disk and reused between runs
        self.path_smiles_train = os.path.join(self.log_path, "checkpoints", "smiles_train")
        if not os.path.exists(self.path_smiles_train):
            self.smiles_train = self._get_train_smiles()
        else:
            with open(self.path_smiles_train, 'rb') as smiles:
                self.smiles_train = pickle.load(smiles)
        self.path_vis = os.path.join(cfg["output_parameters"]["savedir"], self.model_name, 'results_' + self.model_name)
        self.path_plot = os.path.join(self.path_vis, self.type_fold)
        os.makedirs(self.path_plot, exist_ok=True)
        self._n_folds = 5
        # resume coordinates of the recording/evaluation loops
        self.path_checkpoint_evaluator = os.path.join(self.savedir, "checkpoints", "checkpoint_evaluator.csv")
        self.checkpoint_evaluation = Checkpoint_Eval(self.path_checkpoint_evaluator, self.type_fold, self.sampling)
        self.start_rec_fold, self.start_rec_epoch, self.start_eval_fold, self.start_eval_epoch = self.checkpoint_evaluation._get_data()
        os.makedirs(os.path.join(self.log_path, "checkpoints", self.type_fold), exist_ok=True)
        # per-(fold, epoch) metric matrices, persisted so a resumed run
        # keeps previously computed entries
        self.path_novel = os.path.join(self.log_path, "checkpoints", self.type_fold, "novel.npy")
        self.path_valid = os.path.join(self.log_path, "checkpoints", self.type_fold, "valid.npy")
        self.path_unique = os.path.join(self.log_path, "checkpoints", self.type_fold, "unique.npy")
        if not os.path.isfile(self.path_novel):
            self.valid = np.zeros((self._n_folds, self.num_epochs))
            self.unique = np.zeros((self._n_folds, self.num_epochs))
            self.novel = np.zeros((self._n_folds, self.num_epochs))
            np.save(self.path_novel, self.novel)
            np.save(self.path_valid, self.valid)
            np.save(self.path_unique, self.unique)
        else:
            self.novel = np.load(self.path_novel, allow_pickle=True)
            self.valid = np.load(self.path_valid, allow_pickle=True)
            self.unique = np.load(self.path_unique, allow_pickle=True)
        # print("shape of unique array first- ", self.unique.shape)
    def run_evaluation(self):
        """Record molecules for all folds/epochs, then evaluate them."""
        self.record_all_mol()
        self.evaluate_all_mol()
    def record_all_mol(self):
        """Sample SMILES from every saved (fold, epoch) encoder/decoder
        checkpoint, resuming from the last recorded position."""
        for idx_fold in range(self.start_rec_fold, self._n_folds):
            for epoch in range(self.start_rec_epoch, self.num_epochs):
                epoch_absolute = self.epochs_array[epoch]
                encoder_path = os.path.join(self.savedir, "models", "encoder-" + str(idx_fold) + "-" + str(epoch_absolute) + '-' + str(self.type_fold) + '.ckpt')
                decoder_path = os.path.join(self.savedir, "models", "decoder-" + str(idx_fold) + "-" + str(epoch_absolute) + '-' + str(self.type_fold) + '.ckpt')
                # print("encoder_path!!", encoder_path)
                sampler = Sampler(self.cfg, self.sampling, self.Feature_Loader)
                sampler.analysis_cluster(idx_fold, epoch_absolute, self.type_fold, encoder_path, decoder_path)
                self.checkpoint_evaluation.write_record_checkpoint(idx_fold + 1, epoch + 1)
    def evaluate_all_mol(self):
        """Compute % unique / valid / novel SMILES per (fold, epoch) from
        the recorded CSVs and save an errorbar plot over epochs."""
        for idx_fold in range(self.start_eval_fold, self._n_folds):
            for epoch in range(self.start_eval_epoch, self.num_epochs):
                epoch_absolute = self.epochs_array[epoch]
                self.name_file_stat = self.sampling + "_" + str(self.type_fold) + "_" + str(idx_fold) + ".csv"
                file_mols = pd.read_csv(os.path.join(self.save_dir_smiles, self.name_file_stat))
                # print("file_mols, - ", file_mols)
                mol = file_mols.loc[file_mols['epoch_no'] == str(epoch_absolute), 'gen_smile'].to_list()
                number_mols = len(mol)
                # print("mol!!, ", mol)
                # Compute unique molecules
                # print("shape of unique array - ", self.unique.shape)
                # NOTE(review): the +1 presumably guards against empty
                # sample lists but slightly biases the ratio — confirm.
                self.unique[idx_fold, epoch] = len(set(mol)) / (number_mols + 1)
                # Remove duplicates
                mol = np.array(list(set(mol)))
                number_mols = mol.shape[0]
                # Check validity and remove non-valid molecules
                to_delete = []
                for k, m in enumerate(mol):
                    if not self.check_valid(m):
                        to_delete.append(k)
                valid_mol = np.delete(mol, to_delete)
                self.valid[idx_fold, epoch] = len(valid_mol) / (number_mols + 1)
                # Compute molecules unequal to training data
                if valid_mol.size != 0:
                    print("not equal to 0!")
                    new_m = self.check_with_training_data(list(valid_mol), idx_fold)
                    self.novel[idx_fold, epoch] = len(new_m) / number_mols
                #save arrays of novel/valid/unique
                np.save(self.path_novel, self.novel)
                np.save(self.path_valid, self.valid)
                np.save(self.path_unique, self.unique)
                self.checkpoint_evaluation.write_eval_checkpoint(idx_fold + 1, epoch + 1)
        # Get percentage
        self.unique *= 100
        self.novel *= 100
        self.valid *= 100
        # Get mean values
        mean_unique = np.mean(self.unique, axis=0)
        mean_valid = np.mean(self.valid, axis=0)
        mean_novel = np.mean(self.novel, axis=0)
        # Get standard deviation
        std_unique = np.std(self.unique, axis=0)
        std_valid = np.std(self.valid, axis=0)
        std_novel = np.std(self.novel, axis=0)
        # PLot
        plt.figure(1)
        array_epoches = np.asarray(self.epochs_array)
        print("array_epoches shape, - ", array_epoches.shape)
        print("mean_unique shape, - ", mean_unique.shape)
        print("std_unique shape, - ", std_unique.shape)
        plt.errorbar(array_epoches, mean_unique, yerr=std_unique, capsize=3, label='unique')
        plt.errorbar(array_epoches, mean_valid, yerr=std_valid, capsize=3,
                     label='valid & unique')
        plt.errorbar(array_epoches, mean_novel, yerr=std_novel, capsize=3,
                     label='novel, valid & unique', linestyle=':')
        # plt.errorbar(np.arange(1, self.num_epochs + 1), mean_unique, yerr=std_unique, capsize=3, label='unique')
        # plt.errorbar(np.arange(1, self.num_epochs + 1), mean_valid, yerr=std_valid, capsize=3,
        #              label='valid & unique')
        # plt.errorbar(np.arange(1, self.num_epochs + 1), mean_novel, yerr=std_novel, capsize=3,
        #              label='novel, valid & unique', linestyle=':')
        plt.yticks(np.arange(0, 110, step=10))
        plt.legend(loc=3)
        plt.ylim(0, 105)
        plt.title('SMILES at ' + str(self.sampling) + ' sampling, ' + str(self.type_fold) + ' split')
        plt.ylabel('% SMILES')
        plt.xlabel('Epoch')
        path_save = os.path.join(self.path_plot, self.sampling + '_' + 'novel_valid_unique_molecules.png')
        plt.savefig(path_save)
        # data = np.vstack((mean_unique, std_unique, mean_valid, std_valid, mean_novel, std_novel))
        # pd.DataFrame(data).to_csv(self._experiment_name + '/molecules/' + self._experiment_name + '_data.csv')
        # # Create output for last epoch
        # data = np.vstack((unique[:,self._epochs-1],valid[:,self._epochs-1],novel[:,self._epochs-1]))
        # pd.DataFrame(data).to_csv(self._experiment_name + '/molecules/' + self._experiment_name + 'final_epoch_data.csv')
        #plt.show()
        plt.close()
    def check_with_training_data(self, mol, id_fold):
        '''Remove molecules that are within the training set and return number
        :return mol: SMILES not contained in the training
        '''
        to_delete = []
        can_mol = []
        for i, m in enumerate(mol):
            if m in self.smiles_train[id_fold]:
                to_delete.append(i)
        mol = np.delete(mol, to_delete)
        return mol
    def check_valid(self, smile):
        """True if `smile` is a non-empty, RDKit-parseable SMILES string."""
        m = Chem.MolFromSmiles(smile)
        if m is None or smile == '' or smile.isspace() == True:
            return False
        else:
            return True
    def _get_train_smiles(self):
        """Collect the training-set SMILES for each of the 5 folds (the
        complement of the fold's test indices) and cache them to disk."""
        print("smiles training start...")
        smiles_train = []
        for id_split in range(5):
            smiles_split = []
            # self.file_folds = os.path.join(self.idx_file, "test_idx_" + self.type_fold + "_" + str(id_split))
            self.file_folds = os.path.join(self.idx_file, self.type_fold)
            idx_all = [i for i in range(len(self.files_refined))]
            with (open(self.file_folds, "rb")) as openfile:
                idx_folds = pickle.load(openfile)
                _, idx_test = idx_folds[id_split]
                #take indx of proteins in the training set
                idx_proteins_train = np.setdiff1d(idx_all, idx_test)
                for pid in idx_proteins_train:
                    smile = self._get_caption(pid)
                    smiles_split.append(smile)
                smiles_train.append(smiles_split)
        with open(self.path_smiles_train, 'wb') as fp:
            pickle.dump(smiles_train, fp)
        return smiles_train
    def _get_caption(self, id):
        """get caption as a row of a smile by id
        """
        protein_name = self.files_refined[id]
        path_to_smile = os.path.join(
            self.init_refined, protein_name, protein_name + "_ligand.smi"
        )
        with open(path_to_smile, "r") as file:
            caption = file.read()
        return caption
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,986
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/decoder/__init__.py
|
from src.model.decoder.decoder import DecoderRNN, My_attention, MyDecoderWithAttention
from src.model.decoder.decoder_vis import MyDecoderWithAttention_Vis
# Registry mapping the cfg['model']['decoder'] key to its decoder class.
decoder_dict = {
    'lstm': DecoderRNN,
    'lstm_attention': MyDecoderWithAttention,
    'lstm_attention_vis': MyDecoderWithAttention_Vis
}
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,987
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/bio_e3nn.py
|
from functools import partial
import torch
from torch import nn as nn
from e3nn.point.kernelconv import KernelConv
from e3nn.radial import CosineBasisModel, GaussianRadialModel, BesselRadialModel
from e3nn.non_linearities import rescaled_act
from e3nn.non_linearities.gated_block import GatedBlock
from e3nn.rsh import spherical_harmonics_xyz
from src.model.encoder.base import Aggregate
import torch.nn.functional as F
import ast
# Passed to KernelConv as custom_backward; False uses autograd's backward.
CUSTOM_BACKWARD = False
# All tensors/modules in this file are placed on GPU when available.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def create_kernel_conv(cutoff, n_bases, n_neurons, n_layers, act, radial_model):
    """Build a KernelConv factory bound to the requested radial basis model.

    Args:
        cutoff: max radius of the radial model.
        n_bases: number of radial basis functions.
        n_neurons: hidden width of the radial MLP.
        n_layers: depth of the radial MLP.
        act: activation used inside the radial MLP.
        radial_model: one of "cosine", "gaussian" or "bessel".

    Returns:
        functools.partial of KernelConv with RadialModel pre-bound.

    Raises:
        ValueError: for an unknown `radial_model`. (The original error
        message wrongly claimed only "cosine" and "gaussian" were valid,
        although "bessel" was accepted too.)
    """
    # choice of radial model depending on the kind of basis functions
    radial_models = {
        "cosine": CosineBasisModel,
        "gaussian": GaussianRadialModel,
        "bessel": BesselRadialModel,
    }
    try:
        basis_cls = radial_models[radial_model]
    except KeyError:
        raise ValueError(
            "radial_model must be one of cosine, gaussian or bessel"
        ) from None
    RadialModel = partial(
        basis_cls,
        max_radius=cutoff,
        number_of_basis=n_bases,
        h=n_neurons,
        L=n_layers,
        act=act
    )
    K = partial(KernelConv, RadialModel=RadialModel)
    return K
def constants(geometry, mask):
    """Pairwise geometry tensors consumed by the e3nn convolution layers.

    Args:
        geometry: [batch, atoms, xyz] coordinates.
        mask: [batch, atoms] padding mask; returned unchanged.

    Returns:
        (mask, diff_geo, radii): diff_geo is [batch, a, b, xyz]
        displacement vectors (detached, double precision) and radii is
        [batch, a, b] pairwise distances (detached).
    """
    targets = geometry.unsqueeze(1)  # [batch, 1, b, xyz]
    sources = geometry.unsqueeze(2)  # [batch, a, 1, xyz]
    diff_geo = (targets - sources).double().detach()
    radii = diff_geo.norm(2, dim=-1).detach()
    return mask, diff_geo, radii
class Bio_All_Network(torch.nn.Module):
    """network to predict atom-wise features from pocket atoms.
    Takes pharmacophoric and atom type (charge) features
    Args:
        natoms (int): max number of atoms
        encoding (string): type of encoding
        max_rad (float): radious of protein pocket
        num_basis (int): number of basis functions in radial convolution model
        n_neurons (int): number of neurons in convolution model
        n_layers (int): number of layers in convolution model
        beta (int): normalisation coefficient in convolution model
        rad_model (string): type of radial convolution model: gaussian, laplassian or ...
        num_embeddings (int): number of embeddings in embedding layer
        embed (int): type of embedding
        scalar_act_name (string): name for the scalar activation function
        gate_act_name (string): name for the gated block
        list_harm (string): list of harmonics dim
        aggregation_mode (string): mode for pooling: avg or max
        fc_sizes (string): list of output fc layers dimension
    Returns:
        [type]: [description]
    """
    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super().__init__()
        self.natoms = natoms
        self.encoding = encoding
        self.ssp = rescaled_act.ShiftedSoftplus(beta = beta)
        self.sp = rescaled_act.Softplus(beta=beta)
        self.embed = embed
        self.list_harm = list_harm
        # NOTE(review): scalar_act/gate_act are only bound for these exact
        # names; any other value leads to a NameError in make_layer — confirm.
        if(scalar_act_name == "sp"):
            scalar_act = self.sp
        if(gate_act_name == "sigmoid"):
            gate_act = rescaled_act.sigmoid
        # Rs: list of representations per layer; first layer is `embed`
        # scalar (l=0) channels, the rest parsed from the config string.
        Rs = [[(embed, 0)]]
        Rs += ast.literal_eval(self.list_harm)
        self.Rs = Rs
        self.fc_sizes = ast.literal_eval(fc_sizes)
        self.device = DEVICE
        if aggregation_mode == "sum":
            self.atom_pool = Aggregate(axis=1, mean=False)
        elif aggregation_mode == "avg":
            self.atom_pool = Aggregate(axis=1, mean=True)
        # NOTE(review): hard-coded to 6, shadowing the num_embeddings
        # constructor argument — confirm intended.
        self.num_embeddings = 6
        self.RadialModel = partial(
            CosineBasisModel,
            max_radius=max_rad,
            number_of_basis=num_basis,
            h=n_neurons,
            L=n_layers,
            act=self.ssp
        )
        # kernel_conv = create_kernel_conv(max_rad, num_basis, n_neurons, n_layers, self.ssp, rad_model)
        self.kernel_conv = partial(KernelConv, RadialModel=self.RadialModel)
        def make_layer(Rs_in, Rs_out):
            # one equivariant layer: kernel convolution + gated nonlinearity
            act = GatedBlock(Rs_out, scalar_act, gate_act)
            kc = self.kernel_conv(Rs_in, act.Rs_in)
            return torch.nn.ModuleList([kc, act])
        # layers[0] is the embedding; layers[1:] are (KernelConv, GatedBlock) pairs
        self.layers = torch.nn.ModuleList([torch.nn.Embedding(self.num_embeddings, embed, padding_idx=0)])
        self.layers += [make_layer(rs_in, rs_out) for rs_in, rs_out in zip(Rs, Rs[1:])]
        self.leakyrelu = nn.LeakyReLU(0.2) # Relu
        torch.autograd.set_detect_anomaly(True)
        def fc_out_block(in_f, out_f):
            # Linear -> BatchNorm over the atoms dimension -> LeakyReLU
            return nn.Sequential(
                nn.Linear(in_f, out_f),
                nn.BatchNorm1d(self.natoms),
                self.leakyrelu
            )
        def fc_out_block_no_bn(in_f, out_f):
            return nn.Sequential(
                nn.Linear(in_f, out_f),
                self.leakyrelu
            )
        self.fc_blocks_out = [fc_out_block(block_size[0], block_size[1])
                              for block_size in self.fc_sizes]
        self.fc_out = nn.Sequential(*self.fc_blocks_out)
    def encoding_block(self, features):
        """Encode raw atom features to `embed` channels, either via the
        embedding table or a freshly created linear projection.

        NOTE(review): in the non-embedding branch an nn.Linear is created
        on every call, so its weights are random each time and not
        trained — confirm intended.
        """
        # mask, diff_geo, radii = constants(geometry, mask)
        if self.encoding == "embedding":
            embedding = self.layers[0]
            features = torch.tensor(features).to(self.device).long()
            features = embedding(features).to(self.device)
            features = features.squeeze(2)
        else:
            features = torch.tensor(features).to(self.device).float()
            # features = torch.tensor(features).to(self.device)
            linear = nn.Linear(features.shape[2], self.embed).to(self.device)
            # features = features.long()
            features = linear(features).to(self.device)
            features = features.squeeze(2)
        features = features.double()
        return features
    def e3nn_block(self, features, geometry, mask):
        """Encode atom features and run the stack of equivariant
        (KernelConv, GatedBlock) layers over the pocket geometry."""
        mask, diff_geo, radii = constants(geometry, mask)
        if self.encoding == "embedding":
            embedding = self.layers[0]
            features = torch.tensor(features).to(self.device).long()
            features = embedding(features).to(self.device)
        else:
            features = torch.tensor(features).to(self.device).float()
            # features = torch.tensor(features).to(self.device)
            # NOTE(review): this Linear is created per forward call (untrained
            # weights) — confirm intended; see encoding_block.
            linear = nn.Linear(features.shape[2], self.embed).to(self.device)
            # features = features.long()
            features = linear(features).to(self.device)
            features = features.squeeze(2)
        features = features.double()
        # spherical harmonics are recomputed only when the filter set changes
        set_of_l_filters = self.layers[1][0].set_of_l_filters
        y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
        for kc, act in self.layers[1:]:
            if kc.set_of_l_filters != set_of_l_filters:
                set_of_l_filters = kc.set_of_l_filters
                y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
            # 1/sqrt(natoms) normalization before each convolution
            features = features.div(self.natoms ** 0.5).to(self.device)
            features = kc(
                features,
                diff_geo,
                mask,
                y=y,
                radii=radii,
                custom_backward=CUSTOM_BACKWARD
            )
            features = act(features)
        # zero out padded atoms
        features = features * mask.unsqueeze(-1)
        return features
    def fc_output(self, features, mask):
        """Apply the fully-connected head, then pool over atoms."""
        #chain of MLP and pooling in the end
        features = self.fc_out(features)
        features = self.atom_pool(features, mask)
        features = features.squeeze(1)
        features = features.double()
        return features
    def forward(self, features, geometry, mask):
        """Run both feature groups through the equivariant stack, concat,
        then apply the FC head with pooling.

        NOTE(review): the 7/remainder split assumes the first 7 channels
        are pharmacophoric and the rest atom-type — confirm against the
        feature builder.
        """
        features_bio = features[:, :, :7] #take pharma features
        features_charge = features[:, :, 7:] #take charges (atom type)
        features_bio = self.e3nn_block(features_bio, geometry, mask) #output after neural net on pharma features
        features_charge = self.e3nn_block(features_charge, geometry, mask) #output after neural net on atom type features
        features = torch.cat([features_bio, features_charge], dim=2) #concat
        # features = features.float()
        features = self.fc_output(features, mask) #apply MLP and pooling in the end
        return features # shape ?
class Bio_All_Network_no_batch(Bio_All_Network):
    """Bio_All_Network without batch normalisaation in output fully connected layers
    """
    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super(Bio_All_Network_no_batch, self).__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                                                       embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)
        def fc_out_block_no_bn(in_f, out_f):
            # Linear -> LeakyReLU (no BatchNorm)
            return nn.Sequential(
                nn.Linear(in_f, out_f),
                self.leakyrelu
            )
        # replace the parent's BN-based head built in super().__init__
        self.fc_blocks_out = [fc_out_block_no_bn(block_size[0], block_size[1])
                              for block_size in self.fc_sizes]
        self.fc_out = nn.Sequential(*self.fc_blocks_out)
    def fc_output_no_bn(self, features, mask):
        """FC head (no batch norm) followed by pooling over atoms."""
        features = self.fc_out(features)
        features = self.atom_pool(features, mask)
        features = features.squeeze(1)
        features = features.double()
        return features
    def forward(self, features, geometry, mask):
        """Same pipeline as the parent, using the BN-free FC head."""
        features_bio = features[:, :, :7]
        features_charge = features[:, :, 7:]
        features_bio = self.e3nn_block(features_bio, geometry, mask)
        features_charge = self.e3nn_block(features_charge, geometry, mask)
        features = torch.cat([features_bio, features_charge], dim=2)
        # features = features.float()
        features = self.fc_output_no_bn(features, mask)
        return features # shape ?
class Bio_Vis_All_Network(Bio_All_Network):
    """Bio_All_Network without pooling in the output fully connected layers,
    keeping per-atom outputs (e.g. for visualisation).
    """

    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super(Bio_Vis_All_Network, self).__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                                                  embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)

    def forward(self, features, geometry, mask):
        # First 7 channels: pharmacophoric features; remaining: atom type/charge.
        features_bio = features[:, :, :7]
        features_charge = features[:, :, 7:]
        features_bio = self.e3nn_block(features_bio, geometry, mask)
        features_charge = self.e3nn_block(features_charge, geometry, mask)
        features = torch.cat([features_bio, features_charge], dim=2)
        # Apply only the output MLP; no atom pooling, so per-atom rows survive.
        features = self.fc_out(features)
        features = features.double()
        return features
class Bio_Local_Network(Bio_All_Network):
    """Takes one feature group (just atom types, or just pharmacophoric
    features) and computes atom-wise features, then pools to molecule level.
    """

    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super(Bio_Local_Network, self).__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                                                embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)

    def forward(self, features, geometry, mask):
        # Single-group input: no channel split, just conv + output MLP/pooling.
        features = self.e3nn_block(features, geometry, mask)
        features = self.fc_output(features, mask)
        return features
class ResNet_Bio_ALL_Network(Bio_All_Network):
    """Bio_All_Network with a residual connection: each convolution layer after
    the first adds its output to the running features instead of replacing them.
    """

    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super(ResNet_Bio_ALL_Network, self).__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                                                     embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)

    def resnet_e3nn_block(self, features, geometry, mask):
        """e3nn convolution stack with additive residual connections.

        The first kernel/activation pair runs as-is to establish the feature
        width; every subsequent layer's masked output is added to the running
        features.
        """
        mask, diff_geo, radii = constants(geometry, mask)
        features = self.encoding_block(features)
        # Spherical harmonics are recomputed only when a layer's l-filter set changes.
        set_of_l_filters = self.layers[1][0].set_of_l_filters
        y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
        kc, act = self.layers[1]
        features = kc(
            features.div(self.natoms ** 0.5),  # normalise by sqrt(atom count)
            diff_geo,
            mask,
            y=y,
            radii=radii,
            custom_backward=CUSTOM_BACKWARD
        )
        features = act(features)
        for kc, act in self.layers[2:]:
            if kc.set_of_l_filters != set_of_l_filters:
                set_of_l_filters = kc.set_of_l_filters
                y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
            new_features = kc(
                features.div(self.natoms ** 0.5),
                diff_geo,
                mask,
                y=y,
                radii=radii,
                custom_backward=CUSTOM_BACKWARD
            )
            new_features = act(new_features)
            # Zero out padding atoms before the residual addition.
            new_features = new_features * mask.unsqueeze(-1)
            features = features + new_features  # residual connection
        return features

    def forward(self, features, geometry, mask):
        # First 7 channels: pharmacophoric features; remaining: atom type/charge.
        features_bio = features[:, :, :7]
        features_charge = features[:, :, 7:]
        features_bio = self.resnet_e3nn_block(features_bio, geometry, mask)
        features_charge = self.resnet_e3nn_block(features_charge, geometry, mask)
        features = torch.cat([features_bio, features_charge], dim=2)
        features = self.fc_output(features, mask)
        return features
class ResNet_Bio_Local_Network(ResNet_Bio_ALL_Network):
    """Bio_Local_Network with residual connections between the first e3nn
    layer's output and every subsequent convolution layer's output.
    """

    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super(ResNet_Bio_Local_Network, self).__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                                                       embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)

    def forward(self, features, geometry, mask):
        # Single-group input run through the residual conv stack, then pooled.
        features = self.resnet_e3nn_block(features, geometry, mask)
        features = self.fc_output(features, mask)
        return features
class Concat_Bio_Local_Network(ResNet_Bio_ALL_Network):
    """Bio_Local_Network variant where the outputs of all e3nn conv layers are
    concatenated along the channel dimension (dense-style aggregation).
    """

    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super(Concat_Bio_Local_Network, self).__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                                                       embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)

    def concat_e3nn_block(self, features, geometry, mask):
        """Run the conv stack, collecting every layer's output, and return the
        channel-wise concatenation of all of them."""
        features_all = []
        mask, diff_geo, radii = constants(geometry, mask)
        features = self.encoding_block(features)
        # Spherical harmonics are recomputed only when a layer's l-filter set changes.
        set_of_l_filters = self.layers[1][0].set_of_l_filters
        y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
        kc, act = self.layers[1]
        features = kc(
            features.div(self.natoms ** 0.5),  # normalise by sqrt(atom count)
            diff_geo,
            mask,
            y=y,
            radii=radii,
            custom_backward=CUSTOM_BACKWARD
        )
        features = act(features)
        features_all.append(features)
        # NOTE(review): `features` is never reassigned inside this loop, so every
        # layer after the first consumes the *first* layer's output rather than
        # the previous layer's output -- confirm this is intentional.
        for kc, act in self.layers[2:]:
            if kc.set_of_l_filters != set_of_l_filters:
                set_of_l_filters = kc.set_of_l_filters
                y = spherical_harmonics_xyz(set_of_l_filters, diff_geo)
            new_features = kc(
                features.div(self.natoms ** 0.5),
                diff_geo,
                mask,
                y=y,
                radii=radii,
                custom_backward=CUSTOM_BACKWARD
            )
            new_features = act(new_features)
            # Zero out padding atoms before collecting this layer's output.
            new_features = new_features * mask.unsqueeze(-1)
            features_all.append(new_features)
        features_all = torch.cat(features_all, 2)  # concatenation of all layer outputs
        return features_all

    def forward(self, features, geometry, mask):
        features = self.concat_e3nn_block(features, geometry, mask)
        features = self.fc_output(features, mask)
        return features
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,988
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/evaluation/Contrib/statistics.py
|
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem, QED
from rdkit.Chem.Descriptors import qed, ExactMolWt, MolLogP
from evaluation.Contrib.SA_Score import sascorer
from evaluation.Contrib.NP_Score import npscorer
from evaluation.Contrib.NP_Score import npscorer_my
from evaluation.Contrib.NP_Score.npscorer_my import processMols
import subprocess
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import os
import numpy as np
def similarity(smile_true, smiles_others):
    """Dice similarity of Morgan fingerprints (radius 2) between a reference
    SMILES and each SMILES in *smiles_others*.

    Args:
        smile_true: reference SMILES string.
        smiles_others: iterable of SMILES strings to compare against.

    Returns:
        List of Dice similarity floats, one per entry of *smiles_others*.
    """
    reference = Chem.MolFromSmiles(smile_true)
    reference_fp = AllChem.GetMorganFingerprint(reference, 2)
    return [
        DataStructs.DiceSimilarity(
            reference_fp,
            AllChem.GetMorganFingerprint(Chem.MolFromSmiles(candidate), 2),
        )
        for candidate in smiles_others
    ]
def analysis_to_csv(smiles, name_protein, id_fold, type_fold, epoch_no):
    """Compute chemistry metrics for generated SMILES against the original.

    Args:
        smiles: list whose first entry is the original SMILES and whose
            remaining entries are generated SMILES.
        name_protein: protein identifier, repeated once per generated molecule.
        id_fold: fold index (stringified in the output).
        type_fold: fold type label.
        epoch_no: training epoch the molecules were sampled at.

    Returns:
        List of per-column lists (protein, fold id, fold type, epoch, original
        SMILES, generated SMILES, NP score, logP, SA score, QED, molecular
        weight, Dice similarity, then the original molecule's metrics broadcast
        to the same length) -- ready to be zipped into CSV rows.
    """
    orig_smile = smiles[0]  # original smile
    gen_smiles = smiles[1:]  # list of generated smiles
    length = len(gen_smiles)
    # ------------------------- descriptors ---------------------------------
    mol_orig = Chem.MolFromSmiles(orig_smile)
    mols_gen = [Chem.MolFromSmiles(smile) for smile in gen_smiles]
    orig_logP = MolLogP(mol_orig)
    orig_sa = sascorer.calculateScore(mol_orig)
    orig_qed = qed(mol_orig)
    orig_weight = ExactMolWt(mol_orig)
    orig_NP = processMols([mol_orig])
    gen_logP = [MolLogP(mol) for mol in mols_gen]
    gen_sa = [sascorer.calculateScore(mol) for mol in mols_gen]
    gen_qed = [qed(mol) for mol in mols_gen]
    gen_weight = [ExactMolWt(mol) for mol in mols_gen]
    gen_NP = processMols(mols_gen)
    # ------------------------- similarity ----------------------------------
    gen_sim = similarity(orig_smile, gen_smiles)
    statistics = [length * [name_protein], length * [str(id_fold)], length * [type_fold], length * [str(epoch_no)], length * [orig_smile], gen_smiles, gen_NP, gen_logP, gen_sa, gen_qed, gen_weight, gen_sim,
                  length * [float(orig_NP[0])], length * [orig_logP], length * [orig_sa], length * [orig_qed], length * [orig_weight]]
    return statistics
if __name__ == "__main__":
    # NOTE(review): analysis_to_csv requires (smiles, name_protein, id_fold,
    # type_fold, epoch_no); calling it with a single string will raise a
    # TypeError -- confirm the intended invocation for this script entry.
    analysis_to_csv("10gs")
def analysis_to_csv_test(smiles, name_protein, id_fold, type_fold):
    """Test/placeholder variant of analysis_to_csv.

    Computes descriptors only for the original molecule and fills the
    generated-molecule columns with the raw SMILES list.

    NOTE(review): the first column is the literal 'fg' rather than
    *name_protein*, and the seven metric columns are all `smiles[1:]`
    placeholders; also orig_NP is broadcast as a list (not `float(orig_NP[0])`
    like in analysis_to_csv) -- confirm both are intended for this test helper.
    """
    orig_smile = smiles[0]  # original smile
    gen_smiles = smiles[1:]  # list of generated smiles
    length = len(gen_smiles)
    # ------------------- original-molecule descriptors ---------------------
    mol_orig = Chem.MolFromSmiles(orig_smile)
    orig_logP = MolLogP(mol_orig)
    orig_sa = sascorer.calculateScore(mol_orig)
    orig_qed = qed(mol_orig)
    orig_weight = ExactMolWt(mol_orig)
    orig_NP = processMols([mol_orig])
    statistics = [length * ['fg'], length * [str(id_fold)], length * [type_fold], length * [orig_smile], smiles[1:], smiles[1:], smiles[1:], smiles[1:], smiles[1:], smiles[1:], smiles[1:],
                  length * [orig_NP], length * [orig_logP], length * [orig_sa], length * [orig_qed], length * [orig_weight]]
    return statistics
# analysis = {'logP': gen_logP, 'sa': gen_sa, 'qed': gen_qed, 'gen_weight': gen_weight,
# 'similarity': gen_sim}
# df = pd.DataFrame(data=analysis)
# name_csv = os.path.join(save_dir, "analysis_" + name_protein + ".csv")
# df.to_csv(name_csv)
# statistics = np.vstack((np.asarray(length * [name_protein]), np.asarray(length * [str(id_fold)]),
# np.asarray(gen_logP), np.asarray(gen_sa),
# np.asarray(gen_qed), np.asarray(gen_weight), np.asarray(gen_sim)))
# return map(list, zip(*statistics))
# file_smiles = os.path.join("/Volumes/Ubuntu/research_drugs/data/gen_smiles_without_at/", name_protein, name_protein + ".txt")
# save_dir = os.path.join(save_dir_smiles, str(id_fold), name_protein)
# file_smiles = os.path.join(save_dir, name_protein + ".txt")
# file_all_smiles = "/Volumes/Ubuntu/research_drugs/data/gen_smiles_without_at/all_smiles_lig.txt"
# with open(file_smiles) as fp:
# smiles = fp.readlines()
# with open(file_all_smiles) as fp:
# smiles_all = fp.readlines()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,989
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/decoder/beam_search_e3nn.py
|
def sample_beam_search(self, features, states=None):
    """Decode token sequences from encoder features with beam search.

    Args:
        features: encoder output of shape (1, feature_dim); it is expanded
            across the beam dimension.
        states: optional initial LSTM state tuple (h, c).

    Returns:
        List of completed token-index sequences (one per finished beam);
        may be empty if no beam emitted <end> before MAX_Length steps.
    """
    print("feat shape init", features.shape)
    k = self.beam_size
    vocab_size = len(self.vocab)
    # Treat the problem as having a batch size of k: one row per beam.
    feat_dim = features.shape[1]
    inputs = features.expand(k, feat_dim).unsqueeze(1)
    # Top k previous words at each step; initially all <start>.
    k_prev_words = torch.LongTensor([[self.vocab.word2idx['<start>']]] * k).to(self.device)  # (k, 1)
    seqs = k_prev_words  # (k, 1) running sequences
    top_k_scores = torch.zeros(k, 1).to(self.device)  # (k, 1) running scores
    complete_seqs = list()
    complete_seqs_scores = list()
    step = 1
    # k shrinks as beams emit <end>; loop until no live beam or MAX_Length hit.
    while True:
        h, states = self.lstm(inputs, states)
        print("states usual")
        scores = self.linear(h.squeeze(1))
        scores = F.softmax(scores, dim=1)
        # NOTE(review): scores are not accumulated into top_k_scores, so beams
        # are ranked by the current step's probability only -- confirm intended.
        if step == 1:
            # All k beams are identical at step 1, so rank row 0 only.
            top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
        else:
            # Unroll all beams and find the global top scores.
            top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
        # BUG FIX: use floor division -- true division of a LongTensor yields a
        # float tensor in modern PyTorch, which cannot be used as an index.
        prev_word_inds = top_k_words // vocab_size  # which beam each word extends
        next_word_inds = top_k_words % vocab_size  # word index within the vocab
        # Append the chosen words to their source sequences.
        seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
        # Partition beams into unfinished vs finished (<end> emitted).
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                           next_word != self.vocab.word2idx['<end>']]
        complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
        if len(complete_inds) > 0:
            complete_seqs.extend(seqs[complete_inds].tolist())
            complete_seqs_scores.extend(top_k_scores[complete_inds])
            k -= len(complete_inds)  # reduce beam width accordingly
        if k == 0:
            break
        # Keep only unfinished beams and reindex the LSTM state to match.
        seqs = seqs[incomplete_inds]
        h = h[prev_word_inds[incomplete_inds]]
        states = (states[0][:, prev_word_inds[incomplete_inds]], states[1][:, prev_word_inds[incomplete_inds]])
        top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
        k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
        inputs = self.embed(k_prev_words)
        # Safety stop for runaway decoding.
        if step > MAX_Length:
            break
        step += 1
    # BUG FIX: guard against no beam ever finishing (loop ended on MAX_Length):
    # max() over an empty list would raise ValueError.
    if complete_seqs_scores:
        i = complete_seqs_scores.index(max(complete_seqs_scores))
        seq = complete_seqs[i]  # best-scoring sequence (currently unused)
    print(complete_seqs)
    return complete_seqs
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,990
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/datasets/dictionaries.py
|
# Dictionaries of unique atoms in the PDBbind database of pockets/ligands,
# mapping atom symbol -> encoding value.

# Minimal two-class example encoding (lowercase symbols).
atom_type = {'c': 0,
             'n': 1}

# The five most common elements, indexed from zero.
atom_most_common = {"C": 0,
                    "H": 1,
                    "N": 2,
                    "O": 3,
                    "S": 4
                    }

# Simple ordinal encoding for all elements seen in the database (1-based).
dict_atoms_simple = {"C": 1,"H": 2,"N": 3, "O": 4, "S": 5, "P": 6, "Zn": 7, "Cl": 8, "F": 9, "Mg": 10, "Ca": 11, "Na": 12, "Mn": 13, "I": 14,"Br": 15,"Fe": 16, "Cu": 17, "Cd": 18, "Ni": 19, "Co": 20, "Hg": 21, "K": 22, "Se": 23}
# Approximate atomic masses.
# NOTE(review): "H": 2 differs from hydrogen's standard atomic mass (~1) --
# confirm whether this is intentional (e.g. deuterium) or a typo.
dict_atoms_masses = {"C": 12, "H": 2, "N": 14, "O": 16, "S": 32, "P": 31, "Zn": 65, "Cl": 35.5, "F": 19, "Mg": 24, "Ca": 40, "Na": 23, "Mn": 55, "I": 127,"Br": 80,"Fe": 56, "Cu": 64, "Cd": 112, "Ni": 58.7, "Co": 59, "Hg": 201, "K": 39, "Se": 79}
# Atomic numbers (nuclear charges).
dict_atoms_charges = {"C": 6, "H": 1, "N": 7, "O": 8, "S": 16, "P": 15, "Zn": 30, "Cl": 17, "F": 9, "Mg": 12, "Ca": 20, "Na": 11, "Mn": 25, "I": 53, "Br": 35, "Fe": 26, "Cu": 29, "Cd": 48, "Ni": 28, "Co": 27, "Hg": 80, "K": 19, "Se": 34}
# Placeholders, populated elsewhere (empty here).
dict_atoms_mass = {}
dict_atoms_hot = {}
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,991
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/tests/check_feat_exceptions.py
|
import os, sys
import argparse
# from release import *
from src.utils import config
# import utils.config as config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from utils import Utils
import argparse
import sys
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from src.utils.build_vocab import Vocabulary
from src.datasets.data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from src.training.train_check_att_vis import Trainer_Attention_Check_Vis
from src.tests.training.train_checkpoint import Trainer_Fold
from src.sampling.sampler import Sampler
from src.datasets.split import Splitter
from src.training.utils import save_checkpoint_sampling
from src.evaluation.analysis import plot_all
from src.tests.datasets.feature import Featuring
import warnings
import shutil
from distutils.dir_util import copy_tree
def test_Feature_exists():
    """Check that every refined-set protein folder contains its precomputed
    feature tensor; folders missing the feature file are moved to an
    'exceptions' directory and deleted from the refined set.

    CLI args select the feature variant (radius, feature type, filtering,
    hydrogen handling) whose filename is checked.
    """
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.'
    )
    parser.add_argument('--config', type=str, help='Path to config file.')
    parser.add_argument('--radious', type=int, default=8, help='dimension of word embedding vectors')
    parser.add_argument('--type_feature', type=str, default='mass_charge', help='type_feature')
    parser.add_argument('--type_filtering', type=str, default='all', help='type_filtering')
    parser.add_argument('--h_filterig', type=str, default='without_h', help='h')
    parser.add_argument('--type_fold', type=str, help='type_fold')
    parser.add_argument('--idx_fold', type=str, help='Path to config file.')
    args = parser.parse_args()
    # Merge the user config over the lab defaults.
    cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
    type_fold = args.type_fold
    idx_fold = args.idx_fold
    path_data = cfg['data']['path']
    savedir = cfg["output_parameters"]["savedir"]
    model_name = cfg["model_params"]["model_name"]
    num_epoches = cfg["model_params"]["num_epochs"]
    path_root = cfg['preprocessing']['path_root']
    init_refined = path_root + "/data/new_refined/"
    # ---------------- refined files: keep only PDB-id-like folders ----------
    files_refined = os.listdir(init_refined)
    files_refined = [file for file in files_refined if file[0].isdigit()]
    files_refined.sort()
    idx_files_refined = list(range(0, len(files_refined)))

    def delete_files(protein_name):
        # Archive a protein folder into data/exceptions, then remove it from
        # the refined set.
        path_to_exceptions = os.path.join(path_data, "exceptions")
        path_protein_folder = os.path.join(init_refined, protein_name)
        os.makedirs(path_to_exceptions, exist_ok=True)
        copy_tree(path_protein_folder, path_to_exceptions)
        shutil.rmtree(path_protein_folder)

    # ---------------- feature-file existence check --------------------------
    print("Checking saved features!")
    # NOTE(review): nothing is ever appended to this list (the appending code
    # is commented out), so the final print is always empty -- confirm intent.
    names_prot_exceptions = []
    for pdbid in idx_files_refined:
        name_protein = files_refined[pdbid]
        files = os.listdir(os.path.join(init_refined, name_protein))
        # Expected filename, e.g. 10gs_feature_r_8_mass_charge_all_without_h.pt
        array_feat_names = [name_protein, "feature", "r", str(args.radious), args.type_feature, args.type_filtering, args.h_filterig]
        name_feature = "_".join(array_feat_names) + ".pt"
        if name_feature in files:
            pass
        else:
            print("no feature! - ", name_protein)
            delete_files(name_protein)
    print(names_prot_exceptions)
if __name__ == "__main__":
    # Script entry point: run the feature-existence check directly.
    test_Feature_exists()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,992
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/model/encoder/bio_e3nn_res.py
|
from functools import partial
import torch
from torch import nn as nn
from e3nn.point.kernelconv import KernelConv
from e3nn.radial import CosineBasisModel, GaussianRadialModel, BesselRadialModel
from e3nn.non_linearities import rescaled_act
from e3nn.non_linearities.gated_block import GatedBlock
from e3nn.rsh import spherical_harmonics_xyz
from src.model.encoder.base import Aggregate
import torch.nn.functional as F
import ast
from src.model.encoder.bio_e3nn import Bio_All_Network
CUSTOM_BACKWARD = False
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def create_kernel_conv(cutoff, n_bases, n_neurons, n_layers, act, radial_model):
    """Build a KernelConv factory with the requested radial basis model.

    Args:
        cutoff: maximum interaction radius passed to the radial model.
        n_bases: number of radial basis functions.
        n_neurons: hidden width of the radial network.
        n_layers: depth of the radial network.
        act: activation used inside the radial network.
        radial_model: one of "cosine", "gaussian", or "bessel".

    Returns:
        functools.partial wrapping KernelConv with the configured RadialModel.

    Raises:
        ValueError: if *radial_model* is not a recognised name.
    """
    if radial_model == "cosine":
        basis_cls = CosineBasisModel
    elif radial_model == "gaussian":
        basis_cls = GaussianRadialModel
    elif radial_model == "bessel":
        basis_cls = BesselRadialModel
    else:
        # BUG FIX: the old message omitted the accepted "bessel" option.
        raise ValueError("radial_model must be one of 'cosine', 'gaussian' or 'bessel'")
    RadialModel = partial(
        basis_cls,
        max_radius=cutoff,
        number_of_basis=n_bases,
        h=n_neurons,
        L=n_layers,
        act=act
    )
    K = partial(KernelConv, RadialModel=RadialModel)
    return K
def constants(geometry, mask):
    """Compute pairwise displacement vectors and distances for a point cloud.

    Args:
        geometry: tensor of shape [batch, points, xyz].
        mask: passed through unchanged.

    Returns:
        Tuple ``(mask, diff_geo, radii)`` where ``diff_geo`` is the detached,
        double-precision pairwise difference tensor [batch, a, b, xyz] and
        ``radii`` holds the corresponding Euclidean distances [batch, a, b].
    """
    # diff[n, a, b] = geometry[n, b] - geometry[n, a], via broadcasting.
    pairwise_diff = (geometry.unsqueeze(1) - geometry.unsqueeze(2)).double().detach()
    pairwise_dist = pairwise_diff.norm(2, dim=-1).detach()
    return mask, pairwise_diff, pairwise_dist
class ResNet_Out_Local_Network(Bio_All_Network):
    """``Bio_All_Network`` variant with a residual fully-connected block
    applied between the e3nn convolution stack and the output head.
    """

    def __init__(self, natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                 embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes):
        super().__init__(natoms, encoding, max_rad, num_basis, n_neurons, n_layers, beta, rad_model, num_embeddings,
                         embed, scalar_act_name, gate_act_name, list_harm, aggregation_mode, fc_sizes)
        # Width of the scalar (l=0) channel produced by the last e3nn layer.
        self.size_out_harm = self.Rs[-1][0][0]
        self.resnet_out_fc = nn.Linear(self.size_out_harm, self.size_out_harm)

    def resnet_out_block(self, features):
        """Residual projection: ``x + Linear(x)``."""
        return features + self.resnet_out_fc(features)

    def forward(self, features, geometry, mask):
        """Encode, apply the residual block, then project to the output space."""
        conv_features = self.e3nn_block(features, geometry, mask)
        residual_features = self.resnet_out_block(conv_features)
        return self.fc_output(residual_features, mask)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,993
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/train_feature_all.py
|
import argparse
import src.utils.config as config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from utils import Utils
import sys
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from src.utils.build_vocab import Vocabulary
from src.datasets.data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from src.training.train_check_att_vis import Trainer_Attention_Check_Vis
from src.training.training_feature_att import Trainer_Fold_Feature_Attention
from src.training.train_checkpoint import Trainer_Fold
from src.training.training_feature import Trainer_Fold_Feature
from src.sampling.sampler import Sampler
from src.datasets.split import Splitter
from src.training.utils import save_checkpoint_sampling
from src.evaluation.analysis import plot_all
from src.datasets.feature import Featuring
def _completed_steps(path):
    """Return the set of pipeline step names recorded in the checkpoint file at *path*."""
    if not os.path.exists(path):
        return set()
    with open(path) as fh:
        return {line.strip() for line in fh if line.strip()}


def _mark_step_done(path, step):
    """Record *step* as completed by appending it (one name per line) to *path*."""
    with open(path, "a") as fh:
        fh.write(step + "\n")


def main():
    """Run the feature-based train/sample/plot pipeline for a single fold.

    Steps: check/generate features, create the fold split if missing, train
    the (attention or plain) captioning model, save PCA encodings, run the
    sampling regimes (resumable via a checkpoint), and plot the results.
    """
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.'
    )
    parser.add_argument('--config', type=str, help='Path to config file.')
    parser.add_argument('--radious', type=int, default=8, help='dimension of word embedding vectors')
    parser.add_argument('--type_feature', type=str, default='mass_charge', help='type_feature')
    parser.add_argument('--type_filtering', type=str, default='all', help='type_filtering')
    parser.add_argument('--h_filterig', type=str, default='without_h', help='h')
    parser.add_argument('--type_fold', type=str, help='type_fold')
    parser.add_argument('--idx_fold', type=str, help='Path to config file.')
    args = parser.parse_args()
    cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
    type_fold = args.type_fold
    idx_fold = args.idx_fold
    savedir = cfg["output_parameters"]["savedir"]
    # The model name encodes the full feature configuration so runs don't collide.
    model_name = cfg["model_params"]["model_name"] + "_" + args.type_feature + "_" + str(args.radious) + "_" + args.type_filtering + "_" + args.h_filterig
    cfg["model_params"]["model_name"] = model_name
    num_epoches = cfg["model_params"]["num_epochs"]
    # features generation
    print("**********Checking features**************")
    Feature_gen = Featuring(cfg, args.radious, args.type_feature, args.type_filtering, args.h_filterig)
    cfg['model']['encoder_kwargs']['natoms'] = Feature_gen.max_length
    print("number of atoms: ", cfg['model']['encoder_kwargs']['natoms'])
    # Create the fold split file only if it does not exist yet.
    dir_idx_split = os.path.join(cfg['output_parameters']['savedir'], model_name, "logs", "idxs", cfg['splitting']['file_folds'])
    if not os.path.exists(dir_idx_split):
        print("***********doing split...***********")
        splitter = Splitter(cfg)
        splitter.split(type_fold)
    # training + evaluation
    if cfg['training_params']['mode'] == "no_attention":
        trainer = Trainer_Fold_Feature(cfg, idx_fold)
        trainer.train_epochs(Feature_gen)
    elif cfg['training_params']['mode'] == "attention":
        trainer = Trainer_Fold_Feature_Attention(cfg, idx_fold)
        trainer.train_epochs(Feature_gen)
    encoder_path = os.path.join(savedir, model_name, "models", "encoder-" + str(idx_fold) + "-" + str(num_epoches) + '.ckpt')
    decoder_path = os.path.join(savedir, model_name, "models", "decoder-" + str(idx_fold) + "-" + str(num_epoches) + '.ckpt')
    checkpoint_sampling_path = os.path.join(savedir, model_name, "checkpoints", str(idx_fold) + '_sample.pkl')
    pipeline_checkpoint_path = os.path.join(savedir, model_name, "checkpoints", str(idx_fold) + 'pipeline.txt')
    # BUG FIX: this file used to be opened once in "a+" mode and queried with
    # readlines() while the position sat at EOF, so the "already done" checks
    # below never matched and completed steps were re-run on restart; tokens
    # were also written without a newline (producing "pcaplot").  Steps are
    # now read fresh and recorded one per line via the helpers above.
    os.makedirs(os.path.dirname(pipeline_checkpoint_path), exist_ok=True)
    # PCA / encodings step (skipped when already recorded in the checkpoint).
    if "pca" not in _completed_steps(pipeline_checkpoint_path):
        print("*****doing pca********")
        sampler = Sampler(cfg, 'max', Feature_gen)
        sampler.save_encodings_all('test', idx_fold, encoder_path, decoder_path)
        sampler.collect_all_encodings()
        sampler.save_encodings_all('train', idx_fold, encoder_path, decoder_path)
        sampler.collect_all_encodings()
        _mark_step_done(pipeline_checkpoint_path, "pca")
    # Sampling regimes, resumable from the sampling checkpoint index.
    regimes = ["probabilistic", "max", "beam_1", "beam_3", "beam_10", "beam_20"]
    end_sampling_ind = len(regimes)
    if os.path.exists(checkpoint_sampling_path):
        print("loading sample ids...")
        checkpoint_sampling = torch.load(checkpoint_sampling_path)
        start_sampling_ind = checkpoint_sampling['idx_sample_regime_start']
        print("************start_sampling_ind***********", start_sampling_ind)
    else:
        start_sampling_ind = 0
        save_checkpoint_sampling(checkpoint_sampling_path, 0, 0)
    for sampling_ind in range(start_sampling_ind, end_sampling_ind):
        sample = regimes[sampling_ind]
        print("*********sample regim*********** ", sample)
        sampler = Sampler(cfg, sample, Feature_gen)
        sampler.analysis_cluster(idx_fold, type_fold, encoder_path, decoder_path)
    if "plot" not in _completed_steps(pipeline_checkpoint_path):
        plot = plot_all(cfg)
        plot.run()
        _mark_step_done(pipeline_checkpoint_path, "plot")


if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,994
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/tests/training/train_attention.py
|
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torchsummary import summary
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils import Utils
import argparse
import sys
import config
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from utils.build_vocab import Vocabulary
from datasets.data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from sampling.sampler import Sampler
class Trainer_Attention():
    """Cross-validated trainer for the attention-based encoder/decoder captioning model.

    Builds the encoder/decoder pair from *cfg*, trains each KFold split with
    :meth:`train_loop_mask`, checkpoints periodic and best-loss weights, and
    runs the sampler on each split's best model after training.
    """

    def __init__(self, cfg):
        """Read configuration, create output directories and log files, and build the models.

        NOTE(review): several files and the SummaryWriter opened here are never
        explicitly closed; they live for the lifetime of the trainer.
        """
        # model params
        self.original_stdout = sys.stdout  # saved so stdout can be restored after redirection below
        self.cfg = cfg
        self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        # training params
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        self.n_splits = cfg['training_params']['n_splits']
        # best training loss seen so far; compared against in train_loop_mask
        self.loss_best = np.inf
        # output files
        self.savedir = cfg['output_parameters']['savedir']
        self.tesnorboard_path = self.savedir  # NOTE(review): attribute name keeps the original typo ("tesnorboard")
        self.model_path = os.path.join(self.savedir, "models")
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)
        if not os.path.exists(self.idx_file):
            os.makedirs(self.idx_file)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        if not os.path.exists(self.save_dir_smiles):
            os.makedirs(self.save_dir_smiles)
        # log files
        self.test_idx_file = open(os.path.join(self.idx_file, "test_idx.txt"), "w")
        self.log_file = open(os.path.join(self.log_path, "log.txt"), "w")
        self.log_file_tensor = open(os.path.join(self.log_path, "log_tensor.txt"), "w")
        self.writer = SummaryWriter(self.tesnorboard_path)
        self.Encoder, self.Decoder = config.get_model(cfg, device=self.device)
        self.input = config.get_shape_input(self.cfg)
        # print(summary(self.Encoder, self.input))
        # print(summary(self.Decoder))
        print(self.Encoder)
        print(self.Decoder)
        # Dump the model reprs into logs/model.txt by temporarily redirecting stdout.
        with open(os.path.join(self.log_path, "model.txt"), 'w') as f:
            sys.stdout = f  # Change the standard output to the file we created.
            # print(summary(self.Encoder, self.input))
            # print(summary(self.Decoder))
            print(self.Encoder)
            print(self.Decoder)
            sys.stdout = self.original_stdout
        # print(model)
        self.name_file_stat = cfg["sampling_params"]["name_all_stat"]
        self.file_statistics = open(os.path.join(self.save_dir_smiles, self.name_file_stat), "w")
        # the file of the whole stat
        self.file_statistics.write("name,fold,type_fold, orig_smile, gen_smile, gen_NP, gen_logP,gen_sa,gen_qed,gen_weight,gen_similarity, orig_NP, orig_logP, orig_sa, orig_qed, orig_weight, frequency, sampling" + "\n")
        self.file_statistics.flush()
        # print all params
        nparameters_enc = sum(p.numel() for p in self.Encoder.parameters())
        nparameters_dec = sum(p.numel() for p in self.Decoder.parameters())
        print('Total number of parameters: %d' % (nparameters_enc + nparameters_dec))
        # NOTE(review): reopening model.txt with 'w' overwrites the model reprs
        # written above — confirm whether appending was intended.
        with open(os.path.join(self.log_path, "model.txt"), 'w') as f:
            f.write('Total number of parameters: %d' % (nparameters_enc + nparameters_dec))
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
        self.criterion = nn.CrossEntropyLoss()

    def train_loop_mask(self, loader, encoder, decoder, caption_optimizer, split_no, epoch, total_step):
        """Run one training epoch over *loader* for the given split.

        Logs loss/GPU memory to TensorBoard and the log files, saves periodic
        checkpoints every ``save_step`` batches, and keeps the best-loss
        weights in ``encoder_best_name`` / ``decoder_best_name``.
        """
        encoder.train()
        decoder.train()
        for i, (features, geometry, masks, captions, lengths) in enumerate(loader):
            # Set mini-batch dataset
            features = features.to(self.device)
            geometry = geometry.to(self.device)
            captions = captions.to(self.device)
            masks = masks.to(self.device)
            # targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
            caption_optimizer.zero_grad()
            # Forward, backward and optimize
            feature = encoder(features, geometry, masks)
            # outputs = decoder(feature, captions, lengths)
            scores, caps_sorted, decode_lengths = decoder(feature, captions, lengths)
            # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
            targets = caps_sorted[:, 1:]
            # Pack both scores and targets so padding positions are excluded from the loss.
            scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
            targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
            loss = self.criterion(scores, targets)
            # scheduler.step(loss)
            # if grad_clip is not None:
            #     clip_gradient(decoder_optimizer, grad_clip)
            #     if encoder_optimizer is not None:
            #         clip_gradient(encoder_optimizer, grad_clip)
            decoder.zero_grad()
            encoder.zero_grad()  # shall I do that?
            loss.backward()
            caption_optimizer.step()  # !!! figure out whether we should leave that
            name = "training_loss_" + str(split_no + 1)
            self.writer.add_scalar(name, loss.item(), epoch)
            # writer.add_scalar("training_loss", loss.item(), epoch)
            self.log_file_tensor.write(str(loss.item()) + "\n")
            self.log_file_tensor.flush()
            # Record GPU memory usage (MiB) for this step.
            handle = py3nvml.nvmlDeviceGetHandleByIndex(0)
            fb_mem_info = py3nvml.nvmlDeviceGetMemoryInfo(handle)
            mem = fb_mem_info.used >> 20
            print('GPU memory usage: ', mem)
            self.writer.add_scalar('val/gpu_memory', mem, epoch)
            # Print log info
            if i % self.log_step == 0:
                result = "Split [{}], Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}".format(
                    split_no, epoch, self.num_epochs, i, total_step, loss.item(), np.exp(loss.item())
                )
                print(result)
                self.log_file.write(result + "\n")
                self.log_file.flush()
            # loss is a real crossentropy loss
            #
            # Save the model checkpoints
            if (i + 1) % self.save_step == 0:
                # print("yeeees!!!")
                self.encoder_name = os.path.join(
                    self.model_path, "encoder-{}-{}-{}.ckpt".format(split_no, epoch + 1, i + 1)
                )
                self.decoder_name = os.path.join(
                    self.model_path, "decoder-{}-{}-{}.ckpt".format(split_no, epoch + 1, i + 1)
                )
                torch.save(
                    encoder.state_dict(),
                    self.encoder_name,
                )
                torch.save(
                    decoder.state_dict(),
                    self.decoder_name,
                )
            # Track the best loss so far and snapshot the corresponding weights.
            # NOTE(review): self.loss_best keeps the loss tensor (not .item()),
            # which retains the autograd graph until the next improvement — confirm intended.
            if (self.loss_best - loss > 0):
                print("The best loss " + str(loss.item()) + "; Split-{}-Epoch-{}-Iteration-{}_best.ckpt".format(split_no, epoch + 1, i + 1))
                self.log_file.write("The best loss " + str(loss.item()) + "; Split-{}-Epoch-{}-Iteration-{}_best.ckpt".format(split_no, epoch + 1, i + 1) + "\n")
                self.encoder_best_name = os.path.join(
                    self.model_path, "encoder_best_" + str(split_no) + ".ckpt"
                )
                self.decoder_best_name = os.path.join(
                    self.model_path, "decoder_best_" + str(split_no) + ".ckpt")
                torch.save(
                    encoder.state_dict(),
                    self.encoder_best_name,
                )
                torch.save(
                    decoder.state_dict(),
                    self.decoder_best_name,
                )
                self.loss_best = loss
        self.log_file_tensor.write("\n")
        self.log_file_tensor.flush()

    def train_epochs(self):
        """Run KFold cross-validated training and post-training sampling.

        For each split: persists the test indices, trains for ``num_epochs``
        epochs, then runs the sampler's cluster analysis on the best checkpoint.
        NOTE(review): the same Encoder/Decoder objects are reused across splits
        without re-initialisation — confirm this is intended.
        """
        # get indexes of all complexes and "nick names"
        # Load vocabulary wrapper
        featuriser = Pdb_Dataset(self.cfg, vocab=self.vocab)
        # data_ids, data_names = utils._get_refined_data()
        files_refined = os.listdir(self.protein_dir)
        # presumably 3 non-complex entries live in the refined dir — TODO confirm
        data_ids = np.array([i for i in range(len(files_refined) - 3)])
        # data_ids = np.array([i for i in range(20)])
        # cross validation
        kf = KFold(n_splits=self.n_splits, shuffle=True, random_state=2)
        my_list = list(kf.split(data_ids))
        test_idx = []
        # output memory usage
        py3nvml.nvmlInit()
        sampler = Sampler(self.cfg)
        for split_no in range(self.n_splits):
            train_id, test_id = my_list[split_no]
            train_data = data_ids[train_id]
            test_data = data_ids[test_id]
            # Persist this split's test indices (pickle + human-readable log).
            with open(os.path.join(self.idx_file, 'test_idx_' + str(split_no)), 'wb') as fp:
                pickle.dump(test_data, fp)
            test_idx.append(test_data)
            self.test_idx_file.write(str(test_data) + "\n")
            self.test_idx_file.flush()
            # Pre-featurise the whole training split in memory before batching.
            feat_train = [featuriser[data] for data in train_data]
            loader_train = DataLoader(feat_train, batch_size=self.batch_size,
                                      shuffle=True,
                                      num_workers=self.num_workers,
                                      collate_fn=collate_fn_masks,)
            # loader_train = config.get_loader(cfg, feat_train, batch_size, num_workers,)
            total_step = len(loader_train)
            print("total_step", total_step)
            encoder = self.Encoder
            decoder = self.Decoder
            # params_encoder = filter(lambda p: p.requires_grad, encoder.parameters())
            caption_params = list(decoder.parameters()) + list(encoder.parameters())
            caption_optimizer = torch.optim.Adam(caption_params, lr=self.learning_rate)
            # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(caption_optimizer, 'min')
            for epoch in range(self.num_epochs):
                # config.get_train_loop(cfg, loader_train, encoder, decoder,caption_optimizer, split_no, epoch, total_step)
                # if add masks everywhere call just train_loop
                self.train_loop_mask(loader_train, encoder, decoder, caption_optimizer, split_no, epoch, total_step)
            # run sampling for the test indxs
            sampler.analysis_cluster(split_no, self.encoder_best_name, self.decoder_best_name)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,995
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/utils/test_mol.py
|
from moleculekit.molecule import Molecule
from moleculekit.smallmol.smallmol import SmallMol
from moleculekit.tools.atomtyper import prepareProteinForAtomtyping, getFeatures
from moleculekit.tools.voxeldescriptors import getChannels
import numpy as np
# Smoke test for moleculekit atom typing: fetch PDB entry 1ATL, keep only the
# protein, run atom typing, and binarize the resulting channel array.
protein = Molecule('1ATL')
protein.filter('protein')
protein = prepareProteinForAtomtyping(protein, verbose=False)
channels = getChannels(protein, version=2)
print("array", channels[0])
# 1.0 wherever a channel value is positive, 0.0 elsewhere.
binary_channels = (channels[0] > 0).astype(np.float32)
print("res - ", binary_channels)
print("shape answer", binary_channels.shape)
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,996
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/visualisation/visualise.py
|
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
import argparse
import sys
import config
from rdkit import Chem
import json
import os
import csv
import pickle
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from build_vocab import Vocabulary
from data_loader import Pdb_Dataset
from Contrib.statistics import analysis_to_csv, analysis_to_csv_test
from visualisation import Visualisation
def main():
    """Load the run configuration and dump fold-0 visualisations for each sampling mode."""
    parser = argparse.ArgumentParser(description='Train a 3D reconstruction model.')
    parser.add_argument('config', type=str, help='Path to config file.')
    cli_args = parser.parse_args()
    cfg = config.load_config(cli_args.config, 'configurations/config_lab/default.yaml')
    savedir = cfg['output_parameters']['savedir']
    # Trained weights live under <savedir>/models with names taken from the config.
    models_dir = os.path.join(savedir, "models")
    encoder_path = os.path.join(models_dir, cfg['training_params']['encoder_name'])
    decoder_path = os.path.join(models_dir, cfg['training_params']['decoder_name'])
    for mode in ["beam_1"]:
        Visualisation(cfg, mode).save_for_vis(0, encoder_path, decoder_path)


if __name__ == "__main__":
    main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,997
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/visualisation/visualisation.py
|
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
import argparse
import sys
import config
from rdkit import Chem
import json
import os
import csv
import pickle
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from build_vocab import Vocabulary
from data_loader import Pdb_Dataset
from Contrib.statistics import analysis_to_csv, analysis_to_csv_test
from decoder.decoder_vis import sample_beam_search
class Visualisation:
    def __init__(self, cfg, sampling):
        """Configure paths, sampling options, vocabulary and dataset for visualisation.

        Args:
            cfg: parsed configuration mapping (nested dict-like).
            sampling: sampling regime name (e.g. "beam_1", "max").
        """
        # model params
        # sampling params
        # self.idx_fold = idx_fold
        self.cfg = cfg
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # self.device = torch.device("cpu")
        self.sampling = sampling
        # self.sampling = cfg['sampling_params']['sampling']
        self.model_encoder = cfg['model']['encoder']
        print(self.model_encoder)
        self.model_decoder = cfg['model']['decoder']
        # "train" selects training-set proteins in save_for_vis; anything else selects the test fold
        self.sampling_data = cfg['sampling_params']['sampling_data']
        self.protein_dir = cfg["training_params"]["image_dir"]
        self.number_smiles = cfg["sampling_params"]["number_smiles"]
        # greedy decoding yields a single deterministic SMILES, so force count to 1
        if (self.sampling == "max"):
            self.number_smiles = 1
        self.time_waiting = cfg["sampling_params"]["time_waiting"]
        self.type_fold = cfg["sampling_params"]["type_fold"]
        # self.file_folds = cfg["sampling_params"]["folds"]
        # self.file_folds = os.path.join()
        # model params
        self.num_epochs = cfg['model_params']['num_epochs']
        self.batch_size = cfg['model_params']['batch_size']
        self.learning_rate = cfg['model_params']['learning_rate']
        self.num_workers = cfg['model_params']['num_workers']
        # training params
        # NOTE(review): protein_dir is assigned twice (same key path both times)
        self.protein_dir = cfg['training_params']['image_dir']
        self.caption_path = cfg['training_params']['caption_path']
        self.log_step = cfg['training_params']['log_step']
        self.save_step = cfg['training_params']['save_step']
        self.vocab_path = cfg['preprocessing']['vocab_path']
        # output files
        self.savedir = cfg['output_parameters']['savedir']
        self.save_dir_smiles = os.path.join(self.savedir, "statistics")
        self.tesnorboard_path = self.savedir
        self.log_path = os.path.join(self.savedir, "logs")
        self.idx_file = os.path.join(self.log_path, "idxs")
        # encoder/decoder path
        # self.encoder_path = os.path.join(self.savedir, "models", cfg['training_params']['encoder_name'])
        # self.decoder_path = os.path.join(self.savedir, "models", cfg['training_params']['decoder_name'])
        self.save_dir_encodings = os.path.join(self.savedir, "encodings")
        # sampling params
        if not os.path.exists(self.save_dir_smiles):
            os.makedirs(self.save_dir_smiles)
        if not os.path.exists(self.save_dir_encodings):
            os.makedirs(self.save_dir_encodings)
        # NOTE(review): these files are opened for the object's lifetime and never closed
        self.file_long_proteins = open(os.path.join(self.save_dir_smiles, "exceptions_long.txt"), "w")
        self.name_all_statistics = cfg['sampling_params']['name_all_stat']
        self.file_all_stat = open(os.path.join(self.save_dir_smiles, self.name_all_statistics), "w")
        # self.file_statistics = file_statistics
        # self.file_statistics = open(os.path.join(self.save_dir_smiles, self.name_file_stat), "w")
        # #the file of the whole stat
        # self.file_statistics.write("name,fold,type_fold,orig_smile,gen_smile,gen_NP,gen_logP,gen_sa,gen_qed,gen_weight,gen_similarity,orig_NP,orig_logP,orig_sa,orig_qed,orig_weight,frequency,sampling,encoder,decoder" + "\n")
        # self.file_statistics.flush()
        with open(self.vocab_path, "rb") as f:
            self.vocab = pickle.load(f)
        self.dataset = Pdb_Dataset(cfg, self.vocab)
        # self.encoder_path, self.decoder_path = self._get_model_path()
        # self.encoder, self.decoder = config.eval_model_captioning(cfg, self.encoder_path, self.decoder_path, device = self.device)
def save_for_vis(self, split_no, encoder_path, decoder_path):
    """Visualise every protein belonging to the chosen data split.

    Restores the trained encoder/decoder pair for this fold, loads the
    fold's test indices from disk, selects the protein ids to show
    (train-set complement or the test ids themselves) and runs
    ``self.visualise`` on each of them.
    """
    self.idx_fold = split_no
    folder_name = str(self.idx_fold) + "_" + self.sampling + "_visualisations"
    self.vis_path = os.path.join(self.savedir, folder_name)
    if not os.path.exists(self.vis_path):
        os.makedirs(self.vis_path)
    # restore the trained models for this fold
    self.encoder, self.decoder = config.eval_model_captioning(
        self.cfg, encoder_path, decoder_path, device=self.device
    )
    self.file_folds = os.path.join(self.idx_file, "test_idx_" + str(self.idx_fold))
    with open(self.file_folds, "rb") as fold_file:
        idx_proteins = pickle.load(fold_file)
    files_refined = os.listdir(self.protein_dir)
    # NOTE: the last 3 directory entries are excluded from indexing,
    # mirroring how indices were generated at split time.
    idx_all = list(range(len(files_refined) - 3))
    if self.sampling_data == "train":
        # visualise everything *except* the held-out test ids
        idx_to_visualise = np.setdiff1d(idx_all, idx_proteins)
    else:
        idx_to_visualise = idx_proteins
    for id_protein in idx_to_visualise:
        self.visualise(id_protein)
def load_pocket(self, id_protein, transform=None):
    """Load one protein pocket and return (features, geometry, masks).

    All three tensors are moved to ``self.device`` and given a leading
    batch dimension.  Also stashes a NumPy copy of the geometry in
    ``self.geometry_write`` and records ``self.path_protein`` for later
    output files.
    """
    name_protein = self.dataset._get_name_protein(id_protein)
    print("loading data of a protein", name_protein)
    self.path_protein = os.path.join(self.vis_path, name_protein)
    features, masks = self.dataset._get_features_complex(id_protein)
    geometry = self.dataset._get_geometry_complex(id_protein)
    # batchify and move every tensor to the target device
    features, geometry, masks = (
        t.to(self.device).unsqueeze(0) for t in (features, geometry, masks)
    )
    # keep a host-side copy of the geometry for saving alongside results
    self.geometry_write = np.asarray(geometry.cpu().clone().numpy())
    return features, geometry, masks
def generate_encodings(self, idx_):
    """Compute and persist the encoder embedding for one protein.

    Fixes three defects in the original body, which crashed on every
    call: it referenced the builtin ``id`` instead of the ``idx_``
    parameter, unpacked only two of ``load_pocket``'s three return
    values, and called the encoder without the ``masks`` argument used
    at every other call site (see ``visualise``).

    Parameters
    ----------
    idx_ : int
        Dataset index of the protein to encode.
    """
    protein_name = self.dataset._get_name_protein(idx_)
    features, geometry, masks = self.load_pocket(idx_)
    # encode the pocket exactly as done during sampling
    feature = self.encoder(features, geometry, masks)
    torch.save(
        feature,
        os.path.join(self.save_dir_encodings, protein_name + "_feature_encoding.pt"),
    )
def visualise(self, id):
    """Generate SMILES for one protein and dump geometry/smiles/alphas.

    Repeatedly samples ligand captions for protein ``id`` with the
    configured decoding strategy until ``self.number_smiles`` valid
    SMILES are collected (or ``self.time_waiting`` seconds elapse),
    then writes geometry, the valid SMILES list and attention alphas
    into ``self.path_protein``.
    """
    #original + gen smiles
    print("current id - ", id)
    self.smiles = []
    alphas_result = []
    protein_name = self.dataset._get_name_protein(id)
    print("current protein ", protein_name)
    #path of the real smile
    init_path_smile = os.path.join(
        self.caption_path, protein_name, protein_name + "_ligand.smi"
    )
    with open(init_path_smile) as fp:
        initial_smile = fp.readlines()[0] #write a true initial smile
    # smiles.append(initial_smile)
    amount_val_smiles = 0
    iter = 0  # NOTE(review): shadows the builtin `iter`
    start = time.time()
    if (self.sampling.startswith('beam') == False):
        # non-beam strategies: resample until enough valid SMILES or timeout
        while (amount_val_smiles < self.number_smiles):
            end = time.time()
            print("time elapsed", end - start)
            if((end - start) > self.time_waiting):
                #stop generating if we wait for too long till 50 ligands
                self.file_long_proteins.write(protein_name + "\n") #write a protein with long time of generating
                self.file_long_proteins.flush()
                break
            iter += 1
            # Build models
            # Load the trained model parameters
            # # Prepare features and geometry from pocket
            features, geometry, masks = self.load_pocket(id)
            # Generate a caption from the image
            feature = self.encoder(features, geometry, masks)
            #print("feature", feature)
            if (self.sampling == "probabilistic"):
                sampled_ids = self.decoder.sample_prob(feature)
                # sampled_ids = ( sampled_ids[0].cpu().numpy())
            elif (self.sampling == "max"):
                sampled_ids = self.decoder.sample_max(feature)
                # sampled_ids = ( sampled_ids[0].cpu().numpy())
            elif (self.sampling == "simple_probabilistic"):
                sampled_ids = self.decoder.simple_prob(feature)
                # sampled_ids = ( sampled_ids[0].cpu().numpy())
            elif (self.sampling.startswith("simple_probabilistic_topk") == True):
                # strategy name encodes k, e.g. "simple_probabilistic_topk_10"
                k = int(self.sampling.split("_")[-1])
                sampled_ids = self.decoder.simple_prob_topk(feature, k)
                # sampled_ids = ( sampled_ids[0].cpu().numpy())
            elif (self.sampling.startswith("temp_sampling")):
                # strategy name encodes temperature, e.g. "temp_sampling_0.7"
                temperature = float(self.sampling.split("_")[-1])
                sampled_ids = self.decoder.sample_temp(feature, temperature)
            sampled_ids = ( sampled_ids[0].cpu().numpy() )
            # NOTE(review): `alphas` is not defined anywhere in this method —
            # this line looks like it raises NameError; presumably an attention
            # tensor from the decoder was meant. Confirm against decoder API.
            # NOTE(review): printing_smiles has no return statement, so `idx`
            # is None and the `+=` below would fail — verify intended contract.
            idx = self.printing_smiles(sampled_ids, alphas_result, alphas, iter)
            amount_val_smiles += idx
    elif (self.sampling.startswith("beam")):
        # beam search: one decode pass yields several candidate sentences
        number_beams = int(self.sampling.split("_")[-1])
        features, geometry, masks = self.load_pocket(id)
        feature = self.encoder(features, geometry, masks)
        # self.decoder = self.decoder.float()
        # sampled_ids, alpha_all = sample_beam_search(self.decoder, feature)
        sampled_ids, alpha_all = self.decoder.sample_beam_search(feature, number_beams)
        # NOTE(review): 120 appears to be a sentinel the decoder returns on
        # failure — magic number, confirm against sample_beam_search.
        if alpha_all != 120:
            for sentence in sampled_ids:
                iter += 1
                # drop the leading <start> token before decoding
                self.printing_smiles(np.asarray(sentence[1:]), alphas_result, alpha_all[0], iter)
            # NOTE(review): adds the *total* count, not only valid SMILES
            amount_val_smiles += iter
    else:
        raise ValueError("Unknown sampling...")
    if(len(alphas_result) > 0):
        # persist geometry, SMILES and attention weights for this protein
        print("alph_rea", alphas_result)
        if not os.path.exists(self.path_protein):
            os.makedirs(self.path_protein)
        np.save(
            os.path.join(self.path_protein, "geometry"),
            arr = self.geometry_write,
        )
        alphas_result = alphas_result #? convert..
        with open(os.path.join(self.path_protein, "smiles"), 'wb') as fp:
            pickle.dump(self.smiles, fp)
        with open(os.path.join(self.path_protein, "alphas"), 'wb') as f:
            np.save(f, alphas_result)
def printing_smiles(self, sampled_ids, alphas_result, alpha_all, idx):
    """Decode token ids into a SMILES string, validate it, and record it.

    Tokens are decoded via ``self.vocab`` up to the first ``<end>``; the
    ``<start>``/``<end>`` markers (7 and 5 characters) are stripped, and
    the candidate is validated with RDKit.  Valid SMILES are appended to
    ``self.smiles`` and the matching attention row ``alpha_all[idx]`` to
    ``alphas_result``.

    Returns
    -------
    int
        1 if the decoded SMILES is valid, 0 otherwise.  (The original
        returned ``None`` implicitly, yet the caller in ``visualise``
        accumulates the result as a validity count — this fixes that.)
    """
    sampled_caption = []
    for word_id in sampled_ids:
        word = self.vocab.idx2word[word_id]
        sampled_caption.append(word)
        if word == "<end>":
            break
    sentence = "".join(sampled_caption)
    # strip the "<start>" (7 chars) and "<end>" (5 chars) markers
    sentence = sentence[7:-5]
    print(sentence)
    m = Chem.MolFromSmiles(sentence)
    if m is None or sentence == '' or sentence.isspace():
        print('invalid')
        return 0
    print(sentence)
    self.smiles.append(sentence)
    #print('alpha',alpha_all)
    alphas_result.append(alpha_all[idx])
    return 1
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,998
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/tests/test_exist_feat.py
|
import os, sys
import argparse
# from release import *
from src.utils import config
# import utils.config as config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from utils import Utils
import argparse
import sys
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from src.utils.build_vocab import Vocabulary
from src.datasets.data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from src.training.train_check_att_vis import Trainer_Attention_Check_Vis
from src.tests.training.train_checkpoint import Trainer_Fold
from src.sampling.sampler import Sampler
from src.datasets.split import Splitter
from src.training.utils import save_checkpoint_sampling
from src.evaluation.analysis import plot_all
from src.tests.datasets.feature import Featuring
import warnings
def test_Feature_exists():
    """Report refined-set proteins whose feature file has not been generated.

    Builds the expected feature file name for every protein id known to
    ``Featuring`` and prints the proteins whose file is missing from the
    refined directory.
    """
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.'
    )
    parser.add_argument('--config', type=str, help='Path to config file.')
    parser.add_argument('--radious', type=int , default=8, help='dimension of word embedding vectors')
    parser.add_argument('--type_feature', type=str , default='mass_charge', help='type_feature')
    parser.add_argument('--type_filtering', type=str , default = 'all', help='type_filtering')
    parser.add_argument('--h_filterig', type=str , default='without_h', help='h')
    parser.add_argument('--type_fold', type=str, help='type_fold')
    parser.add_argument('--idx_fold', type=str, help='Path to config file.')
    args = parser.parse_args()

    cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
    type_fold = args.type_fold
    idx_fold = args.idx_fold
    savedir = cfg["output_parameters"]["savedir"]
    model_name = cfg["model_params"]["model_name"]
    num_epoches = cfg["model_params"]["num_epochs"]

    print("Checking saved features!")
    missing_proteins = []
    feature_gen = Featuring(cfg, args.radious, args.type_feature, args.type_filtering, args.h_filterig)
    for pdbid in feature_gen.idx_files_refined:
        name_protein = feature_gen.files_refined[pdbid]
        protein_files = os.listdir(os.path.join(feature_gen.init_refined, name_protein))
        # expected name: <protein>_feature_r_<radius>_<feat>_<filter>_<h>.pt
        name_parts = [
            name_protein, "feature", "r", str(args.radious),
            args.type_feature, args.type_filtering, args.h_filterig,
        ]
        expected_name = "_".join(name_parts) + ".pt"
        if expected_name not in protein_files:
            print("no! - ", name_protein)
            missing_proteins.append(name_protein)
    print(missing_proteins)
if __name__ == "__main__":
test_Feature_exists()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,510,999
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/tests/all_elems.py
|
import os, sys
import argparse
import multiprocessing
from multiprocessing import Pool
# from release import *
from src.utils import config
# import utils.config as config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from utils import Utils
import argparse
import sys
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from src.utils.build_vocab import Vocabulary
from src.datasets.data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from src.training.train_check_att_vis import Trainer_Attention_Check_Vis
from src.tests.training.train_checkpoint import Trainer_Fold
from src.sampling.sampler import Sampler
from src.datasets.split import Splitter
from src.training.utils import save_checkpoint_sampling
from src.evaluation.analysis import plot_all
from src.tests.datasets.feature import Featuring
import warnings
warnings.filterwarnings("ignore")
def main():
    """Collect the set of unique chemical elements over the refined dataset.

    Parses feature-generation options, builds a ``Featuring`` helper and
    maps its element extractor over all protein ids with a process pool,
    printing the deduplicated result.
    """
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.')
    parser.add_argument('--config', type=str, help='Path to config file.')
    parser.add_argument('--radious', type=int , default=8, help='dimension of word embedding vectors')
    parser.add_argument('--type_feature', type=str , default='mass_charge', help='type_feature')
    parser.add_argument('--type_filtering', type=str , default = 'all', help='type_filtering')
    parser.add_argument('--h_filterig', type=str , default='without_h', help='h')
    parser.add_argument('--type_fold', type=str, help='type_fold')
    parser.add_argument('--idx_fold', type=str, help='idx fold')
    args = parser.parse_args()

    cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
    type_fold = args.type_fold
    idx_fold = args.idx_fold
    savedir = cfg["output_parameters"]["savedir"]
    model_name = cfg["model_params"]["model_name"]
    num_epoches = cfg["model_params"]["num_epochs"]

    # fan the per-protein element extraction out over 8 worker processes
    feature_gen = Featuring(cfg, args.radious, args.type_feature, args.type_filtering, args.h_filterig)
    with Pool(processes=8) as pool:
        all_elems = pool.map(feature_gen._get_all_elem_general, feature_gen.idx_files_refined)
    all_elems = list(set(all_elems))
    print("all_elems - ", all_elems)
if __name__ == "__main__":
main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,511,000
|
daniil-777/geneuclidean
|
refs/heads/main
|
/src/train_captioning.py
|
import argparse
import utils.config as config
import multiprocessing
import numpy as np
from numpy import savetxt
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
# from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from utils import Utils
import argparse
import sys
from py3nvml import py3nvml
import json
import os
import pickle
from sklearn.model_selection import KFold
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from utils.build_vocab import Vocabulary
from datasets.data_loader import get_loader, Pdb_Dataset, collate_fn, collate_fn_masks
from training.train_check_att_vis import Trainer_Attention_Check_Vis
from training.train_checkpoint import Trainer_Fold
from sampling.sampler import Sampler
from datasets.split import Splitter
from training.utils import save_checkpoint_sampling
from evaluation.analysis import plot_all
def main():
    """End-to-end pipeline for one fold: split, train, sample, plot.

    Reads the config, creates the fold split if missing, trains the
    captioning model (attention or not), then runs every sampling regime
    (resuming from a sampling checkpoint if present) and plots results.
    """
    parser = argparse.ArgumentParser(
        description='Train a 3D reconstruction model.'
    )
    parser.add_argument('config', type=str, help='Path to config file.')
    parser.add_argument('type_fold', type=str, help='type_fold')
    parser.add_argument('idx_fold', type=str, help='Path to config file.')
    args = parser.parse_args()
    cfg = config.load_config(args.config, 'configurations/config_lab/default.yaml')
    type_fold = args.type_fold
    idx_fold = args.idx_fold
    savedir = cfg["output_parameters"]["savedir"]
    model_name = cfg["model_params"]["model_name"]
    num_epoches = cfg["model_params"]["num_epochs"]
    # get split folds file; only split once — reuse an existing index file
    dir_idx_split = os.path.join(cfg['output_parameters']['savedir'], model_name, "logs", "idxs", cfg['splitting']['file_folds'])
    if not os.path.exists(dir_idx_split):
        print("doing split...")
        splitter = Splitter(cfg)
        splitter.split(type_fold)
    #training + evaluation
    if(cfg['training_params']['mode'] == "no_attention"):
        trainer = Trainer_Fold(cfg, idx_fold)
        trainer.train_epochs()
    elif(cfg['training_params']['mode'] == "attention"):
        trainer = Trainer_Attention_Check_Vis(cfg)
        trainer.train_epochs()
    # checkpoints are named encoder-<fold>-<epoch>.ckpt by the trainer
    # encoder_path = os.path.join(savedir, "models", "encoder_best_" + str(idx_fold) + '.ckpt')
    # decoder_path = os.path.join(savedir, "models", "decoder_best_" + str(idx_fold) + '.ckpt')
    encoder_path = os.path.join(savedir, model_name, "models", "encoder-" + str(idx_fold) + "-" + str(num_epoches) + '.ckpt')
    decoder_path = os.path.join(savedir, model_name, "models", "decoder-" + str(idx_fold) + "-" + str(num_epoches) + '.ckpt')
    checkpoint_sampling_path = os.path.join(savedir, model_name, "checkpoints", str(idx_fold) + '_sample.pkl')
    # regimes = ["simple_probabilistic", "max", "temp_sampling", "simple_probabilistic_topk"]
    # regimes = ["beam_1", "beam_3", "beam_10", "max", "temp_sampling_0.7", "probabilistic",
    #            "simple_probabilistic_topk_10"]
    #sampling
    # regimes = ["probabilistic", "max", "beam_1", "beam_3", "beam_10"]
    regimes = ["probabilistic", "max", "beam_1", "beam_3", "beam_10", "beam_20"]
    end_sampling_ind = len(regimes)
    # resume sampling from the last completed regime if a checkpoint exists
    if (os.path.exists(checkpoint_sampling_path)):
        print("loading sample ids...")
        checkpoint_sampling = torch.load(checkpoint_sampling_path)
        start_sampling_ind = checkpoint_sampling['idx_sample_regime_start']
        print("************start_sampling_ind***********", start_sampling_ind)
    else:
        start_sampling_ind = 0
        save_checkpoint_sampling(checkpoint_sampling_path, 0, 0)
    for sampling_ind in range(start_sampling_ind, end_sampling_ind):
        sample = regimes[sampling_ind]
        print("*********sample regim*********** ", sample)
        sampler = Sampler(cfg, sample)
        sampler.analysis_cluster(idx_fold, type_fold, encoder_path, decoder_path)
    plot = plot_all(cfg)
    plot.run()
    # for regim in regimes:
    #     print("doing sampling... ", regim)
    #     sampler = Sampler(cfg, regim)
    #     sampler.analysis_cluster(idx_fold, type_fold, encoder_path, decoder_path)
if __name__ == "__main__":
main()
|
{"/src/model/encoder/__init__.py": ["/src/model/encoder/encoder_resnet.py", "/src/model/encoder/e3nn_vis.py", "/src/model/encoder/bio_e3nn.py", "/src/model/encoder/bio_e3nn_res.py"], "/src/train_all_folds.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/datasets/feature.py", "/src/datasets/split.py", "/src/evaluation/evaluator.py", "/src/sampling/sampler.py", "/src/training/trainer_att.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/training/trainer_att.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/training/utils.py"], "/src/sampling/sampler.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/training/utils.py"], "/src/tests/datasets/feature.py": ["/src/datasets/dictionaries.py", "/src/utils/checkpoint.py", "/src/utils/config.py"], "/src/datasets/feature.py": ["/src/utils/config.py", "/src/datasets/dictionaries.py", "/src/utils/checkpoint.py"], "/src/evaluation/evaluator.py": ["/src/datasets/data_loader.py", "/src/evaluation/Contrib/statistics.py", "/src/sampling/sampler.py", "/src/training/utils.py", "/src/utils/checkpoint.py"], "/src/model/encoder/bio_e3nn.py": ["/src/model/encoder/base.py"], "/src/tests/check_feat_exceptions.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/model/encoder/bio_e3nn_res.py": ["/src/model/encoder/base.py", "/src/model/encoder/bio_e3nn.py"], "/src/train_feature_all.py": ["/src/utils/config.py", "/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/datasets/feature.py"], "/src/tests/test_exist_feat.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", "/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"], "/src/tests/all_elems.py": ["/src/datasets/data_loader.py", "/src/sampling/sampler.py", 
"/src/datasets/split.py", "/src/training/utils.py", "/src/tests/datasets/feature.py"]}
|
26,541,907
|
prabha-git/data_preprocessing
|
refs/heads/master
|
/web_scrapping/imdb_box_office_weekly.py
|
"""Scrape album table rows from the 2015-in-hip-hop Wikipedia page.

Fixes a SyntaxError in the original last line
(``albumdata = albu,data+","+``), which made the script unrunnable;
the loop now accumulates each cell's text into a comma-separated row.
"""
import requests
from bs4 import BeautifulSoup

url = 'https://en.wikipedia.org/wiki/2015_in_hip_hop_music'
page = requests.get(url)
#print(page.content)
soup = BeautifulSoup(page.content, 'html.parser')
# NOTE(review): `results` (the second wikitable) is computed but never used —
# the row loop below scans the whole page; possibly it was meant to iterate
# results.find_all('tr') instead. Left as-is pending confirmation.
results = soup.find_all('table', class_='wikitable')[1]
for record in soup.find_all('tr'):
    albumdata = ""
    for data in record.find_all('td'):
        # append each table cell's text followed by a comma separator
        albumdata = albumdata + data.get_text(strip=True) + ","
    print(albumdata)
|
{"/projects/covid.py": ["/gbq/save_to_gbq.py", "/web_scrapping/covid.py", "/data_cleaning/covid.py"]}
|
26,609,516
|
neizmirasego/Netcracker-DevOps-school-2021
|
refs/heads/master
|
/bot/main.py
|
# # our module
# telegram bot
from aiogram.utils import executor
from bot import BotTelegram
def main():
    """Instantiate the Telegram bot and start long polling its dispatcher."""
    telegram_bot = BotTelegram()
    executor.start_polling(telegram_bot.disp)
if __name__ == '__main__':
main()
|
{"/bot.py": ["/config.py"], "/main.py": ["/bot.py"], "/bot/main.py": ["/bot.py"], "/bot/bot.py": ["/config.py"]}
|
26,609,517
|
neizmirasego/Netcracker-DevOps-school-2021
|
refs/heads/master
|
/bot/bot.py
|
"""
class telegram bot
"""
# # library
# api telegram
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.contrib.fsm_storage.memory import MemoryStorage
# # our module
# telegram's api token
from config import token
from chatbot import ChatBot
class FormLanguages(StatesGroup):
    """FSM state group: waiting for the user to name a new chat language."""
    language = State()
class FormTraining(StatesGroup):
    """FSM state group: waiting for a training sample in the addtraining format."""
    training = State()
class BotTelegram(object):
    """Telegram front-end wrapping a ChatBot.

    All aiogram handlers are declared as closures inside ``__init__`` so
    they capture ``self`` and register on this instance's dispatcher
    (``self.disp``), which the caller passes to ``executor.start_polling``.
    """
    def __init__(self):
        self.bot = Bot(token=token)
        self.disp = Dispatcher(self.bot, storage=MemoryStorage())
        self.chat_bot = ChatBot()

        # /start — greet the user
        @self.disp.message_handler(commands=['start'])
        async def process_start_command(message: types.Message):
            await self.bot.send_message(message.from_user.id, "Hello!\nWrite me something")

        # /help — list available commands
        # NOTE(review): reuses the name `process_start_command`; harmless
        # because the decorator registers the handler before the rebind.
        @self.disp.message_handler(commands=['help'])
        async def process_start_command(message: types.Message):
            mes = 'Chat bot with training function for DevOps course project\n\n' \
                  'start - Beginning of work\n' \
                  'help - Command help display\n' \
                  'changelanguage - Change the language of communication\n' \
                  'addtraining - Add value for learning\n' \
                  'training - Start training'
            await message.bot.send_message(message.from_user.id, mes)

        # /changelanguage — enter the FormLanguages.language state
        @self.disp.message_handler(commands=['changelanguage'])
        async def change_languages(message: types.Message):
            """
            Change our language
            """
            await FormLanguages.language.set()
            await self.bot.send_message(message.from_user.id, f'Our language: {self.chat_bot.get_languages()}\n'
                                                              f'What language do you need?')

        # next message while in the language state is the chosen language
        @self.disp.message_handler(state=FormLanguages.language)
        async def enter_language(message: types.Message, state: FSMContext):
            await self.bot.send_message(message.from_user.id, self.chat_bot.change_language(message.text))
            await state.finish()

        # /addtraining — enter the FormTraining.training state and show format
        @self.disp.message_handler(commands=['addtraining'])
        async def add_training(message: types.Message):
            """
            Add intent in training
            """
            await FormTraining.training.set()
            mes = 'Input format:\n' \
                  '<language>:<tag>:<pat or res (pat - pattern, res - response)>:<text>\n' \
                  'Example 1:\n' \
                  'en:greeting:pat:whats up\n' \
                  'Example 2:\n' \
                  'en:greeting:res:Hey!\n' \
                  'Example 3:\n' \
                  'ru:вот такой тег:res:вот такой текст!\n'
            await self.bot.send_message(message.from_user.id, mes)

        # next message while in the training state is the training sample
        @self.disp.message_handler(state=FormTraining.training)
        async def enter_training(message: types.Message, state: FSMContext):
            await self.bot.send_message(message.from_user.id, self.chat_bot.add_training(message.text))
            await state.finish()

        # /training — retrain the underlying model
        @self.disp.message_handler(commands=['training'])
        async def training_go(message: types.Message):
            """
            Start learning
            """
            self.chat_bot.training()
            await self.bot.send_message(message.from_user.id, 'Done!')

        # fallback: classify any other message and reply with a response
        @self.disp.message_handler()
        async def echo_message(message: types.Message):
            intents = self.chat_bot.predict_class(message.text.lower())
            await self.bot.send_message(message.from_user.id, self.chat_bot.get_response(intents,
                                                                                        self.chat_bot.intents))
|
{"/bot.py": ["/config.py"], "/main.py": ["/bot.py"], "/bot/main.py": ["/bot.py"], "/bot/bot.py": ["/config.py"]}
|
26,609,518
|
neizmirasego/Netcracker-DevOps-school-2021
|
refs/heads/master
|
/config.py
|
import os

# Absolute path to the directory containing this config module.
path_to_dir = os.path.dirname(os.path.abspath(__file__))

# api token for telegram
# SECURITY(review): this bot token is a live secret committed to source
# control — it should be revoked and loaded from an environment variable
# (e.g. os.environ["TELEGRAM_TOKEN"]) instead of being hardcoded here.
token = '1804011613:AAG1DqIdMqRiwCxgVTL_9kWzbp8ecJC3HmY'
|
{"/bot.py": ["/config.py"], "/main.py": ["/bot.py"], "/bot/main.py": ["/bot.py"], "/bot/bot.py": ["/config.py"]}
|
26,657,470
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/app.py
|
#!/usr/bin/env python3
from aws_cdk import core
from hello_apig_wsgi.hello_apig_wsgi_stack import HelloApigWsgiStack
from hello_apig_wsgi.pipeline_stack import PipelineStack
from pydantic import BaseSettings
class Config(BaseSettings):
    """https://pydantic-docs.helpmanual.io/usage/settings/"""

    # Deployment target; as BaseSettings fields, each of these can be
    # overridden by an environment variable of the same name.
    account: str = "385504394431"   # AWS account id
    region: str = "us-east-2"       # AWS region for all stacks
    gh_username: str = "knowsuchagency"   # GitHub owner of the source repo
    gh_repo: str = "cdk-hello-apigw-asgi" # GitHub repo watched by the pipeline
if __name__ == "__main__":
    # Synthesize both the application stack and the CD pipeline stack.
    config = Config()
    app = core.App()
    application_stack = HelloApigWsgiStack(app, "application")
    pipeline_stack = PipelineStack(
        app,
        "pipeline",
        config,
        # pin the pipeline stack to the configured account/region
        env={"account": config.account, "region": config.region},
    )
    app.synth()
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,657,471
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/hello_apig_wsgi/pipeline_stack.py
|
from aws_cdk import core
from aws_cdk import aws_codepipeline as codepipeline
from aws_cdk import aws_codepipeline_actions as pipeline_actions
from aws_cdk import pipelines
from aws_cdk import aws_codebuild as codebuild
from .hello_apig_wsgi_stack import HelloApigWsgiStack
from pydantic import BaseSettings
class WebServiceStage(core.Stage):
    """Deployable pipeline stage wrapping the web-service application stack."""

    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)
        # exposed so the pipeline can reference stack outputs (http_api_url)
        self.service = HelloApigWsgiStack(self, "WebService")
class PipelineStack(core.Stack):
    """CDK pipeline: GitHub source -> synth -> preprod (+tests, approval) -> prod."""

    def __init__(self, scope: core.Construct, id: str, config: BaseSettings, **kwargs):
        super().__init__(scope, id, **kwargs)

        # artifacts shared between the source and synth actions
        source_artifact = codepipeline.Artifact()
        cloud_assembly_artifact = codepipeline.Artifact()

        # poll the configured GitHub repo; the OAuth token lives in
        # Secrets Manager under the name "github-token"
        source_action = pipeline_actions.GitHubSourceAction(
            action_name="GitHub",
            output=source_artifact,
            oauth_token=core.SecretValue.secrets_manager("github-token"),
            owner=config.gh_username,
            repo=config.gh_repo,
            trigger=pipeline_actions.GitHubTrigger.POLL,
        )

        # build step: install deps, run unit tests, synth the app stack;
        # privileged mode is required for Docker-based bundling
        synth_action = pipelines.SimpleSynthAction(
            source_artifact=source_artifact,
            cloud_assembly_artifact=cloud_assembly_artifact,
            install_commands=[
                "npm install -g aws-cdk",
                "pip install -r requirements.txt",
            ],
            test_commands=["pytest lambdas -v -m 'not integration'"],
            synth_command="cdk synth application",
            environment=codebuild.BuildEnvironment(privileged=True),
        )

        pipeline = pipelines.CdkPipeline(
            self,
            "pipeline",
            cloud_assembly_artifact=cloud_assembly_artifact,
            pipeline_name="hello-pipeline",
            source_action=source_action,
            synth_action=synth_action,
        )

        # pre-production stage deployed to the configured account/region
        pre_prod_app = WebServiceStage(
            self,
            "preprod",
            env={
                "account": config.account,
                "region": config.region,
            },
        )
        pre_prod_stage = pipeline.add_application_stage(pre_prod_app)

        # run integration tests against the deployed preprod API URL
        pre_prod_stage.add_actions(
            pipelines.ShellScriptAction(
                action_name="integration_tests",
                run_order=pre_prod_stage.next_sequential_run_order(),
                additional_artifacts=[source_artifact],
                commands=[
                    "pip install -r requirements.txt",
                    "pytest lambdas -v -m integration",
                ],
                # expose the preprod stack's API URL as an env var to the tests
                use_outputs={
                    "http_api_url": pipeline.stack_output(
                        pre_prod_app.service.http_api_url
                    )
                },
            )
        )

        # human gate before production
        pre_prod_stage.add_manual_approval_action(action_name="PromoteToProd")

        prod_app = WebServiceStage(
            self,
            "Prod",
            env={
                "account": config.account,
                "region": config.region,
            },
        )
        pipeline.add_application_stage(prod_app)
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,657,472
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/lambdas/graphql/index.py
|
import os
import boto3
# Name of the DynamoDB table backing the notes API; injected by the CDK
# stack via add_environment("NOTES_TABLE", ...).
TABLE_NAME = os.environ["NOTES_TABLE"]
# High-level Table resource used by the CRUD helpers below.
TABLE = boto3.resource("dynamodb").Table(TABLE_NAME)
# Low-level client; not used by the code in this module.
dynamodb = boto3.client("dynamodb")
def get_note(id_):
    """Fetch a single note item by its primary key."""
    key = {"id": id_}
    return TABLE.get_item(Key=key)
def create_note(note):
    """Persist *note* in the table and return it unchanged."""
    TABLE.put_item(Item=note)
    return note
def list_notes():
    """Return every note item via a full table scan."""
    scan_result = TABLE.scan()
    return scan_result["Items"]
def delete_note(id_):
    """Delete the note with the given id and echo the id back."""
    key = {"id": id_}
    TABLE.delete_item(Key=key)
    return id_
def handler(event, context):
    """AppSync Lambda resolver entry point.

    Dispatches on the GraphQL field name supplied by AppSync and delegates
    to the matching data-access helper. Unknown fields return None.
    """
    print(f"{event=}")
    field_name = event["info"]["fieldName"]
    arguments = event["arguments"]
    # Field names must match the resolvers registered in the CDK stack
    # (getNoteById / listNotes / createNote / deleteNote).
    # BUGFIX: the original compared against "getNotebyId" (lower-case "b"),
    # which never matched the "getNoteById" resolver, so note lookups always
    # fell through to the final `return None`.
    if field_name == "getNoteById":
        return get_note(arguments["noteId"])
    elif field_name == "createNote":
        return create_note(arguments["note"])
    elif field_name == "listNotes":
        return list_notes()
    elif field_name == "deleteNote":
        return delete_note(arguments["noteId"])
    else:
        return None
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,657,473
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/hello_apig_wsgi/hello_apig_wsgi_stack.py
|
from aws_cdk import (
core,
aws_lambda as lmb,
aws_lambda_python as lmb_py,
aws_apigateway as apigw,
aws_apigatewayv2 as apigw_v2,
aws_appsync as appsync,
aws_dynamodb as dynamodb,
)
class HelloApigWsgiStack(core.Stack):
    """Application stack: an HTTP API backed by WSGI/ASGI Lambdas, plus an
    AppSync GraphQL notes API backed by a Lambda resolver and DynamoDB."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Flask app (lambdas/wsgi); this integration needs payload format 1.0.
        wsgi_function = lmb_py.PythonFunction(
            self, "wsgi-function", entry="./lambdas/wsgi"
        )
        wsgi_integration = apigw_v2.LambdaProxyIntegration(
            handler=wsgi_function,
            payload_format_version=apigw_v2.PayloadFormatVersion.VERSION_1_0,
        )

        # Quart app (lambdas/asgi); default payload format.
        asgi_function = lmb_py.PythonFunction(
            self,
            "asgi-function",
            entry="./lambdas/asgi",
        )
        asgi_integration = apigw_v2.LambdaProxyIntegration(handler=asgi_function)

        # The ASGI Lambda handles all routes by default; /wsgi paths are
        # explicitly routed to the Flask Lambda.
        self.http_api = apigw_v2.HttpApi(
            self, "http-api", default_integration=asgi_integration
        )
        self.http_api.add_routes(
            path="/wsgi",
            methods=[apigw_v2.HttpMethod.GET],
            integration=wsgi_integration,
        )
        self.http_api.add_routes(
            path="/wsgi/{proxy+}",
            methods=[apigw_v2.HttpMethod.GET],
            integration=wsgi_integration,
        )
        # Exposed as an attribute so the pipeline can feed the URL into
        # the integration-test shell action.
        self.http_api_url = core.CfnOutput(self, "RestApiUrl", value=self.http_api.url)

        self.graphql_api = appsync.GraphqlApi(
            self,
            "graphql-api",
            name="notes-example-api",
            schema=appsync.Schema.from_asset("./graphql/schema.graphql"),
        )
        core.CfnOutput(self, "GraphQLUrl", value=self.graphql_api.graphql_url)
        core.CfnOutput(self, "GraphQlApiKey", value=self.graphql_api.api_key)

        graphql_handler = lmb_py.PythonFunction(
            self,
            "graphql-handler",
            entry="./lambdas/graphql",
            runtime=lmb.Runtime.PYTHON_3_8,
        )
        data_source = self.graphql_api.add_lambda_data_source(
            "lambdaDatasource", graphql_handler
        )
        # NOTE(review): the Lambda in lambdas/graphql dispatches on
        # "getNotebyId" (lower-case "b"), which can never match this
        # "getNoteById" resolver — confirm and align the two sides.
        data_source.create_resolver(type_name="Query", field_name="getNoteById")
        data_source.create_resolver(type_name="Query", field_name="listNotes")
        data_source.create_resolver(type_name="Mutation", field_name="createNote")
        data_source.create_resolver(type_name="Mutation", field_name="deleteNote")

        # Notes storage; the table name reaches the handler via NOTES_TABLE.
        dynamo_table = dynamodb.Table(
            self,
            "notes-table",
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=dynamodb.Attribute(
                name="id", type=dynamodb.AttributeType.STRING
            ),
        )
        dynamo_table.grant_read_write_data(graphql_handler)
        graphql_handler.add_environment("NOTES_TABLE", dynamo_table.table_name)
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,657,474
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/lambdas/asgi/index.py
|
# ASGI Lambda: a Quart (async, Flask-like) app adapted to Lambda via Mangum.
from mangum import Mangum
from quart import Quart, request

app = Quart(__name__)
# Treat "/asgi" and "/asgi/" as the same route.
app.url_map.strict_slashes = False
# Lambda entry point: Mangum translates API Gateway events into ASGI calls.
handler = Mangum(app)


@app.route("/")
def hello():
    """Root route; the extra flag marks that the root handler answered."""
    return {"path": request.path, "root": True}


@app.route("/asgi")
def hello_asgi():
    """Echo the request path for /asgi."""
    return {"path": request.path}


@app.route("/asgi/foo")
def hello_asgi_foo():
    """Echo the request path for the nested /asgi/foo route."""
    return {"path": request.path}
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,657,475
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/lambdas/wsgi/index.py
|
# WSGI Lambda: a Flask app adapted to Lambda via apig-wsgi.
from apig_wsgi import make_lambda_handler
from flask import Flask, request, jsonify

app = Flask(__name__)
# Treat "/wsgi" and "/wsgi/" as the same route.
app.url_map.strict_slashes = False
# Lambda entry point: translates API Gateway events into WSGI calls.
handler = make_lambda_handler(app)


@app.route("/wsgi")
def wsgi():
    """Echo the request path for /wsgi."""
    resp = {"path": request.path}
    return jsonify(resp)


@app.route("/wsgi/foo")
def wsgi_foo():
    """Echo the request path for the nested /wsgi/foo route."""
    resp = {"path": request.path}
    return jsonify(resp)
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,657,476
|
knowsuchagency/cdk-hello-apigw-asgi
|
refs/heads/master
|
/lambdas/wsgi/test_wsgi.py
|
import os
import pytest
import requests
from index import app


@pytest.fixture
def url():
    """Deployed HTTP API base URL (pipeline stack output), minus any trailing slash."""
    return os.environ["http_api_url"].rstrip("/")


@pytest.fixture()
def client():
    """Flask test client bound to the WSGI app under test."""
    with app.test_client() as client_:
        yield client_


def test_wsgi_unit(client):
    # In-process check: the /wsgi route answers with its own path.
    resp = client.get("/wsgi")
    assert "path" in resp.json


@pytest.mark.integration
def test_wsgi_integration(url):
    # End-to-end check against the deployed API Gateway endpoint.
    with requests.get(f"{url}/wsgi") as resp:
        assert "path" in resp.json()
|
{"/app.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py", "/hello_apig_wsgi/pipeline_stack.py"], "/hello_apig_wsgi/pipeline_stack.py": ["/hello_apig_wsgi/hello_apig_wsgi_stack.py"]}
|
26,699,160
|
bcaitech1/p3-ims-obd-savetheearth
|
refs/heads/master
|
/code/utils.py
|
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
import torch
import os
import pydensecrf.densecrf as dcrf
import pydensecrf.utils as utils
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask],
minlength=n_class ** 2).reshape(n_class, n_class)
return hist
def prev_label_accuracy_score(label_trues, label_preds, n_class):
    """
    Returns accuracy score evaluation result.
      - [acc]: overall accuracy
      - [acc_cls]: mean accuracy
      - [mean_iu]: mean IU
      - [fwavacc]: fwavacc
    """
    # Accumulate one confusion matrix over the whole batch.
    confusion = np.zeros((n_class, n_class))
    for truth, pred in zip(label_trues, label_preds):
        confusion += _fast_hist(truth.flatten(), pred.flatten(), n_class)

    diagonal = np.diag(confusion)
    row_sums = confusion.sum(axis=1)
    col_sums = confusion.sum(axis=0)
    total = confusion.sum()

    acc = diagonal.sum() / total
    # Empty classes divide 0/0; nanmean skips the resulting NaNs.
    with np.errstate(divide='ignore', invalid='ignore'):
        per_class_acc = diagonal / row_sums
    acc_cls = np.nanmean(per_class_acc)
    with np.errstate(divide='ignore', invalid='ignore'):
        iu = diagonal / (row_sums + col_sums - diagonal)
    mean_iu = np.nanmean(iu)
    freq = row_sums / total
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc
def label_accuracy_score(hist):
    """
    Returns accuracy score evaluation result from a confusion matrix.
      - [acc]: overall accuracy
      - [acc_cls]: mean accuracy
      - [mean_iu]: mean IU
      - [iu]: per-class IU vector
      - [fwavacc]: fwavacc
    """
    diagonal = np.diag(hist)
    row_sums = hist.sum(axis=1)
    col_sums = hist.sum(axis=0)
    total = hist.sum()

    acc = diagonal.sum() / total
    # Empty classes divide 0/0; nanmean skips the resulting NaNs.
    with np.errstate(divide='ignore', invalid='ignore'):
        per_class_acc = diagonal / row_sums
    acc_cls = np.nanmean(per_class_acc)
    with np.errstate(divide='ignore', invalid='ignore'):
        iu = diagonal / (row_sums + col_sums - diagonal)
    mean_iu = np.nanmean(iu)
    freq = row_sums / total
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, iu, fwavacc
def add_hist(hist, label_trues, label_preds, n_class):
    """
    stack hist(confusion matrix): accumulate a batch of label pairs into
    *hist* in place and return it.
    """
    for truth, pred in zip(label_trues, label_preds):
        hist += _fast_hist(truth.flatten(), pred.flatten(), n_class)
    return hist
# https://github.com/Bjarten/early-stopping-pytorch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Monitors either a validation loss (lower is better) or an mIoU score
    (higher is better), checkpoints the model on improvement, and sets
    ``early_stop`` once ``patience`` consecutive non-improving epochs occur.
    """

    def __init__(self, patience=7, verbose=False, delta=0, docs_path='docs',
                 models_path='models', model_name='checkpoint.pt', trace_func=print):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            docs_path (str): Root directory for validation figures. Default: 'docs'
            models_path (str): Root directory for model checkpoints. Default: 'models'
            model_name (str): Name used for checkpoint files and directories.
                            Default: 'checkpoint.pt'
            trace_func (function): trace print function. Default: print
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # BUGFIX: np.Inf was removed in NumPy 2.0; use the canonical np.inf.
        self.val_loss_min = np.inf
        self.delta = delta
        self.trace_func = trace_func
        self.best_metric = None
        self.model_name = model_name

        # Where per-epoch validation figures are written.
        self.validation_path = os.path.join(docs_path, 'validation', model_name)
        # BUGFIX: os.mkdir fails when intermediate directories (e.g.
        # docs/validation) are missing; makedirs(exist_ok=True) also
        # subsumes the original isdir() guard.
        os.makedirs(self.validation_path, exist_ok=True)

        # Where per-epoch model checkpoints are written.
        self.model_path = os.path.join(models_path, model_name)
        os.makedirs(self.model_path, exist_ok=True)

        # BUGFIX: save_checkpoint_loss() referenced self.path, which was
        # never defined and raised AttributeError on the first save.
        self.path = os.path.join(self.model_path, f"{model_name}.pt")

    def __call__(self, model, val_loss=None, mIoU=None, plt=None, metric=None, epoch=None):
        # BUGFIX: compare against None so a legitimate val_loss/mIoU of 0.0
        # is still processed (the old truthiness test silently skipped it).
        if val_loss is not None:
            # Loss mode: lower is better, so track the negated loss.
            score = -val_loss
            if self.best_score is None:
                self.best_score = score
                self.save_checkpoint_loss(val_loss, model)
            elif score < self.best_score + self.delta:
                self.counter += 1
                self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
                if self.counter >= self.patience:
                    self.early_stop = True
            else:
                self.best_score = score
                self.save_checkpoint_loss(val_loss, model)
                self.counter = 0
        elif mIoU is not None:
            # Score mode: higher is better.
            score = mIoU
            if self.best_score is None:
                # NOTE(review): the +inf sentinel makes every subsequent
                # epoch count as "no improvement" until save_checkpoint_score
                # records a score > 0.5; -inf looks intended. Kept as-is to
                # preserve existing training behavior — confirm.
                self.best_score = np.inf
                self.save_checkpoint_score(score, model, plt, metric, epoch)
            elif score < self.best_score + self.delta:
                self.counter += 1
                self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
                if self.counter >= self.patience:
                    self.early_stop = True
            else:
                self.save_checkpoint_score(score, model, plt, metric, epoch)
                self.counter = 0

    def save_checkpoint_loss(self, val_loss, model):
        '''Saves model when validation loss decreases.'''
        if self.verbose:
            self.trace_func(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), self.path)
        self.val_loss_min = val_loss

    def save_checkpoint_score(self, score, model, plt, metric, epoch):
        '''Saves model (and validation figure) when the mIoU score increases.'''
        if self.verbose:
            self.trace_func(f'score increased ({self.best_score:.6f} --> {score:.6f}). Saving model ...')
        # Checkpoints are only persisted once the score clears 0.5 —
        # presumably to skip low-quality early epochs.
        if score > 0.5:
            torch.save(model.state_dict(), os.path.join(self.model_path, f"{self.model_name}_{epoch}.pt"))
            plt.savefig(os.path.join(self.validation_path, f"{self.model_name}_{epoch}.png"))
            self.best_score = score
            self.best_metric = metric
# DenseCRF post-processing hyper-parameters
# (Krähenbühl & Koltun, "Efficient Inference in Fully Connected CRFs", 2011).
MAX_ITER = 10     # number of mean-field inference iterations
POS_W = 3         # weight of the Gaussian (smoothness) pairwise term
POS_XY_STD = 1    # spatial std-dev of the Gaussian kernel
Bi_W = 4          # weight of the bilateral (appearance) pairwise term
Bi_XY_STD = 67    # spatial std-dev of the bilateral kernel
Bi_RGB_STD = 3    # color std-dev of the bilateral kernel


def dense_crf(img, output_probs):
    """Refine softmax class probabilities with a fully-connected CRF.

    Args:
        img: RGB image passed to the bilateral term; assumed HxWx3 uint8
            as required by pydensecrf — TODO confirm at the call site.
        output_probs: (C, H, W) per-class softmax probabilities.

    Returns:
        (C, H, W) array of refined class probabilities.
    """
    c = output_probs.shape[0]
    h = output_probs.shape[1]
    w = output_probs.shape[2]

    # Unary potentials are the negative log of the softmax probabilities.
    U = utils.unary_from_softmax(output_probs)
    # pydensecrf requires C-contiguous buffers.
    U = np.ascontiguousarray(U)
    img = np.ascontiguousarray(img)

    d = dcrf.DenseCRF2D(w, h, c)
    d.setUnaryEnergy(U)
    d.addPairwiseGaussian(sxy=POS_XY_STD, compat=POS_W)
    d.addPairwiseBilateral(sxy=Bi_XY_STD, srgb=Bi_RGB_STD, rgbim=img, compat=Bi_W)

    Q = d.inference(MAX_ITER)
    Q = np.array(Q).reshape((c, h, w))
    return Q
def dense_crf_wrapper(args):
    """Unpack an (image, probabilities) pair; convenient for pool.map-style calls."""
    image, probs = args[0], args[1]
    return dense_crf(image, probs)
|
{"/WEB_P3/p3_web/p3_app/views.py": ["/WEB_P3/p3_web/p3_app/visualize.py", "/WEB_P3/p3_web/p3_app/detect_model/detection_result.py"]}
|
26,699,161
|
bcaitech1/p3-ims-obd-savetheearth
|
refs/heads/master
|
/code/optimizer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer, required
import math
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Union, List
from torch import Tensor
# Type aliases shared by the optimizer implementations below
# (same shapes as the pytorch-optimizer package).
Params = Union[Iterable[Tensor], Iterable[Dict[str, Any]]]
LossClosure = Callable[[], float]
OptLossClosure = Optional[LossClosure]
Betas2 = Tuple[float, float]
State = Dict[str, Any]
OptFloat = Optional[float]
Nus2 = Tuple[float, float]
class SGDP(Optimizer):
    """SGD with momentum plus the SGDP projection step.

    For parameters whose gradient is nearly orthogonal to the weights, the
    update is projected to remove its radial component and weight decay is
    damped by ``wd_ratio``. Matches the reference implementation of
    "AdamP: Slowing Down the Slowdown ..." (https://arxiv.org/abs/2006.08217).
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,
                        nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
        super(SGDP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # Flatten to (dim0, -1): one row per leading-dim slice (e.g. channel).
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Flatten the whole tensor to a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        # Absolute cosine similarity between x and y under a flattening view.
        x = view_func(x)
        y = view_func(y)
        return F.cosine_similarity(x, y, dim=1, eps=eps).abs_()

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        # If grad is (nearly) orthogonal to the weights under either view,
        # remove the radial component of the update and return the damped
        # weight-decay ratio; otherwise return the update untouched.
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd
        return perturb, wd

    def step(self, closure=None):
        """Perform one optimization step; *closure* re-evaluates the loss."""
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['momentum'] = torch.zeros_like(p.data)

                # SGD momentum-buffer update (optionally Nesterov).
                buf = state['momentum']
                buf.mul_(momentum).add_(grad, alpha=1 - dampening)
                if nesterov:
                    d_p = grad + momentum * buf
                else:
                    d_p = buf

                # Projection (only for tensors with more than one dimension).
                wd_ratio = 1
                if len(p.shape) > 1:
                    d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])

                # Weight decay, damped by wd_ratio when the projection fired.
                if group['weight_decay'] > 0:
                    p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))

                # Step
                p.data.add_(d_p, alpha=-group['lr'])

        return loss
class RAdam(Optimizer):
    r"""Implements RAdam optimization algorithm.

    It has been proposed in `On the Variance of the Adaptive Learning
    Rate and Beyond`__.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate (default: 1e-3)
        betas: coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps: term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay: weight decay (L2 penalty) (default: 0)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.RAdam(model.parameters(), lr=0.1)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ https://arxiv.org/abs/1908.03265

    Note:
        Reference code: https://github.com/LiyuanLucasLiu/RAdam
    """

    def __init__(
        self,
        params: Params,
        lr: float = 1e-3,
        betas: Betas2 = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
    ) -> None:
        # Validate hyper-parameters up front so misconfiguration fails fast.
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if eps < 0.0:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 0: {}'.format(betas[0])
            )
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 1: {}'.format(betas[1])
            )
        if weight_decay < 0:
            raise ValueError(
                'Invalid weight_decay value: {}'.format(weight_decay)
            )
        # Param groups that override betas get their own step-size cache,
        # since the cached rectification terms depend on beta2.
        if (
            isinstance(params, (list, tuple))
            and len(params) > 0
            and isinstance(params[0], dict)
        ):
            for param in params:
                if 'betas' in param and (
                    param['betas'][0] != betas[0]
                    or param['betas'][1] != betas[1]
                ):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            buffer=[[None, None, None] for _ in range(10)],
        )
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            lr = group['lr']
            weight_decay = group['weight_decay']
            beta1, beta2 = group['betas']
            eps = group['eps']

            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in float32 even for lower-precision parameters.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    msg = (
                        'RAdam does not support sparse gradients, '
                        'please consider SparseAdam instead'
                    )
                    raise RuntimeError(msg)

                p_data_fp32 = p.data.float()
                state = self.state[p]

                if len(state) == 0:
                    # Lazy init of first/second moment estimates.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(
                        p_data_fp32, memory_format=torch.preserve_format
                    )
                    state['exp_avg_sq'] = torch.zeros_like(
                        p_data_fp32, memory_format=torch.preserve_format
                    )
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        p_data_fp32
                    )

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                # Exponential moving averages of grad and grad^2.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                # N_sma/step_size depend only on the step count and betas,
                # so they are cached per (step mod 10) and shared across
                # parameters in the group.
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    # Length of the approximated SMA (rho_t in the paper).
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (
                        1 - beta2_t
                    )
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        # Variance is tractable: apply the rectification term.
                        step_size = (
                            lr
                            * math.sqrt(
                                (1 - beta2_t)
                                * (N_sma - 4)
                                / (N_sma_max - 4)
                                * (N_sma - 2)
                                / N_sma
                                * N_sma_max
                                / (N_sma_max - 2)
                            )
                            / (1 - beta1 ** state['step'])
                        )
                    else:
                        # Early steps: fall back to an un-adapted update.
                        step_size = lr / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                # Classic (coupled) L2 weight decay applied to the weights.
                if weight_decay != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-weight_decay * lr)

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(eps)
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
class AdamP(Optimizer):
    """Adam with the AdamP projection step.

    For parameters whose gradient is nearly orthogonal to the weights, the
    update is projected to remove its radial component and weight decay is
    damped by ``wd_ratio``. Matches the reference implementation of
    "AdamP: Slowing Down the Slowdown ..." (https://arxiv.org/abs/2006.08217).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
        super(AdamP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # Flatten to (dim0, -1): one row per leading-dim slice (e.g. channel).
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Flatten the whole tensor to a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        # Absolute cosine similarity between x and y under a flattening view.
        x = view_func(x)
        y = view_func(y)
        return F.cosine_similarity(x, y, dim=1, eps=eps).abs_()

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        # If grad is (nearly) orthogonal to the weights under either view,
        # remove the radial component of the update and return the damped
        # weight-decay ratio; otherwise return the update untouched.
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd
        return perturb, wd

    def step(self, closure=None):
        """Perform one optimization step; *closure* re-evaluates the loss."""
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                beta1, beta2 = group['betas']
                nesterov = group['nesterov']
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                # Standard Adam moment updates with bias correction.
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1

                if nesterov:
                    perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
                else:
                    perturb = exp_avg / denom

                # Projection (only for tensors with more than one dimension).
                wd_ratio = 1
                if len(p.shape) > 1:
                    perturb, wd_ratio = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])

                # Weight decay, damped by wd_ratio when the projection fired.
                if group['weight_decay'] > 0:
                    p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)

                # Step
                p.data.add_(perturb, alpha=-step_size)

        return loss
class Yogi(Optimizer):
    r"""Implements Yogi Optimizer Algorithm.
    It has been proposed in `Adaptive methods for Nonconvex Optimization`__.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate (default: 1e-2)
        betas: coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps: term added to the denominator to improve
            numerical stability (default: 0.001)
        initial_accumulator: initial values for first and
            second moments (default: 1e-6)
        weight_decay: weight decay (L2 penalty) (default: 0)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.Yogi(model.parameters(), lr=0.01)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization # noqa

    Note:
        Reference code: https://github.com/4rtemi5/Yogi-Optimizer_Keras
    """

    def __init__(
        self,
        params: Params,
        lr: float = 1e-2,
        betas: Betas2 = (0.9, 0.999),
        eps: float = 1e-3,
        initial_accumulator: float = 1e-6,
        weight_decay: float = 0,
    ) -> None:
        # Validate hyper-parameters up front so misconfiguration fails fast.
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if eps < 0.0:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 0: {}'.format(betas[0])
            )
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 1: {}'.format(betas[1])
            )
        if weight_decay < 0:
            raise ValueError(
                'Invalid weight_decay value: {}'.format(weight_decay)
            )
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            initial_accumulator=initial_accumulator,
            weight_decay=weight_decay,
        )
        super(Yogi, self).__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Yogi does not support sparse gradients, '
                        'please consider SparseAdam instead'
                    )

                state = self.state[p]
                # State initialization
                # Followed from official implementation in tensorflow addons:
                # https://github.com/tensorflow/addons/blob/master/tensorflow_addons/optimizers/yogi.py#L118 # noqa
                # For more details refer to the discussion:
                # https://github.com/jettify/pytorch-optimizer/issues/77
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = nn.init.constant_(
                        torch.empty_like(
                            p.data, memory_format=torch.preserve_format
                        ),
                        group['initial_accumulator'],
                    )
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = nn.init.constant_(
                        torch.empty_like(
                            p.data, memory_format=torch.preserve_format
                        ),
                        group['initial_accumulator'],
                    )

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # Classic (coupled) L2 regularization folded into the grad.
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # Yogi's additive sign-based second-moment update
                # (v -= (1-beta2)*sign(v - g^2)*g^2) instead of Adam's EMA.
                grad_squared = grad.mul(grad)
                exp_avg_sq.addcmul_(
                    torch.sign(exp_avg_sq - grad_squared),
                    grad_squared,
                    value=-(1 - beta2),
                )

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(
                    group['eps']
                )
                step_size = group['lr'] / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
# Registry mapping config-friendly names to optimizer constructors;
# add entries here to expose new optimizers through create_optimizer().
_optimizer_entrypoints = {
    'SGD': torch.optim.SGD,
    'Adam': torch.optim.Adam,
    'AdamW': torch.optim.AdamW,
    'SGDP': SGDP,
    'AdamP': AdamP,
    "RAdam": RAdam,
    "Yogi": Yogi,
    "RMSprop": torch.optim.RMSprop,
}
def optimizer_entrypoint(optimizer_name):
    """Look up the optimizer constructor registered under *optimizer_name*."""
    return _optimizer_entrypoints[optimizer_name]


def is_optimizer(optimizer_name):
    """Return True if *optimizer_name* has a registered constructor."""
    return optimizer_name in _optimizer_entrypoints


def create_optimizer(optimizer_name, **kwargs):
    """Instantiate a registered optimizer, forwarding *kwargs* to its constructor.

    Raises:
        RuntimeError: if *optimizer_name* is not registered.
    """
    if not is_optimizer(optimizer_name):
        raise RuntimeError('Unknown optimizer (%s)' % optimizer_name)
    factory = optimizer_entrypoint(optimizer_name)
    return factory(**kwargs)
|
{"/WEB_P3/p3_web/p3_app/views.py": ["/WEB_P3/p3_web/p3_app/visualize.py", "/WEB_P3/p3_web/p3_app/detect_model/detection_result.py"]}
|
26,699,162
|
bcaitech1/p3-ims-obd-savetheearth
|
refs/heads/master
|
/code/recycle_model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
import segmentation_models_pytorch as smp
import numpy as np
import timm
from pprint import pprint
class FCN8s(nn.Module):
    '''
    FCN-8s semantic segmentation network.

    Backbone: VGG-16
    num_classes: number of object categories to segment
    forward output
    - output : [batch_size, num_classes, height, width]
    '''
    def __init__(self, num_classes=12):
        super(FCN8s, self).__init__()
        self.num_classes = num_classes
        backbone = torchvision.models.vgg16(pretrained=True)

        # Reuse VGG-16 conv stages; each pooled stage halves the spatial size.
        self.conv1 = nn.Sequential(*(list(backbone.features[0:5])))    # 1 / 2
        self.conv2 = nn.Sequential(*(list(backbone.features[5:10])))   # 1 / 4
        self.conv3 = nn.Sequential(*(list(backbone.features[10:17])))  # 1 / 8
        self.conv4 = nn.Sequential(*(list(backbone.features[17:24])))  # 1 / 16
        self.conv5 = nn.Sequential(*(list(backbone.features[24:31])))  # 1 / 32

        # Fully-convolutional replacements for VGG's fc6/fc7 layers.
        self.fc6 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=4096, kernel_size=1, stride=1, padding=0),
                                 nn.ReLU(inplace=True),
                                 nn.Dropout2d())
        self.fc7 = nn.Sequential(nn.Conv2d(in_channels=4096, out_channels=4096, kernel_size=1, stride=1, padding=0),
                                 nn.ReLU(inplace=True),
                                 nn.Dropout2d())

        # 1x1 score heads for the three resolutions used in skip connections.
        self.score_3 = nn.Conv2d(in_channels=256, out_channels=self.num_classes, kernel_size=1, stride=1, padding=0)
        self.score_4 = nn.Conv2d(in_channels=512, out_channels=self.num_classes, kernel_size=1, stride=1, padding=0)
        self.score_5 = nn.Conv2d(in_channels=4096, out_channels=self.num_classes, kernel_size=1, stride=1, padding=0)

        # For ConvTranspose2d with input i, kernel k, padding p, stride s:
        # o' = s(i-1) + k - 2p
        self.upscore2_1 = nn.ConvTranspose2d(in_channels=self.num_classes, out_channels=self.num_classes, kernel_size=4, stride=2, padding=1)
        self.upscore2_2 = nn.ConvTranspose2d(in_channels=self.num_classes, out_channels=self.num_classes, kernel_size=4, stride=2, padding=1)
        self.upscore8_3 = nn.ConvTranspose2d(in_channels=self.num_classes, out_channels=self.num_classes, kernel_size=16, stride=8, padding=4)
        self._initialize_weights()

    def forward(self, x):
        conv1_out = self.conv1(x)
        conv2_out = self.conv2(conv1_out)
        conv3_out = self.conv3(conv2_out)   # 1/8 features for the second skip
        conv4_out = self.conv4(conv3_out)   # 1/16 features for the first skip
        conv5_out = self.conv5(conv4_out)
        fc6_out = self.fc6(conv5_out)
        fc7_out = self.fc7(fc6_out)

        # Upsample 1/32 scores to 1/16 and fuse with conv4 scores.
        score_1 = self.score_5(fc7_out)
        score_1_up = self.upscore2_1(score_1)
        score_2 = self.score_4(conv4_out)
        skip_connection_1 = score_1_up + score_2

        # Upsample to 1/8 and fuse with conv3 scores.
        score_2_up = self.upscore2_2(skip_connection_1)
        score_3 = self.score_3(conv3_out)
        skip_connection_2 = score_2_up + score_3

        # Final x8 upsample back to the input resolution.
        output = self.upscore8_3(skip_connection_2)
        return output

    def _initialize_weights(self):
        # Initialize every transposed conv with fixed bilinear-upsampling weights.
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                assert m.kernel_size[0] == m.kernel_size[1]
                initial_weight = self._get_upsampling_weight(m.in_channels, m.out_channels, m.kernel_size[0])
                m.weight.data.copy_(initial_weight)

    def _get_upsampling_weight(self, in_channels, out_channels, kernel_size):
        """
        Make a 2D bilinear kernel suitable for upsampling
        """
        factor = (kernel_size + 1) // 2
        if kernel_size % 2 == 1:
            center = factor - 1
        else:
            center = factor - 0.5
        og = np.ogrid[:kernel_size, :kernel_size]
        # Separable triangular (bilinear) filter centered on the kernel.
        filt = (1 - abs(og[0] - center) / factor) * \
               (1 - abs(og[1] - center) / factor)
        weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
                          dtype=np.float64)
        # Place the same filter on the channel diagonal (i -> i mapping).
        weight[range(in_channels), range(out_channels), :, :] = filt
        return torch.from_numpy(weight).float()
class VGG16(nn.Module):
    """VGG-16 feature extractor modified for dense prediction.

    The last two max-pools use stride 1 (keeping 1/8 resolution) and the
    final conv block uses dilation 2. When ``pretrained``, ImageNet conv
    weights from torchvision's VGG-16 are copied into the matching layers.
    """

    def __init__(self, pretrained=True):
        super(VGG16, self).__init__()
        self.features = nn.Sequential(self._conv3x3_relu(3, 64),
                                      self._conv3x3_relu(64, 64),
                                      nn.MaxPool2d(3, stride=2, padding=1),  # 1/2
                                      self._conv3x3_relu(64, 128),
                                      self._conv3x3_relu(128, 128),
                                      nn.MaxPool2d(3, stride=2, padding=1),  # 1/4
                                      self._conv3x3_relu(128, 256),
                                      self._conv3x3_relu(256, 256),
                                      self._conv3x3_relu(256, 256),
                                      nn.MaxPool2d(3, stride=2, padding=1),  # 1/8
                                      self._conv3x3_relu(256, 512),
                                      self._conv3x3_relu(512, 512),
                                      self._conv3x3_relu(512, 512),
                                      nn.MaxPool2d(3, stride=1, padding=1),  # stride 1 keeps the spatial size
                                      self._conv3x3_relu(512, 512, rate=2),  # dilated rate = 2
                                      self._conv3x3_relu(512, 512, rate=2),
                                      self._conv3x3_relu(512, 512, rate=2),
                                      nn.MaxPool2d(3, stride=1, padding=1),  # stride 1 keeps the spatial size
                                      nn.AvgPool2d(3, stride=1, padding=1))  # stride 1 keeps the spatial size
        if pretrained:
            backbone = torchvision.models.vgg16(pretrained=True)
            weight = backbone.state_dict()
            # Copy the first 26 tensors (weight+bias of the 13 conv layers)
            # into this Sequential's parameters by position.
            weight2_keys = list(self.features.state_dict().keys())
            weight2 = dict()
            for idx, key in enumerate(list(weight.keys())[:26]):
                weight2[weight2_keys[idx]] = weight[key]
            self.features.load_state_dict(weight2)

    def forward(self, x):
        # Returns the 1/8-resolution feature map.
        output = self.features(x)
        return output

    def _conv3x3_relu(self, inplanes, planes, rate=1):
        # 3x3 conv + ReLU; `rate` sets both dilation and padding so the
        # spatial size is preserved.
        conv3x3_relu = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=rate, dilation=rate),
                                     nn.ReLU())
        return conv3x3_relu
class Atrous_module_2(nn.Module):
    """DeepLab-v2 style ASPP branch: one dilated 3x3 conv followed by two
    1x1 convs (with ReLU/Dropout2d between), ending in a class-score conv."""

    def __init__(self, inplanes, num_classes, rate):
        super(Atrous_module_2, self).__init__()
        hidden = inplanes
        self.atrous = nn.Sequential(
            nn.Conv2d(inplanes, hidden, kernel_size=3, stride=1, padding=rate, dilation=rate),
            nn.ReLU(),
            nn.Dropout2d(),
            nn.Conv2d(hidden, hidden, kernel_size=1, stride=1),
            nn.ReLU(),
            nn.Dropout2d(),
            nn.Conv2d(hidden, num_classes, kernel_size=1, stride=1),
        )
        self._init_parameters()

    def forward(self, x):
        """Apply the atrous branch to *x*."""
        return self.atrous(x)

    def _init_parameters(self):
        # Kaiming-normal weights and zero biases for every conv in the branch.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
                nn.init.zeros_(module.bias)
class DeepLabV2(nn.Module):
    """DeepLab-v2 head over the dilated VGG-16 backbone.

    Four parallel atrous branches (rates 6/12/18/24) are summed and
    upsampled x8 back to the input resolution.
    """

    def __init__(self, num_classes, pretrained=True):
        super(DeepLabV2, self).__init__()
        self.backbone = VGG16(pretrained=pretrained)
        rates = [6, 12, 18, 24]
        self.aspp1 = Atrous_module_2(512, num_classes, rate=rates[0])
        self.aspp2 = Atrous_module_2(512, num_classes, rate=rates[1])
        self.aspp3 = Atrous_module_2(512, num_classes, rate=rates[2])
        self.aspp4 = Atrous_module_2(512, num_classes, rate=rates[3])
        # BUGFIX: the original also built
        #     self.global_avg_pool = ASPPPooling(512, outplanes)
        # but neither `ASPPPooling` nor `outplanes` is defined or imported
        # in this module, so __init__ raised NameError; the attribute was
        # also never used in forward(). The line has been removed.

    def forward(self, x):
        x = self.backbone(x)
        # Four parallel dilated branches over the same 1/8 feature map.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x_sum = x1 + x2 + x3 + x4
        # Recover the input resolution (the backbone keeps 1/8 scale).
        output = F.interpolate(x_sum, scale_factor=8, mode='bilinear')
        return output
# -------------------------------------------------------------------------------------------
class Atrous_module_3(nn.Module):
    """Conv -> BatchNorm -> ReLU ASPP branch used by the DeepLab-v3 head."""

    def __init__(self, inplanes, outplanes, kernel_size, padding, dilation):
        super(Atrous_module_3, self).__init__()
        self.atrous_convolution = nn.Conv2d(
            inplanes, outplanes, kernel_size=kernel_size, stride=1,
            padding=padding, dilation=dilation, bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU()
        self._init_parameters()

    def forward(self, x):
        """Run conv, then batch-norm, then ReLU on *x*."""
        out = self.atrous_convolution(x)
        out = self.batch_norm(out)
        return self.relu(out)

    def _init_parameters(self):
        # Kaiming init for convs; unit weight / zero bias for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
class DeepLabV3(nn.Module):
    """DeepLab-v3 head over the dilated VGG-16 backbone.

    Five parallel ASPP branches (a 1x1 conv, three dilated 3x3 convs, and
    an image-level pooling branch) are concatenated, fused by 1x1/3x3
    convs, and upsampled x8 to the input resolution.
    """

    def __init__(self, num_classes=12, inplanes=512, outplanes=256, pretrained=True):
        super(DeepLabV3, self).__init__()
        self.backbone = VGG16(pretrained=pretrained)

        # ASPP branches: 1x1 conv plus three dilated 3x3 convs.
        rates = [1, 6, 12, 18]
        self.aspp1 = Atrous_module_3(inplanes, outplanes, kernel_size=1, padding=0, dilation=rates[0])
        self.aspp2 = Atrous_module_3(inplanes, outplanes, kernel_size=3, padding=rates[1], dilation=rates[1])
        self.aspp3 = Atrous_module_3(inplanes, outplanes, kernel_size=3, padding=rates[2], dilation=rates[2])
        self.aspp4 = Atrous_module_3(inplanes, outplanes, kernel_size=3, padding=rates[3], dilation=rates[3])

        # Image-level branch. NOTE(review): uses AdaptiveMaxPool2d, whereas
        # the DeepLab-v3 paper describes global *average* pooling — confirm.
        self.image_pool = nn.Sequential(nn.AdaptiveMaxPool2d((1,1)),
                                        nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, bias=False),
                                        nn.BatchNorm2d(outplanes),
                                        nn.ReLU())

        # Fuse the five concatenated branches back to `outplanes` channels.
        self.fc1 = nn.Sequential(nn.Conv2d(outplanes * 5, outplanes, kernel_size=1, bias=False),
                                 nn.BatchNorm2d(outplanes),
                                 nn.ReLU(),
                                 nn.Dropout2d())
        # Final classifier producing per-pixel class logits.
        self.fc2 = nn.Sequential(nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=False),
                                 nn.BatchNorm2d(outplanes),
                                 nn.ReLU(),
                                 nn.Conv2d(outplanes, num_classes, kernel_size=1, stride=1))
        self._init_parameters()

    def forward(self, x):
        x = self.backbone(x)
        # Parallel ASPP branches over the same feature map.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.image_pool(x)
        # Broadcast the 1x1 pooled branch back to feature-map resolution.
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.fc1(x)
        x = self.fc2(x)
        # Recover the input resolution (the backbone keeps 1/8 scale).
        x = F.interpolate(x, scale_factor=8, mode='bilinear', align_corners=True)
        return x

    def _init_parameters(self):
        # Kaiming init for convs, unit/zero affine params for batch norms;
        # the ASPP branches initialize themselves in Atrous_module_3.
        blocks = [self.image_pool, self.fc1, self.fc2]
        for block in blocks:
            for m in block.modules():
                if isinstance(m, nn.Conv2d):  # init conv
                    nn.init.kaiming_normal_(m.weight)
                elif isinstance(m, nn.BatchNorm2d):  # init BN
                    nn.init.constant_(m.weight,1)
                    nn.init.constant_(m.bias,0)
# -------------------------------------------------------------------------------------------
class TorchVisionDeepLabv3_ResNet101(nn.Module):
    """Pretrained torchvision DeepLabV3-ResNet101 whose classifier head is
    replaced by a fresh DeepLabHead emitting `num_classes` channels."""

    def __init__(self, num_classes=12):
        super(TorchVisionDeepLabv3_ResNet101, self).__init__()
        self.seg_model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)
        # Swap the classifier so the output matches our label set.
        self.seg_model.classifier = DeepLabHead(2048, num_classes)

    def forward(self, x):
        """Return only the 'out' logits tensor of the torchvision model."""
        result = self.seg_model(x)
        return result['out']
class TorchVisionDeepLabv3_ResNet50(nn.Module):
    """Pretrained torchvision DeepLabV3-ResNet50 whose classifier head is
    replaced by a fresh DeepLabHead emitting `num_classes` channels."""

    def __init__(self, num_classes=12):
        super(TorchVisionDeepLabv3_ResNet50, self).__init__()
        self.seg_model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)
        # Swap the classifier so the output matches our label set.
        self.seg_model.classifier = DeepLabHead(2048, num_classes)

    def forward(self, x):
        """Return only the 'out' logits tensor of the torchvision model."""
        result = self.seg_model(x)
        return result['out']
# --------------------------------------------------------------------------------------------
class SMP_DeepLabV3Plus_ResNet101(nn.Module):
    """smp DeepLabV3+ with an ImageNet-pretrained ResNet-101 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_ResNet101, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="resnet101",
                                           encoder_weights="imagenet",
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_DeepLabV3Plus_resnext101_32x4d(nn.Module):
    """smp DeepLabV3+ with an SSL-pretrained ResNeXt101-32x4d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_resnext101_32x4d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="resnext101_32x4d",
                                           encoder_weights="ssl",
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_DeepLabV3Plus_resnext101_32x8d(nn.Module):
    """smp DeepLabV3+ with an SSL-pretrained ResNeXt101-32x8d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_resnext101_32x8d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="resnext101_32x8d",
                                           encoder_weights="ssl",  # ssl: semi-supervised learning on ImageNet
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_DeepLabV3Plus_resnext101_32x16d(nn.Module):
    """smp DeepLabV3+ with an SSL-pretrained ResNeXt101-32x16d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_resnext101_32x16d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="resnext101_32x16d",
                                           encoder_weights="ssl",  # ssl: semi-supervised learning on ImageNet
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
# ResNest encoders do not support dilated mode
class SMP_DeepLabV3Plus_timm_resnest101e(nn.Module):
    """smp DeepLabV3+ with an ImageNet-pretrained timm ResNeSt-101e encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_timm_resnest101e, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="timm-resnest101e",
                                           encoder_weights="imagenet",
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_DeepLabV3Plus_efficientnet_b1(nn.Module):
    """smp DeepLabV3+ with an ImageNet-pretrained EfficientNet-B1 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_efficientnet_b1, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="efficientnet-b1",
                                           encoder_weights="imagenet",
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_DeepLabV3Plus_se_resnext101_32x4d(nn.Module):
    """smp DeepLabV3+ with an ImageNet-pretrained SE-ResNeXt101-32x4d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_se_resnext101_32x4d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="se_resnext101_32x4d",
                                           encoder_weights="imagenet",
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_DeepLabV3Plus_xception(nn.Module):
    """smp DeepLabV3+ with an ImageNet-pretrained Xception encoder."""
    def __init__(self, num_classes=12):
        super(SMP_DeepLabV3Plus_xception, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.DeepLabV3Plus(encoder_name="xception",
                                           encoder_weights="imagenet",
                                           in_channels=3,
                                           classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
# ---------------------------------------------------------------
class SMP_PSPNet_resnext101_32x4d(nn.Module):
    """smp PSPNet with an SSL-pretrained ResNeXt101-32x4d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_PSPNet_resnext101_32x4d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.PSPNet(encoder_name="resnext101_32x4d",
                                    encoder_weights="ssl",
                                    in_channels=3,
                                    classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
#----------------------------------------------------------------------
class SMP_UNet_effb4(nn.Module):
    """smp U-Net with an ImageNet-pretrained EfficientNet-B4 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_UNet_effb4, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.Unet(encoder_name="efficientnet-b4",
                                  encoder_weights="imagenet",
                                  in_channels=3,
                                  classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_UNet_effb4_ns(nn.Module):
    """smp U-Net with a Noisy-Student-pretrained timm EfficientNet-B4 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_UNet_effb4_ns, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.Unet(encoder_name="timm-efficientnet-b4",
                                  encoder_weights="noisy-student",
                                  in_channels=3,
                                  classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_UNet_resnext101_32x4d(nn.Module):
    """smp U-Net with an SSL-pretrained ResNeXt101-32x4d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_UNet_resnext101_32x4d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.Unet(encoder_name="resnext101_32x4d",
                                  encoder_weights="ssl",
                                  in_channels=3,
                                  classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
# ------------------------------------------------------
class SMP_Linknet_se_resnext50_32x4d(nn.Module):
    """smp Linknet with an ImageNet-pretrained SE-ResNeXt50-32x4d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_Linknet_se_resnext50_32x4d, self).__init__()
        # FIX: this class is named Linknet but constructed smp.Unet — use the
        # architecture the name promises. Also honour num_classes instead of
        # hard-coding classes=12.
        self.seg_model = smp.Linknet(encoder_name="se_resnext50_32x4d",
                                     encoder_weights="imagenet",
                                     in_channels=3,
                                     classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
# ------------------------------------------------------
class SMP_FPN_effb0(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B0 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb0, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b0",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb1(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B1 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb1, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b1",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb2(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B2 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb2, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b2",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb3(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B3 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb3, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b3",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb3_ns(nn.Module):
    """smp FPN with a Noisy-Student-pretrained timm EfficientNet-B3 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb3_ns, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="timm-efficientnet-b3",
                                 encoder_weights="noisy-student",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb4(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B4 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb4, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b4",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb4_ns(nn.Module):
    """smp FPN with a Noisy-Student-pretrained timm EfficientNet-B4 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb4_ns, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="timm-efficientnet-b4",
                                 encoder_weights="noisy-student",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb5_ns(nn.Module):
    """smp FPN with a Noisy-Student-pretrained timm EfficientNet-B5 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb5_ns, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="timm-efficientnet-b5",
                                 encoder_weights="noisy-student",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb5(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B5 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb5, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b5",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_effb6(nn.Module):
    """smp FPN with an ImageNet-pretrained EfficientNet-B6 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_effb6, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="efficientnet-b6",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_resnext101_32x4d(nn.Module):
    """smp FPN with an SSL-pretrained ResNeXt101-32x4d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_resnext101_32x4d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="resnext101_32x4d",
                                 encoder_weights="ssl",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_resnext101_32x8d(nn.Module):
    """smp FPN with an SSL-pretrained ResNeXt101-32x8d encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_resnext101_32x8d, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="resnext101_32x8d",
                                 encoder_weights="ssl",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
class SMP_FPN_resnet101(nn.Module):
    """smp FPN with an ImageNet-pretrained ResNet-101 encoder."""
    def __init__(self, num_classes=12):
        super(SMP_FPN_resnet101, self).__init__()
        # FIX: honour num_classes instead of hard-coding classes=12.
        self.seg_model = smp.FPN(encoder_name="resnet101",
                                 encoder_weights="imagenet",
                                 in_channels=3,
                                 classes=num_classes)
    def forward(self, x):
        """Return (N, num_classes, H, W) segmentation logits."""
        return self.seg_model(x)
# --------------------------------------------------------------
# for checking forward progress
if __name__ == "__main__":
    # Ad-hoc smoke test: build one model and push a dummy batch through it.
    # Earlier experiments are kept below, commented out, for quick re-use.
    # backbone = torchvision.models.vgg16(pretrained=True)
    # print(backbone)
    # x = torch.randn(2, 3, 512, 512)
    # model = FCN8s()
    # print(model)
    # output = model(x)
    # print(output.shape)
    # model = DeepLabV2(num_classes=12)
    # x = torch.randn(2, 3, 512, 512)
    # output = model(x)
    # print(output.shape)
    # model = DeepLabV3(num_classes=12)
    # x = torch.randn(2, 3, 512, 512)
    # output = model(x)
    # print(output.shape)
    model = SMP_UNet_resnext101_32x4d(num_classes=12)
    x = torch.randn(2, 3, 512, 512)
    print(model)
    output = model(x)
    # Expect (2, 12, 512, 512) logits.
    print(output.shape)
    pass
|
{"/WEB_P3/p3_web/p3_app/views.py": ["/WEB_P3/p3_web/p3_app/visualize.py", "/WEB_P3/p3_web/p3_app/detect_model/detection_result.py"]}
|
26,699,163
|
bcaitech1/p3-ims-obd-savetheearth
|
refs/heads/master
|
/code/augmentation.py
|
from albumentations import (Compose, Resize, Normalize, ShiftScaleRotate, Rotate, GridDistortion, CenterCrop, RandomResizedCrop, CLAHE, RandomBrightnessContrast, ElasticTransform, RandomContrast, GaussNoise, HorizontalFlip, pytorch, Cutout, VerticalFlip, OneOf, CropNonEmptyMaskIfExists)
from albumentations.pytorch import ToTensorV2
class BaseTrainAugmentation:
    """Minimal train pipeline: dataset-statistics normalisation + tensor."""

    def __init__(self):
        steps = [
            Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                      std=[0.2108204, 0.20766491, 0.21656131],
                      max_pixel_value=255.0, p=1.0),
            ToTensorV2(),
        ]
        self.transformer = Compose(steps)

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class Aug1TrainAugmentation:
    """Random flip + cutout, then normalise and convert to tensor."""

    def __init__(self):
        flip = OneOf([VerticalFlip(), HorizontalFlip()], p=0.5)
        holes = Cutout(num_holes=8, max_h_size=20, max_w_size=20, p=0.5)
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([flip, holes, norm, ToTensorV2()])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class Aug2TrainAugmentation:
    """Flip + elastic distortion + cutout, then normalise and tensorise."""

    def __init__(self):
        flip = OneOf([VerticalFlip(), HorizontalFlip()], p=0.5)
        elastic = ElasticTransform(always_apply=False, p=0.3, alpha=1.68,
                                   sigma=48.32, alpha_affine=44.97,
                                   interpolation=0, border_mode=2,
                                   value=(0, 0, 0), mask_value=None,
                                   approximate=False)
        holes = Cutout(num_holes=8, max_h_size=20, max_w_size=20, p=0.5)
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([flip, elastic, holes, norm, ToTensorV2()])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class Aug3TrainAugmentation:
    """Flip + grid distortion + cutout, then normalise and tensorise."""

    def __init__(self):
        flip = OneOf([VerticalFlip(), HorizontalFlip()], p=0.5)
        grid = GridDistortion(always_apply=False, p=0.5, num_steps=5,
                              distort_limit=(-0.46, 0.40), interpolation=0,
                              border_mode=0, value=(0, 0, 0), mask_value=None)
        holes = Cutout(num_holes=8, max_h_size=20, max_w_size=20, p=0.5)
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([flip, grid, holes, norm, ToTensorV2()])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class Aug4TrainAugmentation:
    """Flip + one of grid/elastic distortion + CLAHE + cutout, then
    normalise and tensorise."""

    def __init__(self):
        flip = OneOf([VerticalFlip(), HorizontalFlip()], p=0.5)
        distort = OneOf([
            GridDistortion(always_apply=False, p=0.5, num_steps=5,
                           distort_limit=(-0.46, 0.40), interpolation=0,
                           border_mode=0, value=(0, 0, 0), mask_value=None),
            ElasticTransform(always_apply=False, p=0.5, alpha=1.68,
                             sigma=48.32, alpha_affine=44.97, interpolation=0,
                             border_mode=2, value=(0, 0, 0), mask_value=None,
                             approximate=False),
        ], p=0.5)
        contrast = CLAHE(clip_limit=(1, 8), tile_grid_size=(10, 10), p=0.3)
        holes = Cutout(num_holes=8, max_h_size=20, max_w_size=20, p=0.5)
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([flip, distort, contrast, holes, norm,
                                    ToTensorV2()])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class Aug5TrainAugmentation:
    """Horizontal flip + shift/scale/rotate + grid distortion + cutout,
    then normalise and tensorise."""

    def __init__(self):
        affine = ShiftScaleRotate(always_apply=False, p=0.5,
                                  shift_limit=(-0.06, 0.06),
                                  scale_limit=(-0.10, 0.10),
                                  rotate_limit=(-15, 15), interpolation=0,
                                  border_mode=0, value=(0, 0, 0),
                                  mask_value=None)
        grid = GridDistortion(always_apply=False, p=0.5, num_steps=5,
                              distort_limit=(-0.46, 0.40), interpolation=0,
                              border_mode=0, value=(0, 0, 0), mask_value=None)
        holes = Cutout(num_holes=8, max_h_size=20, max_w_size=20, p=0.5)
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([HorizontalFlip(p=0.5), affine, grid,
                                    holes, norm, ToTensorV2()])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class AugLastTrainAugmentation:
    """Flip + CLAHE + one of grid/elastic + random resized crop +
    shift/scale/rotate, then normalise and tensorise."""

    def __init__(self):
        distort = OneOf([
            GridDistortion(num_steps=5, distort_limit=(-0.46, 0.40)),
            ElasticTransform(alpha=1.68, sigma=48.32, alpha_affine=44.97),
        ], p=0.3)
        crop = RandomResizedCrop(p=0.3, height=512, width=512,
                                 scale=(0.08, 1.0), ratio=(0.75, 1.33))
        affine = ShiftScaleRotate(p=0.3, shift_limit=(-0.06, 0.06),
                                  scale_limit=(-0.10, 0.10),
                                  rotate_limit=(-20, 20))
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([
            HorizontalFlip(p=0.5),
            CLAHE(clip_limit=(1, 8), tile_grid_size=(10, 10), p=0.3),
            distort, crop, affine, norm, ToTensorV2(),
        ])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class FinalTrainAugmentation:
    """Flip + one of grid/elastic/resized-crop + shift/scale/rotate, then
    normalise and tensorise."""

    def __init__(self):
        distort = OneOf([
            GridDistortion(num_steps=5, distort_limit=(-0.46, 0.40),
                           value=(0, 0, 0)),
            ElasticTransform(alpha=1.68, sigma=48.32, alpha_affine=44.97,
                             value=(0, 0, 0)),
            RandomResizedCrop(height=512, width=512, scale=(0.08, 1.0),
                              ratio=(0.75, 1.33)),
        ], p=0.3)
        affine = ShiftScaleRotate(shift_limit=(-0.06, 0.06),
                                  scale_limit=(-0.1, 0.1),
                                  rotate_limit=(-90, 90), p=0.3)
        norm = Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                         std=[0.2108204, 0.20766491, 0.21656131],
                         max_pixel_value=255.0, p=1.0)
        self.transformer = Compose([HorizontalFlip(p=0.5), distort, affine,
                                    norm, ToTensorV2()])

    def __call__(self, image, mask):
        augmented = self.transformer(image=image, mask=mask)
        return augmented
class BaseTestAugmentation:
    """Inference pipeline: normalisation + tensor only (no mask argument)."""

    def __init__(self):
        steps = [
            Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                      std=[0.2108204, 0.20766491, 0.21656131],
                      max_pixel_value=255.0, p=1.0),
            ToTensorV2(),
        ]
        self.transformer = Compose(steps)

    def __call__(self, image):
        augmented = self.transformer(image=image)
        return augmented
class CenterCropBaseAugmentation:
    """Center-crop to (resize_height, resize_width), normalise with ImageNet
    statistics and convert to tensor (image-only pipeline)."""
    def __init__(self, resize_height, resize_width):
        self.resize_height = resize_height
        self.resize_width = resize_width
        self.transformer = Compose([
            CenterCrop(height=self.resize_height, width=self.resize_width),
            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            # BUG FIX: `albumentations` itself is never imported at module
            # level (only the `pytorch` submodule is), so the previous
            # `albumentations.pytorch.transforms.ToTensor()` raised NameError.
            pytorch.transforms.ToTensor(),
        ])
    def __call__(self, image):
        return self.transformer(image=image)
class ResizeVariousAugmentation:
    """Resize + fixed-strength contrast + flip + small rotation, normalised
    with ImageNet statistics (image-only pipeline)."""
    def __init__(self, resize_height, resize_width):
        self.resize_height = resize_height
        self.resize_width = resize_width
        self.transformer = Compose([
            Resize(height=self.resize_height, width=self.resize_width),
            RandomContrast(limit=[0.5, 0.51], always_apply=True),
            HorizontalFlip(p=0.5),
            Rotate(limit=5, p=0.5),
            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            # BUG FIX: `albumentations` itself is never imported at module
            # level (only the `pytorch` submodule is), so the previous
            # `albumentations.pytorch.transforms.ToTensor()` raised NameError.
            pytorch.transforms.ToTensor(),
        ])
    def __call__(self, image):
        return self.transformer(image=image)
class CenterCropVariousAugmentation:
    """Center crop + brightness/contrast jitter + flip + tiny rotation,
    normalised with ImageNet statistics (image-only pipeline)."""
    def __init__(self, resize_height, resize_width):
        self.resize_height = resize_height
        self.resize_width = resize_width
        self.transformer = Compose([
            CenterCrop(height=self.resize_height, width=self.resize_width, always_apply=True),
            RandomBrightnessContrast(p=0.5),
            HorizontalFlip(p=0.5),
            Rotate(limit=3, p=0.5),
            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            # BUG FIX: `albumentations` itself is never imported at module
            # level (only the `pytorch` submodule is), so the previous
            # `albumentations.pytorch.transforms.ToTensor()` raised NameError.
            pytorch.transforms.ToTensor(),
        ])
    def __call__(self, image):
        return self.transformer(image=image)
|
{"/WEB_P3/p3_web/p3_app/views.py": ["/WEB_P3/p3_web/p3_app/visualize.py", "/WEB_P3/p3_web/p3_app/detect_model/detection_result.py"]}
|
26,699,164
|
bcaitech1/p3-ims-obd-savetheearth
|
refs/heads/master
|
/code/loss.py
|
# https://github.com/CoinCheung/pytorch-loss
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# https://discuss.pytorch.org/t/is-this-a-correct-implementation-for-focal-loss-in-pytorch/43327/8
class FocalLoss(nn.Module):
    """Multi-class focal loss on raw logits.

    With gamma=0 this reduces to standard cross-entropy; larger gamma
    down-weights well-classified examples.
    """

    def __init__(self, weight=None, gamma=2., reduction='mean'):
        nn.Module.__init__(self)
        self.weight = weight
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, input_tensor, target_tensor):
        log_p = F.log_softmax(input_tensor, dim=-1)
        # Modulating factor (1 - p)^gamma applied per class.
        focal_weight = (1 - log_p.exp()) ** self.gamma
        return F.nll_loss(focal_weight * log_p,
                          target_tensor,
                          weight=self.weight,
                          reduction=self.reduction)
class FocalLossV1(nn.Module):
    """Binary focal loss on raw logits (element-wise BCE formulation).

    Usage:
        >>> criteria = FocalLossV1()
        >>> logits = torch.randn(8, 19, 384, 384)   # float/half
        >>> lbs = torch.randint(0, 2, (8, 19, 384, 384)).float()
        >>> loss = criteria(logits, lbs)
    """

    def __init__(self,
                 alpha=0.25,
                 gamma=2,
                 reduction='mean',):
        super(FocalLossV1, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.crit = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, logits, label):
        """logits and label share the same shape; label entries are 0/1."""
        logits = logits.float()  # promote fp16 for numerical stability
        positive = label == 1
        # alpha weighting is a constant w.r.t. the logits, so no grad needed.
        with torch.no_grad():
            alpha = torch.empty_like(logits).fill_(1 - self.alpha)
            alpha[positive] = self.alpha
        probs = torch.sigmoid(logits)
        pt = torch.where(positive, probs, 1 - probs)
        ce = self.crit(logits, label.float())
        loss = alpha * (1 - pt) ** self.gamma * ce
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
class GeneralizedSoftDiceLoss(nn.Module):
    """Soft Dice loss over one-hot targets with optional per-class weights
    and an ignore label.

    NOTE(review): probabilities come from a per-channel sigmoid (not a
    softmax across classes), so each channel is scored as an independent
    binary map — confirm this matches the caller's expectation.
    """
    def __init__(self,
                 p=1,
                 smooth=1,
                 reduction='mean',
                 weight=None,
                 ignore_lb=255):
        # p: exponent on the probabilities in the denominator.
        # smooth: additive smoothing; avoids 0/0 for empty classes.
        # reduction: 'mean' averages per-sample losses; otherwise the
        #   per-sample loss vector is returned unchanged.
        # weight: optional per-class weights (any sequence torch.tensor accepts).
        # ignore_lb: label value to exclude from the loss.
        super(GeneralizedSoftDiceLoss, self).__init__()
        self.p = p
        self.smooth = smooth
        self.reduction = reduction
        self.weight = None if weight is None else torch.tensor(weight)
        self.ignore_lb = ignore_lb
    def forward(self, logits, label):
        '''
        args: logits: tensor of shape (N, C, H, W)
        args: label: tensor of shape(N, H, W)
        '''
        # overcome ignored label
        logits = logits.float()
        ignore = label.data.cpu() == self.ignore_lb
        label = label.clone()
        # Temporarily map ignored pixels to class 0 so scatter_ stays in range.
        label[ignore] = 0
        lb_one_hot = torch.zeros_like(logits).scatter_(1, label.unsqueeze(1), 1)
        # Zero the one-hot entries of ignored pixels across every channel:
        # nonzero() yields (K, M) indices into (N, H, W); chunk splits them so
        # `a` is the batch index and `*b` the spatial indices, while the
        # arange broadcasts over the class dimension.
        ignore = ignore.nonzero()
        _, M = ignore.size()
        a, *b = ignore.chunk(M, dim=1)
        lb_one_hot[[a, torch.arange(lb_one_hot.size(1)).long(), *b]] = 0
        lb_one_hot = lb_one_hot.detach()
        # compute loss
        probs = torch.sigmoid(logits)
        numer = torch.sum((probs*lb_one_hot), dim=(2, 3))
        denom = torch.sum(probs.pow(self.p)+lb_one_hot.pow(self.p), dim=(2, 3))
        if not self.weight is None:
            # Apply class weights before summing over the class dimension.
            numer = numer * self.weight.view(1, -1)
            denom = denom * self.weight.view(1, -1)
        numer = torch.sum(numer, dim=1)
        denom = torch.sum(denom, dim=1)
        loss = 1 - (2*numer+self.smooth)/(denom+self.smooth)
        if self.reduction == 'mean':
            loss = loss.mean()
        return loss
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing.

    The target distribution puts `1 - smoothing` mass on the true class and
    spreads `smoothing` uniformly over the remaining `classes - 1` classes.
    With smoothing=0 this equals standard cross-entropy.
    """

    def __init__(self, classes=3, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        # The smoothed target distribution carries no gradient.
        with torch.no_grad():
            smoothed = torch.zeros_like(log_probs)
            smoothed.fill_(self.smoothing / (self.cls - 1))
            smoothed.scatter_(1, target.data.unsqueeze(1), self.confidence)
        per_sample = torch.sum(-smoothed * log_probs, dim=self.dim)
        return torch.mean(per_sample)
# class Focal_Dice(nn.Module):
# def __init__(self):
# super(BCE_Dice_combination, self).__init__()
# nn.cr
# Registry mapping config names to loss-class constructors.
_criterion_entrypoints = {
    'cross_entropy': nn.CrossEntropyLoss,
    'focal_loss': FocalLoss,
    'label_smoothing': LabelSmoothingLoss,
}


def criterion_entrypoint(criterion_name):
    """Look up the constructor registered under *criterion_name*."""
    return _criterion_entrypoints[criterion_name]


def is_criterion(criterion_name):
    """Return True when *criterion_name* names a registered loss."""
    return criterion_name in _criterion_entrypoints


def create_criterion(criterion_name, **kwargs):
    """Instantiate a registered loss, forwarding **kwargs to its constructor.

    Raises:
        RuntimeError: if *criterion_name* is not registered.
    """
    if not is_criterion(criterion_name):
        raise RuntimeError('Unknown loss (%s)' % criterion_name)
    return criterion_entrypoint(criterion_name)(**kwargs)


if __name__ == "__main__":
    pass
|
{"/WEB_P3/p3_web/p3_app/views.py": ["/WEB_P3/p3_web/p3_app/visualize.py", "/WEB_P3/p3_web/p3_app/detect_model/detection_result.py"]}
|
26,841,411
|
metazool/forambulator
|
refs/heads/master
|
/forams/test/test_download.py
|
from forams.download import download_data
|
{"/forambulator/test/test_download.py": ["/forambulator/download.py"], "/scripts/collect_process_data.py": ["/forambulator/images.py", "/forambulator/download.py"], "/forambulator/test/test_images.py": ["/forambulator/images.py"], "/forams/test/test_download.py": ["/forams/download.py"], "/forams/test/test_images.py": ["/forams/images.py"]}
|
26,841,412
|
metazool/forambulator
|
refs/heads/master
|
/forams/test/test_images.py
|
from forams.images import list_image_filenames
def test_files():
    """Smoke check: dump whatever image filenames are discovered under ./data."""
    found = list_image_filenames('./data')
    print(found)
|
{"/forambulator/test/test_download.py": ["/forambulator/download.py"], "/scripts/collect_process_data.py": ["/forambulator/images.py", "/forambulator/download.py"], "/forambulator/test/test_images.py": ["/forambulator/images.py"], "/forams/test/test_download.py": ["/forams/download.py"], "/forams/test/test_images.py": ["/forams/images.py"]}
|
26,841,413
|
metazool/forambulator
|
refs/heads/master
|
/forams/train.py
|
"""Kick off the StyleGAN2 training run
Slight adaptation to this script:
https://raw.githubusercontent.com/NVlabs/stylegan2/master/run_training.py
Which provides more defaults and should work with existing notebooks
This needs a checkout of stylegan2 in PYTHONPATH
"""
import copy
import os
import dnnlib
from dnnlib import EasyDict
from metrics.metric_defaults import metric_defaults
# ----------------------------------------------------------------------------
# Run configurations accepted by train(); names follow the StyleGAN2 paper
# (Tables 1 and 2).
_valid_configs = [
    # Table 1
    'config-a', # Baseline StyleGAN
    'config-b', # + Weight demodulation
    'config-c', # + Lazy regularization
    'config-d', # + Path length regularization
    'config-e', # + No growing, new G & D arch.
    'config-f', # + Large networks (default)
    # Table 2
    'config-e-Gorig-Dorig', 'config-e-Gorig-Dresnet', 'config-e-Gorig-Dskip',
    'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
    'config-e-Gskip-Dorig', 'config-e-Gskip-Dresnet', 'config-e-Gskip-Dskip',
]
# ----------------------------------------------------------------------------
def train(dataset='tfrecords',
          data_dir=None,
          resume_from=None,
          result_dir='results',
          config_id='config-f',
          num_gpus=1,
          gamma=None,
          mirror_augment=True,
          metrics=None,
          total_kimg=25000,
          save_ticks=1):
    """Configure and submit a StyleGAN2 training run via dnnlib.

    Args:
        dataset: TFRecords dataset directory name.
        data_dir: parent directory of the dataset (defaults to the CWD).
        resume_from: optional network pickle to resume training from.
        result_dir: root directory for run results.
        config_id: one of _valid_configs (paper Tables 1-2).
        num_gpus: number of GPUs; must be 1, 2, 4 or 8.
        gamma: optional override for the R1 regularization weight.
        mirror_augment: enable horizontal mirror augmentation.
        metrics: metric names looked up in metric_defaults (default: none).
        total_kimg: training length in thousands of real images.
        save_ticks: image/network snapshot interval in ticks.

    Raises:
        AssertionError: if num_gpus or config_id is invalid.
    """
    # FIX: `metrics=[]` was a mutable default argument; use a None sentinel.
    if metrics is None:
        metrics = []
    # Options for training loop. (Local renamed from `train`, which shadowed
    # this function's own name.)
    train_opts = EasyDict(run_func_name='training.training_loop.training_loop')
    # Options for generator network.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')
    # Options for discriminator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')
    # Options for generator optimizer.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    # Options for discriminator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    # Options for generator loss.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg')
    # Options for discriminator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')
    # Options for TrainingSchedule.
    sched = EasyDict()
    # Options for setup_snapshot_image_grid().
    grid = EasyDict(size='8k', layout='random')
    # Options for dnnlib.submit_run().
    sc = dnnlib.SubmitConfig()
    # Options for tflib.init_tf().
    tf_config = {'rnd.np_random_seed': 1000}
    if not data_dir:
        data_dir = os.getcwd()
    if resume_from:
        train_opts.resume_pkl = resume_from
    train_opts.data_dir = data_dir
    train_opts.total_kimg = total_kimg
    train_opts.mirror_augment = mirror_augment
    train_opts.image_snapshot_ticks = train_opts.network_snapshot_ticks = save_ticks
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'
    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)
    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus
    assert config_id in _valid_configs
    desc += '-' + config_id
    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10
    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id:
            G.architecture = 'orig'
        if 'Gskip' in config_id:
            G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id:
            G.architecture = 'resnet'
        if 'Dorig' in config_id:
            D.architecture = 'orig'
        if 'Dskip' in config_id:
            D.architecture = 'skip'
        if 'Dresnet' in config_id:
            D.architecture = 'resnet'  # (default)
    # Configs A-D: Enable progressive growing and switch to networks that
    # support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'
    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')
    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train_opts.lazy_regularization = False
    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')
    if gamma is not None:
        D_loss.gamma = gamma
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train_opts)
    kwargs.update(
        G_args=G,
        D_args=D,
        G_opt_args=G_opt,
        D_opt_args=D_opt,
        G_loss_args=G_loss,
        D_loss_args=D_loss)
    kwargs.update(
        dataset_args=dataset_args,
        sched_args=sched,
        grid_args=grid,
        metric_arg_list=metrics,
        tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
|
{"/forambulator/test/test_download.py": ["/forambulator/download.py"], "/scripts/collect_process_data.py": ["/forambulator/images.py", "/forambulator/download.py"], "/forambulator/test/test_images.py": ["/forambulator/images.py"], "/forams/test/test_download.py": ["/forams/download.py"], "/forams/test/test_images.py": ["/forams/images.py"]}
|
26,841,414
|
metazool/forambulator
|
refs/heads/master
|
/forams/download.py
|
import json
import os
import logging
import errno
import requests, zipfile, io
logging.basicConfig(level=logging.INFO)
# Taxa summary endpoint and the per-taxon capsule download URL template
# ({0} = scientific name, {1} = number of images to request).
START = 'http://endlessforams.org/summary'
DATA = 'http://endlessforams.org/randomizer/download/{0}/{1}?download=capsule.zip'
def download_capsules(overwrite=False):
    """Fetch the taxa summary and download every taxon's image capsule.

    Args:
        overwrite: when True, re-download capsules whose target directory
            already exists on disk.
    """
    summary = requests.get(START).json()
    for taxon in summary['results']:
        # BUG FIX: `overwrite` was accepted here but never forwarded, so
        # existing data could never be refreshed.
        download_data(taxon['sci_name'], taxon['amount_images'],
                      overwrite=overwrite)
def download_data(name, number, overwrite=False):
    """Download one taxon's capsule zip and extract it into data/<name>.

    Args:
        name: scientific name of the taxon (spaces become underscores).
        number: number of images to request, e.g. from the summary entry
            {"sci_name": "Beella digitata", "amount_images": 40}.
        overwrite: when False, skip taxa whose directory already exists.
    """
    url = DATA.format(name, number)
    dir_name = os.path.join(os.getcwd(), 'data', name.replace(' ', '_'))
    try:
        os.makedirs(dir_name)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        elif not overwrite:
            # Directory already populated and caller did not ask to refresh.
            return
    r = requests.get(url, stream=True)
    try:
        z = zipfile.ZipFile(io.BytesIO(r.content))
        z.extractall(dir_name)
    except zipfile.BadZipFile:
        # FIX: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; only a malformed payload should be tolerated here.
        logging.error(f"not really a zipfile at {url}")
if __name__ == '__main__':
    # Script entry point: mirror every taxon capsule into ./data/.
    download_capsules()
|
{"/forambulator/test/test_download.py": ["/forambulator/download.py"], "/scripts/collect_process_data.py": ["/forambulator/images.py", "/forambulator/download.py"], "/forambulator/test/test_images.py": ["/forambulator/images.py"], "/forams/test/test_download.py": ["/forams/download.py"], "/forams/test/test_images.py": ["/forams/images.py"]}
|
26,841,415
|
metazool/forambulator
|
refs/heads/master
|
/forams/images.py
|
"""Utilities for image processing - based on stylegan-art forked here
https://github.com/metazool/stylegan-art/blob/master/dataset_tool.py
And building on the region thresholding examples in skimage
"""
import os
import logging
import numpy as np
import tensorflow as tf
import PIL.Image
from skimage.measure import label, regionprops
from skimage.transform import resize
import skimage.io
import skimage.filters
from skimage import img_as_ubyte
class TFRecordExporter:
    """Write images into per-resolution TFRecord files (TF1 ``tf.python_io`` API).

    One writer per level-of-detail is created lazily on the first
    ``add_image`` call; each subsequent image must have the same shape.
    Use as a context manager so writers are flushed on exit.
    """

    def __init__(self, tfrecord_dir, expected_images,
                 print_progress=True, progress_interval=10):
        """Create the output directory and remember export settings.

        Args:
            tfrecord_dir: directory for the ``.tfrecords`` files; also used
                as the filename prefix.
            expected_images: total number of images that will be added
                (used for progress display and the shuffled order).
            print_progress: echo progress to stdout.
            progress_interval: print every N images.
        """
        self.tfrecord_dir = tfrecord_dir
        self.tfr_prefix = os.path.join(
            self.tfrecord_dir, os.path.basename(
                self.tfrecord_dir))
        self.expected_images = expected_images
        self.cur_images = 0
        self.shape = None
        self.resolution_log2 = None
        self.tfr_writers = []
        self.print_progress = print_progress
        self.progress_interval = progress_interval
        if self.print_progress:
            print('Creating dataset "%s"' % tfrecord_dir)
        if not os.path.isdir(self.tfrecord_dir):
            os.makedirs(self.tfrecord_dir)
        assert os.path.isdir(self.tfrecord_dir)

    def close(self):
        """Flush and close every TFRecord writer."""
        if self.print_progress:
            print('%-40s\r' % 'Flushing data...', end='', flush=True)
        for tfr_writer in self.tfr_writers:
            tfr_writer.close()
        self.tfr_writers = []
        if self.print_progress:
            print('%-40s\r' % '', end='', flush=True)
            print('Added %d images.' % self.cur_images)

    # Note: Images and labels must be added in shuffled order.
    def choose_shuffled_order(self):
        """Return a deterministic (seed 123) shuffled index order."""
        order = np.arange(self.expected_images)
        np.random.RandomState(123).shuffle(order)
        return order

    def add_image(self, img):
        """Add one CHW image, writing it at every level of detail.

        The first image fixes the dataset shape: channels must be 1 or 3
        and the image must be a square power-of-two resolution.
        """
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print(
                '%d / %d\r' %
                (self.cur_images,
                 self.expected_images),
                end='',
                flush=True)
        if self.shape is None:
            # First image: lock in the shape and open one writer per LOD.
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.python_io.TFRecordOptions(
                tf.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + \
                    '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(
                    tf.python_io.TFRecordWriter(
                        tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                # 2x2 average-pool downsample for each successive LOD.
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] +
                       img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1

    def add_labels(self, labels):
        """Save one label row per added image as a ``-rxx.labels`` npy file."""
        if self.print_progress:
            print('%-40s\r' % 'Saving labels...', end='', flush=True)
        assert labels.shape[0] == self.cur_images
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, labels.astype(np.float32))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
def list_image_filenames(image_dir):
    """Recurse through image_dir, return paths to jpg/png files.

    Matching is case-insensitive and, as in the original, keyed on the
    bare suffixes 'jpg'/'png' (no dot required).
    """
    matches = []
    for root, dirnames, filenames in os.walk(image_dir):
        for filename in filenames:
            # str.endswith accepts a tuple — one call instead of an `or` chain.
            if filename.lower().endswith(('jpg', 'png')):
                matches.append(os.path.join(root, filename))
    return matches
def tfrecords_from_images(tfrecord_dir, image_dir, shuffle):
    """Convert a directory tree of images into multi-resolution TFRecords.

    Args:
        tfrecord_dir: output directory/prefix for the ``.tfrecords`` files.
        image_dir: directory searched recursively for jpg/png images.
        shuffle: when True, write images in a deterministic shuffled order.

    Raises:
        ValueError: no images found, or the first image is not a square
            power-of-two RGB/grayscale image.
    """
    print('Loading images from "%s"' % image_dir)
    image_filenames = list_image_filenames(image_dir)
    # Each invalid precondition used to only log and fall through: the
    # empty case then crashed with IndexError and the others produced bad
    # records (or tripped asserts in add_image). Fail fast instead.
    if len(image_filenames) == 0:
        logging.error('No input images found')
        raise ValueError('No input images found')
    img = np.asarray(PIL.Image.open(image_filenames[0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        logging.error('Input images must have the same width and height')
        raise ValueError('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        logging.error('Input image resolution must be a power-of-two')
        raise ValueError('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        logging.error('Input images must be stored as RGB or grayscale')
        raise ValueError('Input images must be stored as RGB or grayscale')
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            if channels == 1:
                img = img[np.newaxis, :, :]  # HW => CHW
            else:
                img = img.transpose([2, 0, 1])  # HWC => CHW
            tfr.add_image(img)
def best_guess_crop(props):
    """The foram will often be in the region with second biggest area
    It ought to be in the squarest area in the largest couple of regions
    Yes I am sure this probably would be more efficient with retinanet etc

    Args:
        props: regionprops-style objects exposing ``area``,
            ``minor_axis_length`` and ``major_axis_length``.

    Returns:
        The chosen region, or None when no plausible foram is found
        (empty input, or the best candidate's area is under 100).
    """
    # Fix: an empty props list used to raise ValueError on max([]).
    if not props:
        return None
    props = sorted(props, key=lambda prop: prop.area, reverse=True)
    # Squareness of the two largest regions (minor/major axis ratio).
    # NOTE(review): a degenerate region with major_axis_length == 0 would
    # still raise ZeroDivisionError here, as in the original.
    ratios = [prop.minor_axis_length / prop.major_axis_length
              for prop in props[0:2]]
    best_guess = props[ratios.index(max(ratios))]
    # In some cases we can't threshold the foram and select the
    # largest character instead; in which case return nothing
    if best_guess.area < 100:
        best_guess = None
    return best_guess
class NoForamFound(Exception):
    """Raised by crop_foram when no foram region can be identified or cropped."""
    pass
def crop_foram(filename, directory=None, size=256, pad=4):
    """Accepts a filename of an image collected from Endless Forams
    Finds the region with the actual foram in it, resizes,
    Saves the results in directory if specified,
    Returns the result of the crop
    Accepts image size (default 256) and padding around selection

    Raises NoForamFound when no region can be identified or when the
    padded bounding box cannot be resized.
    """
    image = skimage.io.imread(fname=filename)
    # NOTE(review): skimage.color is not explicitly imported at the top of
    # this file; this relies on skimage's lazy submodule loading — confirm.
    image = skimage.color.rgb2gray(image)
    region = regions_threshold(image)
    # In some cases Yen threshold fails, we see the whole image; use Otsu
    # There must be several better ways
    if not region or region.area > 100000:
        region = regions_threshold(image, method=skimage.filters.threshold_otsu)
    if not region:
        raise NoForamFound("couldn't identify the foram")
    minr, minc, maxr, maxc = region.bbox
    cropped = None
    try:
        # Pad the bounding box, then resize to a square of `size` pixels.
        cropped = resize(image[minr-pad:maxr+pad, minc-pad:maxc+pad],
                         (size, size),
                         preserve_range=True)
    except ValueError:
        raise NoForamFound("couldnt resize the crop")
    if directory:
        if not os.path.exists(directory):
            os.makedirs(directory)
        # save each cropped image by its original filename
        filename = filename.split('/')[-1]
        filename = filename.replace('.jpg', '.png')
        skimage.io.imsave(os.path.join(directory, filename), cropped)
    return cropped
def regions_threshold(image, method=skimage.filters.threshold_yen):
    """Threshold `image` with `method`, label the foreground, and return
    the best-guess foram region (or None if none qualifies)."""
    threshold_value = method(image)
    foreground = image > threshold_value
    labelled = label(foreground, connectivity=foreground.ndim)
    return best_guess_crop(regionprops(labelled))
|
{"/forambulator/test/test_download.py": ["/forambulator/download.py"], "/scripts/collect_process_data.py": ["/forambulator/images.py", "/forambulator/download.py"], "/forambulator/test/test_images.py": ["/forambulator/images.py"], "/forams/test/test_download.py": ["/forams/download.py"], "/forams/test/test_images.py": ["/forams/images.py"]}
|
26,917,320
|
smruthi19/BudgetRoyale
|
refs/heads/master
|
/budgetroyale/views.py
|
from django.shortcuts import render
from .models import User, BudgetSubmission, Room
# Create your views here.
def index(request):
    """Landing page; a POST request sends the visitor to the room page."""
    template = 'room.html' if request.method == "POST" else 'index.html'
    return render(request, template)
def judge(request):
    """Render the judge page."""
    return render(request, 'judge.html')
def room(request):
    """Render the room page for GET requests.

    Fix: Django upper-cases ``request.method``, so the original lowercase
    comparison ``== "get"`` could never match and the view returned None
    (a ValueError at runtime in Django).
    """
    if(request.method == "GET"):
        #arham got this part, set up urls already
        return render(request, 'room.html')
def submit(request):
    """Render the submit page for POST requests.

    Fix: Django upper-cases ``request.method``, so the original lowercase
    comparison ``== "post"`` could never match and the view returned None.
    """
    if(request.method == "POST"):
        return render(request, 'submit.html')
|
{"/budgetroyale/serializers.py": ["/budgetroyale/models.py"], "/budgetroyale/urls.py": ["/budgetroyale/models.py", "/budgetroyale/views.py"], "/budgetroyale/admin.py": ["/budgetroyale/models.py"], "/budgetroyale/views.py": ["/budgetroyale/models.py", "/budgetroyale/serializers.py"]}
|
27,136,543
|
Shedarshian/chiharu
|
refs/heads/develop
|
/chiharu/plugins/games/achievement_command.py
|
from .achievement import _all
from .. import config
from ..inject import on_command
from nonebot import CommandSession, get_bot, permission
# Entry command for the game hall. The Chinese docstring below doubles as
# the user-facing help text shown by the bot, so it is left untranslated.
@on_command('game', only_to_me=False, short_des="\U0001F6AA七海千春游戏大厅\U0001F6AA")
@config.ErrorHandle
async def game_center(session: CommandSession):
    """欢迎使用-game 指令访问七海千春游戏大厅~"""
    # Dispatch on the raw argument text: empty -> general help,
    # 'card' -> card-game menu, anything else -> not found.
    if session.current_arg_text == '':
        await session.send(config.game_center_help)
    elif session.current_arg_text == 'card':
        await session.send(config.center_card)
    else:
        await session.send('game not found')
# Register the 'achievement' command group (help text: "成就系统。").
config.CommandGroup('achievement', short_des='成就系统。')
@on_command(('achievement', 'check'), only_to_me=False, args='[name]')
@config.ErrorHandle
async def check(session: CommandSession):
    """查看成就信息。"""
    qq = session.ctx['user_id']
    # Linear search over all achievements for an exact name match that is
    # either not hidden or already unlocked by this user.
    for key, val in _all.items():
        if session.current_arg_text == val.val['name'] and ('hide' not in val.val or val.check(qq)):
            # session.finish raises to end the command, so the for/else
            # below only runs when no achievement matched.
            session.finish(val.get_des(qq))
    else:
        await session.send('未发现此成就。')
@on_command(('achievement', 'list'), only_to_me=False)
@config.ErrorHandle
async def achievement_list(session: CommandSession):
    """列出已获得成就。"""
    qq = session.ctx['user_id']
    # One line per achievement, ordered by numeric id.
    await session.send('成就列表:\n\t' + '\n\t'.join(val.get_brief(qq) for key, val in sorted(_all.items(), key=lambda x: x[1].val['id'])))
|
{"/chiharu/plugins/games/logic_dragon/QQSession.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Game.py": ["/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/AllItems.py"], "/chiharu/plugins/games/logic_dragon/Equipment.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/User.py": ["/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/Types.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py"], "/chiharu/plugins/games/logic_dragon/Card.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], 
"/chiharu/plugins/games/logic_dragon/Status.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/EventListener.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Priority.py"], "/chiharu/plugins/games/yahtzee.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/achievement_command.py": ["/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/game.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/net.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/AllCards0.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Item.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Dragon.py": ["/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/misc.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/birth.py", 
"/chiharu/plugins/games/achievement.py", "/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/config.py", "/chiharu/plugins/helper/dice/dice.py"], "/chiharu/plugins/games/splendor_duel.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/birth.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/tiemu.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon_type.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/games/maj.py", "/chiharu/plugins/misc.py"], "/chiharu/plugins/games/chiharu.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/if.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/xiangqi.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/thwiki.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/logic_dragon/UserData.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/games/witness_parse.py": ["/chiharu/plugins/helper/witness/symbol.py"], "/chiharu/plugins/games/ccs.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards1.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", 
"/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/AllEquipments.py": ["/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/snakebird.py": ["/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/zhu_core.py": ["/chiharu/plugins/games/cardboard.py"], "/chiharu/plugins/games/ccs_command.py": ["/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_board.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sausage.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/games/logic_dragon/Document.py": ["/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/logic_dragon/AllCards4.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/config.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/AllCardsDLC8.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", 
"/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/ccs_board.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py"], "/chiharu/plugins/games/logic_dragon/AllItems.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/inject.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/mbf.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/help.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/bw.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/pig.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/logic_dragon/AllCards6.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards2.py"], 
"/chiharu/plugins/games/achievement.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/ccs_player.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards2.py": ["/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/dyson_sphere.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/jetstream.py": ["/chiharu/plugins/games/boxgame.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/Attack.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Mission.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/AllCards3.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards1.py"], "/chiharu/plugins/games/ccs_extra.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_player.py", 
"/chiharu/plugins/games/ccs_helper.py"], "/chiharu/plugins/math.py": ["/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/alarm.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/Helper.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/ccs_tile.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/ccs_helper.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/witness.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sokobond.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/eventer.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/maj_command.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/solver.py": ["/chiharu/plugins/inject.py"]}
|
27,136,544
|
Shedarshian/chiharu
|
refs/heads/develop
|
/chiharu/plugins/game.py
|
from ast import Call
from typing import Callable, Iterable, Tuple, Any, Awaitable, List, Dict, TypedDict
from abc import ABC, abstractmethod
import json
import random
from . import config
from .inject import on_command
from nonebot import CommandSession, get_bot, permission, on_natural_language, NLPSession, IntentCommand
# example usage for GameSameGroup:
# xiangqi = GameSameGroup('xiangqi')
#
# @xiangqi.begin_uncomplete(('play', 'xiangqi', 'begin'), (2, 2))
# async def chess_begin_uncomplete(session: CommandSession, data: Dict[str, Any]):
# # data: {'players': [qq], 'args': [args], 'anything': anything}
# await session.send('已为您安排红方,等候黑方')
#
# @xiangqi.begin_complete(('play', 'xiangqi', 'confirm'))
# async def chess_begin_complete(session: CommandSession, data: Dict[str, Any]):
# # data: {'players': [qq], 'game': GameSameGroup instance, 'args': [args], 'anything': anything}
# await session.send('已为您安排黑方')
# #开始游戏
# #data['board'] = board
#
# @xiangqi.end(('play', 'xiangqi', 'end'))
# async def chess_end(session: CommandSession, data: Dict[str, Any]):
# await session.send('已删除')
#
# @xiangqi.process(only_short_message=True)
# async def chess_process(session: NLPSession, data: Dict[str, Any], delete_func: Awaitable):
# pass
#
class ChessError(BaseException):
    """Base exception used by game handlers to abort begin/confirm flows.

    NOTE(review): derives from BaseException, not Exception — presumably
    so generic `except Exception` handlers don't swallow it; confirm.
    """
    def __init__(self, arg):
        # Store the single message as the exception's args.
        self.args = [arg]
class ChessWin(ChessError):
    """ChessError subclass; the name suggests it signals a finished/won game."""
    pass
# Register the hidden 'play' command group that hosts the game commands.
config.CommandGroup('play', hide=True)
class GameSameGroup:
    """Match-making and lifecycle for a game played inside one QQ group.

    ``uncomplete`` holds in-progress match-making per group id; ``center``
    holds running games per group id. The ``begin_uncomplete`` /
    ``begin_complete`` / ``end`` / ``process`` methods are decorator
    factories: applying them registers the bot command handlers, so the
    decorators must all be applied for the game to work.
    """
    # group_id: [{'players': [qq], 'game': GameSameGroup instance, 'anything': anything}]
    def __init__(self, name: str, can_private=False):
        # group_id: {'players': [qq], 'anything': anything}
        self.uncomplete: dict[int, dict[str, Any]] = {}
        self.name = name
        self.can_private = can_private
        self.center: dict[int, list[dict[str, Any]]] = {}
    def begin_uncomplete(self, command: Iterable[str], player: Tuple[int, int]):
        """Record the begin command and (min, max) player counts; the
        decorated coroutine runs while the lobby is still filling.
        Note: the actual command handler is registered in begin_complete."""
        self.begin_command = command
        self.begin_player = player
        def _(_i: Awaitable) -> Awaitable:
            self.uncomplete_func = _i
            return _i
        return _
    def begin_complete(self, confirm_command: Iterable[str]):
        """Register both the begin and confirm command handlers; the
        decorated coroutine initializes game state once a lobby starts."""
        self.confirm_command = confirm_command
        def _(_f: Awaitable) -> Awaitable:
            self.complete_func = _f
            @on_command(self.begin_command, only_to_me=False, hide=True)
            @config.ErrorHandle
            async def _g(session: CommandSession):
                # Resolve the "group": private chats use the user id when
                # allowed, otherwise the player is told to play in a group.
                try:
                    group_id = int(session.ctx['group_id'])
                except KeyError:
                    if self.can_private:
                        group_id = int(session.ctx['user_id'])
                    else:
                        await session.send("请在群里玩")
                        return
                qq = int(session.ctx['user_id'])
                # Reject if this game, or this player, is already active here.
                if group_id in self.center:
                    for dct in self.center[group_id]:
                        if self is dct['game']:
                            await session.send('本群已有本游戏进行中')
                            return
                        elif qq in dct['players']:
                            await session.send('您在本群正在游戏中')
                            return
                # Join an existing lobby or open a new one.
                if group_id in self.uncomplete:
                    if qq in self.uncomplete[group_id]['players']:
                        await session.send('您已参加本游戏匹配,请耐心等待')
                        return
                    self.uncomplete[group_id]['players'].append(qq)
                    self.uncomplete[group_id]['args'].append(
                        session.current_arg_text)
                else:
                    self.uncomplete[group_id] = {'players': [
                        qq], 'args': [session.current_arg_text]}
                # Player cap reached: start the game immediately.
                if len(self.uncomplete[group_id]['players']) == self.begin_player[1]:
                    dct = self.uncomplete.pop(group_id)
                    dct['game'] = self
                    try:
                        await _f(session, dct) # add data to dct
                    except ChessError:
                        return
                    if group_id in self.center:
                        self.center[group_id].append(dct)
                    else:
                        self.center[group_id] = [dct]
                    bot = get_bot()
                    for group in config.group_id_dict['log']:
                        await bot.send_group_msg(group_id=group, message='%s begin in group %s' % (self.name, group_id))
                    return
                await self.uncomplete_func(session, self.uncomplete[group_id])
            @on_command(confirm_command, only_to_me=False, hide=True)
            @config.ErrorHandle
            async def _h(session: CommandSession):
                # Confirm command: start early once the minimum is reached.
                try:
                    group_id = int(session.ctx['group_id'])
                except KeyError:
                    if self.can_private:
                        group_id = int(session.ctx['user_id'])
                    else:
                        await session.send("请在群里玩")
                        return
                qq = int(session.ctx['user_id'])
                if group_id not in self.uncomplete:
                    return
                if len(self.uncomplete[group_id]['players']) < self.begin_player[0]:
                    await session.send('匹配人数未达下限,请耐心等待')
                else:
                    dct = self.uncomplete.pop(group_id)
                    dct['game'] = self
                    try:
                        await _f(session, dct) # add data to dct
                    except ChessError:
                        return
                    if group_id in self.center:
                        self.center[group_id].append(dct)
                    else:
                        self.center[group_id] = [dct]
                    bot = get_bot()
                    for group in config.group_id_dict['log']:
                        await bot.send_group_msg(group_id=group, message='%s begin in group %s' % (self.name, group_id))
            return _f
        return _
    def end(self, end_command: Iterable[str]):
        """Register the end command; a player in the game, or a group admin,
        may terminate a running game or an unfilled lobby."""
        self.end_command = end_command
        def _(_f: Awaitable) -> Awaitable:
            @on_command(end_command, only_to_me=False, hide=True)
            @config.ErrorHandle
            async def _g(session: CommandSession):
                try:
                    group_id = int(session.ctx['group_id'])
                except KeyError:
                    if self.can_private:
                        group_id = int(session.ctx['user_id'])
                    else:
                        await session.send("请在群里玩")
                        return
                qq = int(session.ctx['user_id'])
                is_admin = await permission.check_permission(get_bot(), session.ctx, permission.GROUP_ADMIN)
                if_in = False
                if group_id in self.center:
                    l = list(
                        filter(lambda x: x['game'] is self, self.center[group_id]))
                    if_in = is_admin or (len(l) != 0 and qq in l[0]['players'])
                    if if_in and len(l) != 0:
                        await _f(session, l[0])
                        self.center[group_id].remove(l[0]) # use a delete helper instead?
                        bot = get_bot()
                        for group in config.group_id_dict['log']:
                            await bot.send_group_msg(group_id=group, message='%s end in group %s' % (self.name, group_id))
                elif group_id in self.uncomplete and (is_admin or qq in self.uncomplete[group_id]['players']):
                    await _f(session, self.uncomplete[group_id])
                    self.uncomplete.pop(group_id)
            return _f
        return _
    def process(self, only_short_message: bool = True):
        """Register a natural-language handler that forwards in-game
        messages from current players to the decorated coroutine, together
        with a delete callback that removes the game from ``center``."""
        def _(_f: Awaitable) -> Awaitable:
            @on_natural_language(only_to_me=False, only_short_message=only_short_message)
            async def _g(session: NLPSession): # maybe merge these handlers later?
                try:
                    group_id = int(session.ctx['group_id'])
                except KeyError:
                    if self.can_private:
                        group_id = int(session.ctx['user_id'])
                    else:
                        return
                qq = int(session.ctx['user_id'])
                if group_id not in self.center:
                    return
                l = list(filter(lambda x: x['game']
                                is self, self.center[group_id]))
                if len(l) == 0 or qq not in l[0]['players']:
                    return
                async def _h():
                    # Delete callback handed to the game handler.
                    self.center[group_id].remove(l[0])
                    bot = get_bot()
                    for group in config.group_id_dict['log']:
                        await bot.send_group_msg(group_id=group, message='%s end in group %s' % (self.name, group_id))
                return await _f(session, l[0], _h)
            return _g
        return _
    def open_data(self, qq):
        """Load this game's per-user saved data, or {} if absent."""
        try:
            with open(config.rel(f'games\\user_data\\{qq}.json'), encoding='utf-8') as f:
                data = json.load(f)
            if self.name not in data:
                return {}
            return data[self.name]
        except FileNotFoundError:
            return {}
    def save_data(self, qq, data_given):
        """Write this game's per-user data, preserving other games' entries."""
        try:
            with open(config.rel(f'games\\user_data\\{qq}.json'), encoding='utf-8') as f:
                data = json.load(f)
        except FileNotFoundError:
            data = {}
        data[self.name] = data_given
        with open(config.rel(f'games\\user_data\\{qq}.json'), 'w', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False,
                               indent=4, separators=(',', ': ')))
    @classmethod
    async def get_name(cls, session: CommandSession):
        """Return the member's group card (or nickname) for display,
        falling back to the bare QQ number when the lookup fails."""
        import aiocqhttp
        qq = session.ctx['user_id']
        group = session.ctx['group_id']
        try:
            c = await get_bot().get_group_member_info(group_id=group, user_id=qq)
            if c['card'] == '':
                name = c['nickname']
            else:
                name = c['card']
        except aiocqhttp.exceptions.ActionFailed:
            name = str(qq)
        return name
# example usage for GamePrivate:
# maj = GamePrivate('maj')
#
# @maj.begin_uncomplete(('play', 'maj', 'begin'), (4, 4))
# async def chess_begin_uncomplete(session: CommandSession, data: Dict[str, Any]):
# # data: {'players': [qq], 'public': bool, 'type': type_str, 'game': GamePrivate instance, 'group': group, 'anything': anything}
# # args: -play.maj.begin 'type_str public/private+password' or '友人房id+password(optional)'
# await session.send('已为您参与匹配')
class TRoomPrivate(TypedDict, total=False):
    """Shape of one GamePrivate room record (all keys optional)."""
    players: list[int]       # QQ ids of players in the room
    public: bool             # whether the room is joinable without a password
    id: int                  # room number shown to players
    type: str                # game sub-type key (see GamePrivate.types)
    game: 'GamePrivate'      # owning GamePrivate instance
    password: str | None     # join password for private rooms
class GamePrivate:
def __init__(self, name: str, allow_group_live: bool = True):
# room_id: {'players': [qq], 'public': bool, 'id': room_id, 'type': type_str, 'game': GamePrivate instance, 'group': group, 'anything': anything}
self.center: dict[int, TRoomPrivate] = {}
self.uncomplete: dict[int, TRoomPrivate] = {} # room_id: dct
self.players_status: dict[int, tuple[bool, TRoomPrivate]] = {} # qq: [bool: Complete, ptr to dct]
self.allow_group_live = allow_group_live
self.name = name
self.types = {'': (0, 32767)}
self.begin_command: tuple[str,...] = ()
self.confirm_command: tuple[str,...] = ()
self.quit_command: tuple[str,...] = ()
self.uncomplete_func: Callable[[CommandSession, TRoomPrivate], Awaitable] = None
self.complete_func: Callable[[CommandSession, TRoomPrivate], Awaitable] = None
def set_types(self, types: Dict[str, Tuple[int, int]]):
self.types = types
def begin_uncomplete(self, command: tuple[str,...], player: Tuple[int, int] = (0, 32767)):
self.begin_command = command
if '' in self.types:
self.types[''] = player
def _(_i: Callable[[CommandSession, TRoomPrivate], Awaitable]) \
-> Callable[[CommandSession, TRoomPrivate], Awaitable]:
self.uncomplete_func = _i
return _i
return _
def begin_complete(self, confirm_command: tuple[str, ...]):
    """Register the room-lifecycle command handlers.

    Stores *confirm_command* and returns a decorator.  The decorated
    coroutine ``_f(session, room_dict)`` is called once when a room's
    members confirm the start; it should fill the room dict with
    game-specific data.  As a side effect two command handlers are
    registered:

    * ``self.begin_command`` — create or join a room (``begin_uncomplete``
      must have been called first so ``self.begin_command`` is set);
    * ``self.confirm_command`` — start the game in the caller's room.
    """
    self.confirm_command = confirm_command

    def _(_f):
        self.complete_func = _f

        @on_command(self.begin_command, only_to_me=False, hide=True)
        @config.ErrorHandle
        async def _g(session: CommandSession):
            qq = int(session.ctx['user_id'])
            s = session.current_arg_text.strip()
            n = s.split(' ')
            room_id = None
            password = None
            # Accepted argument forms:
            #   ''                      -> public room of the default type
            #   'public'/'private'     -> default type, chosen visibility
            #   '<type>'                -> public room of that type
            #   '<type> public/private' -> explicit visibility
            #   '<type> private <pw>'   -> private room with password
            #   '<room_id> [<pw>]'      -> join an existing room
            try:
                if len(n) == 0:
                    # Unreachable: str.split(' ') never returns an empty list;
                    # kept for safety.
                    if '' in self.types:
                        public = True
                        typ = ''
                    else:
                        raise FileNotFoundError
                if len(n) == 1:
                    if s in {'public', 'private'} and '' in self.types:
                        public = s == 'public'
                        typ = ''
                    elif s in self.types:
                        public = True
                        typ = s
                    elif s.isdigit():
                        room_id = int(s)
                        password = None
                    else:
                        raise FileNotFoundError
                elif len(n) == 2:
                    # BUGFIX: was the typo 'priavte', so "private <pw>" never matched.
                    if n[0] == 'private' and '' in self.types:
                        public = False
                        password = n[1]
                        typ = ''
                    elif n[0] in self.types and n[1] in {'public', 'private'}:
                        public = n[1] == 'public'
                        typ = n[0]
                    elif n[0].isdigit():
                        # BUGFIX: was int(s), which raised ValueError for
                        # "<room_id> <password>" because s still contains the space.
                        room_id = int(n[0])
                        password = n[1]
                    else:
                        raise FileNotFoundError
                elif len(n) == 3:
                    if n[0] in self.types and n[1] == 'private':
                        public = False
                        typ = n[0]
                        password = n[2]
                    else:
                        raise FileNotFoundError
                else:
                    # BUGFIX: four or more tokens previously fell through with
                    # `public`/`typ` unbound.
                    raise FileNotFoundError
            except FileNotFoundError:
                await session.send('未发现此分类,支持分类:\n' + ','.join(self.types))
                return
            # BUGFIX: check room_id first — on the join path `public` is unbound.
            if room_id is None and not public and password is None:
                await session.send('请在private空格后输入房间密码')
            elif qq in self.players_status:
                await session.send('不能同时进行两个同一游戏')
            elif password is not None and not password.encode('utf-8').isalnum():
                await session.send('密码只能包含字母与数字!')
            elif room_id is not None:
                # Join an existing, not-yet-started room.
                room = self.uncomplete.get(room_id)
                if room is None:
                    if room_id in self.center:
                        await session.send('此房间对战已开始')
                    else:
                        await session.send('未发现此房间')
                elif not room['public'] and password is None:
                    await session.send('此房间为private房间,请输入密码')
                elif not room['public'] and password != room['password']:
                    await session.send('密码错误!')
                elif len(room['players']) == self.types[room['type']][1]:
                    await session.send('房间已满!')
                else:
                    room['players'].append(qq)
                    self.players_status[qq] = (False, room)
                    full = len(room["players"]) == self.types[room['type']][1]
                    msg = f'玩家{qq}已加入房间{room_id},现有{len(room["players"])}人' + (
                        ',已满' if full else '')
                    await self.send(room, msg)
                    await self.uncomplete_func(session, room)
            else:
                # Create a new room with a random id taken from the lowest
                # 1000-wide band that still has a free id.
                prefix = 0
                while 1:
                    r = [i for i in range(
                        prefix, prefix + 1000) if i not in self.center and i not in self.uncomplete]
                    if len(r) == 0:
                        prefix += 1000
                    else:
                        break
                room_id = random.choice(r)
                room = self.uncomplete[room_id] = {'players': [
                    qq], 'public': public, 'type': typ, 'game': self, 'id': room_id, 'password': None}
                if not public and password is not None:
                    room['password'] = password
                self.players_status[qq] = (False, room)
                await session.send(f'已创建{"公开" if public else "非公开"}房间 {room_id}')
                await self.uncomplete_func(session, room)

        @on_command(self.confirm_command, only_to_me=False, hide=True)
        @config.ErrorHandle
        async def _h(session: CommandSession):
            qq = int(session.ctx['user_id'])
            if qq not in self.players_status:
                return
            begin, room = self.players_status[qq]
            if begin:
                await session.send("房间对战已开始")
            elif len(room['players']) < self.types[room['type']][0]:
                await session.send('匹配人数未达下限,请耐心等待')
            else:
                room_id = room["id"]
                dct = self.uncomplete.pop(room_id)
                # _f fills the popped room dict with game-specific data.
                # NOTE(review): players_status entries still carry begin=False
                # here — presumably _f (or the game itself) flips them; confirm
                # against callers.
                await _f(session, dct)
                self.center[room_id] = dct
                bot = get_bot()
                for group in config.group_id_dict['log']:
                    await bot.send_group_msg(group_id=group, message='%s begin in roomid %i' % (self.name, room_id))
        return _f
    return _
def quit(self, quit_command: tuple[str, ...]):
    """Register the quit command.

    The decorated coroutine ``_f(session, room)`` is invoked for cleanup
    whenever quitting actually ends a game or closes a room.  The
    registered handler behaves as follows:

    * game already begun  -> abort the game for every member;
    * last player in room -> close the room;
    * otherwise           -> the caller simply leaves the room.
    """
    self.quit_command = quit_command

    def _(_f):
        @on_command(quit_command, only_to_me=False, hide=True)
        @config.ErrorHandle
        async def _g(session: CommandSession):
            qq = int(session.ctx['user_id'])
            if qq not in self.players_status:
                return
            begin, room = self.players_status[qq]
            if begin:
                await _f(session, room)
                self.end_room(room)
                await self.send(room, f"玩家{qq}已中止此游戏。")
            elif len(room['players']) == 1:
                await _f(session, room)
                self.end_room(room)
                await session.send("已退出房间。房间已关闭。")
            else:
                # BUGFIX: list.pop(qq) removed by *index* (and raised
                # IndexError for any real QQ id); remove by value instead.
                room['players'].remove(qq)
                self.players_status.pop(qq)
                await self.send(room, f"玩家{qq}已退出此房间,此房间剩余:{','.join(f'玩家{q}' for q in room['players'])}")
        return _f
    return _
def process(self, only_short_message: bool = True):
    """Return a decorator registering the in-game message handler.

    The decorated coroutine receives ``(session, room, end)`` where
    ``end`` is an awaitable closure that closes the room and logs the
    game end to the configured log groups.
    """
    def _(func):
        @on_natural_language(only_to_me=False, only_short_message=only_short_message)
        async def handler(session: NLPSession):  # TODO: maybe merge with the other handlers later
            qq = int(session.ctx['user_id'])
            status = self.players_status.get(qq)
            if status is None:
                return
            begin, room = status
            if not begin:
                # Room exists but the game has not started yet.
                return

            async def end():
                self.end_room(room)
                bot = get_bot()
                for group in config.group_id_dict['log']:
                    await bot.send_group_msg(group_id=group, message='%s end in room %i' % (self.name, room['id']))

            return await func(session, room, end)
        return func
    return _
def end_room(self, room: TRoomPrivate):
    """Tear a room down: forget every member's status entry and drop
    the room from whichever registry (waiting or running) holds it."""
    for member in room['players']:
        self.players_status.pop(member)
    rid = room['id']
    if rid in self.uncomplete:
        del self.uncomplete[rid]
    elif rid in self.center:
        del self.center[rid]
async def send(self, room: TRoomPrivate, msg: str):
    """Deliver *msg* to every member of the room as a private message."""
    for member in room['players']:
        await get_bot().send_private_msg(user_id=member, message=msg)
async def send_private(self, player: int, msg: str):
    """Deliver *msg* to a single player as a private message."""
    bot = get_bot()
    await bot.send_private_msg(user_id=player, message=msg)
|
{"/chiharu/plugins/games/logic_dragon/QQSession.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Game.py": ["/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/AllItems.py"], "/chiharu/plugins/games/logic_dragon/Equipment.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/User.py": ["/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/Types.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py"], "/chiharu/plugins/games/logic_dragon/Card.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], 
"/chiharu/plugins/games/logic_dragon/Status.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/EventListener.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Priority.py"], "/chiharu/plugins/games/yahtzee.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/achievement_command.py": ["/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/game.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/net.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/AllCards0.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Item.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Dragon.py": ["/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/misc.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/birth.py", 
"/chiharu/plugins/games/achievement.py", "/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/config.py", "/chiharu/plugins/helper/dice/dice.py"], "/chiharu/plugins/games/splendor_duel.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/birth.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/tiemu.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon_type.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/games/maj.py", "/chiharu/plugins/misc.py"], "/chiharu/plugins/games/chiharu.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/if.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/xiangqi.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/thwiki.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/logic_dragon/UserData.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/games/witness_parse.py": ["/chiharu/plugins/helper/witness/symbol.py"], "/chiharu/plugins/games/ccs.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards1.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", 
"/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/AllEquipments.py": ["/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/snakebird.py": ["/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/zhu_core.py": ["/chiharu/plugins/games/cardboard.py"], "/chiharu/plugins/games/ccs_command.py": ["/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_board.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sausage.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/games/logic_dragon/Document.py": ["/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/logic_dragon/AllCards4.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/config.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/AllCardsDLC8.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", 
"/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/ccs_board.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py"], "/chiharu/plugins/games/logic_dragon/AllItems.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/inject.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/mbf.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/help.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/bw.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/pig.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/logic_dragon/AllCards6.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards2.py"], 
"/chiharu/plugins/games/achievement.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/ccs_player.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards2.py": ["/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/dyson_sphere.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/jetstream.py": ["/chiharu/plugins/games/boxgame.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/Attack.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Mission.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/AllCards3.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards1.py"], "/chiharu/plugins/games/ccs_extra.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_player.py", 
"/chiharu/plugins/games/ccs_helper.py"], "/chiharu/plugins/math.py": ["/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/alarm.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/Helper.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/ccs_tile.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/ccs_helper.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/witness.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sokobond.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/eventer.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/maj_command.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/solver.py": ["/chiharu/plugins/inject.py"]}
|
27,136,545
|
Shedarshian/chiharu
|
refs/heads/develop
|
/config.py
|
# NoneBot 1.x configuration: start from the framework defaults, then override.
from nonebot.default_config import *
# Prefix that marks a message as a command, e.g. "-help".
COMMAND_START = {'-'}
# Bind address/port for the bot's HTTP/WebSocket server.
HOST = '127.0.0.1'
PORT = 8080
# QQ ids granted superuser privileges.
SUPERUSERS = {1569603950, 1440962524}
# Names the bot answers to ("Chiharu" / "Nanami Chiharu").
NICKNAME = ('千春', '七海千春')
MAX_VALIDATION_FAILURES = 0
from datetime import timedelta  # NOTE(review): imported but unused in this file — confirm before removing
# None disables the per-command run timeout and session expiry.
SESSION_RUN_TIMEOUT = None
SESSION_EXPIRE_TIMEOUT = None
|
{"/chiharu/plugins/games/logic_dragon/QQSession.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Game.py": ["/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/AllItems.py"], "/chiharu/plugins/games/logic_dragon/Equipment.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/User.py": ["/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/Types.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py"], "/chiharu/plugins/games/logic_dragon/Card.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], 
"/chiharu/plugins/games/logic_dragon/Status.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/EventListener.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Priority.py"], "/chiharu/plugins/games/yahtzee.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/achievement_command.py": ["/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/game.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/net.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/AllCards0.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Item.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Dragon.py": ["/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/misc.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/birth.py", 
"/chiharu/plugins/games/achievement.py", "/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/config.py", "/chiharu/plugins/helper/dice/dice.py"], "/chiharu/plugins/games/splendor_duel.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/birth.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/tiemu.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon_type.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/games/maj.py", "/chiharu/plugins/misc.py"], "/chiharu/plugins/games/chiharu.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/if.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/xiangqi.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/thwiki.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/logic_dragon/UserData.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/games/witness_parse.py": ["/chiharu/plugins/helper/witness/symbol.py"], "/chiharu/plugins/games/ccs.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards1.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", 
"/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/AllEquipments.py": ["/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/snakebird.py": ["/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/zhu_core.py": ["/chiharu/plugins/games/cardboard.py"], "/chiharu/plugins/games/ccs_command.py": ["/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_board.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sausage.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/games/logic_dragon/Document.py": ["/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/logic_dragon/AllCards4.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/config.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/AllCardsDLC8.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", 
"/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/ccs_board.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py"], "/chiharu/plugins/games/logic_dragon/AllItems.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/inject.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/mbf.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/help.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/bw.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/pig.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/logic_dragon/AllCards6.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards2.py"], 
"/chiharu/plugins/games/achievement.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/ccs_player.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards2.py": ["/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/dyson_sphere.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/jetstream.py": ["/chiharu/plugins/games/boxgame.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/Attack.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Mission.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/AllCards3.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards1.py"], "/chiharu/plugins/games/ccs_extra.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_player.py", 
"/chiharu/plugins/games/ccs_helper.py"], "/chiharu/plugins/math.py": ["/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/alarm.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/Helper.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/ccs_tile.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/ccs_helper.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/witness.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sokobond.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/eventer.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/maj_command.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/solver.py": ["/chiharu/plugins/inject.py"]}
|
27,136,546
|
Shedarshian/chiharu
|
refs/heads/develop
|
/chiharu/plugins/net.py
|
import asyncio
import requests
import re
import html
import paramiko, paramiko_expect
import ncmbot
import json
import random
import traceback
import functools
import difflib
from datetime import datetime, timedelta
from nonebot import on_command, CommandSession, permission, get_bot, scheduler
from . import config
from .inject import on_command
# Scrape https://www.eventernote.com for all events on the given date.
# Returns a list of lightweight record objects with attributes:
#   url, name, place (str or None), actor (list of Actor), note (int).
async def Event(year, month, day):
loop = asyncio.get_event_loop()
# requests.get blocks, so run it in the default thread-pool executor.
url = await loop.run_in_executor(None, requests.get,
"https://www.eventernote.com/events/search?year=%s&month=%s&day=%s" % (year, month, day))
text = url.text
# Generator that walks the HTML with regexes; begin_pos is a cursor
# advanced past each consumed match.
def _f(text):
# Anonymous record type for one parsed event.
class _c:
pass
class Actor:
def __init__(self, url, name):
self.url = url
self.name = name
begin_pos = 0
while 1:
# One iteration per '<div class="event">' section.
match_pos = re.search('<div class="event">', text[begin_pos:])
if not match_pos:
break
begin_pos += match_pos.span()[1]
name_match = re.search(
'<h4><a href="(.*?)">(.*?)</a></h4>', text[begin_pos:], re.S)
if not name_match:
break
m = _c()
m.url = name_match.group(1)
m.name = name_match.group(2)
# NOTE(review): this search is not bounded to the current event's
# section — if the current event lacks a place div it may match a
# later event's. Confirm against real page markup.
place_match = re.search(
'<div class="place">(.*?)</div>', text[begin_pos:], re.S)
if not place_match:
m.place = None
else:
begin_pos += place_match.span()[1]
# Strip tags, then whitespace, from the place text.
m.place = re.sub('\n|\t| ', '', re.sub(
'<.*?>', '', place_match.group(1)))
actors_match = re.search('<div class="actor">', text[begin_pos:])
m.actor = []
if actors_match:
begin_pos += actors_match.span()[1]
# Bound the actor scan at the next event section (or EOF-1).
next_match = re.search('<div class="event">', text[begin_pos:])
if next_match:
end_pos = begin_pos + next_match.span()[0]
else:
end_pos = -1
while 1:
actor_match = re.search(
'<li><a href="(.*?)">(.*?)</a></li>', text[begin_pos:end_pos], re.S)
if not actor_match:
break
begin_pos += actor_match.span()[1]
m.actor.append(
Actor(actor_match.group(1), actor_match.group(2)))
# note = attendance count shown on the page; defaults to 1 when absent.
note_match = re.search(
'<div class="note_count">.*?<p title=".*?">(.*?)</p>.*?</div>', text[begin_pos:], re.S)
if not note_match:
m.note = 1
else:
m.note = int(note_match.group(1))
begin_pos += note_match.span()[1]
yield m
return list(_f(text))
@on_command(('misc', 'event'), only_to_me=False, short_des="查询Event。", hide=True)
@config.ErrorHandle
async def event(session: CommandSession):
    """查询Event。"""
    # Fetch the day's events, then send one message per event whose
    # note count reaches the threshold parsed by the args_parser below.
    events = await Event(session.get('year'), session.get('month'), session.get('day'))
    threshold = session.get('max_note')
    messages = []
    for item in events:
        if item.note < threshold:
            continue
        names = [a.name for a in item.actor]
        # cap the cast list at 7 names
        if len(names) >= 7:
            cast = ', '.join(names[:7]) + '...'
        else:
            cast = ', '.join(names)
        messages.append("%s\n%s\n出演者: %s" % (item.name, item.place, cast))
    for msg in messages:
        # NOTE(review): `escape` is not among this module's visible
        # imports — presumably injected via .inject/config; confirm
        await session.send(escape(msg))
@event.args_parser
async def _(session: CommandSession):
    # Input: "YYYY MM DD [max_note]"; max_note defaults to 100.
    fields = session.current_arg_text.split(' ')
    if len(fields) == 3:
        session.args['year'], session.args['month'], session.args['day'] = fields
        session.args['max_note'] = 100
    else:
        year, month, day, note_str = fields
        session.args['year'] = year
        session.args['month'] = month
        session.args['day'] = day
        session.args['max_note'] = int(note_str)
# --- module-level SSH session state for the 'boss' job-monitoring commands ---
interact = None
# shell prompt pattern on the lxslc login nodes (regex for paramiko-expect)
PROMPT = '.*qity@.*>\s*'
isLoggedin = False
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# persisted flags: BossCheck = completion check armed, BeginCheck = polling enabled
with open(config.rel("boss_check.txt")) as f:
    BossCheck = bool(int(f.readline().strip('\n')))
with open(config.rel("boss_check_begin.txt")) as f:
    BeginCheck = bool(int(f.readline().strip('\n')))
# the password lives in a local file and is dropped from memory right
# after the connection is established
with open(config.rel("QAQ.txt")) as f:
    p = f.readline().strip('\n')
ssh.connect("lxslc7.ihep.ac.cn", 22, 'qity', p)
del p
interact = paramiko_expect.SSHClientInteraction(ssh, timeout=10)
@scheduler.scheduled_job('date', id='boss_login', run_date=datetime.now() + timedelta(seconds=15))
async def login():
    """One-shot job ~15s after startup: wait for the shell prompt of the
    SSH session opened at import time, then announce readiness to the
    'boss' groups."""
    global isLoggedin
    # interact.expect is blocking (up to the SSH timeout); run it in the
    # default executor instead of stalling the event loop — same pattern
    # the -boss.login handler uses
    await asyncio.get_event_loop().run_in_executor(None, interact.expect, PROMPT)
    isLoggedin = True
    for group in config.group_id_dict['boss']:
        await get_bot().send_group_msg(group_id=group, message='boss logged in!')
# one-shot flags so repeated failures don't spam the groups every cycle
told_not_logged_in = False
told_permission_denied = False
config.CommandGroup('boss', hide=True)
@on_command(('boss', 'login'), only_to_me=False, permission=permission.SUPERUSER, hide=True)
@config.ErrorHandle
async def boss_login(session: CommandSession):
    """Open a fresh SSH session with the supplied password and wait for
    the shell prompt off the event loop before confirming."""
    global interact, ssh
    ssh.connect("lxslc6.ihep.ac.cn", 22, 'qity', session.current_arg_text)
    interact = paramiko_expect.SSHClientInteraction(ssh, timeout=10)
    # BUG FIX: this helper was declared `async def`, so run_in_executor
    # merely called it in a worker thread and got back a never-awaited
    # coroutine — expect() never ran and isLoggedin was never set.
    # It must be a plain (blocking) function.
    def login():
        global isLoggedin
        interact.expect(PROMPT)
        isLoggedin = True
    await asyncio.get_event_loop().run_in_executor(None, login)
    await session.send('Successfully logged in')
@on_command(('boss', 'reboot'), only_to_me=False, permission=permission.SUPERUSER, hide=True)
@config.ErrorHandle
async def boss_reboot(session: CommandSession):
    """Tear down and re-establish the SSH session to the login node from
    scratch, re-reading the password file."""
    global interact, ssh, isLoggedin
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # drop the password from memory as soon as the connection is made
    with open(config.rel("QAQ.txt")) as f:
        p = f.readline().strip('\n')
    ssh.connect("lxslc7.ihep.ac.cn", 22, 'qity', p)
    del p
    interact = paramiko_expect.SSHClientInteraction(ssh, timeout=10)
    # NOTE(review): blocking expect() inside an async handler stalls the
    # event loop until the prompt appears — consider run_in_executor as
    # in boss_login
    interact.expect(PROMPT)
    isLoggedin = True
    await session.send('boss rebooted!')
@on_command(('boss', 'begin'), only_to_me=False, hide=True)
@config.ErrorHandle
async def boss_begin(session: CommandSession):
    """Toggle the periodic boss job check and persist the flag as 0/1.

    An empty argument or '0' disables the check; anything else enables it.
    """
    global BeginCheck
    arg = session.current_arg_text.strip()
    # BUG FIX: previously bool(session.current_arg_text) — sending '0'
    # *enabled* the check, contradicting the 0/1 format this handler
    # writes and the startup code reads back with bool(int(...)).
    BeginCheck = arg not in ('', '0')
    with open(config.rel("boss_check_begin.txt"), 'w') as f:
        f.write(str(int(BeginCheck)))
    if BeginCheck:
        await session.send('boss check begin!')
    else:
        await session.send('boss check end!')
# @on_command(('boss', 'process'), only_to_me=False, permission=permission.SUPERUSER, hide=True)
@config.ErrorHandle
async def boss_process(session: CommandSession):
    # Run each input line as a remote shell command; reply with the
    # stripped stdout and stderr of every command, newline-joined.
    if not isLoggedin:
        await session.send('not logged in!')
        return
    def run_remote(cmd):
        # exec_command returns (stdin, stdout, stderr); report the last two
        _, out, err = ssh.exec_command(cmd.strip())
        return '\n'.join(''.join(stream.readlines()).strip() for stream in (out, err))
    replies = [run_remote(cmd) for cmd in session.current_arg_text.split('\n')]
    await session.send('\n'.join(replies))
class Status:
    """Summary of a `hep_q -u <user>` job listing: counts per job state.

    Built from the 7 regex capture groups (all, completed, removed, idle,
    running, held, suspended) or from None when the line failed to parse.
    """
    def __init__(self, groups):
        if groups is None:
            self.valid = False
            return
        counts = [int(g) for g in groups]
        (self.all, self.completed, self.removed, self.idle,
         self.running, self.held, self.suspended) = counts
        # sanity check: the per-state counts must add up to the total
        self.valid = self.all == sum(counts[1:])

    def isValid(self):
        """True when the line parsed and its counts are self-consistent."""
        return self.valid

    def Running(self):
        """True when at least one job is still queued (idle) or running."""
        return self.valid and self.idle + self.running != 0

    def process(self, f):
        """Invoke callback *f* when jobs are held or when every job has
        finished; return the matching notification text ('' otherwise)."""
        if self.held != 0:
            f()
            return "Job held!"
        if self.all - self.completed - self.removed == 0:
            f()
            return "All of your jobs have ended! func executed."
        return ""
@scheduler.scheduled_job('cron', id='check_boss', minute='00-57/3')
async def check_boss():
    """Every 3 minutes while BeginCheck is set: run `submit -c` over the
    SSH session, relay any output to the 'boss' groups, then parse the
    `hep_q -u qity` job summary and notify on held/finished jobs.

    NOTE(review): the interact.send/expect calls are blocking and run on
    the event loop thread; each cycle can stall the bot up to the SSH
    timeout.
    """
    global BeginCheck, BossCheck, isLoggedin, told_not_logged_in, interact
    if not BeginCheck:
        return
    bot = get_bot()
    if not isLoggedin:
        # nag only once about the missing login
        if not told_not_logged_in:
            for group in config.group_id_dict['boss']:
                await bot.send_group_msg(group_id=group, message='please login: -boss.login password')
            told_not_logged_in = True
        return
    # resubmit pending jobs; echo any non-trivial output
    interact.send('submit -c')
    interact.expect(PROMPT)
    output = interact.current_output_clean
    # stdin, stdout, stderr = ssh.exec_command('/workfs/bes/qity/shell/script/submit -c')
    # output = ''.join(stdout.readlines()).strip()
    global told_permission_denied
    if output.strip() != '' and output.strip() != 'submit -c':
        if 'Permission denied' in output:
            # permission errors would repeat every run: report only once
            if not told_permission_denied:
                for group in config.group_id_dict['boss']:
                    await bot.send_group_msg(group_id=group, message=output.strip())
                told_permission_denied = True
        else:
            for group in config.group_id_dict['boss']:
                await bot.send_group_msg(group_id=group, message=output.strip())
    def _f():
        # query the batch system and parse its summary line into a Status
        global interact
        interact.send('hep_q -u qity')
        interact.expect(PROMPT)
        output = interact.current_output_clean
        match = re.search(
            "(\d*) jobs; (\d*) completed, (\d*) removed, (\d*) idle, (\d*) running, (\d*) held, (\d*) suspended", output)
        if not match:
            print("Not found")
            return Status(None)
        return Status(match.groups())
    status = _f()
    if BossCheck:
        if not status.isValid():
            strout = "Error!"
        else:
            # boss_check.txt: line 1 = armed flag, line 2 (optional) = a
            # command saved by -boss.hang to run when jobs end
            with open(config.rel("boss_check.txt")) as f:
                f.readline()
                command = f.readline().strip()
            if command == '':
                def _g():
                    # nothing to run: simply disarm the check
                    global BossCheck
                    BossCheck = False
                    with open(config.rel("boss_check.txt"), 'w') as f:
                        f.write('0')
            else:
                def _g():
                    # stdin, stdout, stderr = ssh.exec_command(command)
                    # print((stdout.readlines(), stderr.readlines()))
                    # NOTE(review): the stored command is neither executed
                    # (commented out) nor written back, so the hang command
                    # is lost after this rewrite — confirm intended behavior
                    with open(config.rel("boss_check.txt"), 'w') as f:
                        f.write('1')
                        f.write('\n')
            strout = status.process(_g)
        if strout != "":
            if 'Permission denied' in strout:
                if not told_permission_denied:
                    for group in config.group_id_dict['boss']:
                        await bot.send_group_msg(group_id=group, message=strout)
                    told_permission_denied = True
            else:
                for group in config.group_id_dict['boss']:
                    await bot.send_group_msg(group_id=group, message=strout)
    else:
        # not armed yet: arm the completion check once a job is seen running
        if status.Running():
            BossCheck = True
            with open(config.rel("boss_check.txt"), 'w') as f:
                f.write('1')
            for group in config.group_id_dict['boss']:
                await bot.send_group_msg(group_id=group, message='Running job found! Begin boss check')
@on_command(('boss', 'hang'), only_to_me=False, permission=permission.SUPERUSER, hide=True)
@config.ErrorHandle
async def boss_hang(session: CommandSession):
    # Persist the current armed flag (line 1) and the command to run
    # when jobs finish (line 2) into boss_check.txt.
    content = ('1' if BossCheck else '0') + '\n' + session.current_arg_text
    with open(config.rel('boss_check.txt'), 'w') as f:
        f.write(content)
    await session.send('Successfully saved.')
# NetEase Cloud Music playlist ids for the lyric-roll command, keyed by
# franchise/artist alias ('all' is the combined pool; several aliases map
# to the same playlist)
idmap = {'all': 2503049358,
         'LL': 138461796,
         'll': 138461796,
         'lovelive': 138461796,
         'bandori': 2221214678,
         "mu's": 423336425,
         'Aqours': 449636768,
         'starlight': 2482865249,
         'sphere': 994322013,
         'Sphere': 994322013,
         'aki': 994296036,
         'ML': 50015591,
         'ml': 50015591,
         'KON': 812754,
         'kon': 812754,
         'MH': 46568099,
         'mh': 46568099,
         'VOCALO': 37258756,
         'vocalo': 37258756,
         'cgss': 526680154,
         'CGSS': 526680154}
@functools.total_ordering
class Time:
    """An "[mm:ss.xx]" lyric timestamp, ordered chronologically.

    Raises ValueError (previously AssertionError) on malformed input;
    Line() catches any constructor failure, so callers are unaffected.
    """
    match = re.compile(r'^(\d{1,3}):(\d{1,2})\.(\d{1,3})$')

    def __init__(self, timestr):
        self.str = timestr
        m = re.match(self.match, self.str)
        if not m:
            raise ValueError('invalid lyric timestamp: %r' % timestr)
        # BUG FIX: groups() are strings — the old code multiplied a string
        # by 10**k (string repetition) and compared tuples of strings, so
        # e.g. "10:00.000" sorted before "9:59.999". Convert to ints.
        self.minute, self.second, self.milisecond = map(int, m.groups())
        # normalize the fractional part to milliseconds ("4" -> 400)
        self.milisecond *= 10 ** (3 - len(m.group(3)))

    def __lt__(self, other):
        return (self.minute, self.second, self.milisecond) < (other.minute, other.second, other.milisecond)

    def __eq__(self, other):
        return (self.minute, self.second, self.milisecond) == (other.minute, other.second, other.milisecond)

    def __str__(self):
        return self.str
class Line:
    """One lyric line: the text after an optional "mm:ss.xx]" timestamp.

    valid is False for empty input or an unparsable timestamp; time is
    None when no (parsable) timestamp was present.
    """
    def __init__(self, string):
        if string == "":
            self.valid = False
            return
        bracket = string.find(']')
        if bracket == -1:
            # no timestamp at all: keep the raw text as an untimed line
            self.content = string
            self.trans = None
            self.valid = True
            self.time = None
            return
        self.content = string[bracket + 1:].replace('\n', '')
        self.trans = None
        try:
            self.time = Time(string[:bracket])
        except:
            # any parse failure marks the line invalid
            self.time = None
            self.valid = False
        else:
            self.valid = True

    def empty(self):
        """True when the line carries no text."""
        return self.content == ""

    def isValid(self):
        return self.valid

    def addTrans(self, string):
        """Attach a translated text to this line."""
        self.trans = string

    def clearTrans(self):
        self.trans = None
class LyricTransErr(Exception):
    """Raised when a translated lyric timestamp has no matching original line."""
    def __init__(self, song_id, time):
        self.id = song_id
        self.time = time

    def __str__(self):
        return f"Song {self.id}'s translated lyric at time {self.time} are dislocated"
class anyErrWithId(Exception):
    """Wraps an exception raised during lyric processing together with
    the offending song id and the formatted traceback text."""
    def __init__(self, err, id, tb):
        self.err = err
        self.id = id
        self.traceback = tb

    def __str__(self):
        return f"{type(self.err).__name__}: {self.err}, song id: {self.id}"
def printLyric(idx):
    """Debug helper: dump the raw lyric JSON for song *idx* to test.txt."""
    lyric = ncmbot.lyric(id=idx).json()
    # BUG FIX: the old json.dumps(...).decode('unicode_escape').encode(...)
    # chain was Python 2 only — str has no .decode in Python 3, so this
    # raised AttributeError on every call. ensure_ascii=False keeps CJK
    # text readable instead of \uXXXX escapes.
    with open('test.txt', 'w', encoding='utf-8') as f:
        f.write(json.dumps(lyric, ensure_ascii=False,
                           indent=4, separators=(',', ': ')))
def getLyric(listid):
    """Pick a random song from NetEase playlist *listid* and return a
    random short lyric excerpt from it.

    Returns {'lyric', 'translated', 'name', 'artists'}; 'translated' is
    '' when no translation lines were merged.  Any failure after a song
    is chosen is re-raised as anyErrWithId carrying the song id.
    """
    pl = ncmbot.play_list_detail(id=str(listid)).json()
    trks = pl['playlist']['trackIds']
    # print(len(trks))
    # keep drawing until we hit a track that actually has lyrics
    while 1:
        ran_trk = random.choice(trks)
        lyricl = ncmbot.lyric(id=ran_trk['id']).json()
        if 'lrc' in lyricl:
            break
    song_id = ran_trk['id']
    try:
        lyricstr = lyricl['lrc']['lyric']
        tlyricstr = lyricl['tlyric']['lyric']
        # klyricstr = lyricl['klyric']['lyric']#???
        # lyrics arrive as "[mm:ss.xx]text[mm:ss.xx]text..."; split on '['
        lyric = list(filter(Line.isValid, map(Line, lyricstr.split('['))))
        # print lyric
        if tlyricstr is not None:
            try:
                tlyric = list(
                    filter(Line.isValid, map(Line, tlyricstr.split('['))))
                # merge translations into original lines by timestamp;
                # both lists are assumed to be time-ordered
                liter = iter(lyric)
                titer = iter(tlyric)
                try:
                    line = next(liter)
                    tl = next(titer)
                    while 1:
                        if line.time == tl.time:
                            line.addTrans(tl.content)
                        elif line.time < tl.time:
                            # original is behind: advance it only
                            line = next(liter)
                            continue
                        else:
                            # translation with no matching original line
                            raise LyricTransErr(song_id, tl.time)
                        line = next(liter)
                        tl = next(titer)
                except StopIteration:
                    pass
            except LyricTransErr:
                # misaligned translation: drop all of it
                for line in lyric:
                    line.clearTrans()
        # group consecutive non-empty lines into stanza blocks; a lone
        # leftover line is appended to the previous block instead
        blocks = []
        t = ()
        for line in lyric:
            if line.empty():
                if t != ():
                    if len(t) == 1 and len(blocks) != 0:
                        blocks[-1] += t
                    else:
                        blocks.append(t)
                    t = ()
            else:
                t += (line, )
        if t != ():
            if len(t) == 1 and len(blocks) != 0:
                blocks[-1] += t
            else:
                blocks.append(t)
        # print(blocks)
        def _f(blocks):
            # yield candidate excerpts of 1-3 lines, filtering out
            # credit lines (作词/作曲/编曲 etc.)
            for block in blocks:
                b = len(block) >= 6
                def _v(string):
                    return '作词' not in string.content and '作曲' not in string.content and '编曲' not in string.content and u'词:' not in string.content and u'曲:' not in string.content
                if not b:
                    # short stanza: reject it wholesale if any credit line
                    b = all(map(_v, block))
                else:
                    # long stanza: just drop the credit lines
                    block = list(filter(_v, block))
                if b:
                    for i in range(len(block)):
                        # print block[i].content.encode('utf-8')
                        # long line alone; medium line + 1 more; short + 2
                        if len(block[i].content) >= 25:
                            yield (block[i], )
                        elif len(block[i].content) >= 8 and i < len(block) - 1:
                            yield (block[i], block[i + 1])
                        elif len(block[i].content) < 8 and i < len(block) - 2:
                            yield (block[i], block[i + 1], block[i + 2])
        pool = tuple(_f(blocks))
        # print len(pool)
        # print(pool)
        t = random.choice(pool)
        lyricrstr = '\n'.join(map(lambda x: x.content, t))
        tlyricrstr = '\n'.join(
            filter(lambda x: x is not None, map(lambda x: x.trans, t)))
        r = ncmbot.song_detail([ran_trk['id']]).json()
        # print r['songs']#.encode('utf-8')
        # print repr(r['songs'])#.encode('utf-8')
        trk_name = r['songs'][0]['name']
        trk_ar = ', '.join(map(lambda x: x['name'], r['songs'][0]['ar']))
        return {'lyric': lyricrstr, 'translated': tlyricrstr, 'name': trk_name, 'artists': trk_ar}
    except Exception as err:
        raise anyErrWithId(err, song_id, traceback.format_exc())
config.CommandGroup(('misc', 'roll'), hide=True)
@on_command(('misc', 'roll', 'lyric'), only_to_me=False, short_des="随机歌词。", display_parents='misc')
@config.ErrorHandle
async def roll_lyric(session: CommandSession):
    """随机歌词。
    不加参数则为从全曲库中随机。
    支持曲库:vocalo kon imas ml cgss sphere aki bandori ll mu's Aqours starlight mh"""
    # empty argument falls back to the combined 'all' playlist
    key = session.current_arg_text or 'all'
    if key not in idmap:
        await session.send('name not found')
        return
    d = getLyric(idmap[key])
    trans_part = u"\n翻译:\n" + d['translated'] if d['translated'] != "" else u""
    await session.send('抽歌词!:\n%s%s\n——《%s》(%s)' %
                       (d['lyric'], trans_part, d['name'], d['artists']))
# @scheduler.scheduled_job('cron', minute='00-57/3')
# async def check_bicaf():
# with open(config.rel('bicaf.html'), encoding='utf-8') as f:
# l = f.readlines()
# loop = asyncio.get_event_loop()
# url = await loop.run_in_executor(None, requests.get,
# "https://bicaf.com.cn/news")
# text = url.text.splitlines(keepends=True)
# d = list(difflib.ndiff(l, text))
# if any([x.startswith('+ ') or x.startswith('- ') for x in d]):
# with open(config.rel('bicaf.html'), 'w', encoding='utf-8') as f:
# f.write(url.text)
# for group in config.group_id_dict['boss']:
# await get_bot().send_group_msg(group_id=group, message=''.join([x for x in d if not x.startswith(' ')]))
# with open(config.rel('bicaf_ticket.html'), encoding='utf-8') as f:
# l = f.readlines()
# url = await loop.run_in_executor(None, requests.get,
# "https://bicaf.com.cn/ticket")
# text = url.text.splitlines(keepends=True)
# d = list(difflib.ndiff(l, text))
# if any([x.startswith('+ ') or x.startswith('- ') for x in d]):
# with open(config.rel('bicaf_ticket.html'), 'w', encoding='utf-8') as f:
# f.write(url.text)
# for group in config.group_id_dict['boss']:
# await get_bot().send_group_msg(group_id=group, message=''.join([x for x in d if not x.startswith(' ')]))
# BibTeX export URL templates per journal. APS journals take
# (volume, page); IOP journals (cpc/cpb) take (volume, issue, page).
bibtex_url = {'pra': 'https://journals.aps.org/pra/export/10.1103/PhysRevA.{}.{}', 'prb': 'https://journals.aps.org/prb/export/10.1103/PhysRevB.{}.{}', 'prc': 'https://journals.aps.org/prc/export/10.1103/PhysRevC.{}.{}', 'prd': 'https://journals.aps.org/prd/export/10.1103/PhysRevD.{}.{}', 'pre': 'https://journals.aps.org/pre/export/10.1103/PhysRevE.{}.{}',
              'prl': 'https://journals.aps.org/prl/export/10.1103/PhysRevLett.{}.{}', 'cpc': 'https://iopscience.iop.org/export?articleId=1674-1137/{}/{}/{}&exportFormat=iopexport_bib&exportType=abs&navsubmit=Export+abstract', 'cpb': 'https://iopscience.iop.org/export?articleId=1674-1056/{}/{}/{}&exportFormat=iopexport_bib&exportType=abs&navsubmit=Export+abstract'}
@on_command(('tools', 'bibtex'), only_to_me=False, short_des="查询文章的bibtex。", args=("journal", "volume", "pages"))
@config.ErrorHandle
async def bibtex(session: CommandSession):
    """查询文章的bibtex。
    目前支持期刊:pra prb prc prd pre prl cpb cpc"""
    # expected input: "<journal> <volume> <first-page>"
    args = session.current_arg_text.split(' ')
    # session.finish raises, aborting the handler on bad input
    if len(args) == 0 or args[0].lower() not in bibtex_url:
        session.finish('支持期刊:pra prb prc prd pre prl cpb cpc')
    elif len(args) < 3:
        session.finish('请使用:-tools.bibtex 期刊名 卷数 首页页码')
    name = args.pop(0).lower()
    loop = asyncio.get_event_loop()
    try:
        # non-numeric or non-positive volume/page -> ValueError branch below
        if int(args[0]) <= 0 or int(args[1]) <= 0:
            raise ValueError
        if name in ('cpc', 'cpb'):
            # IOP export URLs need (volume, issue, page); the issue is taken
            # from the first two digits of the page number — presumably
            # CPC/CPB page numbers encode the issue; confirm
            args = args[0], str(int(args[1][0:2])), args[1]
        url = await asyncio.wait_for(loop.run_in_executor(None, requests.get, bibtex_url[name].format(*args)), timeout=60)
        if url.status_code != 200:
            await session.send('not found!')
        else:
            # long replies are split in two to fit message length limits
            if len(url.text) >= 2000:
                await session.send(url.text[0:2000])
                await session.send(url.text[2000:])
            else:
                await session.send(url.text)
    except ValueError:
        await session.send('请输入合理的期刊卷数与页码。')
    except asyncio.TimeoutError:
        await session.send('time out!')
config.CommandGroup('steam', hide=True)
@on_command(('steam', 'price'), only_to_me=False, hide=True)
@config.ErrorHandle
async def steam_price(session: CommandSession):
    """Look up a game on steamdb.info by name and report its store link,
    current CNY price with discount, and the historical low."""
    name = session.current_arg_text.strip()
    loop = asyncio.get_event_loop()
    try:
        # hard-coded browser headers/cookies to get past steamdb's
        # Cloudflare check; NOTE(review): the cf_clearance token expires,
        # so these values will eventually stop working
        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
                   'Accept-Encoding': 'gzip, deflate, br',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
                   ,'Cookies':'__cfduid=d86e702d95c9f19d5f33c5ae30ded8d881572688847; _ga=GA1.2.940923473.1572688859; __Host-cc=cn; cf_clearance=e13ec0c58bb63cfa52ea085e2424466fdc3213c3-1574825182-0-150; _gid=GA1.2.1066487208.1574825186'
                   ,'Sec-Fetch-Mode': 'navigate'
                   ,'Sec-Fetch-Site': 'none'
                   ,'Sec-Fetch-User': '?1'
                   ,'Upgrade-Insecure-Requests': '1'
                   ,'Accept-Language': 'zh-CN,zh;q=0.9'
                   ,'Cache-Control': 'max-age=0'
                   }
        cookies = {'__cfduid':'d86e702d95c9f19d5f33c5ae30ded8d881572688847','_ga':'GA1.2.940923473.1572688859','__Host-cc':'cn','cf_clearance':'e13ec0c58bb63cfa52ea085e2424466fdc3213c3-1574825182-0-150','_gid':'GA1.2.1066487208.1574825186'}
        # step 1: search steamdb for the app id of the first matching game
        url = await asyncio.wait_for(loop.run_in_executor(None, functools.partial(requests.get, 'https://steamdb.info/search/?a=app&q=' + name, cookies=cookies)), timeout=60)
        if url.status_code != 200:
            await session.send('url error!')
            return
        begin = re.search('<tbody hidden>', url.text)
        if not begin:
            await session.send('url error!')
            return
        begin_pos = begin.span()[1]
        match = re.search(
            '<tr class="app" data-appid="(\d+)">', url.text[begin_pos:])
        if not match:
            await session.send('未找到此游戏。')
        else:
            app_id = match.group(1)
            # step 2: fetch the app page and scrape name + CNY pricing
            url = await asyncio.wait_for(loop.run_in_executor(None, functools.partial(requests.get, f'https://steamdb.info/app/{app_id}/', headers=headers)), timeout=60)
            if url.status_code != 200:
                await session.send('url error!')
                return
            title = re.search(
                '<td>Name</td>\s*<td itemprop="name">([^<>]+?)</td>', url.text)
            if not title:
                await session.send('url error!')
                return
            # page text is HTML-escaped; unescape the display name
            name = html.unescape(title.group(1))
            store = f'https://store.steampowered.com/app/{app_id}/'
            # captures: current price, optional discount %, historical low
            price_match = re.search(
                'Chinese Yuan Renminbi\s*</td>\s*¥ (\d+)(?: at <span class="price-discount">-(\d+)%</span>)?</td>\s*<td [^<>]*?>.*?</td>\s*<td data-sort=".*?">¥ (\d+)</td>', url.text)
            if not price_match:
                await session.send('未找到价格信息。')
                return
            price, discount, price_lowest = price_match.groups()
            await session.send(f'游戏名称:{name}\nSteam store链接:{store}\n现价:¥ {price}{f"(-{discount}%)" if discount is not None else ""}\n史低:¥ {price_lowest}')
    except asyncio.TimeoutError:
        await session.send('time out!')
# timestamp of the most recently seen thtk commit, persisted across restarts
with open(config.rel('thtk_github_last_update.txt')) as f:
    thtk_time = datetime.fromisoformat(f.read())
# @scheduler.scheduled_job('cron', minute='00-40/20')
async def check_github_thtk():
    """Poll the thpatch/thtk GitHub repo and announce new commits.

    Fetches the commit list (newest first, per the GitHub API), compares
    committer timestamps against the module-level ``thtk_time`` watermark,
    and — when new commits exist — advances the watermark, persists it to
    disk, and broadcasts the new commit messages to every group configured
    under 'thtk_update'.
    """
    global thtk_time
    loop = asyncio.get_event_loop()
    # requests is blocking; run it in the default executor so the event
    # loop is not stalled by the network round-trip.
    ret = await loop.run_in_executor(None, functools.partial(
        requests.get, 'https://api.github.com/repos/thpatch/thtk/commits'))
    j = ret.json()
    # Find the index of the first commit at or before the watermark; the
    # trailing 'Z' is stripped for datetime.fromisoformat (pre-3.11 it
    # rejects the 'Z' suffix).
    for i, d in enumerate(j):
        if datetime.fromisoformat(d['commit']['committer']['date'][:-1]) <= thtk_time:
            break
    else:
        # No break: either the list is empty (i becomes 0, nothing to do)
        # or every returned commit is new. The original code left `i`
        # unbound on an empty list (NameError) and dropped the oldest new
        # commit when all commits were new (i stopped at len(j) - 1).
        i = len(j)
    if i != 0:
        # j[0] is the newest commit; persist its timestamp as the new
        # watermark before announcing, mirroring the load at module import.
        t = j[0]['commit']['committer']['date'][:-1]
        thtk_time = datetime.fromisoformat(t)
        with open(config.rel('thtk_github_last_update.txt'), 'w') as f:
            f.write(t)
        for group in config.group_id_dict['thtk_update']:
            await get_bot().send_group_msg(message='Thtk commit detected.\n' + '\n'.join(f"Commit in {d['commit']['committer']['date']}:\n{d['commit']['message']}" for d in j[:i]), group_id=group)
|
{"/chiharu/plugins/games/logic_dragon/QQSession.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Game.py": ["/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/AllItems.py"], "/chiharu/plugins/games/logic_dragon/Equipment.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/User.py": ["/chiharu/plugins/games/logic_dragon/UserData.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/Types.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py"], "/chiharu/plugins/games/logic_dragon/Card.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], 
"/chiharu/plugins/games/logic_dragon/Status.py": ["/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/games/logic_dragon/EventListener.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Priority.py"], "/chiharu/plugins/games/yahtzee.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/achievement_command.py": ["/chiharu/plugins/games/achievement.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/game.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/net.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/AllCards0.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/Item.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Dragon.py": ["/chiharu/plugins/games/logic_dragon/Types.py"], "/chiharu/plugins/misc.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/birth.py", 
"/chiharu/plugins/games/achievement.py", "/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/config.py", "/chiharu/plugins/helper/dice/dice.py"], "/chiharu/plugins/games/splendor_duel.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/game.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/birth.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/tiemu.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon_type.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/games/maj.py", "/chiharu/plugins/misc.py"], "/chiharu/plugins/games/chiharu.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/if.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/xiangqi.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/thwiki.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/logic_dragon/UserData.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/games/witness_parse.py": ["/chiharu/plugins/helper/witness/symbol.py"], "/chiharu/plugins/games/ccs.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards1.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", 
"/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards0.py"], "/chiharu/plugins/games/logic_dragon/AllEquipments.py": ["/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/snakebird.py": ["/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/zhu_core.py": ["/chiharu/plugins/games/cardboard.py"], "/chiharu/plugins/games/ccs_command.py": ["/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_board.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sausage.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/games/logic_dragon/Document.py": ["/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/logic_dragon/AllCards4.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/config.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/AllCardsDLC8.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", 
"/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/ccs_board.py": ["/chiharu/plugins/games/ccs_tile.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_player.py"], "/chiharu/plugins/games/logic_dragon/AllItems.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/Item.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Equipment.py", "/chiharu/plugins/games/logic_dragon/Game.py"], "/chiharu/plugins/inject.py": ["/chiharu/plugins/config.py"], "/chiharu/plugins/mbf.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/help.py": ["/chiharu/plugins/inject.py", "/chiharu/plugins/config.py"], "/chiharu/plugins/games/bw.py": ["/chiharu/plugins/game.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/pig.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/logic_dragon/AllCards6.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards2.py"], 
"/chiharu/plugins/games/achievement.py": ["/chiharu/plugins/game.py"], "/chiharu/plugins/games/ccs_player.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_extra.py", "/chiharu/plugins/games/ccs_helper.py", "/chiharu/plugins/games/ccs_board.py"], "/chiharu/plugins/games/logic_dragon/AllCards2.py": ["/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/dyson_sphere.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/jetstream.py": ["/chiharu/plugins/games/boxgame.py", "/chiharu/plugins/config.py", "/chiharu/plugins/games/achievement.py"], "/chiharu/plugins/games/logic_dragon/Attack.py": ["/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/User.py"], "/chiharu/plugins/games/logic_dragon/Mission.py": ["/chiharu/plugins/games/logic_dragon/Helper.py"], "/chiharu/plugins/games/logic_dragon/AllCards3.py": ["/chiharu/plugins/games/logic_dragon/Game.py", "/chiharu/plugins/games/logic_dragon/Card.py", "/chiharu/plugins/games/logic_dragon/User.py", "/chiharu/plugins/games/logic_dragon/Status.py", "/chiharu/plugins/games/logic_dragon/Attack.py", "/chiharu/plugins/games/logic_dragon/Priority.py", "/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Dragon.py", "/chiharu/plugins/games/logic_dragon/Mission.py", "/chiharu/plugins/games/logic_dragon/EventListener.py", "/chiharu/plugins/games/logic_dragon/Helper.py", "/chiharu/plugins/games/logic_dragon/AllCards1.py"], "/chiharu/plugins/games/ccs_extra.py": ["/chiharu/plugins/games/ccs.py", "/chiharu/plugins/games/ccs_player.py", 
"/chiharu/plugins/games/ccs_helper.py"], "/chiharu/plugins/math.py": ["/chiharu/plugins/helper/function/function.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/alarm.py": ["/chiharu/plugins/config.py", "/chiharu/plugins/inject.py"], "/chiharu/plugins/games/logic_dragon/Helper.py": ["/chiharu/plugins/games/logic_dragon/Types.py", "/chiharu/plugins/games/logic_dragon/Card.py"], "/chiharu/plugins/games/ccs_tile.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/ccs_helper.py": ["/chiharu/plugins/games/carcassonne_asset/readTile.py"], "/chiharu/plugins/games/witness.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/sokobond.py": ["/chiharu/plugins/games/boxgame.py"], "/chiharu/plugins/eventer.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/games/maj_command.py": ["/chiharu/plugins/inject.py"], "/chiharu/plugins/solver.py": ["/chiharu/plugins/inject.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.