index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
1,493
|
tanmesh/cat-and-dog
|
refs/heads/master
|
/img_cla.py
|
import numpy as np
from keras.layers import Activation
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.models import Sequential
from keras_preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from prepare_data import prepare_data, split_data
def img_classi():
print("Splitting data into train and test...")
train_images_dogs_cats, test_images_dogs_cats = split_data()
img_width = 150
img_height = 150
print("Preparing the train data...")
x, y = prepare_data(train_images_dogs_cats, img_width, img_height)
print("Splitting the train data into training and validation set...")
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
n_train = len(x_train)
n_val = len(x_val)
batch_size = 16
print("Building the model..")
model = Sequential()
print("Running the first layer...")
model.add(Conv2D(32, (3, 3), input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
print("Running the second layer...")
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
print("Running the third layer...")
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
print("Running the last layer...")
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
# try:
# model.add(Dropout(0.5))
# except Exception as e:
# print("There is error........."+str(e))
model.add(Dense(1))
model.add(Activation('sigmoid'))
print("Compiling the model...")
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print("Model build.")
print('Data augmentation...')
train_data_gen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
val_data_gen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
print('Preparing generators for training and validation sets...')
train_generator = train_data_gen.flow(np.array(x_train), y_train, batch_size=batch_size)
validation_generator = val_data_gen.flow(np.array(x_val), y_val, batch_size=batch_size)
print('Fitting the model...')
model.fit_generator(train_generator, steps_per_epoch=n_train // batch_size, epochs=30,
validation_data=validation_generator, validation_steps=n_val // batch_size)
print('Saving the model...')
model.save_weights('model_wieghts.h5')
model.save('model_keras.h5')
print("Model saved...")
print('Generating test data...')
x_test, y_test = prepare_data(test_images_dogs_cats, img_width, img_height)
test_data_gen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_data_gen.flow(np.array(x_test), batch_size=batch_size)
print("Predicting...")
pred = model.predict_generator(test_generator, verbose=1, steps=len(test_generator))
print("Prediction is " + str(pred))
img_classi()
|
{"/img_cla.py": ["/prepare_data.py"]}
|
1,494
|
tanmesh/cat-and-dog
|
refs/heads/master
|
/prepare_data.py
|
import os
import re
import cv2
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split('(\d+)', text)]
def prepare_data(list_of_images_path, img_width, img_height):
x = [] # images as arrays
y = [] # labels
for image_path in list_of_images_path:
read_image = cv2.imread(image_path)
tmp = cv2.resize(read_image, (img_width, img_height), interpolation=cv2.INTER_CUBIC)
x.append(tmp)
for i in list_of_images_path:
if 'dog' in i:
y.append(1)
elif 'cat' in i:
y.append(0)
return x, y
def split_data():
train_dir = '/Users/tanmesh/dev/cat_and_dog/dataset/train/'
test_dir = '/Users/tanmesh/dev/cat_and_dog/dataset/test/'
train_images_dogs_cats = [train_dir + i for i in os.listdir(train_dir)] # use this for full dataset
test_images_dogs_cats = [test_dir + i for i in os.listdir(test_dir)]
train_images_dogs_cats.sort(key=natural_keys)
# train_images_dogs_cats = train_images_dogs_cats[0:1300] + train_images_dogs_cats[12500:13800]
test_images_dogs_cats.sort(key=natural_keys)
return train_images_dogs_cats, test_images_dogs_cats
|
{"/img_cla.py": ["/prepare_data.py"]}
|
1,507
|
yueyoum/bulk_create_test
|
refs/heads/master
|
/myapp/admin.py
|
from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from myapp.models import TestModel
class ResourceTestModel_1(resources.ModelResource):
class Meta:
model = TestModel
def before_import(self, *args, **kwargs):
self._meta.model.objects.all().delete()
def get_or_init_instance(self, instance_loader, row):
return (self.init_instance(row), True)
class ResourceTestModel_2(resources.ModelResource):
class Meta:
model = TestModel
bulk_replace = True
@admin.register(TestModel)
class AdminTestModel(ImportExportModelAdmin):
resource_class = ResourceTestModel_2
list_display = ('id', 'f1', 'f2', 'f3', 'f4',)
|
{"/myapp/admin.py": ["/myapp/models.py"], "/set_random_data.py": ["/myapp/models.py"]}
|
1,508
|
yueyoum/bulk_create_test
|
refs/heads/master
|
/myapp/models.py
|
from __future__ import unicode_literals
from django.db import models
class TestModel(models.Model):
id = models.IntegerField(primary_key=True)
f1 = models.CharField(max_length=255)
f2 = models.IntegerField()
f3 = models.TextField()
f4 = models.IntegerField()
class Meta:
db_table = 'test_table'
|
{"/myapp/admin.py": ["/myapp/models.py"], "/set_random_data.py": ["/myapp/models.py"]}
|
1,509
|
yueyoum/bulk_create_test
|
refs/heads/master
|
/myapp/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-20 09:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TestModel',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('f1', models.CharField(max_length=255)),
('f2', models.IntegerField()),
('f3', models.TextField()),
('f4', models.IntegerField()),
],
options={
'db_table': 'test_table',
},
),
]
|
{"/myapp/admin.py": ["/myapp/models.py"], "/set_random_data.py": ["/myapp/models.py"]}
|
1,510
|
yueyoum/bulk_create_test
|
refs/heads/master
|
/set_random_data.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Wang Chao <yueyoum@gmail.com>
Filename: set_random_data.py
Date created: 2016-06-20 17:45:27
Description:
"""
import os
import sys
import uuid
import random
import pymysql
pymysql.install_as_MySQLdb()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mytest1.settings")
import django
django.setup()
from myapp.models import TestModel
try:
AMOUNT = int(sys.argv[1])
except:
AMOUNT = 10000
def create_random_data():
data = []
for i in range(1, AMOUNT+1):
data.append({
'id': i,
'f1': str(uuid.uuid4()),
'f2': random.randint(1, 10000),
'f3': str(uuid.uuid4()),
'f4': random.randint(1, 10000),
})
return data
def set_data():
TestModel.objects.all().delete()
data = create_random_data()
objs = [TestModel(**d) for d in data]
TestModel.objects.bulk_create(objs)
if __name__ == '__main__':
set_data()
|
{"/myapp/admin.py": ["/myapp/models.py"], "/set_random_data.py": ["/myapp/models.py"]}
|
1,511
|
yueyoum/bulk_create_test
|
refs/heads/master
|
/mytest1/middleware.py
|
# -*- coding: utf-8 -*-
"""
Author: Wang Chao <yueyoum@gmail.com>
Filename: middleware.py
Date created: 2016-06-20 18:11:01
Description:
"""
import time
class TimeMeasureRequestMiddleware(object):
def process_request(self, request):
request._time_measure_star_at = time.time()
class TimeMeasureResponseMiddleware(object):
def process_response(self, request, response):
time_passed = time.time() - request._time_measure_star_at
print "[TIME MEASURE] {0}: {1}".format(request.path, time_passed)
return response
|
{"/myapp/admin.py": ["/myapp/models.py"], "/set_random_data.py": ["/myapp/models.py"]}
|
1,512
|
UshshaqueBarira/Data-Analysis
|
refs/heads/main
|
/DecisionTree_heartattack.py
|
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
# In[49]:
heart=pd.read_csv("./heart.csv")
heart.head()
# In[50]:
sns.set_style('white')
# In[52]:
sns.relplot(x='age',y='chol',data=heart,color='green',hue='sex')
# In[54]:
sns.relplot(x='age',y='cp',data=heart,hue='sex')
# In[68]:
feature_cols=['age','cp','trtbps','chol','fbs','restecg','thalachh','exng','oldpeak','slp','caa','thall','output']
feature_cols
# In[115]:
X=heart[feature_cols]
y=heart.sex
y1=heart.chol
# In[116]:
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=1)
# In[117]:
clf=DecisionTreeClassifier()
clf=clf.fit(x_train,y_train)
y_pred=clf.predict(x_test)
# In[118]:
print("Accuracy:(Gender)",(metrics.accuracy_score(y_test,y_pred))*100)
# In[122]:
x_train,x_test,y_train,y_test=train_test_split(X,y1,test_size=0.4,random_state=1)
# In[123]:
clf1=DecisionTreeClassifier()
clf1=clf1.fit(x_train,y_train)
y_pred=clf1.predict(x_test)
# In[124]:
print("Accuracy:(Cholestrol)",(metrics.accuracy_score(y_test,y_pred)*100))
# In[ ]:
|
{"/DecisionTree_heartattack.py": ["/seaborn.py"], "/Decision Tree_Titanic.py": ["/seaborn.py"]}
|
1,513
|
UshshaqueBarira/Data-Analysis
|
refs/heads/main
|
/Decision Tree_Titanic.py
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
#titanic data set is all manipulated thus we have an accuracy level of 1.0 that is 100 matching as trained and test data
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
import seaborn as sns
# In[4]:
sns.set_style('dark')
# In[51]:
titanic=sns.load_dataset('titanic')
titanic.head()
# In[66]:
feature_cols=['survived','pclass','sibsp','parch','fare']
# In[78]:
X=titanic[feature_cols]
#y=titanic.pclass
y1=titanic.survived
#print(X.isnull())
# In[79]:
x_train,x_test,y_train,y_test=train_test_split(X,y1,test_size=0.4,random_state=1)#test 30% and 70% train data
# In[80]:
clf=DecisionTreeClassifier()
clf=clf.fit(x_train,y_train)
y_pred=clf.predict(x_test)
# In[81]:
print("Accuracy:",metrics.accuracy_score(y_test,y_pred))
# In[ ]:
|
{"/DecisionTree_heartattack.py": ["/seaborn.py"], "/Decision Tree_Titanic.py": ["/seaborn.py"]}
|
1,514
|
UshshaqueBarira/Data-Analysis
|
refs/heads/main
|
/seaborn.py
|
#!/usr/bin/env python
# coding: utf-8
# In[10]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib.pyplot', '% inline')
# In[11]:
sns.get_dataset_names()
# In[12]:
attention=sns.load_dataset('attention')
attention.head()
# In[13]:
sns.relplot(x='subject',y='score',data=attention,hue='attention',size='subject')
# In[14]:
tips=sns.load_dataset('tips')
tips.head()
# In[15]:
sns.scatterplot(x='total_bill',y='tip',data=tips)
# In[16]:
# using linear regression technique----one independent variable and one dependent variable(total_bill, tip)
import sklearn.linear_model
tips.head()
# In[17]:
x=tips['total_bill']
y=tips['tip']
# In[29]:
x.train=x[:100]
x.test=x[-100:]
y.train=y[:100]
y.test=y[-100:]
# In[18]:
plt.scatter(x.test,y.test,color='blue')
# In[19]:
regr=linear_model.LinearRegression()
regr.fit(x.train,y.train)
plt.plot(x.test,regr.predict(x.test),color='green',linewidth=2)
# In[20]:
sns.set_style('dark')
sns.regplot(x,y,data=tips,color='green')
# In[24]:
#using the different dataset as car_crashes
car_crashes=sns.load_dataset('car_crashes')
car_crashes.head()
# In[25]:
penguins=sns.load_dataset('penguins')
penguins.head()
# In[29]:
#cross dimensional features correlation graph
sns.pairplot(penguins,hue='species',height=2.5);
# In[31]:
sns.relplot(x='bill_length_mm',y='bill_depth_mm',data=penguins,hue='sex')
# In[35]:
sns.set_style('white')
sns.scatterplot(x='bill_length_mm',y='species',data=penguins,color='green')
# In[37]:
sns.scatterplot(x='bill_length_mm',y='sex',data=penguins,color='orange')
# In[ ]:
|
{"/DecisionTree_heartattack.py": ["/seaborn.py"], "/Decision Tree_Titanic.py": ["/seaborn.py"]}
|
1,515
|
liuhongbo830117/ntire2018_adv_rgb2hs
|
refs/heads/master
|
/models/mylosses.py
|
# -*- coding: utf-8 -*-
import numpy as np
from torch.nn.modules import loss
from torch.nn import functional as F
import torch
from torch.autograd import Variable
class RelMAELoss(loss._Loss):
r"""Creates a criterion that measures the mean squared error between
`n` elements in the input `x` and target `y`.
The loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = \left( x_n - y_n \right)^2,
where :math:`N` is the batch size. If reduce is ``True``, then:
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if}\; \text{size_average} = \text{True},\\
\operatorname{sum}(L), & \text{if}\; \text{size_average} = \text{False}.
\end{cases}
`x` and `y` arbitrary shapes with a total of `n` elements each.
The sum operation still operates over all the elements, and divides by `n`.
The division by `n` can be avoided if one sets the internal variable
`size_average` to ``False``.
To get a batch of losses, a loss per batch element, set `reduce` to
``False``. These losses are not averaged and are not affected by
`size_average`.
Args:
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
size_average is set to ``False``, the losses are instead summed for
each minibatch. Only applies when reduce is ``True``. Default: ``True``
reduce (bool, optional): By default, the losses are averaged
over observations for each minibatch, or summed, depending on
size_average. When reduce is ``False``, returns a loss per batch
element instead and ignores size_average. Default: ``True``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
Examples::
>>> loss = nn.MSELoss()
>>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
>>> target = autograd.Variable(torch.randn(3, 5))
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, size_average=True, reduce=True):
super(RelMAELoss, self).__init__(size_average)
self.reduce = reduce
def forward(self, input, target):
input = (input + 1) / 2.0 * 4095.0
target = (target + 1) / 2.0 * 4095.0
loss._assert_no_grad(target)
abs_diff = torch.abs(target - input)
relative_abs_diff = abs_diff / (target + np.finfo(float).eps)
rel_mae = torch.mean(relative_abs_diff)
#from eval:
# compute MRAE
# diff = gt - rc
# abs_diff = np.abs(diff)
# relative_abs_diff = np.divide(abs_diff, gt + np.finfo(float).eps) # added epsilon to avoid division by zero.
# MRAEs[f] = np.mean(relative_abs_diff)
return rel_mae
class ZeroGanLoss(loss._Loss):
r"""Creates a criterion that measures the mean squared error between
`n` elements in the input `x` and target `y`.
The loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = \left( x_n - y_n \right)^2,
where :math:`N` is the batch size. If reduce is ``True``, then:
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if}\; \text{size_average} = \text{True},\\
\operatorname{sum}(L), & \text{if}\; \text{size_average} = \text{False}.
\end{cases}
`x` and `y` arbitrary shapes with a total of `n` elements each.
The sum operation still operates over all the elements, and divides by `n`.
The division by `n` can be avoided if one sets the internal variable
`size_average` to ``False``.
To get a batch of losses, a loss per batch element, set `reduce` to
``False``. These losses are not averaged and are not affected by
`size_average`.
Args:
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
size_average is set to ``False``, the losses are instead summed for
each minibatch. Only applies when reduce is ``True``. Default: ``True``
reduce (bool, optional): By default, the losses are averaged
over observations for each minibatch, or summed, depending on
size_average. When reduce is ``False``, returns a loss per batch
element instead and ignores size_average. Default: ``True``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
Examples::
>>> loss = nn.MSELoss()
>>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
>>> target = autograd.Variable(torch.randn(3, 5))
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, size_average=True, reduce=True):
super(ZeroGanLoss, self).__init__(size_average)
self.reduce = reduce
def forward(self, input, target):
# zero = Variable(torch.Tensor([0]).double())
zeros = input * 0.
return torch.sum(zeros)
|
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
|
1,516
|
liuhongbo830117/ntire2018_adv_rgb2hs
|
refs/heads/master
|
/data/icvl_dataset.py
|
import os.path
import random
import torchvision.transforms as transforms
import torch
# import torch.nn.functional as F
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset_from_dir_list
from PIL import Image, ImageOps
import h5py
import numpy as np
import spectral
from tqdm import tqdm
from joblib import Parallel, delayed
from util.spectral_color import dim_ordering_tf2th, dim_ordering_th2tf
class IcvlNtire2018Dataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.challenge = opt.challenge # 'Clean' or 'RealWorld'
self.root = opt.dataroot # e.g. icvl_ntire2018
assert (opt.phase in ['train', 'Validate', 'Test'])
self.dirlist_rgb = [os.path.join(self.root, 'NTIRE2018_Train1_' + self.challenge), os.path.join(self.root, 'NTIRE2018_Train2_' + self.challenge)] if opt.phase == 'train' else [os.path.join(self.root, 'NTIRE2018_' + opt.phase + '_' + self.challenge)] # A
self.dirlist_hs = [os.path.join(self.root, 'NTIRE2018_Train1_Spectral'), os.path.join(self.root, 'NTIRE2018_Train2_Spectral')] if opt.phase == 'train' else [os.path.join(self.root, 'NTIRE2018_' + opt.phase + '_Spectral')] # B
self.paths_rgb = sorted(make_dataset_from_dir_list(self.dirlist_rgb))
self.paths_hs = sorted(make_dataset_from_dir_list(self.dirlist_hs))
# self.dir_AB = os.path.join(opt.dataroot, opt.phase)
# self.AB_paths = sorted(make_dataset(self.dir_AB))
# print('RETURN TO FULL SIZE PATHS_hs and RGB') #fixme
# self.paths_rgb = self.paths_rgb[:5]
# self.paths_hs = self.paths_hs[:5]
# to handle envi files, so that we can do partial loads
self.use_envi = opt.use_envi
if self.use_envi:
# update self.dirlist_hs
self.dirlist_hs_mat = self.dirlist_hs
self.dirlist_hs = [os.path.join(self.root, 'NTIRE2018_Train_Spectral_envi')]
print(spectral.io.envi.get_supported_dtypes())
if opt.generate_envi_files:
self.generate_envi_files(overwrite_envi=opt.overwrite_envi)
# update self.paths_hs with the hdr files
self.paths_hs = sorted(make_dataset_from_dir_list(self.dirlist_hs))
# for dir_hs in self.dirlist_hs:
# if not os.path.exists(dir_hs):
assert(opt.resize_or_crop == 'resize_and_crop')
def __getitem__(self, index):
# AB_path = self.AB_paths[index]
# AB = Image.open(AB_path).convert('RGB')
# AB = AB.resize((self.opt.loadSize * 2, self.opt.loadSize), Image.BICUBIC)
# AB = transforms.ToTensor()(AB)
# load rgb image
path_rgb = self.paths_rgb[index]
rgb = Image.open(path_rgb)#.convert('RGB')
# fixme set it between 0,1?
# rgb = transforms.ToTensor()(rgb) # rgb.shape: torch.Size([3, 1392, 1300])
# sample crop locations
# w = rgb.shape[2] # over the tensor already
# h = rgb.shape[1] # over the tensor already
w = rgb.width #store them in self so as to accesswhile testing for cropping final result
h = rgb.height
w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))
h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))
# actually crop rgb image
if self.opt.phase.lower() == 'train':
if self.opt.challenge.lower() == 'realworld':
# print('realworld<----------------------------------jitter')
rgb = transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.01)(rgb)
rgb = transforms.ToTensor()(rgb) # rgb.shape: torch.Size([3, 1392, 1300])
# train on random crops
rgb_crop = rgb[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize] # rgb_crop is created as a tensor already
else:
topdown_pad = (1536 - h) // 2
leftright_pad = (1536 - w) // 2
full_img_padding = (leftright_pad, topdown_pad, leftright_pad, topdown_pad)
rgb_crop = ImageOps.expand(rgb, full_img_padding)
rgb_crop = transforms.ToTensor()(rgb_crop)
## load hs image
if self.opt.phase == 'train':
path_hs = self.paths_hs[index]
if self.use_envi:
hs = spectral.io.envi.open(path_hs) # https://github.com/spectralpython/spectral/blob/master/spectral/io/envi.py#L282 not loaded yet until read_subregion
# hs.shape: Out[3]: (1392, 1300, 31) (nrows, ncols, nbands)
# check dimensions and crop hs image (actually read only that one
# print(rgb.shape)
# print(hs.shape)
assert (rgb.shape[1] == hs.shape[0] and rgb.shape[2] == hs.shape[1])
hs_crop = (hs.read_subregion(row_bounds=(h_offset, h_offset + self.opt.fineSize), col_bounds=(w_offset, w_offset + self.opt.fineSize))).astype(float)
# hs_crop.shape = (h,w,c)=(256,256,31) here
hs_crop = hs_crop / 4095. * 255 # 4096: db max. totensor expects in [0, 255]
hs_crop = transforms.ToTensor()(hs_crop) # convert ndarray (h,w,c) [0,255]-> torch tensor (c,h,w) [0.0, 1.0] #move to GPU only the 256,256 crop!good!
else:
mat = h5py.File(path_hs) # b[{'rgb', 'bands', 'rad'}] # Shape: (Bands, Cols, Rows) <-> (bands, samples, lines)
hs = mat['rad'].value # ndarray (c,w,h)
hs = np.transpose(hs) # reverse axis order. ndarray (h,w,c). totensor expects this shape
hs = hs / 4095. * 255 #4096: db max. totensor expects in [0, 255]
hs = transforms.ToTensor()(hs) # convert ndarray (h,w,c) [0,255] -> torch tensor (c,h,w) [0.0, 1.0] #fixme why move everything and not only the crop to the gpu?
# check dimensions and crop hs image
# assert(rgb.shape[1] == hs.shape[1] and rgb.shape[2] == hs.shape[2])
if self.opt.phase == 'train':
# train on random crops
hs_crop = hs[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]
else:
# Validate or Test
hs_crop = hs #will pad on the net
# topdown_pad = (1536 - 1392) // 2
# leftright_pad = (1536 - 1300) // 2
# hs_crop = F.pad(hs, (leftright_pad, leftright_pad, topdown_pad, topdown_pad))
rgb_crop = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(rgb_crop) #fixme still valid in icvl?
if self.opt.phase == 'train':
hs_crop = transforms.Normalize(tuple([0.5] * 31), tuple([0.5] * 31))(hs_crop)
if self.opt.which_direction == 'BtoA':
input_nc = self.opt.output_nc
output_nc = self.opt.input_nc
else:
input_nc = self.opt.input_nc
output_nc = self.opt.output_nc
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(rgb_crop.size(2) - 1, -1, -1)]
idx = torch.LongTensor(idx)
rgb_crop = rgb_crop.index_select(2, idx)
if self.opt.phase == 'train':
hs_crop = hs_crop.index_select(2, idx)
if input_nc == 1: # RGB to gray
tmp = rgb_crop[0, ...] * 0.299 + rgb_crop[1, ...] * 0.587 + rgb_crop[2, ...] * 0.114
rgb_crop = tmp.unsqueeze(0)
if self.opt.phase == 'train':
if output_nc == 1: # RGB to gray
tmp = hs_crop[0, ...] * 0.299 + hs_crop[1, ...] * 0.587 + hs_crop[2, ...] * 0.114
hs_crop = tmp.unsqueeze(0)
if self.opt.phase == 'train':
return_dict = {'A': rgb_crop, 'B': hs_crop,
'A_paths': path_rgb, 'B_paths': path_hs}
else:
# we just use the rgb paths instead, won't use them anyway. nasty, I know
return_dict = {'A': rgb_crop, 'B': rgb_crop,
'A_paths': path_rgb, 'B_paths': path_rgb}
if self.opt.phase == 'Validate' or self.opt.phase == 'Test':
return_dict['full_img_padding'] = full_img_padding
return return_dict
def generate_single_envi_file(self, fpath_hs_mat, overwrite_envi=False):
dir_hs = self.dirlist_hs[0] # for brevity
hsmat = h5py.File(fpath_hs_mat) # b[{'rgb', 'bands', 'rad'}] # Shape: (Bands, Cols, Rows) <-> (bands, samples, lines)
hsnp = hsmat['rad'].value # hs image numpy array # ndarray (c,w,h)spec
# hdr = io.envi.read_envi_header(file='data/envi_template.hdr')
# hdr = self.update_hs_metadata(metadata=hdr, wl=hsmat['bands'].value.flatten())
hdr_file = os.path.join(dir_hs, os.path.splitext(os.path.basename(fpath_hs_mat))[0] + '.hdr')
spectral.io.envi.save_image(hdr_file=hdr_file, image=np.transpose(hsnp).astype(np.int16), force=overwrite_envi,
dtype=np.int16) # dtype int16 range: [-32000, 32000]
def generate_envi_files(self, overwrite_envi=False):
if not os.path.exists(self.dirlist_hs[0]):
os.makedirs(self.dirlist_hs[0])
nb_free_cores=1
Parallel(n_jobs=-1 - nb_free_cores)(
delayed(self.generate_single_envi_file)(fpath_hs_mat=fpath_hs_mat, overwrite_envi=overwrite_envi) for fpath_hs_mat in tqdm(self.paths_hs))
def create_base_hdr(self):
hdr=[]
"""
http://www.harrisgeospatial.com/docs/ENVIHeaderFiles.html#Example
data_Type: The type of data representation:
1 = Byte: 8-bit unsigned integer
2 = Integer: 16-bit signed integer
3 = Long: 32-bit signed integer
4 = Floating-point: 32-bit single-precision
5 = Double-precision: 64-bit double-precision floating-point
6 = Complex: Real-imaginary pair of single-precision floating-point
9 = Double-precision complex: Real-imaginary pair of double precision floating-point
12 = Unsigned integer: 16-bit
13 = Unsigned long integer: 32-bit
14 = 64-bit long integer (signed)
15 = 64-bit unsigned long integer (unsigned)"""
return hdr
def update_hs_metadata(self, metadata, wl):
metadata['interleave'] = 'bsq' # (Rows, Cols, Bands) <->(lines, samples, bands)
# metadata['lines'] = int(metadata['lines']) - 4 # lines = rows. Lines <= 1300
# metadata['samples'] = 1392 # samples = cols. Samples are 1392 for the whole dataset
# metadata['bands'] = len(wl)
metadata['data type'] = 4 #5 = Double-precision: 64-bit double-precision floating-point http://www.harrisgeospatial.com/docs/ENVIHeaderFiles.html#Example
metadata['wavelength'] = wl
metadata['default bands'] = [5, 15, 25]
metadata['fwhm'] = np.diff(wl)
metadata['vroi'] = [1, len(wl)]
return metadata
def __len__(self):
return len(self.paths_rgb)
def name(self):
return 'icvl_ntire2018_dataset'
|
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
|
1,517
|
liuhongbo830117/ntire2018_adv_rgb2hs
|
refs/heads/master
|
/data/aligned_dataset.py
|
import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image
|
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
|
1,518
|
liuhongbo830117/ntire2018_adv_rgb2hs
|
refs/heads/master
|
/eval/evaluation.py
|
# Evaluation script for the NTIRE 2018 Spectral Reconstruction Challenge
#
# * Provide input and output directories as arguments
# * Validation files should be found in the '/ref' subdirectory of the input dir
# * Input validation files are expected in the v7.3 .mat format
import h5py as h5py
import numpy as np
import sys
import os
MRAEs = {}
RMSEs = {}
def get_ref_from_file(filename):
matfile = h5py.File(filename, 'r')
mat={}
for k, v in matfile.items():
mat[k] = np.array(v)
return mat['rad']
#input and output directories given as arguments
[_, input_dir, output_dir] = sys.argv
validation_files = os.listdir(input_dir +'/ref')
for f in validation_files:
# Read ground truth data
if not(os.path.splitext(f)[1] in '.mat'):
print('skipping '+f)
continue
gt = get_ref_from_file(input_dir + '/ref/' + f)
# Read user submission
rc = get_ref_from_file(input_dir + '/res/' + f)
# compute MRAE
diff = gt-rc
abs_diff = np.abs(diff)
relative_abs_diff = np.divide(abs_diff,gt+np.finfo(float).eps) # added epsilon to avoid division by zero.
MRAEs[f] = np.mean(relative_abs_diff)
# compute RMSE
square_diff = np.power(diff,2)
RMSEs[f] = np.sqrt(np.mean(square_diff))
print(f)
print(MRAEs[f])
print(RMSEs[f])
MRAE = np.mean(MRAEs.values())
print("MRAE:\n"+MRAE.astype(str))
RMSE = np.mean(RMSEs.values())
print("\nRMSE:\n"+RMSE.astype(str))
with open(output_dir + '/scores.txt', 'w') as output_file:
output_file.write("MRAE:"+MRAE.astype(str))
output_file.write("\nRMSE:"+RMSE.astype(str))
|
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
|
1,519
|
liuhongbo830117/ntire2018_adv_rgb2hs
|
refs/heads/master
|
/eval/select_model.py
|
# -*- coding: utf-8 -*-
import pandas as pd
import os
import sacred
import glob
from sacred import Experiment
ex = Experiment('rename_to_samename')
@ex.config
def config():
results_home_dir = os.path.abspath('/home/aitor/dev/adv_rgb2hs_pytorch/results')
@ex.automain
def select_model(results_home_dir):
res_dir_list = glob.glob(results_home_dir + '/*')
dfall_list = []
for res_dir in res_dir_list:
exp = os.path.basename(res_dir)
fpath = os.path.join(res_dir, 'scores.txt')
try:
f = open(fpath)
except IOError:
print(fpath + ' does not exist')
else:
with f:
content = f.readlines()
content = [x.strip() for x in content]
results = dict([elem.split(':') for elem in content])
results = {k: [v] for k, v in results.items()} # from_dict() needs iterable as value per key/column name
results['exp'] = [exp]
dfbuff = pd.DataFrame.from_dict(results)
dfbuff = dfbuff.set_index('exp')
dfall_list.append(dfbuff)
dfall = pd.concat(dfall_list)
dfall = dfall.astype(float)
print(dfall.sort_values(by='RMSE', ascending=True))
print(dfall.sort_values(by='MRAE', ascending=True))
pass
|
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
|
1,520
|
liuhongbo830117/ntire2018_adv_rgb2hs
|
refs/heads/master
|
/util/spectral_color.py
|
# -*- coding: utf-8 -*-
import os
import numpy as np
from colour.plotting import *
import colour
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from skimage.color import colorconv
from spectral import *
### to avoid importing pyresources.assemple data
def dim_ordering_tf2th(img_list_ndarray):
"""
convert ndarray with dimensions ordered as tf to th
'tf' expects (nb_imgs, nb_rows, nb_cols, nb_channels) < -- compatible with plt.imshow(img_list[0,:,:,:])
'th' expects (nb_imgs, nb_channels, nb_rows, nb_cols)
Parameters
----------
img_list_ndarray: ndarray
Input ndarray of dimensions coherent with 'tf': (nb_imgs, nb_rows, nb_cols, nb_channels)
Returns
-------
img_ndarray: ndarray
Output ndarray of dimensions coherent with 'th': (nb_imgs, nb_channels, nb_rows, nb_cols)
"""
if len(img_list_ndarray.shape) == 4:
img_list_ndarray = np.rollaxis(img_list_ndarray, 3, 1)
elif len(img_list_ndarray.shape) == 3: # single image
img_list_ndarray = np.rollaxis(img_list_ndarray, 2, 0)
else:
raise NotImplementedError('Input must be 3 or 4 dimnesional ndarray')
return img_list_ndarray
def dim_ordering_th2tf(img_list_ndarray):
"""
convert ndarray with dimensions ordered as th to tf
'tf' expects (nb_imgs, nb_rows, nb_cols, nb_channels) < -- compatible with plt.imshow(img_list[0,:,:,:])
'th' expects (nb_imgs, nb_channels, nb_rows, nb_cols)
Parameters
----------
img_list_ndarray: ndarray
Input ndarray of dimensions coherent with 'th': (nb_imgs, nb_channels, nb_rows, nb_cols)
Returns
-------
img_ndarray: ndarray
Output ndarray of dimensions coherent with 'tf': (nb_imgs, nb_rows, nb_cols, nb_channels)
"""
if len(img_list_ndarray.shape) == 4:
img_list_ndarray = np.rollaxis(img_list_ndarray, 1, 4)
elif len(img_list_ndarray.shape) == 3: # single image
img_list_ndarray = np.rollaxis(img_list_ndarray, 0, 3)
else:
raise NotImplementedError('Input must be 3 or 4 dimnesional ndarray')
return img_list_ndarray
def spectral2XYZ_img_vectorized(cmfs, R):
    """
    Convert flattened spectral samples to CIE XYZ tristimulus values.

    Fix: removed leftover debug code that plotted the CMFs and overwrote
    'cmf_cie1964_10.png' in the working directory on every call.

    Parameters
    ----------
    cmfs: ndarray
        Colour matching functions sampled on the same wavelength grid as R
        (here: 400..700 nm in 10 nm steps, i.e. 31 bands).
    R: np.ndarray (nb_pixels, 31) in [0., 1.]
        Per-pixel spectra, treated as reflectance.

    Returns
    -------
    XYZ: ndarray (nb_pixels, 3)
        Tristimulus values on a 0..100 scale (Y of a perfect diffuser is 100).
    """
    # x_bar is the double-peaked, rightmost curve (red); z_bar is the leftmost one (blue)
    x_bar, y_bar, z_bar = colour.tsplit(cmfs)
    # Illuminant: we assume that the captured R is reflectance with illuminant E
    # (although it really is not, it is reflected radiance with an unknown
    # illuminant, but the result is the same). Slice picks 400..700 nm, 10 nm step.
    S = colour.ILLUMINANTS_RELATIVE_SPDS['E'].values[20:81:2] / 100.
    dw = 10  # wavelength step in nm
    # k normalizes so that Y == 100 for a perfect reflecting diffuser
    k = 100 / (np.sum(y_bar * S) * dw)
    X_p = R * x_bar * S * dw  # R(N,31) * x_bar(31,) * S(31,) * dw(1,)
    Y_p = R * y_bar * S * dw
    Z_p = R * z_bar * S * dw
    XYZ = k * np.sum(np.array([X_p, Y_p, Z_p]), axis=-1)
    XYZ = np.rollaxis(XYZ, 1, 0)  # (3, N) -> (N, 3)
    return XYZ
def spectral2XYZ_img(hs, cmf_name, image_data_format='channels_last'):
    """
    Convert a spectral image to an XYZ (tristimulus values) image.

    Parameters
    ----------
    hs: numpy.ndarray
        3D spectral image: (h, w, c) for 'channels_last' or (c, h, w) for 'channels_first'.
    cmf_name: str
        Identifier of the colour matching functions (see get_cmfs).
    image_data_format: str {'channels_last', 'channels_first'}
        Dim ordering of the input; the output follows the same ordering.

    Returns
    -------
    XYZ: numpy.ndarray
        (h, w, 3) or (3, h, w) tristimulus image, matching the input ordering.
    """
    if image_data_format == 'channels_first':
        hs = dim_ordering_th2tf(hs)  # work internally in channels_last
    elif image_data_format != 'channels_last':
        raise AttributeError('Wrong image_data_format parameter ' + image_data_format)
    h, w, c = hs.shape
    flat_spectra = hs.reshape(-1, c)  # one spectrum per row
    cmfs = get_cmfs(cmf_name=cmf_name, nm_range=(400., 700.), nm_step=10, split=False)
    XYZ = spectral2XYZ_img_vectorized(cmfs, flat_spectra)  # (nb_px, 3)
    XYZ = XYZ.reshape((h, w, 3))  # restore spatial shape (needed by xyz2rgb callers)
    if image_data_format == 'channels_first':
        XYZ = dim_ordering_tf2th(XYZ)  # give the caller back their ordering
    return XYZ
def spectral2sRGB_img(spectral, cmf_name, image_data_format='channels_last'):
    """
    Convert a spectral image to an sRGB image.

    Parameters
    ----------
    spectral: numpy.ndarray
        3D spectral image: (h, w, c) for 'channels_last' or (c, h, w) for 'channels_first'.
    cmf_name: str
        Identifier of the colour matching functions (see get_cmfs).
    image_data_format: str {'channels_last', 'channels_first'}
        Dim ordering of the input; the output follows the same ordering.

    Returns
    -------
    sRGB: numpy.ndarray
        (h, w, 3) or (3, h, w) sRGB image, matching the input ordering.
    """
    XYZ = spectral2XYZ_img(hs=spectral, cmf_name=cmf_name, image_data_format=image_data_format)
    if image_data_format == 'channels_first':
        XYZ = dim_ordering_th2tf(XYZ)  # xyz2rgb needs channels_last
    elif image_data_format != 'channels_last':
        raise AttributeError('Wrong image_data_format parameter ' + image_data_format)
    # XYZ is on a 0..100 scale (see the k factor in spectral2XYZ_img_vectorized),
    # so divide by 100 before handing it to skimage's converter.
    sRGB = colorconv.xyz2rgb(XYZ / 100.)
    if image_data_format == 'channels_first':
        sRGB = dim_ordering_tf2th(sRGB)  # restore the caller's ordering
    return sRGB
def save_hs_as_envi(fpath, hs31, image_data_format_in='channels_last'):#, image_data_format_out='channels_last'):
    """Write a 31-band hyperspectral image to disk in ENVI format.

    fpath: destination path without the '.hdr' extension (a sibling '.hdr' is created).
    hs31: 3D spectral cube; converted to (rows, cols, bands) before writing.
    image_data_format_in: ordering of hs31 ('channels_last' or 'channels_first').
    """
    #output is always channels_last
    if image_data_format_in == 'channels_first':
        hs31 = dim_ordering_th2tf(hs31)
    elif image_data_format_in != 'channels_last':
        raise AttributeError('Wrong image_data_format_in')
    # dst_dir = os.path.dirname(fpath)
    hdr_fpath = fpath + '.hdr'
    # Band wavelengths: 400..700 nm inclusive, 10 nm step -> 31 bands
    wl = np.arange(400, 701, 10)
    hs31_envi = envi.create_image(hdr_file=hdr_fpath,
                                  metadata=generate_metadata(wl=wl),
                                  shape=hs31.shape,  # Must be in (Rows, Cols, Bands)
                                  force=True,
                                  dtype=np.float32,  # np.float32, 32MB/img np.ubyte: 8MB/img
                                  ext='.envi31')
    # Write the pixel data through the memory-mapped view of the new file
    mm = hs31_envi.open_memmap(writable=True)
    mm[:, :, :] = hs31
def generate_metadata(wl):
    """Build the ENVI header metadata dict for a band-sequential spectral cube.

    wl: array-like of band wavelengths (nm).
    """
    return {
        'interleave': 'bsq',  # (Rows, Cols, Bands) <-> (lines, samples, bands)
        'data type': 12,
        'wavelength': wl,
        'default bands': [22, 15, 6],  # for spectral2dummyRGB
        'fwhm': np.diff(wl),
    }
def load_envi(fpath_envi, fpath_hdr=None):
    """Open an ENVI image; the header path defaults to the data path with '.hdr' extension."""
    if fpath_hdr is None:
        fpath_hdr = os.path.splitext(fpath_envi)[0] + '.hdr'
    return io.envi.open(fpath_hdr, fpath_envi)
def get_cmfs(cmf_name='cie1964_10', nm_range=(400., 700.), nm_step=10, split=True):
    """Fetch standard-observer colour matching functions, trimmed and subsampled.

    cmf_name: one of 'cie1931_2', 'cie1931_10', 'cie1964_2', 'cie1964_10'.
    nm_range: inclusive (first, last) wavelength in nm to keep.
    nm_step: subsampling step in nm.
    split: if True return (x_bar, y_bar, z_bar); otherwise the (bands, 3) array.
    """
    full_names = {
        'cie1931_2': 'CIE 1931 2 Degree Standard Observer',
        'cie1931_10': 'CIE 1931 10 Degree Standard Observer',
        'cie1964_2': 'CIE 1964 2 Degree Standard Observer',
        'cie1964_10': 'CIE 1964 10 Degree Standard Observer',
    }
    if cmf_name not in full_names:
        raise AttributeError('Wrong cmf name')
    cmfs = colour.STANDARD_OBSERVERS_CMFS[full_names[cmf_name]]
    # trim to nm_range and subsample every nm_step nanometres
    ix_wl_first = np.where(cmfs.wavelengths == nm_range[0])[0][0]
    ix_wl_last = np.where(cmfs.wavelengths == nm_range[1] + 1.)[0][0]
    cmfs = cmfs.values[ix_wl_first:ix_wl_last:int(nm_step), :]  # make sure the nm_step is an int
    if split:
        # x_bar is the double-peaked, rightmost curve (red); z_bar is the leftmost one (blue)
        x_bar, y_bar, z_bar = colour.tsplit(cmfs)
        return x_bar, y_bar, z_bar
    return cmfs
|
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
|
1,536
|
lonce/dcn_soundclass
|
refs/heads/master
|
/testPickledModel.py
|
"""
eg
python testPickledModel.py logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/state.pickle
"""
import tensorflow as tf
import numpy as np
import pickledModel
from PIL import TiffImagePlugin
from PIL import Image
# get args from command line
import argparse
FLAGS = None
VERBOSE=False
# ------------------------------------------------------
# get any args provided on the command line
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('pickleFile', type=str, help='stored graph' )
FLAGS, unparsed = parser.parse_known_args()
# Spectrogram dimensions: frequency bins x time frames
k_freqbins=257
k_width=856
# Rebuild the saved graph; styg is indexed later by tensor name ("X", "softmax_preds")
# NOTE(review): assumes pickledModel.load returns a dict-like of named tensors - confirm
styg = pickledModel.load(FLAGS.pickleFile)
print(' here we go ........')
def soundfileBatch(slist):
    """Load every spectrogram file named in slist via pickledModel.loadImage."""
    return list(map(pickledModel.loadImage, slist))
#just test the validation set
#Flipping and scaling seem to have almost no effect on the clasification accuracy
rimages=soundfileBatch(['data2/validate/205 - Chirping birds/5-242490-A._11_.tif',
                        'data2/validate/205 - Chirping birds/5-242491-A._12_.tif',
                        'data2/validate/205 - Chirping birds/5-243448-A._14_.tif',
                        'data2/validate/205 - Chirping birds/5-243449-A._15_.tif',
                        'data2/validate/205 - Chirping birds/5-243450-A._15_.tif',
                        'data2/validate/205 - Chirping birds/5-243459-A._13_.tif',
                        'data2/validate/205 - Chirping birds/5-243459-B._13_.tif',
                        'data2/validate/205 - Chirping birds/5-257839-A._10_.tif',
                        'data2/validate/101 - Dog/5-203128-A._4_.tif',
                        'data2/validate/101 - Dog/5-203128-B._5_.tif',
                        'data2/validate/101 - Dog/5-208030-A._9_.tif',
                        'data2/validate/101 - Dog/5-212454-A._4_.tif',
                        'data2/validate/101 - Dog/5-213855-A._4_.tif',
                        'data2/validate/101 - Dog/5-217158-A._2_.tif',
                        'data2/validate/101 - Dog/5-231762-A._1_.tif',
                        'data2/validate/101 - Dog/5-9032-A._12_.tif',
                        ])
# scratch buffer; overwritten below by actual images
im=np.empty([1,1,k_width,k_freqbins ])
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
with tf.Session() as sess:
    predictions=[]
    sess.run ( tf.global_variables_initializer ())
    #print('ok, all initialized')
    # Debug block (disabled): dump all global variables
    if 0 :
        print ('...GLOBAL_VARIABLES :') #probalby have to restore from checkpoint first
        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        for v in all_vars:
            v_ = sess.run(v)
            print(v_)
    # Debug block (disabled): print the style-model weight tensors by name
    if 0 :
        for v in ["s_w1:0", "s_b1:0", "s_w2:0", "s_b2:0", "s_W_fc1:0", "s_b_fc1:0", "s_W_fc2:0", "s_b_fc2:0"] :
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v)))
    # Debug block (enabled): feed one image and print the first hidden layer activations
    if 1 :
        for v in ["s_h1:0"] :
            #im = np.reshape(np.transpose(rimages[6]), [1,k_width*k_freqbins ])
            im=rimages[6]
            print('assigning input variable an image with shape ' + str(im.shape))
            sess.run(styg["X"].assign(im)) #transpose to make freqbins channels
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v)))
    # Main loop: classify each validation image one at a time by assigning it
    # to the input variable and evaluating the softmax output
    print('predictions are : ')
    for im_ in rimages :
        #im = np.reshape(np.transpose(im_), [1,k_width*k_freqbins ])
        im=im_
        sess.run(styg["X"].assign(im)) #transpose to make freqbins channels
        prediction = sess.run(styg["softmax_preds"])
        print(str(prediction[0]))
        #predictions.extend(prediction[0])
#pickledModel.save_image(np.transpose(im, [0,3,2,1])[0,:,:,0],'fooimage.tif')
# Save the last image fed through the net, for visual sanity checking
pickledModel.save_image(im[0,:,:,:],'fooimage.tif')
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,537
|
lonce/dcn_soundclass
|
refs/heads/master
|
/utils/ESC50_Convert.py
|
import os
import numpy as np
import matplotlib.pyplot as plt
# https://github.com/librosa/librosa
import librosa
import librosa.display
import scipy
from PIL import TiffImagePlugin
from PIL import Image
import tiffspect
# Set some project parameters
K_SR = 22050          # target sample rate (Hz) for all audio
K_FFTSIZE = 512 # also used for window length where that parameter is called for
K_HOP = 128           # STFT hop size in samples
K_DUR = 5.0 # make all files this duration
K_FRAMEMULTIPLEOF = 4 # some programs like to have convinent dimensions for conv and decimation
                      # the last columns of a matrix are removed if necessary to satisfy
                      # 1 means any number of frames will work
# location of subdirectories of ogg files organized by category
K_OGGDIR = '/home/lonce/tflow/DATA-SETS/ESC-50'
# location to write the wav files (converted from ogg)
K_WAVEDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-wave'
# location to write the spectrogram files (converted from wave files)
K_SPECTDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-spect'
#===============================================
def get_subdirs(a_dir):
    """Return the names of the non-hidden immediate subdirectories of a_dir."""
    subdirs = []
    for entry in os.listdir(a_dir):
        if entry.startswith('.'):
            continue  # skip hidden entries
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
def listDirectory(directory, fileExtList):
    """Return (full paths, file names) for directory entries whose extension is in fileExtList
    - include the . in your extension string.

    Note the asymmetry (preserved from the original): the returned name list
    contains ALL non-hidden entries, while the path list is extension-filtered.
    """
    fnameList = []
    for entry in os.listdir(directory):
        if not entry.startswith('.'):
            fnameList.append(os.path.normcase(entry))
    fileList = []
    for name in fnameList:
        if os.path.splitext(name)[1] in fileExtList:
            fileList.append(os.path.join(directory, name))
    return fileList, fnameList
def dirs2labelfile(parentdir, labelfile):
    """Write the subdirectory names of parentdir to labelfile, one per line (no trailing newline)."""
    with open(labelfile, mode='wt') as fh:
        fh.write('\n'.join(get_subdirs(parentdir)))
# ===============================================
def stereo2mono(data):
    """Average the first two channels of a 2D (samples, channels) array into mono.

    Deprecated, since we use librosa for this now.

    Parameters
    ----------
    data : np.ndarray
        Expected shape (n_samples, n_channels>=2). Non-2D input is returned
        unchanged. (Bug fix: the original allocated an *uninitialized*
        np.ndarray up front and returned that garbage for non-2D input.)

    Returns
    -------
    np.ndarray
        Shape (n_samples,), dtype float32, for 2D input; otherwise `data` as-is.
    """
    print('converting stereo data of shape ' + str(data.shape))
    if data.ndim != 2:
        print('You are calling stero2mono on a non-2D array')
        return data
    # Vectorized replacement for the original per-sample Python loop;
    # like the original, only channels 0 and 1 contribute.
    outdata = ((data[:, 0] + data[:, 1]) / 2).astype(np.float32)
    print(' converting stereo to mono, with outdata shape = ' + str(outdata.shape))
    return outdata
# ===============================================
def esc50Ogg2Wav (topdir, outdir, dur, srate) :
    """
    Creates regularlized wave files for the ogg files in the ESC-50 dataset.
    Creates class folders for the wav files in outdir with the same structure found in topdir.
    Every output file is resampled to srate, mono, and exactly dur seconds
    (truncated or zero-padded as needed).
    Parameters
    topdir - the ESC-50 dir containing class folders.
    outdir - the top level directory to write wave files to (written in to class subfolders)
    dur - (in seconds) all files will be truncated or zeropadded to have this duration given the srate
    srate - input files will be resampled to srate as they are read in before being saved as wav files
    """
    sample_length = int(dur * srate)
    try:
        os.stat(outdir) # test for existence
    except:
        os.mkdir(outdir) # create if necessary
    subdirs = get_subdirs(topdir)
    for subdir in subdirs :
        try:
            os.stat(outdir + '/' + subdir) # test for existence
        except:
            os.mkdir(outdir + '/' + subdir) # create if necessary
            print('creating ' + outdir + '/' + subdir)
        fullpaths, _ = listDirectory(topdir + '/' + subdir, '.ogg')
        for idx in range(len(fullpaths)) :
            fname = os.path.basename(fullpaths[idx])
            # librosa.load resamples to sr, clips to duration, combines channels.
            audiodata, samplerate = librosa.load(fullpaths[idx], sr=srate, mono=True, duration=dur) # resamples if necessary (some esc-50 files are in 48K)
            # just checking ..... these branches are belt-and-braces; librosa.load
            # should already have produced mono audio at srate
            if (samplerate != srate) :
                print('You got a sound file ' + subdir + '/' + fname + ' with sample rate ' + str(samplerate) + '!')
                print(' ********* BAD SAMPLE RATE ******** ')
            if (audiodata.ndim != 1) :
                print('You got a sound file ' + subdir + '/' + fname + ' with ' + str(audiodata.ndim) + ' channels!')
                audiodata = stereo2mono(audiodata)
            # truncate long files to exactly sample_length samples
            if (len(audiodata) > sample_length) :
                print('You got a long sound file ' + subdir + '/' + fname + ' with shape ' + str(audiodata.shape) + '!')
                audiodata = np.resize(audiodata, sample_length)
                # print(' ..... and len(audiodata) = ' + str(len(audiodata)) + ', while sample_length is sposed to be ' + str(sample_length))
                print('trimming data to shape ' + str(audiodata.shape))
            # zero-pad short files up to sample_length samples
            if (len(audiodata) < sample_length) :
                print('You got a short sound file ' + subdir + '/' + fname + ' with shape ' + str(audiodata.shape) + '!')
                audiodata = np.concatenate([audiodata, np.zeros((sample_length-len(audiodata)))])
                print(' zero padding data to shape ' + str(audiodata.shape))
            # write the file out as a wave file
            librosa.output.write_wav(outdir + '/' + subdir + '/' + os.path.splitext(fname)[0] + '.wav', audiodata, samplerate)
# ===============================================
def wav2spect(fname, srate, fftSize, fftHop, dur=None, showplt=False, dcbin=True, framesmulitpleof=1) :
    """Load an audio file and return its log-magnitude (dB) spectrogram.

    fname - audio file readable by librosa
    srate - resample target rate (Hz)
    fftSize - FFT size, also used as window length
    fftHop - hop size in samples
    dur - optional max duration (seconds) to load
    showplt - if truthy, display the spectrogram (value is also used as the plot title)
    dcbin - if False, drop the DC (freq 0) row
    framesmulitpleof - trim trailing columns so the frame count is a multiple of this
    Returns the dB spectrogram ndarray, or None if the file cannot be read.
    """
    try:
        audiodata, samplerate = librosa.load(fname, sr=srate, mono=True, duration=dur)
    except:
        print('can not read ' + fname)
        return  # NOTE(review): implicit None return; callers must handle it
    S = np.abs(librosa.stft(audiodata, n_fft=fftSize, hop_length=fftHop, win_length=fftSize, center=False))
    if (dcbin == False) :
        S = np.delete(S, (0), axis=0) # delete freq 0 row
        #note: a pure DC input signal bleeds into bin 1, too.
    #trim the non-mulitple fat if necessary
    nr, nc = S.shape
    fat = nc%framesmulitpleof
    # remove the last `fat` columns, one at a time from the right
    for num in range(0,fat):
        S = np.delete(S, (nc-1-num), axis=1)
    D = librosa.amplitude_to_db(S, ref=np.max)
    if showplt : # Dangerous for long runs - it opens a new figure for each file!
        librosa.display.specshow(D, y_axis='linear', x_axis='time', sr=srate, hop_length=fftHop)
        plt.colorbar(format='%+2.0f dB')
        plt.title(showplt)
        plt.show(block=True)
    return D
# ===============================================
def esc50Wav2Spect(topdir, outdir, dur, srate, fftSize, fftHop, showplt=False, dcbin=True) :
    """
    Creates spectrograms for subfolder-labeled wavfiles.
    Creates class folders for the spectrogram files in outdir with the same structure found in topdir.
    Each spectrogram is written as a TIFF via tiffspect.logSpect2Tiff.
    Parameters
    topdir - the dir containing class folders containing wav files.
    outdir - the top level directory to write wave files to (written in to class subfolders)
    dur - (in seconds) all files will be truncated or zeropadded to have this duration given the srate
    srate - input files will be resampled to srate as they are read in before being saved as wav files
    """
    try:
        os.stat(outdir) # test for existence
    except:
        os.mkdir(outdir) # create if necessary
    subdirs = get_subdirs(topdir)
    count = 0
    for subdir in subdirs :
        try:
            os.stat(outdir + '/' + subdir) # test for existence
        except:
            os.mkdir(outdir + '/' + subdir) # create if necessary
            print('creating ' + outdir + '/' + subdir)
        fullpaths, _ = listDirectory(topdir + '/' + subdir, '.wav')
        for idx in range(len(fullpaths)) :
            fname = os.path.basename(fullpaths[idx])
            # librosa.load resamples to sr, clips to duration, combines channels.
            # (The commented scratch code below was folded into wav2spect.)
            #
            #try:
            #    audiodata, samplerate = librosa.load(fullpaths[idx], sr=srate, mono=True, duration=dur)
            #except:
            #    print('can not read ' + fname)
            #
            #S = np.abs(librosa.stft(audiodata, n_fft=fftSize, hop_length=fftHop, win_length=fftSize, center=False))
            #
            #if (! dcbin) :
            #    S = np.delete(S, (0), axis=0) # delete freq 0 row
            ##print('esc50Wav2Spect" Sfoo max is ' + str(np.max(Sfoo)) + ', and Sfoo sum is ' + str(np.sum(Sfoo)) + ', and Sfoo min is ' + str(np.min(Sfoo)))
            #
            #
            #D = librosa.amplitude_to_db(S, ref=np.max)
            D = wav2spect(fullpaths[idx], srate, fftSize, fftHop, dur=dur, dcbin=True, showplt=False, framesmulitpleof=K_FRAMEMULTIPLEOF)
            #plt.title(str(count) + ': ' + subdir + '/' + os.path.splitext(fname)[0])
            tiffspect.logSpect2Tiff(D, outdir + '/' + subdir + '/' + os.path.splitext(fname)[0] + '.tif')
            print(str(count) + ': ' + subdir + '/' + os.path.splitext(fname)[0])
            count +=1
# ===============================================
# DO IT
# Pipeline: ogg -> wav -> spectrogram -> label file. Earlier stages are
# commented out; re-enable them when their outputs do not yet exist on disk.
#esc50Ogg2Wav(K_OGGDIR, K_WAVEDIR, K_DUR, K_SR)
#esc50Wav2Spect(K_WAVEDIR, K_SPECTDIR, K_DUR, K_SR, K_FFTSIZE, K_HOP, dcbin=True)
dirs2labelfile(K_SPECTDIR, K_SPECTDIR + '/labels.text')
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,538
|
lonce/dcn_soundclass
|
refs/heads/master
|
/trainedModel.py
|
#
#
#Morgans great example code:
#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
#
# GitHub utility for freezing graphs:
#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py
#
#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants
import tensorflow as tf
import numpy as np
#global variables
g_st_saver=None      # tf.train.Saver created by load() from the meta graph
g_chkptdir=None      # checkpoint dir remembered by load() for initialize_variables()
g_trainedgraph=None  # the imported default graph
VERBOSE=1
#-------------------------------------------------------------
def load(meta_model_file, restore_chkptDir) :
    """Import a stored meta graph and remember the checkpoint dir for a later restore.

    Returns (graph, saver). Variable values are NOT restored here; call
    initialize_variables(sess) inside an active session to do that.
    """
    global g_st_saver
    global g_chkptdir
    global g_trainedgraph
    g_st_saver = tf.train.import_meta_graph(meta_model_file)
    # Access the graph
    g_trainedgraph = tf.get_default_graph()
    # NOTE(review): this Session only wraps the global assignment; the actual
    # restore is commented out and deferred to initialize_variables().
    with tf.Session() as sess:
        g_chkptdir=restore_chkptDir # save in global for use during initialize
        #g_st_saver.restore(sess, tf.train.latest_checkpoint(restore_chkptDir))
    return g_trainedgraph, g_st_saver
def initialize_variables(sess) :
    """Restore variable values into sess from the latest checkpoint in g_chkptdir.

    Must be called after load(); relies on the module globals it sets.
    """
    g_st_saver.restore(sess, tf.train.latest_checkpoint(g_chkptdir))
    # 'useful' is a custom collection key the training script stored tensors under
    tf.GraphKeys.USEFUL = 'useful'
    var_list = tf.get_collection(tf.GraphKeys.USEFUL)
    #print('var_list[3] is ' + str(var_list[3]))
    #JUST WANTED TO TEST THIS TO COMPARE TO STYLE MODEL CODE
    # Now get the values of the trained graph in to the new style graph
    #sess.run((g_trainedgraph.get_tensor_by_name("w1:0")).assign(var_list[3]))
    #sess.run(g_trainedgraph.get_tensor_by_name("b1:0").assign(var_list[4]))
    #sess.run(g_trainedgraph.get_tensor_by_name("w2:0").assign(var_list[5]))
    #sess.run(g_trainedgraph.get_tensor_by_name("b2:0").assign(var_list[6]))
    #sess.run(g_trainedgraph.get_tensor_by_name("W_fc1:0").assign(var_list[7]))
    #sess.run(g_trainedgraph.get_tensor_by_name("b_fc1:0").assign(var_list[8]))
    #sess.run(g_trainedgraph.get_tensor_by_name("W_fc2:0").assign(var_list[9]))
    #sess.run(g_trainedgraph.get_tensor_by_name("b_fc2:0").assign(var_list[10]))
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,539
|
lonce/dcn_soundclass
|
refs/heads/master
|
/testTrainedModel.py
|
"""
eg
python testModel.py logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/my-model.meta logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/checkpoints/
"""
import tensorflow as tf
import numpy as np
import trainedModel
from PIL import TiffImagePlugin
from PIL import Image
# get args from command line
import argparse
FLAGS = None
VERBOSE=False
# ------------------------------------------------------
# get any args provided on the command line
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('metamodel', type=str, help='stored graph' )
parser.add_argument('checkptDir', type=str, help='the checkpoint directory from where the latest checkpoint will be read to restore values for variables in the graph' )
FLAGS, unparsed = parser.parse_known_args()
# Spectrogram dimensions: frequency bins x time frames
k_freqbins=257
k_width=856
g, savior = trainedModel.load(FLAGS.metamodel, FLAGS.checkptDir)
#vnamelist =[n.name for n in tf.global_variables()]
if VERBOSE :
    vnamelist =[n.name for n in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
    print('TRAINABLE vars:')
    for n in vnamelist :
        print(n)
#opslist = [n.name for n in g.get_operations()]
#print('----Operatios in graph are : ' + str(opslist))
# 'useful' is the custom collection the training script stored key tensors under
tf.GraphKeys.USEFUL = 'useful'
if VERBOSE :
    print ('...and useful :') #probalby have to restore from checkpoint first
    all_vars = tf.get_collection(tf.GraphKeys.USEFUL)
    for v in all_vars:
        print(v)
#
#print(' here we go ........')
var_list = tf.get_collection(tf.GraphKeys.USEFUL)
# Collection order as stored at training time (per the comments below):
# [X placeholder, keepProb placeholder, softmax_preds, h1, h2]
####tf.add_to_collection(tf.GraphKeys.USEFUL, X) #input place holder
####tf.add_to_collection(tf.GraphKeys.USEFUL, keepProb) #place holder
####tf.add_to_collection(tf.GraphKeys.USEFUL, softmax_preds)
####tf.add_to_collection(tf.GraphKeys.USEFUL, h1)
####tf.add_to_collection(tf.GraphKeys.USEFUL, h2)
#X = g.get_tensor_by_name('X/Adam:0')# placeholder for input
#X = tf.placeholder(tf.float32, [None,k_freqbins*k_width], name= "X")
X=var_list[0]
#print('X is ' + str(X))
#keepProb = g.get_tensor_by_name('keepProb')
#keepProb=tf.placeholder(tf.float32, (), name= "keepProb")
keepProb=var_list[1]
#print('keepProb is ' + str(keepProb))
softmax_preds=var_list[2]
# sanity check: the restored tensors live in the current default graph
assert softmax_preds.graph is tf.get_default_graph()
def soundfileBatch(slist):
    """Open each TIFF in slist, scale pixel values by 255 (matching the training
    pipeline), and flatten each image to a 1D vector."""
    batch = []
    for fname in slist:
        scaled = Image.open(fname).point(lambda px: px * 255)
        batch.append(np.array(scaled).flatten())
    return batch
#just test the validation set
#Flipping and scaling seem to have almost no effect on the clasification accuracy
rimages=soundfileBatch(['data2/validate/205 - Chirping birds/5-242490-A._11_.tif',
                        'data2/validate/205 - Chirping birds/5-242491-A._12_.tif',
                        'data2/validate/205 - Chirping birds/5-243448-A._14_.tif',
                        'data2/validate/205 - Chirping birds/5-243449-A._15_.tif',
                        'data2/validate/205 - Chirping birds/5-243450-A._15_.tif',
                        'data2/validate/205 - Chirping birds/5-243459-A._13_.tif',
                        'data2/validate/205 - Chirping birds/5-243459-B._13_.tif',
                        'data2/validate/205 - Chirping birds/5-257839-A._10_.tif',
                        'data2/validate/101 - Dog/5-203128-A._4_.tif',
                        'data2/validate/101 - Dog/5-203128-B._5_.tif',
                        'data2/validate/101 - Dog/5-208030-A._9_.tif',
                        'data2/validate/101 - Dog/5-212454-A._4_.tif',
                        'data2/validate/101 - Dog/5-213855-A._4_.tif',
                        'data2/validate/101 - Dog/5-217158-A._2_.tif',
                        'data2/validate/101 - Dog/5-231762-A._1_.tif',
                        'data2/validate/101 - Dog/5-9032-A._12_.tif',
                        ])
#rimages=np.random.uniform(0.,1., (3,k_freqbins*k_width))
#print('got my image, ready to run!')
#Z = tf.placeholder(tf.float32, [k_freqbins*k_width], name= "Z")
#Y=tf.Variable(tf.truncated_normal([k_freqbins*k_width], stddev=0.1), name="Y")
#Y=tf.assign(Y,Z)
#with tf.Session() as sess:
#    sess.run ( tf.global_variables_initializer ())
#    foo = sess.run(Y, feed_dict={Z: rimage})
print(' here we go ........')
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
with tf.Session() as sess:
    #sess.run ( tf.global_variables_initializer ())
    #savior.restore(sess, tf.train.latest_checkpoint(FLAGS.checkptDir))
    # restore trained weights from the latest checkpoint (see trainedModel)
    trainedModel.initialize_variables(sess)
    # Debug block (disabled): dump all global variables
    if 0 :
        print ('...GLOBAL_VARIABLES :') #probalby have to restore from checkpoint first
        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        for v in all_vars:
            v_ = sess.run(v)
            print(v_)
    # Debug block (disabled): print the weight tensors by name
    if 0 :
        for v in ["w1:0", "b1:0", "w2:0", "b2:0", "W_fc1:0", "b_fc1:0", "W_fc2:0", "b_fc2:0"] :
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v)))
    # Debug block (enabled): feed one image and print the first hidden layer activations
    if 1 :
        for v in ["h1:0"] :
            im = np.reshape(rimages[6], [1,k_width*k_freqbins ])
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v), feed_dict ={ X : im, keepProb : 1.0 }))
    # Classify each validation image one at a time
    print('predictions are : ')
    for im_ in rimages :
        im = np.reshape(im_, [1,k_width*k_freqbins ])
        prediction = sess.run(softmax_preds, feed_dict ={ X : im, keepProb : 1.0 })
        print(str(prediction[0]))
    # Run the standard way .... in batches
    #predictions = sess.run(softmax_preds, feed_dict ={ X : rimages , keepProb : 1.0 })
    #print('predictions are : ')
    #print(str(predictions))
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,540
|
lonce/dcn_soundclass
|
refs/heads/master
|
/style_transfer.py
|
""" An implementation of the paper "A Neural Algorithm of Artistic Style"
by Gatys et al. in TensorFlow.
Author: Chip Huyen (huyenn@stanford.edu)
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import tensorflow as tf
import pickledModel
# get args from command line
import argparse
FLAGS = []
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--content', type=str, help='name of file in content dir, w/o .ext' )
parser.add_argument('--style', type=str, help='name of file in style dir, w/o .ext' )
parser.add_argument('--noise', type=float, help='in range [0,1]', default=.5 )
parser.add_argument('--iter', type=int, help='number of iterations (on cpu, runtime is less than 1 sec/iter)', default=600 )
parser.add_argument('--alpha', type=float, help='amount to weight conent', default=10 )
parser.add_argument('--beta', type=float, help='amount to weight style', default=200 )
parser.add_argument('--randomize', type=int, help='0: use trained weights, 1: randomize model weights', choices=[0,1], default=0 )
parser.add_argument('--weightDecay', type=float, help='factor for L2 loss to keep vals in [0,255]', default=.01 )
parser.add_argument('--outdir', type=str, help='for output images', default="." )
parser.add_argument('--stateFile', type=str, help='stored graph', default=None )
FLAGS, unparsed = parser.parse_known_args()
print('\n FLAGS parsed : {0}'.format(FLAGS))
# --content, --style, and --stateFile default to None, so this enforces them
if any(v is None for v in vars(FLAGS).values()) :
    print('All args are required with their flags. For help: python style_transfer --help')
    sys.exit()
CHECKPOINTING=False
FILETYPE = ".tif"
# parameters to manage experiments
STYLE = FLAGS.style
CONTENT = FLAGS.content
# NOTE(review): both style and content images are read from 'content/' even
# though the --style help text says "style dir" - confirm intended
STYLE_IMAGE = 'content/' + STYLE + FILETYPE
CONTENT_IMAGE = 'content/' + CONTENT + FILETYPE
# This seems to be the paramter that really controls the balance between content and style
# The more noise, the less content
NOISE_RATIO = FLAGS.noise # percentage of weight of the noise for intermixing with the content image
# Layers used for style features. You can change this.
STYLE_LAYERS = ['h1', 'h2']
W = [1.0, 2.0] # give more weights to deeper layers.
# Layer used for content features. You can change this.
CONTENT_LAYER = 'h2'
#Relationship a/b is 1/20
ALPHA = FLAGS.alpha #content
BETA = FLAGS.beta #style
LOGDIR = FLAGS.outdir + '/log_graph' #create folder manually
CHKPTDIR = FLAGS.outdir + '/checkpoints' # create folder manually
OUTPUTDIR = FLAGS.outdir
ITERS = FLAGS.iter
LR = 2.0
WEIGHT_DECAY=FLAGS.weightDecay
def _create_range_loss(im):
    """L2 penalty on pixel values falling outside [0, 255], scaled by WEIGHT_DECAY."""
    above = tf.maximum(im - 255, 0)  # positive wherever im > 255
    below = tf.minimum(im, 0)        # negative wherever im < 0
    excess = tf.add(above, below)    # at most one term is nonzero per pixel
    return WEIGHT_DECAY * tf.nn.l2_loss(excess)
def _create_content_loss(p, f):
    """Content loss between precomputed content features p (P) and the
    generated image's features f (F).

    Inputs:
        p, f are just P, F in the paper
        (read the assignment handout if you're confused)
    Note: we won't use the coefficient 0.5 as defined in the paper
    but the coefficient as defined in the assignment handout.
    Output:
        the content loss
    """
    # total number of entries in the feature tensor (includes the batch dim)
    coef = np.multiply.reduce(p.shape)
    squared_diff = tf.reduce_sum((f - p) ** 2)
    return squared_diff / (4 * coef)
def _gram_matrix(F, N, M):
    """Gram matrix (channel-correlation matrix) of feature tensor F.

    inputs: F: the tensor of all feature channels in a given layer
            N: number of features (channels) in the layer
            M: the total number of values in each feature map (length * height)
    F is flattened to (M, N) - one row per spatial location - and the
    returned Gram matrix is F^T F of size N x N.
    """
    flat = tf.reshape(F, (M, N))
    # transpose_a=True computes flat^T @ flat without an explicit transpose op
    return tf.matmul(flat, flat, transpose_a=True)
def _single_style_loss(a, g):
    """Style loss E_l at one layer.

    Inputs:
        a is the feature representation of the real (style) image
        g is the feature representation of the generated image
        Both share the same shape: (minibatch, height, width, channels).
    Output:
        the style loss at this layer (E_l in the paper), computed from the
        Gram matrices of a and g.
    """
    # N: number of feature channels; M: spatial size of each feature map
    N = a.shape[3]
    M = a.shape[1] * a.shape[2]
    gram_diff = _gram_matrix(a, N, M) - _gram_matrix(g, N, M)
    return tf.reduce_sum(tf.square(gram_diff)) / (4 * N * N * M * M)
def _create_style_loss(A, model):
    """Total style loss: per-layer losses E_l weighted by the module-level W."""
    E = [_single_style_loss(a, model[name]) for a, name in zip(A, STYLE_LAYERS)]
    # weighted sum over layers
    return np.dot(W, E)
###############################
def _create_losses(model, input_image, content_image, style_image):
    """Build the content, style, and total loss ops.

    Target features (p, A) are precomputed as numpy constants by running the
    model in throwaway sessions with the content/style images assigned to
    the shared input variable; the loss ops then compare the live graph
    features against those constants.
    """
    print('_create_losses')
    with tf.variable_scope('loss') as scope:
        with tf.Session() as sess:
            sess.run(input_image.assign(content_image)) # assign content image to the input variable
            # model[CONTENT_LAYER] is a relu op
            p = sess.run(model[CONTENT_LAYER])  # numpy target for the content loss
        content_loss = _create_content_loss(p, model[CONTENT_LAYER])
        with tf.Session() as sess:
            sess.run(input_image.assign(style_image))
            A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS])  # numpy style targets
        style_loss = _create_style_loss(A, model)
        # keep generated pixel values near [0, 255]
        reg_loss = _create_range_loss(model['X'])
        ##########################################
        ## TO DO: create total loss.
        ## Hint: don't forget the content loss and style loss weights
        total_loss = ALPHA*content_loss + BETA*style_loss + reg_loss
        ##########################################
    return content_loss, style_loss, total_loss
def _create_summary(model):
    """ Create summary ops necessary
    Hint: don't forget to merge them
    Returns the single merged summary op.
    """
    with tf.name_scope ( "summaries" ):
        # one scalar per loss term (note: "content loss" tag has a space, not an underscore)
        tf.summary.scalar ( "content loss" , model['content_loss'])
        tf.summary.scalar ( "style_loss" , model['style_loss'])
        tf.summary.scalar ( "total_loss" , model['total_loss'])
        # because you have several summaries, we should merge them all
        # into one op to make it easier to manage
        return tf.summary.merge_all()
def train(model, generated_image, initial_image):
    """ Train your model.
    Don't forget to create folders for checkpoints and outputs.

    model           -- dict of graph nodes ('optimizer', 'total_loss',
                       'summary_op', 'global_step', ...)
    generated_image -- the trainable image variable
    initial_image   -- numpy array used to seed generated_image
    """
    skip_step = 1
    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run ( tf.global_variables_initializer ())
        print('initialize .....')
        writer = tf.summary.FileWriter(LOGDIR, sess.graph)
        ###############################
        print('Do initial run to assign image')
        sess.run(generated_image.assign(initial_image))
        # Optionally resume from the latest checkpoint (restores global_step too).
        if CHECKPOINTING :
            ckpt = tf.train.get_checkpoint_state(os.path.dirname(CHKPTDIR + '/checkpoint'))
        else :
            ckpt = False
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = model['global_step'].eval()
        start_time = time.time()
        step_time=start_time
        for index in range(initial_step, ITERS):
            # Report less and less often as training progresses.
            if index >= 5 and index < 20:
                skip_step = 10
            elif index >= 20:
                skip_step = 100
            sess.run(model['optimizer'])
            if (index + 1) % skip_step == 0:
                ###############################
                ## TO DO: obtain generated image and loss
                # following the optimazaiton step, calculate loss
                gen_image, total_loss, summary = sess.run([generated_image, model['total_loss'],
                                                           model['summary_op']])
                ###############################
                #gen_image = gen_image + MEAN_PIXELS
                writer.add_summary(summary, global_step=index)
                print('Step {}\n   Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))
                # NOTE(review): this re-runs the loss op in a separate
                # sess.run after the optimizer step, so it may differ from
                # the `total_loss` fetched above.
                print('   Loss: {:5.1f}'.format(sess.run(model['total_loss']))) #???????
                print('   Time: {}'.format(time.time() - step_time))
                step_time = time.time()
                filename = OUTPUTDIR + '/%d.tif' % (index)
                #pickledModel.save_image(np.transpose(gen_image[0][0]), filename)
                print('style_transfer: about to save image with shape ' + str(gen_image.shape))
                pickledModel.save_image(gen_image[0], filename)
                # Checkpoint every 20 iterations.
                if (index + 1) % 20 == 0:
                    saver.save(sess, CHKPTDIR + '/style_transfer', index)
        print(' TOTAL Time: {}'.format(time.time() - start_time))
        writer.close()
#-----------------------------------
# Script entry: load the pickled network, build losses and optimizer,
# then run the style-transfer optimization on the input image variable.
print('RUN MAIN')
model=pickledModel.load(FLAGS.stateFile, FLAGS.randomize)
print('MODEL LOADED')
# global_step is checkpointed so interrupted runs resume at the right iteration.
model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
content_image = pickledModel.loadImage(CONTENT_IMAGE)
print('content_image shape is ' + str(content_image.shape))
print('content_image max is ' + str(np.amax(content_image) ))
print('content_image min is ' + str(np.amin(content_image) ))
#content_image = content_image - MEAN_PIXELS
style_image = pickledModel.loadImage(STYLE_IMAGE)
print('style_image max is ' + str(np.amax(style_image) ))
print('style_image min is ' + str(np.amin(style_image) ))
#style_image = style_image - MEAN_PIXELS
print(' NEXT, create losses')
model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model,
                                    model["X"], content_image, style_image)
###############################
## TO DO: create optimizer
## model['optimizer'] = ...
# Only the image variable X is optimized; the network weights are constants.
model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], var_list=[model["X"]])
###############################
model['summary_op'] = _create_summary(model)
initial_image = pickledModel.generate_noise_image(content_image, NOISE_RATIO)
#def train(model, generated_image, initial_image):
train(model, model["X"], initial_image)
#if __name__ == '__main__':
#    main()
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,541
|
lonce/dcn_soundclass
|
refs/heads/master
|
/pickledModel.py
|
#
#
#Morgans great example code:
#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
#
# GitHub utility for freezing graphs:
#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py
#
#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants
import tensorflow as tf
import numpy as np
from PIL import TiffImagePlugin, ImageOps
from PIL import Image
import pickle
g_graph=None
#k_freqbins=257
#k_width=856
VERBOSE=0
#------------------------------------------------------------
#global
# gleaned from the parmeters in the pickle file; used to load images
height=0
width=0
depth=0
#-------------------------------------------------------------
def getShape(g, name) :
    """Return the static shape of tensor `name`'s first output in graph `g`."""
    tensor = g.get_tensor_by_name(name + ":0")
    return tensor.get_shape()
def loadImage(fname) :
    """Load a spectrogram image file as a 4-D float batch of one.

    Pixel values are scaled by 255 via PIL's point().  Uses the
    module-level height/width/depth set by load().
    """
    #transform into 1D width with frequbins in channel dimension (we do this in the graph in the training net, but not with this reconstructed net)
    if (height==1) :
        # Frequency-bins-as-channels orientation: reshape to
        # [1, depth, width, 1] then move freq bins to the channel axis,
        # giving [1, 1, width, depth].
        return np.transpose(np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,depth,width,1]), [0,3,2,1])
    else :
        # Image orientation: [1, height, width, 1].
        return np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,height,width,1])
def generate_noise_image(content_image, noise_ratio=0.6):
    """Blend uniform noise in [-1, 1) with the content image.

    The blend weight `noise_ratio` goes to the noise; (1 - noise_ratio)
    goes to the content image.  Shape comes from the module-level
    height/width/depth set by load().
    """
    print('generate_noise_image with height=' + str(height) + ', width =' + str(width) + ', and depth =' + str(depth))
    noise = np.random.uniform(-1, 1, (1, height, width, depth)).astype(np.float32)
    print('noise_image shape is ' + str(noise.shape))
    return noise_ratio * noise + (1. - noise_ratio) * content_image
# Assumes caller puts image into the correct orientation
def save_image(image, fname, scaleinfo=None):
    """Write a single-channel float image to a TIFF file.

    image     -- 3-D array; channel-orientation [depth, width, 1]-style data
                 is transposed back to 2-D when the module-level height==1.
    fname     -- output path
    scaleinfo -- optional string stored in TIFF tag 270 (ImageDescription);
                 defaults to '80, 0'.
    Returns the tag-270 string actually written.
    """
    print('save_image: shape is ' + str(image.shape))
    if (height==1) : # orientation is freq bins in channels
        print('saving image in channel orientation')
        image = np.transpose(image, [2,1,0])[:,:,0]
    else :
        print('saving image in image orientation')
        image = image[:,:,0]
    print('AFTER reshaping, save_image: shape is ' + str(image.shape))
    print('image max is ' + str(np.amax(image) ))
    print('image min is ' + str(np.amin(image) ))
    # Output should add back the mean pixels we subtracted at the beginning
    # [0,80db] -> [0, 255]
    # after style transfer, images range outside of [0,255].
    # To preserve scale, and mask low values, we shift by (255-max), then clip at 0 and then have all bins in the top 80dB.
    image = np.clip(image-np.amax(image)+255, 0, 255).astype('uint8')
    info = TiffImagePlugin.ImageFileDirectory()
    # FIX: identity comparison with None ('is None') instead of '== None'.
    if scaleinfo is None :
        info[270] = '80, 0'
    else :
        info[270] = scaleinfo
    #scipy.misc.imsave(path, image)
    bwarray=np.asarray(image)/255.
    savimg = Image.fromarray(np.float64(bwarray)) #==============================
    savimg.save(fname, tiffinfo=info)
    #print('RGB2TiffGray : tiffinfo is ' + str(info))
    return info[270] # just in case you want it for some reason
def constructSTModel(state, params) :
    """Rebuild the trained classifier graph for style transfer.

    All trained weights from `state` become tf.constant nodes; the only
    tf.Variable is the input image X, which style transfer optimizes.
    Node references are stashed in the module-level dict g_graph, which
    is also returned.

    state  -- dict mapping variable names (e.g. "w1:0") to numpy weights
    params -- dict of architecture hyper-parameters saved with the weights
    """
    global g_graph
    g_graph = {}
    #This is the variable that we will "train" to match style and content images.
    ##g_graph["X"] = tf.Variable(np.zeros([1,k_width*k_freqbins]), dtype=tf.float32, name="s_x_image")
    ##g_graph["x_image"] = tf.reshape(g_graph["X"], [1,k_height,k_width,k_inputChannels])
    g_graph["X"] = tf.Variable(np.zeros([1,params['k_height'], params['k_width'], params['k_inputChannels']]), dtype=tf.float32, name="s_X")
    # Conv layer 1: frozen weights + bias.
    g_graph["w1"]=tf.constant(state["w1:0"], name="s_w1")
    g_graph["b1"]=tf.constant(state["b1:0"], name="s_b1")
    #g_graph["w1"]=tf.Variable(tf.truncated_normal(getShape( tg, "w1"), stddev=0.1), name="w1")
    #g_graph["b1"]=tf.Variable(tf.constant(0.1, shape=getShape( tg, "b1")), name="b1")
    #  tf.nn.relu(tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b1, name="h1")
    g_graph["h1"]=tf.nn.relu(tf.nn.conv2d(g_graph["X"], g_graph["w1"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph["b1"], name="s_h1")
    # 2x2 max pooling
    g_graph["h1pooled"] = tf.nn.max_pool(g_graph["h1"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name="s_h1_pooled")
    # Conv layer 2: frozen weights + bias.
    g_graph["w2"]=tf.constant(state["w2:0"], name="s_w2")
    g_graph["b2"]=tf.constant(state["b2:0"], name="s_b2")
    #g_graph["w2"]=tf.Variable(tf.truncated_normal(getShape( tg, "w2"), stddev=0.1), name="w2")
    #g_graph["b2"]=tf.Variable(tf.constant(0.1, shape=getShape( tg, "b2")), name="b2")
    g_graph["h2"]=tf.nn.relu(tf.nn.conv2d(g_graph["h1pooled"], g_graph["w2"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph["b2"], name="s_h2")
    g_graph["h2pooled"] = tf.nn.max_pool(g_graph["h2"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name='s_h2_pooled')
    # Flatten conv output for the fully connected layers.
    g_graph["convlayers_output"] = tf.reshape(g_graph["h2pooled"], [-1, params['k_downsampledWidth'] * params['k_downsampledHeight']*params['L2_CHANNELS']]) # to prepare it for multiplication by W_fc1
    #
    g_graph["W_fc1"] = tf.constant(state["W_fc1:0"], name="s_W_fc1")
    g_graph["b_fc1"] = tf.constant(state["b_fc1:0"], name="s_b_fc1")
    #g_graph["keepProb"]=tf.placeholder(tf.float32, (), name= "keepProb")
    #g_graph["h_fc1"] = tf.nn.relu(tf.matmul(tf.nn.dropout(g_graph["convlayers_output"],  g_graph["keepProb"]), g_graph["W_fc1"]) + g_graph["b_fc1"], name="h_fc1")
    # No dropout at inference/style-transfer time.
    g_graph["h_fc1"] = tf.nn.relu(tf.matmul(g_graph["convlayers_output"], g_graph["W_fc1"]) + g_graph["b_fc1"], name="s_h_fc1")
    #Read out layer
    g_graph["W_fc2"] = tf.constant(state["W_fc2:0"], name="s_W_fc2")
    g_graph["b_fc2"] = tf.constant(state["b_fc2:0"], name="s_b_fc2")
    g_graph["logits_"] = tf.matmul(g_graph["h_fc1"], g_graph["W_fc2"])
    g_graph["logits"] = tf.add(g_graph["logits_"] , g_graph["b_fc2"] , name="s_logits")
    g_graph["softmax_preds"] = tf.nn.softmax(logits=g_graph["logits"], name="s_softmax_preds")
    return g_graph
# Create and save the pickle file of parameters
def saveState(sess, vlist, parameters, fname) :
    """Serialize trained variables plus hyper-parameters to a pickle file.

    sess       -- session used to evaluate each variable
    vlist      -- iterable of tf.Variable objects; keyed by their .name
    parameters -- dict of architecture hyper-parameters to save alongside
    fname      -- output pickle path
    """
    # create object to stash tensorflow variables in
    state = {}
    for v in vlist :
        state[v.name] = sess.run(v)
    # combine state and parameters into a single object for serialization
    netObject = {
        'state' : state,
        'parameters' : parameters
    }
    # FIX: use a context manager so the file handle is closed (and flushed)
    # even if pickling fails; the original left the handle dangling.
    with open(fname, "wb") as f:
        pickle.dump(netObject, f)
# Load the pickle file of parameters
def load(pickleFile, randomize=0) :
    """Load pickled weights + hyper-parameters and rebuild the graph.

    Side effects: sets the module-level height/width/depth globals from
    the saved parameters (used by loadImage / generate_noise_image /
    save_image).  If randomize==1, the saved weights are replaced with
    uniform noise in [-.1, .1) — useful for texture-synthesis baselines.

    Returns the g_graph dict from constructSTModel().
    """
    print(' will read state from ' + pickleFile)
    netObject=pickle.load( open( pickleFile, "rb" ) )
    state = netObject['state']
    parameters = netObject['parameters']
    if randomize ==1 :
        print('randomizing weights')
        for n in state.keys():
            print('shape of state[' + n + '] is ' + str(state[n].shape))
            state[n] = .2* np.random.random_sample(state[n].shape).astype(np.float32) -.1
    for p in parameters.keys() :
        print('param[' + p + '] = ' + str(parameters[p]))
    # Publish image geometry for the other helpers in this module.
    global height
    height = parameters['k_height']
    global width
    width = parameters['k_width']
    global depth
    depth = parameters['k_inputChannels']
    return constructSTModel(state, parameters)
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,542
|
lonce/dcn_soundclass
|
refs/heads/master
|
/DCNSoundClass.py
|
"""
"""
import tensorflow as tf
import numpy as np
import spectreader
import os
import time
import math
import pickledModel
# get args from command line
import argparse
FLAGS = None
# ------------------------------------------------------
# get any args provided on the command line
# (defaults are sized for quick smoke tests, not full training runs)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--indir', type=str, help='directory holding TFRecords of data', default='.')
parser.add_argument('--outdir', type=str, help='output directory for logging', default='.')
parser.add_argument('--numClasses', type=int, help='number of classes in data', choices=[2,50], default=2) #default for testing
parser.add_argument('--checkpointing', type=int, help='0/1 - used for both saving and starting from checkpoints', choices=[0,1], default=0)
parser.add_argument('--checkpointPeriod', type=int, help='checkpoint every n batches', default=8)
parser.add_argument('--freqbins', type=int, help='number of frequency bins in the spectrogram input', default=513)
parser.add_argument('--numFrames', type=int, help='number of frames in the spectrogram input (must be divisible by 4)', default=424)
parser.add_argument('--learning_rate', type=float, help='learning rate', default=.001)
parser.add_argument('--batchsize', type=int, help='number of data records per training batch', default=8) #default for testing
parser.add_argument('--n_epochs', type=int, help='number of epochs to use for training', default=2) #default for testing
parser.add_argument('--keepProb', type=float, help='keep probablity for dropout before 1st fully connected layer during training', default=1.0) #default for testing
parser.add_argument('--batchnorm', type=int, help='0/1 - to batchnorm or not to batchnorm', choices=[0,1], default=1)
parser.add_argument('--freqorientation', type=str, help='freq as height or as channels', choices=["height","channels"], default="channels") #default for testing
parser.add_argument('--numconvlayers', type=int, help='number of convolutional layers', choices=[1,2], default=2) #default for testing
parser.add_argument('--l1channels', type=int, help='Number of channels in the first convolutional layer', default=32) #default for testing
parser.add_argument('--l2channels', type=int, help='Number of channels in the second convolutional layer (ignored if numconvlayers is 1)', default=64) #default for testing
parser.add_argument('--fcsize', type=int, help='Dimension of the final fully-connected layer', default=32) #default for testing
parser.add_argument('--convRows', type=int, help='size of conv kernernel in freq dimension if orientation is height (otherwise ignored)', default=5) #default for testing
parser.add_argument('--convColumns', type=int, help='size of conv kernernel in temporal dimension ', default=5) #default for testing
parser.add_argument('--optimizer', type=str, help='optimizer', choices=["adam","gd"], default="gd") #default for testing
parser.add_argument('--adamepsilon', type=float, help='epsilon param for adam optimizer', default=.1)
parser.add_argument('--learnCondition', type=str, help='when to learn', choices=["always","whenWrong"], default="always") #default for testing
parser.add_argument('--mtlnumclasses', type=int, help='if nonzero, train using secondary classes (which must be stored in TFRecord files', default=0)
# parse_known_args tolerates extra flags (e.g. those injected by launchers)
FLAGS, unparsed = parser.parse_known_args()
print('\n FLAGS parsed : {0}'.format(FLAGS))
#HARD-CODED data-dependant parameters ------------------
#dimensions of image (pixels)
k_freqbins=FLAGS.freqbins
k_height=1 # default for freqs as channels
k_inputChannels=k_freqbins # default for freqs as channels
if FLAGS.freqorientation == "height" :
    k_height=k_freqbins
    k_inputChannels=1
k_numFrames=FLAGS.numFrames
#number of samples for training and validation
k_numClasses=FLAGS.numClasses #determines wether to read mini data set in data2 or full dataset in data50
validationSamples=8*k_numClasses
trainingSamples=32*k_numClasses
k_mtlnumclasses=FLAGS.mtlnumclasses #only matters if K_MTK is not 0
# ------------------------------------------------------
# Define paramaters for the training
learning_rate = FLAGS.learning_rate
k_batchsize = FLAGS.batchsize
n_epochs = FLAGS.n_epochs #6 #NOTE: we can load from checkpoint, but new run will last for n_epochs anyway
# ------------------------------------------------------
# Define paramaters for the model
K_NUMCONVLAYERS = FLAGS.numconvlayers
L1_CHANNELS=FLAGS.l1channels
L2_CHANNELS=FLAGS.l2channels
FC_SIZE = FLAGS.fcsize
k_downsampledHeight = 1 # default for freqs as channels
if FLAGS.freqorientation == "height" :
    # see https://www.tensorflow.org/api_guides/python/nn#convolution for calculating size from strides and padding
    k_downsampledHeight = int(math.ceil(math.ceil(k_height/2.)/2.))# k_height/4 #in case were using freqs as y dim, and conv layers = 2
    print(':::::: k_downsampledHeight is ' + str(k_downsampledHeight))
# NOTE(review): '/' on int operands below is integer division under
# Python 2 (this file uses Py2 syntax elsewhere); under Python 3 it
# would yield floats and break tf.reshape — confirm target interpreter.
k_downsampledWidth = k_numFrames/4 # no matter what the orientation - freqs as channels or as y dim
k_convLayerOutputChannels = L2_CHANNELS
if (K_NUMCONVLAYERS == 1) :
    k_downsampledWidth = k_numFrames/2
    k_convLayerOutputChannels = L1_CHANNELS
    if FLAGS.freqorientation == "height" :
        k_downsampledHeight = int(math.ceil(k_height/2.)) # k_height/2 #in case were using freqs as y dim, and conv layers = 1
        print(':::::: k_downsampledHeight is ' + str(k_downsampledHeight))
print(':::::: k_downsampledWidth is ' + str(k_downsampledWidth))
K_ConvRows=1 # default for freqs as channels
if FLAGS.freqorientation == "height" :
    K_ConvRows=FLAGS.convRows
K_ConvCols=FLAGS.convColumns
k_ConvStrideRows=1
k_ConvStrideCols=1
k_poolRows = 1 # default for freqs as channels
k_poolStrideRows = 1 # default for freqs as channels
if FLAGS.freqorientation == "height" :
    k_poolRows = 2
    k_poolStrideRows = 2
k_keepProb=FLAGS.keepProb
k_OPTIMIZER=FLAGS.optimizer
k_adamepsilon = FLAGS.adamepsilon
LEARNCONDITION = FLAGS.learnCondition
# ------------------------------------------------------
# Derived parameters for convenience (do not change these)
k_vbatchsize = min(validationSamples, k_batchsize)
k_numVBatches = validationSamples/k_vbatchsize
print(' ------- For validation, will run  ' + str(k_numVBatches) + ' batches of ' + str(k_vbatchsize) + ' datasamples')
#ESC-50 dataset has 50 classes of 40 sounds each
k_batches_per_epoch = k_numClasses*40/k_batchsize
k_batchesPerLossReport= k_batches_per_epoch  #writes loss to the console every n batches
print(' ----------will write out report every ' + str(k_batchesPerLossReport) + ' batches')
#k_batchesPerLossReport=1 #k_batches_per_epoch
# Create list of paramters for serializing so that network can be properly reconstructed, and for documentation purposes
# (consumed by pickledModel.saveState / pickledModel.load)
parameters={
    'k_height' : k_height,
    'k_numFrames' : k_numFrames,
    'k_inputChannels' : k_inputChannels,
    'K_NUMCONVLAYERS' : K_NUMCONVLAYERS,
    'L1_CHANNELS' : L1_CHANNELS,
    'L2_CHANNELS' : L2_CHANNELS,
    'FC_SIZE' : FC_SIZE,
    'K_ConvRows' : K_ConvRows,
    'K_ConvCols' : K_ConvCols,
    'k_ConvStrideRows' : k_ConvStrideRows,
    'k_ConvStrideCols' : k_ConvStrideCols,
    'k_poolRows' : k_poolRows,
    'k_poolStrideRows' : k_poolStrideRows,
    'k_downsampledHeight' : k_downsampledHeight,
    'k_downsampledWidth' : k_downsampledWidth,
    'freqorientation' : FLAGS.freqorientation
}
# ------------------------------------------------------
#Other non-data, non-model params
CHECKPOINTING=FLAGS.checkpointing
k_checkpointPeriod = FLAGS.checkpointPeriod # in units of batches
INDIR = FLAGS.indir
OUTDIR = FLAGS.outdir
CHKPOINTDIR = OUTDIR + '/checkpoints'   # create folder manually
CHKPTBASE = CHKPOINTDIR + '/model.ckpt' # base name used for checkpoints
LOGDIR = OUTDIR + '/log_graph'          #create folder manually
#OUTPUTDIR = i_outdir
NUM_THREADS = 4 #used for enqueueing TFRecord data
def getImage(fnames, nepochs=None, mtlclasses=0) :
    """ Reads data from the prepaired *list* files in fnames of TFRecords, does some preprocessing
    params:
    fnames - list of filenames to read data from
    nepochs - An integer (optional). Just fed to tf.string_input_producer().  Reads through all data num_epochs times before generating an OutOfRange error. None means read forever.
    mtlclasses - if nonzero, also read and one-hot-encode the secondary (multi-task) label
    returns (label, image) or (label, image, mtlabel) graph tensors
    """
    if mtlclasses :
        label, image, mtlabel = spectreader.getImage(fnames, nepochs, mtlclasses)
    else :
        label, image = spectreader.getImage(fnames, nepochs)
    #same as np.flatten
    # I can't seem to make shuffle batch work on images in their native shapes.
    image=tf.reshape(image,[k_freqbins*k_numFrames])
    # re-define label as a "one-hot" vector
    # it will be [0,1] or [1,0] here.
    # This approach can easily be extended to more classes.
    # (labels on disk are 1-based, hence label-1)
    label=tf.stack(tf.one_hot(label-1, k_numClasses))
    if mtlclasses :
        mtlabel=tf.stack(tf.one_hot(mtlabel-1, mtlclasses))
        return label, image, mtlabel
    else :
        return label, image
def get_datafiles(a_dir, startswith):
    """Return paths of the files in `a_dir` whose names begin with `startswith`.

    e.g. get_datafiles('data', 'train-')
    """
    matches = []
    for entry in os.listdir(a_dir):
        if entry.startswith(startswith):
            matches.append(a_dir + '/' + entry)
    return matches
def batch_norm(x, is_trainingP, scope):
    """Batch-normalize `x` over its last (channel) axis.

    is_trainingP -- boolean placeholder; selects batch statistics during
                    training vs. moving averages at inference
    scope        -- variable scope name; the op itself is named scope+"_bn"
    """
    with tf.variable_scope(scope):
        return tf.layers.batch_normalization(x,
            axis=3, # is this right? - our conv2D returns NHWC ordering?
            center=True,
            scale=True,
            training=is_trainingP,
            name=scope+"_bn")
#=============================================
# Step 1: Read in data
# getImage reads data for enqueueing shufflebatch, shufflebatch manages it's own dequeing
# ---- First set up the graph for the TRAINING DATA
if k_mtlnumclasses :
    # Multi-task path: batches carry a secondary label tensor too.
    target, data, mtltargets = getImage(get_datafiles(INDIR, 'train-'), nepochs=n_epochs, mtlclasses=k_mtlnumclasses)
    imageBatch, labelBatch, mtltargetBatch = tf.train.shuffle_batch(
        [data, target, mtltargets], batch_size=k_batchsize,
        num_threads=NUM_THREADS,
        allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize
        enqueue_many=False, #IMPORTANT to get right, default=False -
        capacity=1000, #1000,
        min_after_dequeue=500) #500
else :
    target, data = getImage(get_datafiles(INDIR, 'train-'), n_epochs)
    imageBatch, labelBatch = tf.train.shuffle_batch(
        [data, target], batch_size=k_batchsize,
        num_threads=NUM_THREADS,
        allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize
        enqueue_many=False, #IMPORTANT to get right, default=False -
        capacity=1000, #1000,
        min_after_dequeue=500) #500
# ---- same for the VALIDATION DATA
# no need for mtl labels for validation
vtarget, vdata = getImage(get_datafiles(INDIR, 'validation-')) # one "epoch" for validation
#vimageBatch, vlabelBatch = tf.train.shuffle_batch(
#    [vdata, vtarget], batch_size=k_vbatchsize,
#    num_threads=NUM_THREADS,
#    allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize
#    enqueue_many=False, #IMPORTANT to get right, default=False -
#    capacity=1000, #1000,
#    min_after_dequeue=500) #500
# Validation uses deterministic (unshuffled) fixed-size batches.
vimageBatch, vlabelBatch = tf.train.batch(
    [vdata, vtarget], batch_size=k_vbatchsize,
    num_threads=NUM_THREADS,
    allow_smaller_final_batch=False, #want to finish an eposh even if datasize doesn't divide by batchsize
    enqueue_many=False, #IMPORTANT to get right, default=False -
    capacity=1000)
# Step 2: create placeholders for features (X) and labels (Y)
# each lable is one hot vector.
# 'None' here allows us to fill the placeholders with different size batches (which we do with training and validation batches)
#X = tf.placeholder(tf.float32, [None,k_freqbins*k_numFrames], name= "X")
X = tf.placeholder(tf.float32, [None,k_freqbins*k_numFrames], name= "X")
if FLAGS.freqorientation == "height" :
    x_image = tf.reshape(X, [-1,k_height,k_numFrames,k_inputChannels])
else :
    print('set up reshaping for freqbins as channels')
    foo1 = tf.reshape(X, [-1,k_freqbins,k_numFrames,1]) #unflatten (could skip this step if it wasn't flattenned in the first place!)
    x_image = tf.transpose(foo1, perm=[0,3,2,1]) #moves freqbins from height to channel dimension
Y = tf.placeholder(tf.float32, [None,k_numClasses], name= "Y") #labeled classes, one-hot
MTLY = tf.placeholder(tf.float32, [None,k_mtlnumclasses], name= "MTLY") #labeled classes, one-hot
# Step 3: create weights and bias
# `trainable` accumulates the variables handed to the optimizer's var_list.
trainable=[]
#Layer 1
# 1 input channel, L1_CHANNELS output channels
isTraining=tf.placeholder(tf.bool, (), name= "isTraining") #passed in feeddict to sess.runs
w1=tf.Variable(tf.truncated_normal([K_ConvRows, K_ConvCols, k_inputChannels, L1_CHANNELS], stddev=0.1), name="w1")
trainable.extend([w1])
if (FLAGS.batchnorm==1) :
    #convolve Wx (w/o adding bias) then relu
    l1preactivation=tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME')
    bn1=batch_norm(l1preactivation, isTraining, "batch_norm_1")
    h1=tf.nn.relu(bn1, name="h1")
    # 2x2 max pooling
else :
    # convolve and add bias Wx+b
    b1=tf.Variable(tf.constant(0.1, shape=[L1_CHANNELS]), name="b1")
    trainable.extend([b1])
    l1preactivation=tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b1
    h1=tf.nn.relu(l1preactivation, name="h1")
h1pooled = tf.nn.max_pool(h1, ksize=[1, k_poolRows, 2, 1], strides=[1, k_poolStrideRows, 2, 1], padding='SAME')
if K_NUMCONVLAYERS == 2 :
    #Layer 2
    #L1_CHANNELS input channels, L2_CHANNELS output channels
    w2=tf.Variable(tf.truncated_normal([K_ConvRows, K_ConvCols, L1_CHANNELS, L2_CHANNELS], stddev=0.1), name="w2")
    trainable.extend([w2])
    if (FLAGS.batchnorm==1) :
        #convolve (w/o adding bias) then norm
        l2preactivation= tf.nn.conv2d(h1pooled, w2, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME')
        bn2=batch_norm(l2preactivation, isTraining, "batch_norm_2")
        h2=tf.nn.relu(bn2, name="h2")
    else :
        b2=tf.Variable(tf.constant(0.1, shape=[L2_CHANNELS]), name="b2")
        trainable.extend([b2])
        l2preactivation= tf.nn.conv2d(h1pooled, w2, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b2
        h2=tf.nn.relu(l2preactivation, name="h2")
    with tf.name_scope ( "Conv_layers_out" ):
        h2pooled = tf.nn.max_pool(h2, ksize=[1, k_poolRows, 2, 1], strides=[1, k_poolStrideRows, 2, 1], padding='SAME', name='h2_pooled')
        print('k_downsampledWidth = ' + str(k_downsampledWidth) + ', k_downsampledHeight = ' + str(k_downsampledHeight) + ', L2_CHANNELS = ' + str(L2_CHANNELS))
        print('requesting a reshape of size ' + str(k_downsampledWidth * k_downsampledHeight*L2_CHANNELS))
        convlayers_output = tf.reshape(h2pooled, [-1, k_downsampledWidth * k_downsampledHeight*L2_CHANNELS]) # to prepare it for multiplication by W_fc1
    #h2pooled is number of pixels / 2 / 2 (halved in size at each layer due to pooling)
    # check our dimensions are a multiple of 4
    if (k_numFrames%4) : # or ((FLAGS.freqorientation == "height") and k_height%4 )):
        print ('Error: width and height must be a multiple of 4')
        sys.exit(1)
else :
    convlayers_output = tf.reshape(h1pooled, [-1, k_downsampledWidth * k_downsampledHeight*L1_CHANNELS])
#now do a fully connected layer: every output connected to every input pixel of each channel
W_fc1 = tf.Variable(tf.truncated_normal([k_downsampledWidth * k_downsampledHeight * k_convLayerOutputChannels, FC_SIZE], stddev=0.1), name="W_fc1")
b_fc1 = tf.Variable(tf.constant(0.1, shape=[FC_SIZE]) , name="b_fc1")
keepProb=tf.placeholder(tf.float32, (), name= "keepProb")
fc1preactivation = tf.matmul(tf.nn.dropout(convlayers_output, keepProb), W_fc1) + b_fc1
h_fc1 = tf.nn.relu(fc1preactivation, name="h_fc1")
#Read out layer
W_fc2 = tf.Variable(tf.truncated_normal([FC_SIZE, k_numClasses], stddev=0.1), name="W_fc2")
b_fc2 = tf.Variable(tf.constant(0.1, shape=[k_numClasses]), name="b_fc2")
trainable.extend([W_fc1, b_fc1, W_fc2, b_fc2])
if k_mtlnumclasses :
    #MTL Read out layer - This is the only part of the net that is different for the secondary classes
    mtlW_fc2 = tf.Variable(tf.truncated_normal([FC_SIZE, k_mtlnumclasses], stddev=0.1), name="mtlW_fc2")
    mtlb_fc2 = tf.Variable(tf.constant(0.1, shape=[k_mtlnumclasses]), name="mtlb_fc2")
    trainable.extend([mtlW_fc2, mtlb_fc2])
# Step 4: build model
# the model that returns the logits.
# this logits will be later passed through softmax layer
# to get the probability distribution of possible label of the image
# DO NOT DO SOFTMAX HERE
#could do a dropout here on h
logits_ = tf.matmul(h_fc1, W_fc2)
logits = tf.add(logits_ , b_fc2, name="logits")
if k_mtlnumclasses :
    mtllogits = tf.matmul(h_fc1, mtlW_fc2) + mtlb_fc2
# Step 5: define loss function
# use cross entropy loss of the real labels with the softmax of logits
# returns a 1D tensor of length batchsize
if LEARNCONDITION=="whenWrong" :
    # Zero the per-example loss wherever the prediction is already correct,
    # so gradients flow only from misclassified examples.
    summaryloss_primary_raw = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
    smpreds = tf.nn.softmax(logits=logits, name="softmax_preds")
    # argmax returns a batchsize tensor of type int64, batchsize tensor of booleans
    # equal returns a batchsize tensor of type boolean
    wrong_preds = tf.not_equal(tf.argmax(smpreds, 1), tf.argmax(Y, 1))
    # ones where labe != max of softmax, tensor of length batchsize
    wrongMask = tf.cast(wrong_preds, tf.float32) # need numpy.count_nonzero(boolarr) :(
    summaryloss_primary = tf.multiply(summaryloss_primary_raw, wrongMask, name="wrongloss")
else :
    summaryloss_primary = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
meanloss_primary = tf.reduce_mean(summaryloss_primary)
if k_mtlnumclasses :
    summaryloss_mtl = tf.nn.softmax_cross_entropy_with_logits(logits=mtllogits, labels=MTLY)
    meanloss_mtl = tf.reduce_mean(summaryloss_mtl)
    meanloss=meanloss_primary+meanloss_mtl
else :
    meanloss=meanloss_primary
#if k_mtlnumclasses :
#    meanloss = tf.assign(meanloss, meanloss_primary + meanloss_mtl) #training thus depends on MTLYY in the feeddict if k_mtlnumclasses != 0
#else :
#    meanloss = tf.assign(meanloss, meanloss_primary)
# Step 6: define training op
# NOTE: Must save global step here if you are doing checkpointing and expect to start from step where you left off.
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
optimizer=None
if (k_OPTIMIZER == "adam") :
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=k_adamepsilon ).minimize(meanloss, var_list=trainable, global_step=global_step)
if (k_OPTIMIZER == "gd") :
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(meanloss, var_list=trainable, global_step=global_step)
assert(optimizer)
#Get the beta and gamma ops used for batchn ormalization since we have to update them explicitly during training
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print('extra update ops are ' + str(extra_update_ops))
#---------------------------------------------------------------
# VALIDATE
#--------------------------------------------------------------
# The nodes are used for running the validation data and getting accuracy scores from the logits
with tf.name_scope("VALIDATION"):
    softmax_preds = tf.nn.softmax(logits=logits, name="softmax_preds")
    # argmax returns a batchsize tensor of type int64, batchsize tensor of booleans
    # equal returns a batchsize tensor of type boolean
    correct_preds = tf.equal(tf.argmax(softmax_preds, 1), tf.argmax(Y, 1))
    batchNumCorrect = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(
    # All this, just to feed a friggin float computed over several batches into a tensor we want to use for a summary
    validationtensor = tf.Variable(0.0, trainable=False, name="validationtensor")
    wtf = tf.placeholder(tf.float32, ())
    summary_validation = tf.assign(validationtensor, wtf)
#-----------------------------------------------------------------------------------
# These will be available to other programs that want to use this trained net.
# (a custom collection key; consumers fetch these nodes by position)
tf.GraphKeys.USEFUL = 'useful'
tf.add_to_collection(tf.GraphKeys.USEFUL, X) #input place holder
tf.add_to_collection(tf.GraphKeys.USEFUL, keepProb) #place holder
tf.add_to_collection(tf.GraphKeys.USEFUL, softmax_preds)
tf.add_to_collection(tf.GraphKeys.USEFUL, w1)
# Biases exist only when batchnorm is off (see Step 3).
if (FLAGS.batchnorm==0) :
    tf.add_to_collection(tf.GraphKeys.USEFUL, b1)
tf.add_to_collection(tf.GraphKeys.USEFUL, w2)
if (FLAGS.batchnorm==0) :
    tf.add_to_collection(tf.GraphKeys.USEFUL, b2)
tf.add_to_collection(tf.GraphKeys.USEFUL, W_fc1)
tf.add_to_collection(tf.GraphKeys.USEFUL, b_fc1)
tf.add_to_collection(tf.GraphKeys.USEFUL, W_fc2)
tf.add_to_collection(tf.GraphKeys.USEFUL, b_fc2)
#-----------------------------------------------------------------------------------
# Run the validation set through the model and compute statistics to report as summaries
def validate(sess, printout=False) :
    """Run the whole validation set through the model.

    Accumulates the number of correct predictions over k_numVBatches
    batches, then evaluates the merged validation summary with the
    epoch-level accuracy fed in.  Returns the merged summary (or None if
    an exception interrupted the loop).
    """
    with tf.name_scope ( "summaries" ):
        # test the model
        total_correct_preds = 0
        # FIX: initialize so the `return` below cannot raise NameError when
        # the except branch fires before msummary is assigned.
        msummary = None
        try:
            for i in range(k_numVBatches):
                X_batch, Y_batch = sess.run([vimageBatch, vlabelBatch])
                batch_correct, predictions = sess.run([batchNumCorrect, softmax_preds], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : 1., isTraining : False})
                total_correct_preds += batch_correct
                #print (' >>>> Batch " + str(i) + ' with batch_correct = ' + str(batch_correct) + ', and total_correct is ' + str(total_correct_preds))
                if printout:
                    print(' labels for batch:')
                    print(Y_batch)
                    print(' predictions for batch')
                    print(predictions)
                    # print num correct for each batch
                    print(u'(Validation batch) num correct for batchsize of {0} is {1}'.format(k_vbatchsize , batch_correct))
            print (u'(Validation EPOCH) num correct for EPOCH size of {0} ({1} batches) is {2}'.format(validationSamples , i+1 , total_correct_preds))
            print('so the percent correction for validation set = ' + str(total_correct_preds/validationSamples))
            msummary = sess.run(mergedvalidation, feed_dict ={ X : X_batch , Y : Y_batch, wtf : total_correct_preds/validationSamples, keepProb : 1., isTraining : False}) #using last batch to computer loss for summary
        # FIX: `except Exception, e:` / `print e` is Python-2-only syntax
        # (a SyntaxError on Python 3); `as e` + print() works on both.
        except Exception as e:
            print(e)
        return msummary
#--------------------------------------------------------------
# Visualize with Tensorboard
# -------------------------------------------------------------
def create_train_summaries ():
    """Attach the training summaries (loss scalar plus per-layer weight and
    activation histograms) and return the merged summary op."""
    with tf.name_scope ( "train_summaries" ):
        tf.summary.scalar ( "mean_loss" , meanloss_primary)
        # (tag, tensor) pairs, registered in layer order
        histogram_specs = [
            ("w_1", w1), ("l1preactivation", l1preactivation), ("h_1", h1),
            ("w_2", w2), ("l2preactivation", l2preactivation), ("h_2", h2),
            ("w_fc1", W_fc1), ("fc1preactivation", fc1preactivation), ("h_fc1", h_fc1),
            ("w_fc2", W_fc2),
        ]
        for tag, tensor in histogram_specs:
            tf.summary.histogram (tag, tensor)
        return tf.summary.merge_all ()
mergedtrain = create_train_summaries()
def create_validation_summaries ():
    """Attach the validation-accuracy scalar summary and return the merged op."""
    with tf.name_scope ( "validation_summaries" ):
        tf.summary.scalar ( "summary_validation", summary_validation)
        merged = tf.summary.merge_all ()
    return merged
mergedvalidation = create_validation_summaries()
# --------------------------------------------------------------
# TRAIN
#---------------------------------------------------------------
def trainModel():
    """Run the training loop until the input queues are exhausted.

    Restores from a checkpoint when CHECKPOINTING is set, trains one batch at
    a time (optionally with the MTL target when k_mtlnumclasses is nonzero),
    periodically logs loss/summaries, checkpoints, and finally exports the
    meta graph and a pickled parameter state.
    """
    with tf.Session() as sess:
        writer = tf.summary.FileWriter(LOGDIR) # for logging
        saver = tf.train.Saver() # for checkpointing
        #### Must run local initializer if nepochs arg to getImage is other than None!
        #sess.run(tf.local_variables_initializer())
        sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
        #not doing it here, but global_step could have been initialized by a checkpoint
        if CHECKPOINTING :
            ckpt = tf.train.get_checkpoint_state(os.path.dirname(CHKPTBASE))
        else :
            ckpt = False
        if ckpt and ckpt.model_checkpoint_path:
            print('Checkpointing restoring from path ' + ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            #only save graph if we are not starting run from a checkpoint
            writer.add_graph(sess.graph)
        initial_step = global_step.eval()
        print('initial step will be ' + str(initial_step)) # non-zero if check pointing
        batchcount=initial_step
        start_time = time.time()
        # Create a coordinator, launch the queue runner threads.
        coord = tf.train.Coordinator()
        enqueue_threads = tf.train.start_queue_runners(sess=sess,coord=coord)
        try:
            batchcountloss = 0 #for reporting purposes
            while True: # for each batch, until data runs out
                if coord.should_stop():
                    break
                # run one optimizer step; MTL path feeds the secondary targets too
                if k_mtlnumclasses :
                    X_batch, Y_batch, MTLY_batch = sess.run([imageBatch, labelBatch, mtltargetBatch])
                    _, loss_batch, _nada = sess.run([optimizer, meanloss, extra_update_ops], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : k_keepProb, MTLY : MTLY_batch, isTraining : True}) #DO WE NEED meanloss HERE? Doesn't optimer depend on it?
                else :
                    X_batch, Y_batch = sess.run([imageBatch, labelBatch])
                    _, loss_batch, _nada = sess.run([optimizer, meanloss, extra_update_ops], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : k_keepProb, isTraining : True}) #DO WE NEED meanloss HERE? Doesn't optimer depend on it?
                batchcountloss += loss_batch
                batchcount += 1
                # periodic loss report + train/validation summaries
                if (not batchcount%k_batchesPerLossReport) :
                    print('batchcount = ' + str(batchcount))
                    avgBatchLoss=batchcountloss/k_batchesPerLossReport
                    print(u'Average loss per batch {0}: {1}'.format(batchcount, avgBatchLoss))
                    batchcountloss=0
                    tsummary = sess.run(mergedtrain, feed_dict ={ X : X_batch , Y : Y_batch, keepProb : 1.0, isTraining : False }) #?? keep prob ??
                    writer.add_summary(tsummary, global_step=batchcount)
                    vsummary=validate(sess)
                    writer.add_summary(vsummary, global_step=batchcount)
                if not (batchcount % k_checkpointPeriod) :
                    saver.save(sess, CHKPTBASE, global_step=batchcount)
        except tf.errors.OutOfRangeError, e: #done with training epochs. Validate once more before closing threads
            # So how, finally?
            print('ok, let\'s validate now that we\'ve run ' + str(batchcount) + 'batches ------------------------------')
            vsummary=validate(sess, False)
            writer.add_summary(vsummary, global_step=batchcount+1)
            coord.request_stop(e)
        except Exception, e:
            print('train: WTF')
            print e
        finally :
            # always stop the queue threads and flush the summary writer
            coord.request_stop()
            coord.join(enqueue_threads)
            writer.close()
            # grab the total training time
            totalruntime = time.time() - start_time
            print 'Total training time: {0} seconds'.format(totalruntime)
            print(' Finished!') # should be around 0.35 after 25 epochs
            print(' now save meta model')
            meta_graph_def = tf.train.export_meta_graph(filename=OUTDIR + '/my-model.meta')
            pickledModel.saveState(sess, trainable, parameters, OUTDIR + '/state.pickle')
            print(' ===============================================================')
print(' ===============================================================')
#=============================================================================================
print(' ---- Actual parameters for this run ----')
print('INDIR : ' + INDIR)
print('k_freqbins : ' + str(k_freqbins)
+ ' ' + 'k_numFrames: ' + str(k_numFrames) )
#FLAGS.freqorientation, k_height, k_numFrames, k_inputChannels
print('FLAGS.freqorientation: ' + str(FLAGS.freqorientation)
+ ', ' + 'k_height: ' + str(k_height)
+ ', ' + 'k_numFrames: ' + str(k_numFrames)
+ ', ' + 'k_inputChannels: ' + str(k_inputChannels))
#k_numClasses, validationSamples, trainingSamples
print('k_numClasses: ' + str(k_numClasses)
+ ', ' + 'validationSamples: ' + str(validationSamples)
+ ', ' + 'trainingSamples: ' + str(trainingSamples))
#learning_rate, k_keepProb, k_batchsize, n_epochs
print('learning_rate: ' + str(learning_rate)
+ ', ' + 'k_keepProb: ' + str(k_keepProb)
+ ', ' + 'k_batchsize: ' + str(k_batchsize)
+ ', ' + 'n_epochs: ' + str(n_epochs))
#K_NUMCONVLAYERS, L1_CHANNELS, L2_CHANNELS, FC_SIZE
print('K_NUMCONVLAYERS: ' + str(K_NUMCONVLAYERS)
+ ', ' + 'L1_CHANNELS: ' + str(L1_CHANNELS)
+ ', ' + 'L2_CHANNELS: ' + str(L2_CHANNELS)
+ ', ' + 'FC_SIZE: ' + str(FC_SIZE))
#k_downsampledHeight, k_downsampledWidth , k_convLayerOutputChannels
print('k_downsampledHeight: ' + str(k_downsampledHeight)
+ ', ' + 'k_downsampledWidth: ' + str(k_downsampledWidth)
+ ', ' + 'k_convLayerOutputChannels: ' + str(k_convLayerOutputChannels))
#K_ConvRows, K_ConvCols, k_ConvStrideRows, k_ConvStrideCols, k_poolRows, k_poolStrideRows
print('K_ConvRows: ' + str(K_ConvRows)
+ ', ' + 'K_ConvCols: ' + str(K_ConvCols)
+ ', ' + 'k_ConvStrideRows: ' + str(k_ConvStrideRows)
+ ', ' + 'k_ConvStrideCols: ' + str(k_ConvStrideCols)
+ ', ' + 'k_poolRows: ' + str(k_poolRows)
+ ', ' + 'k_poolStrideRows : ' + str(k_poolStrideRows ))
if (k_OPTIMIZER == "adam") :
print('k_OPTIMIZER: ' + str(k_OPTIMIZER)
+ ', ' + 'k_adamepsilon: ' + str(k_adamepsilon))
else :
print('k_OPTIMIZER: ' + str(k_OPTIMIZER))
print('LEARNCONDITION: ' + LEARNCONDITION)
print('batchnorm: ' + str(FLAGS.batchnorm))
print('k_mtlnumclasses: ' + str(k_mtlnumclasses))
#OUTDIR
print('OUTDIR: ' + str(OUTDIR))
print('CHECKPOINTING: ' + str(CHECKPOINTING))
print(' vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ')
for x in trainable :
print(x.name + ' : ' + str(x.get_shape()))
print('TOTAL number of parameters in the model is ' + str(np.sum([np.product([xi.value for xi in x.get_shape()]) for x in trainable])))
print(' vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ')
#=============================================================================================
# Do it
trainModel()
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,543
|
lonce/dcn_soundclass
|
refs/heads/master
|
/utils/Centroid2ndaryClassMaker.py
|
import os
import re
import numpy as np
import math
import tiffspect
import librosa
import librosa.display
import matplotlib.pyplot as plt
# Root directory holding the ESC-50 spectrogram .tif files to regroup.
K_SPECTDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-spect'
k_soundsPerClass=125 # must divide the total number of sounds evenly!
#============================================
def weightedCentroid(spect) :
    """
    param: spect - a magnitude spectrum
    Returns the spectral centroid averaged over frames, weighted by each frame's rms energy
    """
    frame_centroids = librosa.feature.spectral_centroid(S=spect)
    frame_rms = librosa.feature.rmse(S=spect)
    weighted_total = np.sum(np.multiply(frame_centroids, frame_rms))
    return weighted_total / np.sum(frame_rms)
def log2mag(S) :
    """ Invert a dB-style log magnitude spectrum back to linear magnitude"""
    return 10.0 ** np.divide(S, 20.)
def spectFile2Centroid(fname) :
    """ Our spect files are in log magnitude, and in tiff format"""
    logspect, _ = tiffspect.Tiff2LogSpect(fname)
    return weightedCentroid(log2mag(logspect))
#============================================
# Next, some utilities for managing files
#----------------------------------------
def fullpathfilenames(directory):
    '''Returns the full path to all files living in directory (the leaves in the directory tree)
    '''
    collected = []
    for dirpath, _dirnames, basenames in os.walk(os.path.expanduser(directory)):
        for basename in basenames:
            collected.append(os.path.join(dirpath, basename))
    return collected
def esc50files(directory, regexString) :
    """Return files under directory whose full path matches regexString (anchored at the start)."""
    pattern = re.compile(regexString)
    return [fname for fname in fullpathfilenames(directory) if pattern.match(fname)]
def addClass2Filename(fname, cname, action="move") :
    """
    Embed a secondary class tag in a .tif filename: foo.tif -> foo._cname_.tif

    :param fname: path of the .tif file to tag
    :param cname: the class tag (typically a group index) to embed
    :param action: "move" renames the file on disk; anything else just prints the new name
    """
    # Anchor the suffix: the previous pattern '.tif' treated '.' as a regex
    # wildcard and substituted every 'Xtif' occurrence anywhere in the path
    # (e.g. 'atifx.tif' became '._c_.tifx.tif').
    newname = re.sub(r'\.tif$', '._'+ str(cname) + '_.tif', fname)
    if (action == "move") :
        os.rename(fname, newname)
    else :
        print(newname)
def filestats (filenames, func) :
    """Pair each filename with func(filename); returns a list of [name, value] pairs."""
    stats = []
    for name in filenames:
        stats.append([name, func(name)])
    return stats
#============================================
def createBalancedClassesWithFunc(topDirectory, regexString, func, numPerClass, action="move") :
    """
    Groups files in topDirectory matching regexString by the single number returned by func.
    Each group will have numPerClass files in it (the total number of files must be divisible by numPerClass)
    Renames them using their group index, gidx: origFilename.tif -> origFilename._gidx_.tif
    If action="move", files are renamed. Otherwise, the new names are just printed to console.
    """
    wholelist=esc50files(topDirectory, regexString)
    stats = filestats(wholelist, func)
    # sort ascending by the computed statistic so similar files land in the same group
    stats_ordered = sorted(stats, key=lambda a_entry: a_entry[1])
    # column 0 is the filename; reshape into (numGroups, numPerClass)
    # NOTE(review): reshape raises ValueError if the file count is not divisible by numPerClass
    classes=np.array(stats_ordered)[:,0].reshape(-1, numPerClass)
    for i in range(len(classes)) :
        for j in range(len(classes[i])) :
            # tag each file with its group index i
            addClass2Filename(classes[i,j],i, action)
    return stats, stats_ordered #returns stuff just for viewing
#--------------------------------------------------------------------------------
#if you got yourself in trouble, and need to remove all the secondary classnames:
def removeAllSecondaryClassNames(directory) :
    """Remove ALL the 2ndary class names (of the form ._cname_) from ALL files in the directory, restoring them to their original names"""
    for fname in fullpathfilenames(directory) :
        m = re.match(r'.*?(\._.*?_)\.tif$', fname) #grabs the string of all secondary classes if there is a seq of them
        if (m) :
            # escape the captured tag before using it as a pattern: it contains
            # '.' which would otherwise match any character in re.sub
            newname = re.sub(re.escape(m.group(1)), '', fname)
            print('Will move ' + fname + '\n to ' + newname)
            os.rename(fname, newname)
        else :
            print('do nothing with ' + fname)
#============================================
# DO IT
# First pass only prints the proposed renames for inspection...
stats, stats_ordered = createBalancedClassesWithFunc(K_SPECTDIR, '.*/([1-5]).*', spectFile2Centroid, k_soundsPerClass, action="print")
# ...second pass actually renames the files on disk.
stats, stats_ordered = createBalancedClassesWithFunc(K_SPECTDIR, '.*/([1-5]).*', spectFile2Centroid, k_soundsPerClass, action="move")
|
{"/testPickledModel.py": ["/pickledModel.py"], "/testTrainedModel.py": ["/trainedModel.py"], "/style_transfer.py": ["/pickledModel.py"]}
|
1,544
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/PADCompleter.py
|
__author__ = 'Aaron'
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtWidgets, QtCore, QtGui
class PADCompleter(QCompleter):
    """Completer that re-filters its popup with a case-insensitive substring
    match against the source model each time the user edits the text."""
    def __init__(self):
        super().__init__()
        # the search string most recently typed by the user
        self.prefix = ''
        # NOTE(review): this attribute shadows QCompleter.model(); any Qt code
        # calling self.model() after construction would hit the attribute
        # instead of the method — confirm no caller relies on the method.
        self.model = None
    def _set_model_(self, model):
        # remember the unfiltered source model and install it on the completer
        self.model = model
        super().setModel(self.model)
    def _update_model_(self):
        # rebuild a proxy model that keeps only rows containing the prefix
        prefix = self.prefix
        class InnerProxyModel(QSortFilterProxyModel):
            def filterAcceptsRow(self, row, parent):
                index = self.sourceModel().index(row, 0, parent)
                search_string = prefix.lower()
                model_string = self.sourceModel().data(index, Qt.DisplayRole).lower()
                #print(search_string, 'in', model_string, search_string in model_string)
                return search_string in model_string
        proxy_model = InnerProxyModel()
        proxy_model.setSourceModel(self.model)
        self.setModel(proxy_model)
        #print('match :', proxy_model.rowCount())
    def splitPath(self, path):
        # Qt calls this with the current text; used here as the filter trigger
        self.prefix = str(path)
        self._update_model_()
        # NOTE(review): QCompleter has no sourceModel(), and data() is called
        # without an index — this line likely raises at runtime; the usual
        # pattern for filtering completers returns [] here. Confirm before changing.
        return self.sourceModel().data()
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,545
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/Calculator_Screen.py
|
__author__ = 'Aaron'
# Class Description:
# Create framework for the split screens used in PAD_GUI
# import necessary files
import os
import json
from functools import partial
from PyQt5.QtWidgets import (QLabel, QWidget, QHBoxLayout,
QFrame, QSplitter, QStyleFactory,
QGridLayout, QLineEdit, QPushButton,
QVBoxLayout, QCompleter, QComboBox,
QScrollArea, QToolTip)
from PyQt5.QtGui import QPixmap, QColor, QFont
from PyQt5.QtCore import Qt, QStringListModel
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
class CalculatorScreen(QHBoxLayout):
def __init__(self, gui):
super().__init__()
# 0 = lead1, 1 = sub1,..., 5 = lead2
self.team = [PADMonster() for x in range(6)]
self.pad_team = PADTeam(self.team)
# keeps old team stats before modification from leader multipliers
self.team_base = [PADMonster() for x in range(6)]
# open monsters.txt and load it into a python object using json
# self.json_file = requests.get('https://padherder.com/api/monsters')
self.json_file = open(os.path.join('.\monsters.txt'), 'r')
self.json_monsters = json.loads(self.json_file.read())
# print(self.json_monsters[0]["name"])
self.completer_string_list_model = QStringListModel()
array_of_monster_names = []
for x in range(len(self.json_monsters)):
array_of_monster_names.append(self.json_monsters[x]["name"])
self.completer_string_list_model.setStringList(array_of_monster_names)
# checks if the modified button has been pressed so other functions can know which stat to display
self.is_pressed = False
QToolTip.setFont(QFont('SansSerif', 10))
self.init_screen(gui)
    def init_screen(self, gui):
        """Lay out the calculator screen: a monitor grid with team stats on
        top and an input section (names, levels, pluses, element buttons)
        below, joined by a vertical QSplitter added to this layout.

        :param gui: parent QWidget for all child widgets
        """
        # add things to top of the screen here (Monitor section)!
        # Create an overarching top widget/layout
        supreme_top_box = QWidget()
        supreme_top_box_layout = QVBoxLayout()
        supreme_top_box.setLayout(supreme_top_box_layout)
        # Monitor section will have labels inside of a grid layout
        top_box = QWidget()
        grid = QGridLayout()
        top_box.setLayout(grid)
        supreme_top_box_layout.addWidget(top_box)
        # Creates lists of labels, initially having only static labels and having
        # the tangible labels substituted with ''
        static_labels = ['', '', '', '', '', '', '', '',
                         '', 'Lead 1', 'Sub 1 ', 'Sub 2 ', 'Sub 3 ', 'Sub 4 ', 'Lead 2', 'Team Totals',
                         'Type:', '', '', '', '', '', '', '',
                         'HP:', 0, 0, 0, 0, 0, 0, 0,
                         'Atk:', 0, 0, 0, 0, 0, 0, 0,
                         'Pronged Atk:', 0, 0, 0, 0, 0, 0, 0,
                         'RCV:', 0, 0, 0, 0, 0, 0, 0,
                         'Awakenings:', '', '', '', '', '', '', '']
        self.display_labels = [QLabel(gui) for x in range(len(static_labels))]
        for s_label, d_label in zip(static_labels, self.display_labels):
            if s_label == '':
                continue
            d_label.setText(str(s_label))
        # 8x8 grid coordinates, row-major, one per display label
        positions = [(i, j) for i in range(8) for j in range(8)]
        for position, d_label in zip(positions, self.display_labels):
            # why *position? because the array is [(i,j), (i,j),...,(i,j)]
            grid.addWidget(d_label, *position)
            grid.setAlignment(d_label, Qt.AlignHCenter)
        self.leader_skills_labels = [QLabel(gui) for x in range(2)]
        for x in range(2):
            self.leader_skills_labels[x].setText('Leader Skill '+str(x+1)+': ')
            supreme_top_box_layout.addWidget(self.leader_skills_labels[x])
        # Create another row of labels for Awoken Skills Image Lists
        # Create another row of labels to show the Leader Skill Multipliers
        ########################################################################
        # add things to bottom of the screen here (Input section)!
        # Input section will be split in two: have LineEdits in a grid layout and then PushButtons in a separate grid
        # layout
        bottom_box = QWidget()
        grid2 = QGridLayout()
        bottom_box.setLayout(grid2)
        bottom_labels_text = ['Leader 1', 'Sub 1', 'Sub 2', 'Sub 3', 'Sub 4', 'Leader 2']
        bottom_labels = [QLabel(gui) for x in range(6)]
        instruction_labels_text = ['Please enter the name here:', 'Enter level here:', 'Enter pluses here:']
        instruction_labels = [QLabel(gui) for x in range(3)]
        self.line_edits = [QLineEdit(gui) for x in range(6)]
        # one shared completer backed by the monster-name model built in __init__
        line_edit_completer = QCompleter()
        line_edit_completer.setCaseSensitivity(Qt.CaseInsensitive)
        line_edit_completer.setFilterMode(Qt.MatchContains)
        line_edit_completer.setModel(self.completer_string_list_model)
        # Combo Boxes for Levels and Pluses
        level_boxes = [QComboBox(gui) for x in range(6)]
        self.plus_boxes_types = [QComboBox(gui) for x in range(6)]
        self.plus_boxes_values = [QComboBox(gui) for x in range(6)]
        for x in range(6):
            for n in range(0,100):
                if n != 0 and n <= self.team[x].max_level:
                    level_boxes[x].addItem(str(n))
                self.plus_boxes_values[x].addItem(str(n))
            self.plus_boxes_types[x].addItem('hp')
            self.plus_boxes_types[x].addItem('atk')
            self.plus_boxes_types[x].addItem('rcv')
            self.plus_boxes_values[x].hide()
        # add the labels and line_edits to the bottom grid
        for x in range(6):
            bottom_labels[x].setText(bottom_labels_text[x])
            bottom_labels[x].adjustSize()
            grid2.addWidget(bottom_labels[x], *(x+1, 0))
            grid2.addWidget(self.line_edits[x], *(x+1, 1))
            grid2.addWidget(level_boxes[x], *(x+1, 2))
            grid2.addWidget(self.plus_boxes_types[x], *(x+1, 3))
            grid2.addWidget(self.plus_boxes_values[x], *(x+1, 3))
            self.line_edits[x].textChanged[str].connect(partial(self._on_changed_, x))
            self.line_edits[x].setCompleter(line_edit_completer)
            self.line_edits[x].setMaxLength(50)
            level_boxes[x].activated[str].connect(partial(self._on_level_activated_, x))
            self.plus_boxes_types[x].activated[str].connect(partial(self._on_plus_type_activated_, x))
        for x in range(3):
            instruction_labels[x].setText(instruction_labels_text[x])
            instruction_labels[x].adjustSize()
            grid2.addWidget(instruction_labels[x], *(0, x+1))
        ###########################################################################
        # create the button widgets in a separate widget below bottom_box
        below_bottom_box = QWidget()
        grid3 = QGridLayout()
        below_bottom_box.setLayout(grid3)
        # create a set of buttons below the line_edits:
        # White(Base) Red Blue Green Yellow Purple
        buttons = []
        button_labels = ['Fire', 'Water', 'Wood', 'Light', 'Dark', 'Base']
        button_colors = ['red', 'lightskyblue', 'green', 'goldenrod', 'mediumpurple', 'white']
        for x in range(6):
            buttons.append(QPushButton(button_labels[x], gui))
            buttons[x].clicked.connect(partial(self._handle_button_, x))
            buttons[x].setStyleSheet('QPushButton { background-color : %s }' % button_colors[x])
            grid3.addWidget(buttons[x], *(0, x))
        # create a QHBoxLayout widget that holds the page turners and toggle
        page_turner = QWidget()
        page_turner_layout = QHBoxLayout()
        page_turner.setLayout(page_turner_layout)
        # create the page turner and toggle widgets
        page_turner_layout.addStretch()
        self.toggle_button = QPushButton('Toggle On Modified Stats', gui)
        self.toggle_button.setCheckable(True)
        self.toggle_button.clicked[bool].connect(self._handle_toggle_button_)
        page_turner_layout.addWidget(self.toggle_button)
        page_turner_layout.addStretch()
        # Create overarching bottom widget
        supreme_bottom_box = QWidget()
        supreme_bottom_box_layout = QVBoxLayout()
        supreme_bottom_box.setLayout(supreme_bottom_box_layout)
        button_label = QLabel('Select from below the attribute you would like to display.')
        supreme_bottom_box_layout.setAlignment(button_label, Qt.AlignHCenter)
        supreme_bottom_box_layout.addWidget(bottom_box)
        supreme_bottom_box_layout.addWidget(button_label)
        supreme_bottom_box_layout.addWidget(below_bottom_box)
        supreme_bottom_box_layout.addWidget(page_turner)
        # Add the two screens into a split screen
        splitter = QSplitter(Qt.Vertical)
        splitter.addWidget(supreme_top_box)
        splitter.addWidget(supreme_bottom_box)
        # Add the split screen to our main screen
        self.addWidget(splitter)
def _create_monster_(self, index, dict_index, name):
"""
When a valid name has been entered into the line edits, create a PADMonster Class
using the values stored in the json dictionary and save the PADMonster to the appropriate
index in the team array and PADTeam Class subsequently.
:param index: 0 = lead 1, 1 = sub 1, 2 = sub 2, 3 = sub 3, 4 = sub 4, 5 = lead 2
:param dict_index: the index in the json dictionary containing the monster
:param name: the monster's name
"""
self.team[index] = PADMonster()
self.team_base[index] = PADMonster()
hp_max = self.json_monsters[dict_index]["hp_max"]
atk_max = self.json_monsters[dict_index]["atk_max"]
rcv_max = self.json_monsters[dict_index]["rcv_max"]
attr1 = self.json_monsters[dict_index]["element"]
attr2 = self.json_monsters[dict_index]["element2"]
type1 = self.json_monsters[dict_index]["type"]
type2 = self.json_monsters[dict_index]["type2"]
image60_size = self.json_monsters[dict_index]["image60_size"]
image60_href = self.json_monsters[dict_index]["image60_href"]
awakenings = self.json_monsters[dict_index]["awoken_skills"]
leader_skill_name = self.json_monsters[dict_index]["leader_skill"]
max_level = self.json_monsters[dict_index]["max_level"]
hp_min = self.json_monsters[dict_index]["hp_min"]
atk_min = self.json_monsters[dict_index]["atk_min"]
rcv_min = self.json_monsters[dict_index]["rcv_min"]
hp_scale = self.json_monsters[dict_index]["hp_scale"]
atk_scale = self.json_monsters[dict_index]["atk_scale"]
rcv_scale = self.json_monsters[dict_index]["rcv_scale"]
# use PAD_Monster's function to set our monster's stats
self.team[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,
type2, image60_size, image60_href, awakenings,
leader_skill_name, max_level, hp_min, hp_scale,
atk_min, atk_scale, rcv_min, rcv_scale)
# create a PADTeam Class according to our team of Six PADMonster Classes
self.pad_team = PADTeam(self.team)
# set our labels according to our monsters
self._set_labels_(self.team[index], index)
# save our team for future modifications:
self.team_base[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,
type2, image60_size, image60_href, awakenings,
leader_skill_name, max_level, hp_min, hp_scale,
atk_min, atk_scale, rcv_min, rcv_scale)
def _set_labels_(self, monster, index):
"""
Set the labels according to the values in the indexed PADMonster Class
:param monster: the PADMonster associated with the index
:param index: the index associated with the PADMonster [0-5]
"""
# extract and display image
self.display_labels[index + 1].setPixmap(QPixmap(os.path.join('images') + '/' + monster.name + '.png'))
# display name
font = QFont()
font.setPointSize(5)
type_text = monster.type_main_name+'/'+monster.type_sub_name
self.display_labels[index + 17].setText(type_text)
self.display_labels[index + 17].setFont(font)
self.display_labels[index + 17].adjustSize()
self.display_labels[index + 17].setToolTip(type_text)
# display hp
hp = monster.hp
# if modified by leader skills button has been pressed, multiply monster's stat by its
# respective index in the stats modified variable of the PADTeam Class
if self.is_pressed:
hp *= self.pad_team.stats_modified_by[index][0]
# if plus values have been set, display how many
if monster.hp_plus > 0:
self.display_labels[index + 25].setText(str(round(hp)) + ' (+' + str(monster.hp_plus) + ')')
else:
self.display_labels[index + 25].setText(str(round(hp)))
self.display_labels[index + 25].adjustSize()
# display attack and pronged attack of main element
self._set_attack_labels_(index, 5, monster.atk[monster.attr_main], monster.pronged_atk[monster.attr_main],
monster.base_atk_plus)
# display rcv
rcv = monster.rcv
# if modified by leader skills button has been pressed, multiply monster's stat by its
# respective index in the stats modified variable of the PADTeam Class
if self.is_pressed:
rcv *= self.pad_team.stats_modified_by[index][2]
# if plus values have been set, display how many
if monster.rcv_plus > 0:
self.display_labels[index + 49].setText(str(round(rcv)) + ' (+' + str(monster.rcv_plus) + ')')
else:
self.display_labels[index + 49].setText(str(round(rcv)))
self.display_labels[index + 49].adjustSize()
# display awakenings
awakenings_text = ''
awakenings_font = QFont()
awakenings_font.setPointSize(6)
for x in range(len(monster.awakenings)):
if monster.awakenings[x][2] > 0:
awakenings_text += monster.awakenings[x][0]+': '+str(monster.awakenings[x][2])+'\n'
# set awakenings string to a tooltip since it can't fit into the grid
self.display_labels[index + 57].setText('Hover Me!')
self.display_labels[index + 57].setFont(awakenings_font)
self.display_labels[index + 57].adjustSize()
self.display_labels[index + 57].setToolTip(awakenings_text)
# calculate and change our display labels for team total values with each change in monster
self._set_team_labels_()
# if the monster is in the first or last index, it's considered the leader and its leader skill name
# and effect are displayed accordingly.
if index == 0:
text = 'Leader Skill 1: '+self.team[0].leader_skill_name+' > '+self.team[0].leader_skill_desc
# if the string is too long, splice it up
if len(text) > 50:
divider = len(text)//2
# separate the string at a part that is a whitespace
while text[divider] != ' ':
divider += 1
final_text = text[:divider]+'\n'+text[divider:]
else:
final_text = text
self.leader_skills_labels[0].setText(final_text)
elif index == 5:
text = 'Leader Skill 1: '+self.team[5].leader_skill_name+' > '+self.team[5].leader_skill_desc
# if the string is too long, splice it up
if len(text) > 50:
divider = len(text)//2
# separate the string at a part that is a whitespace
while text[divider] != ' ':
divider += 1
final_text = text[:divider]+'\n'+text[divider:]
else:
final_text = text
self.leader_skills_labels[1].setText(final_text)
def _set_attack_labels_(self, index, color_num, atk_value, pronged_atk_value, plus_value = 0):
"""
Set the attack labels according to the values given.
:param index: the index of the PADMonster [0-5] and 6 = the team total
:param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
:param atk_value: the value to be displayed in the attack label
:param pronged_atk_value: the value to be displayed in the pronged attack label
:param plus_value: the amount of pluses is set to 0 initially
"""
# an array holding the colors associated with each value of color_num
colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'black']
# if modified by leader skills button has been pressed, multiply monster's stat by its
# respective index in the stats modified variable of the PADTeam Class
if self.is_pressed and index != 6:
atk_value *= self.pad_team.stats_modified_by[index][1]
pronged_atk_value *= self.pad_team.stats_modified_by[index][1]
# display attack of main element
if plus_value > 0:
self.display_labels[index + 33].setText(str(round(atk_value)) + ' (+' + str(plus_value) + ')')
else:
self.display_labels[index + 33].setText(str(round(atk_value)))
self.display_labels[index + 33].setStyleSheet("QLabel { color : %s }" % colors[color_num])
self.display_labels[index + 33].adjustSize()
# display pronged attack of main element
self.display_labels[index + 41].setText(str(round(pronged_atk_value)))
self.display_labels[index + 41].setStyleSheet("QLabel {color : %s }" % colors[color_num])
self.display_labels[index + 41].adjustSize()
def _set_team_labels_(self):
"""
Access the PADTeam Class to extract the values to be displayed in the Team Totals Labels
"""
# initialize objects to store the total values
hp_total = self.pad_team.hp
atk_total = self.pad_team.base_atk
pronged_atk_total = self.pad_team.base_pronged_atk
rcv_total = self.pad_team.rcv
total_awakenings = self.pad_team.awakenings
# if the modified by leader skills button is pressed, use the team's modified stats instead
if self.is_pressed:
hp_total = self.pad_team.hp_modified
atk_total = self.pad_team.base_atk_modified
pronged_atk_total = self.pad_team.base_pronged_atk_modified
rcv_total = self.pad_team.rcv_modified
# display our total value objects on our labels
self.display_labels[31].setText(str(round(hp_total)))
self.display_labels[31].adjustSize()
self._set_attack_labels_(6, 5, atk_total, pronged_atk_total)
self.display_labels[55].setText(str(round(rcv_total)))
self.display_labels[55].adjustSize()
# set the label containing the team's total awakenings to a tooltip since it won't fit
awakenings_font = QFont()
awakenings_font.setPointSize(6)
self.display_labels[63].setText('Hover Me!')
self.display_labels[63].setFont(awakenings_font)
self.display_labels[63].adjustSize()
self.display_labels[63].setToolTip(total_awakenings)
def _get_total_attr_attack_(self, attr):
"""
Returns the values stored in PADTeam for the Team's Total Attacks and Pronged Attacks
for the specified element or the sum of all the element's attacks (BASE)
:param attr: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
:return:
"""
# if we're not looking for the base values a.k.a. sum of all the values
if attr != 5:
if not self.is_pressed:
atk_total = self.pad_team.atk[attr]
pronged_atk_total = self.pad_team.pronged_atk[attr]
else:
atk_total = self.pad_team.atk_modified[attr]
pronged_atk_total = self.pad_team.pronged_atk_modified[attr]
# if we're looking for the base values
else:
if not self.is_pressed:
atk_total = self.pad_team.base_atk
pronged_atk_total = self.pad_team.base_pronged_atk
else:
atk_total = self.pad_team.base_atk_modified
pronged_atk_total = self.pad_team.base_pronged_atk_modified
return atk_total, pronged_atk_total
# when line_edits are altered, activate this line code according to the text in the line
def _on_changed_(self, index, text):
"""
When a line edit is altered, check the text entered to see if it matches with any of
the names in the json dictionary and create a PADMonster at the appropriate index in
the team array if the name is found.
:param index: the index of the line edit corresponding to the index of the PADMonster
in the team array.
:param text: the text currently inside the line edit
"""
for x in range(len(self.json_monsters)):
if text == self.json_monsters[x]["name"]:
self._create_monster_(index, x, text)
elif text.title() == self.json_monsters[x]["name"]:
self._create_monster_(index, x, text.title())
def _handle_button_(self, color_num, pressed):
"""
Only show the Attack and Pronged Attack values of the appropriate element or sum of the
elements if BASE is chosen.
:param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
:param pressed: useless event input
"""
for index in range(6):
if color_num == 5:
self._set_attack_labels_(index, color_num, self.team[index].atk[self.team[index].attr_main],
self.team[index].pronged_atk[self.team[index].attr_main])
else:
self._set_attack_labels_(index, color_num, self.team[index].atk[color_num],
self.team[index].pronged_atk[color_num])
atk_total, pronged_atk_total = self._get_total_attr_attack_(color_num)
self._set_attack_labels_(6, color_num, atk_total, pronged_atk_total)
def _handle_toggle_button_(self, pressed):
"""
If the modify stats by leader skills button is pressed, modify the button's text, set
the Class Variable is_pressed to True/False accordingly, and reset the labels now that
is_pressed has been changed.
:param pressed: Useless event input.
"""
if pressed:
self.is_pressed = True
self.toggle_button.setText('Toggle Off Modified Stats')
else:
self.is_pressed = False
self.toggle_button.setText('Toggle On Modified Stats')
for monster in range(6):
self._set_labels_(self.team[monster], monster)
def _on_level_activated_(self, index, level):
"""
If a level for the PADMonster has been selected, change the monster's base stats
according to that level, reset pad_team according to these new values and reset
labels accordingly.
:param index: PADMonster's index in the team array. [0-5]
:param level: the level the PADMonster will be set to
"""
self.team[index]._set_stats_at_level_(int(level))
self.team_base[index]._set_stats_at_level_(int(level))
self.pad_team = PADTeam(self.team)
for monster in range(6):
self._set_labels_(self.team[monster], monster)
    def _on_plus_type_activated_(self, index, text):
        """
        When 'hp', 'atk', or 'rcv' is picked in the plus-type drop-down, hide
        that menu and show the menu asking for a plus value between 0-99.
        :param index: PADMonster's index in the team array. [0-5]
        :param text: 'hp', 'atk', or 'rcv'
        """
        self.plus_boxes_types[index].hide()
        self.plus_boxes_values[index].show()
        # Drop any handler left over from a previous selection so the value box
        # does not accumulate duplicate callbacks; disconnect() raises when
        # nothing was connected yet, hence the swallow.
        try: self.plus_boxes_values[index].activated[str].disconnect()
        except Exception: pass
        self.plus_boxes_values[index].activated[str].connect(partial(self._on_plus_value_activated_, index, text))
        # Mute the type box while the value box is the active input.
        self.plus_boxes_types[index].disconnect()
    def _on_plus_value_activated_(self, index, type, value):
        """
        When a plus value is picked, apply that many pluses of the given type to
        the indexed monster (working and base copies), rebuild the team totals,
        and redisplay the new values.  The type box is re-shown and re-wired so
        the user can immediately pick another plus type.
        :param index: PADMonster's index in the team array. [0-5]
        :param type: 'hp', 'atk', or 'rcv'
        :param value: the number of pluses, 0-99, for the specified type
        """
        self.plus_boxes_types[index].show()
        # Re-wire the type box (it was disconnected while the value box was up).
        self.plus_boxes_types[index].activated[str].connect(partial(self._on_plus_type_activated_, index))
        self.plus_boxes_values[index].hide()
        self.team[index]._set_stats_with_pluses_(type, int(value))
        self.team_base[index]._set_stats_with_pluses_(type, int(value))
        # Totals depend on per-monster stats, so rebuild the aggregate.
        self.pad_team = PADTeam(self.team)
        for monster in range(6):
            self._set_labels_(self.team[monster], monster)
# class mouselistener(QLabel):
# def __init__(self):
# super().__init__()
#
# self.setMouseTracking(True)
# self.widget_location = self.rect()
#
# def mouseMoveEvent(self, event):
# posMouse = event.pos()
# font = QFont()
# if self.widget_location.contains(posMouse):
# font.setPointSize(8)
#
# QToolTip.setFont(font)
# self.setToolTip(self.text())
#
# return super().mouseReleaseEvent(event)
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,546
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/PAD_GUI.py
|
__author__ = 'Aaron'
# import necessary files
from PyQt5 import PyQt5
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout,
QFrame, QSplitter, QStyleFactory,
QMainWindow, QStackedWidget)
from PyQt5.QtCore import Qt
from PADScreen import PADScreen
class GUIMainWindow(QMainWindow):
    """Top-level window hosting the PADScreen stacked widget as its central widget."""
    def __init__(self):
        super().__init__()
        # PADScreen installs its File menu actions on this window's menu bar.
        central = PADScreen(self)
        self.setCentralWidget(central)
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('PAD Damage Calculator')
        self.show()
class PADGUI(QStackedWidget):
    """
    Alternative stacked-widget wrapper around PADScreen.
    NOTE(review): this class appears unused (the __main__ guard builds
    GUIMainWindow instead) and looks broken as written -- see inline notes.
    """
    def __init__(self, main_window):
        super().__init__()
        self.init_UI(main_window)
    def init_UI(self, main_window):
        #The initial screen that we'll be working on
        # NOTE(review): PADScreen.__init__ accepts only (main_window); passing
        # two arguments here would raise TypeError -- confirm intended signature.
        screen = PADScreen(self, main_window)
        screen_widget = QWidget(main_window)
        #Make the main screen our layout
        # NOTE(review): PADScreen subclasses QStackedWidget, not QLayout, so
        # setLayout(screen) would fail -- presumably a leftover from an older
        # design in which PADScreen was a layout.
        screen_widget.setLayout(screen)
        self.addWidget(screen_widget)
        #Add simulation screen here:
        #Set the window dimensions, title and show it off!
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('PAD Damage Calculator')
        self.show()
if __name__ == '__main__':
    # Launch the Qt application and hand control to its event loop.
    app = QApplication(sys.argv)
    main_window = GUIMainWindow()
    sys.exit(app.exec_())
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,547
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/PAD_Monster.py
|
__author__ = 'Aaron'
# Class Description:
# Our Monster Class where we hold all of the Monster's stats and calculate the values needed with those stats
import os
import json
class PADMonster:
    """
    Holds one Puzzle & Dragons monster's raw stats and derives the values the
    damage calculator needs: awakening stat bonuses, per-attribute attack and
    two-pronged attack, and level-/plus-egg-adjusted hp/atk/rcv.

    Requires 'awakenings.txt' and 'leader skills.txt' (json) in the working
    directory.
    """
    def __init__(self):
        """Create a blank monster; real values arrive via set_base_stats()."""
        # _max/_min/_scale support levels other than max level;
        # _bonus holds flat stat points granted by awakenings;
        # _plus holds +egg investment; _base is the value before pluses.
        self.name = ''
        self.hp = 0
        self.hp_max = 0
        self.hp_min = 0
        self.hp_scale = 0
        self.hp_plus = 0
        self.hp_bonus = 0
        self.hp_base = 0
        self.rcv_base = 0
        self.rcv = 0
        self.rcv_max = 0
        self.rcv_min = 0
        self.rcv_scale = 0
        self.rcv_plus = 0
        self.rcv_bonus = 0
        self.base_base_atk = 0
        self.base_atk = 0
        self.base_atk_max = 0
        self.base_atk_min = 0
        self.base_atk_scale = 0
        self.base_atk_plus = 0
        self.base_atk_bonus = 0
        # Attack per attribute: atk[attribute]
        self.atk = [0, 0, 0, 0, 0]
        # Two-pronged attack per attribute.
        self.pronged_atk = [0, 0, 0, 0, 0]
        self.max_level = 99
        self.current_level = 99
        # 'fire' = 0, 'water' = 1, 'wood' = 2, 'light' = 3, 'dark' = 4
        self.attr_main = 0
        self.attr_sub = 0
        # True when the main and sub attributes coincide.
        self.is_same_attr = False
        # Attribute index -> name mapping.
        self.attributes = ['fire', 'water', 'wood', 'light', 'dark']
        # Type index -> name mapping (see self.types below).
        self.type_main = 0
        self.type_sub = 0
        self.type_main_name = ''
        self.type_sub_name = ''
        self.types = ['Evo Material', 'Balanced', 'Physical', 'Healer', 'Dragon', 'God', 'Attacker',
                      'Devil', '', '', '', '', 'Awoken Skill Material', 'Protected', 'Enhance Material']
        # Leader skill multipliers; leader_skill[0 = hp, 1 = atk, 2 = rcv]
        self.leader_skill = [0, 0, 0]
        # 60x60 image size and file location on padherder.com
        self.image60_size = 0
        self.image60_href = ''
        # Count of each awoken skill, indexed by awakening id - 1; the id ->
        # name mapping is the awakenings_names list right below.
        self.awakenings = [['', '', 0] for x in range(28)]
        # NOTE(review): 'Enahnced Wood Orbs' typo preserved deliberately --
        # fix only after checking nothing matches on this exact string.
        self.awakenings_names = ['Enhanced HP', 'Enhanced Attack', 'Enhanced Heal', 'Reduce Fire Damage',
                                 'Reduce Water Damage', 'Reduce Wood Damage', 'Reduce Light Damage',
                                 'Reduce Dark Damage', 'Auto-Recover', 'Resistance-Bind', 'Resistance-Dark',
                                 'Resistance-Jammers', 'Resistance-Poison', 'Enhanced Fire Orbs', 'Enhanced Water Orbs',
                                 'Enahnced Wood Orbs', 'Enhanced Light Orbs', 'Enhanced Dark Orbs', 'Extend Time',
                                 'Recover Bind', 'Skill Boost', 'Enhanced Fire Att.', 'Enhanced Water Att.',
                                 'Enhanced Wood Att.', 'Enhanced Light Att.', 'Enhanced Dark Att.',
                                 'Two-Pronged Attack', 'Resistance-Skill Lock']
        # Load awakening names/descriptions.  BUG FIX: use a context manager so
        # the file handle is closed instead of being left open on the instance.
        with open(os.path.join('awakenings.txt'), 'r') as json_file:
            self.json_awakenings = json.loads(json_file.read())
        # awakenings[id-1] = [name, desc, count]
        for awakening in self.json_awakenings:
            self.awakenings[awakening['id'] - 1] = [awakening['name'], awakening['desc'], 0]
        # Leader skill metadata.
        self.leader_skill_name = ''
        self.leader_skill_desc = ''
        # [xhp, xatk, xrcv, optional ['elem' or 'type', which elem/type]]
        self.leader_skill_effect = [1, 1, 1]
        # BUG FIX: same leaked-handle fix as above.
        with open(os.path.join('leader skills.txt'), 'r') as json_file:
            self.json_leader_skills = json.loads(json_file.read())
    def set_base_stats(self, name, hp, atk, rcv, attr1, attr2, type1, type2, size, href, awakenings, leader_skill,
                       level, hp_min, hp_scale, atk_min, atk_scale, rcv_min, rcv_scale):
        """
        Load the monster's raw data and derive the dependent values: awakening
        stat bonuses, per-attribute (pronged) attack, and the leader skill's
        description and multipliers.
        """
        self.name = name
        self.hp = hp
        self.hp_base = hp
        self.hp_max = hp
        self.hp_min = hp_min
        self.hp_scale = hp_scale
        self.base_atk = atk
        self.base_base_atk = atk
        self.base_atk_max = atk
        self.base_atk_min = atk_min
        self.base_atk_scale = atk_scale
        self.rcv = rcv
        self.rcv_base = rcv
        self.rcv_max = rcv
        self.rcv_min = rcv_min
        self.rcv_scale = rcv_scale
        self.max_level = level
        self.current_level = level
        self.attr_main = attr1
        self.attr_sub = attr2
        self.type_main = type1
        self.type_main_name = self.types[type1]
        self.type_sub = type2
        # BUG FIX: `if type2:` skipped the valid type 0 ('Evo Material').
        # None is the real "no sub type" sentinel (cf. the explicit
        # attr_sub == 0 handling in Board_Screen), so test for None.
        if type2 is not None:
            self.type_sub_name = self.types[type2]
        self.image60_size = size
        self.image60_href = href
        self.leader_skill_name = leader_skill
        for awakening in awakenings:
            self.awakenings[awakening - 1][2] += 1
        # The first three awakening ids grant flat stat bonuses:
        # +200 hp / +100 atk / +50 rcv per copy.
        for x in range(3):
            if self.awakenings[x][2] > 0:
                if x == 0:
                    self.hp_bonus = self.awakenings[x][2] * 200
                    self.hp += self.hp_bonus
                    self.hp_base = self.hp
                if x == 1:
                    self.base_atk_bonus = self.awakenings[x][2] * 100
                    self.base_atk += self.base_atk_bonus
                    self.base_base_atk = self.base_atk
                if x == 2:
                    self.rcv_bonus = self.awakenings[x][2] * 50
                    self.rcv += self.rcv_bonus
                    self.rcv_base = self.rcv
        # Look up the leader skill's description and multipliers by name.
        for x in range(len(self.json_leader_skills)):
            if leader_skill == self.json_leader_skills[x]['name']:
                self.leader_skill_desc = self.json_leader_skills[x]['effect']
                if 'data' in self.json_leader_skills[x].keys():
                    self.leader_skill_effect = self.json_leader_skills[x]['data']
        self._set_atk_(self.attr_main, self.attr_sub)
        self._set_pronged_atk_(self.attr_main, self.attr_sub)
    def _set_attr_main_(self, attr):
        """
        If the attribute name is valid, set attr_main to the value corresponding
        to the attr.
        :param attr: attribute name ('fire', 'water', 'wood', 'light', 'dark')
        """
        if attr.lower() in self.attributes:
            self.attr_main = self.attributes.index(attr.lower())
            # Re-check whether main and sub attributes now coincide.
            self.is_same_attr = self.attr_main == self.attr_sub
    def _set_attr_sub_(self, attr):
        """
        If the attribute name is valid, set attr_sub to the value corresponding
        to the attr.
        :param attr: attribute name ('fire', 'water', 'wood', 'light', 'dark')
        """
        if attr.lower() in self.attributes:
            self.attr_sub = self.attributes.index(attr.lower())
            # Re-check whether main and sub attributes now coincide.
            self.is_same_attr = self.attr_main == self.attr_sub
    def _set_atk_(self, attr1, attr2):
        """
        Calculate and set atk for each attribute.  A sub attribute equal to the
        main grants a 1.1x bonus; a different sub attribute deals 1/3 damage.
        :param attr1: value corresponding to main attribute
        :param attr2: value corresponding to sub attribute
        """
        if attr1 in [0, 1, 2, 3, 4]:
            if attr1 != attr2:
                self.atk[attr1] = self.base_atk
            else:
                self.atk[attr1] = self.base_atk * 1.1
        if attr2 in [0, 1, 2, 3, 4]:
            if attr1 != attr2:
                self.atk[attr2] = self.base_atk * (1/3)
    def _set_pronged_atk_(self, attr1, attr2):
        """
        Calculate and set pronged atk for each attribute: 1.5x per
        Two-Pronged Attack awakening (awakening id 27, index 26).
        :param attr1: value corresponding to main attribute
        :param attr2: value corresponding to sub attribute
        """
        if attr1 in [0, 1, 2, 3, 4]:
            self.pronged_atk[attr1] = self.atk[attr1] * 1.5 ** self.awakenings[26][2]
        if attr2 in [0, 1, 2, 3, 4] and attr1 != attr2:
            self.pronged_atk[attr2] = self.atk[attr2] * 1.5 ** self.awakenings[26][2]
    def _set_stats_at_level_(self, level):
        """
        Modify all stats according to level, then re-apply the stored pluses.
        :param level: Level the monster will be set to.
        """
        self.current_level = level
        self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)
        self.hp += self.hp_bonus
        self.hp_base = self.hp
        self._set_stats_with_pluses_('hp', self.hp_plus)
        self.base_atk = self._use_growth_formula(self.base_atk_min, self.base_atk_max, self.base_atk_scale)
        self.base_atk += self.base_atk_bonus
        self.base_base_atk = self.base_atk
        self._set_stats_with_pluses_('atk', self.base_atk_plus)
        self.rcv = self._use_growth_formula(self.rcv_min, self.rcv_max, self.rcv_scale)
        self.rcv += self.rcv_bonus
        self.rcv_base = self.rcv
        self._set_stats_with_pluses_('rcv', self.rcv_plus)
    def _use_growth_formula(self, min_value, max_value, scale):
        """
        Apply the growth formula to get the value of a stat at the current
        level: min + (max - min) * ((lvl-1)/(max_lvl-1)) ** scale.
        :param min_value: the minimum value of the stat
        :param max_value: the maximum value of the stat
        :param scale: the scaling rate of the stat
        :return: the value of the stat at the current level
        """
        # BUG FIX: a monster capped at level 1 has no growth curve; return the
        # max directly instead of dividing by zero in the denominator.
        if self.max_level <= 1:
            return max_value
        value = ((self.current_level - 1) / (self.max_level - 1)) ** scale
        value *= (max_value - min_value)
        value += min_value
        return value
    def _set_stats_with_pluses_(self, type, num):
        """
        Modify the specified stat according to the specified amount of pluses
        (+10 hp, +5 atk, +3 rcv per egg).
        :param type: 'hp', 'atk', or 'rcv'
        :param num: 0-99, the number of pluses for the specified stat
        """
        if type == 'hp':
            self.hp_plus = num
            self.hp = self.hp_base + self.hp_plus * 10
        elif type == 'atk':
            self.base_atk_plus = num
            self.base_atk = self.base_base_atk + self.base_atk_plus * 5
            # atk depends on base_atk, so the derived arrays must be rebuilt.
            self._set_atk_(self.attr_main, self.attr_sub)
            self._set_pronged_atk_(self.attr_main, self.attr_sub)
        elif type == 'rcv':
            self.rcv_plus = num
            self.rcv = self.rcv_base + self.rcv_plus * 3
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,548
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/PADScreen.py
|
__author__ = 'Aaron'
from Calculator_Screen import CalculatorScreen
from Board_Screen import BoardScreen
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
from PyQt5.QtWidgets import (QVBoxLayout, QHBoxLayout, QWidget, QPushButton, QSplitter, QAction,
QFileDialog, QMainWindow, QStackedWidget, QSplitter)
from PyQt5.QtCore import Qt
import os
import json
from functools import partial
class PADScreen(QStackedWidget):
    """
    Two-page stacked widget: page one hosts the damage-calculator screen, page
    two the board screen.  Also installs the File menu (load/save/new team) on
    the owning main window.
    """
    def __init__(self, main_window):
        """
        Initialize the PADScreen Class.
        :param main_window: the QMainWindow whose menu bar receives the
                            Load/Save/New actions.
        """
        super().__init__()
        # Menu actions for loading, saving and clearing the team.
        open_file = QAction('Load Team...', main_window)
        open_file.setShortcut('Ctrl+O')
        open_file.triggered.connect(partial(self._show_dialog_box_, 'Open', main_window))
        save_file = QAction('Save Team...', main_window)
        save_file.setShortcut('Ctrl+S')
        save_file.triggered.connect(partial(self._show_dialog_box_, 'Save', main_window))
        clear_team = QAction('New Team', main_window)
        clear_team.setShortcut('Ctrl+N')
        clear_team.triggered.connect(self.__clear__team__)
        # Attach the actions to the main window's File menu.
        menubar = main_window.menuBar()
        file_menu = menubar.addMenu('&File')
        file_menu.addAction(open_file)
        file_menu.addAction(save_file)
        file_menu.addAction(clear_team)
        # Page one: the calculator screen (a custom layout on a plain widget).
        self.calculator_screen = QWidget(self)
        self.calculator_screen_layout = CalculatorScreen(self)
        self.calculator_screen.setLayout(self.calculator_screen_layout)
        # Share the team objects built by the calculator layout.
        self.pad_team = self.calculator_screen_layout.pad_team
        self.team = self.calculator_screen_layout.team
        # Page two: the board screen.
        self.board_screen = QWidget(self)
        self.board_screen_layout = BoardScreen(self, self.team, self.pad_team)
        self.board_screen.setLayout(self.board_screen_layout)
        # Bottom strip with the page-turning buttons; it gets re-parented onto
        # whichever page is active.
        self.page_turner = QWidget(main_window)
        # BUG FIX: the layout was constructed with main_window as parent, which
        # makes Qt try to install it on the main window and warn; a parentless
        # layout is adopted by page_turner via setLayout below.
        page_turner_layout = QHBoxLayout()
        self.page_turner.setLayout(page_turner_layout)
        self.turn_left = QPushButton('<', main_window)
        page_turner_layout.addWidget(self.turn_left)
        page_turner_layout.addStretch()
        page_turner_layout.addStretch()
        self.turn_right = QPushButton('>', main_window)
        page_turner_layout.addWidget(self.turn_right)
        # The GUI starts on page one, so there is nothing to the left yet.
        self.turn_left.hide()
        self.page_one_splitter = QSplitter(Qt.Vertical)
        self.page_one_splitter.addWidget(self.calculator_screen)
        self.page_one_splitter.addWidget(self.page_turner)
        self.addWidget(self.page_one_splitter)
        self.page_two_splitter = QSplitter(Qt.Vertical)
        self.page_two_splitter.addWidget(self.board_screen)
        self.addWidget(self.page_two_splitter)
        self._init_screen_()
    def _init_screen_(self):
        """Wire the right-arrow button to switch to the board screen."""
        # BUG FIX: disconnect first so repeated page flips do not stack
        # duplicate handlers on the same signal (each click would then fire the
        # slot several times); disconnect() raises when nothing is connected.
        try:
            self.turn_right.clicked.disconnect()
        except Exception:
            pass
        self.turn_right.clicked.connect(self._go_to_board_screen_)
    def _go_to_board_screen_(self, clicked):
        """
        Show page two (the board screen): push the freshest team data into the
        board layout, move the page-turner strip onto page two, and swap the
        arrow buttons.
        :param clicked: unused click-event payload.
        """
        self.board_screen_layout.team = self.calculator_screen_layout.team
        self.board_screen_layout.team_totals = self.calculator_screen_layout.pad_team
        self.board_screen_layout.set__team(self.board_screen_layout.team)
        self.setCurrentWidget(self.page_two_splitter)
        self.page_two_splitter.addWidget(self.page_turner)
        self.turn_right.hide()
        self.turn_left.show()
        # BUG FIX: disconnect-before-connect prevents duplicate handler
        # registrations when flipping back and forth between pages.
        try:
            self.turn_left.clicked.disconnect()
        except Exception:
            pass
        self.turn_left.clicked.connect(self._go_to_calculator_screen_)
    def _go_to_calculator_screen_(self, clicked):
        """
        Show page one (the calculator screen) and swap the arrow buttons back.
        :param clicked: unused click-event payload.
        """
        # _init_screen_ already (re)connects the right arrow; the original also
        # connected it a second time here, making each click fire twice.
        self._init_screen_()
        self.turn_left.hide()
        self.turn_right.show()
        self.page_one_splitter.addWidget(self.page_turner)
        self.setCurrentWidget(self.page_one_splitter)
    def _show_dialog_box_(self, stringname, gui):
        """
        'Open': let the user pick a saved team file and load it into the
        calculator screen's inputs.  'Save': serialize the current team (names,
        pluses, levels) to a json-formatted txt file.
        :param stringname: 'Open' or 'Save' (set by the triggering menu action).
        :param gui: parent widget for the dialogs.
        """
        if stringname == 'Open':
            filename = QFileDialog.getOpenFileName(gui, 'Load Team...', os.path.join('saved teams'),
                                                   'Text files (*.txt)')
            # getOpenFileName returns (path, filter); skip cancelled dialogs
            # and non-txt selections.
            if filename[0] and filename[0].endswith('txt'):
                with open(os.path.realpath(filename[0]), 'r') as file:
                    json_content = json.loads(file.read())
                for monster in range(6):
                    name = json_content[monster]['name']
                    hp_plus = json_content[monster]['hp plus']
                    atk_plus = json_content[monster]['atk plus']
                    rcv_plus = json_content[monster]['rcv plus']
                    level = json_content[monster]['level']
                    # Push the values through the calculator screen's own
                    # handlers so its labels update exactly as if typed in.
                    self.calculator_screen_layout.line_edits[monster].setText(name)
                    self.calculator_screen_layout._on_plus_value_activated_(monster, 'hp', hp_plus)
                    self.calculator_screen_layout._on_plus_value_activated_(monster, 'atk', atk_plus)
                    self.calculator_screen_layout._on_plus_value_activated_(monster, 'rcv', rcv_plus)
                    self.calculator_screen_layout._on_level_activated_(monster, level)
        if stringname == 'Save':
            # BUG FIX: the name filter was missing its closing parenthesis
            # ('Text files (*.txt'), producing a malformed filter string.
            filename = QFileDialog.getSaveFileName(gui, 'Save Team...', os.path.join('saved teams'),
                                                   'Text files (*.txt)')
            if filename[0]:
                json_file = [{} for monster in range(6)]
                for monster in range(6):
                    member = self.calculator_screen_layout.team[monster]
                    json_file[monster]['name'] = member.name
                    json_file[monster]['hp plus'] = member.hp_plus
                    json_file[monster]['atk plus'] = member.base_atk_plus
                    json_file[monster]['rcv plus'] = member.rcv_plus
                    json_file[monster]['level'] = member.current_level
                # BUG FIX: only append '.txt' when the chosen name does not
                # already end with it, so 'team.txt' is not saved as
                # 'team.txt.txt'.
                path = filename[0] if filename[0].endswith('.txt') else filename[0] + '.txt'
                with open(os.path.realpath(path), 'w') as file:
                    json.dump(json_file, file)
    def __clear__team__(self):
        """Reset the team to six blank monsters and clear all inputs/labels."""
        for index in range(6):
            self.calculator_screen_layout.line_edits[index].clear()
        self.calculator_screen_layout.team = [PADMonster() for monster in range(6)]
        self.calculator_screen_layout.pad_team = PADTeam(self.calculator_screen_layout.team)
        for index in range(6):
            self.calculator_screen_layout._set_labels_(self.calculator_screen_layout.team[index], index)
# self.calculator_screen = QWidget(gui)
# self.calculator_screen_layout = CalculatorScreen(gui)
# self.calculator_screen.setLayout(self.calculator_screen_layout)
# self.active_screen = self.calculator_screen
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,549
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/PAD_Team.py
|
__author__ = 'Aaron'
import os
from PAD_Monster import PADMonster
class PADTeam:
    """
    Aggregates a team of PADMonster instances into team-wide totals, both raw
    and after applying the leaders' skills.  Generalized from the original
    hard-coded 6-member team: any non-empty team works; the first and last
    members are treated as the leaders.
    """
    def __init__(self, team):
        """
        Initializes the PADTeam Class.
        :param team: a sequence of PADMonster (or compatible) objects.
        """
        self.team = team
        # Raw team totals.
        self.hp = 0
        # atk arrays: [fire, water, wood, light, dark]
        self.atk = [0, 0, 0, 0, 0]
        # base_* totals sum each monster's value for its own main attribute.
        self.base_atk = 0
        self.pronged_atk = [0, 0, 0, 0, 0]
        self.base_pronged_atk = 0
        self.rcv = 0
        # Totals after the leaders' skill multipliers are applied.
        self.hp_modified = 0
        self.atk_modified = [0, 0, 0, 0, 0]
        self.base_atk_modified = 0
        self.pronged_atk_modified = [0, 0, 0, 0, 0]
        self.base_pronged_atk_modified = 0
        self.rcv_modified = 0
        # Human-readable summary of all the team's awakenings.
        self.awakenings = ''
        # Leader skill effects: [hp multiplier, atk multiplier, rcv multiplier]
        self.leader1_effects = [1, 1, 1]
        self.leader2_effects = [1, 1, 1]
        # Per-monster [hp, atk, rcv] multipliers earned from leader skills
        # (1s when a monster does not satisfy a skill's condition).
        self.stats_modified_by = [[1, 1, 1] for monster in self.team]
        # Derive every aggregate from the team input.
        self.__set__team__hp()
        self.__set__team__rcv()
        self.__set__team__atk()
        self.__set__team__base__atk()
        self.__set__team__awakenings()
        self.__set__modified__stats__()
    def __set__team__hp(self):
        """Total the team's hp."""
        self.hp = sum(monster.hp for monster in self.team)
    def __set__team__rcv(self):
        """Total the team's rcv."""
        self.rcv = sum(monster.rcv for monster in self.team)
    def __set__team__awakenings(self):
        """Build the 'Name: count' summary of every awakening the team owns."""
        self.awakenings = ''
        for awakening in range(len(self.team[0].awakenings)):
            # count stores how many instances of this awakening the team has
            count = sum(monster.awakenings[awakening][2] for monster in self.team
                        if monster.awakenings[awakening][2] > 0)
            if count > 0:
                # All monsters share the same awakening-name table, so the
                # first member's name entry labels the line.
                self.awakenings += self.team[0].awakenings[awakening][0]+': '+str(count)+'\n'
    def __set__team__atk(self):
        """Total the per-attribute attack and pronged attack."""
        self.atk = [0, 0, 0, 0, 0]
        self.pronged_atk = [0, 0, 0, 0, 0]
        for attr in range(5):
            for monster in self.team:
                self.atk[attr] += monster.atk[attr]
                self.pronged_atk[attr] += monster.pronged_atk[attr]
    def __set__team__base__atk(self):
        """Total each monster's attack for its own main attribute."""
        self.base_atk = 0
        self.base_pronged_atk = 0
        for monster in self.team:
            self.base_atk += monster.atk[monster.attr_main]
            self.base_pronged_atk += monster.pronged_atk[monster.attr_main]
    def __multiply__modifiers__(self, monster, leader):
        """Multiply one member's [hp, atk, rcv] modifiers by a leader's effect."""
        effect = self.team[leader].leader_skill_effect
        self.stats_modified_by[monster][0] *= effect[0]
        self.stats_modified_by[monster][1] *= effect[1]
        self.stats_modified_by[monster][2] *= effect[2]
    def __set__modified__stats__(self):
        """
        Apply both leaders' skills to every member that satisfies the skill's
        condition, then recompute the *_modified totals.
        """
        size = len(self.team)
        self.stats_modified_by = [[1, 1, 1] for monster in range(size)]
        # The first and last members of the team are the leaders.  (The set
        # avoids applying the same skill twice for a one-member team.)
        for index in sorted({0, size - 1}):
            # Skip members with no leader skill ("" name).
            if self.team[index].leader_skill_name:
                # leader_skill_effect = [hp x, atk x, rcv x] plus an optional
                # 4th entry ['elem' or 'type', number] restricting who benefits.
                if len(self.team[index].leader_skill_effect) > 3:
                    attribute = self.team[index].leader_skill_effect[3][0]
                    num = self.team[index].leader_skill_effect[3][1]
                    # Apply the multipliers only to members matching the
                    # element / type condition.
                    if attribute == "elem":
                        for monster in range(size):
                            if self.team[monster].attr_main == num or self.team[monster].attr_sub == num:
                                self.__multiply__modifiers__(monster, index)
                    elif attribute == "type":
                        for monster in range(size):
                            if self.team[monster].type_main == num or self.team[monster].type_sub == num:
                                self.__multiply__modifiers__(monster, index)
                else:
                    # Unconditional skill: every member benefits.
                    for monster in range(size):
                        self.__multiply__modifiers__(monster, index)
        hp = 0
        base_atk = 0
        atk = [0, 0, 0, 0, 0]
        base_pronged_attack = 0
        pronged_atk = [0, 0, 0, 0, 0]
        rcv = 0
        # Fold the per-monster multipliers into the team totals.
        for monster in range(size):
            hp += self.team[monster].hp * self.stats_modified_by[monster][0]
            rcv += self.team[monster].rcv * self.stats_modified_by[monster][2]
            main_attr = self.team[monster].attr_main
            base_atk += self.team[monster].atk[main_attr] * self.stats_modified_by[monster][1]
            base_pronged_attack += self.team[monster].pronged_atk[main_attr] * self.stats_modified_by[monster][1]
            for attr in range(5):
                atk[attr] += self.team[monster].atk[attr] * self.stats_modified_by[monster][1]
                pronged_atk[attr] += self.team[monster].pronged_atk[attr] * self.stats_modified_by[monster][1]
        self.hp_modified = hp
        self.atk_modified = atk
        self.base_atk_modified = base_atk
        self.pronged_atk_modified = pronged_atk
        self.base_pronged_atk_modified = base_pronged_attack
        self.rcv_modified = rcv
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,550
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/Board_Screen.py
|
__author__ = 'Aaron'
from PyQt5.QtWidgets import (QVBoxLayout, QWidget, QLabel, QGridLayout, QSplitter,
QPushButton, QHBoxLayout)
from PyQt5.QtCore import Qt, QMimeData
from PyQt5.QtGui import QPixmap, QDrag
import os
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
from functools import partial
class BoardScreen(QVBoxLayout):
default_team = [PADMonster() for monster in range(6)]
default_team_totals = PADTeam(default_team)
def __init__(self, gui, team=default_team, team_totals=default_team_totals):
super().__init__()
self.team = team
self.team_totals = team_totals
self.damage_array = [[{'main attribute': 0, 'sub attribute': 0} for col in range(2)] for row in range(6)]
self.__init__screen__(gui, self.team, self.team_totals)
    def __init__screen__(self, gui, team, team_totals):
        """
        Build the board page's widgets top to bottom: per-monster damage
        labels, the hp-recovered label, the team image strip, the orb board
        grid, and the Calculate Damage button.
        :param gui: parent widget for the child labels/buttons.
        :param team: the six PADMonster members displayed on this page.
        :param team_totals: the PADTeam aggregate passed to calculate_damage.
        """
        # DAMAGE SCREEN
        damage_screen = QWidget()
        damage_screen_layout = QGridLayout()
        damage_screen.setLayout(damage_screen_layout)
        self.addWidget(damage_screen)
        # 6 monsters x 2 columns (main / sub attribute damage).
        self.damage_labels = [[QLabel(gui) for column in range(2)] for row in range(6)]
        for row in range(6):
            for column in range(2):
                damage_screen_layout.addWidget(self.damage_labels[row][column], row, column)
        # RECOVERY LABEL
        self.hp_recovered = QLabel(gui)
        self.addWidget(self.hp_recovered)
        # BOARD
        board = QWidget()
        board_layout = QGridLayout()
        board.setLayout(board_layout)
        self.addWidget(board)
        # TEAM IMAGES
        self.team_labels = []
        for index in range(6):
            label = QLabel(gui)
            self.team_labels.append(label)
            board_layout.addWidget(label, 0, index)
            board_layout.setAlignment(label, Qt.AlignHCenter)
        self.set__team(team)
        # BOARD
        # 8x8 label grid; only a subset is revealed by __create__board___.
        self.board_labels = [[PADLabel(gui) for column in range(8)] for row in range(8)]
        # positions = [(i+1, j) for i in range(8) for j in range(8)]
        light_brown = 'rgb(120, 73, 4)'
        dark_brown = 'rgb(54, 35, 7)'
        color = dark_brown
        # Checkerboard the cell backgrounds; colour alternates per column and
        # is NOT toggled at the row boundary (col_index+1 % 8 == 0) so each new
        # row starts offset from the previous one.
        # NOTE(review): list.index() relies on default identity equality of the
        # label objects, which are all distinct here -- works, but O(n) per cell.
        for row in self.board_labels:
            for column in row:
                row_index = self.board_labels.index(row)
                col_index = row.index(column)
                column.setStyleSheet("QLabel { background-color: %s }" % color)
                if color == dark_brown and (col_index+1) % 8 != 0:
                    color = light_brown
                elif color == light_brown and (col_index+1) % 8 != 0:
                    color = dark_brown
                board_layout.addWidget(column, row_index+1, col_index)
        #for position, label in zip(positions, self.board_labels):
        #    board_layout.addWidget(label, *position)
        # Stretch rows/columns evenly (row 0 is the team-image strip).
        for row in range(9):
            board_layout.setRowStretch(row, 1)
        for column in range(8):
            board_layout.setColumnStretch(column, 1)
        self.board_array = []
        # Default to the standard 5x6 PAD board.
        self.__create__board___(5, 6)
        # CALCULATE DAMAGE BUTTON
        calculate_damage_button = QPushButton('Calculate Damage', gui)
        calculate_damage_button.clicked.connect(partial(self.calculate_damage, team, team_totals))
        self.addWidget(calculate_damage_button)
        # ORBS
        # orb_wrapper = QWidget(gui)
        # orb_wrapper_layout = QHBoxLayout()
        # orb_wrapper.setLayout(orb_wrapper_layout)
        # elements = ['fire', 'water', 'wood', 'light', 'dark']
        # for element in elements:
        #     orb = PADIcon(gui)
        #     orb.setPixmap(QPixmap(os.path.join('icons')+'\\'+element+'.png'))
        #     orb_wrapper_layout.addWidget(orb)
        #
        # self.addWidget(orb_wrapper)
def __create__board___(self, row, column):
self.board_array = [['' for column in range(column)] for row in range(row)]
for row_index in self.board_labels:
for col_label in row_index:
col_label.hide()
for x in range(row):
for y in range(column):
self.board_labels[x][y].show()
    def calculate_damage(self, team=default_team, team_totals=default_team_totals):
        """Scan the board for matched runs, then compute and display each
        team member's elemental damage and the total HP recovered.

        team        -- 6 monsters; reads their atk/pronged_atk/rcv/attr_* fields.
        team_totals -- accepted for the click-handler signature but unused here.

        Side effects: rewrites self.board_array from the board labels,
        updates self.damage_labels / self.hp_recovered, prints debug output.
        """
        # Mirror the on-screen orb elements into board_array.
        for row in range(len(self.board_array)):
            for column in range(len(self.board_array[0])):
                self.board_array[row][column] = self.board_labels[row][column].element
        all_positions = set()
        # 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = heart
        elemental_damage = [{'fire': 0, 'water': 0, 'wood': 0, 'light': 0, 'dark': 0}
                            for monster in range(6)]
        total_hp_recovered = 0
        combo_count = 0
        colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'pink']
        attribute_translator = ['fire', 'water', 'wood', 'light', 'dark', 'heart']
        for row in range(len(self.board_array)):
            for column in range(len(self.board_array[0])):
                combo_length, positions = self.__find__combos__recursively__(self.board_array, row, column)
                # Count each combo once: skip if one of its orbs was already
                # claimed by a combo found at an earlier scan position.
                if combo_length >= 3 and not next(iter(positions)) in all_positions and self.board_array[row][column]:
                    print(str(self.board_array[row][column])+":",combo_length,'orb combo.')
                    attribute = attribute_translator.index(self.board_array[row][column])
                    if attribute != 5:
                        # Attack orbs: exactly 4 orbs uses pronged_atk * 1.25;
                        # otherwise atk scaled +25% per orb beyond three.
                        for monster in range(6):
                            if combo_length == 4:
                                damage = team[monster].pronged_atk[attribute] * 1.25
                            else:
                                damage = team[monster].atk[attribute] * (1+0.25*(combo_length-3))
                            elemental_damage[monster][self.board_array[row][column]] += damage
                    else:
                        # Heart orbs: recovery = summed team rcv, scaled by match length.
                        total_rcv = 0
                        for monster in range(6):
                            total_rcv += team[monster].rcv
                        total_hp_recovered += total_rcv * (1+0.25*(combo_length-3))
                        print(total_hp_recovered)
                        print(total_rcv)
                    all_positions |= positions
                    combo_count += 1
        combo_multiplier = 1+0.25*(combo_count-1)
        for monster in range(6):
            main_attribute = attribute_translator[team[monster].attr_main]
            sub_attribute = ''
            # attr_sub may legitimately be 0 (fire), hence the explicit == 0 check.
            if team[monster].attr_sub or team[monster].attr_sub == 0:
                sub_attribute = attribute_translator[team[monster].attr_sub]
            if sub_attribute:
                if main_attribute != sub_attribute:
                    main_damage = elemental_damage[monster][main_attribute] * combo_multiplier
                    sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier
                else:
                    # Same main and sub attribute: the pooled damage is split
                    # 10/11 vs 1/11 between the two displayed numbers.
                    main_damage = elemental_damage[monster][main_attribute] * combo_multiplier * (10/11)
                    sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier * (1/11)
            else:
                main_damage = elemental_damage[monster][main_attribute] * combo_multiplier
                sub_damage = 0
            self.damage_labels[monster][0].setText(str(main_damage))
            self.damage_labels[monster][0].setStyleSheet("QLabel { color : %s }" % colors[team[monster].attr_main])
            self.damage_labels[monster][1].setText(str(sub_damage))
            if team[monster].attr_sub or team[monster].attr_sub == 0:
                self.damage_labels[monster][1].setStyleSheet("QLabel { color : %s }" % colors[team[monster].attr_sub])
        total_hp_recovered *= combo_multiplier
        self.hp_recovered.setText(str(total_hp_recovered))
        self.hp_recovered.setStyleSheet("QLabel { color : %s }" % colors[5])
def set__team(self, team):
for label, member in zip(self.team_labels, team):
try:
image = QPixmap(os.path.join('images')+'/'+member.name+'.png')
image.scaled(75, 75)
label.setPixmap(image)
except Exception: pass
    def __find__combos__recursively__(self, array, row, column):
        """Return (combo_length, positions) for the match touching (row, column).

        Follows the horizontal run starting here and, from the run's far end,
        recurses to pick up a vertical continuation (and vice versa), merging
        L/T-shaped matches into a single combo. When no run of 3+ starts
        here, returns length 1 with an empty position set.
        """
        combo_length = 0
        positions = set()
        row_length = self.checkIndexInRow(array, row, column)
        if row_length >= 3:
            # The recursive call at the run's far end returns at least 1 for
            # the shared end orb, hence the -1 to avoid double-counting it.
            more_length, more_positions = self.__find__combos__recursively__(array, row, column+row_length-1)
            combo_length += row_length + more_length - 1
            positions |= more_positions
            for col_index in range(row_length):
                positions.add((row, column+col_index))
        column_length = self.checkIndexInColumn(array, row, column)
        if column_length >= 3:
            more_length, more_positions = self.__find__combos__recursively__(array, row+column_length-1, column)
            combo_length += column_length + more_length - 1
            positions |= more_positions
            for row_index in range(column_length):
                positions.add((row+row_index, column))
        if row_length >= 3 and column_length >= 3:
            # Both directions matched: (row, column) was counted in each run,
            # so drop one duplicate.
            return combo_length - 1, positions
        elif row_length < 3 and column_length < 3:
            return 1, positions
        return combo_length, positions
def checkIndexInRow(self, array, row, col_index):
combo_length = 0
if array[row].count(array[row][col_index]) >= 3:
if col_index > 0:
if array[row][col_index - 1] != array[row][col_index]:
combo_length += self.recurseThroughRow(array, row, col_index)
else:
combo_length += self.recurseThroughRow(array, row, col_index)
return combo_length
def recurseThroughRow(self, array, row, col_index, count=1):
if array[row][col_index + count] == array[row][col_index]:
count += 1
if col_index + count < len(array[row]):
return self.recurseThroughRow(array, row, col_index, count)
else:
return count
else:
return count
def checkIndexInColumn(self, array, row_index, col):
elements_in_column = []
combo_length = 0
for index in range(row_index, len(array)):
elements_in_column.append(array[index][col])
if elements_in_column.count(array[row_index][col]) >= 3:
if row_index > 0:
if array[row_index][col] != array[row_index - 1][col]:
combo_length += self.recurseThroughCol(array, row_index, col)
else:
combo_length += self.recurseThroughCol(array, row_index, col)
return combo_length
def recurseThroughCol(self, array, row_index, col, count=1):
if array[row_index + count][col] == array[row_index][col]:
count += 1
if row_index + count < len(array):
return self.recurseThroughCol(array, row_index, col, count)
else:
return count
else:
return count
class PADLabel(QLabel):
    """A board cell: left-clicking cycles its orb element; accepts image drops."""
    def __init__(self, gui):
        super().__init__(gui)
        self.setAcceptDrops(True)
        self.setMouseTracking(True)
        self.setScaledContents(True)
        self.color_counter = -1  # -1 = no orb assigned yet
        self.colors = ['fire', 'water', 'wood', 'light', 'dark', 'heart']
        self.element = ''  # current orb element name, '' when empty
        self.setFixedSize(75, 75)
    def mousePressEvent(self, click):
        """On left-click inside the cell, advance to the next orb element and show its icon."""
        if click.button() == Qt.LeftButton and self.rect().contains(click.pos()):
            # Equivalent to the original if/else: advance and wrap 5 -> 0.
            self.color_counter = (self.color_counter + 1) % 6
            self.element = self.colors[self.color_counter]
            icon = QPixmap(os.path.join('icons')+'/'+self.element+'.png')
            # Bug fix: QPixmap.scaled returns a NEW pixmap; the original
            # discarded the return value, leaving the icon unscaled.
            icon = icon.scaled(75, 75)
            self.setPixmap(icon)
    def dragEnterEvent(self, event):
        """Accept only drags that carry image data."""
        if event.mimeData().hasImage():
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        """Display the dropped image.

        Bug fix: the original used C++ template syntax
        (``imageData().value<QImage>()``), which Python parses as a chained
        comparison and evaluates to nonsense at runtime. In PyQt,
        imageData() already yields the image; QPixmap.fromImage converts it
        for display.
        """
        self.setPixmap(QPixmap.fromImage(event.mimeData().imageData()))
class PADIcon(QLabel):
    """A draggable icon: left-clicking starts a drag carrying the icon's image."""
    def __init__(self, gui):
        super().__init__()
        self.gui = gui
        self.setMouseTracking(True)
        self.location = self.rect()
    def mousePressEvent(self, click):
        """Begin a drag with this label's pixmap when left-clicked inside its rect."""
        if click.button() != Qt.LeftButton or not self.rect().contains(click.pos()):
            return
        print('On it!')
        payload = QMimeData()
        payload.setImageData(self.pixmap().toImage())
        drag = QDrag(self.gui)
        drag.setMimeData(payload)
        drag.setPixmap(self.pixmap())
        dropAction = drag.exec()
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,551
|
acheng6845/PuzzleSolver
|
refs/heads/master
|
/image_updater.py
|
__author__ = 'Aaron'
# Class Description:
# Update our monsters.txt file and our images folder
import json
import os
import shutil

# Bug fix: `from urllib3 import urllib3` fails -- the urllib3 package has no
# `urllib3` submodule; PoolManager lives at the package's top level.
import urllib3
class image_updater():
    """Download 60px portraits for the hard-coded team from padherder.com
    into the local ./images directory, skipping files that already exist.
    """
    def __init__(self):
        # update monsters.txt here:
        # Bug fix: the file handle was never closed; close it as soon as
        # the JSON is parsed (attribute kept for compatibility).
        self.json_file = open(os.path.realpath('./monsters.txt'), 'r')
        try:
            self.json_object = json.loads(self.json_file.read())
        finally:
            self.json_file.close()
        path = os.path.realpath('images')
        team = ['Sparkling Goddess of Secrets, Kali', 'Holy Night Kirin Princess, Sakuya',
                'Soaring Dragon General, Sun Quan', 'divine law goddess, valkyrie rose']
        pool = urllib3.PoolManager()  # one pool reused for every download
        for entry in self.json_object:
            name = entry["name"]
            if name not in team:
                continue
            # All paths are absolute, so the original's os.chdir(path) was a
            # redundant process-wide side effect and is dropped.
            target = path + '/' + name + '.png'
            # Bug fix: the original issued the HTTP request BEFORE checking
            # whether the file already existed, and only released the
            # connection on the download branch.
            if os.access(target, os.F_OK):
                print(name + '.png already exists.')
                continue
            url = 'https://padherder.com' + entry["image60_href"]
            request = pool.request('GET', url)
            try:
                with open(target, 'wb') as file:
                    file.write(request.data)
            finally:
                request.release_conn()
if __name__ == '__main__':
    # Run the downloader once when executed as a script.
    updater = image_updater()
|
{"/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_GUI.py": ["/PADScreen.py"], "/PADScreen.py": ["/Calculator_Screen.py", "/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"]}
|
1,574
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-2/task_2.py
|
"""
2. Написать декоратор log, который будет выводить на экран все аргументы,
которые передаются вызываемой функции.
@log
def my_sum(*args):
return sum(*args)
my_sum(1,2,3,1) - выведет "Функция была вызвана с - 1, 2, 3, 1"
my_sum(22, 1) - выведет "Функция была вызвана с - 22, 1"
"""
def log(func):
    """Decorator that prints every positional argument the wrapped function
    is called with, then returns the function's result.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args):
        # Bug fix: log BEFORE invoking, so the call is recorded even when
        # *func* raises (the original printed only after a successful call).
        print("Функция была вызвана с - " + ', '.join(map(str, args)))
        return func(*args)

    return wrapper
@log
def my_sum(*args):
    # NOTE(review): returns None instead of sum(args) as the module
    # docstring's example suggests; kept as-is because the demo below
    # passes a string, which sum() would reject -- confirm intent.
    return
if __name__ == '__main__':
    # Demo: the decorator prints the arguments, including the string 's'.
    my_sum(11, 2, 3, 's', 4)
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,575
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-1/task_3.py
|
"""
Реализовать алгоритм бинарного поиска на python.
На вход подается упорядоченный список целых чисел, а так же элемент,
который необходимо найти и указать его индекс,
в противном случае – указать что такого элемента нет в заданном списке.
"""
def search_item(some_list, find_item):
    """Binary-search *find_item* in *some_list* (sorted in place first).

    Returns a message with the found index, or a "not in list" message.
    """
    some_list.sort()
    start = 0
    end = len(some_list) - 1
    while start <= end:
        # Bug fix: the original's "less than" branch computed
        # mid = (end - start) // 2, dropping the `start +` offset, so the
        # search jumped back toward index 0 and missed items once start > 0
        # (e.g. 54 in the demo list was reported as missing).
        mid = start + (end - start) // 2
        if find_item == some_list[mid]:
            return f'Число {some_list[mid]}, найдено по индексу {mid}'
        elif find_item > some_list[mid]:
            start = mid + 1
        else:
            end = mid - 1
    return f'Числа {find_item} нету в списке!'
if __name__ == '__main__':
    # my_list = list(range(0, 100))
    # Deliberately unsorted input: search_item sorts it first.
    my_list = [1, 23, 33, 54, 42, 77, 234, 99, 2]
    my_item = 42
    print(search_item(my_list, my_item))
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,576
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-6/task_2.py
|
# 2. Используя модуль unittests написать тесты: сложения двух матриц, умножения матрицы и метод transpose
import unittest
from .task_1 import Matrix, MatrixSizeError
class TestMatrix(unittest.TestCase):
    """Unit tests for Matrix addition, scalar multiplication and transpose."""
    def setUp(self) -> None:
        # Two 3x3 matrices plus two identical 3x2 matrices.
        self.matrix_1 = Matrix([[1, 2, 9], [3, 4, 0], [5, 6, 4]])
        self.matrix_2 = Matrix([[2, 3, 0], [1, 2, 3], [5, 6, 4]])
        self.matrix_3 = Matrix([[2, 9], [4, 0], [6, 4]])
        self.matrix_4 = Matrix([[2, 9], [4, 0], [6, 4]])
    def test_add_three(self):
        # Element-wise sum of two 3x3 matrices.
        self.assertEqual(self.matrix_1 + self.matrix_2, [[3, 5, 9], [4, 6, 3], [10, 12, 8]])
    def test_add_two_size(self):
        self.assertEqual(self.matrix_3 + self.matrix_4, [[4, 18], [8, 0], [12, 8]])
    def test_add_error(self):
        # Mismatched shapes must raise MatrixSizeError.
        with self.assertRaises(MatrixSizeError):
            self.matrix_1 + self.matrix_3
    def test_mul_integer(self):
        self.assertEqual(self.matrix_1 * 2, [[2, 4, 18], [6, 8, 0], [10, 12, 8]])
    def test_mul_float(self):
        self.assertEqual(self.matrix_1 * 2.5, [[2.5, 5.0, 22.5], [7.5, 10.0, 0.0], [12.5, 15.0, 10.0]])
    def test_transpose_and_transpose_over_transposed_instance(self):
        # transpose() mutates in place; applying it twice restores the original.
        self.assertEqual(self.matrix_1.transpose(), [[1, 3, 5], [2, 4, 6], [9, 0, 4]])
        self.assertEqual(self.matrix_1.transpose(), [[1, 2, 9], [3, 4, 0], [5, 6, 4]])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,577
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-1/task_1.py
|
"""
1. Определить количество четных и нечетных чисел в заданном списке.
Оформить в виде функции, где на вход будет подаваться список с целыми числами.
Результат функции должен быть 2 числа, количество четных и нечетных соответственно.
"""
def list_check(some_list):
    """Count even and odd integers in *some_list*.

    Returns a string of the form "even: X, not even: Y".
    """
    even_numb = sum(1 for elem in some_list if elem % 2 == 0)
    not_even_numb = len(some_list) - even_numb
    return f"even: {even_numb}, not even: {not_even_numb}"
if __name__ == '__main__':
    # Count evens/odds among 1..19.
    my_list = list(range(1, 20))
    print(list_check(my_list))
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,578
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-3/task_1.py
|
"""
Реализовать некий класс Matrix, у которого:
1. Есть собственный конструктор, который принимает в качестве аргумента - список списков,
копирует его (то есть при изменении списков, значения в экземпляре класса не должны меняться).
Элементы списков гарантированно числа, и не пустые.
2. Метод size без аргументов, который возвращает кортеж вида (число строк, число столбцов).
3. Метод transpose, транспонирующий матрицу и возвращающую результат (данный метод модифицирует
экземпляр класса Matrix)
4. На основе пункта 3 сделать метод класса create_transposed, который будет принимать на вход список списков,
как и в пункте 1, но при этом создавать сразу транспонированную матрицу.
https://ru.wikipedia.org/wiki/%D0%A2%D1%80%D0%B0%D0%BD%D1%81%D0%BF%D0%BE%D0%BD%D0%B8%D1%80%D0%
"""
class Matrix:
    """A 2-D numeric matrix built from a list of row lists."""

    def __init__(self, some_list):
        # Bug fix: the spec requires the matrix to be independent of the
        # caller's lists, but .copy() is shallow and shared the inner row
        # lists; copy each row as well.
        self.data_list = [row[:] for row in some_list]

    def size(self):
        """Return (number of rows, number of columns)."""
        return len(self.data_list), len(self.data_list[0])

    def transpose(self):
        """Transpose the matrix in place and return the new data."""
        self.data_list = [
            [item[i] for item in self.data_list] for i in range(self.size()[1])
        ]
        return self.data_list

    @classmethod
    def create_transposed(cls, int_list):
        """Alternate constructor: a Matrix already transposed relative to *int_list*."""
        obj = cls(int_list)
        obj.transpose()
        return obj
if __name__ == '__main__':
    my_list = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]
    # Transpose an instance in place, then build one pre-transposed.
    t = Matrix(my_list)
    t.transpose()
    print(t.data_list)
    t2 = Matrix.create_transposed(my_list)
    print(t2.data_list)
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,579
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-6/task_1.py
|
"""
1. Реализовать подсчёт елементов в классе Matrix с помощью collections.Counter.
Можно реализовать протоколом итератора и тогда будет такой вызов - Counter(maxtrix).
Либо сделать какой-то метод get_counter(), который будет возвращать объект Counter и подсчитывать все элементы
внутри матрицы. Какой метод - ваш выбор.
"""
from collections import Counter
class MatrixSizeError(Exception):
    """Raised by Matrix.__add__ when the operands' dimensions differ."""
    pass
class Matrix:
    """A 2-D numeric matrix with addition, scalar multiplication, transpose
    and element counting via collections.Counter.
    """

    def __init__(self, some_list):
        # Bug fix: .copy() is shallow, so mutating the caller's inner lists
        # also mutated the matrix; copy each row as well.
        self.data_list = [row[:] for row in some_list]
        self.counter = Counter

    def __add__(self, other):
        """Element-wise sum; raises MatrixSizeError on mismatched sizes."""
        if self.size() != other.size():
            raise MatrixSizeError(
                f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}'
            )
        return [
            [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])]
            for row in range(self.size()[0])
        ]

    def __mul__(self, other):
        """Multiply every element by scalar *other*; returns a list of lists."""
        return [[item * other for item in row] for row in self.data_list]

    def __str__(self):
        """Tab-separated columns, newline-separated rows, no trailing newline."""
        return '\n'.join('\t'.join(map(str, row)) for row in self.data_list)

    def get_counter(self):
        """Return a Counter over every element in the matrix."""
        return self.counter(elem for list_elem in self.data_list for elem in list_elem)

    def size(self):
        """Return (number of rows, number of columns)."""
        return len(self.data_list), len(self.data_list[0])

    def transpose(self):
        """Transpose the matrix in place and return the new data."""
        self.data_list = [
            [item[i] for item in self.data_list] for i in range(self.size()[1])
        ]
        return self.data_list

    @classmethod
    def create_transposed(cls, int_list):
        """Alternate constructor: a Matrix already transposed relative to *int_list*."""
        obj = cls(int_list)
        obj.transpose()
        return obj
if __name__ == '__main__':
    list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]
    list_2 = [[2, 3], [1, 2], [5, 6]]
    matrix1 = Matrix(list_1)
    matrix2 = Matrix(list_2)
    # Print element frequencies for both matrices.
    print(matrix1.get_counter())
    print(matrix2.get_counter())
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,580
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-4/task_1.py
|
"""
К реализованному классу Matrix в Домашнем задании 3 добавить следующее:
1. __add__ принимающий второй экземпляр класса Matrix и возвращающий сумму матриц,
если передалась на вход матрица другого размера - поднимать исключение MatrixSizeError
(по желанию реализовать так, чтобы текст ошибки содержал размерность 1 и 2 матриц - пример:
"Matrixes have different sizes - Matrix(x1, y1) and Matrix(x2, y2)")
2. __mul__ принимающий число типа int или float и возвращающий матрицу, умноженную на скаляр
3. __str__ переводящий матрицу в строку.
Столбцы разделены между собой табуляцией, а строки — переносами строк (символ новой строки).
При этом после каждой строки не должно быть символа табуляции и в конце не должно быть переноса строки.
"""
class MatrixSizeError(Exception):
    """Raised by Matrix.__add__ when the operands' dimensions differ."""
    pass
class Matrix:
    """A 2-D numeric matrix with addition, scalar multiplication and transpose."""

    def __init__(self, some_list):
        # Bug fix: .copy() is shallow, so mutating the caller's inner lists
        # also mutated the matrix; copy each row as well.
        self.data_list = [row[:] for row in some_list]

    def __add__(self, other):
        """Element-wise sum; raises MatrixSizeError on mismatched sizes."""
        if self.size() != other.size():
            raise MatrixSizeError(
                f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}'
            )
        return [
            [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])]
            for row in range(self.size()[0])
        ]

    def __mul__(self, other):
        """Multiply every element by scalar *other*; returns a list of lists."""
        return [[item * other for item in row] for row in self.data_list]

    def __str__(self):
        """Tab-separated columns, newline-separated rows, no trailing newline."""
        return '\n'.join('\t'.join(map(str, row)) for row in self.data_list)

    def size(self):
        """Return (number of rows, number of columns)."""
        return len(self.data_list), len(self.data_list[0])

    def transpose(self):
        """Transpose the matrix in place and return the new data."""
        self.data_list = [
            [item[i] for item in self.data_list] for i in range(self.size()[1])
        ]
        return self.data_list

    @classmethod
    def create_transposed(cls, int_list):
        """Alternate constructor: a Matrix already transposed relative to *int_list*."""
        obj = cls(int_list)
        obj.transpose()
        return obj
if __name__ == '__main__':
    # Demo: transpose, addition (incl. the size-mismatch error), scaling, printing.
    list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]
    list_2 = [[2, 3, 0], [1, 2, 3], [5, 6, 4]]
    list_3 = [[2, 3], [1, 2], [5, 6]]
    t1 = Matrix(list_1)
    t1.transpose()
    t2 = Matrix.create_transposed(list_2)
    t3 = Matrix(list_3)
    print("t1: ", t1.data_list)
    print("t2: ", t2.data_list)
    print("t3: ", t3.data_list)
    # __add__
    print("\nt1.__add__(t2) : ", t1 + t2)
    try:
        # t1 is 3x3, t3 is 3x2 -> must raise MatrixSizeError.
        print("\nПробую: t1 + t3")
        print(t1 + t3)
    except MatrixSizeError:
        print('Тут было вызвано исключение MatrixSizeError')
    # __mul__
    print("\nt2.__mul__(3): \n", t2 * 3)
    # __str__
    print('\nt1.__str__')
    print(t1)
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,581
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-7/task_1.py
|
"""
Сделать скрипт, который будет делать GET запросы на следующие ресурсы:
"http://docs.python-requests.org/",
"https://httpbin.org/get",
"https://httpbin.org/",
"https://api.github.com/",
"https://example.com/",
"https://www.python.org/",
"https://www.google.com.ua/",
"https://regex101.com/",
"https://docs.python.org/3/this-url-will-404.html",
"https://www.nytimes.com/guides/",
"https://www.mediamatters.org/",
"https://1.1.1.1/",
"https://www.politico.com/tipsheets/morning-money",
"https://www.bloomberg.com/markets/economics",
"https://www.ietf.org/rfc/rfc2616.txt"
Для каждого запроса должен быть вывод по примеру: "Resource 'google.com.ua',
request took 0.23 sec, response status - 200."
В реализации нет ограничений - можно использовать процессы, потоки, асинхронность.
Любые вспомагательные механизмы типа Lock, Semaphore, пулы для тредов и потоков.
"""
import aiohttp
import asyncio
from time import time
async def get_response(session, url):
    """GET *url* using *session* and return only the HTTP status code."""
    async with session.get(url) as resp:
        return resp.status
async def request(url):
    """Time a GET of *url* and print a line with the duration and status.

    NOTE(review): a fresh ClientSession per URL forgoes connection reuse;
    sharing one session across all requests would be cheaper -- confirm intent.
    """
    async with aiohttp.ClientSession() as session:
        time_start = time()
        status_code = await get_response(session, url)
        print(f"Resource '{url}', request took {time() - time_start:.3f}, response status - {status_code}")
if __name__ == '__main__':
    urls = [
        "http://docs.python-requests.org/",
        "https://httpbin.org/get",
        "https://httpbin.org/",
        "https://api.github.com/",
        "https://example.com/",
        "https://www.python.org/",
        "https://www.google.com.ua/",
        "https://regex101.com/",
        "https://docs.python.org/3/this-url-will-404.html",
        "https://www.nytimes.com/guides/",
        "https://www.mediamatters.org/",
        "https://1.1.1.1/",
        "https://www.politico.com/tipsheets/morning-money",
        "https://www.bloomberg.com/markets/economics",
        "https://www.ietf.org/rfc/rfc2616.txt"
    ]
    # One coroutine per URL, run concurrently on a single event loop.
    futures = [request(url) for url in urls]
    loop = asyncio.get_event_loop()
    t_start = time()
    # NOTE(review): asyncio.wait over bare coroutines is deprecated in newer
    # Python versions; asyncio.gather(*futures) is the forward-compatible form.
    loop.run_until_complete(asyncio.wait(futures))
    t_end = time()
    print(f"Full fetching got {t_end - t_start:.3f} seconds.")
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,582
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-1/task_2.py
|
"""
Написать функцию, которая принимает 2 числа.
Функция должна вернуть сумму всех элементов числового ряда между этими двумя числами.
(если подать 1 и 5 на вход, то результат должен считаться как 1+2+3+4+5=15)
"""
def all_numbers_sum(num1, num2):
    """Return the sum of all integers from num1 through num2 inclusive.

    Returns 0 when num1 > num2 (empty range), matching the original.
    """
    # sum() consumes the range lazily; the original materialized an
    # intermediate list for no benefit.
    return sum(range(num1, num2 + 1))
if __name__ == '__main__':
    # 1+2+3+4+5 = 15
    print(all_numbers_sum(1, 5))
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,583
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-5/task_1.py
|
# Реализовать пример использования паттерна Singleton
from random import choice
# Генератор событий
def gen_events(instance, data, count=2):
    """Feed *count* events with randomly chosen types from *data* into
    *instance* via its add_event method.
    """
    for idx in range(count):
        kind = choice(data)
        instance.add_event(f'Event-{kind}-{idx}', kind)
# Singleton на примере списка событий
class EventsMeta(type):
    """Singleton metaclass: every call to the class returns one shared instance."""
    _instance = None
    def __call__(cls):
        # Create the instance lazily on first call, then reuse it forever.
        if cls._instance is None:
            cls._instance = super().__call__()
        return cls._instance
class Events(metaclass=EventsMeta):
    """Singleton store of events, bucketed by type ('ok', 'info', 'warn', 'error')."""
    # __metaclass__ = EventsMeta
    # Class-level dict: because the metaclass makes the class a singleton,
    # this is effectively global shared state.
    _events = {
        'ok': [],
        'info': [],
        'warn': [],
        'error': []
    }
    def get_all_events(self):
        """
        :return: dict with all events and types
        """
        return self._events
    def get_events_count(self, key: str = None):
        """
        :param key: if need count of specific type
        :return: all events count or specific event count if param key: not None
        :rtype: tuple, int
        """
        if key:
            try:
                return len(self._events[key])
            # return key, len(self._events[key])
            except KeyError:
                # Unknown type: print the valid names and return None.
                print('Тип события должен быть ' + ', '.join(self._events.keys()))
                return
        return tuple((event, len(self._events[event])) for event in self._events.keys())
    def add_event(self, event: str, event_type: str):
        """
        :param event: event message
        :param event_type: ok, info, warn, error
        :return: None
        """
        try:
            self._events[event_type].append(event)
        except KeyError:
            print('Тип события должен быть ' + ', '.join(self._events.keys()))
    def read_event(self, event_type: str):
        """
        :param event_type: ok, info, warn, error
        :return: tuple last item of event_type, all count events or None
        """
        # pop() removes the NEWEST event (LIFO) and reports the remaining count.
        try:
            return self._events[event_type].pop(), len(self._events[event_type])
        except IndexError:
            print('Событий больше нет')
            return
        except KeyError:
            print('Указан неверный тип события')
            return
    @classmethod
    def get_events_types(cls):
        # Returns a live view of the valid event type names.
        return cls._events.keys()
if __name__ == '__main__':
    event_instance1 = Events()
    event_instance2 = Events()
    event_instance3 = Events()
    # All three names refer to the SAME singleton instance (same id).
    print(type(event_instance1), id(event_instance1))
    print(type(event_instance2), id(event_instance2))
    # Generate 50 random events
    gen_events(event_instance3, list(event_instance3.get_events_types()), 50)
    # Fetch all events (visible through any of the instances)
    print(event_instance2.get_all_events())
    # Counts of every event type, then of one specific type
    print(event_instance3.get_events_count())
    print(f"Error: {event_instance3.get_events_count('error')}")
    # Drain the 'ok' events one by one
    while event_instance3.get_events_count('ok'):
        print(event_instance3.read_event('ok'))
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,584
|
vanya2143/ITEA-tasks
|
refs/heads/master
|
/hw-2/task_1.py
|
"""
1. Написать функцию, которая будет принимать на вход натуральное число n,
и возращать сумму его цифр. Реализовать используя рекурсию
(без циклов, без строк, без контейнерных типов данных).
Пример: get_sum_of_components(123) -> 6 (1+2+3)
"""
def get_sum_of_components_two(n):
    """Recursively sum the decimal digits of natural number *n*.

    Example: 123 -> 1 + 2 + 3 = 6.
    """
    if not n:
        return 0
    return n % 10 + get_sum_of_components_two(n // 10)
if __name__ == '__main__':
    # 1+2+3 = 6
    print(get_sum_of_components_two(123))
|
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
|
1,586
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/migrations/0006_auto_20210209_1401.py
|
# Generated by Django 3.1.6 on 2021-02-09 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated AlterField on AdditionalInfo.genre.

    NOTE(review): only the ORDER of the choices differs from migration 0005
    -- consistent with GENRES being declared as a set literal in models.py,
    whose iteration order is not stable across runs.
    """
    dependencies = [
        ('WebMovies', '0005_auto_20210209_0759'),
    ]
    operations = [
        migrations.AlterField(
            model_name='additionalinfo',
            name='genre',
            field=models.PositiveSmallIntegerField(choices=[(8, 'Historical'), (4, 'Crime'), (7, 'Fantasy'), (3, 'Comedy'), (13, 'Wester'), (11, 'Science Fiction'), (10, 'Romance'), (5, 'Drama'), (2, 'Animation'), (0, 'Other'), (12, 'Thriller'), (9, 'Horror'), (6, 'Experimental'), (1, 'Action')], default=0),
        ),
    ]
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,587
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/apps.py
|
from django.apps import AppConfig
class WebmoviesConfig(AppConfig):
    """Django application configuration for the WebMovies app."""
    name = 'WebMovies'
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,588
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/migrations/0003_movie_description.py
|
# Generated by Django 3.1.6 on 2021-02-04 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the Movie.description text field."""
    dependencies = [
        ('WebMovies', '0002_auto_20210204_0806'),
    ]
    operations = [
        migrations.AddField(
            model_name='movie',
            name='description',
            field=models.TextField(default=''),
        ),
    ]
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,589
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/migrations/0004_auto_20210204_0835.py
|
# Generated by Django 3.1.6 on 2021-02-04 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Movie.imdb_rating, Movie.poster and Movie.premiere."""
    dependencies = [
        ('WebMovies', '0003_movie_description'),
    ]
    operations = [
        migrations.AddField(
            model_name='movie',
            name='imdb_rating',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True),
        ),
        migrations.AddField(
            model_name='movie',
            name='poster',
            field=models.ImageField(blank=True, null=True, upload_to='posters'),
        ),
        migrations.AddField(
            model_name='movie',
            name='premiere',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,590
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/views.py
|
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse
from WebMovies.models import Movie
from .forms import MovieForm
from django.contrib.auth.decorators import login_required
def all_movies(request):
    """Render the list of every movie in the database."""
    return render(request, "movies.html", {"movies": Movie.objects.all()})
@login_required
def new_movie(request):
    """Create a movie from a submitted MovieForm; re-render on invalid input."""
    form = MovieForm(request.POST or None, request.FILES or None)
    if not form.is_valid():
        return render(request, "movie_form.html", {"form": form, "new": True})
    form.save()
    return redirect(all_movies)
@login_required
def edit_movie(request, id):
    """Edit an existing movie; responds 404 when the id is unknown."""
    movie = get_object_or_404(Movie, pk=id)
    form = MovieForm(request.POST or None, request.FILES or None, instance=movie)
    if not form.is_valid():
        return render(request, "movie_form.html", {"form": form, "new": False})
    form.save()
    return redirect(all_movies)
@login_required
def delete_movie(request, id):
    """Show a confirmation page; delete the movie only on POST."""
    movie = get_object_or_404(Movie, pk=id)
    if request.method != "POST":
        return render(request, "confirm.html", {"movie": movie})
    movie.delete()
    return redirect(all_movies)
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,591
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/migrations/0005_auto_20210209_0759.py
|
# Generated by Django 3.1.6 on 2021-02-09 07:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: creates AdditionalInfo and links Movie.additional_info to it."""
    dependencies = [
        ('WebMovies', '0004_auto_20210204_0835'),
    ]
    operations = [
        migrations.CreateModel(
            name='AdditionalInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('duration', models.PositiveIntegerField(default=0)),
                ('genre', models.PositiveSmallIntegerField(choices=[(8, 'Historical'), (4, 'Crime'), (3, 'Comedy'), (5, 'Drama'), (11, 'Science Fiction'), (0, 'Other'), (9, 'Horror'), (1, 'Action'), (6, 'Experimental'), (10, 'Romance'), (7, 'Fantasy'), (12, 'Thriller'), (13, 'Wester'), (2, 'Animation')], default=0)),
            ],
        ),
        migrations.AddField(
            model_name='movie',
            name='additional_info',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='WebMovies.additionalinfo'),
        ),
    ]
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,592
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/admin.py
|
from django.contrib import admin
from .models import AdditionalInfo, Movie
# Register your models here.
# admin.site.register(Movie)
@admin.register(Movie)
class MovieAdmin(admin.ModelAdmin):
    """Admin list view for Movie: title/rating/year columns, year filter, title search."""
    # fields = ["Title", "Description", "Year"]
    # exclude = ["Description"]
    list_display = ["title", "imdb_rating", "year"]
    list_filter = ("year",)
    search_fields = ("title",)
admin.site.register(AdditionalInfo)
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,593
|
Kw4dr4t/WebMovies
|
refs/heads/master
|
/WebMovies/models.py
|
from django.db import models
class AdditionalInfo(models.Model):
    """Optional extra data for a Movie: duration and genre."""
    # Bug fix: GENRES was a SET literal, so its iteration order varied
    # between interpreter runs and Django generated a spurious AlterField
    # migration for `genre` whenever the order changed (compare migrations
    # 0005 and 0006: same choices, different order). A list keeps the
    # choices in a stable, declared order.
    GENRES = [
        (0, "Other"),
        (1, "Action"),
        (2, "Animation"),
        (3, "Comedy"),
        (4, "Crime"),
        (5, "Drama"),
        (6, "Experimental"),
        (7, "Fantasy"),
        (8, "Historical"),
        (9, "Horror"),
        (10, "Romance"),
        (11, "Science Fiction"),
        (12, "Thriller"),
        (13, "Wester"),
    ]
    duration = models.PositiveIntegerField(default=0)
    genre = models.PositiveSmallIntegerField(default=0, choices=GENRES)
class Movie(models.Model):
    """A film record; rating, poster, premiere and extra info are optional."""
    title = models.CharField(max_length=64, blank=False, unique=True)
    year = models.PositiveSmallIntegerField(default=2000, blank=True)
    description = models.TextField(default="")
    premiere = models.DateField(auto_now=False, null=True, blank=True)
    imdb_rating = models.DecimalField(
        max_digits=4, decimal_places=2, null=True, blank=True
    )
    poster = models.ImageField(upload_to="posters", null=True, blank=True)
    # Optional one-to-one link; deleting the AdditionalInfo cascades to this movie.
    additional_info = models.OneToOneField(
        AdditionalInfo, on_delete=models.CASCADE, null=True, blank=True
    )
    def __str__(self):
        return self.title_with_year()
    def title_with_year(self):
        """Return the title formatted as 'Title (year)'."""
        return "{} ({})".format(self.title, self.year)
|
{"/WebMovies/views.py": ["/WebMovies/models.py"], "/WebMovies/admin.py": ["/WebMovies/models.py"]}
|
1,597
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/main.py
|
import gym
from Brain import SACAgent
from Common import Play, Logger, get_params
import numpy as np
from tqdm import tqdm
import mujoco_py
def concat_state_latent(s, z_, n):
    """Return state *s* with a length-*n* one-hot encoding of skill *z_* appended."""
    skill_code = np.eye(n)[z_]  # row z_ of the identity matrix = one-hot vector
    return np.concatenate([s, skill_code])
if __name__ == "__main__":
params = get_params()
test_env = gym.make(params["env_name"])
n_states = test_env.observation_space.shape[0]
n_actions = test_env.action_space.shape[0]
action_bounds = [test_env.action_space.low[0], test_env.action_space.high[0]]
params.update({"n_states": n_states,
"n_actions": n_actions,
"action_bounds": action_bounds})
print("params:", params)
test_env.close()
del test_env, n_states, n_actions, action_bounds
env = gym.make(params["env_name"])
p_z = np.full(params["n_skills"], 1 / params["n_skills"])
agent = SACAgent(p_z=p_z, **params)
logger = Logger(agent, **params)
if params["do_train"]:
if not params["train_from_scratch"]:
episode, last_logq_zs, np_rng_state, *env_rng_states, torch_rng_state, random_rng_state = logger.load_weights()
agent.hard_update_target_network()
min_episode = episode
np.random.set_state(np_rng_state)
env.np_random.set_state(env_rng_states[0])
env.observation_space.np_random.set_state(env_rng_states[1])
env.action_space.np_random.set_state(env_rng_states[2])
agent.set_rng_states(torch_rng_state, random_rng_state)
print("Keep training from previous run.")
else:
min_episode = 0
last_logq_zs = 0
np.random.seed(params["seed"])
env.seed(params["seed"])
env.observation_space.seed(params["seed"])
env.action_space.seed(params["seed"])
print("Training from scratch.")
logger.on()
for episode in tqdm(range(1 + min_episode, params["max_n_episodes"] + 1)):
z = np.random.choice(params["n_skills"], p=p_z)
state = env.reset()
state = concat_state_latent(state, z, params["n_skills"])
episode_reward = 0
logq_zses = []
max_n_steps = min(params["max_episode_len"], env.spec.max_episode_steps)
for step in range(1, 1 + max_n_steps):
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
next_state = concat_state_latent(next_state, z, params["n_skills"])
agent.store(state, z, done, action, next_state)
logq_zs = agent.train()
if logq_zs is None:
logq_zses.append(last_logq_zs)
else:
logq_zses.append(logq_zs)
episode_reward += reward
state = next_state
if done:
break
logger.log(episode,
episode_reward,
z,
sum(logq_zses) / len(logq_zses),
step,
np.random.get_state(),
env.np_random.get_state(),
env.observation_space.np_random.get_state(),
env.action_space.np_random.get_state(),
*agent.get_rng_states(),
)
else:
logger.load_weights()
player = Play(env, agent, n_skills=params["n_skills"])
player.evaluate()
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,598
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Brain/replay_memory.py
|
import random
from collections import namedtuple
Transition = namedtuple('Transition', ('state', 'z', 'done', 'action', 'next_state'))


class Memory:
    """Fixed-capacity FIFO replay buffer of Transition tuples.

    Sampling relies on the module-level `random` generator, which is
    seeded once at construction time for reproducibility.
    """

    def __init__(self, buffer_size, seed):
        self.buffer_size = buffer_size
        self.buffer = []
        self.seed = seed
        random.seed(self.seed)

    def add(self, *transition):
        """Append one transition, evicting the oldest entry when full."""
        self.buffer.append(Transition(*transition))
        while len(self.buffer) > self.buffer_size:
            del self.buffer[0]
        assert len(self.buffer) <= self.buffer_size

    def sample(self, size):
        """Return `size` transitions drawn without replacement."""
        return random.sample(self.buffer, size)

    def __len__(self):
        return len(self.buffer)

    @staticmethod
    def get_rng_state():
        """Snapshot the global `random` state (for checkpointing)."""
        return random.getstate()

    @staticmethod
    def set_rng_state(random_rng_state):
        """Restore a previously captured global `random` state."""
        random.setstate(random_rng_state)
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,599
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Brain/model.py
|
from abc import ABC
import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions import Normal
def init_weight(layer, initializer="he normal"):
    """Initialize `layer.weight` in place.

    Supported initializers: "he normal" (default) and "xavier uniform";
    any other string leaves the weights untouched. Biases are not touched.
    """
    if initializer == "he normal":
        nn.init.kaiming_normal_(layer.weight)
    elif initializer == "xavier uniform":
        nn.init.xavier_uniform_(layer.weight)
class Discriminator(nn.Module, ABC):
    """Skill discriminator q(z|s): maps a raw state to per-skill logits."""

    def __init__(self, n_states, n_skills, n_hidden_filters=256):
        super(Discriminator, self).__init__()
        self.n_states = n_states
        self.n_skills = n_skills
        self.n_hidden_filters = n_hidden_filters
        # Two He-initialized hidden layers feeding a Xavier-initialized
        # logit head; every bias starts at zero.
        self.hidden1 = nn.Linear(self.n_states, self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(self.n_hidden_filters, self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.q = nn.Linear(self.n_hidden_filters, self.n_skills)
        init_weight(self.q, initializer="xavier uniform")
        self.q.bias.data.zero_()

    def forward(self, states):
        """Return unnormalized skill logits with shape (batch, n_skills)."""
        hidden = F.relu(self.hidden2(F.relu(self.hidden1(states))))
        return self.q(hidden)
class ValueNetwork(nn.Module, ABC):
    """State-value head V(s): two hidden layers to a scalar output."""

    def __init__(self, n_states, n_hidden_filters=256):
        super(ValueNetwork, self).__init__()
        self.n_states = n_states
        self.n_hidden_filters = n_hidden_filters
        # He-init hidden stack, Xavier-init scalar head, zeroed biases.
        self.hidden1 = nn.Linear(self.n_states, self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(self.n_hidden_filters, self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.value = nn.Linear(self.n_hidden_filters, 1)
        init_weight(self.value, initializer="xavier uniform")
        self.value.bias.data.zero_()

    def forward(self, states):
        """Return V(s) with shape (batch, 1)."""
        hidden = F.relu(self.hidden2(F.relu(self.hidden1(states))))
        return self.value(hidden)
class QvalueNetwork(nn.Module, ABC):
    """Action-value head Q(s, a) over concatenated state-action input."""

    def __init__(self, n_states, n_actions, n_hidden_filters=256):
        super(QvalueNetwork, self).__init__()
        self.n_states = n_states
        self.n_hidden_filters = n_hidden_filters
        self.n_actions = n_actions
        # First layer consumes the state-action concatenation.
        self.hidden1 = nn.Linear(self.n_states + self.n_actions, self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(self.n_hidden_filters, self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.q_value = nn.Linear(self.n_hidden_filters, 1)
        init_weight(self.q_value, initializer="xavier uniform")
        self.q_value.bias.data.zero_()

    def forward(self, states, actions):
        """Return Q(s, a) with shape (batch, 1)."""
        joint = torch.cat([states, actions], dim=1)
        hidden = F.relu(self.hidden2(F.relu(self.hidden1(joint))))
        return self.q_value(hidden)
class PolicyNetwork(nn.Module, ABC):
    """Squashed-Gaussian SAC policy.

    forward() returns a Normal over pre-tanh actions;
    sample_or_likelihood() draws a reparameterized sample, squashes it
    with tanh, scales it into the action bounds and returns the
    corrected log-likelihood.
    """
    def __init__(self, n_states, n_actions, action_bounds, n_hidden_filters=256):
        # action_bounds: [low, high]; scaling uses action_bounds[1] only,
        # so a symmetric range (low == -high) is presumably expected - TODO confirm.
        super(PolicyNetwork, self).__init__()
        self.n_states = n_states
        self.n_hidden_filters = n_hidden_filters
        self.n_actions = n_actions
        self.action_bounds = action_bounds
        self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        # Separate Xavier-initialized heads for the Gaussian mean and log-std.
        self.mu = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_actions)
        init_weight(self.mu, initializer="xavier uniform")
        self.mu.bias.data.zero_()
        self.log_std = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_actions)
        init_weight(self.log_std, initializer="xavier uniform")
        self.log_std.bias.data.zero_()
    def forward(self, states):
        """Return a Normal distribution over pre-tanh actions."""
        x = F.relu(self.hidden1(states))
        x = F.relu(self.hidden2(x))
        mu = self.mu(x)
        log_std = self.log_std(x)
        # Clamp log-std to [-20, 2] for numerical stability before exp().
        std = log_std.clamp(min=-20, max=2).exp()
        dist = Normal(mu, std)
        return dist
    def sample_or_likelihood(self, states):
        """Sample a bounded action and its log-likelihood, shape (batch, 1)."""
        dist = self(states)
        # Reparameterization trick
        u = dist.rsample()
        action = torch.tanh(u)
        log_prob = dist.log_prob(value=u)
        # Enforcing action bounds
        # (change-of-variables correction for the tanh squashing; the
        # 1e-6 guards against log(0) when tanh saturates).
        log_prob -= torch.log(1 - action ** 2 + 1e-6)
        log_prob = log_prob.sum(-1, keepdim=True)
        return (action * self.action_bounds[1]).clamp_(self.action_bounds[0], self.action_bounds[1]), log_prob
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,600
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Common/logger.py
|
import time
import numpy as np
import psutil
from torch.utils.tensorboard import SummaryWriter
import torch
import os
import datetime
import glob
class Logger:
    """Training bookkeeper for the DIAYN/SAC agent.

    Prints periodic console stats, writes TensorBoard scalars/histograms,
    and saves/restores full training checkpoints (networks, optimizers,
    episode counter and RNG states).
    """

    def __init__(self, agent, **config):
        self.config = config
        self.agent = agent
        # Run directory "<EnvName>/<timestamp>"; [:-3] strips a version
        # suffix such as "-v3" from the env name.
        self.log_dir = self.config["env_name"][:-3] + "/" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        self.start_time = 0
        self.duration = 0
        # Exponential moving average of the discriminator's log q(z|s).
        self.running_logq_zs = 0
        self.max_episode_reward = -np.inf
        self._turn_on = False
        self.to_gb = lambda in_bytes: in_bytes / 1024 / 1024 / 1024  # bytes -> GiB
        if self.config["do_train"] and self.config["train_from_scratch"]:
            self._create_wights_folder(self.log_dir)
            self._log_params()

    @staticmethod
    def _create_wights_folder(dir):
        # NOTE(review): "wights" is a historical typo for "weights"; the
        # name is kept so existing call sites stay valid.
        # BUGFIX: `dir` is "<EnvName>/<timestamp>", a nested path, and
        # os.mkdir() cannot create intermediate directories - the original
        # raised FileNotFoundError whenever "Checkpoints/<EnvName>" did
        # not already exist. os.makedirs creates the whole chain.
        os.makedirs("Checkpoints/" + dir, exist_ok=True)

    def _log_params(self):
        """Dump the run configuration as TensorBoard text entries."""
        with SummaryWriter("Logs/" + self.log_dir) as writer:
            for k, v in self.config.items():
                writer.add_text(k, str(v))

    def on(self):
        """Start (or restart) the per-interval wall-clock timer."""
        self.start_time = time.time()
        self._turn_on = True

    def _off(self):
        self.duration = time.time() - self.start_time

    def log(self, *args):
        """Record one episode.

        args = (episode, episode_reward, skill, logq_zs, step, *rng_states).
        Saves a checkpoint three times per print interval, prints a stats
        line every `interval` episodes, and streams scalars to TensorBoard.
        """
        if not self._turn_on:
            print("First you should turn the logger on once, via on() method to be able to log parameters.")
            return
        self._off()
        episode, episode_reward, skill, logq_zs, step, *rng_states = args
        self.max_episode_reward = max(self.max_episode_reward, episode_reward)
        # EMA of log q(z|s), seeded from the first value observed.
        if self.running_logq_zs == 0:
            self.running_logq_zs = logq_zs
        else:
            self.running_logq_zs = 0.99 * self.running_logq_zs + 0.01 * logq_zs
        ram = psutil.virtual_memory()
        assert self.to_gb(ram.used) < 0.98 * self.to_gb(ram.total), "RAM usage exceeded permitted limit!"
        if episode % (self.config["interval"] // 3) == 0:
            self._save_weights(episode, *rng_states)
        if episode % self.config["interval"] == 0:
            print("E: {}| "
                  "Skill: {}| "
                  "E_Reward: {:.1f}| "
                  "EP_Duration: {:.2f}| "
                  "Memory_Length: {}| "
                  "Mean_steps_time: {:.3f}| "
                  "{:.1f}/{:.1f} GB RAM| "
                  "Time: {} ".format(episode,
                                     skill,
                                     episode_reward,
                                     self.duration,
                                     len(self.agent.memory),
                                     self.duration / step,
                                     self.to_gb(ram.used),
                                     self.to_gb(ram.total),
                                     datetime.datetime.now().strftime("%H:%M:%S"),
                                     ))
        with SummaryWriter("Logs/" + self.log_dir) as writer:
            writer.add_scalar("Max episode reward", self.max_episode_reward, episode)
            writer.add_scalar("Running logq(z|s)", self.running_logq_zs, episode)
            writer.add_histogram(str(skill), episode_reward)
            writer.add_histogram("Total Rewards", episode_reward)
        # Restart the timer for the next episode.
        self.on()

    def _save_weights(self, episode, *rng_states):
        """Write all network/optimizer states plus RNG states to params.pth."""
        torch.save({"policy_network_state_dict": self.agent.policy_network.state_dict(),
                    "q_value_network1_state_dict": self.agent.q_value_network1.state_dict(),
                    "q_value_network2_state_dict": self.agent.q_value_network2.state_dict(),
                    "value_network_state_dict": self.agent.value_network.state_dict(),
                    "discriminator_state_dict": self.agent.discriminator.state_dict(),
                    "q_value1_opt_state_dict": self.agent.q_value1_opt.state_dict(),
                    "q_value2_opt_state_dict": self.agent.q_value2_opt.state_dict(),
                    "policy_opt_state_dict": self.agent.policy_opt.state_dict(),
                    "value_opt_state_dict": self.agent.value_opt.state_dict(),
                    "discriminator_opt_state_dict": self.agent.discriminator_opt.state_dict(),
                    "episode": episode,
                    "rng_states": rng_states,
                    "max_episode_reward": self.max_episode_reward,
                    "running_logq_zs": self.running_logq_zs
                    },
                   "Checkpoints/" + self.log_dir + "/params.pth")

    def load_weights(self):
        """Restore the newest checkpoint for this environment.

        Returns (episode, running_logq_zs, *rng_states).
        """
        # BUGFIX: the original globbed "Checkpoints/<Env>/" (the parent
        # directory itself, not its run subdirectories) and then rebuilt
        # log_dir from the wrong path component, so resuming never found
        # "<timestamp>/params.pth". Glob the run subdirectories and keep
        # log_dir relative to Checkpoints/ ("<Env>/<timestamp>") so that
        # subsequent _save_weights calls target the same run.
        model_dir = glob.glob("Checkpoints/" + self.config["env_name"][:-3] + "/*")
        model_dir.sort()
        checkpoint = torch.load(model_dir[-1] + "/params.pth")
        self.log_dir = os.path.relpath(model_dir[-1], "Checkpoints")
        self.agent.policy_network.load_state_dict(checkpoint["policy_network_state_dict"])
        self.agent.q_value_network1.load_state_dict(checkpoint["q_value_network1_state_dict"])
        self.agent.q_value_network2.load_state_dict(checkpoint["q_value_network2_state_dict"])
        self.agent.value_network.load_state_dict(checkpoint["value_network_state_dict"])
        self.agent.discriminator.load_state_dict(checkpoint["discriminator_state_dict"])
        self.agent.q_value1_opt.load_state_dict(checkpoint["q_value1_opt_state_dict"])
        self.agent.q_value2_opt.load_state_dict(checkpoint["q_value2_opt_state_dict"])
        self.agent.policy_opt.load_state_dict(checkpoint["policy_opt_state_dict"])
        self.agent.value_opt.load_state_dict(checkpoint["value_opt_state_dict"])
        self.agent.discriminator_opt.load_state_dict(checkpoint["discriminator_opt_state_dict"])
        self.max_episode_reward = checkpoint["max_episode_reward"]
        self.running_logq_zs = checkpoint["running_logq_zs"]
        return checkpoint["episode"], self.running_logq_zs, *checkpoint["rng_states"]
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,601
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Common/__init__.py
|
from .config import get_params
from .play import Play
from .logger import Logger
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,602
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Common/config.py
|
import argparse
def get_params():
    """Assemble the run configuration.

    Command-line flags (machine/user-specific knobs) are merged with
    fixed hyper-parameters taken from the DIAYN and SAC papers; the
    fixed values win on key collisions.
    """
    parser = argparse.ArgumentParser(
        description="Variable parameters based on the configuration of the machine or user's choice")
    parser.add_argument("--env_name", default="BipedalWalker-v3", type=str, help="Name of the environment.")
    parser.add_argument("--interval", default=20, type=int,
                        help="The interval specifies how often different parameters should be saved and printed,"
                             " counted by episodes.")
    parser.add_argument("--do_train", action="store_true",
                        help="The flag determines whether to train the agent or play with it.")
    parser.add_argument("--train_from_scratch", action="store_false",
                        help="The flag determines whether to train from scratch or continue previous tries.")
    parser.add_argument("--mem_size", default=int(1e+6), type=int, help="The memory size.")
    parser.add_argument("--n_skills", default=50, type=int, help="The number of skills to learn.")
    parser.add_argument("--reward_scale", default=1, type=float, help="The reward scaling factor introduced in SAC.")
    parser.add_argument("--seed", default=123, type=int,
                        help="The randomness' seed for torch, numpy, random & gym[env].")
    cli_params = parser.parse_args()

    # Parameters based on the DIAYN and SAC papers.
    # region default parameters
    default_params = dict(lr=3e-4,
                          batch_size=256,
                          max_n_episodes=5000,
                          max_episode_len=1000,
                          gamma=0.99,
                          alpha=0.1,
                          tau=0.005,
                          n_hiddens=300)
    # endregion

    # Fixed hyper-parameters override any CLI key of the same name.
    return dict(vars(cli_params), **default_params)
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,603
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Common/play.py
|
# from mujoco_py.generated import const
from mujoco_py import GlfwContext
import cv2
import numpy as np
import os
GlfwContext(offscreen=True)
class Play:
    """Roll out each learned skill once and record it to an .avi file."""

    def __init__(self, env, agent, n_skills):
        self.env = env
        self.agent = agent
        self.n_skills = n_skills
        # Evaluation runs the policy on CPU and in eval mode.
        self.agent.set_policy_net_to_cpu_mode()
        self.agent.set_policy_net_to_eval_mode()
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if not os.path.exists("Vid/"):
            os.mkdir("Vid/")

    @staticmethod
    def concat_state_latent(s, z_, n):
        # Append a one-hot encoding of skill z_ (out of n skills) to state s.
        z_one_hot = np.zeros(n)
        z_one_hot[z_] = 1
        return np.concatenate([s, z_one_hot])

    def evaluate(self):
        """Run one episode per skill, writing frames to Vid/skill<z>.avi
        at 50 fps, 250x250, and printing the episode reward."""
        for z in range(self.n_skills):
            video_writer = cv2.VideoWriter(f"Vid/skill{z}" + ".avi", self.fourcc, 50.0, (250, 250))
            s = self.env.reset()
            s = self.concat_state_latent(s, z, self.n_skills)
            episode_reward = 0
            for _ in range(self.env.spec.max_episode_steps):
                action = self.agent.choose_action(s)
                s_, r, done, _ = self.env.step(action)
                s_ = self.concat_state_latent(s_, z, self.n_skills)
                episode_reward += r
                if done:
                    break
                s = s_
                # Render, convert RGB->BGR for OpenCV, resize, append frame.
                I = self.env.render(mode='rgb_array')
                I = cv2.cvtColor(I, cv2.COLOR_RGB2BGR)
                I = cv2.resize(I, (250, 250))
                video_writer.write(I)
            print(f"skill: {z}, episode reward:{episode_reward:.1f}")
            video_writer.release()
        self.env.close()
        cv2.destroyAllWindows()
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,604
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Brain/agent.py
|
import numpy as np
from .model import PolicyNetwork, QvalueNetwork, ValueNetwork, Discriminator
import torch
from .replay_memory import Memory, Transition
from torch import from_numpy
from torch.optim.adam import Adam
from torch.nn.functional import log_softmax
class SACAgent:
    """Soft Actor-Critic agent with a DIAYN skill discriminator.

    States handed to choose_action/store/train are presumably the raw
    environment observation concatenated with a one-hot skill vector of
    length n_skills - TODO confirm against the caller's
    concat_state_latent.
    """
    def __init__(self,
                 p_z,
                 **config):
        self.config = config
        self.n_states = self.config["n_states"]
        self.n_skills = self.config["n_skills"]
        self.batch_size = self.config["batch_size"]
        # Skill prior p(z), tiled row-wise so every batch row can gather
        # its own prior probability.
        self.p_z = np.tile(p_z, self.batch_size).reshape(self.batch_size, self.n_skills)
        self.memory = Memory(self.config["mem_size"], self.config["seed"])
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        torch.manual_seed(self.config["seed"])
        self.policy_network = PolicyNetwork(n_states=self.n_states + self.n_skills,
                                            n_actions=self.config["n_actions"],
                                            action_bounds=self.config["action_bounds"],
                                            n_hidden_filters=self.config["n_hiddens"]).to(self.device)
        # Twin Q-networks (clipped double-Q learning).
        self.q_value_network1 = QvalueNetwork(n_states=self.n_states + self.n_skills,
                                              n_actions=self.config["n_actions"],
                                              n_hidden_filters=self.config["n_hiddens"]).to(self.device)
        self.q_value_network2 = QvalueNetwork(n_states=self.n_states + self.n_skills,
                                              n_actions=self.config["n_actions"],
                                              n_hidden_filters=self.config["n_hiddens"]).to(self.device)
        self.value_network = ValueNetwork(n_states=self.n_states + self.n_skills,
                                          n_hidden_filters=self.config["n_hiddens"]).to(self.device)
        # Target value network; synced hard here, then soft-updated each step.
        self.value_target_network = ValueNetwork(n_states=self.n_states + self.n_skills,
                                                 n_hidden_filters=self.config["n_hiddens"]).to(self.device)
        self.hard_update_target_network()
        # Discriminator sees only the raw state (skill one-hot stripped off).
        self.discriminator = Discriminator(n_states=self.n_states, n_skills=self.n_skills,
                                           n_hidden_filters=self.config["n_hiddens"]).to(self.device)
        self.mse_loss = torch.nn.MSELoss()
        self.cross_ent_loss = torch.nn.CrossEntropyLoss()
        self.value_opt = Adam(self.value_network.parameters(), lr=self.config["lr"])
        self.q_value1_opt = Adam(self.q_value_network1.parameters(), lr=self.config["lr"])
        self.q_value2_opt = Adam(self.q_value_network2.parameters(), lr=self.config["lr"])
        self.policy_opt = Adam(self.policy_network.parameters(), lr=self.config["lr"])
        self.discriminator_opt = Adam(self.discriminator.parameters(), lr=self.config["lr"])
    def choose_action(self, states):
        """Sample one bounded action for a single (state+skill) vector."""
        states = np.expand_dims(states, axis=0)
        states = from_numpy(states).float().to(self.device)
        action, _ = self.policy_network.sample_or_likelihood(states)
        return action.detach().cpu().numpy()[0]
    def store(self, state, z, done, action, next_state):
        """Push one transition into the replay buffer (kept on CPU).

        z is stored as a ByteTensor, which presumably caps n_skills at
        256 - TODO confirm.
        """
        state = from_numpy(state).float().to("cpu")
        z = torch.ByteTensor([z]).to("cpu")
        done = torch.BoolTensor([done]).to("cpu")
        action = torch.Tensor([action]).to("cpu")
        next_state = from_numpy(next_state).float().to("cpu")
        self.memory.add(state, z, done, action, next_state)
    def unpack(self, batch):
        """Collate a list of Transitions into batched device tensors."""
        batch = Transition(*zip(*batch))
        states = torch.cat(batch.state).view(self.batch_size, self.n_states + self.n_skills).to(self.device)
        zs = torch.cat(batch.z).view(self.batch_size, 1).long().to(self.device)
        dones = torch.cat(batch.done).view(self.batch_size, 1).to(self.device)
        actions = torch.cat(batch.action).view(-1, self.config["n_actions"]).to(self.device)
        next_states = torch.cat(batch.next_state).view(self.batch_size, self.n_states + self.n_skills).to(self.device)
        return states, zs, dones, actions, next_states
    def train(self):
        """One SAC + DIAYN update step.

        Returns -discriminator_loss (a log q(z|s) proxy used by the
        training loop's logging) or None while the buffer is still
        smaller than one batch.
        """
        if len(self.memory) < self.batch_size:
            return None
        else:
            batch = self.memory.sample(self.batch_size)
            states, zs, dones, actions, next_states = self.unpack(batch)
            p_z = from_numpy(self.p_z).to(self.device)
            # Calculating the value target
            reparam_actions, log_probs = self.policy_network.sample_or_likelihood(states)
            q1 = self.q_value_network1(states, reparam_actions)
            q2 = self.q_value_network2(states, reparam_actions)
            q = torch.min(q1, q2)
            # V target: clipped double-Q minus the entropy term (detached).
            target_value = q.detach() - self.config["alpha"] * log_probs.detach()
            value = self.value_network(states)
            value_loss = self.mse_loss(value, target_value)
            # DIAYN pseudo-reward: log q(z|s') - log p(z); the discriminator
            # is fed only the raw-state slice of next_states.
            logits = self.discriminator(torch.split(next_states, [self.n_states, self.n_skills], dim=-1)[0])
            p_z = p_z.gather(-1, zs)
            logq_z_ns = log_softmax(logits, dim=-1)
            rewards = logq_z_ns.gather(-1, zs).detach() - torch.log(p_z + 1e-6)
            # Calculating the Q-Value target
            with torch.no_grad():
                # (~dones) zeroes the bootstrap term on terminal transitions.
                target_q = self.config["reward_scale"] * rewards.float() + \
                           self.config["gamma"] * self.value_target_network(next_states) * (~dones)
            q1 = self.q_value_network1(states, actions)
            q2 = self.q_value_network2(states, actions)
            q1_loss = self.mse_loss(q1, target_q)
            q2_loss = self.mse_loss(q2, target_q)
            policy_loss = (self.config["alpha"] * log_probs - q).mean()
            logits = self.discriminator(torch.split(states, [self.n_states, self.n_skills], dim=-1)[0])
            discriminator_loss = self.cross_ent_loss(logits, zs.squeeze(-1))
            # Optimize each component in turn. NOTE(review): the order
            # matters - policy_loss.backward() runs before the Q/value
            # optimizers step, since its graph shares `q` computed above.
            self.policy_opt.zero_grad()
            policy_loss.backward()
            self.policy_opt.step()
            self.value_opt.zero_grad()
            value_loss.backward()
            self.value_opt.step()
            self.q_value1_opt.zero_grad()
            q1_loss.backward()
            self.q_value1_opt.step()
            self.q_value2_opt.zero_grad()
            q2_loss.backward()
            self.q_value2_opt.step()
            self.discriminator_opt.zero_grad()
            discriminator_loss.backward()
            self.discriminator_opt.step()
            self.soft_update_target_network(self.value_network, self.value_target_network)
            return -discriminator_loss.item()
    def soft_update_target_network(self, local_network, target_network):
        """Polyak-average local parameters into the target network (rate tau)."""
        for target_param, local_param in zip(target_network.parameters(), local_network.parameters()):
            target_param.data.copy_(self.config["tau"] * local_param.data +
                                    (1 - self.config["tau"]) * target_param.data)
    def hard_update_target_network(self):
        """Copy the value network into the target network verbatim."""
        self.value_target_network.load_state_dict(self.value_network.state_dict())
        self.value_target_network.eval()
    def get_rng_states(self):
        """Return (torch RNG state, replay-buffer RNG state) for checkpointing."""
        return torch.get_rng_state(), self.memory.get_rng_state()
    def set_rng_states(self, torch_rng_state, random_rng_state):
        """Restore RNG states captured by get_rng_states()."""
        torch.set_rng_state(torch_rng_state.to("cpu"))
        self.memory.set_rng_state(random_rng_state)
    def set_policy_net_to_eval_mode(self):
        self.policy_network.eval()
    def set_policy_net_to_cpu_mode(self):
        # Used by evaluation/Play, which runs without a GPU.
        self.device = torch.device("cpu")
        self.policy_network.to(self.device)
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,605
|
eric-z-lin/DIAYN-PyTorch
|
refs/heads/main
|
/Brain/__init__.py
|
from .agent import SACAgent
|
{"/main.py": ["/Brain/__init__.py", "/Common/__init__.py"], "/Common/__init__.py": ["/Common/config.py", "/Common/play.py", "/Common/logger.py"], "/Brain/agent.py": ["/Brain/model.py", "/Brain/replay_memory.py"], "/Brain/__init__.py": ["/Brain/agent.py"]}
|
1,606
|
KimGyuri875/TIL
|
refs/heads/master
|
/Django/bbsApp_ORM practice/views.py
|
from django.shortcuts import render, redirect
from .models import *
# Create your views here.
# select * from table;
# -> modelName.objects.all()
# select * from table where id = xxxx;
# -> modelName.objects.get(id = xxxx)
# -> modelName.objects.filter(id = xxxx)
# select * from table where id = xxxx and pwd = yyyy;
# -> modelName.objects.get(id = xxxx, pwd = yyyy)
# -> modelName.objects.filter(id = xxxx, pwd = yyyy)
# select * from table where id = xxxx or pwd = yyyy;
# -> modelName.objects.filter(Q(id = xxxx) | Q(pwd = yyyy))
# select * from table where subject like '%공지%'
# -> modelName.objects.filter(subject__icontains='공지')
# select * from table where subject like '공지%'
# -> modelName.objects.filter(subject__startswith='공지')
# select * from table where subject like '%공지'
# -> modelName.objects.filter(subject__endswith='공지')
# insert into table values()
# model(attr=value, attr=value)
# model.save()
# delete from tableName where id = xxxx
# -> modelName.objects.get(id=xxxx).delete()
# update tableName set attr = value where id = xxxx
# -> obj = modelName.objects.get(id=xxxx)
#    obj.attr = value
#    obj.save() -- auto commit
def index(request):
    """Entry point: serve the login page."""
    login_template = 'login.html'
    return render(request, login_template)
def loginProc(request):
    """Validate submitted credentials; show home on success, else bounce
    back to the login page.

    BUGFIX: the original used Model.objects.get(), which raises
    DoesNotExist when no row matches (a 500 error on any bad login) -
    get() never returns None, so the None-check/redirect branch was
    unreachable. filter(...).first() returns None for no match, which is
    what the check expects.
    """
    print('request - loginProc')
    if request.method == "GET":
        return redirect('index')
    elif request.method == "POST":
        # NOTE(review): passwords are compared in clear text here;
        # Django's auth framework should be used instead.
        user_id = request.POST['id']
        user_pwd = request.POST['pwd']
        # select * from BbsUserRegister where user_id = ... and user_pwd = ...
        user = BbsUserRegister.objects.filter(user_id=user_id, user_pwd=user_pwd).first()
        print('user result - ', user)
        if user is not None:
            return render(request, 'home.html')
        else:
            return redirect('index')
def registerForm(request):
    """Serve the sign-up page."""
    join_template = 'join.html'
    return render(request, join_template)
def register(request):
    """Create a new user row from the sign-up form, then show the login page.

    Fixes: the original bound the new model instance to a local named
    `register`, shadowing this very view function, and used `id`,
    shadowing the builtin.
    """
    print('request - register')
    if request.method == "GET":
        return redirect('index')
    elif request.method == "POST":
        user_id = request.POST['id']
        user_pwd = request.POST['pwd']
        user_name = request.POST['name']
        # insert into table values()
        new_user = BbsUserRegister(user_id=user_id, user_pwd=user_pwd, user_name=user_name)
        new_user.save()
        return render(request, 'login.html')
|
{"/Django/bbsApp_ORM practice/views.py": ["/Django/bbsApp_ORM practice/models.py"]}
|
1,607
|
KimGyuri875/TIL
|
refs/heads/master
|
/Django/bbsApp_ORM practice/urls.py
|
from django.contrib import admin
from django.urls import path, include
from bbsApp import views
urlpatterns = [
    # Landing/login page.
    path('index/', views.index, name='index'),
    # Credential check (POST); GET bounces back to index.
    path('login/', views.loginProc, name='login'),
    # Sign-up form page.
    path('registerForm/', views.registerForm, name='registerForm'),
    # Sign-up submission handler.
    path('register/', views.register, name='register'),
]
|
{"/Django/bbsApp_ORM practice/views.py": ["/Django/bbsApp_ORM practice/models.py"]}
|
1,608
|
KimGyuri875/TIL
|
refs/heads/master
|
/Django/bbsApp_ORM practice/models.py
|
from django.db import models
# Create your models here.
#class is table
class BbsUserRegister(models.Model):
    """User account row: id / password / display name (each <= 50 chars)."""
    # NOTE(review): the password is stored in clear text - consider hashing.
    user_id = models.CharField(max_length=50)
    user_pwd = models.CharField(max_length=50)
    user_name = models.CharField(max_length=50)

    def __str__(self):
        return f"{self.user_id} , {self.user_pwd} , {self.user_name}"
|
{"/Django/bbsApp_ORM practice/views.py": ["/Django/bbsApp_ORM practice/models.py"]}
|
1,617
|
jeremw264/SheetsUnlockerExcel
|
refs/heads/master
|
/model/unlockSheet.py
|
from model.log import Log
import os
import re
class UnlockSheet:
    """Strip <sheetProtection .../> elements from extracted worksheet XML.

    Operates on files previously extracted under TempExtract/ and logs
    progress through the project's Log helper.
    """

    def __init__(self, pathZip):
        self.pathZip = pathZip
        self.sheetsPath = []
        self.searchSheetPath()

    def unlock(self):
        """Rewrite every found sheet with its protection element removed."""
        for path in self.sheetsPath:
            Log().writteLog("Read xl/worksheets/" + path)
            full_path = "TempExtract/xl/worksheets/" + path
            with open(full_path, "r") as sheet:
                data = self.searchSheetProtection(sheet.read(), path)
            # BUGFIX: the original checked `data != 0`, which let an
            # implicit None (unterminated <sheetProtection tag) slip
            # through and crash in write(). A rewritten sheet is never an
            # empty string, so a plain truthiness check is safe.
            if data:
                with open(full_path, "w") as sheet:
                    sheet.write(data)
        Log().writteLog("Unlock Sheet Finish")

    def searchSheetPath(self):
        """Collect the worksheet .xml filenames; return True if any exist."""
        try:
            found = sorted(p for p in os.listdir("TempExtract/xl/worksheets")
                           if re.search(".xml", p))
            self.sheetsPath = found
            Log().writteLog("Sheet Found")
            return len(self.sheetsPath) > 0
        except FileNotFoundError:
            Log().writteLog("Error Sheet Not Found", 1)
            return False

    def searchSheetProtection(self, content, path):
        """Locate the <sheetProtection ...> element in `content`.

        Returns the document with the element removed, or False when the
        sheet carries no protection or the tag is unterminated.
        (Parameter renamed from `str`, which shadowed the builtin.)
        """
        try:
            start = content.index("<sheetProtection")
            # BUGFIX: if no ">" followed the tag, the original loop fell
            # through and returned None, which unlock() then wrote to the
            # file (TypeError). index() raising ValueError now lands in
            # the same "not found" branch instead.
            end = content.index(">", start) + 1
            Log().writteLog("Protection found")
            return self.rewriteSheet(content, [start, end], path)
        except ValueError:
            Log().writteLog("Protection not found")
            return False

    def rewriteSheet(self, content, ind, path):
        """Return `content` with the half-open span ind[0]:ind[1] removed."""
        Log().writteLog("Rewritte Sheet File in " + path)
        # Slicing replaces the original O(n) per-character string rebuild.
        return content[:ind[0]] + content[ind[1]:]
|
{"/model/unlockSheet.py": ["/model/log.py"], "/main.py": ["/model/log.py", "/model/unlockSheet.py"]}
|
1,618
|
jeremw264/SheetsUnlockerExcel
|
refs/heads/master
|
/main.py
|
import zipfile
import shutil
import os
from model.log import Log
from model.unlockSheet import UnlockSheet
filePath = "filePath"
if __name__ == "__main__":
Log().writteLog("Launch Program on " + filePath)
try:
zipPath = filePath[: len(filePath) - 4] + "zip"
os.rename(filePath, zipPath)
zf = zipfile.ZipFile(zipPath)
nameListOrigin = zf.namelist()
zf.extractall("TempExtract/")
Log().writteLog("Extract Finish")
UnlockSheet(zipPath).unlock()
with zipfile.ZipFile(zipPath, "w") as myzip:
for name in nameListOrigin:
myzip.write("TempExtract/" + name, name)
Log().writteLog("Rewritte ZIP Finish")
shutil.rmtree("TempExtract/")
os.mkdir("TempExtract")
os.rename(zipPath, filePath)
except FileNotFoundError:
Log().writteLog("File " + filePath + " not Found", 2)
|
{"/model/unlockSheet.py": ["/model/log.py"], "/main.py": ["/model/log.py", "/model/unlockSheet.py"]}
|
1,619
|
jeremw264/SheetsUnlockerExcel
|
refs/heads/master
|
/model/log.py
|
from datetime import datetime
class Log:
    """Tiny append-only logger: timestamps each message into log/log.txt
    and echoes it to stdout."""

    def __init__(self) -> None:
        # NOTE(review): assumes the log/ directory already exists;
        # writteLog raises FileNotFoundError otherwise.
        self.path = "log/log.txt"

    def writteLog(self, message, level=0):
        """Append `message` with a [dd/mm/YYYY|HH:MM:SS] stamp.

        level 1 -> "[Warning] ", level 2 -> "[Error] "; any other level
        gets no prefix. (Parameter renamed from `str`, which shadowed the
        builtin; the method name's "writte" typo is kept to avoid
        breaking call sites.)
        """
        now = datetime.now()
        prefixes = {1: "[Warning] ", 2: "[Error] "}
        levelMsg = prefixes.get(level, "")
        with open(self.path, "a") as log:
            log.write(now.strftime("[%d/%m/%Y|%H:%M:%S] ") + levelMsg + message + "\n")
        print(message)
|
{"/model/unlockSheet.py": ["/model/log.py"], "/main.py": ["/model/log.py", "/model/unlockSheet.py"]}
|
1,628
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/__init__.py
|
from .aws_lambda import xmlabs_lambda_handler
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,629
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/aws_lambda/handler.py
|
from .config import xmlabs_settings
from .env import get_environment
from functools import wraps
def xmlabs_lambda_handler(fn):
    """Decorator for AWS Lambda entry points.

    Resolves the deployment environment from the invocation, loads the
    matching settings, and invokes the wrapped handler with an extra
    `config=` keyword argument. Failures at any stage are re-raised.
    """
    @wraps(fn)
    def wrapped(*args, **kwargs):
        # args/kwargs are the usual Lambda (event, context) pair.
        env, config = None , None
        try:
            env = get_environment(*args, **kwargs)
            if not env:
                raise Exception("No Environment detected")
        except Exception as ex:
            ## TODO: Improve Exception catching here
            ## TODO: Log to cloudwatch that Getting environment failed
            raise
        try:
            config = xmlabs_settings(env)
            if not config:
                raise Exception("No Configuration found")
        except Exception as ex:
            ## TODO: Improve Exception catching
            ## TODO: Log to cloudwatch that Retrieving Settings failed
            raise
        ## Standard Invoke logging for
        #lambda_invoke_logger(*args, **kwargs)
        try:
            # The wrapped handler receives the resolved settings object.
            return fn(*args, **kwargs, config=config)
        except Exception as ex:
            # Make a standard error log to Cloudwatch for eas of capturing
            raise
    return wrapped
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,630
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/tests/test_aws_lambda_settings.py
|
import pytest
from xmlabs.aws_lambda.config import settings
def test_xmlabs_aws_lambda_config():
    """The shared settings object must import and be truthy."""
    assert settings, "xmlabs aws_lambda settings failed to load"
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,631
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/aws_lambda/__init__.py
|
from .handler import xmlabs_lambda_handler
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,632
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/aws_lambda/env.py
|
import os
import logging
logger = logging.getLogger()


def get_environment(event, context=None):
    """Resolve the deployment environment for a Lambda invocation.

    Resolution order:
      1. ENV process variable (hard override, returned verbatim).
      2. X-Environment request header (lower-cased).
      3. Lambda ARN alias, then the function-name suffix ("name_<env>").
      4. API Gateway stage variable "env", then the stage name itself.
      5. APP_ENV / DEFAULT_ENV (falling back to "dev") when the function
         was invoked without an alias.

    Raises Exception when none of the above yields an environment.

    Fixes vs. the original: removed the unreachable `return None` after
    the raise, replaced `split_arn[len(split_arn) - 1]` with `[-1]`, and
    dropped the unused exception binding.
    """
    valid_envs = ("stage", "prod", "dev")

    # APP_ENV beats DEFAULT_ENV; "dev" is the last resort.
    default_env = os.getenv("APP_ENV", os.getenv("DEFAULT_ENV", "dev"))

    ####################################
    ### ENV process-variable override ###
    ####################################
    override_env = os.getenv("ENV")
    if override_env:
        logger.info("Overriding Environment with {}".format(override_env))
        return override_env

    ####################################
    ### X-Environment header override ###
    ####################################
    headers = event.get('headers')
    if headers and headers.get("X-Environment"):
        return headers["X-Environment"].lower()

    # Parse the invoked Lambda ARN, if a usable context was supplied.
    try:
        split_arn = context.invoked_function_arn.split(':')
    except Exception:
        split_arn = None

    if split_arn:
        ####################################
        ### ARN alias (preferred)        ###
        ####################################
        alias = split_arn[-1]
        if alias in valid_envs:
            return alias.lower()
        #######################################
        ### Lambda function-name evaluation ###
        #######################################
        name_suffix = split_arn[6].split("_")[-1].lower()
        if name_suffix in valid_envs:
            return name_suffix

    ####################################
    ### Stage-variable evaluation    ###
    ####################################
    apiStageVariable = None
    if event.get("stageVariables"):
        apiStageVariable = event["stageVariables"].get("env")
    env = apiStageVariable
    apiStage = None
    if event.get("requestContext"):
        apiStage = event["requestContext"].get("stage")
    if not env:
        env = apiStage
    if apiStage and apiStageVariable and apiStage != apiStageVariable:
        # NOTE(review): "Tentrr" looks like a copy-paste from another
        # project - confirm the intended brand before changing log text.
        logger.warning("Tentrr: Using different api GW stagename and api Stage Variable is not recommended")
    if env:
        return env.lower()

    # Invoked without an alias (or without an ARN at all): use the default.
    if (not split_arn or len(split_arn) == 7) and default_env:
        return default_env
    raise Exception("Environment could not be determined")
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,633
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/dynaconf/aws_ssm_loader.py
|
import boto3
import logging
import requests
from functools import lru_cache
from dynaconf.utils.parse_conf import parse_conf_data
logger = logging.getLogger()
IDENTIFIER = 'aws_ssm'
def load(obj, env=None, silent=True, key=None, filename=None):
    """
    Reads and loads in to "obj" a single key or all keys from source
    :param obj: the settings instance
    :param env: settings current env (upper case) default='DEVELOPMENT'
    :param silent: if errors should raise
    :param key: if defined load a single key, else load all from `env`
    :param filename: Custom filename to load (useful for tests)
    :return: None
    """
    # Build the SSM path: [/<prefix>]/<env>/[<key>/]
    prefix = "/{}".format(obj.AWS_SSM_PREFIX) if obj.get("AWS_SSM_PREFIX") else ""
    segments = [prefix, env.lower()]
    if key:
        segments.append(key)
    path = "/".join(segments) + "/"
    data = _read_aws_ssm_parameters(path)
    try:
        if data:
            if key:
                # Single-key mode: parse and set just that value.
                parsed = parse_conf_data(
                    data.get(key), tomlfy=True, box_settings=obj)
                if parsed:
                    obj.set(key, parsed)
            else:
                obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
    except Exception:
        if silent:
            return False
        raise
@lru_cache
def _read_aws_ssm_parameters(path):
    """Fetch every SSM parameter under *path*, following pagination.

    Returns a dict mapping flattened names (path prefix removed, '/'
    replaced by '_') to decrypted values; an empty dict on any error.
    Results are memoized per path via lru_cache.
    """
    message = "Reading settings AWS SSM Parameter Store (Path = {}).".format(path)
    logger.debug(message)
    print(message)
    parameters = {}
    try:
        ssm = boto3.client("ssm")
        request = dict(Path=path, Recursive=True, WithDecryption=True)
        while True:
            response = ssm.get_parameters_by_path(**request)
            for param in response["Parameters"]:
                flat_name = param["Name"].replace(path, "").replace("/", "_")
                parameters[flat_name] = param["Value"]
            # NextToken present means there are more pages to fetch.
            if "NextToken" in response:
                request["NextToken"] = response["NextToken"]
            else:
                break
    except Exception as ex:
        print(
            "ERROR: Trying to read aws ssm parameters (for {}): {}!".format(
                path, str(ex)
            )
        )
        parameters = {}
    logger.debug("Read {} parameters.".format(len(parameters)))
    return parameters
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,634
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/dynaconf/aws_ec2_userdata_loader.py
|
from .base import ConfigSource
import logging
import requests
logger = logging.getLogger()
class ConfigSourceAwsEc2UserData(ConfigSource):
    """Config source meant to read EC2 user-data (loading not implemented yet)."""

    def load(self):
        # Only meaningful inside an EC2 instance; otherwise a no-op.
        if not self._running_in_ec2():
            return
        #TODO: fetch EC2 USERDATA
        raise Exception("ConfigSourceEC2UserData Load Unimplemented")

    def _running_in_ec2(self):
        """Detect EC2 by probing the instance metadata service.

        Based on https://gist.github.com/dryan/8271687 — the link-local
        metadata IP answers only from inside an instance.
        """
        instance_ip_url = "http://169.254.169.254/latest/meta-data/local-ipv4"
        try:
            requests.get(instance_ip_url, timeout=0.01)
        except requests.exceptions.RequestException:
            return False
        return True
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,635
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/example/aws_lambda/app.py
|
from xmlabs.aws_lambda import lambda_handler
@lambda_handler
def main(event, context, config):
    """Example handler: print a config value resolved for the request's env."""
    print(config.STRIPE_API_SECRET_KEY)
if __name__ == "__main__":
    # Exercise the handler across several environments (repeats intentional,
    # same sequence as before: dev, prod, dev, dev, prod).
    for environment in ("dev", "prod", "dev", "dev", "prod"):
        main({"headers": {"X-Environment": environment}}, {})
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,636
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/tests/test_aws_lambda_integration.py
|
import pytest
from xmlabs import xmlabs_lambda_handler
@xmlabs_lambda_handler
def lambda_handler(event, context, config):
    """Decorated handler used by the integration test; config must be injected."""
    assert config
def test_lambda_handler():
    """Invoke the decorated handler with an empty event and context."""
    lambda_handler({}, {})
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,637
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/xmlabs/aws_lambda/config.py
|
from dynaconf import Dynaconf
from dynaconf.constants import DEFAULT_SETTINGS_FILES
# Loader chain handed to Dynaconf (order matters: later loaders win).
LOADERS_FOR_DYNACONF = [
    'dynaconf.loaders.env_loader',  # In order to configure AWS_SSM_PREFIX we need to load it from environment first
    'xmlabs.dynaconf.aws_ssm_loader',
    'dynaconf.loaders.env_loader',  # Good to load environment last so that it takes precedence over other config
]
# Valid deployment environments for env-switched settings.
ENVIRONMENTS = ['prod', 'dev', 'stage']
# Shared Dynaconf instance configured for layered environments.
settings = Dynaconf(
    #settings_files=['settings.toml', '.secrets.toml'],
    warn_dynaconf_global_settings=True,
    load_dotenv=True,
    default_settings_paths=DEFAULT_SETTINGS_FILES,
    loaders=LOADERS_FOR_DYNACONF,
    envvar_prefix="APP",      # env vars must be prefixed APP_
    env_switcher="APP_ENV",   # env var that selects the active environment
    env='dev',                # default environment
    environments=ENVIRONMENTS,
    #environments=True,
)
def xmlabs_settings(env):
    """Return the shared settings object scoped to *env* (e.g. 'dev', 'prod')."""
    scoped = settings.from_env(env)
    return scoped
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,638
|
xmlabs-io/xmlabs-python
|
refs/heads/master
|
/tests/test_dynaconf.py
|
from dynaconf import Dynaconf
def test_dynaconf_settingsenv():
    """Dynaconf with layered environments enabled should construct cleanly."""
    env_settings = Dynaconf(environments=True)
    assert env_settings
def test_dynaconf_settings():
    """Plain Dynaconf (no environments) should construct cleanly."""
    plain_settings = Dynaconf()
    assert plain_settings
|
{"/xmlabs/__init__.py": ["/xmlabs/aws_lambda/__init__.py"], "/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/config.py", "/xmlabs/aws_lambda/env.py"], "/tests/test_aws_lambda_settings.py": ["/xmlabs/aws_lambda/config.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/example/aws_lambda/app.py": ["/xmlabs/aws_lambda/__init__.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/__init__.py"]}
|
1,639
|
Omrigan/essay-writer
|
refs/heads/master
|
/emotions.py
|
mat = [
'сука', "блять", "пиздец", "нахуй", "твою мать", "епта"]
import random
import re
# strong_emotions = re.sub('[^а-я]', ' ', open('strong_emotions').read().lower()).split()
def process(txt, ch):
    """Randomly replace inner words of *txt* with entries from ``mat``.

    Each word except the first and last is, with probability *ch*, replaced
    by a random swear word — unless the previous word ends with '.', so
    sentence starts stay clean.

    :param txt: source text (space-separated words)
    :param ch: replacement probability in [0, 1]; ch=0 returns txt unchanged
    :return: processed text

    Fixes over the previous version: a single-word input is no longer
    emitted twice, empty input round-trips, and empty tokens produced by
    double spaces no longer raise IndexError when peeking at prev[-1].
    """
    words = txt.split(" ")
    # Fewer than two words: nothing between first and last to replace.
    if len(words) < 2:
        return txt
    pieces = [words[0]]
    for i in range(1, len(words) - 1):
        prev = words[i - 1]
        # Guard `prev` non-empty before indexing its last character.
        if prev and prev[-1] != '.' and random.random() < ch:
            pieces.append(random.choice(mat))
        else:
            pieces.append(words[i])
    pieces.append(words[-1])
    return " ".join(pieces)
|
{"/essay.py": ["/emotions.py"]}
|
1,640
|
Omrigan/essay-writer
|
refs/heads/master
|
/essay.py
|
#!/usr/bin/python3
import re
import random
import pymorphy2
import json
import emotions
from plumbum import cli
# Shared morphological analyzer for inflecting Russian words.
morph = pymorphy2.MorphAnalyzer()
# Short aliases for pymorphy2 grammatical-case tags.
codes = {
    'n': 'nomn',
    'g': 'gent',
    'd': 'datv',
    'ac': 'accs',
    'a': 'ablt',
    'l': 'loct'
}
# Keyword vocabulary and canned literature arguments, loaded at import time.
keywords = set(open('keywords.txt').read().replace(' ', '').split('\n'))
arguments = json.load(open('arguments.json'))
# NOTE(review): `shuffled` appears unused in this module — confirm before removing.
shuffled = set()
def mychoise(lst):
    """Rotate *lst* one step left in place, then return a random element of it."""
    lst.append(lst.pop(0))
    return random.choice(lst)
def to_padez(val, padez):
    """Inflect Russian word *val* into case *padez* (short alias or raw tag)."""
    tag = codes.get(padez, padez)
    return morph.parse(val)[0].inflect({tag}).word
def getwordlist(s):
    """Return the Cyrillic words of *s*.

    Every non-Cyrillic character acts as a separator.

    :param s: arbitrary text; empty input yields an empty list
    :return: list of words made only of Russian letters

    The previous in-place lowercasing of s[0] was dead code (its result was
    never used — the return value came from clear_text) and crashed with
    IndexError on empty input; it has been removed.
    """
    clear_text = re.sub("[^а-яА-Я]",
                        " ",  # The pattern to replace it with
                        s)
    return clear_text.split()
class EssayBuilder:
    """Builds a formulaic Russian exam essay from a source text.

    Splits the text into sentence tokens, indexes the nouns by frequency,
    matches them against the keyword vocabulary, and assembles paragraphs
    from templated phrases (awesome_text.json) and canned literature
    arguments (arguments.json).
    """

    def __init__(self, raw_text):
        # Keep only non-trivial lines; the last one is treated as the author name.
        self.text = raw_text.split('\n')
        self.text = list(filter(lambda a: len(a)>5, self.text))
        self.author = self.text[-1]
        self.text = "".join(self.text[:-1])
        # Split into sentence-like tokens, dropping very short fragments
        # and a single leading space if present.
        self.text_tokens = list(map(lambda s: s[1:] if s[0] == ' ' else s,
                                    filter(lambda a: len(a) > 4, re.split("\.|\?|!|;", self.text))))
        words = {}
        # zip(range(10**9), ...) is effectively enumerate(self.text_tokens).
        for i, s in zip(range(10 ** 9), self.text_tokens):
            local_words = getwordlist(s)
            words_cnt = {}
            for w in local_words:
                # Look at the first two morphological parses for a noun reading.
                p = morph.parse(w)
                j = 0
                while len(p) > 0 and 'NOUN' not in p[0].tag and j < 1:
                    p = p[1:]
                    j += 1
                if len(p) > 0 and 'NOUN' in p[0].tag:
                    # Count by lemma (normal form), per sentence.
                    w = p[0].normal_form
                    if w not in words_cnt:
                        words_cnt[w] = 0
                    words_cnt[w] += 1
            # Merge per-sentence counts into the global noun index:
            # total occurrences plus (sentence_index, count) pairs.
            for w in words_cnt:
                if w not in words:
                    words[w] = {
                        'total': 0,
                        'sent': []
                    }
                words[w]['total'] += words_cnt[w]
                words[w]['sent'].append((i, words_cnt[w]))
        # All nouns ordered by global frequency (descending).
        self.all_words = sorted([{'word': w,
                                  'total': val['total'],
                                  'sent': sorted(val['sent'], key=lambda a: a[1])} for w, val in
                                 words.items()], key=lambda a: -a['total'])
        # Nouns also present in the keyword vocabulary drive the essay theme.
        self.good_words = list(filter(lambda a: a['word'] in keywords, self.all_words))
        self.samples = json.load(open('awesome_text.json'))
        # The most frequent keyword becomes the essay's base word.
        self.samples['baseword'] = [self.good_words[0]['word']]
        for s in self.samples:
            random.shuffle(self.samples[s])

    def get_str(self, val):
        """Resolve a template placeholder such as 'author' or 'speaks_g'.

        The optional suffix after '_' names a grammatical case to inflect
        into. Sample lists are rotated so repeated lookups cycle phrasings.
        """
        if val == "author":
            # 1-in-5 chance to use the literal author name instead of a phrase.
            if random.randint(0, 4) == 0: return self.author
        vals = val.split('_')
        # Rotate the sample list one step; take the element just moved to the end.
        self.samples[vals[0]] = self.samples[vals[0]][1:] + [self.samples[vals[0]][0]]
        ret = self.samples[vals[0]][-1]
        if len(vals) > 1:
            if vals[1] in codes:
                vals[1] = codes[vals[1]]
            ret = morph.parse(ret)[0].inflect({vals[1]}).word
        return ret

    def get_problem(self):
        """Opening paragraph: intro and problem-statement placeholders."""
        return ['#intro',
                "#wholeproblem"]

    def get_quatation_comment(self):
        """Comment built around a quoted sentence containing a keyword."""
        w = mychoise(self.good_words)
        s = self.text_tokens[mychoise(w['sent'])[0]]
        comment = ["#commentbegin, #author в словах \"%s\" #speaks о %s" % \
                   (s, to_padez(w['word'], 'loct'))]
        return comment

    def get_epitet(self):
        """Comment pairing a keyword with two other nouns from the same sentence."""
        noun = []
        w = None
        # Retry until a sentence with at least two other nouns is found.
        while len(noun) < 2:
            noun = []
            w = mychoise(self.good_words)
            s = self.text_tokens[mychoise(w['sent'])[0]]
            for _ in getwordlist(s):
                word = morph.parse(_)[0]
                if w['word'] != word.normal_form and 'NOUN' in word.tag:
                    noun.append(word.normal_form)
        comment = ["показывая важность понятия \"%s\", #author оперирует понятиями %s и %s" % \
                   (w['word'], to_padez(noun[0], 'g'), to_padez(noun[1], 'g'))]
        return comment

    def get_comment(self):
        """Three comment sentences drawn from the quotation/epithet generators."""
        comment_sources = [self.get_quatation_comment, self.get_epitet]
        comment = []
        for i in range(3):
            comment.extend(mychoise(comment_sources)())
        return comment

    def get_author_position(self):
        """Author's position, 'best expressed' by a random sentence of the text."""
        return ["позиция #author_g в этом фрагменте лучше всего выраженна цитатой: \"%s\"" %
                (random.choice(self.text_tokens))]

    def get_my_position(self):
        """My-position paragraph placeholder."""
        return ["#myposition"]

    def get_lit_argument(self):
        """Literature argument assembled from a canned book/hero entry."""
        curbook = mychoise(arguments)
        curarg = mychoise(curbook['args'])
        replacements = {
            'author': curbook['author'],
            'book': curbook['book'],
            'hero': curarg['hero'],
            'action': random.choice(curarg['actions'])
        }
        if curbook['native']:
            replacements['native'] = 'отечественной '
        else:
            replacements['native'] = ''
        return ["в %(native)sлитературе много примеров #baseword_g" % replacements,
                "#example, в романе %(book)s, который написал %(author)s,"
                " герой по имени %(hero)s %(action)s, показывая таким образом своё отношение к #baseword_d" % replacements]

    def get_left_argument(self):
        # Second argument: currently just another literature argument.
        return self.get_lit_argument()

    def get_conclusion(self):
        """Closing paragraph."""
        return ["#conclude0 #many в жизни зависит от #baseword_g",
                "Необходимо всегда помнить о важности этого понятия в нашей жизни"]

    def build_essay(self):
        """Assemble all paragraphs and expand #placeholders until none remain."""
        abzaces = [self.get_problem(), self.get_comment(), self.get_author_position(),
                   self.get_my_position(), self.get_lit_argument(), self.get_left_argument(), self.get_conclusion()]
        nonterm = re.compile('#[a-z0-9_]+')
        str_out_all = ''
        for a in abzaces:
            str_out = ''
            for s in a:
                # Expand known placeholders; unknown ones are kept, marked with '%'.
                while re.search(nonterm, s) is not None:
                    val = re.search(nonterm, s).group()[1:]
                    if val.split('_')[0] in self.samples:
                        s = s.replace('#' + val, self.get_str(val))
                    else:
                        s = s.replace('#' + val, '%' + val)
                str_out += s[0].upper() + s[1:] + '. '
            str_out += '\n'
            str_out_all += str_out
        return str_out_all
from sys import stdin, stdout
class MyApp(cli.Application):
    """CLI: read a text file, build an essay, optionally inject swearing.

    Switches: -e <float> emotionality, -o <path> output file,
    --new <str> use arguments-new.json instead of arguments.json.
    """
    _abuse = 0
    _output = ''

    @cli.switch(['-e'], float, help='Change emotionality')
    def abuse_lexical(self, abuse):
        self._abuse = abuse

    @cli.switch(['-o'], str, help='Output')
    def output(self, output):
        self._output = output

    @cli.switch(['--new'], str, help='New arguments')
    def new_arguments(self, args):
        # BUG FIX: this method was also named `output`, which silently
        # overwrote the '-o' switch above in the class dict; renamed so
        # both switches register with plumbum.
        global arguments
        if args:
            arguments = json.load(open('arguments-new.json'))
        else:
            arguments = json.load(open('arguments.json'))
        random.shuffle(arguments)
        print(arguments)

    def main(self, filename='text.txt'):
        raw_text = open(filename, 'r').read()
        # Default output path: <input>.out
        if self._output == '':
            self._output = filename + '.out'
        e = EssayBuilder(raw_text)
        str_out = e.build_essay()
        str_out = emotions.process(str_out, self._abuse)
        # BUG FIX: the output file was never closed, so buffered text
        # could be lost; `with` guarantees flush + close.
        with open(self._output, 'w') as out:
            out.write(str_out)
# Entry point: plumbum parses switches and dispatches to MyApp.main.
if __name__ == '__main__':
    MyApp.run()
|
{"/essay.py": ["/emotions.py"]}
|
1,642
|
sudo-dax/PythonScript_NmapToMacchange
|
refs/heads/master
|
/macch.py
|
#!/usr/bin/python
#Library
import os
import subprocess
import collections
import socket
import subnet
# Clear Screen
subprocess.call('clear', shell=True)
# Get Subnet: use the last adapter reported (typically the WiFi interface).
adapter = subnet.get_adapter_names()[-1]
Subnet = subnet.get_subnets(adapter)[0]
# Start Network Scan
print(f'Scanning {adapter} Network for Devices')
print(' ')
os.system("sudo nmap -sP " + Subnet + """ | awk '/Nmap scan report for/{printf $5;}/MAC Address:/{print " => "$3;}' | sort >> ips_macs_py.txt""")
print('Scan complete! ~~ Output in ips_macs_py.txt')
# Counting Number of connections per Device so far
# FIX: the results file was opened but never closed; `with` handles that.
c = collections.Counter()
with open("ips_macs_py.txt", "r") as data:
    for line in data:
        if ' => ' not in line:
            continue
        line = line.strip()
        ip, mac = line.split(' => ')
        c[mac] += 1
# FIX: guard against an empty scan result before indexing most_common().
if not c:
    raise SystemExit('No MAC addresses found in ips_macs_py.txt - nothing to clone')
# Changing MAC Address: clone the least-frequently-seen MAC on the network.
mac_ad = c.most_common()[-1][0]
# print(mac_ad)
print(f"Changing MAC to -1 Common on Network {mac_ad}")  # FIX: typo 'Chainging'
print("Bringing down WiFi Adapter")
os.system(f"sudo ip link set {adapter} down")
print("Bringing down Network Manager")
os.system("sudo systemctl stop NetworkManager")
os.system("sudo systemctl disable NetworkManager")
print("Changing MAC")
os.system(f"sudo macchanger -m {mac_ad} {adapter}")
print("Bringing up Network Manager")
os.system("sudo systemctl enable NetworkManager")
os.system("sudo systemctl start NetworkManager")
# FIX: this message said "Bringing down" while the adapter is brought up.
print("Bringing up WiFi Adapter")
os.system(f"sudo ip link set {adapter} up")
print("Mac Change Complete!")
|
{"/macch.py": ["/subnet.py"]}
|
1,643
|
sudo-dax/PythonScript_NmapToMacchange
|
refs/heads/master
|
/scan.py
|
#!/usr/bin/python
#Library
import os
import subprocess
import socket
# Clear Screen
subprocess.call('clear', shell=True)
# FIX: this script used `subnet` below without importing it, so it died
# with NameError at runtime (macch.py in this repo imports the same module).
import subnet
# Get Subnet
adapter = subnet.get_adapter_names()[-1]
Subnet = subnet.get_subnets(adapter)[0]
print(f'Scanning {adapter} Network for Devices')
print(' ')
# Start Network Scan
print('Scanning Network for Devices')  # FIX: typo 'Scannig'
print(' ')
os.system("sudo nmap -sP " + Subnet + """ | awk '/Nmap scan report for/{printf $5;}/MAC Address:/{print " => "$3;}' | sort >> ips_macs_py.txt""")
print('Scan complete! ~~ Output in ips_macs_py.txt')
|
{"/macch.py": ["/subnet.py"]}
|
1,644
|
sudo-dax/PythonScript_NmapToMacchange
|
refs/heads/master
|
/subnet.py
|
"""
Some helper functions to get adapter names and ipv4 subnets on that adapter
"""
import ipaddress
import ifaddr
def compressed_subnet(host, bits):
    """
    Given an ip and number of bits, (e.g. 10.0.3.1, 8), returns the compressed
    subnet mask (10.0.0.0/8)
    """
    # strict=False lets host bits be set; ip_network masks them off.
    network = ipaddress.ip_network(f"{host}/{bits}", strict=False)
    return network.compressed
def get_subnets(adapter_name='wlan0'):
    """
    Returns a list of ipv4 subnet strings for the given adapter.
    """
    adapters_by_name = {adapter.name: adapter
                        for adapter in ifaddr.get_adapters()}
    target = adapters_by_name[adapter_name]
    subnets = set()
    for ip in target.ips:
        # len(ip.ip) > 3 keeps IPv4 entries (dotted-quad strings) and skips
        # IPv6 ones — presumably; verify against the ifaddr documentation.
        if len(ip.ip) > 3:
            subnets.add(compressed_subnet(ip.ip, ip.network_prefix))
    return list(subnets)
def get_adapter_names():
    """
    Returns a list of available adapter names
    """
    names = []
    for adapter in ifaddr.get_adapters():
        names.append(adapter.name)
    return names
|
{"/macch.py": ["/subnet.py"]}
|
1,675
|
estebanfloresf/testcases
|
refs/heads/master
|
/testcases/spiders/createTestCase.py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.utils.project import get_project_settings
from ..items import TestCasesItem
from scrapy.loader import ItemLoader
class createTestCaseSpider(scrapy.Spider):
    """Scrapes the Confluence requirements table of one page into TestCasesItems.

    NOTE(review): `row.select(...)` is the long-deprecated alias of
    `row.xpath(...)` in Scrapy — confirm the pinned Scrapy version still
    provides it before upgrading.
    """
    name = "createTestCase"
    settings = get_project_settings()
    # Confluence basic-auth credentials come from project settings.
    http_user = settings.get('HTTP_USER')
    http_pass = settings.get('HTTP_PASS')
    allowed_domains = ["confluence.verndale.com"]
    start_urls = ['https://confluence.verndale.com/display/GEHC/My+Profile+Page+-+DOC']

    def parse(self, response):
        """Yield one item per table row (row 0 is the header).

        NOTE(review): a single TestCasesItem instance is created before the
        loop and mutated/yielded on every row — every yielded item shares
        the same object, so late mutations can affect earlier yields.
        """
        item = TestCasesItem()
        title = response.xpath('//*[@id="title-text"]/a/text()').extract_first()
        print('Documentation: '+title)
        table_xpath = '//*[@id="main-content"]/div/div[4]/div/div/div[1]/table/tbody/tr'
        table = response.xpath(table_xpath)
        for index, row in enumerate(table):
            if (index > 0):
                # Column 2 holds the component name(s).
                components = row.select('.//td[2]/text() | .//td[2]/p/text()').extract()
                for compName in components:
                    item['component'] = str(compName)
                    print('Verify ' + compName + ' Component')
                # This path is usually the one to be used
                component_xpath = ".//td[3][contains(@class,'confluenceTd')]"
                description = ""
                # Requirements can be links, a flat list, or a list inside a div;
                # try each layout in turn and join matches with '|'.
                if (row.select(component_xpath + "/a/text()").extract()):
                    requirements = row.select(component_xpath + "/a//text()").extract()
                    description = "|".join(requirements)
                else:
                    if (row.select(component_xpath + "/ul//*/text()").extract()):
                        requirements = row.select(component_xpath + "/ul//li//text()").extract()
                        print(requirements)
                        description = "|".join(requirements)
                    else:
                        if (row.select(component_xpath +"/div"+ "/ul//*/text()").extract()):
                            requirements = row.select(component_xpath +"/div"+ "/ul//li//text()").extract()
                            description = "|".join(requirements)
                item['requirements'] = str(description)
                yield item
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,676
|
estebanfloresf/testcases
|
refs/heads/master
|
/testcases/spiders/testSpider.py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.utils.project import get_project_settings
from ..items import TestCasesItem, Responsive, Requirements
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
class TestspiderSpider(scrapy.Spider):
    """Scrapes a Confluence requirements table into TestCasesItem objects,
    issuing follow-up requests (to the same URL) to collect responsive notes.
    """
    name = "testspider"
    settings = get_project_settings()
    # Confluence basic-auth credentials from project settings.
    http_user = settings.get('HTTP_USER')
    http_pass = settings.get('HTTP_PASS')
    allowed_domains = ["confluence.verndale.com"]

    def __init__(self, url):
        # Target page is supplied on the command line: -a url=...
        super(TestspiderSpider, self).__init__()
        self.start_urls = [url]

    def parse(self, response):
        """Walk the requirements table; one item per data row (row 0 is the header)."""
        table = response.xpath('//*[@id="main-content"]/div/div[4]/div/div/div[1]/table/tbody/tr')
        for index, row in enumerate(table):
            testcase = TestCasesItem()
            if index > 0:
                testcase['component'] = str(row.select('.//td[2]/text() | .//td[2]/p/text()').extract_first()).strip()
                # Re-request the same page (dont_filter) just to carry
                # `row`/`testcase` into the next callback via meta.
                request = Request(
                    self.start_urls[0],
                    callback=self.responsive_req,
                    errback=self.errback_httpbin,
                    dont_filter=True,
                    meta={'testcase': testcase, 'row': row}
                )
                yield request

    def responsive_req(self, response):
        """Extract responsive (per-device) requirement notes for one table row."""
        row = response.meta['row']
        testcase = response.meta['testcase']
        # NOTE(review): `list_responsive` is never used.
        list_responsive = []
        # Section Responsive Notes
        responsive_path = row.xpath(".//td[3]/div[contains(@class,'content-wrapper')]")
        path = ".//div[contains(@class,'confluence-information-macro confluence-information-macro-information conf-macro output-block')]"
        # If to see if the component has responsive requirements
        if responsive_path.xpath(path):
            for req in responsive_path.xpath(path):
                # If to see if the responsive requirements has devices
                if req.xpath(".//div/p/span/text()").extract():
                    for device in req.xpath(".//div/p/span/text()").extract():
                        # Save Devices
                        responsive = Responsive()
                        responsive['device'] = str(device).strip(':')
                        request = Request(
                            self.start_urls[0],
                            callback=self.requirements,
                            errback=self.errback_httpbin,
                            dont_filter=True,
                            meta={'responsive': responsive, 'row': row, 'testcase': testcase}
                        )
                        yield request
                else:
                    responsive = Responsive()
                    requirement = Requirements()
                    requirement_list = []
                    # NOTE(review): BUG — the single `requirement` instance is
                    # mutated and appended on every iteration, so every entry
                    # of requirement_list ends up holding the LAST description.
                    for index,req in enumerate(req.xpath(".//div/p/text()").extract()):
                        requirement['description'] = req
                        requirement_list.append(requirement)
                    responsive['requirements']=requirement_list
                    testcase['responsive'] = responsive
                    yield testcase
        else:
            yield testcase
        # testcase['responsive'] = list_responsive

    def requirements(self, response):
        """Attach placeholder requirements to the device carried in meta.

        NOTE(review): mutates the shared item but yields nothing, so the
        result is only visible through the shared `testcase` reference.
        """
        responsive = response.meta['responsive']
        testcase = response.meta['testcase']
        responsive['requirements'] = "sample"
        testcase['responsive'] = responsive
        #
        # requirements = []
        # path = ".//div[contains(@class,'confluence-information-macro-body')]//*/text()"
        #
        # for elem in response.xpath(path).extract():
        #     if (str(elem).strip(':') not in responsive['device']):
        #         requirements.append(str(elem).strip())
        #
        # responsive['requirements'] = requirements
        # # Final testcase is added the devices and requirements for each
        #
        # # After creating the item appended to the devices list
        # devices.append(responsive)
        # testcase['responsive'] = devices
        # yield testcase

    # Function for handling Errors
    def errback_httpbin(self, failure):
        # log all failures
        self.logger.error(repr(failure))
        # in case you want to do something special for some errors,
        # you may need the failure's type:
        if failure.check(HttpError):
            # these exceptions come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            self.logger.error('HttpError on %s', response.url)
        elif failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,677
|
estebanfloresf/testcases
|
refs/heads/master
|
/utils/generateTC.py
|
from openpyxl import load_workbook
#import the pandas library and aliasing as pd and numpy as np
import pandas as pd
import numpy as np
import os
class createTestCase():
    """Turns requirement sentences from files/inputTC.xlsx into
    'Verify ...' descriptions and present-tense expected results,
    exported as resultsTC.csv.
    """

    def __init__(self):
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        self.wb = load_workbook(self.dir_path+'\\files\\inputTC.xlsx')
        self.ws = self.wb['Sheet1']
        # Rows containing these phrases are kept verbatim, not prefixed with 'Verify'.
        self.commonWords = ["note:","notes:","important note:","onclick/ontap","consists of:"]
        # Future-tense phrasings rewritten to present tense for the Expected column.
        self.changeWords = [
            {"from": "will be", "to": "is"},
            {"from": "will wrap", "to": "wraps"},
            {"from": "will not be", "to": "is not"},
            {"from": "will dissapear", "to": "dissapears"},
            {"from": "will have", "to": "has"},
            {"from": "will move up", "to": "moves up"},
            {"from": "will fall back", "to": "fallbacks"},
            {"from": "will never be", "to": "is never"},
            {"from": "if", "to": "when"}
        ]
        self.verifyLst= []
        self.expectedLst= []
        # # Transform the ws into a panda dataframe
        self.df = pd.DataFrame(self.ws.values)
        # # replace None values with NA and drop them
        self.df = self.df.replace(to_replace='None', value=np.nan).dropna()
        # Promote the first row to column headers.
        header = self.df.iloc[0]
        self.df = self.df[1:]
        self.df = self.df.rename(columns = header)
        self.df = self.df.reset_index(drop=True)
        self.dfList = self.df[header].values

    def __main__(self):
        # Build both columns, then export the dataframe as CSV.
        self.createVfyLst(self.dfList)
        self.createExpLst(self.dfList)
        self.df.to_csv(self.dir_path+'\\resultsTC.csv',encoding='utf-8', index=False)

    def createVfyLst(self,dfList):
        """Build the 'Verify ...' description column from the raw requirements."""
        try:
            for req in dfList:
                band =0
                req = str(req[0]).lower()
                reqToLst = req.split(' ')
                # band flags requirements that should not get a 'Verify ' prefix.
                for word in reqToLst:
                    if(word in self.commonWords):
                        band =1
                        break
                if(band==0):
                    self.verifyLst.append("Verify "+req)
                else:
                    self.verifyLst.append(req.capitalize())
            # Find the name of the column by index
            replaceClmn = self.df.columns[0]
            # Drop that column
            self.df.drop(replaceClmn, axis = 1, inplace = True)
            # Put whatever series you want in its place
            self.df[replaceClmn] = self.verifyLst
        except ValueError:
            print("There was a problem")

    def createExpLst(self,dfList):
        """Build the Expected column (present tense) plus per-browser columns."""
        try:
            for req in dfList:
                req = str(req[0]).lower()
                # Apply only the first matching tense rewrite.
                for wordrplc in self.changeWords:
                    if(wordrplc['from'] in req):
                        req = req.replace(wordrplc['from'],wordrplc['to'] )
                        break
                self.expectedLst.append(str(req).capitalize())
            self.df['Expected'] = self.expectedLst
            # Adding columns wth -1 value for the excel testcase format
            browserList = [-1] * len(self.expectedLst)
            browserListNoApply = ['---'] * len(self.expectedLst)
            self.df['windowsIE'] = browserList
            self.df['windowsCH'] = browserList
            self.df['windowsFF'] = browserList
            self.df['macSF'] = browserListNoApply
            self.df['macCH'] = browserListNoApply
            self.df['macFF'] = browserListNoApply
            print("CSV file generated with success")
        except ValueError:
            print("There was a problem")
if __name__ == "__main__":
app = createTestCase()
app.__main__()
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,678
|
estebanfloresf/testcases
|
refs/heads/master
|
/utils/readTestCases.py
|
from openpyxl import load_workbook
import re
import json
class readFile():
    """Reads an exported test-case workbook and dumps tagged test cases to data.json."""

    def __init__(self):
        # Hard-coded source workbook path.
        path = 'C:\\Users\\Esteban.Flores\\Documents\\1 Verndale\\2 Projects\\GE-GeneralElectric\\GE TestCases\\0942-(QA) Course Registration Module.xlsx'
        self.wb = load_workbook(path, data_only=True)
        # Noise phrases stripped/normalized from component names.
        self.cleanWords = [
            {"from": "Verify", "to": ""},
            {"from": ":", "to": ""},
            {"from": "On click", "to": "cta"},
            {"from": "On hover", "to": "cta"},
            {"from": "Component", "to": ""},
            {"from": "page displays accordingly in mobile", "to": "mobile/tablet"},
            {"from": "rtf (rich text format)", "to": "verify optional content managed rtf (rich text format)"},
        ]
        # Keyword -> tag mapping; every matching keyword re-assigns the tag,
        # so the LAST match in this list wins.
        self.tagWords = [
            {"has": "text", "tag": "text"},
            {"has": "hover", "tag": "cta"},
            {"has": "click", "tag": "cta"},
            {"has": "rtf", "tag": "text"},
            {"has": "link", "tag": "link"},
            {"has": "image", "tag": "image"},
        ]
        self.final =[]

    def __main__(self):
        for a in self.wb.sheetnames:
            validSheet = re.compile('TC|Mobile')
            # validate expression to see if sheetname is an actual testcase
            if(bool(re.search(validSheet, a))):
                self.readCells(a)

    def readCells(self, sheet):
        """Collect the component name and all 'Verify ...' cells of one sheet."""
        item = {
            "component":"",
            "testcases":[]
        }
        # Get Component Name of the sheet
        item['component'] = self.cleanCell(self.wb[str(sheet)].cell(row=1,column=2).value)
        # Make a list of all the description columns
        data = [self.wb[str(sheet)].cell(
            row=i, column=2).value for i in range(13, 150)]
        counter = 0
        for cell in data:
            test = {}
            if(cell != None):
                if('Verify' in cell):
                    # Get testcase of sheet
                    test[str(counter)] = cell.lower()
                    counter+=1
                    # Get tag for each testcase
                    for tag in self.tagWords:
                        if(tag['has'] in cell):
                            test["tag"] = tag['tag']
                    if(item['component']=='mobile/tablet'):
                        test["tag"] = 'mobile'
            if(test != {}):
                item["testcases"].append(test)
        self.final.append(item)
        # NOTE(review): data.json is rewritten after every sheet; only the
        # final write matters, but each call pays the serialization cost.
        with open('data.json', 'w') as outfile:
            json.dump(self.final, outfile)

    def cleanCell(self,cell):
        """Normalize a component-name cell via cleanWords, lowercased and stripped."""
        for word in self.cleanWords:
            cell = cell.replace(word['from'],word['to'])
        cell = cell.lower()
        return cell.strip()
if(__name__ == "__main__"):
app=readFile()
app.__main__()
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,679
|
estebanfloresf/testcases
|
refs/heads/master
|
/utils/readFiles.py
|
import os
import re
# Directory containing the exported test-case workbooks.
# FIX: os.chdir() returns None; the old `path = os.chdir(...)` stored None
# and os.listdir(None) only worked by accident (it defaults to the cwd).
path = 'C://Users//503025052//Documents//GE//GE TestCases'
os.chdir(path)
filenames = os.listdir(path)
for index, filename in enumerate(filenames):
    try:
        extension = os.path.splitext(filename)[1][1:]
        if extension == 'xlsx':
            numbers = re.findall(r'\d+', str(filename))
            # FIX: guard the empty-match case — `number[0]` used to raise
            # IndexError, which the IOError handler below did not catch.
            if numbers:
                taskName = filename.replace(numbers[0], '')
                taskName = taskName.replace(extension, '')
                taskName = taskName.replace('-', '')
                taskName = taskName.replace('.', '')
                taskName = taskName.replace('(QA)', '')
                taskName = taskName.strip()
                # Jira ticket ids are offset by 3 from the workbook number.
                numberJira = int(numbers[0]) - 3
                print(str(index) + '|' + str(taskName) + '|https://jira.verndale.com/browse/GEHC-' + str(numberJira))
    except IOError:
        print('Cant change %s' % (filename))
print("All Files have been updated")
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,680
|
estebanfloresf/testcases
|
refs/heads/master
|
/testcases/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TestCasesItem(scrapy.Item):
    """One scraped component with its requirements and responsive notes."""
    component = scrapy.Field()     # component name from the Confluence table
    requirements = scrapy.Field()  # '|'-joined requirement strings
    responsive = scrapy.Field()    # Responsive item with per-device notes
    pass
class Requirements(scrapy.Item):
    """A single requirement description."""
    description = scrapy.Field()
    pass
class Responsive(scrapy.Item):
    """Responsive note for one device with its requirements."""
    device = scrapy.Field()        # device label, ':' suffix stripped by the spider
    requirements = scrapy.Field()  # list of Requirements (or placeholder string)
    pass
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,681
|
estebanfloresf/testcases
|
refs/heads/master
|
/testcases/variables.py
|
# NOTE(review): SECURITY — plaintext credentials committed to source control.
# Move these to environment variables or a secrets store and rotate the password.
USER='Esteban.Flores'
PASS='estebanFS10'
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,682
|
estebanfloresf/testcases
|
refs/heads/master
|
/testcases/main.py
|
from scrapy import cmdline
import os
import inspect
import logging
# Project root (parent of this script's directory), used to invoke the helper utils.
path = os.path.abspath(os.path.join(os.path.dirname(
    os.path.realpath(__file__)), os.pardir))  # script directory
# To generate the verified labels from the input excel (uncomment line below)
# os.system('python '+path+'\\utils\\generateTC.py')
# To Make a scrape of the confluence page (uncomment line below)
# var = input("Please enter something: ")
# print("You entered " + str(var))
# Run the Confluence scraping spider defined in spiders/createTestCase.py.
cmdline.execute("scrapy crawl createTestCase".split())
# To read excel file
# os.system('python '+path+'\\utils\\readTestCases.py')
|
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
|
1,725
|
shikharbahl/multiworld
|
refs/heads/master
|
/multiworld/envs/pygame/__init__.py
|
from gym.envs.registration import register
import logging
LOGGER = logging.getLogger(__name__)
_REGISTERED = False
def register_custom_envs():
    """Register the multiworld pygame environments with gym (idempotent)."""
    global _REGISTERED
    if _REGISTERED:
        return
    _REGISTERED = True
    LOGGER.info("Registering multiworld pygame gym environments")
    # The two variants differ only in id and the render_onscreen flag.
    for env_id, onscreen in (
        ('Point2DLargeEnv-offscreen-v0', False),
        ('Point2DLargeEnv-onscreen-v0', True),
    ):
        register(
            id=env_id,
            entry_point='multiworld.envs.pygame.point2d:Point2DEnv',
            tags={
                'git-commit-hash': '166f0f3',
                'author': 'Vitchyr'
            },
            kwargs={
                'images_are_rgb': True,
                'target_radius': 1,
                'ball_radius': 1,
                'render_onscreen': onscreen,
            },
        )
register_custom_envs()
|
{"/multiworld/envs/mujoco/__init__.py": ["/multiworld/envs/mujoco/cameras.py"]}
|
1,726
|
shikharbahl/multiworld
|
refs/heads/master
|
/multiworld/envs/mujoco/__init__.py
|
import gym
from gym.envs.registration import register
import logging
LOGGER = logging.getLogger(__name__)
# Import-time guard: gym raises on duplicate env ids, so register only once.
_REGISTERED = False


def register_custom_envs():
    """Register all multiworld mujoco environments with gym (idempotent).

    The callable entry points (create_image_* / create_Image48* below) are
    defined later in this module; they resolve because this function is
    invoked only after the module body has finished executing.
    """
    global _REGISTERED
    if _REGISTERED:
        return
    _REGISTERED = True
    LOGGER.info("Registering multiworld mujoco gym environments")
    """
    Reaching tasks
    """
    register(
        id='SawyerReachXYEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_reach:SawyerReachXYEnv',
        tags={
            'git-commit-hash': 'c5e15f7',
            'author': 'vitchyr'
        },
        kwargs={
            'hide_goal_markers': False,
        },
    )
    register(
        id='Image48SawyerReachXYEnv-v0',
        entry_point=create_image_48_sawyer_reach_xy_env_v0,
        tags={
            'git-commit-hash': 'c5e15f7',
            'author': 'vitchyr'
        },
    )
    register(
        id='Image84SawyerReachXYEnv-v0',
        entry_point=create_image_84_sawyer_reach_xy_env_v0,
        tags={
            'git-commit-hash': 'c5e15f7',
            'author': 'vitchyr'
        },
    )
    """
    Pushing tasks, XY, With Reset
    """
    register(
        id='SawyerPushAndReacherXYEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '3503e9f',
            'author': 'vitchyr'
        },
        kwargs=dict(
            hide_goal_markers=True,
            action_scale=.02,
            puck_low=[-0.25, .4],
            puck_high=[0.25, .8],
            mocap_low=[-0.2, 0.45, 0.],
            mocap_high=[0.2, 0.75, 0.5],
            goal_low=[-0.2, 0.45, 0.02, -0.25, 0.4],
            goal_high=[0.2, 0.75, 0.02, 0.25, 0.8],
        )
    )
    register(
        id='Image48SawyerPushAndReacherXYEnv-v0',
        entry_point=create_Image48SawyerPushAndReacherXYEnv_v0,
        tags={
            'git-commit-hash': '3503e9f',
            'author': 'vitchyr'
        },
    )
    register(
        id='Image48SawyerPushAndReachXYEasyEnv-v0',
        entry_point=create_image_48_sawyer_reach_and_reach_xy_easy_env_v0,
        tags={
            'git-commit-hash': 'fec148f',
            'author': 'vitchyr'
        },
    )
    register(
        id='SawyerPushXYEnv-WithResets-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '1e2652f',
            'author': 'vitchyr',
        },
        kwargs=dict(
            reward_type='puck_distance',
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=int(1e6),
            num_resets_before_hand_reset=int(1e6),
        )
    )
    register(
        id='SawyerPushAndReachXYEnv-WithResets-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '1e2652f',
            'author': 'vitchyr',
        },
        kwargs=dict(
            reward_type='state_distance',
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=int(1e6),
            num_resets_before_hand_reset=int(1e6),
        )
    )
    """
    Pushing tasks, XY, Reset Free
    """
    register(
        id='SawyerPushXYEnv-CompleteResetFree-v1',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': 'b9b5ce0',
            'author': 'murtaza'
        },
        kwargs=dict(
            reward_type='puck_distance',
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=int(1e6),
            num_resets_before_hand_reset=int(1e6),
        )
    )
    register(
        id='SawyerPushAndReachXYEnv-CompleteResetFree-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '4ba667f',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=int(1e6),
            num_resets_before_hand_reset=int(1e6),
        )
    )
    """
    Push XYZ
    """
    register(
        id='SawyerDoorPullEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door:SawyerDoorEnv',
        tags={
            'git-commit-hash': '19f2be6',
            'author': 'vitchyr'
        },
        kwargs=dict(
            goal_low=(-.25, .3, .12, -1.5708),
            goal_high=(.25, .6, .12, 0),
            action_reward_scale=0,
            reward_type='angle_difference',
            indicator_threshold=(.02, .03),
            fix_goal=False,
            fixed_goal=(0, .45, .12, -.25),
            num_resets_before_door_and_hand_reset=1,
            fixed_hand_z=0.12,
            hand_low=(-0.25, 0.3, .12),
            hand_high=(0.25, 0.6, .12),
            target_pos_scale=1,
            target_angle_scale=1,
            min_angle=-1.5708,
            max_angle=0,
            xml_path='sawyer_xyz/sawyer_door_pull.xml',
        )
    )
    """
    Door Hook Env
    """
    register(
        id='SawyerDoorHookEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': 'b5ac6f9',
            'author': 'vitchyr',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.42, 0.05, 0),
            goal_high=(0.0, 0.65, .075, 1.0472),
            hand_low=(-0.1, 0.42, 0.05),
            hand_high=(0., 0.65, .075),
            max_angle=1.0472,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
        )
    )
    register(
        id='Image48SawyerDoorHookEnv-v0',
        entry_point=create_Image48SawyerDoorHookEnv_v0,
        tags={
            'git-commit-hash': 'b5ac6f9',
            'author': 'vitchyr',
        },
    )
    register(
        id='SawyerDoorHookResetFreeEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': 'b5ac6f9',
            'author': 'vitchyr',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.42, 0.05, 0),
            goal_high=(0.0, 0.65, .075, 1.0472),
            hand_low=(-0.1, 0.42, 0.05),
            hand_high=(0., 0.65, .075),
            max_angle=1.0472,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
    register(
        id='Image48SawyerDoorHookResetFreeEnv-v0',
        entry_point=create_Image48SawyerDoorHookResetFreeEnv_v0,
        tags={
            'git-commit-hash': 'b5ac6f9',
            'author': 'vitchyr',
        },
    )
    register(
        id='SawyerDoorHookResetFreeEnv-v1',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': '333776f',
            'author': 'murtaza',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.45, 0.15, 0),
            goal_high=(0.0, 0.65, .225, 1.0472),
            hand_low=(-0.1, 0.45, 0.15),
            hand_high=(0., 0.65, .225),
            max_angle=1.0472,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
    register(
        id='Image48SawyerDoorHookResetFreeEnv-v1',
        entry_point=create_Image48SawyerDoorHookResetFreeEnv_v1,
        tags={
            'git-commit-hash': '333776f',
            'author': 'murtaza',
        },
    )
    register(
        id='SawyerDoorHookResetFreeEnv-v2',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': '2879edb',
            'author': 'murtaza',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.45, 0.15, 0),
            goal_high=(0.0, 0.65, .225, 1.0472),
            hand_low=(-0.1, 0.45, 0.15),
            hand_high=(0., 0.65, .225),
            max_angle=1.0472,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
    register(
        id='SawyerDoorHookResetFreeEnv-v3',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': 'ffdb56e',
            'author': 'murtaza',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.45, 0.15, 0),
            goal_high=(0.0, 0.65, .225, 1.0472),
            hand_low=(-0.1, 0.45, 0.15),
            hand_high=(0., 0.65, .225),
            max_angle=1.0472,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
    register(  # do not use!!!
        id='SawyerDoorHookResetFreeEnv-v4',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': 'ffdb56e',
            'author': 'murtaza',
        },
        kwargs=dict(
            goal_low=(-0.2, 0.45, 0.1, 0),
            goal_high=(0.2, 0.65, .25, 1.0472),
            hand_low=(-0.2, 0.45, 0.15),
            hand_high=(.2, 0.65, .25),
            max_angle=1.0472,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
    register(
        id='SawyerDoorHookResetFreeEnv-v5',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': 'ffdb56e',
            'author': 'murtaza',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.45, 0.1, 0),
            goal_high=(0.05, 0.65, .25, .83),
            hand_low=(-0.1, 0.45, 0.1),
            hand_high=(0.05, 0.65, .25),
            max_angle=.83,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
    register(
        id='SawyerDoorHookResetFreeEnv-v6',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_door_hook:SawyerDoorHookEnv',
        tags={
            'git-commit-hash': 'ffdb56e',
            'author': 'murtaza',
        },
        kwargs=dict(
            goal_low=(-0.1, 0.4, 0.1, 0),
            goal_high=(0.05, 0.65, .25, .93),
            hand_low=(-0.1, 0.4, 0.1),
            hand_high=(0.05, 0.65, .25),
            max_angle=.93,
            xml_path='sawyer_xyz/sawyer_door_pull_hook.xml',
            reset_free=True,
        )
    )
def create_image_48_sawyer_reach_xy_env_v0():
    """Build a 48x48 image-observation wrapper over SawyerReachXYEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_xyz_reacher_camera
    image_kwargs = dict(
        init_camera=sawyer_xyz_reacher_camera,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerReachXYEnv-v0'), 48, **image_kwargs)
def create_image_84_sawyer_reach_xy_env_v0():
    """Build an 84x84 image-observation wrapper over SawyerReachXYEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_xyz_reacher_camera
    image_kwargs = dict(
        init_camera=sawyer_xyz_reacher_camera,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerReachXYEnv-v0'), 84, **image_kwargs)
def create_image_48_sawyer_reach_and_reach_xy_easy_env_v0():
    """Build a 48x48 image wrapper over SawyerPushAndReachXYEasyEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
    image_kwargs = dict(
        init_camera=sawyer_pusher_camera_upright_v2,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerPushAndReachXYEasyEnv-v0'), 48, **image_kwargs)
def create_Image48SawyerPushAndReacherXYEnv_v0():
    """Build a 48x48 top-down image wrapper over SawyerPushAndReacherXYEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_top_down
    image_kwargs = dict(
        init_camera=sawyer_pusher_camera_top_down,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerPushAndReacherXYEnv-v0'), 48, **image_kwargs)
def create_Image48SawyerDoorHookEnv_v0():
    """Build a 48x48 image wrapper over SawyerDoorHookEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_door_env_camera_v3
    image_kwargs = dict(
        init_camera=sawyer_door_env_camera_v3,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerDoorHookEnv-v0'), 48, **image_kwargs)
def create_Image48SawyerDoorHookResetFreeEnv_v0():
    """Build a 48x48 image wrapper over SawyerDoorHookResetFreeEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_door_env_camera_v3
    image_kwargs = dict(
        init_camera=sawyer_door_env_camera_v3,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerDoorHookResetFreeEnv-v0'), 48, **image_kwargs)
def create_Image48SawyerDoorHookResetFreeEnv_v1():
    """Build a 48x48 image wrapper over SawyerDoorHookResetFreeEnv-v1."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_door_env_camera_v3
    image_kwargs = dict(
        init_camera=sawyer_door_env_camera_v3,
        transpose=True,
        normalize=True,
    )
    return ImageEnv(gym.make('SawyerDoorHookResetFreeEnv-v1'), 48, **image_kwargs)
register_custom_envs()
|
{"/multiworld/envs/mujoco/__init__.py": ["/multiworld/envs/mujoco/cameras.py"]}
|
1,727
|
shikharbahl/multiworld
|
refs/heads/master
|
/multiworld/envs/mujoco/cameras.py
|
import numpy as np
def create_sawyer_camera_init(
        lookat=(0, 0.85, 0.3),
        distance=0.3,
        elevation=-35,
        azimuth=270,
        trackbodyid=-1,
):
    """Return a camera-init callback that applies the given viewpoint settings."""
    def init(camera):
        # Copy the look-at point element-wise (camera.lookat is index-assignable).
        for axis in range(3):
            camera.lookat[axis] = lookat[axis]
        camera.distance = distance
        camera.elevation = elevation
        camera.azimuth = azimuth
        camera.trackbodyid = trackbodyid
    return init
def init_sawyer_camera_v1(camera):
    """Close-in view; do not get so close that the arm crosses the camera plane."""
    for axis, value in enumerate((0, 1, 0.3)):
        camera.lookat[axis] = value
    camera.distance = 0.35
    camera.elevation = -35
    camera.azimuth = 270
    camera.trackbodyid = -1
def init_sawyer_camera_v2(camera):
    """Near top-down view; sees through the arm."""
    for axis, value in enumerate((0, 0.8, 0.3)):
        camera.lookat[axis] = value
    camera.distance = 0.3
    camera.elevation = -65
    camera.azimuth = 270
    camera.trackbodyid = -1
def init_sawyer_camera_v3(camera):
    """Angled view of the workspace; sees through the arm."""
    for axis, value in enumerate((0, 0.85, 0.3)):
        camera.lookat[axis] = value
    camera.distance = 0.3
    camera.elevation = -35
    camera.azimuth = 270
    camera.trackbodyid = -1
def sawyer_pick_and_place_camera(camera):
    """Level side view for pick-and-place (tracks body 0)."""
    for axis, value in enumerate((0.0, .67, .1)):
        camera.lookat[axis] = value
    camera.distance = .7
    camera.elevation = 0
    camera.azimuth = 180
    camera.trackbodyid = 0
def init_sawyer_camera_v4(camera):
    """Same camera used in old experiments (circa 6/7/2018)."""
    for axis, value in enumerate((0, 0.85, 0.3)):
        camera.lookat[axis] = value
    camera.distance = 0.3
    camera.elevation = -35
    camera.azimuth = 270
    camera.trackbodyid = -1
def sawyer_pick_and_place_camera_slanted_angle(camera):
    """Slanted variant of the pick-and-place camera (tracks body 0)."""
    for axis, value in enumerate((0.0, .67, .1)):
        camera.lookat[axis] = value
    camera.distance = .65
    camera.elevation = -37.85
    camera.azimuth = 180
    camera.trackbodyid = 0
def init_sawyer_camera_v5(camera):
    """Purposely zoomed out to make the task hard."""
    for axis, value in enumerate((0, 0.85, 0.3)):
        camera.lookat[axis] = value
    camera.distance = 1
    camera.elevation = -35
    camera.azimuth = 270
    camera.trackbodyid = -1
def sawyer_xyz_reacher_camera(camera):
    """Third-person view for the Sawyer XY reacher."""
    # TODO: reformat or delete
    camera.trackbodyid = 0
    camera.distance = 1.0
    # 3rd person view: look-at point, then distance / elevation / azimuth.
    view = (0, 1.0, 0.5, 0.3, -30, 270)
    camera.lookat[0], camera.lookat[1], camera.lookat[2] = view[:3]
    camera.distance, camera.elevation, camera.azimuth = view[3:]
    camera.trackbodyid = -1
def sawyer_torque_reacher_camera(camera):
    """Third-person view for the torque-controlled Sawyer reacher."""
    # TODO: reformat or delete
    camera.trackbodyid = 0
    camera.distance = 1.0
    # 3rd person view: look-at point, then distance / elevation / azimuth.
    view = (0, 1.0, 0.65, 0.3, -30, 270)
    camera.lookat[0], camera.lookat[1], camera.lookat[2] = view[:3]
    camera.distance, camera.elevation, camera.azimuth = view[3:]
    camera.trackbodyid = -1
def sawyer_door_env_camera(camera):
    """Overhead view of the Sawyer door environment."""
    camera.trackbodyid = 0
    camera.distance = 1.0
    view = (0, 0.725, .9, 0.1, -90, 0)
    camera.lookat[0], camera.lookat[1], camera.lookat[2] = view[:3]
    camera.distance, camera.elevation, camera.azimuth = view[3:]
    camera.trackbodyid = -1
def sawyer_door_env_camera_v2(camera):
    """Overhead view of the door environment, shifted toward the door."""
    camera.trackbodyid = 0
    camera.distance = 1.0
    view = (.1, 0.55, .9, 0.1, -90, 0)
    camera.lookat[0], camera.lookat[1], camera.lookat[2] = view[:3]
    camera.distance, camera.elevation, camera.azimuth = view[3:]
    camera.trackbodyid = -1
def sawyer_door_env_camera_v3(camera):
    """Slanted third-person view of the door environment."""
    camera.trackbodyid = 0
    camera.distance = 1.0
    # 3rd person view
    view = (-.2, .55, 0.6, 0.25, -60, 360)
    camera.lookat[0], camera.lookat[1], camera.lookat[2] = view[:3]
    camera.distance, camera.elevation, camera.azimuth = view[3:]
    camera.trackbodyid = -1
def sawyer_pusher_camera_upright(camera):
    """Upright view of the pusher workspace (-50 deg elevation)."""
    camera.trackbodyid = 0
    camera.distance = .45
    for axis, value in enumerate((0, 0.85, 0.45)):
        camera.lookat[axis] = value
    camera.elevation = -50
    camera.azimuth = 270
    camera.trackbodyid = -1
def sawyer_pusher_camera_upright_v2(camera):
    """Upright view of the pusher workspace (-60 deg elevation)."""
    camera.trackbodyid = 0
    camera.distance = .45
    for axis, value in enumerate((0, 0.85, 0.45)):
        camera.lookat[axis] = value
    camera.elevation = -60
    camera.azimuth = 270
    camera.trackbodyid = -1
def sawyer_pusher_camera_top_down(camera):
    """Straight-down view of the pusher workspace."""
    camera.trackbodyid = 0
    target = (0, 0.6, .9)
    for axis in range(3):
        camera.lookat[axis] = target[axis]
    camera.distance = 0.1
    camera.elevation = -90
    camera.azimuth = 0
    camera.trackbodyid = -1
|
{"/multiworld/envs/mujoco/__init__.py": ["/multiworld/envs/mujoco/cameras.py"]}
|
1,734
|
vltanh/CaNet
|
refs/heads/master
|
/visualize.py
|
import torchvision.transforms as tvtf
from PIL import Image
import argparse
import torch
from torch import nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from one_shot_network import Res_Deeplab
from utils import load_resnet50_param, convert_image_np
import random
# plt.rcParams["figure.figsize"] = (15, 5)
# CLI: checkpoint path, reference/query image ids, 1-based VOC class id,
# number of refinement iterations, attention flag, and dataset root.
parser = argparse.ArgumentParser()
parser.add_argument('--gpus', default='0')
parser.add_argument('--weight')
parser.add_argument('--refid')
parser.add_argument('--queid')
parser.add_argument('--classid', type=int)
parser.add_argument('--niters', default=5, type=int)
parser.add_argument('--a', action='store_true')
parser.add_argument('--root', type=str)
args = parser.parse_args()
# ImageNet normalization statistics used by the ResNet backbone.
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
def set_seed(seed):
    """Seed numpy, stdlib random, and torch identically for reproducibility."""
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(seed)
# Build CaNet (2-way segmentation), load the pretrained backbone and checkpoint.
model = Res_Deeplab(num_classes=2, use_attn=args.a)
model = load_resnet50_param(model, stop_layer='layer4')
model = nn.DataParallel(model, [0])
model.load_state_dict(torch.load(args.weight))
model.cuda()
model.eval()
# PASCAL VOC class names; args.classid indexes this list 1-based.
CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
           'bus', 'car', 'cat', 'chair', 'cow',
           'diningtable', 'dog', 'horse', 'motorbike', 'person',
           'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
root = args.root
ref_img_path = root + '/JPEGImages/' + args.refid + '.jpg'
ref_mask_path = root + '/Annotations/' + \
    CLASSES[args.classid - 1] + '/' + args.refid + '.png'
que_img_path = root + '/JPEGImages/' + args.queid + '.jpg'
niters = args.niters
with torch.no_grad():
    ref_img = Image.open(ref_img_path).convert('RGB')
    ref_mask = Image.open(ref_mask_path).convert('P')
    query_img = Image.open(que_img_path).convert('RGB')
    tf = tvtf.Compose([
        tvtf.ToTensor(),
        tvtf.Normalize(IMG_MEAN, IMG_STD),
    ])
    ref_img = tf(ref_img).unsqueeze(0).cuda()
    # Binarize the palette mask: any non-zero label is foreground.
    ref_mask = torch.FloatTensor(
        np.array(ref_mask) > 0).unsqueeze(0).unsqueeze(0).cuda()
    query_img = tf(query_img).unsqueeze(0).cuda()
    # History mask starts as all zeros and is refined over niters iterations.
    history_mask = torch.zeros(1, 2, 41, 41).cuda()
    # One panel for the reference, one per refinement iteration.
    fig, ax = plt.subplots(1, niters+1)
    ax[0].imshow(convert_image_np(ref_img[0].cpu()))
    ax[0].imshow(ref_mask[0, 0].cpu(), alpha=0.5)
    # ax[0].set_title('Reference')
    ax[0].set_xticks([])
    ax[0].set_yticks([])
    for i in range(niters):
        out = model(query_img, ref_img, ref_mask, history_mask)
        # Feed the softmax of this iteration back in as the next history mask.
        history_mask = torch.softmax(out, dim=1)
        pred = F.interpolate(history_mask, size=query_img.shape[-2:],
                             mode='bilinear',
                             align_corners=True)
        pred = torch.argmax(pred, dim=1)
        ax[1+i].imshow(convert_image_np(query_img[0].cpu()))
        ax[1+i].imshow(pred[0].cpu(), alpha=0.5)
        # ax[1+i].set_title(f'Query')
        ax[1+i].set_xticks([])
        ax[1+i].set_yticks([])
    fig.tight_layout()
    plt.show()
    plt.close()
|
{"/visualize.py": ["/one_shot_network.py", "/utils.py"], "/val.py": ["/utils.py", "/one_shot_network.py"], "/one_shot_network.py": ["/utils.py"], "/train.py": ["/utils.py", "/one_shot_network.py"]}
|
1,735
|
vltanh/CaNet
|
refs/heads/master
|
/val.py
|
from torch.utils import data
import torch.optim as optim
import torch.backends.cudnn as cudnn
import os.path as osp
from utils import *
import time
import torch.nn.functional as F
import tqdm
import random
import argparse
from dataset_mask_train import Dataset as Dataset_train
from dataset_mask_val import Dataset as Dataset_val
import os
import torch
from one_shot_network import Res_Deeplab
import torch.nn as nn
import numpy as np
# Command-line options for multi-seed one-shot evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('-fold',
                    type=int,
                    help='fold',
                    default=0)
parser.add_argument('-gpu',
                    type=str,
                    help='gpu id to use',
                    default='0,1')
parser.add_argument('-iter_time',
                    type=int,
                    default=5)
parser.add_argument('-w',
                    type=str,
                    help='path to weight file')
parser.add_argument('-d',
                    type=str,
                    help='path to dataset')
parser.add_argument('-s',
                    type=int,
                    help='random seed',
                    default=3698)
parser.add_argument('-a',
                    action='store_true',
                    help='use attention or not')
parser.add_argument('-p',
                    type=int,
                    help='number of exps')
options = parser.parse_args()
def set_seed(seed):
    """Make the numpy, stdlib-random and torch RNGs deterministic."""
    seeders = [np.random.seed, random.seed, torch.manual_seed]
    for fn in seeders:
        fn(seed)
# GPU-related setup: fix device ordering/visibility before CUDA initializes.
gpu_list = [int(x) for x in options.gpu.split(',')]  # NOTE(review): unused below
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = options.gpu
cudnn.enabled = True
# ImageNet normalization statistics used by the ResNet backbone.
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
num_class = 2
input_size = (321, 321)
# Create network.
model = Res_Deeplab(num_classes=num_class, use_attn=options.a)
model = load_resnet50_param(model, stop_layer='layer4')
model = nn.DataParallel(model, [0])
model.load_state_dict(torch.load(options.w))
model.cuda()
# Draw options.p evaluation seeds deterministically from the base seed.
set_seed(options.s)
seeds = random.sample(range(10**5), options.p)
print(seeds)
final_iou = []
for s in seeds:
    set_seed(s)
    valset = Dataset_val(data_dir=options.d, fold=options.fold,
                         input_size=input_size,
                         normalize_mean=IMG_MEAN, normalize_std=IMG_STD,
                         is_train=False)
    valloader = data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=4,
                                drop_last=False)
    iou_list = []
    highest_iou = 0
    begin_time = time.time()
    with torch.no_grad():
        print('----Evaluation----')
        model = model.eval()
        valset.history_mask_list = [None] * 1000
        best_iou = 0
        for eva_iter in range(options.iter_time):
            save_root = f'viz{options.fold}_{options.a}'
            save_dir = f'{save_root}/{eva_iter}'
            os.makedirs(save_dir, exist_ok=True)
            # f = open(
            #     f'{save_root}/score{options.fold}_{eva_iter}.csv', 'w')
            # f.write('support,query,class,score\n')
            # Per-class inter/union accumulators for the 5 classes of this fold.
            all_inter, all_union, all_predict = [0] * 5, [0] * 5, [0] * 5
            for i_iter, batch in enumerate(tqdm.tqdm(valloader)):
                # if i_iter != 55:
                #     continue
                query_rgb, query_mask, support_rgb, support_mask, history_mask, sample_class, index, support_name, query_name = batch
                query_rgb = (query_rgb).cuda(0)
                support_rgb = (support_rgb).cuda(0)
                support_mask = (support_mask).cuda(0)
                # change formation for crossentropy use
                query_mask = (query_mask).cuda(0).long()
                # remove the second dim,change formation for crossentropy use
                query_mask = query_mask[:, 0, :, :]
                history_mask = (history_mask).cuda(0)
                pred = model(query_rgb, support_rgb,
                             support_mask, history_mask)
                pred_softmax = F.softmax(pred, dim=1).data.cpu()
                # update history mask for the next refinement iteration
                for j in range(support_mask.shape[0]):
                    sub_index = index[j]
                    valset.history_mask_list[sub_index] = pred_softmax[j]
                pred = nn.functional.interpolate(pred, size=query_mask.shape[-2:], mode='bilinear',
                                                 align_corners=True)  # upsample # upsample
                _, pred_label = torch.max(pred, 1)
                # plt.subplot(1, 2, 1)
                # plt.imshow(convert_image_np(support_rgb[0].cpu()))
                # plt.imshow(support_mask[0][0].cpu(), alpha=0.5)
                # plt.subplot(1, 2, 2)
                # plt.imshow(convert_image_np(query_rgb[0].cpu()))
                # plt.imshow(pred_label[0].cpu(), alpha=0.5)
                # plt.tight_layout()
                # plt.savefig(f'{save_dir}/{i_iter:03d}')
                # # plt.show()
                # plt.close()
                _, pred_label = torch.max(pred, 1)
                inter_list, union_list, _, num_predict_list = get_iou_v1(
                    query_mask, pred_label)
                # f.write(
                #     f'{support_name[0]},{query_name[0]},{sample_class[0]},{float(inter_list[0])/union_list[0]}\n')
                for j in range(query_mask.shape[0]):  # batch size
                    all_inter[sample_class[j] -
                              (options.fold * 5 + 1)] += inter_list[j]
                    all_union[sample_class[j] -
                              (options.fold * 5 + 1)] += union_list[j]
            IOU = [0] * 5
            for j in range(5):
                IOU[j] = all_inter[j] / all_union[j]
            mean_iou = np.mean(IOU)
            print(IOU)
            print('IOU:%.4f' % (mean_iou))
            # if mean_iou > best_iou:
            #     best_iou = mean_iou
            # f.close()
            # Report the IOU of the final refinement iteration.
            best_iou = mean_iou
    print('IOU for this epoch: %.4f' % (best_iou))
    final_iou.append(best_iou)
    epoch_time = time.time() - begin_time
    print('This epoch takes:', epoch_time, 'second')
# Mean and standard deviation of the per-seed IOUs.
print(np.mean(final_iou), np.std(final_iou))
|
{"/visualize.py": ["/one_shot_network.py", "/utils.py"], "/val.py": ["/utils.py", "/one_shot_network.py"], "/one_shot_network.py": ["/utils.py"], "/train.py": ["/utils.py", "/one_shot_network.py"]}
|
1,736
|
vltanh/CaNet
|
refs/heads/master
|
/utils.py
|
import torchvision
import os
import torch
import torch.nn as nn
from pylab import plt
import numpy as np
def convert_image_np(inp):
    """Undo ImageNet normalization and convert a CHW tensor to an HWC numpy image."""
    image = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    return np.clip(image * std + mean, 0, 1)
def load_resnet50_param(model, stop_layer='layer4'):
    """Copy ImageNet-pretrained ResNet-50 weights into *model* up to *stop_layer*.

    Iterates the pretrained state dict in order and copies every entry until
    the first key belonging to *stop_layer*, then stops.  NOTE(review): this
    relies on the state dict preserving layer order and on every earlier key
    existing in *model* — confirm before changing either model.  Downloads
    the torchvision checkpoint on first use.
    """
    resnet50 = torchvision.models.resnet50(pretrained=True)
    saved_state_dict = resnet50.state_dict()
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == stop_layer:
            # Copy this pretrained tensor into the target state dict.
            new_params['.'.join(i_parts)] = saved_state_dict[i]
        else:
            # First stop_layer key reached: all remaining keys are skipped too.
            break
    model.load_state_dict(new_params)
    # Leave the model in train mode for fine-tuning.
    model.train()
    return model
def check_dir(checkpoint_dir):
    """Ensure *checkpoint_dir* and its 'model' / 'pred_img' subdirs exist.

    The original only created the subdirectories when *checkpoint_dir* itself
    was missing, so a pre-existing root with missing subdirs was silently left
    broken, and the exists-then-create pattern raced with concurrent runs.
    Using makedirs(exist_ok=True) per subdirectory fixes both: parents are
    created as needed and re-running is a no-op.
    """
    for sub in ('model', 'pred_img'):
        os.makedirs(os.path.join(checkpoint_dir, sub), exist_ok=True)
def optim_or_not(model, yes):
    """Set requires_grad on every parameter of *model* (True when *yes* is truthy)."""
    flag = True if yes else False
    for param in model.parameters():
        param.requires_grad = flag
def turn_off(model):
    """Freeze the shared backbone (conv1, bn1, layer1-3) of a DataParallel model."""
    backbone = (model.module.conv1, model.module.bn1, model.module.layer1,
                model.module.layer2, model.module.layer3)
    for module in backbone:
        for param in module.parameters():
            param.requires_grad = False
def get_10x_lr_params(model):
    """Yield the parameters of the newly-added head layers (trained at 10x LR)."""
    net = model.module
    if net.use_attn:
        heads = [net.layer5_K, net.layer5_V]
    else:
        heads = [net.layer5]
    heads += [net.layer55, net.layer6_0, net.layer6_1, net.layer6_2,
              net.layer6_3, net.layer6_4, net.layer7, net.layer9,
              net.residule1, net.residule2, net.residule3]
    for module in heads:
        for param in module.parameters():
            yield param
def loss_calc_v1(pred, label, gpu):
    """Pixel-wise cross-entropy on GPU *gpu*; label value 255 marks ignored pixels."""
    criterion = torch.nn.CrossEntropyLoss(ignore_index=255).cuda(gpu)
    return criterion(pred, label.long())
def plot_loss(checkpoint_dir, loss_list, save_pred_every):
    """Save the training-loss curve to <checkpoint_dir>/loss_fig.pdf."""
    plt.switch_backend('agg')
    count = len(loss_list)
    steps = range(0, count * save_pred_every, save_pred_every)
    plt.plot(steps, loss_list, color='blue', marker='.', label='Train loss')
    plt.xticks(
        range(0, count * save_pred_every + 3,
              (count * save_pred_every + 10) // 10)
    )
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(checkpoint_dir, 'loss_fig.pdf'))
    plt.close()
def plot_iou(checkpoint_dir, iou_list):
    """Save the validation IOU curve to <checkpoint_dir>/iou_fig.pdf."""
    plt.switch_backend('agg')
    count = len(iou_list)
    plt.plot(range(count), iou_list, color='red', marker='.', label='IOU')
    plt.xticks(range(0, count + 3, (count + 10) // 10))
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(checkpoint_dir, 'iou_fig.pdf'))
    plt.close()
def get_iou_v1(query_mask, pred_label, mode='foreground'):
    """Per-sample intersection/union statistics between GT masks and predictions.

    Returns four parallel lists (one entry per batch element): intersection
    pixel counts, union pixel counts, IOU (0.0 when the union is empty), and
    the number of predicted-positive pixels.  With mode='background' both
    masks are inverted first.
    """
    if mode == 'background':
        query_mask = 1 - query_mask
        pred_label = 1 - pred_label
    inter_list, union_list, iou_list, num_predict_list = [], [], [], []
    for gt, pred in zip(query_mask, pred_label):
        num_predict = (pred > 0).sum().float().item()
        # Summing binary masks: 2 marks intersection pixels, 1 marks
        # pixels belonging to exactly one mask.
        overlap = gt + pred
        inter = (overlap == 2).sum().float().item()
        union = inter + (overlap == 1).sum().float().item()
        inter_list.append(inter)
        union_list.append(union)
        num_predict_list.append(num_predict)
        iou_list.append(inter / union if union != 0 else 0.0)
    return inter_list, union_list, iou_list, num_predict_list
|
{"/visualize.py": ["/one_shot_network.py", "/utils.py"], "/val.py": ["/utils.py", "/one_shot_network.py"], "/one_shot_network.py": ["/utils.py"], "/train.py": ["/utils.py", "/one_shot_network.py"]}
|
1,737
|
vltanh/CaNet
|
refs/heads/master
|
/one_shot_network.py
|
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
import math
from utils import convert_image_np
# code of dilated convolution part is referenced from https://github.com/speedinghzl/Pytorch-Deeplab
# BatchNorm layers are built with learnable affine parameters; those parameters
# are frozen per-parameter inside the blocks below.
affine_par = True
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 dilated -> 1x1), Deeplab variant.

    All BatchNorm parameters are frozen (requires_grad=False) so the
    pretrained backbone normalization is kept during fine-tuning.
    """
    # Output channels are planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduction; the stride is applied here.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1,
                               stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        # 3x3 conv; padding equals dilation so spatial size is preserved.
        padding = dilation
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=padding, dilation=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False
        # 1x1 expansion back to planes * 4 channels.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
        for i in self.bn3.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(identity + F(x)); the identity is downsampled if given."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Memory(nn.Module):
    """Attention read: match query keys against memory keys, return weighted values."""

    def __init__(self):
        super(Memory, self).__init__()

    def forward(self, m_k, m_v, q_k):
        """Return (read values [B, Dv, Hq, Wq], attention map [B, Hm*Wm, Hq*Wq]).

        m_k: memory keys   (B, Dk, Hm, Wm)
        m_v: memory values (B, Dv, Hm, Wm)
        q_k: query keys    (B, Dk, Hq, Wq)
        """
        B, Dk, Hm, Wm = m_k.size()
        Hq, Wq = q_k.size()[-2:]
        Dv = m_v.size(1)
        keys = m_k.reshape(B, Dk, Hm * Wm).transpose(1, 2)   # B, Hm*Wm, Dk
        queries = q_k.reshape(B, Dk, Hq * Wq)                # B, Dk, Hq*Wq
        # Scaled dot-product affinity, normalized over memory locations.
        affinity = torch.bmm(keys, queries) / math.sqrt(Dk)
        affinity = F.softmax(affinity, dim=1)
        values = m_v.reshape(B, Dv, Hm * Wm)
        read = torch.bmm(values, affinity).reshape(B, Dv, Hq, Wq)
        return read, affinity
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes, use_attn):
self.inplanes = 64
self.use_attn = use_attn
super(ResNet, self).__init__()
# ResNet-50 (Deeplab variant)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7,
stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3,
stride=2, padding=1, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1],
stride=2)
self.layer3 = self._make_layer(block, 256, layers[2],
stride=1, dilation=2)
#self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
# Key-Value generator
if not self.use_attn:
self.layer5 = nn.Sequential(
nn.Conv2d(in_channels=1536, out_channels=256, kernel_size=3,
stride=1, padding=2, dilation=2, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
else:
self.layer5_K = nn.Sequential(
nn.Conv2d(in_channels=1536, out_channels=256, kernel_size=3,
stride=1, padding=2, dilation=2, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
self.layer5_V = nn.Sequential(
nn.Conv2d(in_channels=1536, out_channels=256, kernel_size=3,
stride=1, padding=2, dilation=2, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
# Memory augmented feature map post-process
self.layer55 = nn.Sequential(
nn.Conv2d(in_channels=256 * 2, out_channels=256, kernel_size=3,
stride=1, padding=2, dilation=2, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
# ASPP
self.layer6_0 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=1,
stride=1, padding=0, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
self.layer6_1 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=1,
stride=1, padding=0, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
self.layer6_2 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=6, dilation=6, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
self.layer6_3 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=12, dilation=12, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
self.layer6_4 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=18, dilation=18, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
self.layer7 = nn.Sequential(
nn.Conv2d(1280, 256, kernel_size=1,
stride=1, padding=0, bias=True),
nn.ReLU(),
nn.Dropout2d(p=0.5),
)
# Decoder (Iterative Optimization Module)
self.residule1 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(256+2, 256, kernel_size=3,
stride=1, padding=1, bias=True),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=1, bias=True)
)
self.residule2 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=1, bias=True),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=1, bias=True)
)
self.residule3 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=1, bias=True),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3,
stride=1, padding=1, bias=True)
)
# Prediction
self.layer9 = nn.Conv2d(
256, num_classes, kernel_size=1, stride=1, bias=True)
# Memory
self.memory = Memory()
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, downsample=None):
if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, affine=affine_par)
)
for i in downsample._modules['1'].parameters():
i.requires_grad = False
layers = []
layers.append(block(self.inplanes, planes,
stride, dilation=dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, query_rgb, support_rgb, support_mask, history_mask, vis_attn=False):
    """One-shot segmentation forward pass.

    Runs the query and the support image through the shared ResNet trunk
    (up to layer3), masks the support features with the support mask,
    combines them with the query features either by plain dense comparison
    or via the key/value memory attention (``self.use_attn``), then refines
    the result with residual blocks + ASPP and predicts class logits.

    Args:
        query_rgb: query image batch.
        support_rgb: support (reference) image batch.
        support_mask: mask of the target object in the support image.
        history_mask: prediction from the previous IOM iteration, fed back
            into the decoder through ``residule1``.
        vis_attn: if True, dump one attention visualization per query
            position into ``viz/`` (debugging only; requires matplotlib).

    Returns:
        Raw logits at feature resolution (callers upsample them).
    """
    # Keep untouched copies for the optional visualization below.
    # NOTE(review): ref_mask is currently unused in this method.
    ref_img = support_rgb.clone()
    ref_mask = support_mask.clone()
    query_img = query_rgb.clone()
    #print('Input:', query_img.shape)
    # === Query feature extraction
    query_rgb = self.conv1(query_rgb)
    #print('Conv 0:', query_rgb.shape)
    query_rgb = self.bn1(query_rgb)
    query_rgb = self.relu(query_rgb)
    query_rgb = self.maxpool(query_rgb)
    #print('Layer 0:', query_rgb.shape)
    query_rgb = self.layer1(query_rgb)
    #print('Layer 1:', query_rgb.shape)
    query_rgb = self.layer2(query_rgb)
    #print('Layer 2:', query_rgb.shape)
    query_feat_layer2 = query_rgb
    query_rgb = self.layer3(query_rgb)
    #print('Layer 3:', query_rgb.shape)
    # layer4 intentionally skipped: mid-level features only.
    # query_rgb = self.layer4(query_rgb)
    # Concatenate layer2 and layer3 features along the channel axis.
    query_rgb_ = torch.cat([query_feat_layer2, query_rgb], dim=1)
    feature_size = query_rgb_.shape[-2:]
    #print('Encoder:', query_rgb_.shape)
    # === Query key-value generation
    if not self.use_attn:
        query_rgb = self.layer5(query_rgb_)
    else:
        # Separate key/value projections for the attention-based read.
        query_rgb_K = self.layer5_K(query_rgb_)
        query_rgb_V = self.layer5_V(query_rgb_)
    #print('Key/Value:', query_rgb_K.shape)
    # === Reference feature extraction (same trunk weights as the query)
    support_rgb = self.conv1(support_rgb)
    support_rgb = self.bn1(support_rgb)
    support_rgb = self.relu(support_rgb)
    support_rgb = self.maxpool(support_rgb)
    support_rgb = self.layer1(support_rgb)
    support_rgb = self.layer2(support_rgb)
    support_feat_layer2 = support_rgb
    support_rgb = self.layer3(support_rgb)
    #support_rgb = self.layer4(support_rgb)
    support_rgb_ = torch.cat([support_feat_layer2, support_rgb], dim=1)
    # === Reference key-value generation
    if not self.use_attn:
        support_rgb = self.layer5(support_rgb_)
    else:
        support_rgb_K = self.layer5_K(support_rgb_)
        support_rgb_V = self.layer5_V(support_rgb_)
    # === Dense comparison OR Memory read
    # Downsample the support mask to the support feature resolution.
    # NOTE(review): in the attention branch support_rgb is still the layer3
    # output here (layer5 was not applied to it) — shapes presumably match;
    # confirm against the projection layers' strides.
    support_mask = F.interpolate(support_mask, support_rgb.shape[-2:],
                                 mode='bilinear', align_corners=True)
    if not self.use_attn:
        # Masked support features serve as both key and value.
        z = support_mask * support_rgb
        z, viz = self.memory(z, z, query_rgb)
        out = torch.cat([query_rgb, z], dim=1)
    else:
        z_K = support_mask * support_rgb_K
        z_V = support_mask * support_rgb_V
        z, viz = self.memory(z_K, z_V, query_rgb_K)
        out = torch.cat([query_rgb_V, z], dim=1)
    #print(out.shape)
    if vis_attn:
        import matplotlib.pyplot as plt
        # One figure per query position: left = query with the position
        # highlighted, right = support image with its attention map.
        for i in range(viz.size(2)):
            m = torch.zeros(query_rgb.shape[-2], query_rgb.shape[-1])
            m[i // query_rgb.shape[-1], i % query_rgb.shape[-1]] = 1
            m = F.interpolate(m.unsqueeze(0).unsqueeze(
                0), (query_img.shape[-2], query_img.shape[-1])).squeeze(0).squeeze(0)
            # f = query_img[0].permute(1, 2, 0).detach().cpu()
            plt.figure(figsize=(16, 8), dpi=100)
            plt.subplot(1, 2, 1)
            plt.imshow(convert_image_np(query_img[0].cpu()))
            plt.imshow(m, alpha=0.5)
            plt.xticks([])
            plt.yticks([])
            plt.subplot(1, 2, 2)
            v = viz[0, :, i].reshape(
                support_rgb.shape[-2], support_rgb.shape[-1]).detach().cpu()
            v = F.interpolate(v.unsqueeze(
                0).unsqueeze(0), (ref_img.shape[-2], ref_img.shape[-1])).squeeze(0).squeeze(0)
            f = ref_img[0].detach().cpu()
            plt.imshow(convert_image_np(f))
            plt.imshow(v, alpha=0.5)
            plt.xticks([])
            plt.yticks([])
            plt.tight_layout()
            plt.savefig(f'viz/{i:04d}')
            # plt.show()
            plt.close()
    # === Decoder
    # Residue blocks: inject the previous iteration's prediction.
    history_mask = F.interpolate(history_mask, feature_size,
                                 mode='bilinear', align_corners=True)
    out = self.layer55(out)
    out_plus_history = torch.cat([out, history_mask], dim=1)
    out = out + self.residule1(out_plus_history)
    out = out + self.residule2(out)
    out = out + self.residule3(out)
    #print('ResBlocks:', out.shape)
    # ASPP: global pooled branch + four parallel (dilated) conv branches.
    global_feature = F.avg_pool2d(out, kernel_size=feature_size)
    global_feature = self.layer6_0(global_feature)
    global_feature = global_feature.expand(-1, -1,
                                           feature_size[0], feature_size[1])
    out = torch.cat([global_feature,
                     self.layer6_1(out),
                     self.layer6_2(out),
                     self.layer6_3(out),
                     self.layer6_4(out)],
                    dim=1)
    out = self.layer7(out)
    #print('ASPP:', out.shape)
    # === Prediction
    out = self.layer9(out)
    #print('Output:', out.shape)
    return out
def Res_Deeplab(num_classes=2, use_attn=False):
    """Factory: ResNet-50 based CaNet (Bottleneck blocks, layout [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes, use_attn)
|
{"/visualize.py": ["/one_shot_network.py", "/utils.py"], "/val.py": ["/utils.py", "/one_shot_network.py"], "/one_shot_network.py": ["/utils.py"], "/train.py": ["/utils.py", "/one_shot_network.py"]}
|
1,738
|
vltanh/CaNet
|
refs/heads/master
|
/train.py
|
from torch.utils import data
import torch.optim as optim
import torch.backends.cudnn as cudnn
import os.path as osp
from utils import *
import time
import torch.nn.functional as F
import tqdm
import random
import argparse
from dataset_mask_train import Dataset as Dataset_train
from dataset_mask_val import Dataset as Dataset_val
import os
import torch
from one_shot_network import Res_Deeplab
import torch.nn as nn
import numpy as np
# === Parse CMD arguments
# Training hyper-parameters and run configuration; every option has a
# default except -data (path to the dataset root).
parser = argparse.ArgumentParser()
parser.add_argument('-lr',
                    type=float,
                    help='learning rate',
                    default=0.00025)
parser.add_argument('-prob',
                    type=float,
                    help='dropout rate of history mask',
                    default=0.7)
parser.add_argument('-bs',
                    type=int,
                    help='batch size in training',
                    default=4)
# Cross-validation fold index (classes fold*5+1 .. fold*5+5 are held out).
parser.add_argument('-fold',
                    type=int,
                    help='fold',
                    default=0)
parser.add_argument('-gpu',
                    type=str,
                    help='gpu id to use',
                    default='0,1')
# Number of Iterative Optimization Module refinement passes at validation.
parser.add_argument('-iter_time',
                    type=int,
                    help='number of iterations for the IOM',
                    default=5)
parser.add_argument('-data',
                    type=str,
                    help='path to the dataset folder')
# -attn switches the model to the separate key/value (attention) variant,
# forwarded as use_attn to Res_Deeplab below.
parser.add_argument('-attn',
                    action='store_true',
                    help='whether or not to separate')
options = parser.parse_args()
def set_seed(seed):
    """Seed every RNG used here (numpy, stdlib random, torch) for reproducibility."""
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(seed)
def set_determinism():
    """Force deterministic cuDNN behavior (no autotuned, non-deterministic kernels)."""
    backend = torch.backends.cudnn
    backend.benchmark = False
    backend.deterministic = True
# === Constants/Variables
# ImageNet channel statistics used to normalize input images.
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
num_class = 2  # binary segmentation: foreground vs. background
num_epoch = 200
learning_rate = options.lr  # base LR; the optimized params below use 10x this
input_size = (321, 321)  # fixed training crop size (H, W)
batch_size = options.bs
weight_decay = 0.0005
momentum = 0.9
# === GPU-related
# NOTE(review): gpu_list is parsed but never used below; DataParallel is
# pinned to device [0] regardless of -gpu. Confirm multi-GPU is intended.
gpu_list = [int(x) for x in options.gpu.split(',')]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = options.gpu
cudnn.enabled = True
# === Log directory
checkpoint_dir = 'checkpoint/fo=%d/' % options.fold
check_dir(checkpoint_dir)  # presumably creates the directory — see utils.check_dir
# === Network architecture
set_seed(3698)
model = Res_Deeplab(num_classes=num_class, use_attn=options.attn)
# Load pretrained ResNet-50 weights up to (excluding) layer4.
model = load_resnet50_param(model, stop_layer='layer4')
model = nn.DataParallel(model, [0])
turn_off(model)  # presumably freezes backbone params — see utils.turn_off
# === Dataset
# Train
set_seed(3698)
dataset = Dataset_train(data_dir=options.data, fold=options.fold,
                        input_size=input_size,
                        normalize_mean=IMG_MEAN, normalize_std=IMG_STD,
                        prob=options.prob)
trainloader = data.DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)
# Validation
set_seed(3698)
valset = Dataset_val(data_dir=options.data, fold=options.fold,
                     input_size=input_size,
                     normalize_mean=IMG_MEAN, normalize_std=IMG_STD,
                     is_train=True)
valloader = data.DataLoader(valset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=4)
# Interval (in iterations) for saving the running training loss.
save_pred_every = len(trainloader)
# === Optimizer
# Only the params returned by get_10x_lr_params are optimized, at 10x the
# base learning rate (presumably the newly added head layers — verify).
optimizer = optim.SGD([{'params': get_10x_lr_params(model),
                        'lr': 10 * learning_rate}],
                      lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
loss_list = []     # mean training loss per save interval
iou_list = []      # best validation IOU per epoch
highest_iou = 0    # best validation IOU across all epochs so far
model.cuda()
tempory_loss = 0   # running loss accumulator between save points
model = model.train()
best_epoch = 0
for epoch in range(0, num_epoch):
    begin_time = time.time()
    # === Train stage
    model.train()
    tqdm_gen = tqdm.tqdm(trainloader)
    for i_iter, batch in enumerate(tqdm_gen):
        query_rgb, query_mask, support_rgb, support_mask, history_mask, sample_class, index = batch
        query_rgb = (query_rgb).cuda(0)
        support_rgb = (support_rgb).cuda(0)
        support_mask = (support_mask).cuda(0)
        query_mask = (query_mask).cuda(0).long()
        # Drop the singleton channel dim: (B, 1, H, W) -> (B, H, W).
        query_mask = query_mask[:, 0, :, :]
        history_mask = (history_mask).cuda(0)
        optimizer.zero_grad()
        pred = model(query_rgb, support_rgb, support_mask, history_mask)
        pred_softmax = F.softmax(pred, dim=1).data.cpu()
        # update history mask: feed this prediction back into the dataset so
        # the next epoch's IOM pass sees it.
        for j in range(support_mask.shape[0]):
            sub_index = index[j]
            dataset.history_mask_list[sub_index] = pred_softmax[j]
        # Upsample logits to the input resolution before computing the loss.
        pred = nn.functional.interpolate(pred, size=input_size,
                                         mode='bilinear', align_corners=True)
        loss = loss_calc_v1(pred, query_mask, 0)
        loss.backward()
        optimizer.step()
        tqdm_gen.set_description(
            'e:%d loss = %.4f-:%.4f' % (epoch, loss.item(), highest_iou)
        )
        # save training loss
        tempory_loss += loss.item()
        # NOTE(review): i_iter only reaches len(trainloader)-1, and
        # save_pred_every == len(trainloader), so this condition never fires
        # and the loss history is never written. Confirm intended interval.
        if i_iter % save_pred_every == 0 and i_iter != 0:
            loss_list.append(tempory_loss / save_pred_every)
            plot_loss(checkpoint_dir, loss_list, save_pred_every)
            np.savetxt(osp.join(checkpoint_dir, 'loss_history.txt'),
                       np.array(loss_list))
            tempory_loss = 0
    # === Validation stage
    with torch.no_grad():
        print('----Evaluation----')
        model.eval()
        # Reset per-sample history masks (assumes <= 1000 val samples — confirm).
        valset.history_mask_list = [None] * 1000
        best_iou = 0
        # Iteratively refine predictions with the IOM; stop early once the
        # mean IOU stops improving.
        for eva_iter in range(options.iter_time):
            # Per-class accumulators for the 5 held-out classes of this fold.
            # NOTE(review): all_predict is accumulated nowhere — dead variable.
            all_inter, all_union, all_predict = [0] * 5, [0] * 5, [0] * 5
            for i_iter, batch in enumerate(valloader):
                query_rgb, query_mask, support_rgb, support_mask, history_mask, sample_class, index = batch
                query_rgb = query_rgb.cuda(0)
                support_rgb = support_rgb.cuda(0)
                support_mask = support_mask.cuda(0)
                query_mask = query_mask.cuda(0).long()
                query_mask = query_mask[:, 0, :, :]
                history_mask = history_mask.cuda(0)
                pred = model(query_rgb, support_rgb,
                             support_mask, history_mask)
                pred_softmax = F.softmax(pred, dim=1).data.cpu()
                # update history mask for the next refinement iteration
                for j in range(support_mask.shape[0]):
                    sub_index = index[j]
                    valset.history_mask_list[sub_index] = pred_softmax[j]
                pred = nn.functional.interpolate(pred, size=query_rgb.shape[-2:],
                                                 mode='bilinear', align_corners=True)
                _, pred_label = torch.max(pred, 1)
                inter_list, union_list, _, num_predict_list = \
                    get_iou_v1(query_mask, pred_label)
                for j in range(query_mask.shape[0]):
                    # Map the global class id to a 0..4 index for this fold.
                    mapped_cid = sample_class[j] - (options.fold * 5 + 1)
                    all_inter[mapped_cid] += inter_list[j]
                    all_union[mapped_cid] += union_list[j]
            IOU = [0] * 5
            for j in range(5):
                # NOTE(review): raises ZeroDivisionError if a class never
                # appears in the validation set (all_union[j] == 0).
                IOU[j] = all_inter[j] / all_union[j]
            mean_iou = np.mean(IOU)
            print('IOU:%.4f' % (mean_iou))
            if mean_iou > best_iou:
                best_iou = mean_iou
            else:
                break
        iou_list.append(best_iou)
        plot_iou(checkpoint_dir, iou_list)
        np.savetxt(osp.join(checkpoint_dir, 'iou_history.txt'),
                   np.array(iou_list))
        if best_iou > highest_iou:
            highest_iou = best_iou
            model = model.eval()
            # Checkpoint on CPU; model.cuda() below moves it back.
            torch.save(model.cpu().state_dict(),
                       osp.join(checkpoint_dir, 'model', 'best' '.pth'))
            model = model.train()
            best_epoch = epoch
            print('A better model is saved')
        print('IOU for this epoch: %.4f' % (best_iou))
        model.cuda()
    epoch_time = time.time() - begin_time
    print('best epoch:%d ,iout:%.4f' % (best_epoch, highest_iou))
    print('This epoch taks:', epoch_time, 'second')
    print('still need hour:%.4f' % ((num_epoch - epoch) * epoch_time / 3600))
|
{"/visualize.py": ["/one_shot_network.py", "/utils.py"], "/val.py": ["/utils.py", "/one_shot_network.py"], "/one_shot_network.py": ["/utils.py"], "/train.py": ["/utils.py", "/one_shot_network.py"]}
|
1,741
|
pt657407064/shippoTracking
|
refs/heads/master
|
/generator.py
|
import threading
from time import sleep
import shippo
class generator:
    """Create a Shippo shipment and purchase a label for its first rate.

    Usage: instantiate with sender ("from"), recipient ("to") and parcel
    attributes, call ``construct()`` to build the API payloads, then
    ``generating()`` to create the shipment and buy the label.
    """

    # SECURITY NOTE: a (test-mode) API key is hardcoded here. Move it to an
    # environment variable or a config file before any production use.
    shippo.api_key = "shippo_test_a0159d5cfb4013f15b4db6360f5be757edb6a2d4"

    def __init__(self, fromname, fromaddress, fromcity, fromstate, fromcountry, fromzipcode, fromemail, fromphone,
                 toname, toaddress, tocity, tostate, tocountry, tozipcode, toemail, tophone,
                 width, length, weight, unit, height):
        """Store all shipment attributes.

        ``unit`` is the human-readable distance unit from the UI: "Inch"
        maps to Shippo's "in"; anything else is treated as centimeters.
        Weight is always sent in pounds (see ``construct``).
        """
        # Debug echo of everything received from the UI.
        print(fromname, fromaddress, fromcity, fromstate, fromcountry, fromzipcode, fromemail, fromphone,
              toname, toaddress, tocity, tostate, tocountry, tozipcode, toemail, tophone,
              width, length, weight, unit, height)
        self.fromname = fromname
        self.fromaddress = fromaddress
        self.fromcity = fromcity
        self.fromstate = fromstate
        self.fromcountry = fromcountry
        self.fromzipcode = fromzipcode
        self.fromemail = fromemail
        self.fromphone = fromphone
        self.toname = toname
        self.toaddress = toaddress
        self.tocity = tocity
        self.tostate = tostate
        self.tocountry = tocountry
        self.tozipcode = tozipcode
        self.toemail = toemail
        self.tophone = tophone
        self.width = width
        self.length = length
        self.weight = weight
        # Translate the UI's unit name into the Shippo distance-unit code.
        self.unit = "in" if unit == "Inch" else "cm"
        self.height = height

    def construct(self):
        """Build the address/parcel dictionaries the Shippo API expects."""
        self.person_from = {
            "name": self.fromname,
            "street1": self.fromaddress,
            "city": self.fromcity,
            "state": self.fromstate,
            "zip": self.fromzipcode,
            "country": self.fromcountry,
            "phone": self.fromphone,
            "email": self.fromemail
        }
        self.person_to = {
            "name": self.toname,
            "street1": self.toaddress,
            "city": self.tocity,
            "state": self.tostate,
            "zip": self.tozipcode,
            "country": self.tocountry,
            "phone": self.tophone,
            "email": self.toemail
        }
        self.parcel = {
            "length": self.length,
            "width": self.width,
            "height": self.height,
            "distance_unit": self.unit,
            "weight": self.weight,
            "mass_unit": "lb"  # the UI always collects weight in pounds
        }

    def generating(self):
        """Create the shipment, buy a label for its first rate and print
        the tracking number / label URL (or "fail").

        Bug fix: the original passed ``async=False``, which is a
        SyntaxError on Python >= 3.7 (``async`` is a reserved keyword);
        the Shippo SDK renamed the parameter to ``asynchronous``.
        """
        self.shipment = shippo.Shipment.create(
            address_from=self.person_from,
            address_to=self.person_to,
            parcels=self.parcel,
            asynchronous=False
        )
        print(self.person_to)
        print(self.person_from)
        print(self.parcel)
        rates = self.shipment.rates
        if not rates:
            # No carrier returned a rate for this shipment; keep the
            # existing print-style error reporting.
            print("fail")
            return
        rate = rates[0]
        transaction = shippo.Transaction.create(rate=rate.object_id,
                                                asynchronous=False)
        if transaction.status == "SUCCESS":
            print("tracking number %s" % str(transaction.tracking_number) + "\n" +
                  "Label url %s" % str(transaction.label_url))
        else:
            print("fail")
|
{"/main.py": ["/generator.py"]}
|
1,742
|
pt657407064/shippoTracking
|
refs/heads/master
|
/main.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\etherousUI.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from generator import generator
class Ui_mainFrame(object):
    """UI builder for the shipping-label window.

    Generated by pyuic5 from etherousUI.ui and then edited by hand. Builds
    the "from"/"to" address forms plus the parcel form, and wires the
    Generate button to ``buttonClick``, which validates the fields and
    hands everything to ``generator``.
    """

    def setupUi(self, mainFrame):
        """Create and lay out all widgets on *mainFrame*."""
        mainFrame.setObjectName("mainFrame")
        mainFrame.resize(1386, 1457)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(mainFrame.sizePolicy().hasHeightForWidth())
        mainFrame.setSizePolicy(sizePolicy)
        mainFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        mainFrame.setFrameShadow(QtWidgets.QFrame.Raised)
        # Generate button -> validation + label generation.
        self.generateBtn = QtWidgets.QPushButton(mainFrame)
        self.generateBtn.setGeometry(QtCore.QRect(570, 1300, 225, 69))
        self.generateBtn.setObjectName("generateBtn")
        self.generateBtn.clicked.connect(self.buttonClick)
        # Decorative separator lines.
        self.line = QtWidgets.QFrame(mainFrame)
        self.line.setGeometry(QtCore.QRect(650, 0, 71, 841))
        self.line.setFrameShape(QtWidgets.QFrame.VLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.line_2 = QtWidgets.QFrame(mainFrame)
        self.line_2.setGeometry(QtCore.QRect(0, 830, 1381, 20))
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        # NOTE(review): lineEdit_7/label_16 sit at x >= 1420, outside the
        # 1386-px-wide frame — likely leftovers from the .ui file.
        self.lineEdit_7 = QtWidgets.QLineEdit(mainFrame)
        self.lineEdit_7.setGeometry(QtCore.QRect(1510, 500, 71, 45))
        self.lineEdit_7.setText("")
        self.lineEdit_7.setObjectName("lineEdit_7")
        self.label_16 = QtWidgets.QLabel(mainFrame)
        self.label_16.setGeometry(QtCore.QRect(1420, 500, 138, 39))
        self.label_16.setObjectName("label_16")
        self.line_3 = QtWidgets.QFrame(mainFrame)
        self.line_3.setGeometry(QtCore.QRect(0, 1220, 1381, 20))
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        # --- "From" address form ---
        self.formLayoutWidget = QtWidgets.QWidget(mainFrame)
        self.formLayoutWidget.setGeometry(QtCore.QRect(10, 20, 651, 741))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.fromInfo = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.fromInfo.setContentsMargins(0, 0, 0, 0)
        self.fromInfo.setObjectName("fromInfo")
        self.label_2 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_2.setObjectName("label_2")
        self.fromInfo.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.fromFirstNamelabel = QtWidgets.QLabel(self.formLayoutWidget)
        self.fromFirstNamelabel.setObjectName("fromFirstNamelabel")
        self.fromInfo.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.fromFirstNamelabel)
        self.fromFirstName = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromFirstName.setObjectName("fromFirstName")
        self.fromInfo.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.fromFirstName)
        self.fromLastNamelabel = QtWidgets.QLabel(self.formLayoutWidget)
        self.fromLastNamelabel.setObjectName("fromLastNamelabel")
        self.fromInfo.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.fromLastNamelabel)
        self.fromLastName = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromLastName.setObjectName("fromLastName")
        self.fromInfo.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.fromLastName)
        self.midNameLabel = QtWidgets.QLabel(self.formLayoutWidget)
        self.midNameLabel.setObjectName("midNameLabel")
        self.fromInfo.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.midNameLabel)
        self.fromMidName = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromMidName.setObjectName("fromMidName")
        self.fromInfo.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.fromMidName)
        self.addressLabel = QtWidgets.QLabel(self.formLayoutWidget)
        self.addressLabel.setObjectName("addressLabel")
        self.fromInfo.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.addressLabel)
        self.fromStreet = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromStreet.setObjectName("fromStreet")
        self.fromInfo.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.fromStreet)
        self.label_6 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_6.setObjectName("label_6")
        self.fromInfo.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_6)
        self.fromCity = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromCity.setText("")
        self.fromCity.setObjectName("fromCity")
        self.fromInfo.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.fromCity)
        self.label_7 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_7.setObjectName("label_7")
        self.fromInfo.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_7)
        self.fromState = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromState.setText("")
        self.fromState.setObjectName("fromState")
        self.fromInfo.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.fromState)
        self.label_8 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_8.setObjectName("label_8")
        self.fromInfo.setWidget(9, QtWidgets.QFormLayout.LabelRole, self.label_8)
        self.fromCountry = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromCountry.setText("")
        self.fromCountry.setObjectName("fromCountry")
        self.fromInfo.setWidget(9, QtWidgets.QFormLayout.FieldRole, self.fromCountry)
        self.label_9 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_9.setObjectName("label_9")
        self.fromInfo.setWidget(10, QtWidgets.QFormLayout.LabelRole, self.label_9)
        self.fromZipcode = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromZipcode.setText("")
        self.fromZipcode.setObjectName("fromZipcode")
        self.fromInfo.setWidget(10, QtWidgets.QFormLayout.FieldRole, self.fromZipcode)
        self.fromEmailLabel = QtWidgets.QLabel(self.formLayoutWidget)
        self.fromEmailLabel.setObjectName("fromEmailLabel")
        self.fromInfo.setWidget(11, QtWidgets.QFormLayout.LabelRole, self.fromEmailLabel)
        self.fromEmail = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromEmail.setText("")
        self.fromEmail.setObjectName("fromEmail")
        self.fromInfo.setWidget(11, QtWidgets.QFormLayout.FieldRole, self.fromEmail)
        self.fromEmailLabel_2 = QtWidgets.QLabel(self.formLayoutWidget)
        self.fromEmailLabel_2.setObjectName("fromEmailLabel_2")
        self.fromInfo.setWidget(12, QtWidgets.QFormLayout.LabelRole, self.fromEmailLabel_2)
        self.fromPhone = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.fromPhone.setText("")
        self.fromPhone.setObjectName("fromPhone")
        self.fromInfo.setWidget(12, QtWidgets.QFormLayout.FieldRole, self.fromPhone)
        # --- "To" address form ---
        self.formLayoutWidget_2 = QtWidgets.QWidget(mainFrame)
        self.formLayoutWidget_2.setGeometry(QtCore.QRect(690, 20, 661, 741))
        self.formLayoutWidget_2.setObjectName("formLayoutWidget_2")
        self.toInfo = QtWidgets.QFormLayout(self.formLayoutWidget_2)
        self.toInfo.setContentsMargins(0, 0, 0, 0)
        self.toInfo.setObjectName("toInfo")
        self.label_18 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_18.setObjectName("label_18")
        self.toInfo.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_18)
        self.label_10 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_10.setObjectName("label_10")
        self.toInfo.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_10)
        self.toFirstName = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toFirstName.setObjectName("toFirstName")
        self.toInfo.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.toFirstName)
        self.toLastName = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toLastName.setObjectName("toLastName")
        self.toInfo.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.toLastName)
        self.toMiddleName = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toMiddleName.setObjectName("toMiddleName")
        self.toInfo.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.toMiddleName)
        self.toStreet = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toStreet.setObjectName("toStreet")
        self.toInfo.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.toStreet)
        self.toCity = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toCity.setText("")
        self.toCity.setObjectName("toCity")
        self.toInfo.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.toCity)
        self.toState = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toState.setText("")
        self.toState.setObjectName("toState")
        self.toInfo.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.toState)
        self.toCountry = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toCountry.setText("")
        self.toCountry.setObjectName("toCountry")
        self.toInfo.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.toCountry)
        self.toZipcode = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toZipcode.setText("")
        self.toZipcode.setObjectName("toZipcode")
        self.toInfo.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.toZipcode)
        self.toEmail = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toEmail.setText("")
        self.toEmail.setObjectName("toEmail")
        self.toInfo.setWidget(9, QtWidgets.QFormLayout.FieldRole, self.toEmail)
        self.toPhone = QtWidgets.QLineEdit(self.formLayoutWidget_2)
        self.toPhone.setText("")
        self.toPhone.setObjectName("toPhone")
        self.toInfo.setWidget(10, QtWidgets.QFormLayout.FieldRole, self.toPhone)
        self.label_17 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_17.setObjectName("label_17")
        self.toInfo.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_17)
        self.label_13 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_13.setObjectName("label_13")
        self.toInfo.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_13)
        self.label_11 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_11.setObjectName("label_11")
        self.toInfo.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_11)
        self.label_12 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_12.setObjectName("label_12")
        self.toInfo.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_12)
        self.label_19 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_19.setObjectName("label_19")
        self.toInfo.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_19)
        self.label_15 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_15.setObjectName("label_15")
        self.toInfo.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_15)
        self.label_14 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.label_14.setObjectName("label_14")
        self.toInfo.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_14)
        self.toEmailLabel = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.toEmailLabel.setObjectName("toEmailLabel")
        self.toInfo.setWidget(9, QtWidgets.QFormLayout.LabelRole, self.toEmailLabel)
        self.fromEmailLabel_3 = QtWidgets.QLabel(self.formLayoutWidget_2)
        self.fromEmailLabel_3.setObjectName("fromEmailLabel_3")
        self.toInfo.setWidget(10, QtWidgets.QFormLayout.LabelRole, self.fromEmailLabel_3)
        # --- Parcel form ---
        self.label_3 = QtWidgets.QLabel(mainFrame)
        self.label_3.setGeometry(QtCore.QRect(10, 820, 158, 78))
        self.label_3.setObjectName("label_3")
        self.formLayoutWidget_3 = QtWidgets.QWidget(mainFrame)
        self.formLayoutWidget_3.setGeometry(QtCore.QRect(120, 850, 333, 362))
        self.formLayoutWidget_3.setObjectName("formLayoutWidget_3")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget_3)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setObjectName("formLayout")
        self.label_4 = QtWidgets.QLabel(self.formLayoutWidget_3)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
        self.label_4.setSizePolicy(sizePolicy)
        self.label_4.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.label_4.setObjectName("label_4")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_4)
        self.label_20 = QtWidgets.QLabel(self.formLayoutWidget_3)
        self.label_20.setObjectName("label_20")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_20)
        self.label_22 = QtWidgets.QLabel(self.formLayoutWidget_3)
        self.label_22.setObjectName("label_22")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_22)
        self.label_21 = QtWidgets.QLabel(self.formLayoutWidget_3)
        self.label_21.setObjectName("label_21")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_21)
        self.label_23 = QtWidgets.QLabel(self.formLayoutWidget_3)
        self.label_23.setObjectName("label_23")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_23)
        self.label_5 = QtWidgets.QLabel(self.formLayoutWidget_3)
        self.label_5.setObjectName("label_5")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_5)
        self.width = QtWidgets.QLineEdit(self.formLayoutWidget_3)
        self.width.setObjectName("width")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.width)
        self.length = QtWidgets.QLineEdit(self.formLayoutWidget_3)
        self.length.setText("")
        self.length.setObjectName("length")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.length)
        self.weight = QtWidgets.QLineEdit(self.formLayoutWidget_3)
        self.weight.setObjectName("weight")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.weight)
        # Fix: give the combo box a parent like every other field widget
        # (the original created it parentless).
        self.distanceUnit = QtWidgets.QComboBox(self.formLayoutWidget_3)
        self.distanceUnit.setObjectName("unit")
        self.distanceUnit.addItem("Inch")
        self.distanceUnit.addItem("CM")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.distanceUnit)
        self.height = QtWidgets.QLineEdit(self.formLayoutWidget_3)
        self.height.setObjectName("height")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.height)
        self.retranslateui(mainFrame)
        QtCore.QMetaObject.connectSlotsByName(mainFrame)

    def retranslateui(self, mainFrame):
        """Set all user-visible strings (typo fix: "Counrty" -> "Country")."""
        _translate = QtCore.QCoreApplication.translate
        mainFrame.setWindowTitle(_translate("mainFrame", "Frame"))
        self.generateBtn.setText(_translate("mainFrame", "Generate"))
        self.label_16.setText(_translate("mainFrame", "State"))
        self.label_2.setText(_translate("mainFrame", "From:"))
        self.fromFirstNamelabel.setText(_translate("mainFrame", "First Name *"))
        self.fromLastNamelabel.setText(_translate("mainFrame", "Last Name *"))
        self.midNameLabel.setText(_translate("mainFrame", "Mid "))
        self.addressLabel.setText(_translate("mainFrame", "Address *"))
        self.label_6.setText(_translate("mainFrame", "City*"))
        self.label_7.setText(_translate("mainFrame", "State"))
        self.label_8.setText(_translate("mainFrame", "Country *"))
        self.label_9.setText(_translate("mainFrame", "Zip Code *"))
        self.fromEmailLabel.setText(_translate("mainFrame", "Email *"))
        self.fromEmailLabel_2.setText(_translate("mainFrame", "Phone *"))
        self.label_18.setText(_translate("mainFrame", "To:"))
        self.label_10.setText(_translate("mainFrame", "First Name *"))
        self.label_17.setText(_translate("mainFrame", "Last Name *"))
        self.label_13.setText(_translate("mainFrame", "Mid"))
        self.label_11.setText(_translate("mainFrame", "Address*"))
        self.label_12.setText(_translate("mainFrame", "City*"))
        self.label_19.setText(_translate("mainFrame", "State"))
        self.label_15.setText(_translate("mainFrame", "Country*"))
        self.label_14.setText(_translate("mainFrame", "Zip Code*"))
        self.toEmailLabel.setText(_translate("mainFrame", "Email *"))
        self.fromEmailLabel_3.setText(_translate("mainFrame", "Phone *"))
        self.label_3.setText(_translate("mainFrame", "Parcel"))
        self.label_4.setText(_translate("mainFrame", "Width"))
        self.label_20.setText(_translate("mainFrame", "Length"))
        self.label_22.setText(_translate("mainFrame", "weight"))
        self.label_21.setText(_translate("mainFrame", "distanceUnit"))
        self.label_23.setText(_translate("mainFrame", "mass(lb)"))
        self.label_5.setText(_translate("mainFrame", "Height"))

    def buttonClick(self):
        """Validate the form; on success, build a generator and buy the label."""
        if not self.check():
            msg = QMessageBox()
            msg.setText("Some info has not been filled!")
            msg.exec()
        else:
            self.convert()
            print("now generating")
            w = generator(self.name1, self.street1, self.city1, self.state1, self.country1, self.zipcode1, self.email1,
                          self.phone1, self.name2, self.street2, self.city2, self.state2, self.country2, self.zipcode2,
                          self.email2, self.phone2, self.parwidth, self.parlength, self.parweight, self.distance_unit, self.parheight)
            w.construct()
            w.generating()

    def convert(self):
        """Snapshot all field texts into plain-string attributes for generator."""
        self.name1 = str(self.fromFirstName.text() + self.fromLastName.text())
        self.street1 = str(self.fromStreet.text())
        self.city1 = str(self.fromCity.text())
        self.state1 = str(self.fromState.text())
        self.country1 = str(self.fromCountry.text())
        self.zipcode1 = str(self.fromZipcode.text())
        self.email1 = str(self.fromEmail.text())
        self.phone1 = str(self.fromPhone.text())
        self.name2 = str(self.toFirstName.text() + self.toLastName.text())
        self.street2 = str(self.toStreet.text())
        self.city2 = str(self.toCity.text())
        self.state2 = str(self.toState.text())
        self.country2 = str(self.toCountry.text())
        self.zipcode2 = str(self.toZipcode.text())
        self.email2 = str(self.toEmail.text())
        self.phone2 = str(self.toPhone.text())
        self.parwidth = str(self.width.text())
        self.parlength = str(self.length.text())
        self.parweight = str(self.weight.text())
        self.distance_unit = str(self.distanceUnit.currentText())
        self.parheight = str(self.height.text())

    def check(self):
        """Return True iff every required field is non-empty.

        Bug fix: the original compared the street QLineEdit widgets
        themselves to "" (``self.fromStreet == ""``, ``self.toStreet == ""``)
        instead of their text, so empty street fields always passed
        validation. All fields are now checked via ``.text()``.
        """
        required = (
            self.fromFirstName, self.fromLastName, self.fromStreet,
            self.fromCity, self.fromState, self.fromCountry,
            self.fromZipcode, self.fromEmail, self.fromPhone,
            self.toFirstName, self.toLastName, self.toStreet,
            self.toCity, self.toState, self.toCountry,
            self.toZipcode, self.toEmail, self.toPhone,
            self.width, self.length, self.weight, self.height,
        )
        return all(widget.text() != "" for widget in required)
if __name__ == "__main__":
    import sys

    # Bootstrap the Qt application and show the main frame.
    qt_app = QtWidgets.QApplication(sys.argv)
    root_frame = QtWidgets.QFrame()
    builder = Ui_mainFrame()
    builder.setupUi(root_frame)
    root_frame.show()
    sys.exit(qt_app.exec_())
|
{"/main.py": ["/generator.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.