text stringlengths 38 1.54M |
|---|
from keras.datasets import reuters
from keras import models
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
def main():
    """Train and evaluate a dense softmax classifier on the Reuters newswire topics."""
    (train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)

    # Multi-hot encode the word-index sequences; one-hot encode the 46 topic labels.
    x_train = vectorize_sequences(train_data)
    x_test = vectorize_sequences(test_data)
    oh_train_labels = to_categorical(train_labels)
    oh_test_labels = to_categorical(test_labels)

    # Hold out the first 1000 samples for validation.
    x_val, partial_x_train = x_train[:1000], x_train[1000:]
    y_val, partial_y_train = oh_train_labels[:1000], oh_train_labels[1000:]

    network = models.Sequential([
        layers.Dense(64, activation='relu', input_shape=(10000,)),
        layers.Dense(64, activation='relu'),
        layers.Dense(46, activation='softmax'),
    ])
    network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    history = network.fit(partial_x_train, partial_y_train, epochs=9,
                          batch_size=512, validation_data=(x_val, y_val))
    print(network.evaluate(x_test, oh_test_labels))
    plotHistory(history)
def plotHistory(history):
    """Plot training/validation loss (top panel) and accuracy (bottom panel)."""
    metrics = history.history
    train_loss = metrics['loss']
    val_loss = metrics['val_loss']
    epoch_axis = range(1, len(train_loss) + 1)

    # Top panel: loss curves.
    plt.subplot(2, 1, 1)
    plt.plot(epoch_axis, train_loss, 'bo', label='Training Loss')
    plt.plot(epoch_axis, val_loss, 'b', label='Validation Loss')
    plt.title("Training and Validation Loss and Accuracy")
    plt.ylabel('Loss')
    plt.legend()

    # Bottom panel: accuracy curves ('acc'/'val_acc' are the pre-2.3 Keras
    # history keys -- this file's to_categorical import matches that vintage).
    plt.subplot(2, 1, 2)
    plt.plot(epoch_axis, metrics['acc'], 'bo', label='Training Accuracy')
    plt.plot(epoch_axis, metrics['val_acc'], 'b', label='Validation Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.show()
def decodeToText(textSequence):
    """Decode a Reuters word-index sequence back into a space-joined string.

    Indices 0-2 are reserved (padding/start-of-sequence/unknown), hence the
    i - 3 shift; indices with no known word render as '?'.
    """
    index_to_word = {index: word for word, index in reuters.get_word_index().items()}
    return ' '.join(index_to_word.get(i - 3, '?') for i in textSequence)
def vectorize_sequences(sequences, dimension = 10000):
    """Multi-hot encode index sequences.

    Returns a float array of shape (len(sequences), dimension) where entry
    [i, j] is 1.0 iff index j occurs in sequences[i], else 0.0.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, indices in enumerate(sequences):
        encoded[row, indices] = 1
    return encoded
if __name__ == '__main__':
main() |
import os
import sys
import pygame
from app.scene.SceneHandler import SceneHandler
from app.settings import *
if __name__ == '__main__':
    # When frozen into a PyInstaller --onefile executable, resources are
    # unpacked into the temporary _MEIPASS directory; chdir there so
    # relative asset paths keep working.
    if getattr(sys, 'frozen', False):
        os.chdir(sys._MEIPASS)

    # Init -- mixer.pre_init must run before pygame.init() to take effect,
    # and pygame must be initialised before a display surface is created.
    # (The original called pygame.display.set_mode() first, which fails
    # with "video system not initialized" on strict SDL builds.)
    pygame.mixer.pre_init(44100, -16, 2, 2048)
    pygame.init()
    pygame.font.init()

    # Screen
    screenSize = (SCREEN_WIDTH, SCREEN_HEIGHT)
    screen = pygame.display.set_mode(screenSize)
    pygame.display.set_caption("LD38 Warm Up")

    # Hide the mouse
    # pygame.mouse.set_visible(False)

    # Setup with gameData and the first scene
    sceneHandler = SceneHandler(screen)
    sceneHandler.mainLoop()
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 10:36:36 2018
@author: MUJ
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Importing the dataset
# Importing the dataset: loyalty_points as target (column 2), the remaining
# trailing columns as features.
data = pd.read_csv("customers.csv")
features = data.iloc[:, 3:].values
labels = data.iloc[:, 2].values

# Fix: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(
    features, labels, test_size=0.1, random_state=0)

# Scaling of data -- fit the scaler on the training split only, then apply
# the same transform to the test split.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
features_train = sc.fit_transform(features_train)
features_test = sc.transform(features_test)

# System is trained using linear regression.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(features_train, labels_train)

# Prediction done over test data.
pred = regressor.predict(features_test)

# Prediction over the next lot of data (rows 99+ of rfmmed.csv, columns 1+),
# scaled with the already-fitted scaler.
x = pd.read_csv("rfmmed.csv")
testd = x.iloc[99:, 1:]
testd = sc.transform(testd)
pred1 = regressor.predict(testd)

# R^2 on train and test splits.
score = regressor.score(features_train, labels_train)
print(score)
score = regressor.score(features_test, labels_test)
print(score)
# print(regressor.coef_)

# Split customers by loyalty_points threshold.
df_loyal = data[data['loyalty_points'] >= 2]
df_not_loyal = data[data['loyalty_points'] <= 1]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import division, print_function, absolute_import
"test suite for utilities library"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "07/05/2015"
import unittest
import numpy
import sys
import os
import fabio
import tempfile
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger, recursive_delete
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
import pyFAI.utils
# if logger.getEffectiveLevel() <= logging.INFO:
# from pyFAI.gui_utils import pylab
import scipy.ndimage
# TODO Test:
# gaussian_filter
# relabel
# boundingBox
# removeSaturatedPixel
# DONE:
# # binning
# # unbinning
# # shift
# # shiftFFT
# # measure_offset
# # averageDark
# # averageImages
class TestUtils(unittest.TestCase):
    """Unit tests for pyFAI.utils: binning/unbinning, image averaging,
    shifting, FFT gaussian filtering, FixedParameters and expand2d.

    Fix: the deprecated ``self.assert_`` alias (removed in Python 3.12)
    is replaced by ``self.assertTrue`` throughout.
    """
    # Shared, read-only random fixtures built once at class-creation time.
    unbinned = numpy.random.random((64, 32))
    dark = unbinned.astype("float32")
    flat = 1 + numpy.random.random((64, 32))
    raw = flat + dark
    tmp_dir = UtilsTest.tempdir
    tmp_file = os.path.join(tmp_dir, "testUtils_average.edf")

    def test_binning(self):
        """
        test the binning and unbinning functions
        """
        binned = pyFAI.utils.binning(self.unbinned, (4, 2))
        self.assertEqual(binned.shape, (64 // 4, 32 // 2), "binned size is OK")
        unbinned = pyFAI.utils.unBinning(binned, (4, 2))
        self.assertEqual(unbinned.shape, self.unbinned.shape, "unbinned size is OK")
        self.assertAlmostEqual(unbinned.sum(), self.unbinned.sum(), 2, "content is the same")

    def test_averageDark(self):
        """
        Some testing for dark averaging
        """
        one = pyFAI.utils.averageDark([self.dark])
        self.assertEqual(abs(self.dark - one).max(), 0, "data are the same")
        two = pyFAI.utils.averageDark([self.dark, self.dark])
        self.assertEqual(abs(self.dark - two).max(), 0, "data are the same: mean test")
        three = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark)], "median")
        self.assertEqual(abs(self.dark - three).max(), 0, "data are the same: median test")
        four = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark)], "min")
        self.assertEqual(abs(numpy.zeros_like(self.dark) - four).max(), 0, "data are the same: min test")
        five = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark)], "max")
        self.assertEqual(abs(numpy.ones_like(self.dark) - five).max(), 0, "data are the same: max test")
        six = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark), self.dark, self.dark], "median", .001)
        self.assertTrue(abs(self.dark - six).max() < 1e-4, "data are the same: test threshold")
        seven = pyFAI.utils.averageImages([self.raw], darks=[self.dark], flats=[self.flat], threshold=0, output=self.tmp_file)
        self.assertTrue(abs(numpy.ones_like(self.dark) - fabio.open(seven).data).mean() < 1e-2, "averageImages")

    def test_shift(self):
        """
        Some testing for image shifting and offset measurement functions.
        """
        ref = numpy.ones((11, 12))
        ref[2, 3] = 5
        res = numpy.ones((11, 12))
        res[5, 7] = 5
        delta = (5 - 2, 7 - 3)
        self.assertTrue(abs(pyFAI.utils.shift(ref, delta) - res).max() < 1e-12, "shift with integers works")
        self.assertTrue(abs(pyFAI.utils.shiftFFT(ref, delta) - res).max() < 1e-12, "shift with FFTs works")
        self.assertTrue(pyFAI.utils.measure_offset(res, ref) == delta, "measure offset works")

    def test_gaussian_filter(self):
        """
        Check gaussian filters applied via FFT
        """
        for sigma in [2, 9.0 / 8.0]:
            for mode in ["wrap", "reflect", "constant", "nearest", "mirror"]:
                blurred1 = scipy.ndimage.filters.gaussian_filter(self.flat, sigma, mode=mode)
                blurred2 = pyFAI.utils.gaussian_filter(self.flat, sigma, mode=mode)
                delta = abs((blurred1 - blurred2) / (blurred1)).max()
                logger.info("Error for gaussian blur sigma: %s with mode %s is %s" % (sigma, mode, delta))
                self.assertTrue(delta < 6e-5, "Gaussian blur sigma: %s in %s mode are the same, got %s" % (sigma, mode, delta))

    def test_set(self):
        """FixedParameters.add_or_discard: True adds, False discards, None is a no-op."""
        s = pyFAI.utils.FixedParameters()
        self.assertEqual(len(s), 0, "initial set is empty")
        s.add_or_discard("a", True)
        self.assertEqual(len(s), 1, "a is in set")
        s.add_or_discard("a", None)
        self.assertEqual(len(s), 1, "set is untouched")
        s.add_or_discard("a", False)
        self.assertEqual(len(s), 0, "set is empty again")
        s.add_or_discard("a", None)
        self.assertEqual(len(s), 0, "set is untouched")
        s.add_or_discard("a", False)
        self.assertEqual(len(s), 0, "set is still empty")

    def test_expand2d(self):
        """expand2d replicates a vector into a 2-D array in either orientation."""
        vect = numpy.arange(10.)
        size2 = 11
        self.assertTrue((numpy.outer(vect, numpy.ones(size2)) == pyFAI.utils.expand2d(vect, size2, False)).all(), "horizontal vector expand")
        self.assertTrue((numpy.outer(numpy.ones(size2), vect) == pyFAI.utils.expand2d(vect, size2, True)).all(), "vertical vector expand")
def test_suite_all_Utils():
    """Assemble the TestUtils suite, one explicit entry per test method."""
    suite = unittest.TestSuite()
    for test_name in ("test_binning", "test_averageDark", "test_shift",
                      "test_gaussian_filter", "test_set", "test_expand2d"):
        suite.addTest(TestUtils(test_name))
    return suite
if __name__ == '__main__':
    # Run the suite directly, then purge the shared temporary directory.
    mysuite = test_suite_all_Utils()
    runner = unittest.TextTestRunner()
    runner.run(mysuite)
    UtilsTest.clean_up()
|
from actualizaciones.api import views
from django.urls import path
# Actualizacion API routes, all relative to this app's URL prefix.
urlpatterns = [
    # Create an actualizacion under the given seguimiento.
    path(
        "<int:seguimiento_pk>/",
        views.create_actualizacion,
        name="actualizacion-create",
    ),
    # List the actualizaciones of one seguimiento.
    path(
        "<int:seguimiento_pk>/list/",
        views.list_actualizacion,
        name="actualizacion-list",
    ),
    # List the latest actualizaciones (no pk in the URL).
    path(
        "list_last/",
        views.list_latest_actualizacion,
        name="actualizacion-list-latests",
    ),
    # "Mix" action on a single actualizacion.
    path(
        "<int:actualizacion_pk>/mix/",
        views.mix_actualizacion,
        name="mix-actualizacion",
    ),
    # Attach a file to an actualizacion.
    path(
        "<int:actualizacion_pk>/files/",
        views.upload_file,
        name="actualizacion-upload",
    ),
    # Delete a previously uploaded file by its own pk.
    path(
        "files/<int:file_pk>/",
        views.delete_file,
        name="actualizacion-delete-file",
    ),
]
|
# -*- coding: utf-8 -*-
import pathlib
from itertools import tee
import pytest
import requirementslib.models.project
from .test_requirements import DEP_PIP_PAIRS
def pairwise(seq):
    """Yield consecutive overlapping pairs of *seq*: (s0, s1), (s1, s2), ..."""
    first, second = tee(seq)
    next(second, None)  # advance the clone one step so the streams are offset
    return zip(first, second)
# First member of each (Pipfile entry, pip line) fixture pair, then paired
# up consecutively so each parametrized case gets two distinct entries.
PIPFILE_ENTRIES = [entry for entry, pip_entry in DEP_PIP_PAIRS]
PAIRED_PIPFILE_ENTRIES = list(pairwise(PIPFILE_ENTRIES))
@pytest.mark.parametrize("entry_1, entry_2", PAIRED_PIPFILE_ENTRIES)
def test_pipfile_entry_comparisons(entry_1, entry_2):
    """Entry equality helper: reflexive, and False for distinct neighbours."""
    # An entry must compare equal to itself...
    assert (
        requirementslib.models.project._are_pipfile_entries_equal(entry_1, entry_1)
        is True
    )
    # ...and differ from its neighbour in the fixture list.
    assert (
        requirementslib.models.project._are_pipfile_entries_equal(entry_1, entry_2)
        is False
    )
def test_project_file_works_if_file_exists_but_is_empty(pathlib_tmpdir):
    """An empty Pipfile parses with invalid_ok=True and round-trips unchanged."""
    pipfile = pathlib_tmpdir.joinpath("Pipfile")
    pipfile.write_text(u"")
    project_file = requirementslib.models.project.ProjectFile.read(
        pipfile.as_posix(),
        requirementslib.models.pipfile.plette.pipfiles.Pipfile,
        invalid_ok=True,
    )
    assert project_file.model is not None
    project_file.write()
    # Fix: read the file once and use it in the assertion -- the original
    # read it twice and left `project_file_contents` as an unused local.
    project_file_contents = pipfile.read_text()
    assert project_file.dumps().strip() == project_file_contents.strip()
def test_dir_with_empty_pipfile_file_raises_exception(pathlib_tmpdir):
    # NOTE(review): despite the name, no Pipfile is written here -- the
    # directory simply lacks one, so Project() must raise FileNotFoundError
    # and the target name must stay unassigned.
    project = None
    with pytest.raises(FileNotFoundError):
        project = requirementslib.models.project.Project(root=pathlib_tmpdir.as_posix())
    assert project is None
def test_dir_with_pipfile_creates_project_file(pathlib_tmpdir):
    # An existing-but-empty Pipfile can be read into a ProjectFile even
    # without invalid_ok=True (contrast with the round-trip test above).
    pipfile = pathlib_tmpdir.joinpath("Pipfile")
    pipfile.write_text(u"")
    project_file = requirementslib.models.project.ProjectFile.read(
        pipfile.as_posix(), requirementslib.models.pipfile.plette.pipfiles.Pipfile
    )
    assert project_file.model is not None
def test_dir_with_pipfile_creates_project(pathlib_tmpdir):
    """End-to-end Project behaviour over a directory holding an empty Pipfile."""
    pipfile = pathlib_tmpdir.joinpath("Pipfile")
    pipfile.write_text(u"")
    project = requirementslib.models.project.Project(root=pathlib_tmpdir.as_posix())
    # The Pipfile is discovered; no lockfile exists yet but its path is derived.
    assert project.pipfile is not None
    assert pathlib.Path(project.pipfile_location).as_posix() == pipfile.as_posix()
    assert project.lockfile is None
    assert (
        pathlib.Path(project.lockfile_location).as_posix()
        == pathlib_tmpdir.joinpath("Pipfile.lock").as_posix()
    )
    # Adding a requirement line parses the extras and a wildcard version...
    project.add_line_to_pipfile("requests[security]", False)
    assert project.pipfile["packages"]["requests"]._data == {
        "extras": ["security"],
        "version": "*",
    }
    # ...and removing the key deletes it again.
    project.remove_keys_from_pipfile(["requests"], True, False)
    assert "requests" not in project.pipfile["packages"]
|
#!/usr/bin/env python
# coding: utf-8
# # ATSA Model
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.preprocessing.sequence import pad_sequences
from time import time
# ## Loading data
# In[2]:
path_data = "./data/"
path_src = "./src/"

# train set -- tab-separated, no header row
df_train = pd.read_csv(path_data + 'traindata.csv', sep = '\t', header = None)
df_train.columns = ['polarity', 'aspect_category', 'target_term', 'start:end', 'sentence']

# dev set -- same layout
df_dev = pd.read_csv(path_data + 'devdata.csv', sep = '\t', header = None)
df_dev.columns = ['polarity', 'aspect_category', 'target_term', 'start:end', 'sentence']
df_train.head(5)

# ## Creation of y_train and y_dev
# In[3]:
# Encode polarity labels as class ids: negative=0, neutral=1, positive=2.
y_train = torch.Tensor(df_train['polarity'].map({'positive':2, 'neutral':1, 'negative':0}).values)
y_dev = torch.Tensor(df_dev['polarity'].map({'positive':2, 'neutral':1, 'negative':0}).values)
y_train.shape

# ## Creation of X_train and X_dev
# In[4]:
# Hash each sentence into a sequence of integer ids (train + dev share one
# id space because they are hashed with the same vocab_size), then left-pad
# to a common length.
vocab_size = 20000
sentences_train = df_train['sentence']
sentences_dev = df_dev['sentence']
sentences = pd.concat([sentences_train, sentences_dev])
sentences = list(sentences.apply(lambda sentence: one_hot(sentence, vocab_size, lower=False)).values)
X1 = torch.LongTensor(pad_sequences(sentences))
X1.shape

# In[5]:
# Same hashing/padding for the aspect target terms, with a smaller id space.
vocab_size_target = 2000
target_train = df_train['target_term']
target_dev = df_dev['target_term']
targets = pd.concat([target_train, target_dev])
targets = list(targets.apply(lambda sentence: one_hot(sentence, vocab_size_target, lower=False)).values)
X2 = torch.LongTensor(pad_sequences(targets))
X2.shape

# ## After embedding
# In[6]:
# max number for context and aspect
max_aspect = 2
max_context = 30
# useful params
l1 = min(X1.shape[1], max_context) # max length of a sentence
l2 = min(X2.shape[1], max_aspect) # max length of target name
train_size = int(X1.shape[0] * 0.8) # take 80% of data for train set and 20% for dev set
# reduce dimension -- keep only the trailing l1/l2 columns (pad_sequences
# pads on the left, so the trailing columns carry the actual tokens).
X1 = X1[:,-min(l1,max_context):]
X2 = X2[:,-min(l2,max_aspect):]
# gather tensor: sentence ids first, aspect ids appended after
X = torch.cat([X1, X2], 1)
# train set & dev set creation
# NOTE(review): the 80/20 split is positional over the concatenated
# train+dev rows while y_train/y_dev come from the original frames --
# confirm the row counts line up as intended.
X_train = X[:train_size, :]
X_dev = X[train_size:, :]
print(X_train.shape)
print(X_dev.shape)

# In[7]:
dataset_train = TensorDataset(X_train, y_train)
dataset_dev = TensorDataset(X_dev, y_dev)

# In[8]:
# Class balance per split.
print('Train set')
print(pd.Series(y_train).value_counts(normalize = True))
print('')
print('Dev set')
print(pd.Series(y_dev).value_counts(normalize = True))
# ## Model
# In[9]:
class CNN_Gate_Aspect_Text(nn.Module):
    """Gated CNN for aspect-based sentiment (GCAE-style): sentence features
    are gated by pooled aspect features, max-pooled, and classified.

    Args:
        Co: feature maps per sentence-conv kernel size.
        L: channels of the aspect convolution.
        Ks: sentence-conv kernel widths.
        C: number of output classes.
        embed_num/embed_dim: sentence vocabulary size / embedding dim.
        aspect_embed_num/aspect_embed_dim: aspect vocabulary size / dim.
        embedding/aspect_embedding: optional pre-trained weights (currently
            unused -- the assignment lines are commented out below).
    """
    def __init__(self, Co=100, L=300, Ks=[3,4,5], C=3, embed_num = 20000, embed_dim = 300, aspect_embed_num = 2000, aspect_embed_dim = 300, embedding = None, aspect_embedding = None):
        # NOTE(review): mutable default Ks=[3,4,5] is shared across calls;
        # harmless here since it is never mutated, but worth confirming.
        super(CNN_Gate_Aspect_Text, self).__init__()
        # Initialize the embedding, with weights if pre-trained embedding provided
        self.embed = nn.Embedding(embed_num, embed_dim)
        # self.embed.weight = nn.Parameter(embedding, requires_grad=True) #What is exactly embedding ?
        # Initialise the embedding for the aspect, with weights if pretrained embedding provided
        self.aspect_embed = nn.Embedding(aspect_embed_num, aspect_embed_dim)
        # self.aspect_embed.weight = nn.Parameter(aspect_embedding, requires_grad=True)
        # convs1: tanh feature branch; convs2: gate branch; convs3: aspect conv.
        self.convs1 = nn.ModuleList([nn.Conv1d(embed_dim, Co, K) for K in Ks])
        self.convs2 = nn.ModuleList([nn.Conv1d(embed_dim, Co, K) for K in Ks])
        self.convs3 = nn.ModuleList([nn.Conv1d(embed_dim, L, 3, padding=1)])
        self.dropout = nn.Dropout(0.2)
        # Predict the classes
        self.fc1 = nn.Linear(len(Ks)*Co, C)
        # Projects the pooled aspect vector into the gate branch.
        self.fc_aspect = nn.Linear(L, Co)

    def forward(self, feature, aspect):
        """feature: (N, L) context token ids; aspect: (N, L') aspect token ids.
        Returns logits of shape (N, C)."""
        # Aspect embeddings >> TO CHECK: for me, they call aspect, the term related to the aspect category
        aspect_v = self.aspect_embed(aspect) # (N, L', D)
        aa = [F.relu(conv(aspect_v.transpose(1, 2))) for conv in self.convs3] # [(N,Co,L), ...]*len(Ks)
        # Max-pool over time to get one aspect vector per sample.
        aa = [F.max_pool1d(a, a.size(2)).squeeze(2) for a in aa]
        aspect_v = torch.cat(aa, 1) # pooled aspect representation, (N, L)
        # Embedding of the context
        feature = self.embed(feature) # (N, L, D)
        x = [torch.tanh(conv(feature.transpose(1, 2))) for conv in self.convs1] # [(N,Co,L), ...]*len(Ks)
        # ReLU gate conditioned on the aspect vector (broadcast over time).
        y = [torch.relu(conv(feature.transpose(1, 2)) + self.fc_aspect(aspect_v).unsqueeze(2)) for conv in self.convs2]
        # Element-wise gating of the feature branch.
        x = [i*j for i, j in zip(x, y)]
        # pooling method
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1) # concatenated pooled features, (N, len(Ks)*Co)
        x = self.dropout(x) # (N,len(Ks)*Co)
        logit = self.fc1(x) # (N,C)
        return logit
# ## def train
# In[95]:
# Create the model:
model = CNN_Gate_Aspect_Text()
# Hyperparameters for training:
num_epochs = 10
batch_size = 32
criterion = nn.CrossEntropyLoss()
# NOTE(review): lr=1e-0 is an unusually high learning rate for RMSprop --
# confirm this is intentional and not a leftover from an experiment.
optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-0, weight_decay=0.01)
# In[96]:
# Calculate the accuracy to evaluate the model
def accuracy(dataset, model):
    """Return classification accuracy (%) of *model* over *dataset*.

    Each row is split using the module-level l1/l2 lengths: the first l1
    columns are sentence ids, the last l2 columns the aspect ids.
    Side effect: leaves the model in eval() mode.
    """
    model.eval()
    n_correct = 0
    with torch.no_grad():
        for features, labels in DataLoader(dataset):
            logits = model(features[:, :l1], features[:, -l2:])
            predictions = torch.max(logits.data, 1)[1]
            n_correct += (predictions == labels).sum()
    return 100 * n_correct.item() / len(dataset)
# In[97]:
# define a function for training
def train(model, dataset_train, dataset_dev, num_epochs, batch_size, criterion, optimizer):
t = time()
train_loader = DataLoader(dataset_train, batch_size, shuffle=True)
model.train()
for epoch in range(num_epochs):
acc = 0.
for (X_batch, labels) in train_loader:
y_pre = model(X_batch[:, :l1], X_batch[:, -l2:])
loss = criterion(y_pre, labels.long())
optimizer.zero_grad()
loss.backward()
optimizer.step()
_, predicted = torch.max(y_pre.data, 1)
acc += (predicted == labels).sum().item()
acc = 100 * acc / len(dataset_train)
dev_acc = accuracy(dataset_dev, model)
print('Epoch [{}/{}] | exec time: {:.2f} secs | acc: {:.2f}% | dev_acc: {:.2f}%'.format(epoch+1, num_epochs, time()-t, acc, dev_acc))
# In[98]:
train(model, dataset_train, dataset_dev, num_epochs, batch_size, criterion, optimizer)
# ## Evaluation on dev set
# In[92]:
from sklearn.metrics import classification_report
def report(dataset, model):
    """Print an sklearn classification report for *model* over *dataset*.

    Uses the module-level l1/l2 lengths to split each row into sentence and
    aspect ids. Side effect: leaves the model in eval() mode.
    """
    y_pred = []
    y_true = []
    model.eval()
    with torch.no_grad():
        for features, labels in DataLoader(dataset):
            logits = model(features[:, :l1], features[:, -l2:])
            predicted = torch.max(logits.data, 1)[1]
            y_pred.append(int(predicted[0]))
            y_true.append(int(labels[0]))
    print(classification_report(y_true, y_pred))
# In[93]:
# Dev set
accuracy_dev = accuracy(dataset_dev, model)
print('Accuracy for dev set is : {:.2f} %'.format(accuracy_dev))
print('')
report(dataset_dev, model)
# In[94]:
# train set
accuracy_train = accuracy(dataset_train, model)
print('Accuracy for train set is : {:.2f} %'.format(accuracy_train))
print('')
report(dataset_train, model)
# In[ ]:
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User, auth
# Create your models here.
class categories(models.Model):
    """Top-level product category."""
    # Human-readable name, also used as the admin display string.
    category_name = models.CharField(max_length=255)

    def __str__(self):
        return self.category_name
class sub_categorie(models.Model):
    """Second-level category (not linked to a parent category by FK)."""
    sub_category_name = models.CharField(max_length=255)

    def __str__(self):
        return self.sub_category_name
class sub_sub_categorie(models.Model):
    """Third-level category (not linked to a parent sub-category by FK)."""
    sub_sub_category_name = models.CharField(max_length=255)

    def __str__(self):
        return self.sub_sub_category_name
class available_item(models.Model):
    """An item for sale, placed into the three independent category levels."""
    category = models.ForeignKey(categories, on_delete=models.CASCADE)
    sub_category = models.ForeignKey(sub_categorie, on_delete=models.CASCADE)
    sub_sub_category = models.ForeignKey(sub_sub_categorie, on_delete=models.CASCADE)
    # Stored under MEDIA_ROOT/photos.
    picture = models.ImageField(upload_to='photos')
    item_name = models.CharField(max_length=255)
    price_per_unit = models.FloatField()
    # "YES"/"NO" flag -- presumably whether the vendor installs the item;
    # TODO confirm the intended meaning with the app owner.
    installed_vendor = models.CharField(max_length=3, choices=(("YES", "YES"), ("NO", "NO")))
|
"""
Write a program to ask the user to enter their name and two integers where the second integer is less than the first integer.
Your program must display the following message:
Please enter your first name:
The user enters their first name <FirstName> at this point.
Your program then displays the message:
Hi <FirstName>, please enter the first integer:
The user enters the first integer <FirstInteger> at this point. Next, display the message:
Thank you. Please enter the second integer. This integer should be less than <FirstInteger>:
The user enters the second integer <SecondInteger> at this point.
Your program must output the following:
It is <Answer> that <SecondInteger> is less than <FirstInteger>.
where <Answer> has a boolean value of true or false based on <SecondInteger> being less than <FirstInteger>
<FirstInteger> and <SecondInteger> are the respective integers the user entered.
Example
Please enter your first name: dave
Hi Dave, please enter the first integer: 5
Thank you. Please enter the second integer. This integer should be less than 5: 6
It is False that 6 is less than 5.
# The lines of text you need to use to generate the output are given below for you. Do not alter anything inside the quotes.
# "Please enter your first name: "
# "Hi {}, please enter the first integer: "
# "Thank you. Please enter the second integer. This integer should be less than {}: "
# "It is {} that {} is less than {}."
"""
# Prompt for a name and two integers, then report whether the second
# integer really is smaller than the first.
first_name = input("Please enter your first name: ").capitalize()
first_int = int(input("Hi {}, please enter the first integer: ".format(first_name)))
second_int = int(input("Thank you. Please enter the second integer. This integer should be less than {}: ".format(first_int)))
# Fix: renamed from `bool`, which shadowed the builtin type.
is_less = first_int > second_int
print("It is {} that {} is less than {}.".format(is_less, second_int, first_int))
from dojosandninjas_app import app
from dojosandninjas_app.controllers import dojos_controller
from dojosandninjas_app.controllers import ninjas_controller
if __name__ == "__main__":
    # Start the Flask development server (debug reloader enabled);
    # serves on the default address below.
    app.run( debug = True )
# http://127.0.0.1:5000/
|
class Solution(object):
    def complexNumberMultiply(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str

        Multiply two complex numbers given as "real+imaginaryi" strings
        (e.g. "1+-1i") and return the product in the same format.
        """
        def parse(s):
            # Split on the '+' separating real from imaginary; drop the
            # trailing 'i' of the imaginary part.
            sep = s.find('+')
            return int(s[:sep]), int(s[sep + 1:-1])

        ar, ai = parse(a)
        br, bi = parse(b)
        # (ar + ai*i)(br + bi*i) = (ar*br - ai*bi) + (ar*bi + ai*br)*i
        real_part = ar * br - ai * bi
        imag_part = ar * bi + ai * br
        return str(real_part) + '+' + str(imag_part) + 'i'
|
from collections import defaultdict
def dist(a, b):
    """Manhattan (L1) distance between 2-D points *a* and *b*."""
    (ax, ay), (bx, by) = a, b
    return abs(ax - bx) + abs(ay - by)
def uniq_min_idx(data):
    """Return the index of the strictly unique minimum of *data*, or None
    if the minimum value occurs more than once (or *data* is empty).

    Bug fix: the original compared every element -- including data[idx]
    itself on the very first iteration -- against the current minimum, so a
    unique minimum sitting at index 0 (e.g. [1, 2]) was reported as a tie
    and the function wrongly returned None.  Iteration now starts at index 1.
    """
    if not data:
        return None
    idx = 0
    uniq = True
    for i, val in enumerate(data[1:], start=1):
        if val < data[idx]:
            idx = i
            uniq = True
        elif val == data[idx]:
            uniq = False
    return idx if uniq else None
def both():
    """Solve Advent of Code 2018 day 6, parts 1 and 2, in one grid sweep.

    Reads coordinates from the file 'input' (one "x, y" pair per line) and
    returns (largest finite closest-area size, size of the region whose
    total distance to all coordinates is < 10000).
    """
    with open('input') as f:
        coords = set()
        # Track the bounding box of all coordinates while parsing.
        top = left = float('inf')
        bot = right = 0
        for line in f:
            x, y = map(int, line.split(','))
            top = min(top, y)
            bot = max(bot, y)
            left = min(left, x)
            right = max(right, x)
            coords.add((x, y))
        coords = list(sorted(coords))
        # A coordinate strictly inside the bounding box owns a finite area.
        not_inf = lambda p: left < p[0] < right and top < p[1] < bot
        count = defaultdict(int)
        result_2 = 0
        for i in range(left, right):
            for j in range(top, bot):
                distances = [dist((i, j), c) for c in coords]
                mn = uniq_min_idx(distances)
                if mn is not None:
                    # Cell (i, j) is uniquely closest to coords[mn].
                    count[coords[mn]] += 1
                if sum(distances) < 10000:
                    result_2 += 1
        # Part 1: largest claimed area among coordinates with finite regions.
        result_1 = 0
        for point, val in count.items():
            if not_inf(point):
                result_1 = max(result_1, val)
        return result_1, result_2
def main():
    # Parts 1 and 2 share one grid sweep, so both() computes them together.
    print('Both:', *both())

if __name__ == '__main__':
    main()
|
from torchvision import transforms
from datasets.randaugment import *
from itertools import permutations
def _imagenet_normalize():
    # Standard ImageNet channel statistics, used by every pipeline except
    # the alternate-normalization baseline (flag '17').
    return transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])


def _eval_transform():
    # Deterministic Resize -> CenterCrop -> Tensor -> Normalize stack used
    # wherever evaluation-style preprocessing is needed.
    return transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        _imagenet_normalize(),
    ])


def _weak_aug_transform(degrees, translate, scale, brightness):
    # Weak augmentation (based on COVIDNet settings): affine jitter +
    # horizontal flip + brightness jitter on top of the evaluation stack.
    return transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.RandomAffine(degrees=degrees, translate=translate, scale=scale),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=brightness),
        transforms.ToTensor(),
        _imagenet_normalize(),
    ])


def get_data_transforms(purpose='baseline', baseline_flag='0', num_labeled=50):
    '''Data augmentation and normalization.

    Args:
        purpose (str): one of 'baseline', 'fixmatch', 'fixaug1', 'fixaug2',
            'fixaug3' -- selects which set of pipelines is returned.
        baseline_flag (str): comma-separated keys into the baseline
            augmentation table; '0' means no extra augmentation.
            Bug fix: the default used to be the int 0, which made the
            default call crash with a TypeError on the
            ``',' not in baseline_flag`` membership test -- it is now the
            string '0' so the "no augmentation" default actually works.
        num_labeled (int): kept for interface compatibility (unused here).

    Returns:
        data_transforms (dict): pipelines keyed by split -- 'train'/'val'/
            'test' for baseline, or 'train_lb'/'train_ulb'/'train_ulb_wa'/
            'val'/'test' for the FixMatch variants.

    Raises:
        ValueError: for an unknown *purpose* (the original fell through to
            an UnboundLocalError at the return statement).
    '''
    # TODO: Add transformation methods which wasn't added below.
    transforms_dict = {
        '1': transforms.ColorJitter(brightness=(0.9, 1.1)),
        '2': transforms.ColorJitter(brightness=(0.8, 1.2)),
        '3': transforms.ColorJitter(brightness=(0.7, 1.3)),
        '4': transforms.ColorJitter(contrast=(0.9, 1.1)),
        '5': transforms.ColorJitter(contrast=(0.8, 1.2)),
        '6': transforms.ColorJitter(contrast=(0.7, 1.3)),
        '7': transforms.RandomAffine(degrees=(-5, 5)),
        '8': transforms.RandomAffine(degrees=(-10, 10)),
        '9': transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
        '10': transforms.RandomAffine(degrees=0, translate=(0.15, 0.15)),
        '11': transforms.RandomAffine(degrees=0, translate=(0.2, 0.2)),
        '12': transforms.RandomAffine(degrees=0, scale=(0.95, 1.05)),
        '13': transforms.RandomAffine(degrees=0, scale=(0.9, 1.1)),
        '14': transforms.RandomAffine(degrees=0, scale=(0.85, 1.15)),
        '15': transforms.RandomAffine(degrees=0, scale=(0.8, 1.2)),
        '16': transforms.RandomHorizontalFlip(p=0.5),
        '17': transforms.Normalize([0.493, 0.493, 0.493], [0.246, 0.246, 0.246]),
        '18': transforms.Resize(272),
        '19': transforms.Resize(288),
        '20': transforms.Resize(304),
    }
    if purpose == 'baseline':
        data_transforms = {
            'val': _eval_transform(),
            'test': _eval_transform(),
        }
        if baseline_flag == '0':
            # No extra augmentation on the train split.
            data_transforms['train'] = _eval_transform()
        elif ',' not in baseline_flag:  # single augmentation key
            if baseline_flag == '17':
                # Alternate normalization stats replace the ImageNet ones
                # on every split.
                for dtype in ('train', 'val', 'test'):
                    data_transforms[dtype] = transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(224),
                        transforms.ToTensor(),
                        transforms_dict[baseline_flag]
                    ])
            elif baseline_flag in ('18', '19', '20'):
                # The flag's Resize replaces the default Resize(256).
                data_transforms['train'] = transforms.Compose([
                    transforms_dict[baseline_flag],
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    _imagenet_normalize()
                ])
            else:  # baseline_flag: 1 ~ 16 -- augmentation prepended
                data_transforms['train'] = transforms.Compose([
                    transforms_dict[baseline_flag],
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    _imagenet_normalize()
                ])
        else:  # multiple comma-separated keys (normalization cases excluded)
            bflags = baseline_flag.split(',')
            final_transforms = [transforms_dict[bflag] for bflag in bflags]
            final_transforms.extend([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                _imagenet_normalize()
            ])
            data_transforms['train'] = transforms.Compose(final_transforms)
    elif purpose == 'fixmatch':  # FixMatch: plain labeled/unlabeled, weak WA
        data_transforms = {
            'train_lb': _eval_transform(),
            'train_ulb': _eval_transform(),
            # This is based on COVIDNet settings.
            'train_ulb_wa': _weak_aug_transform((-10, 10), (0.1, 0.1), (0.85, 1.15), (0.9, 1.1)),
            'val': _eval_transform(),
            'test': _eval_transform(),
        }
    elif purpose == 'fixaug1':  # FixMatch with milder jitter on lb/ulb views
        data_transforms = {
            'train_lb': _weak_aug_transform((-5, 5), (0.1, 0.1), (0.925, 1.075), (0.95, 1.05)),
            'train_ulb': _weak_aug_transform((-5, 5), (0.1, 0.1), (0.925, 1.075), (0.95, 1.05)),
            # This is based on COVIDNet settings.
            'train_ulb_wa': _weak_aug_transform((-10, 10), (0.1, 0.1), (0.85, 1.15), (0.9, 1.1)),
            'val': _eval_transform(),
            'test': _eval_transform(),
        }
    elif purpose in ('fixaug2', 'fixaug3'):  # FixMatch + RandAugment strong view
        ulb_wa = _weak_aug_transform((-10, 10), (0.1, 0.1), (0.85, 1.15), (0.9, 1.1))
        data_transforms = {
            'train_lb': _weak_aug_transform((-10, 10), (0.1, 0.1), (0.85, 1.15), (0.9, 1.1)),
            'train_ulb': _weak_aug_transform((-10, 10), (0.1, 0.1), (0.85, 1.15), (0.9, 1.1)),
            'train_ulb_wa': ulb_wa,
            'val': _eval_transform(),
            'test': _eval_transform(),
        }
        # Strong augmentation: RandAugment is prepended to the weak stack.
        if purpose == 'fixaug2':
            ulb_wa.transforms.insert(0, RandAugment(3))
        else:
            ulb_wa.transforms.insert(0, RandAugment(3, purpose='fixaug3'))
    else:
        # Fail loudly on typos instead of UnboundLocalError at return time.
        raise ValueError("unknown purpose: %r" % (purpose,))
    return data_transforms
# Generated by Django 3.1.2 on 2020-10-28 04:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: Product2.pub_date becomes a plain DateField."""

    dependencies = [
        ('shop', '0019_auto_20201028_1005'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product2',
            name='pub_date',
            field=models.DateField(),
        ),
    ]
|
#!/usr/bin/env python
#coding=utf-8
import sys, codecs, json, os, ConfigParser, logging
from sklearn.feature_extraction.text import TfidfVectorizer
from SPARQLWrapper import SPARQLWrapper, JSON
from nltk.corpus import stopwords
from nltk.classify import NaiveBayesClassifier
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import LinearSVC
from nltk.stem import PorterStemmer
class Utility:
    ''' Utilities to be used by all classes.

    Reads all file names and directories from YoutubeVideoClassifier.config,
    creates the input/output directories when missing, and configures a
    file-based logger. (Python 2 module: uses ConfigParser.)
    '''
    def __init__(self):
        self.config = ConfigParser.ConfigParser()
        self.config.read("YoutubeVideoClassifier.config")
        # File names (relative) come from the [GLOBAL] config section.
        self.movies_file_name = self.config.get('GLOBAL', 'movies_file')
        self.actors_file_name = self.config.get('GLOBAL', 'actors_file')
        self.tvshows_file_name = self.config.get('GLOBAL', 'tvshows_file')
        self.test_file_name = self.config.get('GLOBAL', 'test_file')
        self.logging_file_name = self.config.get('GLOBAL', 'log_file')
        self.input_dir = self.config.get('GLOBAL', 'input_dir')
        self.output_dir = self.config.get('GLOBAL', 'output_dir')
        # Resolve input/output dirs against the current working directory
        # and create them on first run.
        cur_dir = os.getcwd()
        self.input_dir = os.path.join(cur_dir, self.input_dir)
        if not os.path.exists(self.input_dir):
            os.makedirs(self.input_dir)
        self.output_dir = os.path.join(cur_dir, self.output_dir)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # Absolute paths used by the subclasses.
        self.movies_file = os.path.join(self.input_dir, self.movies_file_name)
        self.actors_file = os.path.join(self.input_dir, self.actors_file_name)
        self.tvshows_file = os.path.join(self.input_dir, self.tvshows_file_name)
        self.test_file = os.path.join(self.input_dir, self.test_file_name)
        self.logging_file = os.path.join(self.output_dir, self.logging_file_name)
        logging.basicConfig(filename=self.logging_file, level=logging.INFO)
        logging.info("Initialized logging")
class DataSetCollector(Utility):
    ''' Fetch film, actor and TV-show names from dbpedia and store in files.

    Each collect* method runs a configured SPARQL query against the public
    dbpedia endpoint and writes one resource name per line to its file.
    '''

    # Common URI prefix of every dbpedia resource returned by the queries.
    RESOURCE_PREFIX = "http://dbpedia.org/resource/"

    def __init__(self):
        Utility.__init__(self)
        self.sparql = SPARQLWrapper("http://dbpedia.org/sparql")
        # SPARQL query strings are kept in the [QUERY] config section.
        self.getFilms = self.config.get('QUERY', 'getFilmsQuery')
        self.getTvShows = self.config.get('QUERY', 'getTvShowsQuery')
        self.getActors = self.config.get('QUERY', 'getActorsQuery')

    def run_main(self):
        '''Collect all three training datasets.'''
        self.collectFilms()
        self.collectActors()
        self.collectTvShows()

    def _resource_name(self, uri):
        '''Return the readable resource name for a dbpedia URI, or None.

        BUG FIX: the old code called uri.strip(RESOURCE_PREFIX). str.strip
        treats its argument as a SET of characters to remove from both ends,
        so names made of prefix characters (e.g. "Heat") were mangled.
        Remove the exact prefix instead.
        '''
        if not uri:
            return None
        if uri.startswith(self.RESOURCE_PREFIX):
            uri = uri[len(self.RESOURCE_PREFIX):]
        return uri or None

    def _bindings(self, query):
        '''Run query against dbpedia and return its result bindings (or None).'''
        self.sparql.setQuery(query)
        self.sparql.setReturnFormat(JSON)
        results = self.sparql.query().convert()
        section = results.get('results')
        return section and section.get('bindings')

    def collectFilms(self):
        '''Write one film name per line to the movies file.'''
        self.movies_file_fd = codecs.open(self.movies_file, 'w', 'utf-8')
        films = self._bindings(self.getFilms)
        if films:
            for result in films:
                try:
                    movie_name = self._resource_name(result.get('movie').get('value'))
                    if not movie_name:
                        continue
                    self.movies_file_fd.write("%s\n" % (movie_name))
                except Exception:
                    # Best effort: one malformed binding must not abort the crawl.
                    logging.info("Exception while parsing movie data")
                    continue
        # FIX: the old empty-result early return leaked the open file handle.
        self.movies_file_fd.close()

    def collectActors(self):
        '''Write one actor name per line to the actors file.'''
        self.actors_file_fd = codecs.open(self.actors_file, 'w', 'utf-8')
        # FIX: empty results previously crashed with "for result in None".
        actors = self._bindings(self.getActors)
        if actors:
            for result in actors:
                try:
                    actor_name = self._resource_name(result.get('actor').get('value'))
                    if not actor_name:
                        continue
                    self.actors_file_fd.write("%s\n" % (actor_name))
                except Exception:
                    logging.info("Exception while parsing actors data")
                    continue
        self.actors_file_fd.close()

    def collectTvShows(self):
        '''Write one TV-show name per line to the tvshows file.'''
        self.tvshows_file_fd = codecs.open(self.tvshows_file, 'w', 'utf-8')
        # FIX: empty results previously crashed with "for result in None".
        tvshows = self._bindings(self.getTvShows)
        if tvshows:
            for result in tvshows:
                try:
                    tvshow_name = self._resource_name(result.get('tvshow').get('value'))
                    if not tvshow_name:
                        continue
                    self.tvshows_file_fd.write("%s\n" % (tvshow_name))
                except Exception:
                    logging.info("Exception while parsing tvshow data")
                    continue
        self.tvshows_file_fd.close()
class YoutubeVideoClassifier(Utility):
    ''' Use the collected data as training set and classify test data.

    Trains a Naive Bayes and a LinearSVC classifier on bag-of-words
    features built from the crawled movie/actor/tvshow names, then labels
    each test video (title + description) and writes one label per line.
    '''
    def __init__(self):
        Utility.__init__(self)
        self.nb_output_file_name = self.config.get('GLOBAL', 'nb_output_file')
        self.svm_output_file_name = self.config.get('GLOBAL', 'svm_output_file')
        self.nb_output = os.path.join(self.output_dir, self.nb_output_file_name)
        self.svm_output = os.path.join(self.output_dir, self.svm_output_file_name)
        # List of (features_dict, label) pairs filled by feature_extraction().
        self.train_features = []
        self.stopwords_set = set(stopwords.words('english'))

    def run_main(self):
        '''Full pipeline: load, featurize, train, classify.'''
        self.pre_processing()
        self.feature_extraction()
        self.classification()
        self.testing()

    def pre_processing(self):
        self.load_data()

    def load_data(self):
        # Training lists come from the DataSetCollector output files.
        self.load_movies()
        self.load_actors()
        self.load_tvshows()
        self.load_test_data()

    def load_movies(self):
        '''Read one movie name per line into self.movies_list.'''
        self.movies_list = []
        movies_fd = codecs.open(self.movies_file)
        for movie in movies_fd.readlines():
            if not movie: continue
            self.movies_list.append(movie)
        movies_fd.close()

    def load_actors(self):
        '''Read one actor name per line into self.actors_list.'''
        self.actors_list = []
        actors_fd = codecs.open(self.actors_file)
        for actor in actors_fd.readlines():
            if not actor: continue
            self.actors_list.append(actor)
        actors_fd.close()

    def load_tvshows(self):
        '''Read one TV-show name per line into self.tvshows_list.'''
        self.tvshows_list = []
        tvshows_fd = codecs.open(self.tvshows_file)
        for tvshow in tvshows_fd.readlines():
            if not tvshow:continue
            self.tvshows_list.append(tvshow)
        tvshows_fd.close()

    def load_test_data(self):
        # Test file is JSON; presumably a list of {title, description} dicts
        # (see testing()) — TODO confirm the schema.
        json_data = open(self.test_file)
        self.test_data = json.load(json_data)

    def feature_selection(self, features_list):
        '''Lower-case, strip and drop stopwords; return {token: True} dict
        in the presence-of-word format expected by nltk classifiers.'''
        selected_features = []
        for feat in features_list:
            if feat and feat.strip() and feat.lower() not in self.stopwords_set:
                selected_features.append((feat.strip().lower(), True))
        return dict(selected_features)

    def feature_extraction(self):
        '''Build labeled training features from the three name lists.'''
        # dbpedia names use '_' as the word separator.
        for item in self.tvshows_list:
            if not item:continue
            selected_features = self.feature_selection(item.replace("_"," ").split(" "))
            self.train_features.append((selected_features, 'tvshow'))
        for item in self.movies_list:
            if not item: continue
            selected_features = self.feature_selection(item.replace("_"," ").split(" "))
            self.train_features.append((selected_features, 'movie'))
        for item in self.actors_list:
            if not item: continue
            selected_features = self.feature_selection(item.replace("_"," ").split(" "))
            self.train_features.append((selected_features, 'celebrity'))

    def classification(self):
        #Training NB Classifier
        self.nb_classifier = NaiveBayesClassifier.train(self.train_features)
        #Training SVM classifier
        self.svm_classifier = SklearnClassifier(LinearSVC())
        self.svm_classifier.train(self.train_features)

    def testing(self):
        '''Classify each test instance with both models; write one label per
        line to the NB and SVM output files (lines stay aligned with input).'''
        nb_fd = codecs.open(self.nb_output, 'w', 'utf-8')
        svm_fd = codecs.open(self.svm_output, 'w', 'utf-8')
        for instance in self.test_data:
            try:
                if not instance:continue
                test_features = instance.get('title').split(" ")
                test_features.extend(instance.get('description').split(" "))
                selected_features = self.feature_selection(test_features)
                label = self.nb_classifier.classify(selected_features)
                nb_fd.write("%s\n" % (label))
                label = self.svm_classifier.classify(selected_features)
                svm_fd.write("%s\n" % (label))
            except:
                # Best effort: skip malformed test entries (missing keys, etc.).
                logging.info("Exception in test data ")
                continue
        nb_fd.close()
        svm_fd.close()
class RelatedVideoGenerator(Utility):
    ''' Related video suggestions based on jaccard similarity and vector similarity.'''
    def __init__(self):
        Utility.__init__(self)
        self.related_tfidf_file_name = self.config.get('GLOBAL', 'tfidf_related_output')
        self.related_jaccard_file_name = self.config.get('GLOBAL', 'jaccard_related_output')
        self.related_tfidf = os.path.join(self.output_dir, self.related_tfidf_file_name)
        self.related_jaccard = os.path.join(self.output_dir, self.related_jaccard_file_name)
        self.stopwords_set = set(stopwords.words('english'))
        self.stemmer = PorterStemmer()
        self.test_data = []
        # Parallel representations per test video: token set (jaccard) and
        # joined token string (tfidf).
        self.features_set_list = []
        self.features_string_list = []

    def run_main(self):
        self.load_data()
        self.select_features()
        self.find_related_jaccard()
        self.find_related_tfidf()

    def load_data(self):
        json_data = open(self.test_file)
        self.test_data = json.load(json_data)

    def select_features(self):
        '''Stem and stopword-filter title+description tokens per video.'''
        for instance in self.test_data:
            try:
                feature = instance.get('title') + " " + instance.get('description')
                feature = feature.split(" ")
                feature = [self.stemmer.stem(feat.lower().strip()) for feat in feature if feat and feat.lower().strip() not in self.stopwords_set]
                feature_string = " ".join(feature)
                self.features_set_list.append(set(feature))
                self.features_string_list.append(feature_string)
            except:
                # Skip malformed test entries (missing title/description).
                logging.info("Exception in test data")
                continue

    def find_related_jaccard(self):
        '''Write the top-3 jaccard-similar videos for every video.
        NOTE(review): the write below indexes related[0..2] and assumes at
        least 3 entries exist — confirm minimum test-set size.'''
        related_fd = codecs.open(self.related_jaccard, 'w', 'utf-8')
        for index, feature in enumerate(self.features_set_list):
            related = self.get_relevant_entry(feature, index)
            related_fd.write("%s\t%s\n%s\n%s\n%s\n%s\n\n" % (index, related, self.features_set_list[index], self.features_set_list[related[0]], self.features_set_list[related[1]], self.features_set_list[related[2]]))
        related_fd.close()

    def get_relevant_entry(self, feature, index):
        '''Jaccard similarity of `feature` against every other token set;
        the entry itself is forced to 0 so it cannot match itself.'''
        relevant_value = []
        for ind, feat in enumerate(self.features_set_list):
            if ind == index:relevant_value.append(0);continue
            relevant_value.append(len(feat.intersection(feature))/float(len(feat.union(feature))))
        return self.get_similar(relevant_value)

    def find_related_tfidf(self):
        '''Write the top-3 cosine-similar (tfidf) videos for every video.'''
        related_fd = codecs.open(self.related_tfidf, 'w', 'utf-8')
        vect = TfidfVectorizer(min_df=1)
        tfidf = vect.fit_transform(self.features_string_list)
        # Dense pairwise cosine-similarity matrix (tfidf rows are normalized).
        array = (tfidf * tfidf.T).A
        array_list = array.tolist()
        for i,entry in enumerate(array_list):
            entry[i] = 0
            related = self.get_similar(entry)
            related_fd.write("%s\t%s\n%s\n%s\n%s\n%s\n\n" % (i, related, self.features_string_list[i], self.features_string_list[related[0]], self.features_string_list[related[1]], self.features_string_list[related[2]]))
        related_fd.close()

    def get_similar(self, entry):
        '''Indices of the 3 largest similarity values (all of them if <3).'''
        if not entry:return []
        if len(entry) <3 : return entry
        result = sorted(range(len(entry)), key=lambda i:entry[i], reverse=True)
        return result[:3]
if __name__ == "__main__":
    # Mode selects the pipeline stage:
    #   0 = crawl dbpedia training data
    #   1 = train classifiers and label the test data
    #   2 = generate related-video suggestions
    mode = int(sys.argv[1])
    if mode == 0:
        data_obj = DataSetCollector()
        data_obj.run_main()
    elif mode == 1:
        y_obj = YoutubeVideoClassifier()
        y_obj.run_main()
    elif mode == 2:
        r_obj = RelatedVideoGenerator()
        r_obj.run_main()
    else:
        # Python 2 print statement — this module is Python 2 only.
        print "Please enter the appropriate mode"
|
#
# @lc app=leetcode id=365 lang=python3
#
# [365] Water and Jug Problem
#
# @lc code=start
class Solution:
    """LeetCode 365 — Water and Jug Problem.

    By Bezout's identity, a target amount is measurable iff it does not
    exceed the combined capacity and is a multiple of gcd(jug1, jug2).
    Time: O(log(jug1Capacity * jug2Capacity)); space: O(1).
    """

    def canMeasureWater(self, jug1Capacity: int, jug2Capacity: int, targetCapacity: int) -> bool:
        """Return True when exactly targetCapacity can be measured."""
        if targetCapacity == 0:
            return True
        if jug1Capacity + jug2Capacity < targetCapacity:
            return False
        return targetCapacity % self.gcd(jug1Capacity, jug2Capacity) == 0

    def gcd(self, x, y):
        """Greatest common divisor via iterative Euclid."""
        while y:
            x, y = y, x % y
        return x
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-24 08:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add German verbose_name labels to every
    Profile field (display text only; no schema-relevant changes besides
    the declared max_lengths)."""

    dependencies = [
        ('survey', '0004_auto_20170424_1038'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='city',
            field=models.CharField(max_length=40, verbose_name='Stadt'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='date_posted',
            field=models.DateField(verbose_name='Erstellugsdatum'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='E-Mail'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='first_name',
            field=models.CharField(max_length=30, verbose_name='Vorname'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='last_name',
            field=models.CharField(max_length=50, verbose_name='Nachname'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='message',
            field=models.TextField(verbose_name='Persönliche Nachricht'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='occupation',
            field=models.CharField(max_length=40, verbose_name='Beruf'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone_number',
            field=models.CharField(max_length=20, verbose_name='Telefonnummer'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='street',
            field=models.CharField(max_length=100, verbose_name='Adresse'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='zip_code',
            field=models.CharField(max_length=10, verbose_name='PLZ'),
        ),
    ]
|
#!/usr/bin/env python
""" A chatbot that listens to how peoples' days are going and
recommends a Spotify playlist based on it's interpretation of their mood.
"""
import re
import random
import string
import nltk
from nltk.corpus import stopwords
from EmoClassifier import emo2vec, model, emotions, emo_model
from PLRetriever import callAPI
from bot_responses import *
import numpy as np
from nltk.chat import *
import os
import time
from slackclient import SlackClient
# NOTE(review): `samples` appears unused in this module — confirm it can be removed.
samples = []
# Cache NLTK's English stopword list once at import time.
cachedStopWords = stopwords.words("english")
def reflect(fragment):
    """Mirror first/second-person words in *fragment* using the reflections
    table (e.g. "i am" -> "you are"); unknown words pass through unchanged."""
    words = fragment.lower().split(' ')
    mirrored = [reflections[word] if word in reflections else word
                for word in words]
    return ' '.join(mirrored)
def playlist_recommender(list_of_strings):
    """Map a free-text mood description to a Spotify playlist URL.

    Strips punctuation, lower-cases and drops stopwords, finds the single
    most word2vec-similar word to the remaining tokens, classifies its
    vector into an emotion with emo_model, then asks the playlist API for
    that emotion and returns one random playlist URL from the results.
    """
    mood = " ".join("".join(char for char in sent if char not in string.punctuation) for sent in list_of_strings).lower().split(' ')
    prediction_words = [word for word in mood if word not in cachedStopWords]
    # topn=1 -> single (word, similarity) pair; similarity itself is unused.
    similar_word_vectors, degree_of_similarity = model.most_similar(positive=prediction_words, topn=1)[0]
    prediction = model[similar_word_vectors].reshape(1, -1)
    # emo_model predicts a column index into the emotions DataFrame.
    call = list(emotions.columns)[emo_model.predict(prediction)]
    data = callAPI(call)
    items = data['playlists']['items']
    playlist = random.choice(items)['external_urls']['spotify']
    return playlist
def analyze(statement):
    """
    Match user's input to responses in psychobabble. Then reflect candidate response.

    Each entry of `convos` is (regex_pattern, [candidate_responses]); the
    first matching pattern wins and `{0}` in the chosen response is replaced
    by the reflected matched text.

    NOTE(review): returns None implicitly when no pattern matches — confirm
    callers handle that (handle_command posts the value to Slack as-is).
    """
    statement = statement.lower()
    for item in convos:
        match = re.search(item[0],statement)
        if match:
            response = np.random.choice(item[1])
            return response.replace('{0}',reflect(match.group(0)))
# starterbot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")

# constants
# NOTE(review): if BOT_ID is unset, os.environ.get returns None and the
# concatenation below raises TypeError at import time — confirm deployment
# always sets BOT_ID.
AT_BOT = "<@" + BOT_ID + ">"
REQUEST_COMMAND = "retrieval"

# instantiate Slack & Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def handle_command(command, channel):
    """
    Receives commands directed at the bot and determines if they
    are valid commands. If so, then acts on the commands. If not,
    returns back what it needs for clarification.
    """
    feelings_list = []
    feelings_list.append(command)
    # NOTE(review): this greeting is only used when overwritten on the
    # request path below; on the else-path it is never posted — confirm
    # whether it should be the fallback reply.
    response = "Hello Beautiful! How are you feeling today? Use the *" + REQUEST_COMMAND + \
        "* request to get my musical interpretation of your mood today."
    if command.startswith(REQUEST_COMMAND):
        # Acknowledge first, then post the playlist link as a Slack hyperlink.
        slack_client.api_call("chat.postMessage", channel=channel,text="reading your mood...", as_user=True)
        response = "<" + playlist_recommender(feelings_list) + "|Click here!!!>"
        slack_client.api_call("chat.postMessage",
                              channel=channel,
                              text="...aaand here's your perfect playlist for this moment :)\n" + response + "\n SO FRESH! Listen and enjoy",
                              unfurl_links=True, as_user=True)
    else:
        # Small-talk path: analyze() may return None when nothing matches.
        response3 = analyze(command)
        slack_client.api_call("chat.postMessage", channel=channel,text=response3, as_user=True)
def parse_slack_output(slack_rtm_output):
    """
    The Slack Real Time Messaging API is an events firehose.
    This parsing function returns (None, None) unless a message is
    directed at the Bot (mentions AT_BOT), in which case it returns
    (command_text, channel) with the text after the mention lower-cased
    and stripped.
    """
    for event in slack_rtm_output or []:
        if not event or 'text' not in event:
            continue
        text = event['text']
        if AT_BOT not in text:
            continue
        # Return text after the @ mention, whitespace removed.
        command = text.split(AT_BOT)[1].strip().lower()
        return command, event['channel']
    return None, None
if __name__ == "__main__":
    READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
    if slack_client.rtm_connect():
        print("StarterBot connected and running!")
        # Poll the RTM firehose forever, handling one @-mention per tick.
        while True:
            command, channel = parse_slack_output(slack_client.rtm_read())
            if command and channel:
                handle_command(command, channel)
            time.sleep(READ_WEBSOCKET_DELAY)
    else:
        print("Connection failed. Invalid Slack token or bot ID?")
|
from pulp import *

# Maximize x + y subject to the constraints below, x, y >= 0.
m = LpProblem(sense=LpMaximize)
x = LpVariable("x",lowBound=0)
y = LpVariable("y",lowBound=0)
# Objective.
m += x+y
# NOTE(review): x*x multiplies two LpVariables; PuLP only supports linear
# (affine) expressions, so this constraint is expected to raise a TypeError
# at runtime — confirm whether a linearization was intended here.
m += x*x -y ==0
m += 0.5 * x -y +1 ==0
m += y == 1
m.solve()
print(value(x),value(y),value(m.objective))
# ENGINE
# -------------------------
import random
from typing import Iterable, List
from core.common import Queuer, Player, max_mmr, min_mmr, Lobby, MatchMaker
TEAM_SIZE = 5
class CompositeMatchmaker(MatchMaker):
    """MatchMaker driven by a pluggable lobby-finding strategy.

    The strategy receives the queue and returns a (team1, team2) pair, or
    None when no acceptable lobby can currently be formed.
    """

    def __init__(self, find_lobby):
        self._find_lobby = find_lobby

    def find_lobbies(self, queue: List[Queuer], found_lobby_callback) -> None:
        """Keep asking the strategy for lobbies until it returns None,
        reporting each found lobby through the callback."""
        while True:
            lobby = self._find_lobby(queue)
            if lobby is None:
                return
            team_one, team_two = lobby
            found_lobby_callback(team_one, team_two)
def find_by_sorted_mmr(queue: List[Queuer]) -> (List[Queuer], List[Queuer]):
    """Pick a random mmr-adjacent window of 2*TEAM_SIZE queuers and alternate
    them into two teams; returns None when the queue is too small.

    The modulo indexing keeps the original wrap-around behaviour when the
    random window would run past the end of the sorted list.
    """
    if len(queue) < TEAM_SIZE * 2:
        return None
    by_mmr = sorted_queue(queue)
    start = random.randint(0, len(by_mmr) - TEAM_SIZE * 2)
    team_one, team_two = [], []
    for offset in range(TEAM_SIZE):
        team_one.append(by_mmr[(start + 2 * offset) % len(by_mmr)])
        team_two.append(by_mmr[(start + 2 * offset + 1) % len(by_mmr)])
    return team_one, team_two
def sorted_queue(queue):
    """Return a new list of queue entries sorted ascending by player mmr."""
    return sorted(queue, key=lambda entry: entry.player.mmr)
def max_mmr_diff(mmr_boundary):
    """Build a lobby filter that accepts lobbies whose total mmr spread is
    strictly below *mmr_boundary*."""
    def _filter(t1, t2):
        return _max_mmr_diff_filter(t1, t2, mmr_boundary)
    return _filter
def _max_mmr_diff_filter(t1, t2, mmr_diff_boundary):
    """True when the mmr spread across both teams is below the boundary."""
    all_players = [queuer.player for queuer in t1] + [queuer.player for queuer in t2]
    spread = abs(max_mmr(all_players) - min_mmr(all_players))
    return spread < mmr_diff_boundary
def max_mmr_diff_or_long_wait(mmr_diff_boundary, wait_boundary):
    """Build a lobby filter accepting lobbies that are either fair enough
    (mmr spread below the boundary) or contain a long-waiting queuer."""
    def _filter(t1, t2):
        return (_max_mmr_diff_filter(t1, t2, mmr_diff_boundary)
                or _long_wait_filter(t1, t2, wait_boundary))
    return _filter
def _long_wait_filter(t1, t2, wait_boundary):
return any(q.waited > wait_boundary for q in t1 + t2)
def filtered_find_by_sorted_mmr(num_tries: int, lobby_filter) -> Lobby:
    """Strategy factory: retry find_by_sorted_mmr up to *num_tries* times,
    keeping only lobbies that pass *lobby_filter*."""
    def _strategy(queue):
        return _filtered_find_by_sorted_mmr(queue, num_tries, lobby_filter)
    return _strategy
def fair_method(queue: List[Queuer]) -> (List[Queuer], List[Queuer]):
    """Try to build a fair lobby around each of the first (longest-queued)
    entries in turn; give up after min(100, len(queue)) attempts."""
    if len(queue) < TEAM_SIZE * 2:
        return None
    attempts = min(100, len(queue))
    for candidate in queue[:attempts]:
        lobby = find_lobby_for(candidate, queue)
        if lobby is not None:
            return lobby
    return None
def find_lobby_for(queuer: Queuer, queue: List[Queuer]) -> (List[Queuer], List[Queuer]):
    """Try to build a lobby of 2*TEAM_SIZE mmr-adjacent queuers around *queuer*.

    Takes the 2*TEAM_SIZE entries starting at *queuer*'s position in the
    mmr-sorted queue (shifting the window left when too close to the end),
    alternates adjacent entries into two teams, and returns the lobby only
    if it passes the wait-scaled fairness check — otherwise None.

    Assumes len(queue) >= 2*TEAM_SIZE and queuer in queue (callers check).
    """
    sorted_by_mmr = sorted_queue(queue)
    ind = index_of(queuer, sorted_by_mmr)
    pick_right = len(queue) - ind
    if pick_right >= TEAM_SIZE*2:
        picked = sorted_by_mmr[ind: ind+TEAM_SIZE*2]
    else:
        # Not enough entries to the right of queuer; extend the window left.
        pick_left = TEAM_SIZE*2 - pick_right
        picked = sorted_by_mmr[ind - pick_left: ind + pick_right]
    t1, t2 = [], []
    for i in range(TEAM_SIZE):
        # Alternate mmr-adjacent entries between the two teams.
        t1.append(picked[2*i])
        t2.append(picked[2*i+1])
    return (t1, t2) if _is_good_enough(t1, t2) else None
def _is_good_enough(t1, t2):
    """Lobby quality check: the tolerated mmr spread grows with the longest
    wait time (and twice as fast once someone waited 300+ units)."""
    longest_wait = _max_wait(t1, t2)
    if longest_wait < 300:
        allowed_spread = 100 + longest_wait
    else:
        allowed_spread = 100 + longest_wait * 2
    return _max_mmr_diff_filter(t1, t2, allowed_spread)
def _max_wait(t1, t2):
return max(q.waited for q in t1 + t2)
def index_of(el, arr):
    """Return the index of the first element of *arr* equal to *el*.

    Raises IndexError when the element is not present (same exception type
    as the original implementation, which indexed an empty list).

    FIX: the previous version built a full list of ALL matching indices
    before taking the first; scan lazily and stop at the first hit instead.
    """
    for i, x in enumerate(arr):
        if x == el:
            return i
    raise IndexError("index_of: element not found")
def _filtered_find_by_sorted_mmr(queue: List[Queuer], num_tries: int, lobby_filter) -> Lobby:
    """Repeatedly draw a random lobby with find_by_sorted_mmr and return the
    first one accepted by *lobby_filter*; None after num_tries rejections.

    The size guard up front guarantees find_by_sorted_mmr never returns
    None inside the loop.
    """
    if len(queue) < TEAM_SIZE*2:
        return None
    for i in range(num_tries):
        t1, t2 = find_by_sorted_mmr(queue)
        valid_lobby = lobby_filter(t1, t2)
        if valid_lobby:
            return t1, t2
    return None
def _get_random_slice(array: Iterable, slice_size: int) -> list:
copy = list(array)
random.shuffle(copy)
return copy[0: slice_size]
def _pick_teams_random(queuers: Iterable[Queuer]) -> (List[Player], List[Player]):
    """Draw 2*TEAM_SIZE entries at random and split them down the middle."""
    drawn = _get_random_slice(queuers, TEAM_SIZE * 2)
    return drawn[:TEAM_SIZE], drawn[TEAM_SIZE:TEAM_SIZE * 2]
def _pick_teams_unfair(queuers: Iterable[Queuer]) -> (List[Player], List[Player]):
    """Pick 2*TEAM_SIZE entries at random, sort by mmr, then make the lobby
    deliberately lopsided: low-mmr half vs high-mmr half.

    NOTE(review): the sort key reads p.mmr directly, while every other
    helper in this module reads q.player.mmr on Queuer objects (see
    sorted_queue) — this only works if callers pass Player-like objects,
    which contradicts the Iterable[Queuer] annotation. Confirm which is
    intended before using this function.
    """
    picked_queuers = _get_random_slice(queuers, TEAM_SIZE * 2)
    picked_queuers.sort(key=lambda p: p.mmr)
    team_1 = picked_queuers[0: TEAM_SIZE]
    team_2 = picked_queuers[TEAM_SIZE: TEAM_SIZE * 2]
    return team_1, team_2
def _pick_teams_simple(queue: List[Queuer]):
    """Deterministic lobby: the first 2*TEAM_SIZE entries in queue order,
    returned as a single-element list of (team1, team2)."""
    first_team = queue[:TEAM_SIZE]
    second_team = queue[TEAM_SIZE:TEAM_SIZE * 2]
    return [(first_team, second_team)]
# Ready-made matchmakers, from least to most selective strategies:
# - simple: any random mmr-adjacent window.
# - advanced: retries until the lobby's mmr spread is < 300.
# - advanced2: like advanced, but also accepts lobbies with a 5-minute waiter.
# - fair: builds lobbies around the longest-waiting queuers with a
#   wait-scaled fairness threshold.
simple_matchmaker = CompositeMatchmaker(find_by_sorted_mmr)
advanced_matchmaker = CompositeMatchmaker(filtered_find_by_sorted_mmr(50, max_mmr_diff(300)))
advanced_matchmaker2 = CompositeMatchmaker(filtered_find_by_sorted_mmr(50, max_mmr_diff_or_long_wait(300, 60*5)))
fair_matchmaker = CompositeMatchmaker(fair_method)
|
from collections import Counter

# Multiplication products of all factor pairs (i, j) with 2 <= i, j <= 6.
my_list = [i * j for i in range(2, 7) for j in range(2, 7)]
# Keep only products that occur exactly once.
# FIX: my_list.count(el) inside the comprehension was O(n^2); count every
# element once with collections.Counter instead.
counts = Counter(my_list)
new_list = [el for el in my_list if counts[el] < 2]
print(f'Исходный список {my_list}')
print(f'Новый список {new_list}')
|
from scipy.signal import filtfilt, butter
def butterworth(x, order, freq=0.05):
    """Zero-phase low-pass Butterworth filter.

    Designs an *order*-th order Butterworth filter with normalized cutoff
    *freq* (fraction of the Nyquist frequency) and applies it forward and
    backward with filtfilt, so the result has no phase lag.
    """
    numerator, denominator = butter(order, freq)
    filtered = filtfilt(numerator, denominator, x)
    return filtered
|
"""Integration tests using YORM as a persistence model."""
# pylint: disable=missing-docstring,no-self-use,misplaced-comparison-constant
import os
from expecter import expect
import yorm
from yorm.types import String
# CLASSES #####################################################################
class Config:
    """Domain model: plain value object for a single config entry."""

    def __init__(self, key, name=None, root=None):
        self.key = key
        # Falsy name/root values normalize to the empty string.
        self.name = name if name else ""
        self.root = root if root else ""
@yorm.attr(key=String)
@yorm.attr(name=String)
@yorm.sync("{self.root}/{self.key}/config.yml",
           auto_create=False, auto_save=False)
class ConfigModel:
    """Persistence model: `key` and `name` are mapped by YORM to
    {root}/{key}/config.yml; the file is neither auto-created nor
    auto-saved."""
    def __init__(self, key, root):
        self.key = key
        self.root = root
        print(self.key)
        # Not declared with @yorm.attr, so never written to / read from disk.
        self.unmapped = 0
    @staticmethod
    def pm_to_dm(model):
        """Convert a persistence model into a plain Config domain object."""
        config = Config(model.key)
        config.name = model.name
        config.root = model.root
        return config
class ConfigStore:
    """Repository-style accessor for ConfigModel instances under one root."""
    def __init__(self, root):
        self.root = root
    def read(self, key):
        # yorm.find returns None when no matching file exists.
        return yorm.find(ConfigModel, self.root, key)
# TESTS #######################################################################
class TestPersistanceMapping: # pylint: disable=no-member
    """Integration tests against the fixture files in tests/files/my_key."""

    root = os.path.join(os.path.dirname(__file__), 'files')

    def test_load_pm(self):
        # Mapped attributes are loaded lazily from config.yml on access.
        model = ConfigModel('my_key', self.root)
        print(model.__dict__)
        assert model.key == "my_key"
        assert model.root == self.root
        assert model.name == "my_name"

    def test_create_dm_from_pm(self):
        model = ConfigModel('my_key', self.root)
        config = ConfigModel.pm_to_dm(model)
        print(config.__dict__)
        assert config.key == "my_key"
        assert config.root == self.root
        assert config.name == "my_name"

    def test_nonmapped_attribute_is_kept(self):
        # Attributes outside the YORM mapping behave like normal attributes.
        model = ConfigModel('my_key', self.root)
        model.unmapped = 42
        assert 42 == model.unmapped

    def test_missing_files_are_handled(self):
        # auto_create=False: accessing a mapped attribute without a backing
        # file raises instead of creating one.
        model = ConfigModel('my_key_manual', self.root)
        with expect.raises(yorm.exceptions.MissingFileError):
            print(model.name)
class TestStore:
    """Tests for the ConfigStore repository wrapper."""
    def test_read_missing(self, tmpdir):
        # Unknown keys yield None rather than raising.
        store = ConfigStore(str(tmpdir))
        assert None is store.read('unknown')
|
import tensorflow as tf
import numpy as np
from .frame import Frame, Twist
from .rotation import *
from enum import IntEnum
class JointType(IntEnum):
    """Supported joint kinds; NoneT marks a fixed (non-actuated) joint."""
    RotX = 0     # revolute about the x axis
    RotY = 1     # revolute about the y axis
    RotZ = 2     # revolute about the z axis
    RotAxis = 3  # revolute about an arbitrary axis (uses Joint.axis/origin)
    NoneT = 4    # fixed joint: identity pose, zero twist
class Joint(object):
    """Single robot joint; produces its pose / twist for a joint value.

    Parameters:
        type: a JointType selecting the rotation behaviour.
        origin: offset point — currently only applied for RotAxis poses
            (see the TODO in pose()).
        axis: rotation axis vector, used by RotAxis joints only.
        name: unused here; kept for callers — TODO confirm.
        limits: optional URDF-style limit object exposing
            upper/lower/velocity/effort.
    """
    def __init__(self, type, origin=None, axis=None, name='', limits=None):
        self.type = type
        if limits is not None:
            self.limits = {'up': limits.upper, 'low': limits.lower,
                           'vel': limits.velocity, 'effort': limits.effort}
        else:
            self.limits = {}
        self.axis = axis
        self.origin = origin

    def pose(self, a):
        """Frame of this joint at joint value *a*.

        NOTE: falls through to an implicit None for unknown joint types.
        """
        # TODO implement origin
        if self.type is JointType.RotX:
            return Frame(m=rot_x(a))
        elif self.type is JointType.RotY:
            return Frame(m=rot_y(a))
        elif self.type is JointType.RotZ:
            return Frame(m=rot_z(a))
        elif self.type is JointType.RotAxis:
            return Frame(p=self.origin, m=rot_2(self.axis, a))
        elif self.type is JointType.NoneT:
            return Frame()

    def twist(self, a):
        """Twist of this joint at joint value *a* (None for unknown types)."""
        if self.type is JointType.RotX:
            return Twist(twist_x(a))
        elif self.type is JointType.RotY:
            return Twist(twist_y(a))
        elif self.type is JointType.RotZ:
            return Twist(twist_z(a))
        elif self.type is JointType.RotAxis:
            return Twist(twist_2(self.axis, a))
        elif self.type is JointType.NoneT:
            return Twist()
class Mesh(object):
    """Triangle mesh wrapper exposing random surface sampling as TF ops.

    Built from a trimesh-like object (requires .vertices, .area_faces and
    .triangles attributes — TODO confirm the expected mesh library).
    Uses TF1-era APIs (tf.log, tf.random_uniform, tf.random.categorical).
    """
    def __init__(self, mesh, dtype=tf.float32):
        self._vertices = tf.convert_to_tensor(mesh.vertices, dtype=dtype)
        self._nb_vertices = mesh.vertices.shape[0]
        self._area_faces = tf.convert_to_tensor(mesh.area_faces, dtype=dtype)
        triangles = np.copy(mesh.triangles)
        # Disabled vertex-order permutation, kept for reference:
        # triangles[:, :, 0] = mesh.triangles[:, :, 2]
        # triangles[:, :, 1] = mesh.triangles[:, :, 0]
        # triangles[:, :, 2] = mesh.triangles[:, :, 1]
        self._triangles = tf.convert_to_tensor(triangles, dtype=dtype)
        # NOTE(review): result unused — presumably builds/validates the
        # sampling subgraph eagerly; confirm whether it can be removed.
        sample = self.sample(10)

    def sample(self, size=10):
        """
        Random sample on surface
        :param size: number of surface points to draw
        :return: (size, 3) tensor of points on the mesh surface
        """
        # sample triangle proportional to surface
        idx = tf.random.categorical(tf.log(self._area_faces)[None], size)[0]
        triangles_samples = tf.gather(
            self._triangles,
            idx
        )
        # Barycentric sampling: the sqrt trick yields uniform points per triangle.
        r0, r1 = tf.random_uniform((size, ), 0., 1.), tf.random_uniform((size, ), 0., 1.)
        vs = tf.stack([1. - r0 ** 0.5, r0 ** 0.5 * (1. - r1), r1 * r0 ** 0.5])
        return tf.reduce_sum(tf.transpose(vs)[:, :, None] * triangles_samples, axis=1)

    def sample_face(self, size=10):
        """Draw *size* mesh vertices uniformly at random (with replacement)."""
        return tf.gather(self._vertices,
                         tf.random_uniform((size, ), 0, self._nb_vertices-1, dtype=tf.int64))
class Link(object):
    """Rigid link: a frame, a mass and an optional collision mesh."""
    def __init__(self, frame, mass=1.0):
        self.mass = mass
        self.frame = frame
        self._collision_mesh = None

    @property
    def collision_mesh(self):
        return self._collision_mesh

    @collision_mesh.setter
    def collision_mesh(self, value):
        # Wrap the raw mesh object in a TF-backed Mesh on assignment.
        self._collision_mesh = Mesh(value)

    def pose(self):
        """Return the link's frame (constant; joints provide the motion)."""
        return self.frame
from dictionary.searchbase import SearchBase
import requests
from bs4 import BeautifulSoup
from random import choice
from string import ascii_letters, digits
import re
import json
import time
from django.utils.text import slugify
import pprint
from dictionary import helper
import random
import threading
from dictionary.models import Vocabulary
class myThread (threading.Thread):
    """Thread wrapper that announces itself and runs a callable.

    Args:
        threadID: numeric id, stored for identification only.
        name: thread name (printed on start).
        func: zero-argument callable to execute on the thread.
    """
    def __init__(self, threadID, name, func):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.func = func

    def run(self):
        print("Starting " + self.name)
        # BUG FIX: `self.func` alone was a no-op attribute access, so the
        # work never ran on the thread. Invoke it when it is callable;
        # call sites that pass an already-evaluated result keep their old
        # (no-op) behaviour.
        if callable(self.func):
            self.func()
def searchVocabulary(vocabulary, proxy):
    """Look a word up, download its US/UK pronunciation audio and persist it.

    Args:
        vocabulary: the word to search (slugified before the lookup).
        proxy: "host:port" string used for both the search and the downloads.
    """
    word_slug = slugify(vocabulary)
    proxy_parse = {
        "http": f"http://{proxy}",
        "https": f"https://{proxy}"
    }
    search = SearchBase(word_slug, proxy_parse)
    search.searchVocabulary()
    data_search = search.get_search_data()
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(data_search)
    word = data_search.get('word_cover')
    name = word.get('name')
    phon_us = word.get('pronunciation_us')
    phon_uk = word.get('pronunciation_uk')
    sound_us = word.get('sound_us')
    sound_uk = word.get('sound_uk')
    definitions_examples = data_search.get('word_explaning')
    # FIX: the file names are referenced when saving below even when no
    # audio exists; previously that raised NameError.
    file_name_us = None
    file_name_uk = None
    if sound_us and sound_uk:
        file_name_us = f'{name}+us.mp3'
        file_name_uk = f'{name}+uk.mp3'
        # NOTE(review): myThread historically never invoked self.func, so
        # these downloads effectively run synchronously right here, at
        # construction time — confirm whether real threading is wanted.
        thread1 = myThread(
            1, f"Thread-{file_name_us}",
            helper.downloadFileFromUrl(proxy, sound_us, file_name_us))
        # BUG FIX: the UK file was previously downloaded from the US URL.
        thread2 = myThread(
            2, f"Thread-{file_name_uk}",
            helper.downloadFileFromUrl(proxy, sound_uk, file_name_uk))
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()
    print("Exiting Main Thread")
    vocabulary = Vocabulary()
    vocabulary.name = name
    vocabulary.phon_us = phon_us
    vocabulary.phon_uk = phon_uk
    # BUG FIX: the US and UK file names were swapped when stored.
    vocabulary.sound_us = file_name_us
    vocabulary.sound_uk = file_name_uk
    vocabulary.definitions = definitions_examples
    vocabulary.certification_field = 'ielts'
    vocabulary.save()
    print(f"Saved <{vocabulary}> successfully.")
def check_search(word):
    """Retry wrapper around searchVocabulary with rotating proxies.

    Picks a random proxy per attempt; after 10 consecutive failures it
    regenerates the proxy list and resets the counter.

    Returns:
        False on success; None when it gives up.

    NOTE(review): because `count` is reset to 1 whenever it reaches 10,
    the `count >= 15` break below looks unreachable and the loop may never
    terminate on persistent failures — confirm intended retry policy.
    """
    count = 1
    while True:
        try:
            print(f"Searching {word}")
            if(count >= 10):
                helper.generate_proxy()
                print("Generated New Proxy")
                count = 1
                continue
            proxy = helper.choose_random("dictionary/proxy_data.txt")
            print(f"Searching {word} turn {count} with proxy: {proxy} ")
            searchVocabulary(word, proxy)
            return False
        except Exception as e:
            print(f"Search error {count} : {e}")
            count += 1
            if count >=15:
                break
            continue
# check_search('irregular')
def read_vocabulary_to_search(file):
    """Search every word listed (one per line) in *file* and time the run.

    FIX: the input file handle is now closed via a context manager
    (previously it was opened with open()/readlines() and never closed).
    """
    start = time.time()
    count = 0
    with open(file, 'r') as handle:
        # Strips the newline character from each word before searching.
        for line in handle:
            count += 1
            word = line.strip()
            print(f"=====>Searching for {word} ")
            check_search(word)
    end = time.time()
    print(f"Search {count} vocabulary in {end-start} ")


# Kick off the batch search over the test vocabulary at import time.
read_vocabulary_to_search('dictionary/vocabulary_data/test_vocabulary.txt')
|
from django.db import models
class Arjun(models.Model):
    """Employee-style record with a photo, keyed by a manually-assigned id."""
    idno=models.IntegerField(primary_key=True)  # manually assigned primary key
    name=models.CharField(max_length=30)
    designation=models.CharField(max_length=30)
    salary=models.DecimalField(max_digits=10,decimal_places=2)
    image=models.ImageField(upload_to="arjun/")  # stored under MEDIA_ROOT/arjun/
|
import re
from random import choice
def get_statistics(text: str, triads: list[str]) -> dict:
    """Count, for every triad, how often it is followed by '0' vs '1' in text.

    Uses lookahead patterns so overlapping occurrences are all counted.
    Returns {triad: [zero_count, one_count]}.
    """
    def occurrences(pattern: str) -> int:
        return len(re.findall(f"(?=({pattern}))", text))

    return {triad: [occurrences(triad + "0"), occurrences(triad + "1")]
            for triad in triads}
def predict_symbol(statistics: dict, test_triad) -> str:
    """Predict the next symbol after *test_triad* from observed counts.

    Returns the majority follower; ties are broken uniformly at random.
    """
    zero_count, one_count = statistics[test_triad]
    if zero_count == one_count:
        return choice(["1", "0"])
    return "0" if zero_count > one_count else "1"
|
#todo make ##todo make #todo make this more general
import maya.cmds as cmds
import random
#delete existing cubes so every run starts from a clean scene
cubeLs = cmds.ls('myCube*', tr=True)
if len(cubeLs) > 0:
    cmds.delete(cubeLs)
#make cube
def createCube():
    """Create a unit polyCube named myCube#, resting on the ground plane,
    with all pivot attributes made keyable (rollCube keys pivot moves)."""
    cube = cmds.polyCube(name='myCube#')[0]
    cmds.move(0,0.5,0, cube, r=True)
    #make pivot attrs keyable
    # FIX: collapsed 12 duplicated setAttr lines into one loop (same
    # attributes, same order, same flags).
    for attr in ('rotatePivotTranslateY', 'rotatePivotTranslateX',
                 'rotatePivotTranslateZ', 'scalePivotX', 'scalePivotY',
                 'scalePivotZ', 'rotatePivotX', 'rotatePivotY',
                 'rotatePivotZ', 'scalePivotTranslateX',
                 'scalePivotTranslateY', 'scalePivotTranslateZ'):
        cmds.setAttr("%s.%s" % (cube, attr), k=True)
def roundCoords(coords):
    """Snap each coordinate to the nearest half unit, in place, and return
    the same list."""
    for index, value in enumerate(coords):
        coords[index] = round(value * 2) / 2
    return coords
def getCubePos(cubeTrans):
    """Return the cube's object-space translation snapped to half units."""
    coords = cmds.xform(cubeTrans, q=True, t=True, os=True)
    return roundCoords(coords)
def getLastKeyframe(cubeTrans): #return 0 if no keyframe (translateX) associated to object
    """Time of the last translateX keyframe on *cubeTrans*, or 0 if none.

    FIX: narrowed the bare `except:` — cmds.keyframe returns None when the
    attribute has no keys, making the [-1] index raise TypeError; only that
    (and IndexError) should mean "no keyframe".
    """
    try:
        return cmds.keyframe('%s.translateX' % cubeTrans, q=True)[-1]
    except (TypeError, IndexError):
        return 0
def rollCube(vect, startTime, stepTime, cubeTrans):
    """Keyframe one 90-degree roll of the cube along a grid direction.

    Args:
        vect: (z, y, x) unit direction of the roll; y selects a top-pivot
            double rotation instead of the usual bottom-edge roll.
        startTime / stepTime: animation range; keys are set at startTime,
            endTime-1 and endTime.
        cubeTrans: transform node name of the cube.

    Returns:
        The cube's snapped position after the roll (via getCubePos).
    """
    endTime = startTime + stepTime
    z,y,x = vect
    # A roll towards +x spins about z (and vice versa), hence the swap.
    xRot,yRot,zRot = x*90, y*90, z*(-90)
    xHalf, yHalf, zHalf = x*0.5, y*0.5, z*0.5
    #print(x,y,z,xHalf,yHalf,zHalf,xRot,yRot,zRot)
    #centres pivot, and cube orientation reset, then moves y pivot down
    cmds.xform(cubeTrans, centerPivots=True)
    cmds.rotate(0,0,0,cubeTrans)
    if y == 0:
        cmds.move(0,-0.5,0,'%s.scalePivot' % cubeTrans, '%s.rotatePivot' % cubeTrans, r=True)
    else:
        # y-roll variant: pivot on the top face and double the rotation.
        cmds.move(0,0.5,0,'%s.scalePivot' % cubeTrans, '%s.rotatePivot' % cubeTrans, r=True)
        xRot = xRot*2
        zRot = zRot*2
    #put pivot on the correct bottom edge
    cmds.move(zHalf,0,xHalf,'%s.scalePivot' % cubeTrans, '%s.rotatePivot' % cubeTrans, r=True)
    #work out the startPos
    coords = getCubePos(cubeTrans)
    cmds.setKeyframe(cubeTrans, time=startTime)
    cmds.rotate(xRot,0,zRot, cubeTrans, r=True)
    cmds.setKeyframe(cubeTrans, time=endTime-1)
    # Re-centre the pivot and zero the rotation so the next roll starts clean.
    cmds.xform(cubeTrans, centerPivots=True)
    cmds.rotate(0,0,0,cubeTrans)
    cmds.setKeyframe(cubeTrans, time=endTime)
    return getCubePos(cubeTrans)
# #cube has moved, z-=1, so move the pivot accordingly
# cmds.move(0,0,-1,'myCube1.scalePivot', 'myCube1.rotatePivot', r=True)
# cmds.setKeyframe(cube, time=21)
# cmds.rotate(-90,0,0, cube, r=True)
# cmds.setKeyframe(cube, time=42)
# cmds.xform(cubeTrans, centerPivots=True)
def moveCube(targetPos, cube, step):
    """Roll `cube` one grid step towards `targetPos`.

    Resolves the x axis before the z axis, performing at most one roll per
    call, appended after the cube's last keyframe with `step` frames per
    roll. Returns 1 once the cube's rounded position equals `targetPos`,
    otherwise 0.
    """
    currentPos = getCubePos(cube)
    if currentPos[0] < targetPos[0]:
        currentPos = rollCube([1,0,0],getLastKeyframe(cube)+1,step,cube)
    elif currentPos[0] > targetPos[0]:
        currentPos = rollCube([-1,0,0],getLastKeyframe(cube)+1,step,cube)
    elif currentPos[2] < targetPos[2]:
        currentPos = rollCube([0,0,1],getLastKeyframe(cube)+1,step,cube)
    elif currentPos[2] > targetPos[2]:
        currentPos = rollCube([0,0,-1],getLastKeyframe(cube)+1,step,cube)
    print(currentPos)
    #break if cube is in target position
    if currentPos[0] == targetPos[0] and currentPos[1] == targetPos[1] and currentPos[2] == targetPos[2]:
        return 1
    else:
        return 0
cubes = {} #dict for each cube: True once it has reached its final position
walls = [] #list of all cube positions acting as 'walls'
# Create 30 cubes, scatter them randomly on the ground plane and record
# their (rounded) starting positions.
for i in range(0,30):
    createCube()
for cube in cmds.ls('myCube*', tr=True):
    cubes[cube] = False
    x = random.randrange(-10,10)
    z = random.randrange(-10,10)
    cmds.move(x,0,z, cube, r=True)
    walls.append(getCubePos(cube))
# Roll every cube towards its own random target, 7 frames per roll.
for cube in cubes:
    x = random.randrange(-10,10)
    z = random.randrange(-10,10)
    while not cubes[cube]:
        cubes[cube] = moveCube([x,0.5,z], cube, 7)
    print("Cube reached destination")
print(walls)
|
import numpy as np
from dl4nlp.utilities import softmax, tanh_gradient
from dl4nlp.preprocessing import build_dictionary, to_indices
from dl4nlp.gradient_check import gradient_check
from dl4nlp.gradient_descent import gradient_descent
from dl4nlp.sgd import bind_cost_gradient, get_stochastic_sampler
from dl4nlp.neural_network import flatten_cost_gradient
class NPLM:
    """
    Neural Probabilistic Language Model (Bengio 2003).

    Predicts the next word from the previous `context_size` words through
    word feature vectors C, a tanh hidden layer H, and direct connections W.
    Index 0 is reserved for unknown words (hence `vocabulary_size + 1` rows);
    the trailing `+ 1` column is the bias appended to the input vector.
    """
    def __init__(self, vocabulary_size, feature_size, context_size, hidden_size):
        self.vocabulary_size = vocabulary_size
        self.feature_size = feature_size
        self.context_size = context_size
        self.hidden_size = hidden_size
        self.W_shape = (vocabulary_size + 1, feature_size * context_size + 1)
        self.U_shape = (vocabulary_size + 1, hidden_size)
        self.H_shape = (hidden_size, feature_size * context_size + 1)
        self.C_shape = (vocabulary_size + 1, feature_size)
        self.dictionary = None          # word -> index, built by train()
        self.reverse_dictionary = None  # index -> word
        self.parameters = None          # flat parameter vector after training

    def train(self, sentences, iterations=1000):
        """Fit the model on tokenized `sentences`; return the cost history."""
        # Preprocess sentences to create indices of context and next words
        self.dictionary = build_dictionary(sentences, self.vocabulary_size)
        indices = to_indices(sentences, self.dictionary)
        self.reverse_dictionary = {index: word for word, index in self.dictionary.items()}
        inputs, outputs = self.create_context(indices)
        # Create cost and gradient function for gradient descent
        shapes = [self.W_shape, self.U_shape, self.H_shape, self.C_shape]
        flatten_nplm_cost_gradient = flatten_cost_gradient(nplm_cost_gradient, shapes)
        cost_gradient = bind_cost_gradient(flatten_nplm_cost_gradient, inputs, outputs,
                                           sampler=get_stochastic_sampler(10))
        # Train neural network. Use the builtin sum: np.sum over a generator
        # is deprecated, and np.product was removed in NumPy 2.0.
        parameters_size = sum(int(np.prod(shape)) for shape in shapes)
        initial_parameters = np.random.normal(size=parameters_size)
        self.parameters, cost_history = gradient_descent(cost_gradient, initial_parameters, iterations)
        return cost_history

    def predict(self, context):
        """Return the most likely next word given `context` (list of words).

        NOTE(review): assumes len(context) >= context_size after truncation;
        shorter contexts raise IndexError, as in the original.
        """
        if self.dictionary is None or self.parameters is None:
            print('Train before predict!')
            return
        context = context[-self.context_size:]
        # Map context words to indices; unknown words map to index 0.
        # (Renamed from `input`, which shadowed the builtin.)
        context_indices = []
        for word in context:
            if word in self.dictionary:
                context_indices.append(self.dictionary[word])
            else:
                context_indices.append(0)
        # Unflatten the trained parameter vector back into W, U, H, C.
        W_size = int(np.prod(self.W_shape))
        U_size = int(np.prod(self.U_shape))
        H_size = int(np.prod(self.H_shape))
        split_indices = [W_size, W_size + U_size, W_size + U_size + H_size]
        W, U, H, C = np.split(self.parameters, split_indices)
        W = W.reshape(self.W_shape)
        U = U.reshape(self.U_shape)
        H = H.reshape(self.H_shape)
        C = C.reshape(self.C_shape)
        x = np.concatenate([C[context_indices[i]] for i in range(self.context_size)])
        x = np.append(x, 1.)  # Append bias term
        x = x.reshape(-1, 1)
        y = W.dot(x) + U.dot(np.tanh(H.dot(x)))
        # You don't want to predict unknown words (index 0)
        prediction = np.argmax(y[1:]) + 1
        return self.reverse_dictionary[prediction]

    def create_context(self, sentences):
        """Slide a `context_size` window over each sentence of word indices.

        Returns (inputs, outputs) where inputs[i] is a context index list and
        outputs[i] the index of the word that follows it. Words with fewer
        than `context_size` predecessors are skipped.
        """
        inputs = []
        outputs = []
        for sentence in sentences:
            context = []
            for word in sentence:
                if len(context) >= self.context_size:
                    context = context[-self.context_size:]
                    inputs.append(context)
                    outputs.append(word)
                # `context + [word]` builds a new list, so contexts already
                # appended to `inputs` are never mutated.
                context = context + [word]
        return inputs, outputs

    def gradient_check(self, inputs, outputs):
        """Numerically verify nplm_cost_gradient on (inputs, outputs)."""
        # Create cost and gradient function for gradient check
        shapes = [self.W_shape, self.U_shape, self.H_shape, self.C_shape]
        flatten_nplm_cost_gradient = flatten_cost_gradient(nplm_cost_gradient, shapes)
        cost_gradient = bind_cost_gradient(flatten_nplm_cost_gradient, inputs, outputs)
        # Gradient check!  (builtin sum + np.prod, same reason as in train)
        parameters_size = sum(int(np.prod(shape)) for shape in shapes)
        initial_parameters = np.random.normal(size=parameters_size)
        result = gradient_check(cost_gradient, initial_parameters)
        return result
def nplm_cost_gradient(parameters, input, output):
    """
    Cost and gradient of one NPLM training example (softmax cross-entropy).
    :param parameters: tuple of (W, U, H, C) weight matrices
    :param input: indices of context words (length = context size)
    :param output: index of the current (target) word
    :return: (cost, [gradient_W, gradient_U, gradient_H, gradient_C])
    """
    W, U, H, C = parameters
    context_size = len(input)
    # x = concatenated feature rows of the context words, plus a bias 1.
    x = np.concatenate([C[input[i]] for i in range(context_size)])
    x = np.append(x, 1.)  # Append bias term
    x = x.reshape(-1, 1)
    hidden_layer = np.tanh(H.dot(x))
    # y = W x + U tanh(H x): direct plus hidden pathway (Bengio 2003, eq. 1).
    y = W.dot(x) + U.dot(hidden_layer)
    prediction = softmax(y.reshape(-1)).reshape(-1, 1)
    cost = -np.sum(np.log(prediction[output]))
    one_hot = np.zeros_like(prediction)
    one_hot[output] = 1
    # delta = dCost/dy for softmax + cross-entropy.
    delta = prediction - one_hot
    gradient_W = delta.dot(x.T)
    gradient_U = delta.dot(hidden_layer.T)
    gradient_H = tanh_gradient(hidden_layer) * U.T.dot(delta).dot(x.T)
    gradient_C = np.zeros_like(C)
    # Back-propagate through both pathways to x, drop the bias row, then
    # scatter the context_size pieces onto the C rows of the context words.
    gradient_y_x = W + U.dot(tanh_gradient(hidden_layer) * H)
    gradient_x = gradient_y_x.T.dot(delta)
    gradient_x = gradient_x[:-1, :]
    gradient_x_split = np.split(gradient_x, context_size)
    for i in range(context_size):
        gradient_C[input[i]] += gradient_x_split[i].flatten()
    gradient = [gradient_W, gradient_U, gradient_H, gradient_C]
    return cost, gradient
|
from robot.megapi import *
# Motor port numbers on the MegaPi board.
right = 1
left = 2
# Last encoder delta reported by the read callbacks (currently unused).
deltaTickRight = 0
def onRead1(level):
    """Callback for encoder-1 position/speed reads: log the reported value."""
    print(f"Encoder1 motor speed Value:{level:f}")
def onRead2(level):
    """Callback for encoder-2 position/speed reads: log the reported value."""
    print(f"Encoder2 motor speed Value:{level:f}")
def check(f):
    """Generic completion callback for encoderMotorMove; logs its argument."""
    print(f)
def onForwardFinish():
    """Drive forward: both motors turn 720 degrees, right motor mirrored."""
    sleep(0.4)
    bot.encoderMotorMove(right,100, 720 * -1,check)
    bot.encoderMotorMove(left,100, 720 ,check )
def onRightFinish():
    """Turn right: left wheel advances 495 degrees while the right holds."""
    sleep(0.4)
    bot.encoderMotorMove(right,100, 0 * -1,check)
    bot.encoderMotorMove(left,100, 495 ,check )
def onLeftFinish():
    """Turn left: right wheel advances 495 degrees (mirrored) while the left holds."""
    sleep(0.4)
    bot.encoderMotorMove(right,100, 495 * -1,check)
    bot.encoderMotorMove(left,100, 0 ,check )
def onBackwardFinish():
    """Drive backward: both motors turn 720 degrees opposite to forward."""
    sleep(0.4)
    print("slot")
    bot.encoderMotorMove(right ,100, 720,check)
    bot.encoderMotorMove(left,100, -720,check )
if __name__ == '__main__':
    # Connect to the MegaPi board over a Bluetooth serial device, then drive
    # one step forward. The commented lines are earlier experiments with
    # encoder polling and other movement patterns.
    #bot = MeAuriga()
    bot = MegaPi()
    #bot.start("/dev/ttyUSB0")
    bot.start("/dev/rfcomm0")
    #bot.encoderMotorSetCurPosZero(1)
    #bot.encoderMotorSetCurPosZero(2)
    #bot.encoderMotorRun(right ,0)#right
    #bot.encoderMotorRun(left ,0)#left
    sleep(0.5)
    print("forward")
    onForwardFinish()
    print("end forward")
    sleep(0.5)
    #onRightFinish()
    #sleep(0.5)
    #onBackwardFinish()
    #sleep(2)
    #bot.encoderMotorPosition(1,onRead1)
    #sleep(1)
    #bot.encoderMotorPosition(2,onRead2)
    #while 1:
    #if( deltaTickRight > ( 720 - 10 ) ):
    # print("next order")
    #else :
    #bot.encoderMotorPosition(1,onRead1)
    # bot.encoderMotorPosition(2,onRead2)
    #print(deltaTickRight)
    #print( bot.getDict() )
    # sleep(1)
    # continue
|
# Calculator: raises an exception when the user enters anything other than
# + - * /, and catches it below.  (Prompts/messages are in Chinese.)
try:
    num1 = int(input("请输入整数"))  # prompt: "enter an integer"
    num2 = int(input("请输入整数"))
    op1 = input("请输入+-*/")  # prompt: "enter one of + - * /"
    # Reject anything that is not one of the four supported operators.
    if op1 != "+" and op1 != "-" and op1 != "*" and op1 != "/":
        raise Exception("请输入正确的+-*/")
    if op1 == "+":
        print(num1 + num2)
    elif op1 == "-":
        print(num1 - num2)
    elif op1 == "*":
        print(num1 * num2)
    else:
        # Division by zero raises ZeroDivisionError, caught below.
        print(num1 / num2)
except Exception as result:
    # Any failure (bad int, bad operator, division by zero) is just printed.
    print(result)
"""
Найдите производителей ПК с процессором не менее 450 Мгц. Вывести: Maker
"""
from queries.models import Product
from django.db import connection, reset_queries
def run():
    """Distinct makers that produce at least one PC with speed >= 450 MHz."""
    queryset = (
        Product.objects
        .values('maker')
        .filter(pc__speed__gte=450)
        .distinct()
    )
    return queryset
def print_result():
    """Run the query, print a maker table, and report the SQL query count."""
    reset_queries()
    queries_before = len(connection.queries)
    makers = run()
    print('| maker |')
    print('________________')
    for entry in makers:
        print(f"| {entry['maker']} |")
    queries_after = len(connection.queries)
    print('Number of queries:', queries_after - queries_before)
"""
SQL:
SELECT DISTINCT maker FROM Product JOIN PC ON PC.model = Product.model
WHERE PC.speed >= 450
"""
"""
Правильный результат:
maker
A
B
E
""" |
from django.conf import settings
from redis import Redis
from rq import Queue
from . import RunnerBase
class RedisRunner(RunnerBase):
    """Runner that enqueues jobs on an RQ queue backed by Redis."""

    def run(self, func, *args, **kwargs):
        """Enqueue `func(*args, **kwargs)`; return the rq Job."""
        q = Queue(connection=Redis.from_url(settings.REDIS_URL))
        return q.enqueue(func, args=args, kwargs=kwargs)

    @staticmethod
    def get(id):
        """Return the stored meta dict for job `id`.

        Declared @staticmethod: the original definition lacked `self`, so
        calling it on an instance bound the instance as `id`. A staticmethod
        keeps class-level calls (RedisRunner.get(...)) working and fixes
        instance calls. (`id` shadows the builtin but is kept so keyword
        callers are unaffected.)
        """
        q = Queue(connection=Redis.from_url(settings.REDIS_URL))
        return q.get_job(id).meta
|
from peewee import *
import datetime
# Single on-disk SQLite database shared by all models via BaseModel.Meta.
db = SqliteDatabase('proxy.db')
class BaseModel(Model):
    """Base peewee model binding every subclass to the shared `db`."""
    class Meta:
        database = db
class IPInfo(BaseModel):
    """One proxy-server record (help_text strings are in Chinese)."""
    ip_port = CharField(unique=True, help_text='ip地址、端口')            # ip address and port
    http_type = CharField(help_text='协议类型 http/socks')               # protocol: http/socks
    check_total_count = IntegerField(default=0, help_text='检测总次数')   # total health checks
    check_success_count = IntegerField(default=0, help_text='检测成功次数')  # successful checks
    check_success_ratio = FloatField(default=0, help_text='检测成功率')   # success ratio
    check_time = CharField(help_text='检测时间')                          # last check time
    protocol_status = CharField(help_text='请求方法 post/get')            # request method post/get
    transfer_time = FloatField(default=10, help_text='传输时间')          # transfer time (s)
    ping_time = FloatField(default=5, help_text='响应时间')               # ping time (s)
    country = CharField(help_text='ip所在地')                             # ip geolocation
# Open the connection at import time; table creation is done once, manually.
db.connect()
# db.create_tables([IPInfo])
|
# A dictionary is an unordered collection type of key/value pairs: {key: value}.
# dictionary_example = {}
dictionary_example = {'name':'rachel','test_grade':98}
# .get returns the value for 'name' (or None if the key were missing).
print(dictionary_example.get('name'))
|
#!/usr/bin/env python3
import requests
import sys
def url_encode(url):
    """Percent-encode every character of `url` as %XX (two lowercase hex digits)."""
    return ''.join('%%%02x' % ord(ch) for ch in url)
# Usage: script.py <target-ip>. Double URL-encoding defeats naive path
# traversal filters that decode only once before validating the path.
ip = sys.argv[1]
r = requests.get('http://' + ip + ':9990' + '/' + url_encode(url_encode('../../data/gonclub.db')))
print(r.text, flush=True)
|
# -*- coding: utf-8 -*-
import json
from flask import Flask, request, Response, jsonify, send_file
from werkzeug.utils import secure_filename
from dblite import *
import os
# prefix = d['SERVER-INFO']['PREFIX']
from dblite import people
from dblite import session
from dblite.consts import SQL_GET_USER_TEMPLATES
from model import parse_template_request
from queue import tasks
# Directory where user template packages live, next to this file.
UPLOAD_FOLDER = os.path.abspath(os.path.dirname(__file__)) + '/templates'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# email -> {"code", "session_id"}; populated by /register, checked by /verify.
verificationCodes = {}
# ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','png', 'ttf'])
def root_dir():  # pragma: no cover
    """Absolute path of the 'pages' directory that sits next to this module."""
    here = os.path.abspath(os.path.dirname(__file__))
    return here + "/pages"
def get_file(filename):  # pragma: no cover
    """Read `filename` from the pages directory.

    Returns the file contents, or the stringified IOError when it cannot be
    read. The original leaked the file handle; `with` closes it reliably.
    """
    try:
        src = os.path.join(root_dir(), filename)
        with open(src) as page:
            return page.read()
    except IOError as exc:
        return str(exc)
@app.route('/client/', defaults={'path': ''})
@app.route('/client/<path:path>')
def get_resource(path):  # pragma: no cover
    """Serve a static client file from pages/, guessing mimetype by extension.

    NOTE(review): `path` is user-supplied and joined directly; '../'
    traversal looks possible — confirm and consider flask's safe_join.
    """
    mimetypes = {
        ".css": "text/css",
        ".html": "text/html",
        ".js": "application/javascript",
        ".png": "image/png",
        ".jpg": "image/jpeg",
        '.svg': 'image/svg+xml'
    }
    complete_path = os.path.join(root_dir(), path)
    ext = os.path.splitext(path)[1]
    mimetype = mimetypes.get(ext, "text/html")
    return send_file(
        complete_path,
        mimetype,
        attachment_filename=complete_path,
        cache_timeout=0
    ), 200
@app.route('/server/templatelist/<session_id>', methods=['GET'])
def get_template_list(session_id):
    """Return every template as JSON: [{"id": ..., "name": ...}, ...].

    NOTE(review): session_id is accepted but never checked, so the list is
    effectively public — confirm whether an ownership filter is intended.
    (Removed an unused local that rebuilt the templates directory path.)
    """
    connection = create()
    cursor = connection.cursor()
    cursor.execute("select * from template")
    rows = cursor.fetchall()
    res = [{"id": row[0], "name": row[1]} for row in rows]
    # res = [{"NAME": o} for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))]
    return jsonify(res)
'''
@app.route('/server/templatecode/<template_name>', methods=['GET'])
def get_template_code(template_name):
file = open(os.path.abspath(os.path.dirname(__file__)) + '/templates/%s/template.py' % template_name)
# res = [{"name": o} for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))]
return file.read()
'''
@app.route('/server/preview/<template_id>/<session_id>')
def get_preview(template_id, session_id):
    """Serve the preview JPEG of a template owned by the session's user."""
    mimetypes = {
        ".png": "image/png",
        ".jpg": "image/jpeg"
    }
    user_id = session.get_user_id(session_id)
    # Ownership check: the template must appear in the user's template list.
    user_templates = people.get_user_items("select * from (" + SQL_GET_USER_TEMPLATES + ") where ID = :template_id",
                                           user_id, None,
                                           template_id=template_id)
    if len(user_templates) == 0:
        return "Access denied", 500
    template = user_templates[0]
    mimetype = mimetypes.get(".jpg", "text/html")  # always "image/jpeg"
    file_path = os.path.abspath(os.path.dirname(__file__)) + '/templates/%s/files/preview.jpg' % template["PATH"]
    return send_file(
        file_path,
        mimetype,
        attachment_filename=file_path,
        cache_timeout=0
    ), 200
@app.route('/server/templatecode/<session_id>', methods=['GET'])
def get_template_code(session_id):
    """Return the template.py source for the 'template_id' query argument.

    Fails with HTTP 500 when the id is missing or the session's user does
    not own the template. Fixes: the original returned a bare int 500
    (invalid Flask response type), compared with `== None`, and leaked the
    file handle.
    """
    user_id = session.get_user_id(session_id)
    template_id = request.args.get("template_id")
    if template_id is None:
        return "Empty template", 500
    user_templates = people.get_user_items("select * from (" + SQL_GET_USER_TEMPLATES + ") where ID = :template_id",
                                           user_id, None,
                                           template_id=template_id)
    if len(user_templates) == 0:
        return "Access denied", 500
    template = user_templates[0]
    file_path = os.path.abspath(os.path.dirname(__file__)) + '/templates/%s/template.py' % template["PATH"]
    with open(file_path) as code_file:
        return code_file.read()
@app.route('/server/invitation/<invitation_id>')
def get_email_by_invitation(invitation_id):
    """Look up the email attached to an invitation SID; '' when not found.

    NOTE(review): rows[0]['EMAIL'] requires a dict-like row factory, and the
    connection is never closed — confirm create() handles both.
    """
    conn = create()
    cursor = conn.cursor()
    res = cursor.execute("select * from invitation where SID = ?", [invitation_id])
    rows = res.fetchall()
    return "" if len(rows) == 0 else rows[0]['EMAIL']
@app.route('/server/templatecode/<template_name>', methods=['POST'])
def set_template_code(template_name):
    """Overwrite templates/<template_name>/template.py with the request body.

    Fixes: close the file deterministically via `with` (the original leaked
    the handle). NOTE(review): template_name is user-controlled and used in
    a filesystem path — consider secure_filename to block path traversal.
    """
    print("%s" % request.data)  # debug: log the incoming source
    template_code = request.data
    file_path = os.path.abspath(os.path.dirname(__file__)) + '/templates/%s/template.py' % template_name
    with open(file_path, 'w+') as template_file:
        template_file.write(template_code)
    return "OK"
@app.route('/server/generate/<template_id>/<session_id>', methods=['POST'])
def generate_template(template_id, session_id):
    """Render a full-size picture for an owned template and send it back.

    NOTE(review): `main` is not imported anywhere in this module, so
    main.generate_picture will raise NameError unless injected elsewhere —
    confirm (the preview route uses tasks.generate_picture instead).
    """
    user_id = session.get_user_id(session_id)
    print user_id, template_id
    user_templates = people.get_user_items("select * from (" + SQL_GET_USER_TEMPLATES + ") where ID = :template_id",
                                           user_id, None,
                                           template_id=template_id)
    params = json.loads(request.data)
    template_request = parse_template_request(params)
    if len(user_templates) == 0:
        print "Access denied"
        return "Access denied", 500
    user_template = user_templates[0]
    file_name = main.generate_picture(user_template["PATH"], template_request.width, template_request.height,
                                      template_request.format, float(template_request.dpi), user_id=user_id)
    return send_file(file_name), 200
@app.route('/server/generate/preview/<template_id>/<session_id>', methods=['POST'])
def generate_template_preview(template_id, session_id):
    """Render a 640px-wide JPEG preview of an owned template; return its basename."""
    user_id = session.get_user_id(session_id)
    print user_id, template_id
    user_templates = people.get_user_items("select * from (" + SQL_GET_USER_TEMPLATES + ") where ID = :template_id",
                                           user_id, None,
                                           template_id=template_id)
    params = json.loads(request.data)
    template_request = parse_template_request(params)
    if len(user_templates) == 0:
        print "Access denied"
        return "Access denied", 500
    user_template = user_templates[0]
    # Scale the height to a 640-wide preview; under Python 2 this is integer
    # (floor) division when width/height are ints.
    template_request.height = int(template_request.height * 640 / template_request.width)
    template_request.width = 640
    file_name = tasks.generate_picture(user_template["PATH"], template_request.width, template_request.height,
                                       "JPEG", float(96), user_id=user_id, preview=True)
    return os.path.basename(file_name), 200
@app.route('/server/upload/<session_id>', methods=['POST'])
def upload(session_id):
    """Save an uploaded file into the files/ directory of an owned template.

    Requires a 'template_id' query argument belonging to the session's user.
    Fixes: `is None` instead of `== None`; the local no longer shadows the
    `file` builtin.
    """
    user_id = session.get_user_id(session_id)
    template_id = request.args.get("template_id")
    if template_id is None:
        return "Empty template", 500
    user_templates = people.get_user_items("select * from (" + SQL_GET_USER_TEMPLATES + ") where ID = :template_id",
                                           user_id, None,
                                           template_id=template_id)
    if len(user_templates) == 0:
        return "Access denied", 500
    user_template = user_templates[0]
    uploaded = request.files['file']
    # secure_filename strips path components, preventing directory traversal.
    filename = secure_filename(uploaded.filename)
    uploaded.save(os.path.abspath(os.path.dirname(__file__)) + "/templates/" + user_template["PATH"] + "/files/" + filename)
    return "OK", 200
@app.route('/login', methods=['POST'])
def login():
    """Authenticate email/password; return {"session_id": ...} or HTTP 500.

    Fix: `is None` instead of `== None`.
    """
    params = json.loads(request.data)
    email = params['email']
    password = params['password']
    session_id = people.login(email, password)
    if session_id is None:
        return "Wrong login", 500
    return jsonify({"session_id": session_id})
@app.route('/register', methods=['POST'])
def register():
    """Register a new user and hand back a session id (verification pending)."""
    params = json.loads(request.data)
    email = params['email']
    session_id = people.register(email, params['password'])
    # Hard-coded verification code; /verify checks for "123123".
    verificationCodes[email] = {"code": "123123", "session_id": session_id}
    return jsonify({"session_id": session_id, "verified": "0"})
@app.route('/verify', methods=['POST'])
def verify():
    """Mark the user as verified when the posted code matches.

    NOTE(review): the code is compared against the hard-coded "123123" and
    the verificationCodes store is never consulted — confirm this is a stub.
    """
    params = json.loads(request.data)
    resp = {}
    email = params['email']
    verification_code = params['code']
    try:
        if verification_code == "123123":
            user_id = people.get_user_by_email(email)
            people.update_table_people(user_id, VERIFIED=1)
            resp = {}
            resp["result"] = "OK"
            # del verificationCodes[email]
            return jsonify(resp), 200
        else:
            return "Wrong code", 500
    except KeyError:
        return "Wrong session", 500
@app.route("/rules/<session_id>", methods=['GET'])
def rules(session_id):
pass
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    for header, value in (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS'),
    ):
        response.headers.add(header, value)
    return response
|
"""
sampleFunction5 class3
absolute value
"""
def absoluteValue(valueIn):
    """Return the absolute value of valueIn (works for ints and floats)."""
    return valueIn if valueIn >= 0 else -valueIn
# Quick demo of absoluteValue on positive, negative, zero and float inputs.
print(absoluteValue(-3))
print(absoluteValue(4))
print(absoluteValue(0))
print(absoluteValue(1.345))
print(absoluteValue(-7.89))
|
import csv
import sys
import asyncio
from urllib.request import urlopen
from concurrent.futures import ProcessPoolExecutor
from pyppeteer import launch
from requests_html import HTML
def retrieve_jquery_source():
    """Download and return the latest minified jQuery source as a str."""
    with urlopen('http://code.jquery.com/jquery-latest.min.js') as jquery:
        return jquery.read().decode('utf-8')
def get_script_expanding_reviews_code():
    """Return JS bundling jQuery with a snippet that clicks every
    'expand review' button on a ratebeer page."""
    jquery_src = retrieve_jquery_source()
    expand_reviews_script = """
    buttons = document.getElementsByClassName("MuiButtonBase-root MuiButton-root MuiButton-text Button___StyledMaterialButton-FZwYh kvPsnQ colorized__WrappedComponent-apsCh kAVjHC -ml-3 MuiButton-textPrimary");
    for (let i = 1; i < buttons.length; ++i) {
        $(buttons[i]).click();
    }
    """
    return jquery_src + expand_reviews_script
async def render_html(url, injected_script):
    """Load `url` in headless Chromium, run `injected_script`, return the DOM HTML."""
    browser = await launch()
    try:
        page = await browser.newPage()
        await page.goto(url)
        await page.evaluate(injected_script, force_expr=True)
        return await page.content()
    finally:
        # Always close the browser, even when navigation or evaluation fails.
        await browser.close()
def get_page_html(url, injected_script):
    """Synchronous wrapper around render_html.

    NOTE(review): asyncio.get_event_loop() is deprecated outside a running
    loop on Python 3.10+; asyncio.run would be the modern equivalent.
    """
    return asyncio.get_event_loop().run_until_complete(render_html(url, injected_script))
def get_beer_params(html):
    """Extract name/region/style/brewery from a beer-page header, or None if absent."""
    header_div = html.find('.fj-s.fa-c.mb-4', first=True)
    if header_div is None:
        return None
    mui_elems = header_div.find('.MuiTypography-root')
    links = header_div.find('a')
    return {
        'name': mui_elems[0].text,
        'region': mui_elems[1].text,
        'style': links[0].text,
        'brewery': links[1].text
    }
class BeerPageParser:
    """Fetches a ratebeer.com beer page, expands its reviews, and extracts data."""
    def __init__(self, injected_script):
        # JS source (jQuery + click-all-expand-buttons) run on every page.
        self.injected_script = injected_script
    def parse_beer_page(self, number):
        """Scrape beer id `number + 1`; return a dict (with 'reviews') or None on failure."""
        url = 'https://www.ratebeer.com/beer/{}/'.format(str(number + 1))
        try:
            print(number)
            html = HTML(html=get_page_html(url, self.injected_script))
            beer = get_beer_params(html)
            if beer is not None:
                reviews_divs = html.find(
                    '.BeerReviewListItem___StyledDiv-iilxqQ>.Text___StyledTypographyTypeless-bukSfn')
                beer['reviews'] = [review.text for review in reviews_divs]
            return beer
        except Exception as ex:
            # Best-effort scraping: log and skip pages that fail for any reason.
            print(ex)
            print("Error on beer id", number + 1, file=sys.stderr)
        return None
# Module-level parser: built at import time (downloads jQuery once per process).
beer_parser = BeerPageParser(get_script_expanding_reviews_code())
def drink_beer(beer_number):
    """Top-level wrapper (picklable for ProcessPoolExecutor) around parse_beer_page."""
    return beer_parser.parse_beer_page(beer_number)
def write_reviews(file_number, start_range, end_range):
    """Scrape beers [start_range, end_range) in parallel and append one CSV
    row per review to beer_reviews<file_number>.csv (';'-separated).
    """
    with open(
            'beer_reviews' + str(file_number) + '.csv',
            mode='a',
            newline='',
            encoding='utf-8') as csv_file, ProcessPoolExecutor() as pool:
        fieldnames = ['name', 'region', 'style', 'brewery', 'review']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames, delimiter=';')
        for beer in pool.map(drink_beer, range(start_range, end_range)):
            if beer is None:
                continue
            # Build the invariant columns once per beer, not once per review.
            base_row = {key: entry for key, entry in beer.items() if key != 'reviews'}
            for review in beer['reviews']:
                row = dict(base_row)
                row['review'] = review
                writer.writerow(row)
if __name__ == '__main__':
    # Usage: script.py <start_id> <end_id> <output_file_number>
    start_range = int(sys.argv[1])
    end_range = int(sys.argv[2])
    file_num = int(sys.argv[3])
    write_reviews(file_num, start_range, end_range)
|
# coding:utf-8
'''
Created on 2016年12月6日
@author: hasee
test.getmessage.httphandle
'''
from HTMLParser import HTMLParser
class MyHTMLParserLocation(HTMLParser):
    """Collects the text inside <div id="ip_pos"> (IP geolocation result). Python 2."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.items = []    # text fragments found inside the target div
        self.flag = False  # True while we are inside <div id="ip_pos">
    def handle_starttag(self, tag, attrs):
        # Handle an opening tag: arm the flag on <div id="ip_pos">.
        if tag == 'div' and attrs:
            for attr,value in attrs:
                if attr == 'id' and value == 'ip_pos' :
                    # print 'here'
                    self.flag = True
                    return
        self.flag = False
    def handle_data(self, data):
        if self.flag == True:
            # for d in data:
            # print d
            self.items.append(data)
    def show_items(self):
        # Header string is Chinese: "IP address lookup result:"
        print 'ip地址解析结果: '
        for item in self.items:
            print item;
class MyHTMLParserRecordNumber(HTMLParser):
    """Extracts the ICP record number: the text of the first <a> whose href
    points at www.beian.gov.cn. Python 2."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.recordnumber = ''  # record number text once found
        self.flag = False       # True between the matching <a> and its text
    def handle_starttag(self, tag, attrs):
        if tag == 'a' and attrs:
            for attr, value in attrs:
                if attr == 'href' and self.issame(value,'www.beian.gov.cn'):
                    # print 'here'
                    self.flag = True
                    return
        # self.flag = False
    def handle_endtag(self, tag):
        #self.flag = False;
        pass
    def handle_data(self, data):
        if self.flag == True:
            self.recordnumber = data
            self.flag = False
    def show_record_number(self):
        # Label string is Chinese: "record number:"
        print '备案号: ',self.recordnumber
    def issame(self, value, valuetocompare):
        """True when the hostname part of URL `value` equals `valuetocompare`."""
        try:
            hostname = value.split('/')[2]
            # print '解析: ',hostname,'[[[[]]]]',value
            if hostname == valuetocompare:
                return True
        except Exception,e:
            pass
            # print e
            # print '比对失败'
        return False
|
from django.core.validators import RegexValidator, URLValidator
from django.utils.translation import gettext_lazy as _
class HostnameValidator(RegexValidator):
    """Validate a bare hostname, reusing django URLValidator's host regex fragment."""
    regex = '^(' + URLValidator.host_re + ')$'
    message = _('Enter a valid hostname.')
class HostnameAddressValidator(RegexValidator):
    """Validate a hostname or a literal IPv4/IPv6 address."""
    regex = '^(' + URLValidator.ipv4_re + '|' + URLValidator.ipv6_re + '|' + URLValidator.host_re + ')$'
    message = _('Enter a valid hostname or IP address.')
|
from pymotifcluster.clusterwindow import *
# import pytest
# import pytest_benchmark
# 31-residue peptide windows used as fixtures for the tests and benchmarks below.
testing_windows = [
    "DSSASPEVVSDLPPSSPKGSPDRHDPSTSSP",
    "VREQAVWALGNVAGDSPKCRDLVLSYGAMTP",
    "LTSPIPRASVITNQNSPLSSRATRRTSISSG",
    "VTPCKGSGDRSLGLTSPIPRASVITNQNSPL",
    "AIKASSLSKEGLLFGSPKLSGGSSLYGKLQQ",
    "GSFRKNLDTKDAIISSPKLMESGSGKLPVFE",
    "SSIASVPITDTTHVKSETGSPRHSSSAKMDE",
    "SVPITDTTHVKSETGSPRHSSSAKMDETNGR",
    "GSLSKSPSFDWGEDVSPNIPLEKLLVYRLNV",
    "DMSSIDGKETSRSGGSPNRAELRKRLSAAEE",
    "FKSVFTEDLDPPETESESDSPKHSEEHEHPE",
    "SVFTEDLDPPETESESDSPKHSEEHEHPEQE",
    "FTEDLDPPETESESDSPKHSEEHEHPEQEHP",
    "TGRLSPQTFTSSPSLSPSSSSGGSSFMARYA",
    "SPQTFTSSPSLSPSSSSGGSSFMARYAMESS",
    "PQTFTSSPSLSPSSSSGGSSFMARYAMESSK",
    "NLPGNPDPEAEVIALSPKTLMATNRFLCEIC",
    "SPRFSRQRTSPPSLHSPLRSLKEPKRQLIPQ",
    "ESAASESGEKADEGRSQVDGSTEQSPKLESA",
    "ESGEKADEGRSQVDGSTEQSPKLESASSTEP",
]
def test_SymetricDict():
    """SymetricDict must return the same value for keys (a, b) and (b, a).

    NOTE(review): relies on `itertools` being re-exported by the star import
    from pymotifcluster.clusterwindow — confirm.
    """
    my_symdict = SymetricDict('PEPTIDES')
    # This tests that all values can be set appropriately
    for i, x in enumerate(itertools.combinations_with_replacement(set('PEPTIDES'),2)):
        my_symdict[x] = i
        assert my_symdict[x] == i, "Value not being set appropiately"
        assert my_symdict[x[::-1]] == i, "Value not being set appropiately"
    for x in set('PEPTIDES'):
        for y in set('PEPTIDES'):
            assert my_symdict[x, y] == my_symdict[y, x], "Symdict is not symetrical"
def test_getPermutations():
    """3 symbols taken 2 at a time with repetition should yield 3**2 = 9."""
    my_perm = getPermutations('123', 2)
    assert len(my_perm) == 9, 'Unexpected length of the permutations'
def test_getNmers():
    """getNmers should count unique n-mers and optionally fill missing ones."""
    n_mers1 = getNmers("AAAAAAA", 2)
    assert len(n_mers1) == 1, 'More than 1 nmer for a sequence with only one unique character'
    n_mers2 = getNmers("AABAAAB", 3)
    assert len(n_mers2) == 4, 'Unexpected number of nmers, check manually'
    # With fill_missing, every permutation of the alphabet appears as a key.
    n_mers3 = getNmers("AABAAAB", 2, fill_missing=True, default_alphabet='ABC')
    print(str(len(n_mers3)))
    print(str(len(getPermutations('ABC', 2))))
    assert len(n_mers3) == len(getPermutations('ABC', 2)), 'Not filling nmers when asked'
    # TODO implement cropping border delimiters in getNmers
    #n_mers3 = getNmers("__AAAAA", 3)
    #assert len(n_mers1) == 1, 'More than 1 nmer for a sequence with only one unique character and border delimiters'
def test_getNmer_method_dispatching():
    """getNmers dispatches on input type: str -> counts, list -> dict keyed by string."""
    assert len(getNmers('abc', 2)) == 2
    assert set(getNmers('abc', 2)) == set(getNmers('abc', 2))
    assert len(getNmers(['abc', 'cde'], 2)) == 2
    assert set(getNmers(['abc', 'cde'], 2)['abc']) == set(getNmers('abc', 2))
    assert set(getNmers(['abc', 'cde'], 2)['cde']) == set(getNmers('cde', 2))
def _test_trimer_clustering(testing_windows):
    """Helper (leading underscore: not collected by pytest).

    Vectorizes each window by trimer counts and queries each vector's two
    nearest neighbours with a KDTree.  NOTE(review): relies on `np` and
    `KDTree` being re-exported by the clusterwindow star import — confirm.
    """
    foo = [getNmers(x, 3, fill_missing = True) for x in testing_windows]
    myarray = np.array([[ int(i) for i in elem.values()] for elem in foo])
    kdt = KDTree(myarray, leaf_size = 20, metric = 'manhattan')
    foo2 = kdt.query(myarray, k =2, return_distance = False)
def test_trimer_clustering_10(benchmark):
    """Benchmark clustering on the first 10 fixture windows.

    Renamed from test_trimer_clustering_20: the original name duplicated the
    next test's, so this benchmark was silently shadowed and never ran, and
    the input here is 10 windows, not 20.
    """
    benchmark(_test_trimer_clustering, testing_windows[0:10])
def test_trimer_clustering_20(benchmark):
    """Benchmark clustering on all 20 fixture windows."""
    benchmark(_test_trimer_clustering, testing_windows)
def test_trimer_clustering_40(benchmark):
    """Benchmark clustering on 40 windows (fixtures doubled)."""
    benchmark(_test_trimer_clustering, testing_windows*2)
def test_trimer_clustering_60(benchmark):
    """Benchmark clustering on 60 windows (fixtures tripled)."""
    benchmark(_test_trimer_clustering, testing_windows*3)
|
# Generated by Django 2.1.5 on 2019-03-07 08:59
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled model 'tshird_year_first_semester_bsba' to 'third_...'."""
    dependencies = [
        ('users', '0008_first_year_first_semester_bsba_first_year_second_semester_bsba_fourth_year_first_semester_bsba_fourt'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='tshird_year_first_semester_bsba',
            new_name='third_year_first_semester_bsba',
        ),
    ]
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress TF C++ info/warning logs
tf.compat.v1.disable_eager_execution() # TF2: required to use the legacy graph/Session API
def demo1():
    """Minimal TF1-style graph demo: build a + b = c, then run it in a Session."""
    # Build the graph:
    a=tf.constant(2,name='a') # define constants
    b=tf.constant(3,name='b')
    c=tf.add(a,b,name='c')
    print(a,'\n',b)
    print(c)
    print(a.graph)
    # Run the graph:
    with tf.compat.v1.Session() as sess:
        c_value=sess.run(c)
        print("c_value",c_value)
        print("sess_graph:",sess.graph)
        # Write the graph to a local events file (for TensorBoard)
        # tf.compat.v1.summary.FileWriter("./summary", graph=sess.graph)
def tensor_demo():
    """Tensors of rank 0-3 built with tf.constant."""
    a=tf.constant(3.0) # scalar (rank-0 tensor)
    b=tf.constant([1,2,3,5,6,9]) # vector (rank-1 tensor)
    c=tf.constant([
        [1,2,3],
        [4,5,6]
    ]) # matrix (rank-2 tensor)
    d=tf.constant([
        [
            [1,2],
            [2,3]
        ],
        [
            [3,4],
            [4,5]
        ]
    ]) # 3-D array (rank-3 tensor)
    print(a,b,c,d)
def variable_demo():
    """tf.Variable values must be initialized before a Session can read them."""
    a=tf.Variable(initial_value=50,name='a')
    b=tf.Variable(initial_value=40)
    c=tf.add(a,b)
    print(a,b,c)
    init = tf.compat.v1.global_variables_initializer()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        print(sess.run(c))
if __name__ == '__main__':
    # Uncomment to run the other demos.
    # demo1()
    # tensor_demo()
    variable_demo()
|
import argparse
import glob
import json
import os
import time
import math
import openai
import shutil
import pathlib
# Only subdirectories named after these languages are evaluated.
languages_to_run = {'C', 'C#', 'C++', 'Go', 'Java', 'JavaScript',
                    'PHP', 'Python', 'Ruby', 'Rust', 'Scala', 'TypeScript'}
# The private OpenAI API key needs to be an environment variable
openai.api_key = os.getenv('OPENAI_API_KEY')
# As instructed here: https://community.openai.com/t/token-logprobs-when-echo-is-true/9626/2
# "Transformer models don’t predict the probability of the first token. If you want to get the probability
# for your first token you can try to use <|endoftext|> as the first token as a workaround."
endoftext_token = '<|endoftext|>'
def ppl(avg_logprob):
    """Convert an average per-token log-probability (natural log) to perplexity."""
    bits_per_token = -avg_logprob / math.log(2)
    return 2 ** bits_per_token
def call_codex(code_str, save_probs):
    """Score `code_str` with Codex.

    Returns (sum of token logprobs, token count, per-token detail dict or
    None). The prompt is prefixed with <|endoftext|> so the first real
    token also receives a logprob; that prefix token is then skipped.
    """
    eos_code_str = endoftext_token + code_str
    # engine: 'davinci-codex' is currently the best codex model
    # max_tokens=0 means that we don't want the model to generate additional tokens
    # logprobs=0 means that we don't want the logprobs of the alternative tokens, only the actual tokens
    # echo=True means that we want the model to echo our prompt, in addition to our (not existing) completion
    completion = openai.Completion.create(engine="davinci-codex", prompt=eos_code_str,
                                          max_tokens=0,
                                          temperature=0.0,
                                          logprobs=0,
                                          n=1,
                                          echo=True)
    c = completion.choices[0]
    # skipping the <|endoftext|> token
    sum_logprobs = sum(c.logprobs.token_logprobs[1:])
    num_tokens = len(c.logprobs.token_logprobs[1:])
    if save_probs:
        saved_probs = {
            'text': code_str,
            'tokens': c.logprobs.tokens[1:],
            'logprobs': c.logprobs.token_logprobs[1:],
            'sum_logprobs': sum_logprobs
        }
    else:
        saved_probs = None
    return sum_logprobs, num_tokens, saved_probs
if __name__ == '__main__':
    # For each language subdirectory, score every file with Codex, accumulate
    # logprobs/token counts, and report per-language perplexity at the end.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dirs', type=str, help='path to a directory that contains a subdirectory for each evaluated language', required=False)
    parser.add_argument('--save-probs', type=str, required=False, default=None)
    parser.add_argument('--output', type=str, required=False, default=os.devnull)
    args = parser.parse_args()
    results = {}
    dirs = glob.glob(os.path.join(args.dirs, '*'), recursive=False)
    # Files that fail scoring get moved to a sibling '-excluded' tree.
    excluded_dirs = args.dirs + '-excluded'
    pathlib.Path(excluded_dirs).mkdir(parents=True, exist_ok=True)
    for language in dirs:
        if language.split('/')[-1] not in languages_to_run:
            continue
        print('Language:', language)
        files = glob.glob(os.path.join(language, '**/*'), recursive=True)
        files = [f for f in files if os.path.isfile(f)]
        log_probs_sum = 0
        tokens_count = 0
        ignored_files = []
        all_per_token_probs = []
        with open(args.output, 'w') as out_file:
            for file in files:
                try:
                    with open(file, 'r') as f:
                        code_str = f.read()
                    logprobs_sum, logprobs_count, per_token_probs = call_codex(code_str, args.save_probs is not None)
                except Exception as e:
                    # API/read failure: remember the file so it can be excluded.
                    print(f'EXCEPTION in file {file}: {e}')
                    print(e)
                    ignored_files.append(file)
                    # OpenAI limits the request rate to 20/min
                    time.sleep(10)
                    continue
                out_str = f'{logprobs_sum}\t{logprobs_count}\t{file}'
                if args.output != os.devnull:
                    out_file.writelines([f'Evaluating file: {file}', out_str, '\n'])
                log_probs_sum += logprobs_sum
                tokens_count += logprobs_count
                # OpenAI limits the request rate to 20/min
                time.sleep(10)
        # NOTE(review): if every file in a language fails, tokens_count is 0
        # and the divisions below raise ZeroDivisionError — confirm intended.
        print(f'\n\n\nlogprobs sum: {log_probs_sum}')
        print(f'total tokens: {tokens_count}')
        print(f'Average loss: {-log_probs_sum / tokens_count}')
        print(f'Perplexity: {ppl(log_probs_sum / tokens_count)}')
        print(f'Ignored files:')
        # Move failed files into the excluded tree, preserving their paths.
        for f in ignored_files:
            print(f'\t{f}')
            new_location = os.path.join(excluded_dirs, os.path.dirname(f))
            pathlib.Path(new_location).mkdir(parents=True, exist_ok=True)
            shutil.move(f, new_location)
        results[language] = {
            'log_probs_sum': log_probs_sum,
            'tokens_count': tokens_count,
            'average_loss': -log_probs_sum / tokens_count,
            'perplexity': ppl(log_probs_sum / tokens_count),
        }
    print('Language, sum_logprobs, average_loss, perplexity, num_tokens')
    for language in results:
        print(f'{language.split("/")[-1]}, {results[language]["log_probs_sum"]}, {results[language]["average_loss"]}, {results[language]["perplexity"]}, {results[language]["tokens_count"]}')
|
# -*- coding: utf-8 -*-
# Split a directory of VOC-style XML annotations into trainval/train/val/test
# file lists (one basename per line, '.xml' extension stripped).
import os
import random
trainval_percent = 1    # fraction of the data used for train+validation
train_percent = 0.8     # fraction of the trainval data used for training
xmlfilepath = '/PycharmProjects/lx/data/xml'   # directory of annotation XML files
txtsavepath = '/PycharmProjects/lx/data/Main'  # directory for the output lists
total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)  # renamed from `list`, which shadowed the builtin
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = set(random.sample(trainval, tr))  # sets give O(1) membership below
trainval = set(trainval)
# Context managers guarantee the four output files are closed even on error.
with open('/PycharmProjects/lx/data/Main/trainval.txt', 'w') as ftrainval, \
     open('/PycharmProjects/lx/data/Main/test.txt', 'w') as ftest, \
     open('/PycharmProjects/lx/data/Main/train.txt', 'w') as ftrain, \
     open('/PycharmProjects/lx/data/Main/val.txt', 'w') as fval:
    for i in indices:
        name = total_xml[i][:-4] + '\n'  # strip the '.xml' suffix
        if i in trainval:
            ftrainval.write(name)
            if i in train:
                ftrain.write(name)
            else:
                fval.write(name)
        else:
            # With trainval_percent == 1 this branch never fires (test list empty).
            ftest.write(name)
|
import random
import string
import re
def random_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random string of `size` characters drawn from `chars`.

    Defaults to uppercase ASCII letters and digits.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
"""
def pruebas(request):
# Formulario
if request.method == 'POST':
form = ProPedidoForm(request.POST)
if form.is_valid():
form.save()
#return HttpResponseRedirect('/debug/')
else:
form = ProPedidoForm()
# productos en la cesta
try:
basket = request.session['basket']
except KeyError:
basket = []
cesta = []
for row in basket:
cesta.append( re.sub("([^0-9]+)", "", row) )
productos = Producto.objects.filter(id__in=cesta)
# Obtenemos total y sumamos el envio
sumar = []
for items in productos:
precio = Decimal(items.precio)
sumar.append(precio)
subtotal = sum(sumar)
total = subtotal+199
# Usuario y perfil
user = request.user
profile = user.profile
# Datos extras
paso = 'confirm'
data = {
"user": user,
"lista": productos,
'profile': profile,
"subtotal" : subtotal,
"total": total,
"formula": form,
"step" : paso
}
return render_to_response("test.html", context_instance=RequestContext(request,data))
""" |
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from home_app.models import CreatedPizzas, Toppings, PizzaSize
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def created_pizza_list(request):
    """List saved pizzas; POST filters by pizza type and/or size.

    The filter value 'All' means "no filter" for that dimension.
    """
    size_list = PizzaSize.objects.all()
    base_context = {'access_pz_list': CreatedPizzas.objects.all(), 'size_list': size_list}
    if request.method != 'POST':
        return render(request, 'created_pizza_list.html', base_context)
    pizza_type = request.POST.get('pizza_type')
    pizza_size = request.POST.get('pizza_size')
    filters = {}
    if pizza_type != 'All':
        filters['pizza_type'] = pizza_type
    if pizza_size != 'All':
        filters['pizza_size'] = PizzaSize.objects.get(pizza_size=pizza_size)
    filtered = CreatedPizzas.objects.filter(**filters) if filters else CreatedPizzas.objects.all()
    return render(request, 'created_pizza_list.html',
                  {'access_pz_list': filtered, 'size_list': size_list})
def create_pizza(request):
    """Show the pizza-builder form and, on POST, save a new CreatedPizzas row.

    Toppings arrive as one POST field per topping (named after the topping);
    the chosen ones are joined into a comma-separated string.  Re-renders the
    form with `alert_topp_emty` set when no topping was chosen, or with
    `alert` set after a successful save.
    """
    size_list = PizzaSize.objects.all()
    toppings_list = Toppings.objects.all()
    context_dict = {'toppings_list': toppings_list, 'size_list': size_list}
    if request.method == 'POST':
        creator = request.POST.get('name')
        pizza_type = request.POST.get('pizza_type')
        pizza_size = request.POST.get('pizza_size')
        size_obj = PizzaSize.objects.get(pizza_size=pizza_size)
        pizza_topp_str = ''
        for topping in toppings_list:
            # Fix: test for a missing field directly instead of comparing the
            # stringified value to 'None' (which also dropped a topping whose
            # submitted value was the literal string 'None').
            code = request.POST.get(str(topping))
            if code is not None:
                pizza_topp_str += str(code) + ', '
        if pizza_topp_str == '':
            # No topping selected: warn via template flag instead of saving.
            context_dict['alert_topp_emty'] = 1
            return render(request, 'create_pizza.html', context_dict)
        pizza_obj = CreatedPizzas(creator=creator, pizza_type=pizza_type, pizza_size=size_obj, pizza_toppings=pizza_topp_str)
        pizza_obj.save()
        context_dict['alert'] = 1
        return render(request, 'create_pizza.html', context_dict)
    return render(request, 'create_pizza.html', context_dict)
def edit_pizza(request, pk):
    """Edit the CreatedPizzas row with primary key `pk`.

    GET pre-fills the form context; POST overwrites the row's creator, type,
    size, and toppings, then re-renders with `alert` set.  Re-renders with
    `alert_topp_emty` set when no topping was chosen.
    """
    size_list = PizzaSize.objects.all()
    toppings_list = Toppings.objects.all()
    selected_pizza = CreatedPizzas.objects.get(id=pk)
    context_dict = {'toppings_list': toppings_list, 'size_list': size_list, 'pk': pk, 'creator': selected_pizza.creator}
    if request.method == 'POST':
        creator = request.POST.get('name')
        pizza_type = request.POST.get('pizza_type')
        pizza_size = request.POST.get('pizza_size')
        size_obj = PizzaSize.objects.get(pizza_size=pizza_size)
        pizza_topp_str = ''
        for topping in toppings_list:
            # Fix (consistent with create_pizza): detect a missing field
            # directly instead of comparing the stringified value to 'None'.
            code = request.POST.get(str(topping))
            if code is not None:
                pizza_topp_str += str(code) + ', '
        if pizza_topp_str == '':
            context_dict['alert_topp_emty'] = 1
            return render(request, 'edit_pizza.html', context_dict)
        selected_pizza.creator = creator
        selected_pizza.pizza_type = pizza_type
        selected_pizza.pizza_size = size_obj
        selected_pizza.pizza_toppings = pizza_topp_str
        selected_pizza.save()
        context_dict['alert'] = 1
        return render(request, 'edit_pizza.html', context_dict)
    return render(request, 'edit_pizza.html', context_dict)
def add_variety(request):
    """Show existing sizes/toppings and, on POST, add a new size and/or topping.

    Fix: default absent POST fields to '' — request.POST.get() returns None
    for a missing key, and the original's len(None) raised a TypeError.
    """
    pizza_size = PizzaSize.objects.all()
    pizza_toppings = Toppings.objects.all()
    if request.method == 'POST':
        pizza_size_reci = request.POST.get('pizza_size', '')
        pizza_toppings_reci = request.POST.get('pizza_toppings', '')
        print(pizza_size_reci)            # debug output, kept from original
        print(len(pizza_toppings_reci))   # debug output, kept from original
        if len(pizza_size_reci) != 0:
            PizzaSize(pizza_size=pizza_size_reci).save()
        if len(pizza_toppings_reci) != 0:
            Toppings(pizza_toppings=pizza_toppings_reci).save()
        return render(request, 'addvariety.html', {'pizza_size': pizza_size, 'pizza_toppings': pizza_toppings})
    return render(request, 'addvariety.html', {'pizza_size': pizza_size, 'pizza_toppings': pizza_toppings})
def delete_pizza(request, pk):
    """Delete the pizza with primary key `pk` on POST, then show the list page."""
    listing_context = {'access_pz_list': CreatedPizzas.objects.all()}
    if request.method != 'POST':
        return render(request, 'created_pizza_list.html', listing_context)
    CreatedPizzas.objects.get(pk=pk).delete()
    return render(request, 'created_pizza_list.html', listing_context)
def delete_size(request, pk):
    """Delete the PizzaSize with primary key `pk`, then re-render the variety page.

    NOTE(review): the delete runs on any HTTP method, including GET — consider
    restricting it to POST like delete_pizza.
    """
    all_sizes = PizzaSize.objects.all()
    all_toppings = Toppings.objects.all()
    print('reached1')  # debug output, kept from original
    PizzaSize.objects.get(pk=pk).delete()
    context = {'pizza_size': all_sizes, 'pizza_toppings': all_toppings}
    return render(request, 'addvariety.html', context)
def delete_toppings(request, pk):
    """Delete the Toppings row with primary key `pk`, then re-render the variety page.

    NOTE(review): the delete runs on any HTTP method, including GET — consider
    restricting it to POST like delete_pizza.
    """
    all_sizes = PizzaSize.objects.all()
    all_toppings = Toppings.objects.all()
    Toppings.objects.get(pk=pk).delete()
    context = {'pizza_size': all_sizes, 'pizza_toppings': all_toppings}
    return render(request, 'addvariety.html', context)
|
from flask import Flask, make_response, request
app = Flask(__name__)
xml_o = """<?xml version="1.0" encoding="UTF-8"?>
<query xmlns:yahoo="http://www.yahooapis.com/v1/base.rng"
yahoo:count="1" yahoo:created="2018-04-21T15:18:32Z" yahoo:lang="en-IN">
<results>
<channel>
<yweather:units
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
distance="mi" pressure="in" speed="mph" temperature="F"/>
<title>Yahoo! Weather - Nome, AK, US</title>
<link>http://us.rd.yahoo.com/dailynews/rss/weather/Country__Country/*https://weather.yahoo.com/country/state/city-2460286/</link>
<description>Yahoo! Weather for Nome, AK, US</description>
<language>en-us</language>
<lastBuildDate>Sat, 21 Apr 2018 07:18 AM AKDT</lastBuildDate>
<ttl>60</ttl>
<yweather:location
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
city="Nome" country="United States" region=" AK"/>
<yweather:wind
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
chill="3" direction="23" speed="22"/>
<yweather:atmosphere
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
humidity="71" pressure="1007.0" rising="0" visibility="16.1"/>
<yweather:astronomy
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
sunrise="7:6 am" sunset="10:56 pm"/>
<image>
<title>Yahoo! Weather</title>
<width>142</width>
<height>18</height>
<link>http://weather.yahoo.com</link>
<url>http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif</url>
</image>
<item>
<title>Conditions for Nome, AK, US at 06:00 AM AKDT</title>
<geo:lat xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#">64.499474</geo:lat>
<geo:long xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#">-165.405792</geo:long>
<link>http://us.rd.yahoo.com/dailynews/rss/weather/Country__Country/*https://weather.yahoo.com/country/state/city-2460286/</link>
<pubDate>Sat, 21 Apr 2018 06:00 AM AKDT</pubDate>
<yweather:condition
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="31" date="Sat, 21 Apr 2018 06:00 AM AKDT"
temp="17" text="Clear"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="23" date="21 Apr 2018" day="Sat" high="27"
low="17" text="Breezy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="34" date="22 Apr 2018" day="Sun" high="38"
low="22" text="Mostly Sunny"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="28" date="23 Apr 2018" day="Mon" high="33"
low="28" text="Mostly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="28" date="24 Apr 2018" day="Tue" high="36"
low="29" text="Mostly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="28" date="25 Apr 2018" day="Wed" high="35"
low="26" text="Mostly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="28" date="26 Apr 2018" day="Thu" high="34"
low="31" text="Mostly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="30" date="27 Apr 2018" day="Fri" high="34"
low="29" text="Partly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="28" date="28 Apr 2018" day="Sat" high="33"
low="30" text="Mostly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="28" date="29 Apr 2018" day="Sun" high="34"
low="31" text="Mostly Cloudy"/>
<yweather:forecast
xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0"
code="26" date="30 Apr 2018" day="Mon" high="37"
low="29" text="Cloudy"/>
<description><![CDATA[<img src="http://l.yimg.com/a/i/us/we/52/31.gif"/>
<BR />
<b>Current Conditions:</b>
<BR />Clear
<BR />
<BR />
<b>Forecast:</b>
<BR /> Sat - Breezy. High: 27Low: 17
<BR /> Sun - Mostly Sunny. High: 38Low: 22
<BR /> Mon - Mostly Cloudy. High: 33Low: 28
<BR /> Tue - Mostly Cloudy. High: 36Low: 29
<BR /> Wed - Mostly Cloudy. High: 35Low: 26
<BR />
<BR />
<a href="http://us.rd.yahoo.com/dailynews/rss/weather/Country__Country/*https://weather.yahoo.com/country/state/city-2460286/">Full Forecast at Yahoo! Weather</a>
<BR />
<BR />
<BR />
]]></description>
<guid isPermaLink="false"/>
</item>
</channel>
</results>
</query>
"""
json_o = """{
"query": {
"count": 1,
"created": "2018-04-21T15:21:01Z",
"lang": "en-IN",
"results": {
"channel": {
"units": {
"distance": "mi",
"pressure": "in",
"speed": "mph",
"temperature": "F"
},
"title": "Yahoo! Weather - Nome, AK, US",
"link": "http://us.rd.yahoo.com/dailynews/rss/weather/Country__Country/*https://weather.yahoo.com/country/state/city-2460286/",
"description": "Yahoo! Weather for Nome, AK, US",
"language": "en-us",
"lastBuildDate": "Sat, 21 Apr 2018 07:21 AM AKDT",
"ttl": "60",
"location": {
"city": "Nome",
"country": "United States",
"region": " AK"
},
"wind": {
"chill": "3",
"direction": "23",
"speed": "22"
},
"atmosphere": {
"humidity": "71",
"pressure": "1007.0",
"rising": "0",
"visibility": "16.1"
},
"astronomy": {
"sunrise": "7:6 am",
"sunset": "10:56 pm"
},
"image": {
"title": "Yahoo! Weather",
"width": "142",
"height": "18",
"link": "http://weather.yahoo.com",
"url": "http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif"
},
"item": {
"title": "Conditions for Nome, AK, US at 06:00 AM AKDT",
"lat": "64.499474",
"long": "-165.405792",
"link": "http://us.rd.yahoo.com/dailynews/rss/weather/Country__Country/*https://weather.yahoo.com/country/state/city-2460286/",
"pubDate": "Sat, 21 Apr 2018 06:00 AM AKDT",
"condition": {
"code": "31",
"date": "Sat, 21 Apr 2018 06:00 AM AKDT",
"temp": "17",
"text": "Clear"
},
"forecast": [
{
"code": "23",
"date": "21 Apr 2018",
"day": "Sat",
"high": "27",
"low": "17",
"text": "Breezy"
},
{
"code": "34",
"date": "22 Apr 2018",
"day": "Sun",
"high": "38",
"low": "22",
"text": "Mostly Sunny"
},
{
"code": "28",
"date": "23 Apr 2018",
"day": "Mon",
"high": "33",
"low": "28",
"text": "Mostly Cloudy"
},
{
"code": "28",
"date": "24 Apr 2018",
"day": "Tue",
"high": "36",
"low": "29",
"text": "Mostly Cloudy"
},
{
"code": "28",
"date": "25 Apr 2018",
"day": "Wed",
"high": "35",
"low": "26",
"text": "Mostly Cloudy"
},
{
"code": "28",
"date": "26 Apr 2018",
"day": "Thu",
"high": "34",
"low": "31",
"text": "Mostly Cloudy"
},
{
"code": "30",
"date": "27 Apr 2018",
"day": "Fri",
"high": "34",
"low": "29",
"text": "Partly Cloudy"
},
{
"code": "28",
"date": "28 Apr 2018",
"day": "Sat",
"high": "33",
"low": "30",
"text": "Mostly Cloudy"
},
{
"code": "28",
"date": "29 Apr 2018",
"day": "Sun",
"high": "34",
"low": "31",
"text": "Mostly Cloudy"
},
{
"code": "26",
"date": "30 Apr 2018",
"day": "Mon",
"high": "37",
"low": "29",
"text": "Cloudy"
}
],
"description": "<![CDATA[<img src=\\"http://l.yimg.com/a/i/us/we/52/31.gif\\"/>\\n<BR />\\n<b>Current Conditions:</b>\\n<BR />Clear\\n<BR />\\n<BR />\\n<b>Forecast:</b>\\n<BR /> Sat - Breezy. High: 27Low: 17\\n<BR /> Sun - Mostly Sunny. High: 38Low: 22\\n<BR /> Mon - Mostly Cloudy. High: 33Low: 28\\n<BR /> Tue - Mostly Cloudy. High: 36Low: 29\\n<BR /> Wed - Mostly Cloudy. High: 35Low: 26\\n<BR />\\n<BR />\\n<a href=\\"http://us.rd.yahoo.com/dailynews/rss/weather/Country__Country/*https://weather.yahoo.com/country/state/city-2460286/\\">Full Forecast at Yahoo! Weather</a>\\n<BR />\\n<BR />\\n<BR />\\n]]>",
"guid": {
"isPermaLink": "false"
}
}
}
}
}
}"""
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    """Serve the canned Yahoo! Weather payload for any request path.

    Returns the XML fixture when the query string says format=xml,
    otherwise the JSON fixture.
    """
    if request.args.get("format") == "xml":
        return xml_o, 200, {'Content-Type': 'application/xml'}
    return json_o, 200, {'Content-Type': 'application/json'}
# Run the Flask development server when executed as a script.
if __name__ == '__main__':
    app.run()
'''
http://localhost:5000/query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22nome%2C%20ak%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys
http://localhost:5000/query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22nome%2C%20ak%22)&format=xml&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys
''' |
import sys,os
# Path hack: make the package root and its parent importable — presumably so
# project-level modules resolve when this app runs standalone (TODO confirm).
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from django.shortcuts import render
from django.http import HttpResponse
from django.http import request
import nsepy
from mrigwebapp.myhtml import myhtml
import mrigwebapp.forms as fm
import interface.web.webdashboard as wdb
import pandas as pd
import mrigutilities as mu
import mrigstatics
import datetime
import json
import strategies.stocks as stocks
# Create your views here.
def home(request):
    """Render the landing page; Google Ads are enabled only in production."""
    show_ads = 1 if mrigstatics.ENVIRONMENT == 'production' else 0
    return render(request, "index.html", {'GOOGLE_ADS': show_ads})
def stock(request,symbol='NIFTY 50'):
    """Stock detail page: price/return/risk tables, graphs, option chain, news.

    POST reads the symbol from the search form; pre-rendered analytics come
    from the `stock_page` table when available, otherwise are computed on the
    fly via the webdashboard module.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    engine = mu.sql_engine(mrigstatics.MRIGWEB[mrigstatics.ENVIRONMENT])
    # NOTE(review): SQL built by string concatenation; on the POST path below
    # `symbol` comes from user input — injection risk, use a parameterized query.
    sql = "select * from stock_page where symbol='"+symbol+"'"
    stock_page = pd.read_sql(sql,engine)
    # stocklist = list(stocklist)
    # NOTE(review): on GET these stay '' and `ratios.empty` / `oc.empty` below
    # would raise AttributeError — presumably this view is only reached via
    # POST in practice; verify against the URL conf.
    price_list,return_list,risk_list,ratios,oc= "","","","",""
    price_graph,return_graph,macd_graph,boll_graph = "","","",""
    stock_desc = ""
    news = ""
    engine = mu.sql_engine()
    stocklist = engine.execute("select distinct sm.symbol, sm.stock_name from security_master sm inner join stock_history sh on sm.symbol=sh.symbol where sh.series='EQ'").fetchall()
    # Build the <datalist> options used by the symbol search box.
    slist = "<input style=\"width: 130px; height: 25px;\" list=\"stocks\" name=\"symbol\"><datalist id=\"stocks\">"
    for stk in stocklist:
        if stk[0] != 'symbol':
            if stk[1] != None:
                slist = slist + "<option value=\""+str(stk[0])+" : "+str(stk[1])+"\">"
            else:
                slist = slist + "<option value=\""+str(stk[0])+" : "+str(stk[0])+"\">"
    slist = slist + "</datalist>"
    if request.method == "POST":
        #Get the posted form
        stockform = fm.StockForm(request.POST)
        if stockform.is_valid():
            # Form value looks like "SYMBOL : Company Name"; keep the symbol part.
            symbol = stockform.cleaned_data['symbol']
            symbol = symbol.split(":")[0].strip()
            print(symbol)
            engine = mu.sql_engine(mrigstatics.MRIGWEB[mrigstatics.ENVIRONMENT])
            if (symbol and symbol != ""):
                # NOTE(review): same injection risk as above.
                sql = "select * from stock_page where symbol='"+symbol+"'"
                stock_page = pd.read_sql(sql,engine)
                if not stock_page.empty:
                    # Cache hit: unpack the pre-rendered page fragments.
                    price_list = stock_page['price_list'][0]
                    return_list = stock_page['return_list'][0]
                    risk_list = stock_page['risk_list'][0]
                    ratios = stock_page['ratios'][0]
                    oc = stock_page['oc'][0]
                    # Graph columns are stored as binary and decoded to utf-8
                    # text (presumably embeddable markup — TODO confirm).
                    price_graph = stock_page['price_graph'][0]
                    price_graph = bytes(price_graph)
                    price_graph = price_graph.decode('utf-8')
                    return_graph = stock_page['return_graph'][0]
                    return_graph = bytes(return_graph)
                    return_graph = return_graph.decode('utf-8')
                    macd_graph = stock_page['macd_graph'][0]
                    macd_graph = bytes(macd_graph)
                    macd_graph = macd_graph.decode('utf-8')
                    boll_graph = stock_page['boll_graph'][0]
                    boll_graph = bytes(boll_graph)
                    boll_graph = boll_graph.decode('utf-8')
                    stock_desc = stock_page['stock_description'][0]
                    news = stock_page['news'][0]
                    news = json.loads(news)
                else:
                    # Cache miss: compute analytics on the fly.
                    if symbol == 'NIFTY 50':
                        stkanalytics = wdb.mrigweb_index(symbol)
                    else:
                        stkanalytics = wdb.mrigweb_stock(symbol)
                    price_list,return_list,risk_list,ratios,oc = stkanalytics[0], stkanalytics[1], stkanalytics[2], stkanalytics[3], stkanalytics[4]
                    price_graph,return_graph,macd_graph,boll_graph = stkanalytics[5], stkanalytics[6], stkanalytics[7], stkanalytics[8]
                    stock_desc = stkanalytics[9]
                    news = stkanalytics[10]
    # fd,oc = fd.to_html(), oc.to_html()
    return_list = myhtml.list_to_html(return_list)
    risk_list = myhtml.list_to_html(risk_list)
    if not ratios.empty:
        # Promote the header row and render the ratios table to HTML.
        ratios = ratios.reset_index()
        ratios_head = list(ratios)
        ratios_head.remove("index")
        ratios_head.insert(0,"")
        ratios = [ratios_head] + ratios.values.tolist()
        ratios = myhtml.list_to_html(ratios)
    if not oc.empty:
        # Turn expiry cells into links to the option page
        # (/option/<symbol>:<ddmmyyyy>:<strike>:<ltp>:<PE|CE>).
        oc = oc.reset_index()
        oc['PUT_Expiry'] = "<a style=\"color:#f7ed4a;text-decoration:underline;\" href=\"/option/"+symbol+":"+oc['Expiry'].apply(lambda x:x.strftime('%d%m%Y')) +":"+ oc['Strike_Price'].apply(lambda x:str(x))+":"+ oc['PUT_LTP'].apply(lambda x:str(x))+":PE\">"+oc['Expiry'].apply(lambda x:x.strftime('%d-%b-%Y'))+"</a>"
        oc['Expiry'] = "<a style=\"color:#f7ed4a;text-decoration:underline;\" href=\"/option/"+symbol+":"+oc['Expiry'].apply(lambda x:x.strftime('%d%m%Y')) +":"+ oc['Strike_Price'].apply(lambda x:str(x))+":"+ oc['CALL_LTP'].apply(lambda x:str(x))+":CE\">"+oc['Expiry'].apply(lambda x:x.strftime('%d-%b-%Y'))+"</a>"
        # oc['PUT_Expiry'] = oc['Expiry'].apply(lambda x:x.replace("CE","PE"))
        oc_head = [x.replace("CALL_","").replace("PUT_","").replace("_"," ") for x in list(oc)]
        oc = [oc_head] + oc.values.tolist()
        oc = myhtml.list_to_html(oc,"small")
    price_labels = ['Last Price','Open','Previous Close','Day High', 'Day Low','52 Week High','52 Week Low']
    quotes = []
    if symbol == 'NIFTY 50':
        stk = stocks.Index('NIFTY 50')
    else:
        stk = stocks.Stock(symbol)
    # Quote fields may be absent; blank cells keep the table aligned.
    quotes.append(stk.quote['lastPrice']) if 'lastPrice' in stk.quote.keys() else quotes.append("")
    quotes.append(stk.quote['open']) if 'open' in stk.quote.keys() else quotes.append("")
    quotes.append(stk.quote['previousclose']) if 'previousclose' in stk.quote.keys() else quotes.append("")
    quotes.append(stk.quote['dayhigh']) if 'dayhigh' in stk.quote.keys() else quotes.append("")
    quotes.append(stk.quote['daylow']) if 'daylow' in stk.quote.keys() else quotes.append("")
    quotes.append(stk.quote['high52']) if 'high52' in stk.quote.keys() else quotes.append("")
    quotes.append(stk.quote['low52']) if 'low52' in stk.quote.keys() else quotes.append("")
    # if len(stk.quote) > 0:
    #     quotes = [stk.quote['lastPrice'],
    #               stk.quote['open'],
    #               stk.quote['previousclose'],
    #               stk.quote['dayhigh'],
    #               stk.quote['daylow'],
    #               stk.quote['high52'],
    #               stk.quote['low52']]
    # else:
    #     quotes = []
    price_list = [price_labels,quotes]
    price_list = myhtml.list_to_html(price_list)
    return render(request, "stock.html", {"slist":slist,"symbol":symbol,
                                          "stock_desc" : stock_desc,
                                          "price_list":price_list,
                                          "return_list":return_list,
                                          "risk_list":risk_list,
                                          "ratios":ratios,
                                          "oc":oc,
                                          "price_graph":price_graph,
                                          "return_graph":return_graph,
                                          "macd_graph":macd_graph,
                                          "boll_graph":boll_graph,
                                          "news":news,
                                          'GOOGLE_ADS': GOOGLE_ADS})
def os(request):
    """Options-strategy landing page: strategy picker plus a cached table.

    NOTE(review): this view's name shadows the imported `os` module at module
    level, and the SQL is built by concatenating form input (injection risk) —
    consider renaming the view and parameterizing the query.
    """
    GOOGLE_ADS = 1 if mrigstatics.ENVIRONMENT == 'production' else 0
    engine = mu.sql_engine(mrigstatics.MRIGWEB[mrigstatics.ENVIRONMENT])
    os_list = ["Covered Call","Bull Put Spread", "Bear Call Spread"]
    strategy_desc = ""
    strategy = None
    oc = pd.DataFrame()
    # Build the <datalist> options for the strategy picker.
    options = "".join("<option value=\""+str(stg)+"\">" for stg in os_list)
    slist = "<input style=\"width: 130px; height: 25px;\" list=\"os\" name=\"strategy\"><datalist id=\"os\">" + options + "</datalist>"
    if request.method == "POST":
        strategyform = fm.StrategyForm(request.POST)
        if strategyform.is_valid():
            strategy = strategyform.cleaned_data['strategy']
            sql = "select * from os_page where strategy='"+strategy+"' limit 1"
            os_page = pd.read_sql(sql, engine)
            if not os_page.empty:
                strategy_desc = os_page['strategy_name'][0]
                oc = os_page['strategy_table'][0]
    return render(request, "os.html", {"slist": slist,
                                       "strategy": strategy,
                                       "strategy_desc": strategy_desc,
                                       "oc": oc,
                                       'GOOGLE_ADS': GOOGLE_ADS})
def osa(request,strategyid):
    """Render the analysis page for a saved option strategy.

    Loads the strategy dict from the session store, dispatches to the matching
    webdashboard analysis routine, and renders the resulting tables and graphs.
    """
    GOOGLE_ADS = 1 if mrigstatics.ENVIRONMENT == 'production' else 0
    strategy = mu.mrigsession_get(strategyid)
    analytics = []
    description = ""
    if strategy['strategyname'] == 'coveredcall':
        analytics = wdb.covered_call_analysis(strategy)
        description = "Covered Call Strategy using option:"
    if strategy['strategyname'] == 'bullputspread':
        analytics = wdb.bull_put_spread_analysis(strategy)
        description = "Bull Put Spread Strategy using options:"
    if strategy['strategyname'] == 'bearcallspread':
        analytics = wdb.bear_call_spread_analysis(strategy)
        description = "Bear Call Spread Strategy using options:"
    # Positional layout of the analysis result list.
    strategy_desc = analytics[0]
    strategy_specs = analytics[1]
    strategy_risk = analytics[2]
    NPV_graph = analytics[3]
    delta_graph = analytics[4]
    gamma_graph = analytics[5]
    theta_graph = analytics[6]
    results = analytics[7]
    expiry = strategy_desc[1].strftime('%d-%b-%Y')
    long_option_desc = strategy_desc[0] + " " + expiry + " " + str(strategy_desc[2])
    short_option_desc = ""
    if len(strategy_desc) > 3:
        # Spread strategies carry a second strike.
        short_option_desc = strategy_desc[0] + " " + expiry + " " + str(strategy_desc[3])
    strategy_specs = myhtml.list_to_html(strategy_specs)
    strategy_risk = myhtml.list_to_html(strategy_risk)
    results = myhtml.dict_to_html(results)
    context = {
        "symbol": strategy_desc[0],
        "strategy_desc": description,
        "long_option_desc": long_option_desc,
        "short_option_desc": short_option_desc,
        "strategy_specs": strategy_specs,
        "strategy_risk": strategy_risk,
        "NPV_graph": NPV_graph,
        "delta_graph": delta_graph,
        "gamma_graph": gamma_graph,
        "theta_graph": theta_graph,
        "results": results,
        'GOOGLE_ADS': GOOGLE_ADS,
    }
    return render(request, "osa.html", context)
def ss(request):
    """Stock-strategies page: render the four pre-computed strategy screens.

    Tables and graphs come from the `ss_page` table; graph blobs are stored as
    binary and decoded to utf-8 text for embedding.

    Fix: when `ss_page` was empty the original never defined the template
    variables and the render call raised NameError — they now default to ''.
    The four copy-pasted per-strategy blocks are collapsed into one loop.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    criteria = {}
    engine = mu.sql_engine(mrigstatics.MRIGWEB[mrigstatics.ENVIRONMENT])
    customscreen = ""
    strategyform = ""
    if request.method == "POST":
        #Get the posted form (currently unused beyond instantiation)
        print("method is post")
        strategyform = fm.StockStrategyForm(request.POST)
    ss_page = pd.read_sql("select * from ss_page ", engine)
    # (template variable prefix, strategy_name key in ss_page)
    screens = [
        ('bm', 'Big Money Momentum Strategy'),
        ('scg', 'Small Cap Growth Stocks'),
        ('nh', 'New Highs making Stocks'),
        ('gi', 'Growth and Income Stocks'),
    ]
    context = {'GOOGLE_ADS': GOOGLE_ADS}
    for prefix, desc in screens:
        table, graph = '', ''
        if not ss_page.empty:
            row = ss_page.loc[ss_page['strategy_name'] == desc]
            table = row['strategy_table'].values[0]
            # Graph is stored as binary; decode for inline embedding.
            graph = bytes(row['strategy_graph'].values[0]).decode('utf-8')
        context[prefix + '_table'] = table
        context[prefix + '_graph'] = graph
        context[prefix + '_desc'] = desc
    return render(request, "ss.html", context)
def screener(request):
    """Custom stock-screener view.

    On POST, validates a ``StockStrategyForm``, copies every screening field
    into a ``criteria`` dict, runs ``wdb.mrigweb_custom_screener`` and renders
    the result table; on GET it just renders the empty screener page.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    criteria = {}
    customscreen = ""
    strategyform = ""
    # Which per-metric form-field suffixes exist varies by metric:
    #   marketcap carries no aggregation function and no benchmark filter,
    #   price/volume/pricevolume carry no benchmark filter,
    #   every remaining metric carries the full set of six fields.
    # Iteration order below reproduces the original criteria insertion order.
    _base = ['aggp', 'aggpnum', 'op', 'abs_filter']
    _suffixes = {'marketcap': _base}
    for _m in ('price', 'volume', 'pricevolume'):
        _suffixes[_m] = ['aggf'] + _base
    for _m in ('pricereturn', 'basiceps', 'dividendyield', 'pe', 'ps', 'pb',
               'peg', 'roe', 'roa', 'netprofitmargin', 'operatingprofitmargin',
               'currentratio', 'quickratio', 'debtequity', 'assetturnover',
               'inventoryturnover', 'volatility', 'beta', 'sharpe'):
        _suffixes[_m] = ['aggf'] + _base + ['bm_f']
    if request.method == "POST":
        # Get the posted form
        print("method is post")
        strategyform = fm.StockStrategyForm(request.POST)
        if strategyform.is_valid():
            for metric, suffixes in _suffixes.items():
                for suffix in suffixes:
                    field = metric + '_' + suffix
                    criteria[field] = strategyform.cleaned_data[field]
            cs = wdb.mrigweb_custom_screener(criteria)
            if not cs.empty:
                cs = cs.reset_index()
                # Turn each symbol into a link to its stock page.
                cs['symbol'] = "<a style=\"color:#f7ed4a;text-decoration:underline;\" href=\"/stock/"+cs['symbol']+"\">"+cs['symbol']+"</a>"
                customscreen = [[str(x).replace("_", " ").capitalize() for x in list(cs)]] + cs.values.tolist()
                customscreen = myhtml.list_to_html(customscreen)
        else:
            print(strategyform.errors)
    return render(request, "screener.html", {
        "customscreen": customscreen,
        "strategyform": strategyform,
        'GOOGLE_ADS': GOOGLE_ADS
        })
def option(request,opid):
    """Option-detail view.

    ``opid`` encodes "symbol:expiry(ddmmYYYY):strike:ltp:option_type"; the
    parsed contract is priced via ``wdb.mrigweb_options`` and the resulting
    tables/graphs rendered on ``option.html``.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    # Defaults used when no contract id is supplied.
    option_desc = ""
    contract_specs = []
    oh = pd.DataFrame()
    price_graph = ""
    results = {}
    NPV_graph = ""
    delta_graph = ""
    gamma_graph = ""
    theta_graph = ""
    # BUG FIX: oi_graph was only assigned inside the `if opid:` branch but is
    # always passed to the template, raising NameError when opid was falsy.
    oi_graph = ""
    if opid:
        params = {}
        keyval = str(opid).split(":")
        params['symbol'] = keyval[0]
        params['expiry'] = datetime.datetime.strptime(keyval[1], '%d%m%Y').date()
        params['strike'] = float(keyval[2])
        params['ltp'] = -1  # sentinel: last traded price unknown
        try:
            params['ltp'] = float(keyval[3])
        except (IndexError, TypeError, ValueError):
            pass  # ltp segment absent or non-numeric; keep sentinel
        params['option_type'] = keyval[4]
        option_desc = params['symbol']+" "+params['expiry'].strftime('%b')+" "+keyval[2]+" "+keyval[4]
        op = wdb.mrigweb_options(params)
        # mrigweb_options returns a positional bundle of tables and graphs.
        (contract_specs, oh, price_graph, results, NPV_graph,
         delta_graph, gamma_graph, theta_graph, oi_graph) = op[:9]
        contract_specs = myhtml.list_to_html(contract_specs)
        oh = oh.reset_index()
        oh = [list(oh)] + oh.values.tolist()
        oh = myhtml.list_to_html(oh)
        # Two-row table: prettified result names on top, values underneath.
        results = [[str(key).capitalize().replace("_", " ") for key in results.keys()],
                   [results[key] for key in results.keys()]]
        results = myhtml.list_to_html(results)
    return render(request, "option.html", {"option_desc": option_desc,
                                           "oh": oh,
                                           "contract_specs": contract_specs,
                                           "price_graph": price_graph,
                                           "results": results,
                                           "NPV_graph": NPV_graph,
                                           "delta_graph": delta_graph,
                                           "gamma_graph": gamma_graph,
                                           "theta_graph": theta_graph,
                                           "oi_graph": oi_graph,
                                           'GOOGLE_ADS': GOOGLE_ADS})
def news(request):
    """Render ``news.html`` with the items returned by ``wdb.mrigweb_news``."""
    GOOGLE_ADS = 1 if mrigstatics.ENVIRONMENT == 'production' else 0
    news_items = wdb.mrigweb_news()
    context = {"news": news_items, 'GOOGLE_ADS': GOOGLE_ADS}
    return render(request, "news.html", context)
def ia(request):
    """Render the static ``ia.html`` page."""
    return render(request, "ia.html")
def ra(request):
    """Render the static ``ra.html`` page."""
    return render(request, "ra.html")
def softs(request):
    """Render the static ``softs.html`` page."""
    return render(request, "softs.html")
def ds(request):
    """Render the static ``ds.html`` page."""
    return render(request, "ds.html")
def about(request):
    """Render the static ``about.html`` page."""
    return render(request, "about.html")
def rates(request):
    """Interest-rate curves view.

    Builds the default smooth-zero ("SZC") and flat-forward ("FF") yield
    curves, on POST re-derives whichever curve's form was submitted using the
    user-supplied parameters, and renders both rate plots on ``ra_rates.html``.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    sz_params = {}  # smooth-zero curve parameters collected from the SZC form
    ff_params = {}  # flat-forward curve parameters collected from the FF form
    SZYC_INR = wdb.mrigweb_szc_rates()
    SZYC_USD = wdb.mrigweb_szc_rates('USD')
    SZYC_GBP = wdb.mrigweb_szc_rates('GBP')
    # NOTE(review): the six LIBOR_* handles below are assigned but never read
    # in this view — presumably built for curve-registration side effects
    # inside wdb.mrigweb_Libor; confirm before removing.
    LIBOR_3M_INR = wdb.mrigweb_Libor('LIBOR_3M_INR', yieldcurvehandle=SZYC_INR[0])
    LIBOR_6M_INR = wdb.mrigweb_Libor('LIBOR_6M_INR', tenor='6M', yieldcurvehandle=SZYC_INR[0])
    LIBOR_3M_USD = wdb.mrigweb_Libor('LIBOR_3M_USD', curve_currency='USD', yieldcurvehandle=SZYC_USD[0])
    LIBOR_6M_USD = wdb.mrigweb_Libor('LIBOR_6M_USD', curve_currency='USD', tenor='6M', yieldcurvehandle=SZYC_USD[0])
    LIBOR_3M_GBP = wdb.mrigweb_Libor('LIBOR_3M_GBP', curve_currency='GBP', tenor='3M', yieldcurvehandle=SZYC_GBP[0])
    LIBOR_6M_GBP = wdb.mrigweb_Libor('LIBOR_6M_GBP', curve_currency='GBP', tenor='6M', yieldcurvehandle=SZYC_GBP[0])
    # Default curves and plots shown on GET (or when the POST form is invalid).
    # NOTE(review): szyc duplicates the SZYC_INR call above — verify whether
    # the duplicate call is intentional.
    szyc = wdb.mrigweb_szc_rates()
    ffyc = wdb.mrigweb_ff_rates()
    # engine = mu.sql_engine(mrigstatics.MRIGWEB[mrigstatics.ENVIRONMENT])
    ffyc_graph = wdb.mrigweb_ratePlot(ffyc,['Flat','Flat_shifted'])
    szyc_graph = wdb.mrigweb_ratePlot(szyc,['INR','INR_shifted'])
    if request.method == "POST":
        # Get the posted form; the page hosts two independent forms and the
        # submit-button name identifies which one was posted.
        reference_date = datetime.date.today()
        print("method is post")
        if 'szc_form' in request.POST:
            # print("szc clicked ------")
            rateform = fm.SZC_InterestRateForm(request.POST)
            if rateform.is_valid():
                print("----szc form valid----")
                sz_params['curve_currency'] = rateform.cleaned_data['szc_currency']
                sz_params['day_count'] = rateform.cleaned_data['szc_daycount']
                sz_params['calendar'] = rateform.cleaned_data['szc_calendar']
                sz_params['compounding'] = rateform.cleaned_data['szc_compounding']
                sz_params['compounding_frequency'] = rateform.cleaned_data['szc_frequency']
                sz_params['interpolation'] = rateform.cleaned_data['szc_interpolation']
                sz_params['shiftparameter'] = rateform.cleaned_data['szc_parallelshift']
                print(sz_params)
                # Rebuild the SZC curve with the user's parameters and re-plot.
                szyc = wdb.mrigweb_szc_rates(sz_params['curve_currency'],reference_date,sz_params)
                # szyc_graph = szyc[0]
                szyc_graph = wdb.mrigweb_ratePlot(szyc,['INR','INR_shifted'])
        if 'ff_form' in request.POST:
            rateform = fm.FF_InterestRateForm(request.POST)
            if rateform.is_valid():
                print("----ff form valid----")
                ff_params['curvename'] = rateform.cleaned_data['ff_curvename']
                ff_params['currency'] = rateform.cleaned_data['ff_currency']
                ff_params['day_count'] = rateform.cleaned_data['ff_daycount']
                ff_params['calendar'] = rateform.cleaned_data['ff_calendar']
                ff_params['compounding'] = rateform.cleaned_data['ff_compounding']
                ff_params['compounding_frequency'] = rateform.cleaned_data['ff_frequency']
                ff_params['flat_rate'] = rateform.cleaned_data['ff_flatrate']
                ff_params['shiftparameter'] = rateform.cleaned_data['ff_parallelshift']
                print(ff_params)
                # Rebuild the flat-forward curve and re-plot.
                ffyc = wdb.mrigweb_ff_rates(reference_date,ff_params)
                ffyc_graph = wdb.mrigweb_ratePlot(ffyc,['Flat','Flat_shifted'])
    return render(request, "ra_rates.html" , {'ffyc_graph':ffyc_graph,
                                              'szyc_graph':szyc_graph,
                                              'GOOGLE_ADS': GOOGLE_ADS
                                              })
def bonds(request):
    """Bond-valuation view.

    Builds the selectable rate-curve/index objects, parses the posted
    ``BondForm`` into a ``wdb.mrigweb_Bond`` definition, values it with
    ``wdb.mrigweb_Analytics`` and renders the result table on
    ``ra_bonds.html``. On GET (or an invalid form) an empty result is shown.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    # Form choice name -> pre-built curve/index object, used for both the
    # floating-coupon index and the discount-curve selectors.
    objectmap = {}
    objectmap['None'] = None
    objectmap['SZYC_INR'] = wdb.mrigweb_szc_rates()
    objectmap['SZYC_USD'] = wdb.mrigweb_szc_rates('USD')
    objectmap['SZYC_GBP'] = wdb.mrigweb_szc_rates('GBP')
    objectmap['LIBOR_3M_INR'] = wdb.mrigweb_Libor('LIBOR_3M_INR', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_6M_INR'] = wdb.mrigweb_Libor('LIBOR_6M_INR', tenor='6M', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_3M_USD'] = wdb.mrigweb_Libor('LIBOR_3M_USD', curve_currency='USD', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_6M_USD'] = wdb.mrigweb_Libor('LIBOR_6M_USD', curve_currency='USD', tenor='6M', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_3M_GBP'] = wdb.mrigweb_Libor('LIBOR_3M_GBP', curve_currency='GBP', tenor='3M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    objectmap['LIBOR_6M_GBP'] = wdb.mrigweb_Libor('LIBOR_6M_GBP', curve_currency='GBP', tenor='6M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    resultset = ""
    if request.method == "POST":
        # Get the posted form
        reference_date = datetime.date.today()
        print("bond method is post")
        if 'bond_form' in request.POST:
            bondform = fm.BondForm(request.POST)
            if bondform.is_valid():
                print("----bond form valid----")

                def _opt_float(name, default=None):
                    # Best-effort float parse of an optional form field;
                    # falls back to `default` when absent/blank/non-numeric.
                    try:
                        return float(bondform.cleaned_data[name])
                    except (KeyError, TypeError, ValueError):
                        return default

                def _schedule(prefix):
                    # Collect up to five optional (date, price) pairs from
                    # fields named e.g. call_date_1 / call_price_1. Both the
                    # date and the price must parse before either is kept —
                    # the old copy-pasted code appended the date before the
                    # price parse could fail, desynchronising the two lists.
                    dates, prices = [], []
                    for i in range(1, 6):
                        try:
                            d = datetime.datetime.strptime(
                                bondform.cleaned_data['%s_date_%d' % (prefix, i)], '%Y-%m-%d').date()
                            p = float(bondform.cleaned_data['%s_price_%d' % (prefix, i)])
                        except (KeyError, TypeError, ValueError):
                            continue
                        dates.append(d)
                        prices.append(p)
                    return [dates, prices] if dates else None

                # --- Contract terms ---------------------------------------
                issue_name = bondform.cleaned_data['bondsname']
                issue_date = datetime.datetime.strptime(bondform.cleaned_data['issue_date'], '%Y-%m-%d').date()
                maturity_date = datetime.datetime.strptime(bondform.cleaned_data['maturity_date'], '%Y-%m-%d').date()
                face_value = float(bondform.cleaned_data['facevalue'])
                day_count = bondform.cleaned_data['daycount']
                calendar = bondform.cleaned_data['calendar']
                currency = bondform.cleaned_data['currency']  # parsed but not passed to mrigweb_Bond (as before)
                business_convention = bondform.cleaned_data['business_convention']
                month_end = bool(bondform.cleaned_data['month_end'])
                settlement_days = float(bondform.cleaned_data['settlement_days'])
                date_generation = bondform.cleaned_data['date_generation']
                coupon_frequency = bondform.cleaned_data['coupon_frequency']
                fixed_coupon_rate = float(bondform.cleaned_data['fixed_coupon_rate'])
                floating_coupon_index = objectmap[bondform.cleaned_data['floating_coupon_index']]
                floating_coupon_spread = _opt_float('floating_coupon_spread', 0)
                inArrears = bool(bondform.cleaned_data['inarrears'])
                cap = _opt_float('cap')
                floor = _opt_float('floor')
                fixing = _opt_float('last_libor')
                conversionRatio = _opt_float('conversion_ratio')
                conversionPrice = _opt_float('conversion_price')
                credit_spread = _opt_float('credit_spread')
                call_schedule = _schedule('call')
                put_schedule = _schedule('put')
                dividend_schedule = None  # not collected on the form
                # --- Valuation parameters ---------------------------------
                discount_curve = objectmap[bondform.cleaned_data['discount_curve']][0]
                volatility_curve = wdb.mrigweb_ConstantVolatilityCurve(float(bondform.cleaned_data['volatility_curve']))
                dividend_curve = wdb.mrigweb_FlatDividendYieldCurve(reference_date, flat_rate=float(bondform.cleaned_data['dividend_curve']))
                underlying_spot = float(bondform.cleaned_data['underlying_spot'])
                mean_reversion = float(bondform.cleaned_data['mean_reversion'])
                shortrate_vol = float(bondform.cleaned_data['shortrate_vol'])
                hwgrid_pts = float(bondform.cleaned_data['hwgrid_pts'])
                print(issue_date)
                print(call_schedule)
                print(put_schedule)
                print(conversionRatio)
                # BUG FIX: the bond construction and valuation used to run
                # even when the form was invalid, crashing with a NameError
                # on the unparsed fields; they now run only on a valid form.
                bond = wdb.mrigweb_Bond(issue_name, issue_date, maturity_date,
                                        face_value, day_count, calendar, business_convention,
                                        month_end, settlement_days, date_generation, coupon_frequency,
                                        fixed_coupon_rate, floating_coupon_index, floating_coupon_spread,
                                        inArrears, cap, floor, fixing, conversionRatio, conversionPrice,
                                        credit_spread, call_schedule, put_schedule, dividend_schedule)
                valuation_args = {'Underlying Spot': underlying_spot,
                                  'Discount Curve': discount_curve,
                                  'Volatility Curve': volatility_curve,
                                  'Dividend Curve': dividend_curve,
                                  'Mean Reversion': mean_reversion,
                                  'Short Rate Vol': shortrate_vol,
                                  'Hull White Grid Pts': hwgrid_pts}
                resultset = wdb.mrigweb_Analytics(bond, valuation_args)
                resultset = myhtml.dict_to_html(resultset)
            else:
                print(bondform.errors)
    return render(request, "ra_bonds.html", {'resultset': resultset, 'GOOGLE_ADS': GOOGLE_ADS})
def options(request):
    """Option-valuation view.

    Parses the posted ``OptionForm`` into a ``wdb.mrigweb_Option`` definition,
    values it with ``wdb.mrigweb_Analytics`` and renders the result table on
    ``ra_options.html``. On GET (or an invalid form) an empty result is shown.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    # Form choice name -> pre-built curve/index object for the
    # discount-curve selector.
    objectmap = {}
    objectmap['None'] = None
    objectmap['SZYC_INR'] = wdb.mrigweb_szc_rates()
    objectmap['SZYC_USD'] = wdb.mrigweb_szc_rates('USD')
    objectmap['SZYC_GBP'] = wdb.mrigweb_szc_rates('GBP')
    objectmap['LIBOR_3M_INR'] = wdb.mrigweb_Libor('LIBOR_3M_INR', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_6M_INR'] = wdb.mrigweb_Libor('LIBOR_6M_INR', tenor='6M', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_3M_USD'] = wdb.mrigweb_Libor('LIBOR_3M_USD', curve_currency='USD', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_6M_USD'] = wdb.mrigweb_Libor('LIBOR_6M_USD', curve_currency='USD', tenor='6M', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_3M_GBP'] = wdb.mrigweb_Libor('LIBOR_3M_GBP', curve_currency='GBP', tenor='3M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    objectmap['LIBOR_6M_GBP'] = wdb.mrigweb_Libor('LIBOR_6M_GBP', curve_currency='GBP', tenor='6M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    resultset = ""
    if request.method == "POST":
        # Get the posted form
        reference_date = datetime.date.today()
        print("option method is post")
        if 'option_form' in request.POST:
            optionform = fm.OptionForm(request.POST)
            if optionform.is_valid():
                print("----option form valid----")  # fixed mislabelled "swap" debug message
                option_name = optionform.cleaned_data['optionname']
                underlying_name = optionform.cleaned_data['underlyingname']
                maturity_date = datetime.datetime.strptime(optionform.cleaned_data['maturity_date'], '%Y-%m-%d').date()
                strike = float(optionform.cleaned_data['strike'])
                option_type = optionform.cleaned_data['option_type']
                exercise_type = optionform.cleaned_data['exercise_type']
                currency = optionform.cleaned_data['currency']  # parsed but not used downstream (as before)
                day_count = optionform.cleaned_data['daycount']
                calendar = optionform.cleaned_data['calendar']
                # --- Valuation parameters ---------------------------------
                discount_curve = objectmap[optionform.cleaned_data['discount_curve']][0]
                volatility_curve = wdb.mrigweb_FlatVolatilityCurve(reference_date, spot_vols=float(optionform.cleaned_data['volatility_curve']))
                dividend_curve = wdb.mrigweb_FlatDividendYieldCurve(reference_date, flat_rate=float(optionform.cleaned_data['dividend_curve']))
                underlying_spot = float(optionform.cleaned_data['underlying_spot'])
                valuation_method = optionform.cleaned_data['valuation_method']
                # BUG FIX: construction/valuation used to run even when the
                # form was invalid, raising NameError on the unparsed fields;
                # they now run only on a valid form.
                option = wdb.mrigweb_Option(option_name, underlying_name, maturity_date,
                                            option_type, strike, exercise_type, day_count, calendar)
                valuation_args = {'Discount Curve': discount_curve,
                                  'Volatility Curve': volatility_curve,
                                  'Underlying Spot': underlying_spot,
                                  'Dividend Curve': dividend_curve,
                                  'Valuation Method': valuation_method}
                resultset = wdb.mrigweb_Analytics(option, valuation_args)
                resultset = myhtml.dict_to_html(resultset)
            else:
                print(optionform.errors)
    return render(request, "ra_options.html", {'resultset': resultset, 'GOOGLE_ADS': GOOGLE_ADS})
def swaps(request):
    """Price an interest-rate swap posted via SwapForm and render ra_swaps.html.

    Builds the selectable curve/index universe, overlays posted form values on
    the defaults below, prices the swap through wdb.mrigweb_Swap /
    mrigweb_Analytics, and renders the result table (empty on GET or on an
    invalid form).
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    # Curve/index universe selectable from the form. Each SZYC_* value is a
    # sequence whose element [0] is the curve handle consumed below.
    objectmap = {}
    objectmap['None'] = None
    objectmap['SZYC_INR'] = wdb.mrigweb_szc_rates()
    objectmap['SZYC_USD'] = wdb.mrigweb_szc_rates('USD')
    objectmap['SZYC_GBP'] = wdb.mrigweb_szc_rates('GBP')
    objectmap['LIBOR_3M_INR'] = wdb.mrigweb_Libor('LIBOR_3M_INR', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_6M_INR'] = wdb.mrigweb_Libor('LIBOR_6M_INR', tenor='6M', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_3M_USD'] = wdb.mrigweb_Libor('LIBOR_3M_USD', curve_currency='USD', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_6M_USD'] = wdb.mrigweb_Libor('LIBOR_6M_USD', curve_currency='USD', tenor='6M', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_3M_GBP'] = wdb.mrigweb_Libor('LIBOR_3M_GBP', curve_currency='GBP', tenor='3M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    objectmap['LIBOR_6M_GBP'] = wdb.mrigweb_Libor('LIBOR_6M_GBP', curve_currency='GBP', tenor='6M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    # Defaults applied when a field is absent or unparseable.
    # BUG FIX: these assignments previously carried trailing commas
    # (e.g. fixedleg_day_count='30-360',) which bound 1-tuples, not strings.
    fixedleg_day_count = '30-360'
    fixedleg_calendar = 'India'
    fixedleg_business_convention = 'Following'
    fixedleg_month_end = 'True'
    fixedleg_date_generation = 'Backward'
    fixedleg_coupon_frequency = 'Semiannual'
    fixedleg_coupon_rate = None
    floatleg_day_count = '30-360'
    floatleg_calendar = 'India'
    floatleg_business_convention = 'Following'
    floatleg_month_end = 'True'
    floatleg_date_generation = 'Backward'
    floatleg_coupon_frequency = 'Semiannual'
    floatleg_index = None
    floatleg_coupon_spread = 0
    floatleg_fixing = None
    resultset = ""
    if request.method == "POST":
        reference_date = datetime.date.today()  # NOTE(review): computed but unused
        print("swap method is post")
        if 'swap_form' in request.POST:
            swapform = fm.SwapForm(request.POST)
            if swapform.is_valid():
                print("----swap form valid----")
                fixed_pay = swapform.cleaned_data['fixed_pay_recieve']
                maturity_date = swapform.cleaned_data['fixed_maturity_date']
                maturity_date = datetime.datetime.strptime(maturity_date, '%Y-%m-%d').date()
                face_value = float(swapform.cleaned_data['fixed_facevalue'])
                fixedleg_day_count = swapform.cleaned_data['fixed_daycount']
                fixedleg_calendar = swapform.cleaned_data['fixed_calendar']
                fixedleg_business_convention = swapform.cleaned_data['fixed_business_convention']
                fixedleg_month_end = bool(swapform.cleaned_data['fixed_month_end'])
                fixedleg_date_generation = swapform.cleaned_data['fixed_date_generation']
                fixedleg_coupon_frequency = swapform.cleaned_data['fixed_coupon_frequency']
                fixedleg_coupon_rate = float(swapform.cleaned_data['fixed_coupon_rate'])
                floatleg_day_count = swapform.cleaned_data['float_daycount']
                floatleg_calendar = swapform.cleaned_data['float_calendar']
                floatleg_business_convention = swapform.cleaned_data['float_business_convention']
                floatleg_month_end = bool(swapform.cleaned_data['float_month_end'])
                floatleg_date_generation = swapform.cleaned_data['float_date_generation']
                floatleg_coupon_frequency = swapform.cleaned_data['float_coupon_frequency']
                floatleg_index = objectmap[swapform.cleaned_data['floating_coupon_index']]
                # Optional numeric fields: keep the defaults on parse failure.
                try:
                    floatleg_coupon_spread = float(swapform.cleaned_data['floating_coupon_spread'])
                except (TypeError, ValueError, KeyError):
                    pass
                try:
                    floatleg_fixing = float(swapform.cleaned_data['last_libor'])
                except (TypeError, ValueError, KeyError):
                    pass
                # Valuation parameters
                discount_curve = objectmap[swapform.cleaned_data['discount_curve']][0]
                # ROBUSTNESS FIX: build and price only when the form is valid;
                # invalid posts previously hit a NameError on the cleaned
                # fields referenced here.
                swap = wdb.mrigweb_Swap(fixed_pay, maturity_date,
                    face_value, fixedleg_day_count, fixedleg_calendar,
                    fixedleg_business_convention, fixedleg_month_end, fixedleg_date_generation,
                    fixedleg_coupon_frequency, fixedleg_coupon_rate, floatleg_day_count,
                    floatleg_calendar, floatleg_business_convention, floatleg_month_end,
                    floatleg_date_generation, floatleg_coupon_frequency, floatleg_index,
                    floatleg_coupon_spread, floatleg_fixing)
                valuation_args = {'Discount Curve': discount_curve}
                resultset = wdb.mrigweb_Analytics(swap, valuation_args)
                resultset = myhtml.dict_to_html(resultset)
            else:
                print(swapform.errors)
    return render(request, "ra_swaps.html", {'resultset': resultset, 'GOOGLE_ADS': GOOGLE_ADS})
def capsfloors(request):
    """Price a cap/floor posted via CapFloorForm and render ra_capsfloors.html.

    Mirrors the swap view: build the curve universe, overlay form values on
    defaults, price with wdb.mrigweb_CapFloor / mrigweb_Analytics.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    # Curve/index universe selectable from the form ([0] is the curve handle).
    objectmap = {}
    objectmap['None'] = None
    objectmap['SZYC_INR'] = wdb.mrigweb_szc_rates()
    objectmap['SZYC_USD'] = wdb.mrigweb_szc_rates('USD')
    objectmap['SZYC_GBP'] = wdb.mrigweb_szc_rates('GBP')
    objectmap['LIBOR_3M_INR'] = wdb.mrigweb_Libor('LIBOR_3M_INR', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_6M_INR'] = wdb.mrigweb_Libor('LIBOR_6M_INR', tenor='6M', yieldcurvehandle=objectmap['SZYC_INR'][0])
    objectmap['LIBOR_3M_USD'] = wdb.mrigweb_Libor('LIBOR_3M_USD', curve_currency='USD', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_6M_USD'] = wdb.mrigweb_Libor('LIBOR_6M_USD', curve_currency='USD', tenor='6M', yieldcurvehandle=objectmap['SZYC_USD'][0])
    objectmap['LIBOR_3M_GBP'] = wdb.mrigweb_Libor('LIBOR_3M_GBP', curve_currency='GBP', tenor='3M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    objectmap['LIBOR_6M_GBP'] = wdb.mrigweb_Libor('LIBOR_6M_GBP', curve_currency='GBP', tenor='6M', yieldcurvehandle=objectmap['SZYC_GBP'][0])
    # Defaults applied when a field is absent or unparseable.
    # BUG FIX: these assignments previously carried trailing commas, which
    # bound 1-tuples (e.g. face_value == (1000000,)), not scalars.
    face_value = 1000000
    day_count = '30-360'
    calendar = 'India'
    business_convention = 'Following'
    month_end = 'True'
    settlement_days = 3
    date_generation = 'Forward'
    coupon_frequency = 'Quarterly'
    floating_coupon_index = None
    floating_coupon_spread = 0
    fixing = None
    resultset = ""
    if request.method == "POST":
        reference_date = datetime.date.today()  # NOTE(review): computed but unused
        print("swap method is post")
        if 'capsfloors_form' in request.POST:
            capfloorform = fm.CapFloorForm(request.POST)
            if capfloorform.is_valid():
                print("----swap form valid----")
                option_name = capfloorform.cleaned_data['capfloorname']
                start_date = capfloorform.cleaned_data['start_date']
                start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d').date()
                maturity_date = capfloorform.cleaned_data['maturity_date']
                maturity_date = datetime.datetime.strptime(maturity_date, '%Y-%m-%d').date()
                cap_or_floor = capfloorform.cleaned_data['option_type']
                face_value = float(capfloorform.cleaned_data['facevalue'])
                strike = float(capfloorform.cleaned_data['strike'])
                currency = capfloorform.cleaned_data['currency']  # NOTE(review): read but unused
                day_count = capfloorform.cleaned_data['daycount']
                calendar = capfloorform.cleaned_data['calendar']
                business_convention = capfloorform.cleaned_data['business_convention']
                month_end = bool(capfloorform.cleaned_data['month_end'])
                settlement_days = float(capfloorform.cleaned_data['settlement_days'])
                date_generation = capfloorform.cleaned_data['date_generation']
                coupon_frequency = capfloorform.cleaned_data['coupon_frequency']
                floating_coupon_index = objectmap[capfloorform.cleaned_data['floating_coupon_index']]
                # Optional numeric fields: keep the defaults on parse failure.
                try:
                    floating_coupon_spread = float(capfloorform.cleaned_data['floating_coupon_spread'])
                except (TypeError, ValueError, KeyError):
                    pass
                try:
                    fixing = float(capfloorform.cleaned_data['last_libor'])
                except (TypeError, ValueError, KeyError):
                    pass
                # Valuation parameters
                discount_curve = objectmap[capfloorform.cleaned_data['discount_curve']][0]
                volatility_curve = float(capfloorform.cleaned_data['volatility_curve'])
                volatility_curve = wdb.mrigweb_ConstantVolatilityCurve(volatility_curve)
                # ROBUSTNESS FIX: build and price only when the form is valid;
                # invalid posts previously hit a NameError on the cleaned
                # fields referenced here.
                capfloor = wdb.mrigweb_CapFloor(option_name, start_date, maturity_date,
                    cap_or_floor, strike, face_value, day_count, calendar,
                    business_convention, month_end, settlement_days,
                    date_generation, coupon_frequency, floating_coupon_index,
                    floating_coupon_spread, fixing)
                valuation_args = {'Discount Curve': discount_curve,
                                  'Volatility Curve': volatility_curve}
                resultset = wdb.mrigweb_Analytics(capfloor, valuation_args)
                resultset = myhtml.dict_to_html(resultset)
            else:
                print(capfloorform.errors)
    return render(request, "ra_capsfloors.html", {'resultset': resultset, 'GOOGLE_ADS': GOOGLE_ADS})
def portfolio(request):
    """Render the portfolio page.

    BUG FIX: GOOGLE_ADS was computed but never passed to the template, so
    ads never showed on this page in production (every sibling view passes
    it); it is now included in the render context.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    return render(request, "ra_portfolio.html", {'GOOGLE_ADS': GOOGLE_ADS})
def mf(request):
    """Render the mutual-funds page with an HTML table of the top funds."""
    google_ads = 1 if mrigstatics.ENVIRONMENT == 'production' else 0
    # Pull the top funds, surface the index as a column, and convert the
    # frame (header row + data rows) into an HTML table.
    frame = wdb.mrigweb_top_mfs().reset_index()
    rows = [list(frame)] + frame.values.tolist()
    table_html = myhtml.list_to_html(rows)
    return render(request, "mf.html", {'topmfs_table': table_html, 'GOOGLE_ADS': google_ads})
def stock1(request):
    """Render stock1.html with the single image blob stored in the DB.

    The blob is fetched from the `images` table, converted to bytes and
    decoded as UTF-8 text (it is stored as markup/encoded text, not raw
    binary). Cleanup: removed a block of dead commented-out code and the
    unused local `oc = ['a', 3]` that shadowed the template variable name.
    """
    GOOGLE_ADS = 0
    if mrigstatics.ENVIRONMENT == 'production':
        GOOGLE_ADS = 1
    # Static query, no user input interpolated -- safe as a raw string.
    sql = "select image from images limit 1"
    engine = mu.sql_engine(mrigstatics.MRIGWEB[mrigstatics.ENVIRONMENT])
    image = engine.execute(sql).fetchall()
    image = bytes(image[0][0])
    image = image.decode("utf-8")
    return render(request, "stock1.html", {"oc": image, 'GOOGLE_ADS': GOOGLE_ADS})
|
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest prefix shared by every string in `strs`.

        Scans column by column across all strings in lockstep using zip(),
        which itself stops at the shortest string; the scan ends at the first
        column whose characters disagree. An empty input yields ''.
        """
        shared = []
        for column in zip(*strs):
            if len(set(column)) != 1:
                break
            shared.append(column[0])
        return ''.join(shared)
|
# Given a character sequence `sec` and an integer r, store in secRot the
# sequence rotated r positions to the right.
# Example: sec="abbcd", r=3 -> secRot="bcdab".
# FIXES: raw_input() is Python-2-only while print() below is Python 3;
# the O(N*len) move-one-char-at-a-time loop is replaced by a single slice;
# an empty sequence no longer raises IndexError.
sec = input("Ingrese una secuencia de letras:")
N = int(input("Ingrese el numero de rotaciones hacia la derecha: "))
if sec:
    k = N % len(sec)  # rotating by a multiple of len(sec) is a no-op
    secRot = sec[len(sec) - k:] + sec[:len(sec) - k]
else:
    secRot = sec
print(secRot)
from .base import *

# Development settings overlay -- extends the shared base settings module.
DEBUG = True

ALLOWED_HOSTS = []

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SECURITY NOTE(review): credentials are committed in plain text. Acceptable
# only for a local dev database; move to environment variables before reuse.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'openmind',
        'USER': 'oliver',
        'PASSWORD': 'yugiho2000',
        'HOST': 'localhost',
        'PORT': 5432,
    }
}

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'

# Custom user model lives in the "usuarios" app.
AUTH_USER_MODEL = 'usuarios.User'

STATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'staticfiles'))
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
    os.path.join(os.path.dirname(BASE_DIR), 'static'),
]

# Post-login/logout destinations (URL names resolved lazily at import time).
LOGIN_REDIRECT_URL = reverse_lazy('post:index')
LOGOUT_REDIRECT_URL = reverse_lazy('login')
|
# coding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time
import os
import shutil
# Clear the TensorBoard log directory so each run starts with fresh event
# files (old runs would otherwise overlay each other in TensorBoard).
filelist = []  # NOTE(review): unused -- leftover from an earlier version
rootdir = "./model/tensorflowlogs/"
for f in os.listdir(rootdir):
    filepath = os.path.join(rootdir, f)
    if os.path.isfile(filepath):
        os.remove(filepath)
        print filepath+" removed!"
    elif os.path.isdir(filepath):
        # ignore_errors=True: a failed delete of a sub-run dir is not fatal.
        shutil.rmtree(filepath, True)
        print "dir "+filepath+" removed!"

# set random seed for comparing the two result calculations
tf.set_random_seed(1)

# this is data -- MNIST arrives as flat 784-vectors with one-hot labels.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
class model:
    """TF1 graph for a single-layer LSTM classifier over MNIST rows.

    Each 28x28 image is fed as 28 time steps of 28 pixels; the last LSTM
    output is projected to 10 class logits. ``is_training`` gates creation
    of the Adam train op so a variable-sharing evaluation copy stays
    read-only.
    """

    def __init__(self, name_scope, configure, is_training):
        self.batch_size = configure.batch_size
        self.n_steps = configure.n_steps
        self.n_inputs = configure.n_inputs
        self.n_classes = configure.n_classes
        self.n_hidden_units = configure.n_hidden_units
        self.is_training = is_training
        self.lr = configure.lr
        self.graph()
        # Merge only the summaries created under this model's name scope so
        # the train and test summary writers stay separate.
        self.merged = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES, name_scope))

    def graph(self):
        """Build placeholders, parameters, logits, loss/accuracy and train op."""
        # tf Graph input
        with tf.variable_scope("input_data") as scope:
            self.x = tf.placeholder(tf.float32, [None, self.n_steps, self.n_inputs])
        with tf.variable_scope("labels") as scope:
            self.y = tf.placeholder(tf.float32, [None, self.n_classes])
        # # Define weights_1
        with tf.variable_scope("weight") as scope:
            self.weights = {
                # (28, 128)
                'in': tf.get_variable('in', initializer=tf.random_normal([self.n_inputs, self.n_hidden_units])),
                # (128, 10)
                'out': tf.get_variable('out', initializer=tf.random_normal([self.n_hidden_units, self.n_classes]))
            }
        with tf.variable_scope("biases"):
            self.biases = {
                # (128, )
                'in': tf.get_variable('in', initializer=tf.constant(0.1, shape=[self.n_hidden_units, ])),
                # (10, )
                'out': tf.get_variable('out', initializer=tf.constant(0.1, shape=[self.n_classes, ]))
            }
        with tf.variable_scope("pre"):
            self.logits = self.rnn(self.x, self.weights, self.biases)
        with tf.variable_scope('loss'):
            _loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
        with tf.variable_scope("cost") as scope:
            self.cost = tf.reduce_mean(_loss)
            tf.summary.scalar(scope.name, self.cost)
        with tf.variable_scope("accuracy") as scope:
            correct_predict = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
            tf.summary.scalar(scope.name, self.accuracy)
        # Evaluation copies stop here -- no optimizer variables are created.
        if not self.is_training:
            return
        with tf.name_scope("train_op"):
            self.train = tf.train.AdamOptimizer(self.lr).minimize(self.cost)

    def rnn(self, input_data, weights, biases):
        """Project inputs into the hidden size, run the LSTM, return logits
        computed from the last time step's output."""
        # hidden layer for input to cell
        ########################################
        # transpose the inputs shape from
        # X(128 batch ,28 steps, 28 inputs)
        # ==> (128 batch * 28 steps, 28 inputs)
        input_data = tf.reshape(input_data, [-1, self.n_inputs])
        # into hidden
        # data_in = (128 batch * 28 steps, 128 hidden)
        data_in = tf.matmul(input_data, weights['in']) + biases['in']
        # data_in ==> (128 batch, 28 steps, 128 hidden_units)
        data_in = tf.reshape(data_in, [-1, self.n_steps, self.n_hidden_units])
        # cell
        ##########################################
        # basic LSTM Cell.
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.n_hidden_units, forget_bias=1.0, state_is_tuple=True)
        # lstm cell is divided into two parts (c_state, h_state)
        _init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
        outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, data_in, initial_state=_init_state, time_major=False)
        # hidden layer for output as the final results
        #############################################
        # results = tf.matmul(final_state[1], weights_1['out']) + biases_1['out']
        # # or
        # unpack to list [(batch, outputs)..] * steps
        outputs = tf.unstack(tf.transpose(outputs, [1, 0, 2]))  # states is the last outputs
        logits = tf.matmul(outputs[-1], weights['out']) + biases['out']
        return logits
class conf(object):
    """Hyperparameter bundle shared by the train and eval model copies."""
    init_scale = 0.04  # uniform initializer range for variable creation
    # hyperparameters
    lr = 0.001               # Adam learning rate
    training_iters = 100000  # total samples to consume per run
    batch_size = 128
    n_inputs = 28  # MNIST data input (img shape: 28*28)
    n_steps = 28  # time steps
    n_hidden_units = 128  # neurons in hidden layer
    n_classes = 10  # MNIST classes (0-9 digits)
# Training vs. validation configurations: the validation copy uses a smaller
# batch and fewer iterations over the same (reused) model variables.
train_conf = conf()
valid_conf = conf()
valid_conf.batch_size = 20
valid_conf.training_iters = 2000
# NOTE(review): num_steps is never read in this file (the model uses
# n_steps) -- presumably leftover from a PTB-style example; confirm.
valid_conf.num_steps = 1
def run_epoch(_model, session, _conf):
    """Testing or valid.

    Runs evaluation batches until _conf.training_iters samples have been
    consumed, logging merged summaries every third batch.
    NOTE(review): relies on the module-level `mnist` and
    `test_summary_writer` globals defined in the main block below.
    """
    count = 0
    while count * _conf.batch_size < _conf.training_iters:
        test_batch_xs, test_batch_ys = mnist.test.next_batch(_conf.batch_size)
        # Reshape flat 784-vectors into (batch, steps, inputs) for the RNN.
        test_batch_xs = test_batch_xs.reshape([_conf.batch_size, _conf.n_steps, _conf.n_inputs])
        # print(batch_xs.shape)
        # print(batch_ys.shape)
        session.run([_model.cost, _model.accuracy], feed_dict={_model.x: test_batch_xs, _model.y: test_batch_ys, })
        if count % 3 == 0:
            # Write summaries and print accuracy every third batch.
            _summary = session.run(_model.merged, feed_dict={_model.x: test_batch_xs, _model.y: test_batch_ys, })
            test_summary_writer.add_summary(_summary, count)
            print(session.run(_model.accuracy, feed_dict={_model.x: test_batch_xs, _model.y: test_batch_ys, }))
        count += 1
# Build a training copy and an evaluation copy of the model over a shared
# variable set (variable_scope "Model" with reuse=True for Test), then run
# the training loop followed by a final evaluation pass.
with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-train_conf.init_scale, train_conf.init_scale)
    with tf.name_scope("Train") as train_scope:
        with tf.variable_scope("Model", reuse=None, initializer=initializer):
            train_model = model(train_scope, train_conf, is_training=True)
    with tf.name_scope("Test") as test_scope:
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            test_model = model(test_scope, valid_conf, is_training=False)
    with tf.Session() as session:
        train_summary_writer = tf.summary.FileWriter('./model/tensorflowlogs/train', session.graph)
        test_summary_writer = tf.summary.FileWriter('./model/tensorflowlogs/test')
        session.run(tf.global_variables_initializer())
        step = 0
        while step * train_conf.batch_size < train_conf.training_iters:
            batch_xs, batch_ys = mnist.train.next_batch(train_conf.batch_size)
            # Reshape flat 784-vectors into (batch, steps, inputs).
            batch_xs = batch_xs.reshape([train_conf.batch_size, train_conf.n_steps, train_conf.n_inputs])
            # print(batch_xs.shape)
            # print(batch_ys.shape)
            session.run([train_model.train], feed_dict={train_model.x: batch_xs, train_model.y: batch_ys, })
            if step % 2 == 0:
                # Log training summaries and print accuracy every other step.
                summary = session.run(train_model.merged, feed_dict={train_model.x: batch_xs, train_model.y: batch_ys, })
                train_summary_writer.add_summary(summary, step)
                print(session.run(train_model.accuracy, feed_dict={train_model.x: batch_xs, train_model.y: batch_ys, }))
            step += 1
            # print step
            # if step % 1000 == 0 or (step + 1) == train_conf.training_iters:
            #     test_batch_xs, test_batch_ys = mnist.test.next_batch(valid_conf.batch_size)
            #     test_batch_xs = test_batch_xs.reshape([valid_conf.batch_size, valid_conf.n_steps, valid_conf.n_inputs])
            #     print(session.run(test_model.accuracy, feed_dict={test_model.x: test_batch_xs, test_model.y: test_batch_ys, }))
        # Final evaluation pass over the test set.
        run_epoch(test_model, session, valid_conf)
|
print("*"*5,"Menu Harga Buah-Buahan","*"*5)
DaftarBuahBuahan = {
"apel" : 5000,
"jeruk" : 8500,
"mangga" : 7800,
"duku" :6500
}
print(DaftarBuahBuahan)
print("-"*50)
def BeliBuah():
    """Prompt for a fruit name and weight, then print the total price.

    Reads the module-level DaftarBuahBuahan price table (price per kg) and
    exits after one purchase or on invalid input, exactly like the original.
    FIXES: the bare ``except`` is narrowed to ValueError (the only statement
    that can fail is the float() parse), and the ``while True`` loop -- every
    branch of which immediately broke -- is removed.
    """
    memilih = input("Nama buah yang dibeli = ")
    if memilih not in DaftarBuahBuahan:
        print("Nama Buah tidak terdaftar")
        return
    try:
        masukanBerat = float(input("Silahkan Masukan Total Buah yang akan anda beli :"))
    except ValueError:
        # e.g. the user typed a comma as the decimal separator
        print("Inputan berat bukan menggunakan koma")
        return
    print("Total buah yang akan di beli (Kg) :", masukanBerat,"Kg")
    print("="*50)
    print("Total Harga bayar : Rp.", DaftarBuahBuahan[memilih] * masukanBerat)
BeliBuah()
|
import json
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic.list import ListView
from cafe.models import Cafe
from forms.forms import SearchForm, CafeRegistrationForm
class CafeleListView(ListView):
    """Home page list view showing every verified cafe."""
    template_name = 'home.html'
    model = Cafe

    def get_queryset(self):
        # Only cafes that passed verification are listed publicly.
        return self.model.objects.filter(verified=True)
def searchInCafes(request):
    """Search cafes by name keyword (POST) or show an empty search form (GET).

    BUG FIX: the GET branch previously called ``searchInCafes()`` -- a
    recursive call missing its required ``request`` argument, raising
    TypeError on every GET. It now instantiates a fresh SearchForm.
    """
    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            keyword = form.cleaned_data.get('keyword')
            # NOTE(review): name__contains is case-sensitive; consider
            # name__icontains for consistency with the autocomplete view.
            results = Cafe.objects.filter(name__contains=keyword)
            return render(request, 'home.html', {'form': form, 'object_list': results})
    else:
        form = SearchForm()
    return render(request, 'home.html', {'form': form})
def cafeView(request, cafe_id):
    """Render the detail page for a single cafe; 404 when the id is unknown."""
    cafe = get_object_or_404(Cafe, pk=cafe_id)
    context = {'cafe': cafe}
    return render(request, 'cafe.html', context)
def autocomplete(request):
    """Return up to 20 cafes whose name contains the ``term`` query (AJAX).

    Responds with a JSON list of ``{id, name}`` objects; non-AJAX requests
    get the plain string 'fail' (kept for backward compatibility).
    CLEANUP: removed a leftover debug print, replaced the manual
    dict-building loop with a comprehension, and pass content_type by
    keyword (it was passed positionally under the old ``mimetype`` name).
    """
    if request.is_ajax():
        q = request.GET.get('term', '')
        cafes = Cafe.objects.filter(name__icontains=q)[:20]
        results = [{'id': cafe.id, 'name': cafe.name} for cafe in cafes]
        data = json.dumps(results)
    else:
        data = 'fail'
    return HttpResponse(data, content_type='application/json')
def cafeRegister(request):
    """Show the cafe registration form and create a Cafe on valid POST."""
    if request.method != 'POST':
        # Plain GET: render an unbound form.
        return render(request, 'caferegister.html', {'form': CafeRegistrationForm()})
    form = CafeRegistrationForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so field errors are shown.
        return render(request, 'caferegister.html', {'form': form})
    cleaned = form.cleaned_data
    new_cafe = Cafe(
        name=cleaned.get('name'),
        description=cleaned.get('description'),
        latitude=cleaned.get('latitude'),
        longitude=cleaned.get('longitude'),
        main_image_url=cleaned.get('main_image_url'),
    )
    new_cafe.save()
    return redirect('cafe:home')
|
import math
def solve(goal, searchRange):
    """Return the ``goal``-th prime (1-indexed) as a string, sieving up to
    ``searchRange`` inclusive.

    BUG FIXES versus the original: primes were collected from
    ``range(2, n)``, silently excluding ``n`` itself, and the guard
    ``goal < len(numbers)`` wrongly rejected the case where the goal-th
    prime is the last one found. The odd-only pre-marking plus manual while
    loops are replaced by a plain Sieve of Eratosthenes starting each
    crossing-out at i*i.

    Returns "the goal is not in range." when fewer than ``goal`` primes
    exist up to the bound (or the arguments are degenerate).
    """
    n = searchRange
    if goal < 1 or n < 2:
        return "the goal is not in range."
    table = [1] * (n + 1)
    table[0] = table[1] = 0
    for i in range(2, int(math.sqrt(n)) + 1):
        if table[i]:
            # Multiples below i*i were already crossed out by smaller primes.
            for j in range(i * i, n + 1, i):
                table[j] = 0
    numbers = [i for i in range(2, n + 1) if table[i]]
    if goal <= len(numbers):
        return str(numbers[goal - 1])
    return "the goal is not in range."
print(solve(10001, 200000))
|
import sys
def snip_special_characters(word):
    """Return `word` with every character removed except letters and hyphens."""
    return "".join(ch for ch in word if ch.isalpha() or ch == "-")
# Count word frequencies in the file named by the first CLI argument.
# FIXES: ``print word_counts`` was Python-2-only syntax in an otherwise
# Python-3-compatible script, and the input file was never closed -- it is
# now managed by a ``with`` block.
word_counts = {}
with open(str(sys.argv[1])) as file_handler:
    for line in file_handler:
        line = line.rstrip()
        words = line.split(" ")
        for word in words:
            word = word.lower()
            word = snip_special_characters(word)
            word_counts[word] = word_counts.get(word, 0) + 1
print(word_counts)
|
# Main UIButton Class. Made for subclassing for other buttons
# Whole button is being drawn with code, no images in sight! Woo!
import pygame
from noticer import *
from constants import *
from UIView import *
class UIButton(UIView):
    """Code-drawn push button with up/down states and centered drop-shadow text.

    Subclass of UIView, intended as a base for other buttons. All rendering
    is done with pygame surface fills -- no image assets.
    """

    def __init__(self, dimensions, parent):
        cont = constants()
        UIView.__init__(self, dimensions, parent)
        self.userText = ""   # cached label text, re-blitted after repaints
        self.state = "up"    # "up"/"down"; cached to avoid redundant repaints
        self.backgroundcolor = (96, 117, 146)
        self.paint()
        self.font = cont.defaultButtonFont # set the font, default is the default from constants
        self.textOffset = (0, 0) # set the text offset, default there is none

    def setBackgroundColor(self, color):
        self.backgroundcolor = color # Set the background color for later painting referenc e
        self.paint() # Paint the object again, this time with the new background
        self.setText() # Re-add the text to the object (yes, paint removed the text :( )

    def setText(self, text=False):
        # This method basically adds text to the object's canvas. Simple really,
        # Simply places it in the center of the button and renders it using the default font, unless specified in the init, or later
        if text != False:
            self.userText = text
        # Light foreground text centered on the button...
        font = self.font
        text1 = font.render(self.userText, 1, (244, 244, 244))
        text1pos = text1.get_rect()
        text1pos.centerx = self.rect.width / 2 + self.textOffset[0]
        text1pos.centery = self.rect.height / 2 + self.textOffset[1]
        # ...over a dark copy offset 1px down, giving a drop-shadow effect.
        font2 = self.font
        text2 = font2.render(self.userText, 1, (10, 10, 10))
        text2pos = text2.get_rect()
        text2pos.centerx = self.rect.width / 2 + self.textOffset[0]
        text2pos.centery = self.rect.height / 2 + 1 + self.textOffset[1]
        self.image.blit(text2, text2pos)
        self.image.blit(text1, text1pos)

    def paint(self):
        self.image = pygame.Surface((self.rect.width, self.rect.height)) # Fix all the rectangles. Re-init the image
        if self.state == "down": # Paint accordingly whether the mouse is inside or not.
            bg = list(self.backgroundcolor)
            sh = 40 # Shading scale up - Larger the number, the brighter the shaddow
            d = 2 # Shading dithering value, the greater the number, the more dithered colours will be.
            sh2 = [0,0,0]  # NOTE(review): unused -- leftover from an earlier version
            # NOTE(review): bg[i]/d relies on Python 2 integer division.
            self.image.fill((bg[0]/d + sh, bg[1]/d + sh, bg[2]/d + sh), ((1, 1), (self.rect.width - 2, self.rect.height - 2))) # paint the new shadow
        else: # No shadow needed, paint accordingly, with inset shadow color.
            bg = list(self.backgroundcolor)
            self.image.fill(self.backgroundcolor, ((1, 1), (self.rect.width - 2, self.rect.height - 2)))
            sh = 100
            # Determine the colour of the inset shadow according to the color of the background. Will always have a nice top glow to it.
            if (bg[0] + sh) > 255:
                bg[0] = 255
            else:
                bg[0] += sh
            if (bg[1] + sh) > 255:
                bg[1] = 255
            else:
                bg[1] += sh
            if (bg[2] + sh) > 255:
                bg[2] = 255
            else:
                bg[2] += sh
            # NOTE(review): Python 2 debug print left in.
            print bg
            self.image.fill((bg[0], bg[1], bg[2]), ((1, 1), (self.rect.width - 2, 1))) # Paint it to the canvas.

    def manageEvent(self, event, caller, withinBounds=True):
        # Subclass the UIView's manageEvent traverse function.
        UIView.manageEvent(self, event, caller, withinBounds) # Allow the UIButton to still send events to lower objects by using UIView's definition. Also allows for custom callbacks
        self.manageClickDown(withinBounds, event) # Also allow painting the up and down states for the UIButton (hence making it a button)

    def manageClickDown(self, withinBounds, event):
        # Manage the painting for up and down states
        if withinBounds: # Check if within the button. - Within
            if event.type == pygame.MOUSEBUTTONDOWN and self.state == "up": # See if the mouse has been pressed down ,and check the state of the button if it's already up/down to save on extra useless rendering
                self.state = "down" # Set the state for later reference/caching
                self.paint() # Re-paint the button
                self.setText() # Add the text to the button
                self.parent.updateView() # Update the parentView, which will traverse up the tree.
            if event.type == pygame.MOUSEBUTTONUP and self.state == "down": # See if the button was pressed up, AND, it's state is already down - this needs a repaint to make it look like it's not pressed down
                self.state = "up" # Set state for later reference/caching
                self.paint() # Re-paint the button
                self.setText(False) # add the text for the button
                self.parent.updateView() # Update the parentView, which will traverse up the tree.
        # print ("Setting state to: " + self.state)
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
import os.path
from pymongo import MongoClient
import hashlib
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
def role(array):
def _role(method):
def __role(self, *args, **kwargs):
type = self.get_secure_cookie("type")
if type in array:
return method(self, *args, **kwargs)
else:
self.write("no right to access")
return __role
return _role
class BaseHandler(tornado.web.RequestHandler):
    """Common base: resolves the current user from the secure username cookie."""

    def get_current_user(self):
        # Raw secure-cookie value, or None when not logged in.
        return self.get_secure_cookie("username")
class LoginHandler(BaseHandler):
    """Renders the login form (GET) and authenticates against MongoDB (POST).

    On success, sets the secure "username" and "type" cookies; "type" drives
    the @role access checks on the handlers below.
    """

    def get(self):
        self.render('login.html')

    def post(self):
        name = self.get_argument("username", None)
        password = self.get_argument("password", None)
        # NOTE(review): a fresh MongoClient per request is wasteful --
        # consider sharing one client at Application level.
        conn = MongoClient("localhost", 27017)
        self.db = conn["users"]
        # Users are looked up by the "name alias" field of validUsers.
        user = self.db.validUsers.find_one({"name alias": name})
        if user:
            # SECURITY(review): unsalted MD5 is unsuitable for password
            # storage -- migrate to bcrypt/scrypt/argon2.
            # NOTE(review): m.update(password) requires bytes on Python 3 --
            # presumably this runs under Python 2; confirm.
            m = hashlib.md5()
            m.update(password)
            if user["password"] == m.hexdigest():
                self.set_secure_cookie("username", name)
                self.set_secure_cookie("type", user["type"])
                self.redirect("/")
            else:
                # Wrong password: plain-text "error" response.
                self.set_header("Content-Type", "text/plain")
                self.write("error")
        else:
            # Unknown user: back to the login page.
            self.redirect("/login")
class WelcomeHandler(BaseHandler):
    """Landing page; requires login (redirects to login_url otherwise)."""

    @tornado.web.authenticated
    def get(self):
        self.render('index.html', user=self.current_user)
class vipHandler(BaseHandler):
    """VIP-only page.

    FIX: inherits BaseHandler (not tornado.web.RequestHandler) so
    self.current_user resolves from the username cookie, matching
    WelcomeHandler; previously current_user was always None here.
    """

    @role(['vip'])
    def get(self):
        self.render('vip.html', user=self.current_user)
class adminHandler(BaseHandler):
    """Admin page for vip/admin roles.

    FIX: inherits BaseHandler so self.current_user resolves from the
    username cookie; previously it was always None here.
    """

    @role(['vip', 'admin'])
    def get(self):
        self.render('admin.html', user=self.current_user)
class userHandler(BaseHandler):
    """User page for vip/user roles.

    FIX: inherits BaseHandler so self.current_user resolves from the
    username cookie; previously it was always None here.
    """

    @role(['vip', 'user'])
    def get(self):
        self.render('user.html', user=self.current_user)
class guestHandler(BaseHandler):
    """Guest page, open to every authenticated role.

    FIX: inherits BaseHandler so self.current_user resolves from the
    username cookie; previously it was always None here.
    """

    @role(['vip', 'user', 'admin', 'guest'])
    def get(self):
        self.render('guest.html', user=self.current_user)
class LogoutHandler(BaseHandler):
    """Clears the username cookie when a ?logout=... argument is supplied."""

    def get(self):
        if (self.get_argument("logout", None)):
            self.clear_cookie("username")
            self.redirect("/")
        # NOTE(review): without a logout argument the handler falls through
        # and finishes with an empty 200 response.
if __name__ == "__main__":
tornado.options.parse_command_line()
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"cookie_secret": "bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=",
"login_url": "/login",
"debug": True
}
application = tornado.web.Application([
(r'/', WelcomeHandler),
(r'/login', LoginHandler),
(r'/logout', LogoutHandler),
(r'/vip', vipHandler),
(r'/admin', adminHandler),
(r'/user', userHandler),
(r'/guest', guestHandler)
], **settings)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
|
'''
---------- How crawling works ----------
Open the target web page -> fetch the HTML data,
parse the received HTML into an analyzable form -> extract the wanted data.
'''
import requests
from bs4 import BeautifulSoup
from requests_html import HTMLSession

# Alternative using requests_html (kept for reference):
# session = HTMLSession()
# response = session.get("https://www.naver.com")
# print(response.html.links)

response = requests.get("http://www.naver.com")  # when using the requests library
# print(response.status_code)
# print(response.headers)
# print(response.text)

# Parse response.text with the built-in html parser.
bs = BeautifulSoup(response.text, "html.parser")

# Print every <img> tag, then every <a> tag found in the page.
for img in bs.select("img"):
    print(img)
for a in bs.select("a"):
    print(a)
import ui
from plistlib import *
from random import randrange
import speech
import sound
from time import sleep
'''fileName is the plist to import'''
# Default board definition; switch to the LPDB00 variant by swapping lines.
fileName = 'Bitsboard.plist'
#fileName = 'LPDB00/Bitsboard.plist'
class Phototouch (ui.View):
    """Listen-and-touch game: three photo buttons are shown, a prompt sound
    names one of them, and tapping the right one wins the round.

    FIXES: the fade-out animation closure previously captured the loop
    variable by reference (classic late-binding bug -- a deferred call would
    fade only the last button); buttons are now locals instead of leaking a
    ``self.button`` attribute; removed the unused ``a = i + 1``; fixed the
    "sibviews" typo.
    """

    def __init__(self):
        self.name = 'Photo Touch'
        self.background_color = 'lightblue'
        self.setup()

    def did_load(self):
        pass

    def draw(self):
        pass

    def setup(self):
        '''Load the bit list from the plist and start the first round.'''
        with open(fileName, 'rb') as fp:
            pl = load(fp)
        self.originlist = pl['Bits']
        self.buttons = []
        self.playTouch(self.originlist)

    def button_tapped(self, sender):
        '''Checks if sender is the winning bit.'''
        if str(sender.name) == self.winTex:
            sender.image = ui.Image.named('emj:Checkmark_3').with_rendering_mode(ui.RENDERING_MODE_ORIGINAL)
            sound.play_effect('game:Ding_3', 0.2)
            # Bind each button explicitly so every animation fades its own
            # button even if ui.animate defers the call.
            def make_fade(btn):
                def animation():
                    btn.alpha = 0.0  # fade out
                return animation
            for button in self.buttons:
                ui.animate(make_fade(button), duration=1.5)
            ui.delay(self.restart, 2)
        else:
            sender.image = ui.Image.named('emj:Cross_Mark').with_rendering_mode(ui.RENDERING_MODE_ORIGINAL)
            sound.play_effect('game:Error', 0.2)

    def playTouch(self, importList):
        '''Pick three random bits, build their photo buttons, and choose the
        winner whose prompt sound is played.'''
        bitlist = importList.copy()
        showlist = []
        for i in range(3):
            bnum = len(bitlist)
            targetBit = bitlist[randrange(bnum)]
            bitname = str(targetBit)
            targetPic = str(targetBit) + '.jpg'
            picname = targetPic.lower()
            print('pic ' + picname)
            button = ui.Button(bitname)
            (screenX, screenY) = ui.get_screen_size()
            button.frame = (0, 0, 200, 200)
            button.name = bitname
            button.background_image = ui.Image.named(picname).with_rendering_mode(ui.RENDERING_MODE_ORIGINAL)
            button.border_width = 4
            button.border_color = 'white'
            button.corner_radius = 15
            # Evenly spaced across the lower quarter of the screen.
            button.center = ((screenX / 4) * (i + 1), (screenY / 4) * 3)
            #button.flex = 'TR'
            self.add_subview(button)
            button.action = self.button_tapped
            self.buttons.append(button)
            bitlist.remove(targetBit)  # sample without replacement
            showlist.append(targetBit)
        snum = len(showlist)
        winbit = showlist[randrange(snum)]
        # NOTE(review): both audio variants are attempted unconditionally,
        # matching the original behaviour -- confirm one of the two formats
        # exists per bit.
        sound.play_effect(winbit.lower() + '.mp3')
        sound.play_effect(winbit.lower() + '.caf')
        self.winTex = str(winbit)

    def restart(self):
        '''This removes all subviews and then restarts the game.'''
        for button in self.buttons:
            self.remove_subview(button)
        self.buttons.clear()
        self.playTouch(self.originlist)
# Launch the game full-screen in landscape.
v = Phototouch()
v.present('Landscape Large')
|
# Generated by Django 2.0.3 on 2018-04-01 20:50
from django.db import migrations, models
# Auto-generated schema migration: adds OMDb-style metadata columns to the
# `film` model and widens/relabels a few existing fields. Do not edit by
# hand once applied -- create a follow-up migration instead.
class Migration(migrations.Migration):

    dependencies = [
        ('filmsApi', '0002_auto_20180401_2252'),
    ]

    operations = [
        migrations.AddField(
            model_name='film',
            name='actors',
            field=models.CharField(max_length=100, null=True, verbose_name='Актеры'),
        ),
        migrations.AddField(
            model_name='film',
            name='boxOffice',
            field=models.CharField(max_length=20, null=True, verbose_name='Прокат'),
        ),
        migrations.AddField(
            model_name='film',
            name='country',
            field=models.CharField(max_length=25, null=True, verbose_name='Страна'),
        ),
        migrations.AddField(
            model_name='film',
            name='director',
            field=models.CharField(max_length=50, null=True, verbose_name='Режисер'),
        ),
        migrations.AddField(
            model_name='film',
            name='genre',
            field=models.CharField(max_length=50, null=True, verbose_name='Жанр'),
        ),
        migrations.AddField(
            model_name='film',
            name='language',
            field=models.CharField(max_length=30, null=True, verbose_name='Язык'),
        ),
        migrations.AddField(
            model_name='film',
            name='plot',
            field=models.CharField(max_length=250, null=True, verbose_name='Описание'),
        ),
        migrations.AddField(
            model_name='film',
            name='poster',
            field=models.CharField(max_length=100, null=True, verbose_name='URL-постер'),
        ),
        migrations.AddField(
            model_name='film',
            name='production',
            field=models.CharField(max_length=30, null=True, verbose_name='Продакшн компания'),
        ),
        migrations.AddField(
            model_name='film',
            name='rated',
            field=models.CharField(max_length=15, null=True, verbose_name='Рейтинг'),
        ),
        migrations.AddField(
            model_name='film',
            name='released',
            field=models.CharField(max_length=25, null=True, verbose_name='Дата релиза'),
        ),
        migrations.AddField(
            model_name='film',
            name='writer',
            field=models.CharField(max_length=50, null=True, verbose_name='Сценарист'),
        ),
        migrations.AddField(
            model_name='film',
            name='year',
            field=models.IntegerField(null=True, verbose_name='Год'),
        ),
        migrations.AlterField(
            model_name='film',
            name='runtime',
            field=models.IntegerField(null=True, verbose_name='Время'),
        ),
        migrations.AlterField(
            model_name='film',
            name='slogan',
            field=models.CharField(max_length=200, null=True, verbose_name='Слоган'),
        ),
        migrations.AlterField(
            model_name='film',
            name='title',
            field=models.CharField(max_length=200, null=True, verbose_name='Название'),
        ),
    ]
|
#
"""
Base class for episodic reinforcement learning agents.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from texar.agents.agent_base import AgentBase
# pylint: disable=too-many-instance-attributes
class EpisodicAgentBase(AgentBase):
    """
    Base class inherited by episodic RL agents.

    Wraps the subclass hooks (:meth:`_reset`, :meth:`_observe`,
    :meth:`_get_action`) in :func:`tf.make_template` so variables they
    create are shared across calls.

    Args:
        env_config: Environment configuration -- presumably carries the
            observation/action specs consumed by subclasses (not read here;
            confirm against concrete agents).
        hparams (dict, optional): Hyperparameters; missing entries take the
            values from :meth:`default_hparams`.
    """

    def __init__(self, env_config, hparams=None):
        AgentBase.__init__(self, hparams)
        self._env_config = env_config
        # Templates share any TF variables created inside the hooks across
        # repeated calls.
        self._reset_tmplt_fn = tf.make_template(
            "{}_reset".format(self.name), self._reset)
        self._observe_tmplt_fn = tf.make_template(
            "{}_observe".format(self.name), self._observe)
        self._get_action_tmplt_fn = tf.make_template(
            "{}_get_action".format(self.name), self._get_action)

    @staticmethod
    def default_hparams():
        """Returns a dictionary of hyperparameters with default values.

        Only ``name`` ('agent') is defined at this level; subclasses extend.
        """
        return {
            'name': 'agent'
        }

    def reset(self):
        """Resets the states to begin new episodes.
        """
        self._reset_tmplt_fn()

    def _reset(self):
        # Subclass hook: clear per-episode state.
        raise NotImplementedError

    def observe(self, observ, action, reward, terminal, next_observ, train_policy=True, feed_dict=None):
        """Observes experience from environment.

        Args:
            observ: Observation before the transition.
            action: Action taken.
            reward: Scalar reward received.
            terminal: Whether the episode ended at this step.
            next_observ: Observation after the transition.
            train_policy (bool): Whether to update the policy from this
                experience.
            feed_dict (dict, optional): Extra TF feeds for the update.
        """
        return self._observe_tmplt_fn(
            observ, action, reward, terminal, next_observ, train_policy, feed_dict)

    def _observe(self, observ, action, reward, terminal, next_observ, train_policy, feed_dict):
        # Subclass hook: consume one transition.
        raise NotImplementedError

    def get_action(self, observ, feed_dict=None):
        """Gets action according to observation.

        Args:
            observ: Current observation.
            feed_dict (dict, optional): Extra TF feeds for the policy.

        Returns:
            The action chosen by the subclass policy.
        """
        return self._get_action_tmplt_fn(observ, feed_dict)

    def _get_action(self, observ, feed_dict):
        # Subclass hook: map an observation to an action.
        raise NotImplementedError
|
"""Datasets from metadat."""
import json
import os.path as op
import pandas as pd
from pymare.utils import get_resource_path
def michael2013():
    """Load a dataset of studies on the persuasive power of a brain image.

    The dataset was published in :footcite:t:`michael2013non` and curated
    in metadat :footcite:p:`white2022metadat`.

    Returns
    -------
    df : :obj:`~pandas.DataFrame`
        One row per study, with the following columns:

        - ``"Study"``: the study name
        - ``"No_brain_n"``: the sample size for no-brain-image condition
        - ``"No_brain_m"``: mean agreement rating for no-brain-image condition
        - ``"No_brain_s"``: standard deviation of agreement rating for no-brain-image condition
        - ``"Brain_n"``: the sample size for brain-image condition
        - ``"Brain_m"``: mean agreement rating for brain-image condition
        - ``"Brain_s"``: standard deviation of agreement rating for brain-image condition
        - ``"Included_Critique"``: whether a critique was included in the study or not
        - ``"Medium"``: the medium of the study
        - ``"Compensation"``: notes on the compensation of the study
        - ``"Participant_Pool"``: notes on where participants were recruited
        - ``"yi"``: Raw mean difference, calculated as Brain_m - No_brain_m
        - ``"vi"``: Corresponding sampling variance
    metadata : :obj:`dict`
        A dictionary with metadata about the columns in the dataset.

    Notes
    -----
    For more information about this dataset, see metadat's documentation:
    https://wviechtb.github.io/metadat/reference/dat.michael2013.html

    References
    ----------
    .. footbibliography::
    """
    data_dir = op.join(get_resource_path(), "datasets")
    df = pd.read_table(op.join(data_dir, "michael2013.tsv"))
    with open(op.join(data_dir, "michael2013.json"), "r") as fobj:
        metadata = json.load(fobj)
    return df, metadata
|
import numpy as np
import cv2
import math
# Intended frame size (currently unused: the resize call below is commented out).
h=300
w=300
# Default webcam as the video source.
cap = cv2.VideoCapture(0)
# Centre (x, y) and radius in pixels of the "sun" disc.
SUN_LOC=(200,40)
SUN_RSIZE=15
# Gap between the sun's edge and the ring of orbiting dots.
ORBITAL_R=10
def Orbiral(frame, Centerloc, orbit_r, size_r, phi, color):
    """Draw one filled orbiting dot on `frame` and return the frame.

    The dot of radius `size_r` is placed at angle `phi` (degrees) on a
    circle of radius `orbit_r` centred on `Centerloc` (x, y).
    """
    angle = np.deg2rad(phi)
    px = Centerloc[0] + int(orbit_r * np.cos(angle))
    py = Centerloc[1] + int(orbit_r * np.sin(angle))
    return cv2.circle(frame, (px, py), size_r, color, -1)
ORBITAL_RSIZE=2  # radius of each orbiting dot
ORBITAL_PHI=0    # current orbit angle, degrees
ORBITAL_DPHI=1 #0.5deg delta
#2021/05/06 Window priority
print(cv2.WND_PROP_FULLSCREEN)
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame",cv2.WND_PROP_FULLSCREEN,0)
# Orbit radius: sun radius plus the configured gap.
dr=(SUN_RSIZE+ORBITAL_R) #*(orbitdphi) #*np.pi/180)
orbitloc=(SUN_LOC[0],SUN_LOC[1]+SUN_RSIZE+ORBITAL_R)
while True:
    _, frame = cap.read()
    #frame = cv2.resize(frame,(h,w))
    # NOTE(review): if the camera never yields a frame this spins forever.
    if(frame is None):
        continue
    # Draw the sun.
    frame = cv2.circle(frame,SUN_LOC,SUN_RSIZE, (0,0,250), -1)
    x_orbit=SUN_LOC[0]+int(dr*np.cos(np.deg2rad(ORBITAL_PHI)))
    y_orbit=SUN_LOC[1]+int(dr*np.sin(np.deg2rad(ORBITAL_PHI)))
    #print(f"x:{x_orbit} y:{y_orbit} phi:{int(orbitphi)}")
    # Twelve dots spaced 30 degrees apart, all advancing together.
    for offphi in range(-180,180,30):
        frame=Orbiral(frame,SUN_LOC,dr,ORBITAL_RSIZE,ORBITAL_PHI-offphi,(0,255,255))
    #frame=Orbiral(frame,SUN_LOC,dr,ORBITAL_RSIZE,ORBITAL_PHI-180,(0,255,0))
    #frame= cv2.circle(frame,(x_orbit,y_orbit),ORBITAL_RSIZE, (0,255,0), -1)
    #frame= cv2.circle(frame,(x_orbit,y_orbit),ORBITAL_RSIZE, (255,0,0), -1)
    orbitloc=(x_orbit,y_orbit)
    # Advance and wrap the orbit angle.
    ORBITAL_PHI+=ORBITAL_DPHI
    if ORBITAL_PHI>=360:
        ORBITAL_PHI=0
    #Line
    #img = cv2.line(frame,logoloc,orbitloc,(255,0,0),5)
    cv2.imshow('Frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the VideoCapture object.
cap.release()
cv2.destroyAllWindows()
|
# https://leetcode.com/problems/maximum-subarray/description/
# solution:
# https://en.wikipedia.org/wiki/Maximum_subarray_problem#Kadane's_algorithm_(Algorithm_3:_Dynamic_Programming)
class Solution:
    def maxSubArray(self, nums):
        """Return the largest sum of any contiguous subarray of `nums`.

        Kadane's algorithm: `max_ending_here` is the best sum of a subarray
        ending at the current element; `max_so_far` is the best seen overall.
        Assumes `nums` is non-empty (per the LeetCode problem statement).

        :type nums: List[int]
        :rtype: int
        """
        max_ending_here = max_so_far = nums[0]
        for number in nums[1:]:
            # Either extend the running subarray or restart at `number`.
            max_ending_here = max(number, max_ending_here + number)
            max_so_far = max(max_ending_here, max_so_far)
        return max_so_far
# Quick manual check with the classic example (expected answer: 6).
a = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
x = Solution()
print(x.maxSubArray(a))
import sys
import math
# def reverse(num):
# return num[::1]
def toBaseN(num, base):
    """Convert a non-negative integer to base-`base` digits.

    Returns the digits least-significant first, e.g.
    toBaseN(10, 2) -> [0, 1, 0, 1].
    """
    if num == 0:
        # Fix: the original stripped "leading zeros" from [0] until the
        # list was empty and then crashed indexing it; 0 is one zero digit.
        return [0]
    highestPower = 0
    while pow(base, highestPower) < num:
        highestPower += 1
    digits = []
    for i in range(highestPower, -1, -1):
        placeValue = pow(base, i)
        # Integer floor division (the original's math.floor(num/placeValue)
        # goes through float and loses precision for very large numbers).
        digits.append(num // placeValue)
        num %= placeValue
    # Strip leading zeros (e.g. when num was not an exact power of base).
    while digits[0] == 0:
        digits.pop(0)
    # Reverse so the least-significant digit comes first.
    return digits[::-1]
def reverseIntList(intList):
    """Return a new list with the elements of `intList` in reverse order.

    Replaces the hand-rolled index loop with the idiomatic slice.
    """
    return intList[::-1]
def toBase10(intList, base):
    """Interpret `intList` as base-`base` digits (least-significant first)
    and return the corresponding integer.

    Avoids shadowing the builtin `sum` that the original used as a local.
    """
    return sum(digit * pow(base, i) for i, digit in enumerate(intList))
def palindrome(num):
    """Return True if the digit list reads the same forwards and backwards."""
    # Compare against the reversed slice directly (no helper call needed).
    return num == num[::-1]
# For each stdin line "base number": repeatedly reverse-and-add the number
# in that base until its digits form a palindrome (giving up after 500
# iterations), then print the iteration count and final digit length.
count = 0
length = 0  # NOTE(review): `length` is assigned but never used — confirm it can go.
for line in sys.stdin:
    count = 0
    length = 0
    fields = line.split()
    base = int(fields[0])
    num = int(fields[1])
    intList = toBaseN(num, base)
    while (not palindrome(intList)) and (count <= 500):
        count += 1
        # One reverse-and-add step, performed in base `base`.
        num = toBase10(intList, base)
        rev = reverseIntList(intList)
        num += toBase10(rev, base)
        intList = toBaseN(num, base)
    if count > 500:
        print(">500")
    else:
        print(count, len(intList))
exit(0)
"""
Build your Deep Neural Network: Step by Step
"""
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases import *
from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward
#%matplotlib inline
#plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
#plt.rcParams['image.interpolation'] = 'nearest'
#plt.rcParams['image.cmap'] = 'gray'
#%load_ext autoreload
#%autoreload 2
#np.random.seed(1) |
from logging import getLogger
import requests
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from friendly_captcha.widgets import FrcCaptchaWidget
logger = getLogger('django.friendly_captcha')
class FrcCaptchaField(forms.CharField):
    """Form field that validates a Friendly Captcha puzzle solution.

    clean() POSTs the submitted solution to the configured verification
    endpoint.  On success it returns True; on failure it returns False when
    FRC_CAPTCHA_FAIL_SILENT is set, otherwise it raises ValidationError.
    """

    description = "Friendly captcha field"
    widget = FrcCaptchaWidget

    def __init__(self, *args, **kwargs):
        super(FrcCaptchaField, self).__init__(*args, **kwargs)

    def clean(self, value):
        clean_value = False
        # All three settings must be configured for verification to run;
        # when any is missing the field behaves like a failed captcha.
        captcha_secret = getattr(settings, 'FRC_CAPTCHA_SECRET', None)
        captcha_sitekey = getattr(settings, 'FRC_CAPTCHA_SITE_KEY', None)
        captcha_verification_url = getattr(settings, 'FRC_CAPTCHA_VERIFICATION_URL', False)
        if captcha_sitekey and captcha_secret and captcha_verification_url:
            payload = {
                'solution': value,
                'secret': captcha_secret,
                'sitekey': captcha_sitekey
            }
            # Fix: requests has no default timeout, so a slow or unreachable
            # verification service would hang the request thread forever.
            captcha_response = requests.post(
                captcha_verification_url, data=payload, timeout=10)
            if captcha_response.status_code == 200:
                validation = captcha_response.json()
                if not validation['success']:
                    logger.info('Captcha failed validation {}'.format(captcha_response.json()))
                else:
                    logger.info('Captcha validation success')
                    clean_value = True
            else:
                logger.info('Captcha failed validation {}'.format(captcha_response.json()))
        if clean_value:
            return True
        fail_silent = getattr(settings, 'FRC_CAPTCHA_FAIL_SILENT', False)
        if fail_silent:
            return False
        raise ValidationError(_('Captcha test failed'), code='bot_detected')
|
import math
def computeForSevereImpact(data):
    """Estimate severe-impact COVID-19 projections for a region.

    `data` carries the reported case count, the requested time window
    (`timeToElapse` plus `periodType`), hospital-bed capacity and regional
    income figures; the return value is a dict of the derived estimates.
    """
    region = data['region']
    income_per_day = region['avgDailyIncomeInUSD']
    earning_share = region['avgDailyIncomePopulation']

    # Normalise the requested window to days; anything other than
    # days/weeks/months is treated as years of 360 days.
    days_per_unit = {'days': 1, 'weeks': 7, 'months': 30}
    period = int(data['timeToElapse'] * days_per_unit.get(data['periodType'], 360))

    # Infections double roughly every 3 days.
    doublings = period // 3
    currently_infected = data['reportedCases'] * 50
    projected_infections = int(currently_infected * (2 ** doublings))

    # 15% of infections become severe; beds left over after severe cases
    # occupy the 35% of capacity assumed to be free.
    severe_cases = int((15 / 100) * projected_infections)
    beds_available = int((35 / 100) * data['totalHospitalBeds'] - severe_cases)

    # 5% need ICU care, 2% need ventilators.
    icu_cases = int((5 / 100) * projected_infections)
    ventilator_cases = int((2 / 100) * projected_infections)

    # Daily economic loss over the whole window, truncated to a dollar.
    money_lost = int(math.trunc(
        projected_infections * earning_share * income_per_day / period))

    return {
        'currentlyInfected': currently_infected,
        'infectionsByRequestedTime': projected_infections,
        'severeCasesByRequestedTime': severe_cases,
        'hospitalBedsByRequestedTime': beds_available,
        'casesForICUByRequestedTime': icu_cases,
        'casesForVentilatorsByRequestedTime': ventilator_cases,
        'dollarsInFlight': money_lost,
    }
|
import django_rq
from django.conf import settings
from django.core.mail import send_mail
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _ul
from tastypie.api import Api
from tastypie import fields, utils
from tastypie.models import ApiKey
from tastypie.utils import trailing_slash
from tastypie.resources import ModelResource
from tastypie.authorization import Authorization
from tastypie.authentication import Authentication, ApiKeyAuthentication, BasicAuthentication
from .tasks import create_rnd_sales
from .models import Product, Ingredient, CustomUser, Order, OrderItem, Currency, Review
from .signals import post_save_handler
# Attach the model post-save signal handler at import time.
post_save.connect(post_save_handler)


class RegistrationAuthorization(Authorization):
    # Placeholder: default tastypie Authorization behaviour.
    pass


class BackboneCompatibleResource(ModelResource):
    # Base resource for this API: Backbone.js expects the mutated object
    # back in the response body, hence always_return_data.
    class Meta:
        always_return_data = True
class ConfirmationResource(BackboneCompatibleResource):
    """Activates an inactive user account when fetched with a matching key.

    NOTE(review): this mutates state (sets is_active) on a GET request,
    reads the HTTP_APIKEY/HTTP_USERNAME headers without a fallback (a
    missing header raises KeyError), and compares the key only against the
    first row of the filtered queryset — confirm all three are intended.
    """

    api_key = fields.ToOneField('showcase.api.ApiKeyResource', 'api_key', full=True)

    def obj_get_list(self, bundle, **kwargs):
        # Restrict the inactive-user queryset to the username supplied in
        # the request headers.
        qs = super(ConfirmationResource, self).obj_get_list(bundle, **kwargs)
        key = bundle.request.META['HTTP_APIKEY']
        user_name = bundle.request.META['HTTP_USERNAME']
        qs = qs.filter(username=user_name)
        if key == qs[0].api_key.key:
            # Key matches: flip the account to active.
            mdl = qs.get(username=user_name)
            mdl.is_active = True
            mdl.save()
        return qs

    class Meta(BackboneCompatibleResource.Meta):
        queryset = CustomUser.objects.filter(is_active=False).select_related()
        allowed_methods_list = ['get', 'post']
        resource_name = 'ConfirmationResource'
class ApiKeyResource(BackboneCompatibleResource):
    """Read-only access to tastypie API keys, linked to their user."""

    user = fields.ToOneField('showcase.api.ConfirmationResource', 'user')

    class Meta(BackboneCompatibleResource.Meta):
        queryset = ApiKey.objects.all()
        fields = ['key', 'user']
        allowed_methods_list = ['get']
        resource_name = 'ApiKeyResource'


class AuthenticationResource(BackboneCompatibleResource):
    """Returns the API key belonging to the basic-authenticated requester."""

    def obj_get_list(self, bundle, **kwargs):
        # Only the requesting user's own key is visible.
        qs = super(AuthenticationResource, self).obj_get_list(bundle, **kwargs)
        return qs.filter(user=bundle.request.user)

    class Meta(BackboneCompatibleResource.Meta):
        queryset = ApiKey.objects.all()
        fields = ['key', 'user']
        allowed_methods_list = ['get']
        resource_name = 'AuthenticationResource'
        authentication = BasicAuthentication()


class RegistrationResource(BackboneCompatibleResource):
    """Open (unauthenticated) endpoint used to create new accounts."""

    class Meta(BackboneCompatibleResource.Meta):
        queryset = CustomUser.objects.all()
        fields = []
        allowed_methods_list = ['post']
        resource_name = 'RegistrationResource'
        authentication = Authentication()
        authorization = RegistrationAuthorization()


class CurrencyResource(ModelResource):
    """Read access to currencies for API-key-authenticated clients."""

    class Meta(BackboneCompatibleResource.Meta):
        queryset = Currency.objects.all()
        resource_name = 'CurrencyResource'
        authentication = ApiKeyAuthentication()
class ProductResource(BackboneCompatibleResource):
    """Published products with embedded reviews and currency."""

    reviews = fields.ToManyField('showcase.api.ReviewResource', 'reviews', related_name='product', blank=True, null=True, full=True)
    currency = fields.ForeignKey(CurrencyResource, 'currency', full=True)
    # NOTE(review): allowed_methods_list is a plain class attribute here,
    # not a Meta option as in the other resources — confirm it takes effect.
    allowed_methods_list = ['get']
    #sum_ings_price = fields.DecimalField(readonly=True)

    class Meta(BackboneCompatibleResource.Meta):
        queryset = Product.objects.all_published()
        resource_name = 'ProductResource'
        authorization = Authorization()

    #def dehydrate_sum_ings_price(self, bundle):
    #    return bundle.obj.sum_ings_price


class AddProductResource(BackboneCompatibleResource):
    """Authenticated endpoint for creating products."""

    reviews = fields.ToManyField('showcase.api.ReviewResource', 'reviews', related_name='product', blank=True, null=True, full=True)
    currency = fields.ForeignKey(CurrencyResource, 'currency')
    allowed_methods_list = ['post']

    class Meta(BackboneCompatibleResource.Meta):
        queryset = Product.objects.all_published()
        resource_name = 'AddProductResource'
        authentication = ApiKeyAuthentication()
        authorization = Authorization()


class ReviewResource(BackboneCompatibleResource):
    """Product reviews; embedded in ProductResource responses."""

    product = fields.ToOneField(ProductResource, 'product')

    class Meta(BackboneCompatibleResource.Meta):
        queryset = Review.objects.all()


class IngredientResource(BackboneCompatibleResource):
    """Ingredients, for API-key-authenticated clients."""

    class Meta(BackboneCompatibleResource.Meta):
        queryset = Ingredient.objects.all()
        resource_name = 'IngredientResource'
        authentication = ApiKeyAuthentication()
class CustomUserResource(BackboneCompatibleResource):
    """User CRUD; obj_create additionally e-mails a confirmation link."""

    class Meta(BackboneCompatibleResource.Meta):
        queryset = CustomUser.objects.all()
        resource_name = 'CustomUserResource'
        authentication = ApiKeyAuthentication()
        authorization = Authorization()

    def _send_email(self, bundle, user):
        # Front-end confirmation URL embedding the new user's API key and
        # username (consumed by ConfirmationResource above).
        token_url = "%s://%s/#confirm/%s/%s/" % (
            bundle.request.scheme,
            bundle.request.get_host(),
            user.api_key.key,
            user.username
        )
        send_mail(
            _ul(u'Account confirmation'),
            _ul(u'Go to %s for complete registration') % token_url,
            settings.EMAIL_HOST_USER,
            [user.email],
            fail_silently=False
        )

    def obj_create(self, bundle, **kwargs):
        res = super(CustomUserResource, self).obj_create(bundle, **kwargs)
        self._send_email(bundle, bundle.obj)
        return res
class OrderResource(BackboneCompatibleResource):
    # NOTE(review): this class is redefined below with a ModelResource
    # base; this first definition is shadowed and never used — confirm
    # which of the two is intended before removing either.
    class Meta(BackboneCompatibleResource.Meta):
        queryset = Order.objects.all()
        resource_name = 'OrderResource'
        authentication = ApiKeyAuthentication()
        authorization = Authorization()


class OrderItemResource(BackboneCompatibleResource):
    """Line items of an order."""

    class Meta(BackboneCompatibleResource.Meta):
        queryset = OrderItem.objects.all()
        resource_name = 'OrderItemResource'
        authentication = ApiKeyAuthentication()
        authorization = Authorization()


class OrderResource(ModelResource):
    # Second definition: this is the binding the module actually exports.
    class Meta(BackboneCompatibleResource.Meta):
        queryset = Order.objects.all()
        resource_name = 'OrderResource'
        authentication = ApiKeyAuthentication()
        authorization = Authorization()


class GenerationResource(ModelResource):
    """GET triggers a background RQ job generating random sales."""

    def obj_get_list(self, bundle, **kwargs):
        # Python 2 print statement — this module targets Python 2.
        print "start RQ-job here..."
        django_rq.enqueue(create_rnd_sales, None, None)
        return super(GenerationResource, self).obj_get_list(bundle, **kwargs);

    class Meta(BackboneCompatibleResource.Meta):
        resource_name = 'GenerationResource'
        queryset = OrderItem.objects.all()
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
# Public v1 API. Note that OrderResource, OrderItemResource, ReviewResource
# and ApiKeyResource are defined above but never registered here.
v1_api = Api(api_name='v1')
v1_api.register(ProductResource())
v1_api.register(AddProductResource())
v1_api.register(IngredientResource())
v1_api.register(CustomUserResource())
v1_api.register(CurrencyResource())
v1_api.register(AuthenticationResource())
v1_api.register(ConfirmationResource())
v1_api.register(GenerationResource())
|
import pygame as pg
from .. import prepare
from .ramp import Ramp, Hill
from .windmill import Windmill
HOLE_INFO = {
1: {"ramps": [((855, 279, 373, 27), (0, -1)),
((855, 347, 373, 48), (0, 1))],
"hills": [],
"ball pos": (150, 200),
"cup pos": (1047, 480),
"par": 3},
2: {"ramps": [((392, 97, 35, 237), (-1, 0)),
((563, 97, 51, 237), (-1, 0)),
((793, 97, 35, 237), (-1, 0)),
((459, 97, 35, 237), (1, 0)),
((670, 97, 51, 237), (1, 0)),
((860, 97, 35, 237), (1, 0))],
"hills": [],
"ball pos": (250, 600),
"cup pos": (1080, 568),
"par": 3},
3: {"ramps": [((343, 86, 322, 22), (-1, 0)),
((343, 86, 17, 211), (0, 1))],
"hills": [],
"ball pos": (714, 641),
"cup pos": (351, 330),
"par": 3},
4: {"ramps": [((358, 445, 277, 84), (1, 0))],
"hills": [],
"ball pos": (170, 155),
"cup pos": (244, 484),
"par": 4},
5: {"ramps": [((187, 253, 247, 94), (1, 0))],
"hills": [],
"ball pos": (117, 302),
"cup pos": (1153, 301),
"par": 4},
6: {"ramps": [((197, 88, 670, 138), (1, 0)),
((384, 327, 585, 78), (-1, 0)),
#((201, 405, 68, 113), (0, -1)),
((269, 534, 435, 42), (-1, 0))],
"hills": [],
"ball pos": (140, 159),
"cup pos": (809, 495),
"par": 5},
7: {"ramps": [((763, 253, 24, 32), (-1, 0)),
((496, 429, 24, 32), (-1, 0))],
"hills": [],
"ball pos": (567, 164),
"cup pos": (705, 554),
"par": 3},
8: {"ramps": [],
"hills": [],
"ball pos": (633, 322),
"cup pos": (633, 582),
"par": 4},
9: {"ramps": [((621, 121, 167, 292), (1, 0)),
((875, 311, 76, 352), (-1, 0))],
"hills": [((72, 662), 100, 200)],
"ball pos": (1106, 616),
"cup pos": (424, 355),
"par": 7}
}
# Copy the front nine for the back nine.
# NOTE(review): this aliases the SAME dict object for hole N and hole N+9
# (no copy is made), so mutating hole 10's info would also change hole 1 —
# confirm the hole data is treated as read-only.
for x in range(1, 10):
    HOLE_INFO[x + 9] = HOLE_INFO[x]
class Cup(pg.sprite.Sprite):
    """The hole's cup: the circular target sprite the ball falls into."""

    def __init__(self, pos, *groups):
        super(Cup, self).__init__(*groups)
        cup_image = prepare.GFX["cup"]
        self.image = cup_image
        self.rect = cup_image.get_rect(center=pos)
        self.mask = pg.mask.from_surface(cup_image)
        self.radius = cup_image.get_width() // 2
class CourseHole(pg.sprite.Sprite):
    """One hole of the course: background image, obstacles, ball start,
    cup position and par, all loaded from HOLE_INFO.

    NOTE(review): __init__ never calls pg.sprite.Sprite.__init__ — confirm
    instances are never added to sprite groups.
    """

    def __init__(self, hole_num):
        # Collision geometry comes from the "hole" image; the visible
        # surface is the "green" image with the hole image blitted on top.
        image = prepare.GFX["hole{}".format(hole_num)]
        self.rect = image.get_rect()
        self.mask = pg.mask.from_surface(image)
        self.image = prepare.GFX["green{}".format(hole_num)]
        self.image.blit(image, (0, 0))
        self.make_ramps(HOLE_INFO[hole_num]["ramps"])
        self.make_hills(HOLE_INFO[hole_num]["hills"])
        self.ball_pos = HOLE_INFO[hole_num]["ball pos"]
        self.cup = Cup(HOLE_INFO[hole_num]["cup pos"])
        self.par = HOLE_INFO[hole_num]["par"]
        # Hole 3 is the windmill hole.
        if hole_num == 3:
            self.windmill = Windmill((716, 303), (1, -1))
        else:
            self.windmill = None

    def update(self, dt, ball):
        # Only the windmill animates between frames.
        if self.windmill:
            self.windmill.update(dt, ball)

    def make_ramps(self, ramp_info):
        # Each entry is (rect, velocity) as listed in HOLE_INFO.
        self.ramps = [Ramp(rect, velocity) for rect, velocity in ramp_info]

    def make_hills(self, hill_info):
        # Each entry is (center, flat_radius, radius) as listed in HOLE_INFO.
        self.hills = [Hill(center, flat_radius, radius)
                      for center, flat_radius, radius in hill_info]

    def draw(self, surface):
        surface.blit(self.image, self.rect)
        surface.blit(self.cup.image, self.cup.rect)
        #for ramp in self.ramps:
        #    pg.draw.rect(surface, pg.Color("blue"), ramp.rect, 1)
|
from numba import jit, uint32
import numpy as np
from timeit import timeit
@jit(nopython=True, cache=True)
def go_fast():  # Function is compiled to machine code when called the first time
    """numba-jitted benchmark body: a 10x10 uint32 grid plus the tanh-sum
    of its diagonal, broadcast over the whole array."""
    x = np.arange(100, dtype=np.uint32).reshape(10, 10)
    trace = 0
    for i in range(x.shape[0]):  # Numba likes loops
        trace += np.tanh(x[i, i])  # Numba likes NumPy functions
    return x + trace  # Numba likes NumPy broadcasting
def go_slow():
    """Plain-Python twin of go_fast, kept un-jitted for benchmarking."""
    grid = np.arange(100).reshape(10, 10)
    diag_sum = 0
    for row in range(grid.shape[0]):
        diag_sum += np.tanh(grid[row, row])
    return grid + diag_sum
@jit(nopython=True, cache=True)
def no_loop():
    """jitted: build 0..10**5-1 with a single vectorized arange call."""
    x = np.arange(10 ** 5)
    return x
@jit(nopython=True, cache=True)
def loop():
    """jitted: build the same 0..10**5-1 array with an explicit fill loop."""
    x = np.empty(10**5, dtype=np.uint32)
    for i in range(10**5):
        x[i] = i
    return x
# @jit(nopython=True, cache=True)
def slow_no_loop():
    """Un-jitted baseline: vectorized construction of 0..10**5-1."""
    return np.arange(10 ** 5)
# @jit(nopython=True, cache=True)
def slow_loop():
    """Un-jitted baseline: element-by-element fill of a uint32 array."""
    size = 10 ** 5
    out = np.empty(size, dtype=np.uint32)
    for idx in range(size):
        out[idx] = idx
    return out
# Compare jitted vs. un-jitted array construction, three runs each.
# NOTE(review): timeit() with no `number` argument executes each callable
# 1,000,000 times; with 10**5-element arrays this takes a very long time —
# confirm a smaller `number` was not intended.
for i in range(3):
    # print(timeit(go_fast))
    # print(timeit(go_slow))
    print('jit')
    print(timeit(no_loop))
    print(timeit(loop))
    print('no jit')
    print(timeit(slow_no_loop))
    print(timeit(slow_loop))
|
import os
import json
import re
import datetime
# Hebrew issue-tag keys, in the exact order of the CSV output columns:
# Food, Clothing, Entertainment, health, Men, Depression, Religion,
# Relationship, Society, Holidays, Vacation, Technology, Money, Studies,
# residence, social_media, Music, Weather, sex, Party, Place_of_study,
# Family, women, politics, Positive_emotion, Negative_emotion,
# transportation, field_of_study.
ISSUE_KEYS = (
    "אוכל", "בגדים", "בידור", "בריאות", "גברים", "דיכאון", "דת",
    "זוגיות", "חברה", "חגים", "חופשה", "טכנולוגיה", "כסף", "לימודים",
    "מגורים", "מדיה חברתית", "מוזיקה", "מזג אוויר", "מין", "מסיבה",
    "מקום לימודים", "משפחה", "נשים", "פוליטיקה", "רגש חיובי",
    "רגש שלילי", "תחבורה", "תחום לימודים",
)


def processSource(source, f):
    """Append one CSV row per post of `source` to data3/confessions.csv.

    Reads data2/<source>_2.json (a list of post dicts) and writes the
    per-post metadata followed by one column per issue tag in ISSUE_KEYS
    (0 when the tag is absent).  `f` is the page's follower count, passed
    through as the `followers` column.

    Fixes: the original never closed the input JSON file, and spelled out
    the 28 issue lookups as copy-pasted one-liners.
    """
    with open("data2/" + source + "_2.json", mode='r', encoding='utf-8') as posts:
        data1 = json.load(posts)
    with open('data3/confessions.csv', 'a', encoding='utf-8') as file3:
        for post in data1:
            issues = post["issues"]
            fields = [
                post["college"].replace(" ", "_"),
                str(int(post["time"][5:7])),  # month, zero-padding stripped
                f,
                '1' if post["isUniversity"] == True else '0',
                str(post["len_char"]),
                str(post["len_words"]),
                str(post["likes"]),
                str(post["shares"]),
                str(post["comments"]),
            ]
            fields.extend(str(issues[key]) if key in issues else '0'
                          for key in ISSUE_KEYS)
            file3.write(','.join(fields) + '\n')
collegeArray = {
"ColmanConfessions": "3640",
"MTACONFESS": "1569",
"LevinskyConfessions": "559",
"JCTConfessions": "462",
"smkbconfessions": "927",
"ShenkarConfessions":"1862",
"bezalelconf": "1434",
"IDCHerzliyaConfessions": "7817",
"sapirconfession": "2218",
"telhaiconfessions": "2953",
"hitconfessionsisrael": "1872",
"RuppinConfession": "1720"
}
universityArray = {
"Open.University.of.Israel.Confessions": "3373",
"HUIConfessions": "2568",
"TechnionConfessions": "17411",
"ArielUConfessions": "11716",
"tel.aviv.university.confessions": "20059",
"BGUConfession": "20211",
"biuconfessions2018": "8317",
"HUJI.Confessions": "11253"
}
# Write the CSV header, truncating any previous run's output.
line = 'college,month,followers,isUniversity,len_char,len_words,likes,shares,comments,Food,Clothing,Entertainment,health,Men,Depression,Religion,Relationship,Society,Holidays,Vacation,Technology,Money,Studies,residence,social_media,Music,Weather,sex,Party,Place_of_study,Family,women,politics,Positive_emotion,Negative_emotion,transportation,field_of_study\n';
file3 = open('data3/confessions.csv', 'w', encoding='utf-8')
file3.write(line)
file3.close()
# Append one row per post for every college and university page; the dict
# value (follower count) is passed through as the `followers` column.
for s,v in collegeArray.items():
    print(s)
    i = processSource(s,v)
for s,v in universityArray.items():
    print(s)
    i = processSource(s,v)
|
from django.contrib import admin
from .models import Priority, Todo
class PriorityAdmin(admin.ModelAdmin):
    """List view for Priority showing id, name and sort order."""
    list_display = ('id', 'name', 'order')


admin.site.register(Priority, PriorityAdmin)
# Todo uses the default ModelAdmin.
admin.site.register(Todo)
|
import requests
import argparse
class RxnormClassMeds():
    """Collects the drugs of an ATC class from the RxNav REST API.

    Builds self.drug_dict mapping drug name -> {'drug_id': rxcui,
    'drug_doseforms': [{'doseform_name', 'doseform_code'}, ...]}.

    NOTE(review): performs one HTTP request per drug with no timeout and no
    status-code check, and only the first conceptGroup of the DFG response
    is consulted — confirm these are acceptable.
    """

    def __init__(self, classid):
        self.classid = classid
        # First request: all drug members of the ATC class.
        r = requests.get(f'https://rxnav.nlm.nih.gov/REST/rxclass/classMembers.json?classId={classid}&relaSource=ATC')
        drugs = r.json()['drugMemberGroup']['drugMember']
        self.drug_dict = {}
        for drug in drugs:
            drug_name = drug['minConcept']['name']
            drug_id = drug['minConcept']['rxcui']
            # Second request per drug: its related dose-form groups (DFG).
            r = requests.get(f'https://rxnav.nlm.nih.gov/REST/rxcui/{drug_id}/related.json?tty=DFG')
            try:
                drug_doseform = r.json()['relatedGroup']['conceptGroup'][0]['conceptProperties']
                has_doseform = True
            except KeyError:
                # No DFG data for this drug: it is omitted from drug_dict.
                has_doseform = False
            if has_doseform:
                drug_doseform_list = []
                for doseform in drug_doseform:
                    drug_doseform_list.append({'doseform_name':doseform['name'], 'doseform_code':doseform['umlscui']})
                self.drug_dict[f'{drug_name}'] = {'drug_id':drug_id, 'drug_doseforms':drug_doseform_list}
        # print(self.drug_dict)
def main():
    """CLI entry point: build RxnormClassMeds for the requested ATC class
    (defaulting to A10AE when no -c/--classid argument is given)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--classid', help='Class id for rxnorm.', type=str)
    args = parser.parse_args()
    classid = 'A10AE' if args.classid is None else args.classid
    RxnormClassMeds(classid=classid)


if __name__ == '__main__':
    main()
|
>>> # determine the even numbers
>>> genap = lambda x: x%2 == 0
>>> list(filter(genap, range(11)))
[0, 2, 4, 6, 8, 10] |
def knapsackwithoutrepsbu(weight, count, items):
    """Bottom-up subset-sum: the largest total weight <= `weight`
    achievable by taking each of the first `count` items at most once.

    Fix: the original also filled a second array B (the last item placed at
    each weight) that was never read — dead code, removed.
    """
    # reachable[w] == 1 iff some subset of the items sums to exactly w.
    reachable = [0] * (weight + 1)
    reachable[0] = 1  # the empty subset
    for i in range(count):
        # Iterate downwards so each item is used at most once.
        for j in range(weight, items[i] - 1, -1):
            if reachable[j - items[i]] == 1:
                reachable[j] = 1
    # Largest reachable weight not exceeding the capacity.
    k = weight
    while reachable[k] == 0:
        k -= 1
    return k
def main():
    """Read "capacity count" then the gold-bar weights from stdin and
    print the heaviest achievable load."""
    W, n = map(int, input().split())
    gold = [int(i) for i in input().split()]
    print(knapsackwithoutrepsbu(W, n, gold))


if __name__ == "__main__":
    main()
|
from random import *
# Fill a list with 50 random ints in [1, 100], then print the two smallest
# and the two largest values (via sorting).
l = []
for i in range(50):
    l.append(randint(1, 100))
print(l)
l.sort()
print(l[0], l[1])    # two smallest
print(l[-2], l[-1])  # two largest
import getpass
import warnings
import netrc
import os
from .system import SYSTEM
from . import config
__CACHE__ = None
def _filename(filename=None):
if filename is None:
if SYSTEM == 'win':
filename = os.path.join(os.environ.get("USERPROFILE"), "_netrc")
else:
filename = os.path.join(os.environ.get("HOME"), ".netrc")
return filename
def load(filename=None):
    """Parse the netrc file into the module-level __CACHE__ dict
    (host -> {'login', 'account', 'password'}).

    The file is read lazily once; passing an explicit `filename` forces a
    re-read from that file.
    """
    global __CACHE__
    if __CACHE__ is None or filename is not None:
        if __CACHE__ is None:
            __CACHE__ = dict()
        filename = _filename(filename=filename)
        try:
            credentials = netrc.netrc(file=filename)
            # Python 2 dict API (iteritems) — this module targets Python 2.
            for host, (login, account, password) in credentials.hosts.iteritems():
                __CACHE__[host] = dict(login = login,
                                       account = account,
                                       password = password)
        except Exception as e:
            # Any parse/IO failure resets the cache to empty.
            # NOTE(review): the exception is swallowed silently and `e` is
            # unused — confirm this best-effort behaviour is intended.
            __CACHE__ = dict()
def register(filename=None):
    """Write every cached credential back out in netrc format,
    overwriting the target file."""
    global __CACHE__
    filename = _filename(filename=filename)
    with open(filename, "w") as filehandler:
        # Python 2 dict API (iteritems) — this module targets Python 2.
        for host, credential in __CACHE__.iteritems():
            filehandler.write("machine " + host + "\n")
            # Only write the fields that are actually set for this host.
            if __CACHE__[host]["login"]:
                filehandler.write("login " + credential["login"] + "\n")
            if __CACHE__[host]["account"]:
                filehandler.write("account " + credential["account"] + "\n")
            if __CACHE__[host]["password"]:
                filehandler.write("password " + credential["password"] + "\n")
def retrieve(host, login='', password='', stdin=True):
    """Resolve (login, password) for `host`.

    Resolution order: explicit arguments, then the cached netrc file, then
    interactive prompts (when `stdin` is True).  The result is written back
    into the cache and, when the 'netrc_update' config flag is set,
    persisted via register().  The special login "TOKEN" switches prompts
    and warnings to token wording.
    """
    global __CACHE__
    load()
    if not login:
        if not host is None:
            try:
                login = __CACHE__[host]["login"]
            except:
                warnings.warn("login and/or password for '" + host + "' host has not been found in the '" + _filename() + "' file.", UserWarning)
        if not login and stdin:
            # raw_input: Python 2 — this module targets Python 2.
            sentence = "login for '" + host + "': "
            login = raw_input(sentence)
            if password:
                # A password was supplied, so an empty login is invalid:
                # keep prompting until one is given.
                while not login:
                    warnings.warn('Invalid login...', UserWarning)
                    login = raw_input(sentence)
    if login:
        if not password:
            if not host is None:
                try:
                    password = __CACHE__[host]["password"]
                except:
                    if login == "TOKEN":
                        warnings.warn("token for '" + host + "' host has not been found in the '" + _filename() + "' file.", UserWarning)
                    else:
                        warnings.warn("login and/or password for '" + host + "' host has not been found in the '" + _filename() + "' file.", UserWarning)
            if not password and stdin:
                # Prompt (without echo) until a non-empty secret is entered.
                if not login == "TOKEN":
                    sentence = login + "'s password"
                else:
                    sentence = "token"
                sentence += " for '" + host + "': "
                password = getpass.getpass(sentence)
                while not password:
                    if login == "TOKEN":
                        warnings.warn('Invalid token...', UserWarning)
                    else:
                        warnings.warn('Invalid password...', UserWarning)
                    password = getpass.getpass(sentence)
    else:
        # No login could be determined: return empty credentials.
        login = ''
        password = ''
    # Cache the outcome (account is not handled by this function).
    __CACHE__[host] = dict(login = login,
                           password = password,
                           account = None)
    config.load()
    if config.__CACHE__.get("netrc_update", False):
        register()
    return login, password
from .models import Department, User, Question, Answer
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django.utils.translation import gettext, gettext_lazy as _
class UserInlineInDepartment(admin.TabularInline):
    # Edit department membership via the User<->Department m2m through table.
    model = User.departments.through
    extra = 0


class AnswerInlineInQuestion(admin.TabularInline):
    # Edit a question's answers directly on the question page.
    model = Answer
    extra = 0
@admin.register(Department)
class DepartmentAdmin(admin.ModelAdmin):
    """Admin page for Department; member users are edited inline through
    the User<->Department m2m table.

    Fix: removed the commented-out experiments and the redundant `pass`
    left after the class gained a real body.
    """
    inlines = [
        UserInlineInDepartment,
    ]
@admin.register(User)
class UserAdmin(UserAdmin):
    """Admin for the custom User model.

    NOTE(review): the class name deliberately shadows the imported
    django.contrib.auth.admin.UserAdmin it inherits from — confirm.
    """
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('account_name', 'email', 'departments')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions')}),
        (_('ボケバンク'),{'fields':('favorite_answers',)}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    list_display = ('username', 'account_name', 'email', 'is_staff')
    search_fields = ('username', 'account_name', 'email')
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """Question admin with its answers editable inline."""
    fieldsets = (
        (None, {'fields': ('questioner', 'text',)}),
    )
    inlines = [
        AnswerInlineInQuestion,
    ]


@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin):
    """Answer admin.

    NOTE(review): only the panelist field is exposed; the answer text is
    absent from fieldsets — confirm this is intentional.
    """
    fieldsets = (
        (None, {'fields': ('panelist', )}),
    )
from points import Point
from math import sqrt
class Triangle:
    """A triangle described by its three corner Points."""

    def __init__(self, x1=0, y1=0, x2=0, y2=0, x3=0, y3=0):
        self.pt1 = Point(x1, y1)
        self.pt2 = Point(x2, y2)
        self.pt3 = Point(x3, y3)

    def __str__(self):
        return "[({0}, {1}), ({2}, {3}), ({4}, {5})]".format(
            self.pt1.x, self.pt1.y, self.pt2.x, self.pt2.y,
            self.pt3.x, self.pt3.y)

    def __repr__(self):
        return "Triangle({0}, {1}, {2}, {3}, {4}, {5})".format(
            self.pt1.x, self.pt1.y, self.pt2.x, self.pt2.y,
            self.pt3.x, self.pt3.y)

    def __eq__(self, other):
        # Two triangles are equal when all corresponding coordinates match.
        return ((self.pt1.x, self.pt1.y,
                 self.pt2.x, self.pt2.y,
                 self.pt3.x, self.pt3.y) ==
                (other.pt1.x, other.pt1.y,
                 other.pt2.x, other.pt2.y,
                 other.pt3.x, other.pt3.y))

    def __ne__(self, other):
        return not self == other

    def center(self):
        """Centroid: the arithmetic mean of the three vertices."""
        return Point((self.pt1.x + self.pt2.x + self.pt3.x) / 3,
                     (self.pt1.y + self.pt2.y + self.pt3.y) / 3)

    def area(self):
        """Area via Heron's formula from the three side lengths."""
        def side(p, q):
            dx = q.x - p.x
            dy = q.y - p.y
            return sqrt(dx * dx + dy * dy)

        a = side(self.pt1, self.pt2)
        b = side(self.pt1, self.pt3)
        c = side(self.pt2, self.pt3)
        return sqrt((a + b + c) * (a + b - c) * (a - b + c) * (-a + b + c)) / 4.

    def move(self, x, y):
        """Translate all three vertices by (x, y); returns self."""
        for pt in (self.pt1, self.pt2, self.pt3):
            pt.x += x
            pt.y += y
        return self
|
from PyObjCTools.TestSupport import TestCase
import SharedWithYouCore
class TestSWAction(TestCase):
    """PyObjC binding tests for SharedWithYouCore SWAction-related constants."""

    def test_constants(self):
        # Both identifiers must be bridged as typed string enums.
        self.assertIsTypedEnum(SharedWithYouCore.SWCollaborationIdentifier, str)
        self.assertIsTypedEnum(SharedWithYouCore.SWLocalCollaborationIdentifier, str)
|
import math
import os
import sys
import cv2
import numpy as np
class Line(object):
    """Line a*x + b*y + c = 0, with special handling for (near-)vertical and
    (near-)horizontal segments built via from_coords()."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c
        # Cached denominator of the point-to-line distance formula.
        self._norm = (a ** 2 + b ** 2) ** 0.5
        # When set, the line is x == const (vertical) / y == const (horizontal)
        # and the a/b/c coefficients are dummies.
        self._const_arg = None
        self._const_val = None

    @classmethod
    def from_coords(cls, x0, y0, x1, y1):
        """Build the line through (x0, y0) and (x1, y1)."""
        run = x1 - x0
        rise = y1 - y0
        if abs(run) < 1e-6:
            # Vertical: remember the (averaged) x coordinate only.
            vertical = cls(1, 1, 1)
            vertical._const_arg = (x0 + x1) / 2
            return vertical
        if abs(rise) < 1e-6:
            # Horizontal: remember the (averaged) y coordinate only.
            horizontal = cls(1, 1, 1)
            horizontal._const_val = (y0 + y1) / 2
            return horizontal
        return cls(1.0 / run, 1.0 / (-rise), y0 / rise - x0 / run)

    def dist_to(self, x, y):
        """Distance from point (x, y) to this line."""
        if self._const_arg is not None:
            return abs(x - self._const_arg)
        if self._const_val is not None:
            return abs(y - self._const_val)
        return abs(self.a * x + self.b * y + self.c) / self._norm
def is_circle(image):
    """Heuristic circle test.

    For every foreground pixel, take its distance to the farthest other
    pixel; on a circle these maxima are all (roughly) the diameter, so their
    standard deviation is small. The pairwise distances are computed in one
    vectorized numpy pass instead of the original nested Python loops
    (same O(n^2) work, but at C speed).
    """
    if have_solid_field(image):
        # A circle always encloses a hole in the background.
        return False
    pixels = np.vstack(image.nonzero()).transpose()
    diffs = pixels[:, None, :].astype(np.float64) - pixels[None, :, :]
    dists = np.sqrt((diffs ** 2).sum(axis=-1)).max(axis=1)
    return np.std(dists) < 1
def is_line(image):
    """Straight-line test: run PCA on the foreground pixels; a line has
    (near-)zero spread along the second principal axis."""
    if not have_solid_field(image):
        # A line cannot enclose a region, so the background must be one field.
        return False
    pixels = np.vstack(image.nonzero()).transpose().astype(np.float32)
    mean, eigenvectors = cv2.PCACompute(pixels, mean=None)
    projects = cv2.PCAProject(pixels, mean, eigenvectors)
    # Spread along the minor axis below one pixel => straight.
    return np.std(projects, axis=0)[1] < 1
def bfs(i, j, image, visited):
    """Flood-fill from (i, j): mark every 4-connected zero-valued pixel
    reachable from the seed as True in `visited` (mutated in place).

    Uses collections.deque so each dequeue is O(1); the original used
    list.pop(0), which is O(n) per pop.
    """
    from collections import deque  # local import keeps the helper self-contained

    visited[i][j] = True
    deltas = [(-1, 0), (0, -1), (1, 0), (0, 1)]
    queue = deque([(i, j)])
    while queue:
        x, y = queue.popleft()
        for dx, dy in deltas:
            v, u = x + dx, y + dy
            if (
                0 <= v < image.shape[0] and
                0 <= u < image.shape[1] and
                not visited[v][u] and
                image[v][u] == 0
            ):
                visited[v][u] = True
                queue.append((v, u))
def have_solid_field(image):
    """True iff all zero (background) pixels form one 4-connected region.

    The first background pixel encountered seeds a flood fill; any later
    background pixel the fill did not reach proves a second, separate
    background region exists (i.e. the shape encloses a hole).
    """
    seen = np.zeros_like(image)
    seeded = False
    for row in range(image.shape[0]):
        for col in range(image.shape[1]):
            if image[row][col] > 0.0:
                continue  # foreground pixel, irrelevant here
            if not seeded:
                bfs(row, col, image, seen)
                seeded = True
            elif not seen[row][col]:
                return False
    return True
def is_broken_line(image):
    """A broken (poly-)line encloses no hole — a single background field —
    yet fails the straight-line PCA test."""
    if not have_solid_field(image):
        return False
    return not is_line(image)
def get_max_dist(point, points):
    """Return (index, distance) of the row of `points` farthest from `point`.

    Distances are computed in one vectorized numpy call instead of the
    original per-point Python loop.
    """
    dists = np.linalg.norm(points - point, axis=1)
    argmax = int(np.argmax(dists))
    return argmax, dists[argmax]
def get_diameter_points(image):
    """Return the pair of foreground pixels realizing the maximum pairwise
    distance — the 'diameter' of the point cloud. O(n^2) overall."""
    pixels = np.vstack(image.nonzero()).transpose().astype(np.float32)
    # Each entry is (i, (argmax_index, distance)) for pixel i.
    dists = [(i, get_max_dist(point, pixels))
             for i, point in enumerate(pixels)]
    # Sort by distance, descending; the first entry holds the diameter pair.
    dists = sorted(dists, key=lambda e: -e[1][1])
    return pixels[dists[0][0]], pixels[dists[0][1][0]]
def parse_triangle_corners(image):
    """Return three candidate corners: the foreground pixel farthest from the
    diameter line, followed by the two diameter end points."""
    s, e = get_diameter_points(image)
    x0, y0 = s
    x1, y1 = e
    line = Line.from_coords(x0, y0, x1, y1)
    points = np.vstack(image.nonzero()).transpose().astype(np.float32)
    # The third corner is the pixel with the largest distance to that line.
    amx = np.argmax([line.dist_to(x, y) for x, y in points])
    return points[amx], s, e
def is_triangle(image):
    """Triangle test: erase the three candidate edges with thick cv2 lines;
    if almost nothing (< 5%) of the drawing survives, it was a triangle."""
    if have_solid_field(image):
        return False
    corner_points = parse_triangle_corners(image)
    # Pixel coordinates are (row, col); cv2 expects (x, y), hence the reversal.
    triangle_points = [tuple(pt[::-1]) for pt in corner_points]
    grid = image.copy()
    for i, pt in enumerate(triangle_points):
        neig = triangle_points[(i + 1) % 3]
        # NOTE(review): thickness np.float32(2.5) is not an integer; recent
        # OpenCV versions reject non-int thickness — confirm the cv2 version
        # in use accepts this.
        grid = cv2.line(grid, pt, neig, 0, np.float32(2.5))
    points = np.vstack(image.nonzero()).transpose().astype(np.float32)
    new_points = np.vstack(grid.nonzero()).transpose().astype(np.float32)
    # Fraction of pixels NOT covered by the three drawn edges.
    ratio = len(new_points) / len(points)
    return ratio < 0.05
def is_right_triangle(image):
    """Triangle whose two shorter sides satisfy Pythagoras within 10 pixels."""
    if not is_triangle(image):
        return False
    corners = parse_triangle_corners(image)
    # Side lengths between consecutive corners, shortest first.
    sides = sorted(
        np.linalg.norm(corners[k] - corners[(k + 1) % 3]) for k in range(3)
    )
    expected_hypotenuse = (sides[0] ** 2 + sides[1] ** 2) ** 0.5
    return abs(expected_hypotenuse - sides[2]) < 10
def is_equilateral_triangle(image):
    """Triangle whose three side lengths are (nearly) equal."""
    if not is_triangle(image):
        return False
    corner_points = parse_triangle_corners(image)
    segments = [np.linalg.norm(corner - corner_points[(i + 1) % 3])
                for i, corner in enumerate(corner_points)]
    # np.std already centers its input; the original's `segments - mean`
    # subtraction was a no-op and has been dropped.
    return np.std(segments) < 0.5
def is_isosceles_triangle(image):
    """Triangle with at least one pair of (nearly) equal sides."""
    if not is_triangle(image):
        return False
    corners = parse_triangle_corners(image)
    # The three side lengths between consecutive corners.
    sides = [np.linalg.norm(corners[k] - corners[(k + 1) % 3]) for k in range(3)]
    # Spread within each of the three side pairs; any tight pair qualifies.
    pairs = [(sides[k], sides[(k + 1) % 3]) for k in range(3)]
    spreads = np.array([np.std(pair) for pair in pairs])
    return any(spreads < 0.6)
def get_dist(a, b):
    """Euclidean distance between points a and b (numpy arrays)."""
    difference = a - b
    return np.linalg.norm(difference)
def is_rectangle(image, only_square=False):
    """Rectangle test via the two diagonals.

    The first diameter search yields one diagonal; its end points are blotted
    out so a second search finds the other pair of corners. Equal diagonals,
    (pairwise) equal edges, and edge lines that cover nearly every pixel
    together indicate a rectangle. With only_square=True all four edges must
    be equal.
    """
    diag_coord = get_diameter_points(image)
    # Erase the first diagonal's end points (filled circles of radius 3) so
    # the next diameter search returns the remaining two corners.
    grid = cv2.circle(image.copy(), tuple(diag_coord[0][::-1]), 3, 0, -1)
    grid = cv2.circle(grid, tuple(diag_coord[1][::-1]), 3, 0, -1)
    diags_coords = [diag_coord, get_diameter_points(grid)]
    # A rectangle's two diagonals have equal length.
    std = np.std([get_dist(*diags_coords[0]), get_dist(*diags_coords[1])])
    if std > 0.65:
        return False
    # The four edges connect a corner of one diagonal with a corner of the other.
    edges = [
        [diags_coords[0][0], diags_coords[1][0]],
        [diags_coords[0][0], diags_coords[1][1]],
        [diags_coords[0][1], diags_coords[1][0]],
        [diags_coords[0][1], diags_coords[1][1]],
    ]
    if only_square:
        # Square: all four edge lengths must agree.
        std = np.std([get_dist(*edge) for edge in edges])
    else:
        # Rectangle: opposite edges (pairs 0/3 and 1/2) must agree.
        std = np.std([get_dist(*edges[0]), get_dist(*edges[3])])
        std = max(std, np.std([get_dist(*edges[1]), get_dist(*edges[2])]))
    if std > 1.6:
        return False
    # Finally, drawing the four edges in background color must erase almost
    # every foreground pixel; found_pts_number accumulates the erased count.
    pts_number = image.nonzero()[0].shape[0]
    found_pts_number = 0
    grid = image.copy()
    for edge in edges:
        pts = [tuple(pt[::-1]) for pt in edge]
        grid = cv2.line(grid, *pts, 0, 3)
        rest = pts_number - found_pts_number
        found_pts_number += rest - grid.nonzero()[0].shape[0]
    diff = abs(pts_number - found_pts_number)
    return diff < 10
def is_square(image):
    """A square is a rectangle whose four edges are all equal."""
    return is_rectangle(image, True)
def is_ellipse(image):
    """Ellipse test: estimate center, axes and tilt from the diameter pair,
    draw that ellipse in background color, and check it explains >= 60% of
    the foreground pixels.

    Bug fix: ``np.int`` was removed in NumPy 1.24 (AttributeError at runtime);
    the builtin ``int`` is the documented replacement.
    """
    if have_solid_field(image):
        return False
    # Reuse the triangle-corner helper: da/db are the diameter end points,
    # c is the contour point farthest from the line through them.
    c, da, db = parse_triangle_corners(image)
    line = Line.from_coords(*da, *db)
    b = int(round(line.dist_to(*c)))          # semi-minor axis estimate
    center = (da + db) / 2
    a = int(round(get_dist(da, center)))      # semi-major axis estimate
    center = tuple(center[::-1].astype(int))  # (row, col) -> (x, y), int for cv2
    axes = (a, b)
    # Recompute the line in (x, y) order to estimate the tilt angle.
    line = Line.from_coords(*(da[::-1]), *(db[::-1]))
    tg = -line.a/line.b
    ang = int(round(180 + math.atan(tg)*180/math.pi)) % 180
    grid = image.copy()
    # Erase the fitted ellipse; whatever survives was not explained by it.
    grid = cv2.ellipse(grid, center, axes, ang, 0, 360, 0, 2)
    pts_number = image.nonzero()[0].shape[0]
    left_number = grid.nonzero()[0].shape[0]
    ratio = left_number / pts_number
    return ratio < 0.4
def main():
    """Run every shape detector over each .bmp under images/<shape>/ and print
    the labels that fire for each file (a file may match several detectors)."""
    path = 'images'
    # One sub-folder per ground-truth shape category.
    shapes = [
        'circles',
        'ellipses',
        'lines',
        'broken-lines',
        'rectangles',
        'rotated-rectangles',
        'squares',
        'right-triangles',
        'isosceles-triangles',
        'equilateral-triangles',
        'rotated-squares',
    ]
    # (label, predicate) pairs applied to every image.
    detectors = [
        ('Circle', is_circle),
        ('Line', is_line),
        ('Broken line', is_broken_line),
        ('Triangle', is_triangle),
        ('Right triangle', is_right_triangle),
        ('Equilateral triangle', is_equilateral_triangle),
        ('Isosceles triangle', is_isosceles_triangle),
        ('Rectangle', is_rectangle),
        ('Square', is_square),
        ('Ellipse', is_ellipse),
    ]
    for shape in shapes:
        folder = os.path.join(path, shape)
        for file_ in os.listdir(folder):
            if file_.endswith(".bmp"):
                file_path = os.path.join(folder, file_)
                # Grayscale float32, the format all detectors expect.
                image = cv2.cvtColor(cv2.imread(file_path), cv2.COLOR_BGR2GRAY)
                image = image.astype(np.float32)
                labels = [label for label, det in detectors if det(image)]
                print('{0} {1}: {2}'.format(shape, file_, ', '.join(labels)))
if __name__ == "__main__":
    main()
|
'''
Created on Feb 6, 2019
@author: Arun Sarita
'''
import POM
import naukari_homepage
import time
from selenium.webdriver.common.action_chains import ActionChains
import login
# --- Navigate to the profile page ---------------------------------------
# Hover over the main menu entry, then click its first sub-menu item.
# NOTE(review): absolute XPaths and fixed sleeps are brittle; explicit waits
# keyed to stable locators would be more robust.
time.sleep(5)
main_m=POM.f_xpath('/html/body/div[1]/div/div/ul[2]/li[2]/a/div[2]')
action1=ActionChains(POM.driver)
action1.move_to_element(main_m)
action1.perform()
POM.f_xpath('/html/body/div[1]/div/div/ul[2]/li[2]/div/ul[1]/li[1]/a').click()
time.sleep(5)
def resu_head():
    """Update the resume headline: open the edit dialog, prompt the user for
    the new text on stdin, type it into the field and save."""
    POM.f_xpath('//*[@id="lazyResumeHead"]/div/div/div/div[1]/span[2]').click()
    time.sleep(5)
    res_he=input('Enter Your Resume Heading:=')
    time.sleep(5)
    rhl=POM.f_id('resumeHeadlineTxt')
    rhl.clear()
    rhl.send_keys(res_he)
    time.sleep(5)
    # Save button of the headline dialog.
    POM.f_xpath('/html/body/div[5]/div[5]/div[2]/form/div[3]/div/button').click()
def key_skill():
    """Update the key-skills section: open its edit dialog, prompt for the
    skills string on stdin, type it and save."""
    POM.f_xpath('//*[@id="lazyKeySkills"]/div/div/div/div[1]/span[2]').click()
    time.sleep(10)
    ks=input('Enter Your Key Skills')
    time.sleep(5)
    ks1=POM.f_id('keySkillSugg')
    ks1.clear()
    ks1.send_keys(ks)
    time.sleep(5)
    POM.f_id('saveKeySkills').click()
def Add_emp():
    """Add an employment entry: designation, organization, current-company
    flag and start year are all prompted on stdin and typed into the form.

    Improvement: the yes/no answer is now matched case-insensitively (and
    stripped), generalizing the original Yes/YES/yes and No/no/NO whitelists.
    """
    POM.f_xpath('//*[@id="lazyEmployment"]/div/div/div/div[1]/span[2]').click()
    time.sleep(5)
    desg = POM.f_id('designationSugg')
    time.sleep(5)
    desg1 = input('Enter Your Designation')
    time.sleep(5)
    desg.clear()
    desg.send_keys(desg1)
    time.sleep(5)
    your_org = POM.f_id('companySugg')
    you_org1 = input('Enter Your Organization')
    time.sleep(5)
    your_org.clear()
    your_org.send_keys(you_org1)
    time.sleep(5)
    opt = input("Is this is your current company Yes or No")
    answer = opt.strip().lower()
    if answer == 'yes':
        POM.f_xpath('//*[@id="employmentForm"]/div[4]/div[2]/label').click()
    elif answer == 'no':
        POM.f_xpath('//*[@id="employmentForm"]/div[4]/div[3]/label').click()
    # Any other answer leaves the radio buttons untouched, as before.
    time.sleep(5)
    strtfrom = POM.f_id('startedYearFor')
    time.sleep(5)
    str_f = input('Enter start Year')
    strtfrom.clear()
    strtfrom.send_keys(str_f)
# --- Run the three profile-update steps in order -------------------------
resu_head()
time.sleep(10)
key_skill()
time.sleep(10)
Add_emp()
print('Log Out')
from django.apps import AppConfig
class StudentRegisterationConfig(AppConfig):
    """Django app configuration for the student registration app.

    NOTE(review): 'registeration' is misspelled, but `name` must match the
    actual package path, so it cannot be corrected in this file alone.
    """
    name = 'student_registeration'
|
import sys
import math
# Interval DP (matrix-chain style): minimal total cost of repeatedly merging
# adjacent piles, where merging the range [start..end] costs the sum of the
# pile sizes in that range. One test case per iteration, read from stdin.
T=int(sys.stdin.readline().strip())
while T :
    T-=1
    K=int(sys.stdin.readline().strip())
    papers=list(map(int,sys.stdin.readline().strip().split(' ')))
    maximum = math.inf
    # matrix[i][j] = minimal cost to merge piles i..j (1-based); inf = unsolved.
    matrix = [[maximum] * (K+1) for _ in range(K+1)]
    # subsum[i] = papers[0] + ... + papers[i-1] (prefix sums for range costs).
    subsum=[0] *(K+1)
    for i in range(1,K+1):
        subsum[i]=subsum[i-1]+papers[i-1]
        matrix[i][i]=0  # a single pile needs no merging
    # Grow the interval length; try every split point mid within [start..end].
    for gap in range(1,K):
        for start in range(1,K-gap+1):
            end=start+gap
            for mid in range(start,end):
                beforecost=matrix[start][mid]+matrix[mid+1][end]
                matrix[start][end]=min(matrix[start][end], beforecost)
            # Plus the cost of the final merge over the whole range.
            matrix[start][end]+=subsum[end]-subsum[start-1]
    print(matrix[1][K])
|
import csv
from app import app
from app.models import db, Category, Dish
# Seed the database from the two delivery CSV exports.
with app.app_context():
    category_entities = []
    with open('delivery_categories.csv') as f:
        reader = csv.reader(f)
        for category_id, title in reader:
            # Skip the header row (first cell literally "id").
            if category_id == 'id':
                continue
            category_entity = Category(title=title)
            db.session.add(category_entity)
            # Kept in file order so dishes can look categories up by
            # csv id - 1; assumes csv ids are consecutive 1..N — TODO confirm.
            category_entities.append(category_entity)
        db.session.commit()
    with open('delivery_items.csv') as f:
        reader = csv.reader(f)
        for dish_id, title, price, description, picture, category_id in reader:
            if dish_id == 'id':
                continue
            dish_entity = Dish(title=title,
                               price=price,
                               description=description,
                               picture=picture,
                               category=category_entities[int(category_id) - 1])
            db.session.add(dish_entity)
        db.session.commit()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import datetime
import re
from PIL import Image
import numpy as np
class ImageConverter:
    """画像変換クラス (image conversion utility).

    Bug fixes vs. the original:
      * mutable default argument ``files=[]`` replaced by the None idiom;
      * ``return False`` from ``__init__`` (which raises TypeError) removed;
      * ``except not SyntaxError:`` (which never matches) replaced by a real
        ``except Exception`` fallback;
      * the per-pixel getpixel/putpixel double loop replaced by one
        vectorized numpy inversion + Image.fromarray.
    """

    def __init__(self, files=None):
        """Store the list of image file names to convert.

        files: list of paths; None (or omitted) means an empty list.
        """
        if files is None:
            # Original printed this and then `return False`-ed out of
            # __init__, which raises TypeError; warn and continue instead.
            print("Error:file is empty")
            files = []
        self.files = files

    def invert_color(self):
        """画像の色を反転して保存 (invert each image's colors and save it).

        Returns the list of file names written; files processed before a
        failure are still returned (best-effort, as in the original).
        """
        print("\nImageConverter invert_color : 色の反転\n")
        convert_images = []
        try:
            for image_name in self.files:
                reply_image = Image.open(image_name)
                # One numpy pass; convert('RGB') also tolerates palette/RGBA
                # inputs the original per-pixel unpacking would crash on.
                img_pixels = np.asarray(reply_image.convert('RGB'), dtype=np.uint8)
                new_image = Image.fromarray(255 - img_pixels, 'RGB')
                # Output name derives from the first '.' in the input name.
                convert_image_name = image_name.replace(".", "_invert_color.", 1)
                new_image.save(convert_image_name)
                convert_images.append(convert_image_name)
                print('画像 ' + image_name + ' を変換し ' + convert_image_name + ' を保存しました')
        except SyntaxError:
            # PIL historically raises SyntaxError on broken image files.
            print('Error:SyntaxError')
        except Exception:
            print('Error:画像の変換に失敗しました')
        return convert_images
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import sys, os
from decimal import Decimal
import hm_prep as hm
import ms_prep as ms
import random
############ Model Selection ############
# Dataset locations: two source species (human, mouse) and the target (rat).
HM_POS_PATH = 'data/human/omni_polyA_data/positive/'
HM_NEG_PATH = 'data/human/omni_polyA_data/negative/'
MS_POS_PATH = 'data/mouse/bl_mouse/positive/'
MS_NEG_PATH = 'data/mouse/bl_mouse/negative/'
RAT_NEG_PATH = 'data/rat/negative/'
RAT_POS_PATH = 'data/rat/positive/'
BATCH_SIZE = 256
PATCH_SIZE = 10    # convolution filter height along the sequence axis
DEPTH = 16         # number of convolution filters
NUM_HIDDEN = 128   # fully-connected hidden units
# Sequences are padded by PATCH_SIZE-1 on each side (see pad_dataset).
SEQ_LEN = 206 + 2*PATCH_SIZE-2
NUM_CHANNELS = 4   # presumably one channel per nucleotide — TODO confirm
NUM_LABELS = 2
NUM_EPOCHS = 100
NUM_FOLDS = 5
HYPER_DICT = None  # when set, fixes the hyper-parameters (see gen_hyper_dict)
############ **************** ############
FLAGS = tf.app.flags.FLAGS
def proportion(dataset, labels, fold):
    """Return the subset of (dataset, labels) covered by the fold indices in
    fold['round'], after splitting the data into 10 equal folds.

    Bug fix: the original computed the subset but never returned it, making
    every call a silent no-op.
    """
    dataset_folds = np.array_split(dataset, 10)
    labels_folds = np.array_split(labels, 10)
    dataset = np.concatenate([dataset_folds[i] for i in fold['round']], axis=0)
    labels = np.concatenate([labels_folds[i] for i in fold['round']], axis=0)
    return dataset, labels
def pad_dataset(dataset, labels):
    """Pad each sequence with 0.25 (uniform base probability) on both ends
    and one-hot encode the labels as float32.

    The padded height is height + 2*PATCH_SIZE - 2 so a 'VALID' convolution
    still covers every original position.
    """
    margin = PATCH_SIZE - 1
    padded_shape = [dataset.shape[0],
                    dataset.shape[1] + 2 * margin,
                    dataset.shape[2],
                    dataset.shape[3]]
    padded = np.full(padded_shape, 0.25, dtype=np.float32)
    padded[:, margin:-margin, :, :] = dataset
    one_hot = (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float32)
    return padded, one_hot
def pixel_level_shuffle(data):
    """Destroy spatial structure: independently permute the flattened values
    of every sample. Feeds the model's second (shuffled) input branch.

    Returns shape (-1, SEQ_LEN * NUM_CHANNELS); each call draws fresh random
    permutations, so the output is not reproducible without seeding numpy.
    """
    data = np.reshape(data,(data.shape[0],-1))
    shuffled_data = []
    for each in data:
        # A fresh permutation per sample (not one shared permutation).
        permutation = np.random.permutation(data.shape[1])
        each = each[permutation]
        shuffled_data.append(each)
    shuffled_data = np.array(shuffled_data)
    shuffled_data = np.reshape(shuffled_data, (-1, SEQ_LEN * NUM_CHANNELS))
    return shuffled_data
def accuracy(predictions, labels):
    """Percentage of rows where the argmax prediction equals the argmax label."""
    hits = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 100.0 * np.sum(hits) / predictions.shape[0]
def shuffle(dataset, labels, randomState=None):
    """Shuffle dataset and labels in unison.

    randomState: optional numpy RandomState for a reproducible permutation;
    when omitted, the global numpy RNG is used.
    """
    rng = np.random if randomState is None else randomState
    permutation = rng.permutation(labels.shape[0])
    return dataset[permutation, :, :], labels[permutation]
def gen_hyper_dict(hyper_dict=None):
    """Return the hyper-parameter dictionary, sampling a fresh one when none
    is supplied (a supplied dict is returned unchanged).

    Bug fix: the original dict literal listed 'tf_keep_prob' twice, so the
    np.random.choice draw was silently discarded by the later constant entry;
    only the effective value (0.2) is kept.
    """
    def rand_log(a, b):
        # Sample log-uniformly from [a, b].
        x = np.random.sample()
        return 10.0 ** ((np.log10(b) - np.log10(a)) * x + np.log10(a))

    def rand_sqrt(a, b):
        # Sample from [a, b] with density increasing toward b.
        x = np.random.sample()
        return (b - a) * np.sqrt(x) + a

    if hyper_dict is None:
        hyper_dict = {
            'tf_learning_rate': 0.3,
            'tf_momentum': rand_sqrt(.90, .99),
            'tf_motif_init_weight': rand_log(1e-2, 1),
            'tf_fc_init_weight': rand_log(1e-2, 1),
            'tf_ngroups': np.random.choice([2, 4, 8]),
            'tf_mlp_init_weight': rand_log(1e-2, 10),
            'tf_concat_init_weight': rand_log(1e-2, 1),
            'lambda': 0.06,
            'tf_keep_prob': 0.2,
        }
    return hyper_dict
# Disable print
def block_print():
    """Silence stdout by redirecting it to os.devnull (undo with enable_print)."""
    sys.stdout = open(os.devnull, 'w')
def produce_labels(labels):
    """One-hot encode a vector of 0/1 class indices as a float32 matrix."""
    one_hot = np.equal(np.arange(2), labels[:, None])
    return one_hot.astype(np.float32)
# Restore print
def enable_print():
    """Restore the interpreter's original stdout (sys.__stdout__)."""
    sys.stdout = sys.__stdout__
def train(source_1, source_2, target, hyper_dict):
    """Build and train the two-branch model (CNN on sequences + MLP on
    pixel-shuffled inputs, combined with a HEX-style projection) on the two
    source species, tracking accuracies on the sources, the validation split
    and the whole target species.

    Returns the (train, valid, source_1, source_2, target) accuracy
    histories, truncated to the best-validation epoch when early stopping
    triggers.

    Bug fixes vs. the original:
      * `valid_losses` is now initialized before being appended to (NameError);
      * the unused `shuffled_domains = domain[...]` line referenced an
        undefined `domain` (NameError) — removed;
      * the target accuracy now uses `target_pred` (was the undefined
        `test_2_pred`).
    """
    graph = tf.Graph()
    with graph.as_default():
        # Load hyper-params
        tf_learning_rate = hyper_dict['tf_learning_rate']
        tf_momentum = hyper_dict['tf_momentum']
        tf_keep_prob = hyper_dict['tf_keep_prob']
        tf_mlp_init_weight = hyper_dict['tf_mlp_init_weight']
        tf_concat_init_weight = hyper_dict['tf_concat_init_weight']
        # Input data.
        tf_train_dataset = tf.placeholder(
            tf.float32, shape=(BATCH_SIZE, SEQ_LEN, 1, NUM_CHANNELS))
        tr_shuffle = tf.placeholder(
            tf.float32, shape=(BATCH_SIZE, NUM_CHANNELS))
        tf_train_labels = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_LABELS))
        source_2_dataset = tf.constant(source_2['test_dataset'])
        source_2_shuffle = tf.constant(pixel_level_shuffle(source_2['test_dataset']))
        source_2_label = tf.constant(source_2['test_labels'])
        source_1_dataset = tf.constant(source_1['test_dataset'])
        source_1_shuffle = tf.constant(pixel_level_shuffle(source_1['test_dataset']))
        source_1_label = tf.constant(source_1['test_labels'])
        # The whole target species (train+valid+test) is evaluation-only.
        target_dataset = tf.constant(np.concatenate([target['train_dataset'], target['valid_dataset'], target['test_dataset']]))
        target_shuffle = tf.constant(pixel_level_shuffle(np.concatenate([target['train_dataset'], target['valid_dataset'], target['test_dataset']])))
        target_label = tf.constant(np.concatenate([target['train_labels'], target['valid_labels'], target['test_labels']]))
        tf_train_valid_dataset = tf.constant(np.concatenate([source_1['train_dataset'], source_2['train_dataset']]))
        tf_train_valid_shuffle = tf.constant(pixel_level_shuffle(np.concatenate([source_1['train_dataset'], source_2['train_dataset']])))
        tf_train_valid_label = tf.constant(np.concatenate([source_1['train_labels'], source_2['train_labels']]))
        tf_valid_dataset = tf.constant(np.concatenate([source_1['valid_dataset'], source_2['valid_dataset']]))
        tf_valid_shuffle = tf.constant(pixel_level_shuffle(np.concatenate([source_1['valid_dataset'], source_2['valid_dataset']])))
        tf_valid_label = tf.constant(np.concatenate([source_1['valid_labels'], source_2['valid_labels']]))
        # Variables.
        conv_weights = tf.Variable(tf.truncated_normal(
            [PATCH_SIZE, 1, NUM_CHANNELS, DEPTH], stddev=1e-1))
        conv_weights_1 = tf.Variable(tf.truncated_normal(
            [PATCH_SIZE, 1, NUM_CHANNELS, DEPTH], stddev=1e-1))
        conv_biases = tf.Variable(tf.zeros([DEPTH]))
        conv_biases_1 = tf.Variable(tf.zeros([DEPTH]))
        layer1_weights = tf.Variable(tf.truncated_normal(
            [21*DEPTH, NUM_HIDDEN], stddev=1e-1))
        layer1_biases = tf.Variable(tf.constant(0.0, shape=[NUM_HIDDEN]))
        layer2_weights = tf.Variable(tf.truncated_normal(
            [NUM_HIDDEN, NUM_LABELS], stddev=1e-1))
        layer2_biases = tf.Variable(tf.constant(0.0, shape=[NUM_LABELS]))
        mlp_1_weights = tf.Variable(tf.truncated_normal(
            [4, 128], stddev=tf_mlp_init_weight))
        mlp_out_weights = tf.Variable(tf.truncated_normal(
            [16, 3], stddev=tf_mlp_init_weight))
        mlp_2_weights = tf.Variable(tf.truncated_normal(
            [512, 128], stddev=tf_mlp_init_weight))
        concat_weights = tf.Variable(tf.truncated_normal(
            [128+128, NUM_LABELS], stddev=1e-1))
        mlp_1_biases = tf.Variable(tf.constant(1.0, shape=[128]))
        mlp_2_biases = tf.Variable(tf.constant(0.0, shape=[128]))
        mlp_out_biases = tf.Variable(tf.constant(0.0, shape=[3]))
        concat_biases = tf.Variable(tf.constant(1.0, shape=[2]))
        conv1_w = tf.Variable(tf.truncated_normal(
            [3, 1, NUM_CHANNELS, DEPTH], stddev=1e-1))
        conv1_b = tf.Variable(tf.zeros([DEPTH]))
        # Store Variables
        weights = {}
        weights['conv_weights'] = conv_weights
        weights['conv_biases'] = conv_biases
        weights['layer1_weights'] = layer1_weights
        weights['layer1_biases'] = layer1_biases
        weights['layer2_weights'] = layer2_weights
        weights['layer2_biases'] = layer2_biases
        weights['mlp_1_weights'] = mlp_1_weights
        weights['mlp_1_biases'] = mlp_1_biases
        weights['mlp_2_weights'] = mlp_2_weights
        weights['mlp_2_biases'] = mlp_2_biases
        weights['concat_weights'] = concat_weights
        weights['concat_biases'] = concat_biases
        weights['back_biases'] = mlp_out_biases
        weights['back_weights'] = mlp_out_weights
        # Model.
        def model(data, shuffle, label, drop=True):
            # NOTE(review): `drop` is accepted but never used; dropout always
            # applies tf_keep_prob — confirm eval paths should keep dropout on.
            Hex = True
            # MLP branch on the pixel-shuffled input.
            mlp_1 = tf.nn.relu(tf.matmul(shuffle, mlp_1_weights) + mlp_1_biases)
            # CNN branch.
            conv = tf.nn.conv2d(data, conv_weights, [1, 1, 1, 1], padding='VALID')
            hidden = tf.nn.relu(conv)
            hidden = tf.nn.max_pool(hidden, [1, 10, 1, 1], [1, 10, 1, 1], padding='VALID')
            shape = hidden.get_shape().as_list()
            motif_score = tf.reshape(hidden, [shape[0], shape[1] * DEPTH])
            hidden_nodes = tf.nn.dropout(tf.nn.relu(tf.matmul(motif_score, layer1_weights) + layer1_biases),
                                         tf_keep_prob)
            # Three concatenations: both branches (loss path), CNN only
            # (prediction path), MLP only (the branch projected out below).
            concat_loss = tf.concat([hidden_nodes, mlp_1], 1)
            pad = tf.zeros_like(mlp_1, tf.float32)
            concat_pred = tf.concat([hidden_nodes, pad], 1)
            pad2 = tf.zeros_like(hidden_nodes, tf.float32)
            concat_H = tf.concat([pad2, mlp_1], 1)
            model_loss = tf.matmul(concat_loss, concat_weights) + concat_biases
            model_pred = tf.matmul(concat_pred, concat_weights) + concat_biases
            model_H = tf.matmul(concat_H, concat_weights) + concat_biases
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label,
                                                                          logits=model_loss))
            if Hex:
                # Project the joint logits onto the orthogonal complement of
                # the shuffled-branch logits before computing the loss.
                model_loss = tf.nn.l2_normalize(model_loss, 0)
                model_H = tf.nn.l2_normalize(model_H, 0)
                model_loss = model_loss - \
                    tf.matmul(tf.matmul(
                        tf.matmul(model_H, tf.matrix_inverse(tf.matmul(model_H, model_H, transpose_a=True))),
                        model_H, transpose_b=True), model_loss)
                loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label,
                                                                              logits=model_loss))
            return loss, model_pred
        # Training computation.
        loss, _ = model(tf_train_dataset, tr_shuffle, tf_train_labels, drop=True)
        # Optimizer.
        global_step = tf.Variable(0, trainable=False)  # counts steps taken
        stepOp = tf.assign_add(global_step, 1).op
        learning_rate = tf.train.exponential_decay(tf_learning_rate, global_step, 3000, 0.96)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        # Predictions for the training, validation, and test data.
        motif_train_prediction = {}
        train_loss, train_valid = model(tf_train_valid_dataset, tf_train_valid_shuffle, tf_train_valid_label, drop=True)
        train_prediction = tf.nn.softmax(train_valid)
        valid_loss, validation = model(tf_valid_dataset, tf_valid_shuffle, tf_valid_label, drop=True)
        valid_prediction = tf.nn.softmax(validation)
        _, source_1_out = model(source_1_dataset, source_1_shuffle, source_1_label, drop=True)
        source_1_prediction = tf.nn.softmax(source_1_out)
        _, source_2_out = model(source_2_dataset, source_2_shuffle, source_2_label, drop=True)
        source_2_prediction = tf.nn.softmax(source_2_out)
        _, target_out = model(target_dataset, target_shuffle, target_label, drop=True)
        target_prediction = tf.nn.softmax(target_out)
    # Kick off training
    train_resuts = []
    valid_results = []
    valid_losses = []  # fix: appended below but never initialized originally
    source_1_results = []
    source_2_results = []
    target_results = []
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        train_dataset = np.concatenate([source_1['train_dataset'], source_2['train_dataset']])
        train_labels = np.concatenate([source_1['train_labels'], source_2['train_labels']])
        np.random.seed()
        print('Initialized')
        print('Training accuracy at the beginning: %.1f%%' % accuracy(train_prediction.eval(), np.concatenate([source_1['train_labels'], source_2['train_labels']])))
        print('Validation accuracy at the beginning: %.1f%%' % accuracy(valid_prediction.eval(), np.concatenate([source_1['valid_labels'], source_2['valid_labels']])))
        for epoch in range(NUM_EPOCHS):
            # Fresh epoch-level shuffle of the combined source training data.
            permutation = np.random.permutation(train_labels.shape[0])
            shuffled_dataset = train_dataset[permutation, :, :]
            shuffled_labels = train_labels[permutation, :]
            # (removed) shuffled_domains = domain[permutation, :] — `domain`
            # was never defined and the value was never used.
            for step in range(shuffled_labels.shape[0] // BATCH_SIZE):
                offset = step * BATCH_SIZE
                batch_data = shuffled_dataset[offset:(offset + BATCH_SIZE), :, :, :]
                batch_labels = shuffled_labels[offset:(offset + BATCH_SIZE), :]
                batch_shuffle = pixel_level_shuffle(batch_data)
                feed_dict = {tf_train_dataset: batch_data, tr_shuffle: batch_shuffle, tf_train_labels: batch_labels}
                _, l = session.run(
                    [optimizer, loss], feed_dict=feed_dict)
                session.run(stepOp)
            train_resuts.append(accuracy(train_prediction.eval(), np.concatenate([source_1['train_labels'], source_2['train_labels']])))
            valid_pred = valid_prediction.eval()
            print('validation loss', valid_loss.eval())
            valid_losses.append(valid_loss.eval())
            valid_results.append(accuracy(valid_pred, np.concatenate([source_1['valid_labels'], source_2['valid_labels']])))
            source_1_pred = source_1_prediction.eval()
            source_1_results.append(accuracy(source_1_pred, source_1['test_labels']))
            source_2_pred = source_2_prediction.eval()
            source_2_results.append(accuracy(source_2_pred, source_2['test_labels']))
            target_pred = target_prediction.eval()
            # fix: was the undefined `test_2_pred`.
            target_results.append(accuracy(target_pred, np.concatenate([target['train_labels'], target['valid_labels'], target['test_labels']])))
            print('Training accuracy at epoch %d: %.1f%%' % (epoch, train_resuts[-1]))
            print('Validation accuracy: %.1f%%' % valid_results[-1])
            print('target accuracy:%.1f%%' % target_results[-1])
            # Early stopping based on validation results
            if epoch > 10 and valid_results[-11] > max(valid_results[-10:]):
                # Roll back the last 10 (non-improving) epochs before returning.
                train_resuts = train_resuts[:-10]
                valid_results = valid_results[:-10]
                source_1_results = source_1_results[:-10]
                source_2_results = source_2_results[:-10]
                target_results = target_results[:-10]
                return train_resuts, valid_results, source_1_results, source_2_results, target_results
        return train_resuts, valid_results, source_1_results, source_2_results, target_results
def main(_):
    """Cross-validated training over increasing amounts of source data.

    For each round r, folds 0..r of the (human, mouse) source data are used
    (via the `folds` dict); rat data is the held-out target species.

    Bug fixes vs. the original:
      * removed `print(valid_loss)` — `valid_loss` is local to train() and
        was a NameError here;
      * the "Source_2 training accuracy" line now prints source_2_results
        (it printed source_1_results).
    """
    for rounds in range(0, 10):
        print('rounds:%d' % rounds)
        hyper_dict = gen_hyper_dict(HYPER_DICT)
        source_1_pos_data, source_1_pos_labels, source_1_neg_data, source_1_neg_labels = hm.produce_dataset(NUM_FOLDS, HM_POS_PATH, HM_NEG_PATH)
        source_2_pos_data, source_2_pos_labels, source_2_neg_data, source_2_neg_labels = ms.produce_dataset(NUM_FOLDS, MS_POS_PATH,
                                                                                                            MS_NEG_PATH)
        # NOTE(review): rat data is loaded through the mouse loader `ms` —
        # confirm that is intentional (the on-disk format may be identical).
        target_pos_data, target_pos_labels, target_neg_data, target_neg_labels = ms.produce_dataset(NUM_FOLDS, RAT_POS_PATH,
                                                                                                    RAT_NEG_PATH)
        # Cross validate
        train_accuracy_split = []
        valid_accuracy_split = []
        source_2_accuracy_split = []
        target_accuracy_split = []
        source_1_accuracy_split = []
        # Rounds 0..r of the data are included this iteration.
        folds = {'round': [k for k in range(rounds + 1)]}
        for i in range(NUM_FOLDS):
            split = {
                'train': [(i + j) % NUM_FOLDS for j in range(NUM_FOLDS - 2)],
                'valid': [(i + NUM_FOLDS - 2) % NUM_FOLDS],
                'test': [(i + NUM_FOLDS - 1) % NUM_FOLDS]
            }
            source_1_buffer = hm.data_split(source_1_pos_data, source_1_pos_labels, source_1_neg_data, source_1_neg_labels, NUM_FOLDS, split, folds)
            source_2_buffer = ms.data_split(source_2_pos_data, source_2_pos_labels, source_2_neg_data, source_2_neg_labels, NUM_FOLDS, split, folds)
            target_buffer = ms.data_split(target_pos_data, target_pos_labels, target_neg_data, target_neg_labels, NUM_FOLDS, split, folds)
            # Pad sequences and one-hot labels for every split of every species.
            source_1 = {}
            source_1['train_dataset'], source_1['train_labels'] = pad_dataset(source_1_buffer['train_dataset'], source_1_buffer['train_labels'])
            source_1['valid_dataset'], source_1['valid_labels'] = pad_dataset(source_1_buffer['valid_dataset'], source_1_buffer['valid_labels'])
            source_1['test_dataset'], source_1['test_labels'] = pad_dataset(source_1_buffer['test_dataset'], source_1_buffer['test_labels'])
            source_2 = {}
            source_2['train_dataset'], source_2['train_labels'] = pad_dataset(source_2_buffer['train_dataset'], source_2_buffer['train_labels'])
            source_2['valid_dataset'], source_2['valid_labels'] = pad_dataset(source_2_buffer['valid_dataset'], source_2_buffer['valid_labels'])
            source_2['test_dataset'], source_2['test_labels'] = pad_dataset(source_2_buffer['test_dataset'], source_2_buffer['test_labels'])
            target = {}
            target['train_dataset'], target['train_labels'] = pad_dataset(target_buffer['train_dataset'], target_buffer['train_labels'])
            target['valid_dataset'], target['valid_labels'] = pad_dataset(target_buffer['valid_dataset'], target_buffer['valid_labels'])
            target['test_dataset'], target['test_labels'] = pad_dataset(target_buffer['test_dataset'], target_buffer['test_labels'])
            train_resuts, valid_results, source_1_results, source_2_results, target_results = train(source_1, source_2, target, hyper_dict)
            print("\nbest valid epoch: %d" % (len(train_resuts) - 1))
            print("Training accuracy: %.2f%%" % train_resuts[-1])
            print("Validation accuracy: %.2f%%" % valid_results[-1])
            print("Source_1 training accuracy:%.2f%%" % source_1_results[-1])
            print("Source_2 training accuracy: %.2f%%" % source_2_results[-1])
            print("Traget accuracy: %.2f%%" % target_results[-1])
            train_accuracy_split.append(train_resuts[-1])
            valid_accuracy_split.append(valid_results[-1])
            source_1_accuracy_split.append(source_1_results[-1])
            source_2_accuracy_split.append(source_2_results[-1])
            target_accuracy_split.append(target_results[-1])
        # Average over the NUM_FOLDS cross-validation splits.
        train_accuracy = np.mean(train_accuracy_split)
        valid_accuracy = np.mean(valid_accuracy_split)
        source_1_accuracy = np.mean(source_1_accuracy_split)
        source_2_accuracy = np.mean(source_2_accuracy_split)
        target_accuracy = np.mean(target_accuracy_split)
        print('\n\n########################\nFinal result:')
        print('Training set accuracy: %.1f%%' % (train_accuracy))
        print('Validation set accuracy: %.1f%%' % (valid_accuracy))
        print("Source_1 accuracy:%.1f%%" % (source_1_accuracy))
        print('Srouce_2 accuracy: %.1f%%' % (source_2_accuracy))
        print('Target accuracy: %.1f%%' % (target_accuracy))
# TF1-style entry point: tf.app.run() parses flags and invokes main(_).
if __name__ == '__main__':
    tf.app.run()
|
from .abc import BaseModel, MetaBaseModel, db
class Measure(db.Model, BaseModel, metaclass=MetaBaseModel):
    """Model class for measure."""

    # Surrogate primary key.
    id = db.Column(db.Integer(), nullable=False, primary_key=True)
    # Owning country; rows are removed with their country (ON DELETE CASCADE).
    country_id = db.Column(db.Integer,
                           db.ForeignKey('country.id', ondelete='CASCADE'),
                           nullable=False)
    # Kind of measurement. NOTE: `type` shadows the builtin, but the column
    # and constructor-argument name are part of the public API.
    type = db.Column(db.String(32), nullable=False)
    value = db.Column(db.Float, nullable=False)
    # Stored as an integer — presumably a year or epoch value; TODO confirm.
    date = db.Column(db.Integer, nullable=False)

    def __init__(self, country_id, type, value, date):
        """Create a new measure."""
        self.country_id = country_id
        self.type = type
        self.value = value
        self.date = date
|
# -*- coding: utf-8 -*-
import json
import subprocess
import time
def getLength(filename):
    """Return the pixel width of a video stream of `filename` via ffprobe
    (tries stream index 1 first, falling back to stream 0).

    Bug fix: the command list was passed with shell=True; on POSIX that
    executes only "ffprobe" with no arguments. Passing the list with the
    default shell=False runs the full command and avoids quoting issues.

    NOTE(review): the name says "length" but the value returned is the
    stream's width — consider renaming at the call sites.
    """
    command = ["ffprobe", "-loglevel", "quiet", "-print_format", "json", "-show_format", "-show_streams", "-i",
               filename]
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = result.stdout.read()
    result.wait()  # reap the child process
    temp = str(out.decode('utf-8'))
    try:
        data = json.loads(temp)['streams'][1]['width']
    except (KeyError, IndexError):
        # Stream 1 missing or has no width (e.g. audio-only second stream).
        data = json.loads(temp)['streams'][0]['width']
    return data
def getLenTime(filename):
    """Return {'size': ..., 'time': ...} from ffprobe's "format" section
    (file size and duration, as reported by ffprobe).

    Bug fix: same shell=True-with-list problem as getLength — on POSIX only
    "ffprobe" itself was executed; the list now runs with shell=False.
    """
    command = ["ffprobe", "-loglevel", "quiet", "-print_format", "json", "-show_format", "-show_streams", "-i",
               filename]
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = result.stdout.read()
    result.wait()  # reap the child process
    temp = str(out.decode('utf-8'))
    data = json.loads(temp)
    file_message = {"size": data["format"]['size'], "time": data["format"]['duration']}
    return file_message
# Ad-hoc timing demo: print epoch millis (truncated to whole seconds first),
# probe one local file, then print the precise epoch millis afterwards.
print(int(time.time()) * 1000)
a = getLenTime('D:\\video\\C90840816-2019-10-12-01-45-10.mp4')
print(a)
print(time.time() * 1000)
|
"""
This will autopatch machines using landscape
Todo:
Look at ways to make systems patched at different times get the same packages.
Manage reboots - Canonical suggests polling for the reboot-required flag (yuck)
The configuration.py file should be a copy of configuration.py.template
The copy should have the details (key, secret etc) filled in.
The CSV file should be based on the template too, and should be formatted:
<tag name>,<DayOfWeek>,<Week Of Month>,<Time>
<String>,<0-7>,<1-4>,<00:00 - 47:59>
- tag name should match a tag in landscape.
- DOW 0 is Sunday.
- Week Of Month - 1 is the first week. Values over 4 haven't been tested.
- Time is in 24 hour time. Hours over 23 will refer to hour-24 the next day.
"""
import csv
import re
import datetime
import dateutil.relativedelta as relativedelta
from landscape_api.base import API as LANDSCAPE_API
# This file should be based on configuration.py.template
import configuration as conf
def get_computers_by_tag(tag):
    """Return the Landscape computers carrying *tag*, or None for a bad query."""
    import landscape_api.base.errors as ldserrors
    api = LANDSCAPE_API(conf.LDS_URI, conf.LDS_KEY, conf.LDS_SECRET, conf.LDS_CA)
    try:
        return api.get_computers(query="tag:" + tag)  # pylint: disable=no-member
    except ldserrors.InvalidQueryError:
        # An unknown/invalid tag query is not fatal — signal it with None.
        return None
def upgrade_by_tag(tag, deliver_after, packages=None, security_only=False, deliver_delay_window=0):
    """Schedule a package upgrade on every system carrying *tag*.

    An empty *packages* list (the default) means "all upgradable packages".
    """
    api = LANDSCAPE_API(conf.LDS_URI, conf.LDS_KEY, conf.LDS_SECRET, conf.LDS_CA)
    return api.upgrade_packages(  # pylint: disable=no-member
        "tag:" + tag,
        packages if packages is not None else [],
        security_only,
        deliver_after,
        deliver_delay_window,
    )
def interpret_time(list_item, startdate=conf.STARTDATE):
    """Convert a CSV row (tag, weekday, week-of-month, HH:MM) to a datetime.

    The result lands in the month after *startdate* (defaulting to "now"):
    the week-of-month'th occurrence of the requested weekday, at the given
    time.  Hours above 23 mean hour-24 on the following day.
    """
    target_weekday = int(list_item[1])
    week_of_month = int(list_item[2])
    clock = list_item[3].split(":")
    hours = int(clock[0])
    minutes = int(clock[1])
    if not startdate:
        startdate = datetime.datetime.now()
    # Scheduling always targets the following month, so jump to the first
    # day of the next month (rolling the year over past December).
    first_of_next = startdate.replace(day=1)
    if first_of_next.month == 12:
        first_of_next = first_of_next.replace(year=first_of_next.year + 1, month=1)
    else:
        first_of_next = first_of_next.replace(month=first_of_next.month + 1)
    # weekday() is 0=Monday; the +1 shifts it onto the Linux 1=Monday scheme.
    days_ahead = target_weekday - (first_of_next.weekday() + 1)
    if days_ahead < 0:
        # That weekday already passed within the first week of the month.
        days_ahead += 7
    # Week 1 is the current week, so add 7 days per extra week requested.
    days_ahead += 7 * (week_of_month - 1)
    schedule = first_of_next + datetime.timedelta(days=days_ahead)
    # Hours over 23 refer to hour-24 on the next day.
    if hours > 23:
        hours -= 24
        schedule += datetime.timedelta(days=1)
    return schedule.replace(hour=hours, minute=minutes)
def reboot_by_tag(tag, schedule):
    """Reboot every computer carrying *tag* at the given *schedule*."""
    api = LANDSCAPE_API(conf.LDS_URI, conf.LDS_KEY, conf.LDS_SECRET, conf.LDS_CA)
    machine_ids = [int(machine["id"]) for machine in get_computers_by_tag(tag)]
    api.reboot_computers(machine_ids, schedule)  # pylint: disable=no-member
def process_list(listfile=conf.LISTFILE):
    """Walk the CSV of (tag, schedule) rows, scheduling upgrades and reboots."""
    with open(listfile) as csvfile:
        for row in csv.reader(csvfile):
            # Skip tags that match no machines (or that are invalid queries).
            if not get_computers_by_tag(row[0]):
                continue
            schedule = interpret_time(row)
            upgrade_by_tag(row[0], schedule)
            # Reboot a configurable number of hours after the upgrade window.
            reboot_by_tag(row[0], schedule + relativedelta.relativedelta(hours=conf.LDS_REBOOT_DELAY))
# Process the schedule CSV only when run as a script (not on import).
if __name__ == "__main__":
    process_list()
|
from flask import abort, jsonify, request
from __main__ import app, db
from models import StoredImage, User
@app.route('/admin/news/add/feature_image', methods=['POST'])
def ajax_admin_news_add_feature_image():
    """AJAX endpoint: store an uploaded news feature image.

    Requires a signed-in staff/superuser (401 otherwise).  Returns JSON with
    ``success`` and ``feature_image_id`` (0 when nothing was stored).
    Deduplicates by MD5: an already-stored identical image is reused.
    """
    # Only staff or superusers may upload.
    user = User.get_signed_in_user()
    if user is None:
        abort(401)
    if not user.is_staff and not user.is_superuser:
        abort(401)
    json_results = {'success': False, 'feature_image_id': 0}
    if 'feature_image' in request.files and request.files['feature_image'].filename:
        # Try to import the image. Will be None on failure.
        feature_image = StoredImage.from_file(request.files['feature_image'])
        if feature_image:
            # BUG FIX: fit_within() was previously called *before* the None
            # check, raising AttributeError whenever the import failed.
            feature_image.fit_within(width=2560, height=2560)
            # If it exists in the database, reuse the stored image; otherwise save it.
            tmp_image = StoredImage.from_database_md5(feature_image.md5_hash)
            if tmp_image is None:
                db.session.add(feature_image)
                db.session.commit()
            else:
                feature_image = tmp_image
            json_results['success'] = True
            json_results['feature_image_id'] = feature_image.id
    return jsonify(**json_results)
|
# from django.test import TestCase
# from blog.forms import CommentForm
# class CommentFormTest(TestCase):
# @classmethod
# def setUpClass(cls):
# cls.valid_data = {
# "name": "Subhan",
# "email":"subhanrzayev97@gmail.com",
# "message":"Write this comment"
# }
# cls.invalid_data = {
# "email":"Subhan",
# "message":"Write this comment"
# }
# cls.form = CommentForm
# def test_form_with_valid_data(self):
# form = CommentForm(data = self.valid_data)
# self.assertTrue(form.is_valid())
# def test_form_with_invalid_data(self):
# form = CommentForm(data = self.invalid_data)
# self.assertFalse(form.is_valid())
# self.assertIn('name',form.errors)
# self.assertIn('email',form.errors)
# print(form.errors)
# self.assertIn('This field is required.', form.errors['name'])
# self.assertIn('Enter a valid email address.', form.errors['email'])
# @classmethod
# def tearDownClass(cls):
# pass |
#!/usr/bin/python3
import smbus
bus = smbus.SMBus(1)  # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)
# 7 bit address (will be left shifted to add the read write bit)
DEVICE_ADDRESS = 0x18
DEVICE_REG_MODE1 = 0x00    # MODE1 register (per the constant's name)
DEVICE_REG_LEDOUT0 = 0x1d  # first LED output register (per the constant's name)
# Write a single register: set bit 7 (0x80) of MODE1.
# NOTE(review): the printed return value is presumably just a smoke-test
# trace — confirm what write_byte_data returns on this smbus version.
r = bus.write_byte_data(DEVICE_ADDRESS, DEVICE_REG_MODE1, 0x80)
print(r)
# Write an array of registers (left disabled by the original author):
#ledout_values = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
#bus.write_i2c_block_data(DEVICE_ADDRESS, DEVICE_REG_LEDOUT0, ledout_values)
|
from django.contrib import admin
# Register your models here.
from .models import Person, Book
class PersonAdmin(admin.ModelAdmin):
    """Admin change-list configuration for the Person model."""
    # Columns shown in the change list ("id" is Django's default primary key).
    list_display = ('id', 'name', 'age', 'email', 'data',)
    # Records per page (Django's default is 100).
    list_per_page = 50
    # Default ordering; the leading minus sign means descending.
    ordering = ('-id',)
    # Right-hand sidebar filters.
    list_filter = ('age', 'email',)
    # Fields covered by the search box.
    search_fields = ('id', 'name', 'email')
    # Date drill-down navigation.
    # NOTE(review): the field is named 'data' (consistent with list_display
    # above) — confirm on the model that this is not a typo for 'date'.
    date_hierarchy = 'data'
# Register Person with its customised admin, and Book with the default one.
admin.site.register(Person, PersonAdmin)
admin.site.register(Book)
'''
Домашнее задание N8
Создать класс для представления трехмерных векторов (обычных евклидовых).
С помощью специальных методов: "__add__", "__mul__", "__abs__", "__bool__", "__str__"
- определить сложение векторов, умножение вектора на число, длинна вектора, булево значение
(True - если длинна > 0) и строковое представление объекта.
'''
import math
class Vector:
    """A 3-D Euclidean vector.

    Supports vector addition (+), scalar multiplication (*), length via
    ``abs()``, truthiness (non-zero length), and a ``{x, y, z}`` string form.
    """

    def __init__(self, x: float, y: float, z: float):
        self._x = x
        self._y = y
        self._z = z

    def __add__(self, other):
        """Component-wise addition with another Vector."""
        if isinstance(other, Vector):
            return Vector(self._x + other.x, self._y + other.y, self._z + other.z)
        # BUG FIX: the original returned None here, so `v + 1` silently
        # yielded None.  NotImplemented lets Python try the reflected
        # operation and raise a proper TypeError otherwise.
        return NotImplemented

    def __mul__(self, other):
        """Scale the vector by the number *other*."""
        if isinstance(other, (int, float)):
            return Vector(other * self._x, other * self._y, other * self._z)
        # Guard against non-numeric operands (e.g. Vector * Vector, which
        # previously recursed through __rmul__ into nonsense values).
        return NotImplemented

    def __rmul__(self, other):
        return self.__mul__(other)

    def __abs__(self):
        """Euclidean length of the vector."""
        return math.sqrt(self._x**2 + self._y**2 + self._z**2)

    def __bool__(self):
        """True iff the vector has non-zero length."""
        return abs(self) > 0

    def __str__(self):
        return '{' + str(self._x) + ', ' + str(self._y) + ', ' + str(self._z) + '}'

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @property
    def z(self):
        return self._z
import tushare as ts
import datetime
import pandas as pd
import numpy as np
import struct
import os
import math
from statistics import mean
from numba import jit
from numba.typed import List
# SECURITY NOTE(review): an API token is hard-coded in source; consider
# loading it from an environment variable or config file kept out of VCS.
tushare_token = '1e405fa29516d0c96f66ee71f4f2833b31b566cd6ad4f0faa895c671'
#获取上证综指历史数据,含市净率,市盈率
def get_shanghai_from_tushare():
    """Fetch the Shanghai Composite daily PB history from tushare.

    Returns a list of [trade_date, pb] rows sorted ascending by date.
    """
    # Today's figures are only used after the evening update (after 17:00);
    # earlier in the day, stop at yesterday.
    if datetime.datetime.now().hour > 17:
        strenddate = datetime.date.today().strftime('%Y%m%d')
    else:
        strenddate = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y%m%d')
    ts.set_token(tushare_token)
    pro = ts.pro_api()
    # Fetched in two date ranges (presumably to stay under the API's
    # per-call row limit — confirm against the tushare quota).
    df1 = pro.index_dailybasic(ts_code="000001.SH", start_date='20001219', end_date='20160731', fields='trade_date,pb')
    df2 = pro.index_dailybasic(ts_code="000001.SH", start_date='20160801', end_date=strenddate, fields='trade_date,pb')
    # BUG FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
    df = pd.concat([df2, df1], ignore_index=True)
    df = df.sort_values(by='trade_date', ascending=True).reset_index(drop=True)
    return df.values.tolist()
def get_anystock_from_tushare(stockcode):
    """Fetch the daily PB history for *stockcode* from tushare.

    Returns a list of [trade_date, pb] rows sorted ascending by date.
    """
    # Before the evening data update, stop at yesterday.
    if datetime.datetime.now().hour > 17:
        strenddate = datetime.date.today().strftime('%Y%m%d')
    else:
        strenddate = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y%m%d')
    # Codes starting with '6' are Shanghai-listed; everything else Shenzhen.
    ts_code = stockcode + ('.SH' if stockcode[0:1] == '6' else '.SZ')
    ts.set_token(tushare_token)
    pro = ts.pro_api()
    # Three date ranges per call (same per-call limit workaround as the
    # index fetcher above).
    ranges = (('19900101', '20031231'), ('20040101', '20181231'), ('20190101', strenddate))
    frames = [pro.daily_basic(ts_code=ts_code, start_date=start, end_date=end, fields='trade_date,pb')
              for start, end in ranges]
    # BUG FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
    df = pd.concat(frames, ignore_index=True)
    return np.array(df.sort_values(by='trade_date', ascending=True).reset_index(drop=True)).tolist()
@jit(nopython=True)
def get_ndays_average_stock_pb(pbarray, ndays):
    """Return {trade_date: trailing ndays-average PB, rounded to 4 places}.

    *pbarray* columns: 0 = trade_date, 1 = pb.  The average of the *ndays*
    sessions preceding a date is keyed by that date.  Written as explicit
    loops so it stays numba-nopython compatible.
    """
    history_pb_dict = dict()
    for i in range(pbarray.shape[0] - ndays):
        total_pb = 0.0
        j = i
        while j < ndays + i:
            total_pb = total_pb + pbarray[j, 1]  # column 1 holds pb
            j += 1
        # After the loop j == i + ndays: the trailing average is keyed by
        # the date of the session that follows the summed window.
        # (FIX: removed the unused `pb = pbarray[j, 1]` local.)
        trade_date = pbarray[j, 0]
        history_pb_dict[int(trade_date)] = round(total_pb / ndays, 4)
    return history_pb_dict
def get_stock_close_from_tushare(stockcode):
    """Fetch the daily close history for *stockcode* from tushare.

    Returns a DataFrame with ``trade_date`` and ``close`` columns.  Codes
    starting with '5' are funds (fetched via ``fund_daily``); '6' and '5'
    are Shanghai-listed, everything else Shenzhen.
    """
    # Before the evening data update, stop at yesterday.
    if datetime.datetime.now().hour > 17:
        strenddate = datetime.date.today().strftime('%Y%m%d')
    else:
        strenddate = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y%m%d')
    if stockcode[0:1] == '6' or stockcode[0:1] == '5':
        ts_code = stockcode + '.SH'
    else:
        ts_code = stockcode + '.SZ'
    ts.set_token(tushare_token)
    pro = ts.pro_api()
    # The two original branches differed only in the endpoint — pick it once.
    fetch = pro.fund_daily if stockcode[0:1] == '5' else pro.daily_basic
    ranges = (('19900101', '20031231'), ('20040101', '20181231'), ('20190101', strenddate))
    frames = [fetch(ts_code=ts_code, start_date=start, end_date=end, fields='trade_date,close')
              for start, end in ranges]
    # BUG FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
    return pd.concat(frames, ignore_index=True)
# ---------------------------------------------------------------------------
# Script body: build a CSV of trailing-average PB over several window sizes,
# plus the 3 smallest / 3 largest of those averages per day, and the close.
# ---------------------------------------------------------------------------
stockcode = input("请输入证券代码(510300):")  # prompt asks for a security code
if len(stockcode) == 0:
    stockcode = '510300'  # default security code
# Fund codes start with '5'; for those the Shanghai Composite PB series is
# used instead (presumably as a proxy, since funds carry no PB of their own
# in daily_basic — TODO confirm).
if stockcode[0:1] == '5':
    stocklist = get_shanghai_from_tushare()
else:
    stocklist = get_anystock_from_tushare(stockcode)
pbarray = np.array(stocklist,dtype=np.float64)
# Seed the result frame with the widest window: 1220 sessions.
history_pb_dict = get_ndays_average_stock_pb(pbarray,1220)
df = pd.DataFrame(list(history_pb_dict.items()), columns=['trade_date', 'pb1220'])
# Add one column per window from 1098 down to 122 sessions, in steps of 122
# (int(244 * 4.5) == 1098; 122 looks like half a trading year — confirm).
ndays = int(244 * 4.5)
while ndays>=122:
    history_pb_dict = get_ndays_average_stock_pb(pbarray,ndays)
    tmpdf = pd.DataFrame(list(history_pb_dict.items()), columns=['trade_date', 'pb'+str(ndays)])
    # Inner merge keeps only dates present in every window's history.
    df = pd.merge(df,tmpdf.loc[:,['trade_date','pb'+str(ndays)]],how='inner',on = 'trade_date')
    #df.to_csv(stockcode+'_pb_'+str(ndays)+'.csv',encoding='utf_8_sig')
    ndays -= 122
# pb1 is the raw single-day PB, merged in for reference.
ndays = 1
history_pb_dict = get_ndays_average_stock_pb(pbarray,ndays)
tmpdf = pd.DataFrame(list(history_pb_dict.items()), columns=['trade_date', 'pb'+str(ndays)])
df = pd.merge(df,tmpdf.loc[:,['trade_date','pb'+str(ndays)]],how='inner',on = 'trade_date')
#for i in range(len(df)):
#    tmpdf = df[i:i+1]
# --- three smallest window averages per row --------------------------------
# Trick: take the row-wise min, overwrite that cell in the raw array with a
# sentinel (100, above any value seen here), rebuild the frame, and take the
# min again — yielding the 2nd and then 3rd smallest.
tmpdf = df
tmpdf = tmpdf.drop(columns=['trade_date','pb1'])
tmparr = tmpdf.values
df['min1']=tmpdf.min(axis=1)
tmparr[tmpdf.index,np.argmin(tmparr,axis=1)]=100
tmpdf=pd.DataFrame(tmparr,columns=["pb1220","pb1098","pb976","pb854","pb732","pb610","pb488","pb366","pb244","pb122"])
df['min2']=tmpdf.min(axis=1)
tmparr[tmpdf.index,np.argmin(tmparr,axis=1)]=100
tmpdf=pd.DataFrame(tmparr,columns=["pb1220","pb1098","pb976","pb854","pb732","pb610","pb488","pb366","pb244","pb122"])
df['min3']=tmpdf.min(axis=1)
# --- three largest window averages per row ---------------------------------
# Same sentinel trick with -100, on a fresh copy of the window columns.
tmpdf = df
tmpdf = tmpdf.drop(columns=['trade_date','pb1','min1','min2','min3'])
tmparr = tmpdf.values
df['max1']=tmpdf.max(axis=1)
tmparr[tmpdf.index,np.argmax(tmparr,axis=1)]=-100
tmpdf=pd.DataFrame(tmparr,columns=["pb1220","pb1098","pb976","pb854","pb732","pb610","pb488","pb366","pb244","pb122"])
df['max2']=tmpdf.max(axis=1)
tmparr[tmpdf.index,np.argmax(tmparr,axis=1)]=-100
tmpdf=pd.DataFrame(tmparr,columns=["pb1220","pb1098","pb976","pb854","pb732","pb610","pb488","pb366","pb244","pb122"])
df['max3']=tmpdf.max(axis=1)
#df.to_csv(stockcode+'_pb_'+str(ndays)+'.csv',encoding='utf_8_sig')
#df['trade_date'] = df['trade_date'].astype('object')
#df = df.sort_values(by = 'trade_date',axis = 0,ascending = True).reset_index(drop=True)
# Join the daily close (keys must both be int for the merge) and write out.
tmpdf = get_stock_close_from_tushare(stockcode)
tmpdf['trade_date'] = tmpdf['trade_date'].astype('int')
df = pd.merge(df,tmpdf.loc[:,['trade_date','close']],how='inner',on = 'trade_date')
#df.to_csv(stockcode+'_close'+'.csv',encoding='utf_8_sig')
df.to_csv(stockcode+'.csv',index=False,encoding='utf_8_sig')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.