blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f28822e33cbfcd736db6cbcf9f5ff11dd18a9614 | abcc4679e9977e8e92f029da56f80e3cad69ecbb | /app/models.py | 00c875aec8e8ce243df5431ff3a634beb7b9d1a0 | [] | no_license | emjrymer/MovieRating | ecf2d5d1c096d65c5861f6b6e0511e368a1d4307 | 3785eeab01ffb23efe402f19bdc1a26bbb6f192c | refs/heads/master | 2021-06-04T16:05:58.996506 | 2016-03-15T20:41:59 | 2016-03-15T20:41:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | from django.db import models
# Create your models here.
class Rater(models.Model):
    """A person who rates movies (MovieLens-style demographic record)."""
    age = models.IntegerField()
    gender = models.CharField(max_length=2)  # short code -- presumably 'M'/'F'; confirm against source data
    occupation = models.CharField(max_length=50)
    zip_code = models.CharField(max_length=10)  # text, not int, to preserve leading zeros
class Movie(models.Model):
    """A film with MovieLens-style one-hot genre flags and a cached average rating."""
    movie_title = models.CharField(max_length=30)
    # Dates stored as free text rather than DateField.
    # NOTE(review): consider DateField -- confirm the format of the source data first.
    release_date = models.CharField(max_length=30)
    video_release_date = models.CharField(max_length=30, blank=True, default="")
    imdb = models.URLField()
    # One boolean flag per genre; default=0 is coerced to False by Django.
    unknown_genre = models.BooleanField(default=0)
    action = models.BooleanField(default=0)
    adventure = models.BooleanField(default=0)
    animation = models.BooleanField(default=0)
    childrens = models.BooleanField(default=0)
    comedy = models.BooleanField(default=0)
    crime = models.BooleanField(default=0)
    documentary = models.BooleanField(default=0)
    drama = models.BooleanField(default=0)
    fantasy = models.BooleanField(default=0)
    filmnoir = models.BooleanField(default=0)
    horror = models.BooleanField(default=0)
    musical = models.BooleanField(default=0)
    mystery = models.BooleanField(default=0)
    romance = models.BooleanField(default=0)
    scifi = models.BooleanField(default=0)
    thriller = models.BooleanField(default=0)
    war = models.BooleanField(default=0)
    western = models.BooleanField(default=0)
    # NULL until an average is computed and stored by some external process.
    avg_rating = models.FloatField(null=True)
    def __str__(self):
        return self.movie_title
class Review(models.Model):
    """A single integer rating of a Movie by a Rater."""
    # on_delete made explicit: CASCADE was the implicit default in Django < 2.0
    # and becomes a required argument in Django >= 2.0, so this is both
    # behavior-preserving and forward-compatible.
    reviewer = models.ForeignKey(Rater, on_delete=models.CASCADE)
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    rating = models.IntegerField()
    def __str__(self):
        return self.movie.movie_title
class Ureview(models.Model):
    """Free-text user review of a movie, listed newest first."""
    user_review = models.TextField()
    # Movie referenced by free text rather than a ForeignKey.
    # NOTE(review): consider linking to Movie -- confirm how callers populate this.
    user_movie = models.TextField()
    class Meta:
        # Descending primary key: newest reviews first by default.
        ordering = ["-id"]
| [
"emjwivell@gmail.com"
] | emjwivell@gmail.com |
a74472db035ea74db466460bf3be92ed1ad179cf | 5e5a2823a212f8b3b71863b95c73f7b259220070 | /runspider.py | e769984489a93842dddebe27c14329e34c9a40e5 | [] | no_license | everyday-stoke-phosphate/kyodo_scraping | 85f28d59460507e357bf0f7d13c9778136420821 | a7bf5c27507174217dbf8d8e3e79ddac7eb3c2f9 | refs/heads/master | 2021-03-04T06:49:03.876710 | 2020-03-15T01:55:37 | 2020-03-15T01:55:37 | 246,015,135 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
def main():
    """Run the Kyodo-article spider using the project-wide Scrapy settings."""
    # Configuration lives in settings.py; pass overrides to
    # get_project_settings() here only when something must change
    # (e.g. FEED_URI='results.json').
    project_settings = get_project_settings()
    crawler = CrawlerProcess(project_settings)
    crawler.crawl("kyodo_articles_scraping")
    crawler.start()
| [
"test@htrghttesjthehukyjthrdjhe.greafewufgwuag"
] | test@htrghttesjthehukyjthrdjhe.greafewufgwuag |
1c848701cc7ee159f0646499637a78c5cdd12204 | 8337a3098a922b37185ab8bf3e1abe52d527c25e | /my_app/migrations/0002_auto_20151104_1503.py | f9d9637d9b77d8e1c662664586618f5272e3a86a | [] | no_license | dimejy2/StatsFootball | f1e72cf29a658985725774e6145802e2f2c1d713 | dce84f9a5c217243dba9e1f49efe959e5c90c695 | refs/heads/master | 2021-01-10T21:59:25.003768 | 2015-12-01T17:21:13 | 2015-12-01T17:21:13 | 47,205,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Creates the ``Balances`` and ``Investor`` tables, then links each balance
    row to its owning investor via a ``account_holder`` foreign key.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('my_app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Balances',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                # 3-char code -- presumably an ISO 4217 currency code; confirm.
                ('currency_id', models.CharField(max_length=3)),
                ('value', models.FloatField(default=0.0)),
            ],
        ),
        migrations.CreateModel(
            name='Investor',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('account_number', models.CharField(max_length=10)),
                # One investor profile per auth user.
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='balances',
            name='account_holder',
            field=models.ForeignKey(to='my_app.Investor'),
        ),
    ]
| [
"dimejy2@gmail.com"
] | dimejy2@gmail.com |
983aa7b87ff0971ad8a96fbf487455eb742081e6 | 27cf8262cbe20597e61f89bdc2849aaa5c5ce4f4 | /dao/reaction_dao.py | 420b1a99c48e129bdc5294de13579a84b212162b | [] | no_license | rfgsantos/Moodler-Web-Server | dfa69efa01a7ac10a170c69b4ff969daae82114e | be6b1d414a9f5ff201ce9f01fb721102af1473e3 | refs/heads/master | 2022-12-10T22:00:29.131783 | 2018-07-16T03:31:50 | 2018-07-16T03:31:50 | 139,902,354 | 1 | 1 | null | 2022-12-08T02:15:15 | 2018-07-05T21:27:13 | Python | UTF-8 | Python | false | false | 1,756 | py | from datetime import date, datetime, timedelta
from utils.python_database_connector import DatabaseConnector
from dtos.reaction_dto import Reaction
class ReactionDao:
    """Data-access object for rows of the ``reaction`` table.

    Implemented as a singleton via ``__new__``.  NOTE(review): ``__init__``
    still runs on every ``ReactionDao()`` call, so ``self.db`` is re-created
    each time even though the instance itself is shared -- confirm whether
    that is intended.

    WARNING(security): every query below is assembled with ``%`` string
    formatting; if any id or field value originates from untrusted input,
    this is vulnerable to SQL injection.  Prefer a parameterized-query API
    on ``DatabaseConnector`` if one exists.
    """
    def __init__(self):
        self.db = DatabaseConnector()
    def __new__(cls):
        # Cache a single shared instance on the class object.
        if not hasattr(cls, 'instance'):
            cls.instance = super(ReactionDao, cls).__new__(cls)
        return cls.instance
    def get_all_reaction(self):
        """Return every reaction row, each mapped to a plain dict."""
        query = "SELECT * FROM reaction"
        self.db.executeQuery(query)
        return list(map(lambda reaction: self.map_reaction(reaction), self.db.getQueryResult()))
    def get_reaction_by_id(self,id):
        """Return the reaction(s) with the given id as a list of dicts."""
        query = "SELECT * FROM reaction WHERE reaction.id='%s'" % id
        self.db.executeQuery(query)
        return list(map(lambda reaction: self.map_reaction(reaction), self.db.getQueryResult()))
    def insert_reaction(self,json_params):
        """Insert one reaction row from a dict of column values."""
        params = (json_params['id'],json_params['user_id'],json_params['track_id'],json_params['hrv'],json_params['evaluation'], json_params['user_evaluation'])
        query = "INSERT INTO reaction (id,user_id,track_id,hrv,evaluation,user_evaluation) VALUES ('%s','%s','%s','%s','%s','%s')" % params
        return self.db.executeQuery(query,isInsert=True)
    def delete_reaction(self,id):
        """Delete the reaction row(s) with the given id."""
        query = "DELETE FROM reaction WHERE reaction.id='%s'" % id
        return self.db.executeQuery(query,isInsert=True)
    def map_reaction(self,reaction_input):
        """Convert a raw DB row (mapping) into a plain Reaction dict."""
        reaction = Reaction(
            reaction_input['id'],
            reaction_input['user_id'],
            reaction_input['track_id'],
            reaction_input['hrv'],
            reaction_input['evaluation'],
            reaction_input['user_evaluation']
        )
        return reaction.__dict__
| [
"rui.filipe.santos07@mail.com"
] | rui.filipe.santos07@mail.com |
ec232876bf6ba4dec0f2ee9821f5917b1a88791d | 584aefa111a279250bde4d2d0fa3fc64fda8706b | /models/WGANGP.py | ff09e4911cd4bc750e115cbf2f42ba8bc9bbbbd4 | [] | no_license | shotakikuchi/GenerativeDeepLearning | cce2b851ba722b44432566a7b5bc02d1c950c0f7 | a67bc43986df63065e81a3a840160e25099653bc | refs/heads/master | 2020-07-02T16:43:09.508058 | 2019-08-17T13:10:14 | 2019-08-17T13:10:14 | 201,592,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,689 | py | from keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, \
BatchNormalization, LeakyReLU, Dropout, ZeroPadding2D, UpSampling2D
from keras.layers.merge import _Merge
from keras.models import Model, Sequential
from keras import backend as K
from keras.optimizers import Adam, RMSprop
from keras.utils import plot_model
from keras.initializers import RandomNormal
from functools import partial
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
class RandomWeightedAverage(_Merge):
    """Provides a random weighted average between real and generated image samples.

    Used to build the interpolated images on which the WGAN-GP gradient
    penalty is evaluated; one interpolation weight is drawn per sample.
    """
    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size
    def _merge_function(self, inputs):
        # alpha ~ U(0, 1) per sample, broadcast over the (H, W, C) axes.
        alpha = K.random_uniform((self.batch_size, 1, 1, 1))
        # inputs[0] = real batch, inputs[1] = generated batch.
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
    """Wasserstein GAN with Gradient Penalty (WGAN-GP, Gulrajani et al. 2017).

    Builds a convolutional critic and generator from the supplied layer
    specifications and compiles two Keras training graphs:

    * ``self.critic_model`` -- updates the critic on real, fake and randomly
      interpolated images (the interpolations feed the gradient-penalty term).
    * ``self.model``        -- updates the generator through the frozen critic
      using the Wasserstein loss.
    """
    def __init__(self,
                 input_dim,
                 critic_conv_filters,
                 critic_conv_kernel_size,
                 critic_conv_strides,
                 critic_batch_norm_momentum,
                 critic_activation,
                 critic_dropout_rate,
                 critic_learning_rate,
                 generator_initial_dense_layer_size,
                 generator_upsample,
                 generator_conv_filters,
                 generator_conv_kernel_size,
                 generator_conv_strides,
                 generator_batch_norm_momentum,
                 generator_activation,
                 generator_dropout_rate,
                 generator_learning_rate,
                 optimizer,
                 grad_weight,
                 z_dim,
                 batch_size
                 ):
        self.name = 'gan'
        self.input_dim = input_dim
        self.critic_conv_filters = critic_conv_filters
        self.critic_conv_kernel_size = critic_conv_kernel_size
        self.critic_conv_strides = critic_conv_strides
        self.critic_batch_norm_momentum = critic_batch_norm_momentum
        self.critic_activation = critic_activation
        self.critic_dropout_rate = critic_dropout_rate
        self.critic_learning_rate = critic_learning_rate
        self.generator_initial_dense_layer_size = generator_initial_dense_layer_size
        self.generator_upsample = generator_upsample
        self.generator_conv_filters = generator_conv_filters
        self.generator_conv_kernel_size = generator_conv_kernel_size
        self.generator_conv_strides = generator_conv_strides
        self.generator_batch_norm_momentum = generator_batch_norm_momentum
        self.generator_activation = generator_activation
        self.generator_dropout_rate = generator_dropout_rate
        self.generator_learning_rate = generator_learning_rate
        self.optimiser = optimizer
        self.z_dim = z_dim
        # One conv layer per filter-count entry.
        self.n_layers_critic = len(critic_conv_filters)
        self.n_layers_generator = len(generator_conv_filters)
        # 'he normal'
        self.weight_init = RandomNormal(mean=0, stddev=0.02)
        self.grad_weight = grad_weight
        self.batch_size = batch_size
        # Per-epoch loss history, appended to by train().
        self.d_losses = []
        self.g_losses = []
        self.epoch = 0
        # Build both networks and the two compiled training graphs.
        self._build_critic()
        self._build_generator()
        self._build_adversarial()
    def gradient_penalty_loss(self, y_true, y_pred, interpolated_samples):
        """Computes gradient penalty based on prediction and weighted real / fake samples.
        """
        gradients = K.gradients(y_pred, interpolated_samples)[0]
        # compute the euclidean norm by squaring...
        gradients_sqr = K.square(gradients)
        # summing over the rows
        gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
        # and sqrt
        gradients_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 still for each single sample
        gradients_penalty = K.square(1 - gradients_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradients_penalty)
    def wasserstein(self, y_true, y_pred):
        # Wasserstein loss: labels are +1 (real) / -1 (fake), so this is
        # -E[label * critic_score].
        return -K.mean(y_true * y_pred)
    def get_activation(self, activation):
        """Return the Keras activation layer named by ``activation``."""
        if activation == 'leaky_relu':
            layer = LeakyReLU(alpha=0.2)
        else:
            layer = Activation(activation)
        return layer
    def _build_critic(self):
        """Assemble ``self.critic``: conv stack -> flatten -> unbounded linear score."""
        critic_input = Input(shape=self.input_dim, name='critic_input')
        x = critic_input
        for i in range(self.n_layers_critic):
            x = Conv2D(
                filters=self.critic_conv_filters[i]
                , kernel_size=self.critic_conv_kernel_size[i]
                , strides=self.critic_conv_strides[i]
                , padding='same'
                , name='critic_conv_' + str(i)
                , kernel_initializer=self.weight_init
            )(x)
            # No batch norm on the first layer.
            if self.critic_batch_norm_momentum and i > 0:
                x = BatchNormalization(momentum=self.critic_batch_norm_momentum)(x)
            x = self.get_activation(self.critic_activation)(x)
            if self.critic_dropout_rate:
                x = Dropout(rate=self.critic_dropout_rate)(x)
        x = Flatten()(x)
        # activation=None: the critic outputs a raw Wasserstein score.
        critic_output = Dense(1, activation=None
                              , kernel_initializer=self.weight_init
                              )(x)
        self.critic = Model(critic_input, critic_output)
    def _build_generator(self):
        """Assemble ``self.generator``: dense -> reshape -> upsample/conv stack -> tanh."""
        generator_input = Input(shape=(self.z_dim,), name='generator_input')
        x = generator_input
        x = Dense(np.prod(self.generator_initial_dense_layer_size), kernel_initializer=self.weight_init)(x)
        if self.generator_batch_norm_momentum:
            x = BatchNormalization(momentum=self.generator_batch_norm_momentum)(x)
        x = self.get_activation(self.generator_activation)(x)
        x = Reshape(self.generator_initial_dense_layer_size)(x)
        if self.generator_dropout_rate:
            x = Dropout(rate=self.generator_dropout_rate)(x)
        for i in range(self.n_layers_generator):
            if self.generator_upsample[i] == 2:
                # Upsample + same-size conv (instead of a transposed conv).
                x = UpSampling2D()(x)
                x = Conv2D(
                    filters=self.generator_conv_filters[i]
                    , kernel_size=self.generator_conv_kernel_size[i]
                    , padding='same'
                    , name='generator_conv_' + str(i)
                    , kernel_initializer=self.weight_init
                )(x)
            else:
                x = Conv2DTranspose(
                    filters=self.generator_conv_filters[i]
                    , kernel_size=self.generator_conv_kernel_size[i]
                    , padding='same'
                    , strides=self.generator_conv_strides[i]
                    , name='generator_conv_' + str(i)
                    , kernel_initializer=self.weight_init
                )(x)
            if i < self.n_layers_generator - 1:
                if self.generator_batch_norm_momentum:
                    x = BatchNormalization(momentum=self.generator_batch_norm_momentum)(x)
                x = self.get_activation(self.generator_activation)(x)
            else:
                # Final layer maps to [-1, 1] to match tanh-scaled images.
                x = Activation('tanh')(x)
        generator_output = x
        self.generator = Model(generator_input, generator_output)
    def get_opti(self, lr):
        """Return the optimizer named by ``self.optimiser`` at learning rate ``lr``."""
        if self.optimiser == 'adam':
            opti = Adam(lr=lr, beta_1=0.5)
        elif self.optimiser == 'rmsprop':
            opti = RMSprop(lr=lr)
        else:
            # Fall back to Adam with default betas for unknown names.
            opti = Adam(lr=lr)
        return opti
    def set_trainable(self, m, val):
        # Freeze/unfreeze the model and every layer in it.
        m.trainable = val
        for l in m.layers:
            l.trainable = val
    def _build_adversarial(self):
        """Compile ``self.critic_model`` (critic updates) and ``self.model`` (generator updates)."""
        # For critic
        # Freeze generator's layers while training critic
        self.set_trainable(self.generator, False)
        # Image input (real image)
        real_img = Input(shape=self.input_dim)
        # Fake image
        z_disc = Input(shape=(self.z_dim,))
        fake_img = self.generator(z_disc)
        # critic determines validity of the real and fake images
        fake = self.critic(fake_img)
        valid = self.critic(real_img)
        # Construct weighted average between real and fake images
        interpolated_img = RandomWeightedAverage(self.batch_size)([real_img, fake_img])
        # Determine validity of weighted sample
        validity_interpolated = self.critic(interpolated_img)
        # Use Python partial to provide loss function with additional
        # 'interpolated_samples' argument
        partial_gp_loss = partial(self.gradient_penalty_loss,
                                  interpolated_samples=interpolated_img)
        partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires function names
        self.critic_model = Model(inputs=[real_img, z_disc],
                                  outputs=[valid, fake, validity_interpolated])
        # Three outputs, three losses; the gradient penalty is scaled by grad_weight.
        self.critic_model.compile(
            loss=[self.wasserstein, self.wasserstein, partial_gp_loss]
            , optimizer=self.get_opti(self.critic_learning_rate)
            , loss_weights=[1, 1, self.grad_weight]
        )
        # For generator
        self.set_trainable(self.critic, False)
        self.set_trainable(self.generator, True)
        # Sampled noise for input to generator
        model_input = Input(shape=(self.z_dim,))
        # Generate images besed of noise
        img = self.generator(model_input)
        # Discriminator determines validity
        model_output = self.critic(img)
        # Defines generator model
        self.model = Model(model_input, model_output)
        self.model.compile(optimizer=self.get_opti(self.generator_learning_rate)
                           , loss=self.wasserstein
                           )
        self.set_trainable(self.critic, True)
    def train_critic(self, x_train, batch_size, using_generator):
        """Run one critic update on a real batch plus a freshly generated batch."""
        valid = np.ones((batch_size, 1), dtype=np.float32)
        fake = -np.ones((batch_size, 1), dtype=np.float32)
        dummy = np.zeros((batch_size, 1), dtype=np.float32)  # Dummy gt for gradient penalty
        if using_generator:
            true_imgs = next(x_train)[0]
            # Re-draw if the generator yielded a short (final) batch.
            if true_imgs.shape[0] != batch_size:
                true_imgs = next(x_train)[0]
        else:
            idx = np.random.randint(0, x_train.shape[0], batch_size)
            true_imgs = x_train[idx]
        noise = np.random.normal(0, 1, (batch_size, self.z_dim))
        d_loss = self.critic_model.train_on_batch([true_imgs, noise], [valid, fake, dummy])
        return d_loss
    def train_generator(self, batch_size):
        """Run one generator update: make the critic score noise-images as real."""
        noise = np.random.normal(0, 1, (batch_size, self.z_dim))
        valid = np.ones((batch_size, 1), dtype=np.float32)
        return self.model.train_on_batch(noise, valid)
    def train(self, x_train, batch_size, epochs, run_folder, print_every_n_batches=10, n_critic=5,
              using_generator=False):
        """Alternate n_critic critic updates with one generator update per epoch.

        Periodically writes sample images, weight checkpoints and a pickled
        copy of the whole object under ``run_folder``.
        """
        # NOTE(review): the range end is ``epochs + epochs`` rather than
        # ``self.epoch + epochs`` -- looks unintentional; confirm.
        for epoch in range(self.epoch, epochs + epochs):
            # Every 100th epoch forces exactly 5 critic loops.
            if epoch % 100 == 0:
                critic_loops = 5
            else:
                critic_loops = n_critic
            for _ in range(critic_loops):
                d_loss = self.train_critic(x_train, batch_size, using_generator)
            g_loss = self.train_generator(batch_size)
            print("%d (%d, %d) [D loss: (%.1f)(R %.1f, F %.1f, G %.1f)] [G loss: %.1f]" % (
                epoch, critic_loops, 1, d_loss[0], d_loss[1], d_loss[2], d_loss[3], g_loss))
            self.d_losses.append(d_loss)
            self.g_losses.append(g_loss)
            # If at save interval => save generated image samples
            if epoch % print_every_n_batches == 0:
                self.sample_images(run_folder)
                self.model.save_weights(os.path.join(run_folder, 'weights/weights-%d.h5' % (epoch)))
                self.model.save_weights(os.path.join(run_folder, 'weights/weights.h5'))
                self.save_model(run_folder)
            self.epoch += 1
    def sample_images(self, run_folder):
        """Save a 5x5 grid of generated samples to ``run_folder/images``."""
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.z_dim))
        gen_imgs = self.generator.predict(noise)
        # Rescale images 0 - 1 (generator output is tanh-scaled to [-1, 1]).
        gen_imgs = 0.5 * (gen_imgs + 1)
        gen_imgs = np.clip(gen_imgs, 0, 1)
        fig, axs = plt.subplots(r, c, figsize=(15, 15))
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(np.squeeze(gen_imgs[cnt, :, :, :]), cmap='gray_r')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig(os.path.join(run_folder, "images/sample_%d.png" % self.epoch))
        plt.close()
    def plot_model(self, run_folder):
        """Write architecture diagrams for all three models to ``run_folder/viz``."""
        plot_model(self.model, to_file=os.path.join(run_folder, 'viz/model.png'), show_shapes=True,
                   show_layer_names=True)
        plot_model(self.critic, to_file=os.path.join(run_folder, 'viz/critic.png'), show_shapes=True,
                   show_layer_names=True)
        plot_model(self.generator, to_file=os.path.join(run_folder, 'viz/generator.png'), show_shapes=True,
                   show_layer_names=True)
    def save(self, folder):
        """Pickle the constructor arguments (not the weights) and plot the models."""
        with open(os.path.join(folder, 'params.pkl'), 'wb') as f:
            pickle.dump([
                self.input_dim
                , self.critic_conv_filters
                , self.critic_conv_kernel_size
                , self.critic_conv_strides
                , self.critic_batch_norm_momentum
                , self.critic_activation
                , self.critic_dropout_rate
                , self.critic_learning_rate
                , self.generator_initial_dense_layer_size
                , self.generator_upsample
                , self.generator_conv_filters
                , self.generator_conv_kernel_size
                , self.generator_conv_strides
                , self.generator_batch_norm_momentum
                , self.generator_activation
                , self.generator_dropout_rate
                , self.generator_learning_rate
                , self.optimiser
                , self.grad_weight
                , self.z_dim
                , self.batch_size
            ], f)
        self.plot_model(folder)
    def save_model(self, run_folder):
        """Save the three Keras models plus a pickled copy of this object."""
        self.model.save(os.path.join(run_folder, 'model.h5'))
        self.critic.save(os.path.join(run_folder, 'critic.h5'))
        self.generator.save(os.path.join(run_folder, 'generator.h5'))
        pickle.dump(self, open(os.path.join(run_folder, "obj.pkl"), "wb"))
    def load_weights(self, filepath):
        """Load combined-model weights from an .h5 checkpoint."""
        self.model.load_weights(filepath)
| [
"goodtime683@icloud.com"
] | goodtime683@icloud.com |
8426e077cebea70246a5e452b3422803b8c9272e | 4b2b6a8c2caf0f06e0ce47bbb9174ec4db07adaf | /code/stg_node.py | bcbc4a9401548f48edd347e32cd99be7b3057024 | [
"MIT"
] | permissive | StanfordASL/NHumanModeling | 052e89e8950b54a63a40655527ab29aeed5a48d9 | 0ae2297f563599601b1777a3fc825c43f1a461cd | refs/heads/master | 2021-03-27T20:53:13.930366 | 2019-02-10T01:11:42 | 2019-02-10T01:11:42 | 123,646,313 | 15 | 11 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | class STGNode(object):
def __init__(self, node_name, node_type):
self.name = node_name
self.type = node_type
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.name == other.name
and self.type == other.type)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.name, self.type))
def __repr__(self):
type_str = self.type.replace(' ', '')
name_str = self.name.replace(' ', '').replace("'", "")
return type_str + "/" + name_str
def convert_to_label_node(node):
    """Return a new STGNode whose name carries the '_label' suffix."""
    labeled_name = node.name + '_label'
    return STGNode(labeled_name, node.type)
| [
"borisi@cs.stanford.edu"
] | borisi@cs.stanford.edu |
445508632ba9ca0cb62f6858ef1bb3b3b3ba0625 | f98dc868a74dc7a9b128a2d4ce2c53224dd11f63 | /intermediate_source/pipeline_tutorial.py | 49b37b1f564e74c3f8bc8ff8d8be0e03ec5f33b8 | [
"BSD-3-Clause"
] | permissive | walkingpanda/tutorials | ebe69e3fa8818b596922136273c74f6b0ab3b479 | 973193b4e28f4c277e14dcd93fd199f26cd1038b | refs/heads/master | 2023-05-29T23:34:27.635650 | 2021-06-14T21:05:10 | 2021-06-14T21:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,273 | py | """
Training Transformer models using Pipeline Parallelism
======================================================
**Author**: `Pritam Damania <https://github.com/pritamdamania87>`_
This tutorial demonstrates how to train a large Transformer model across
multiple GPUs using pipeline parallelism. This tutorial is an extension of the
`Sequence-to-Sequence Modeling with nn.Transformer and TorchText <https://pytorch.org/tutorials/beginner/transformer_tutorial.html>`__ tutorial
and scales up the same model to demonstrate how pipeline parallelism can be
used to train Transformer models.
Prerequisites:
* `Pipeline Parallelism <https://pytorch.org/docs/stable/pipeline.html>`__
* `Sequence-to-Sequence Modeling with nn.Transformer and TorchText <https://pytorch.org/tutorials/beginner/transformer_tutorial.html>`__
"""
######################################################################
# Define the model
# ----------------
#
######################################################################
# In this tutorial, we will split a Transformer model across two GPUs and use
# pipeline parallelism to train the model. The model is exactly the same model
# used in the `Sequence-to-Sequence Modeling with nn.Transformer and TorchText
# <https://pytorch.org/tutorials/beginner/transformer_tutorial.html>`__ tutorial,
# but is split into two stages. The largest number of parameters belong to the
# `nn.TransformerEncoder <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoder.html>`__ layer.
# The `nn.TransformerEncoder <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoder.html>`__
# itself consists of ``nlayers`` of `nn.TransformerEncoderLayer <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html>`__.
# As a result, our focus is on ``nn.TransformerEncoder`` and we split the model
# such that half of the ``nn.TransformerEncoderLayer`` are on one GPU and the
# other half are on another. To do this, we pull out the ``Encoder`` and
# ``Decoder`` sections into seperate modules and then build an nn.Sequential
# representing the original Transformer module.
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import tempfile
from torch.nn import TransformerEncoder, TransformerEncoderLayer
# torch.distributed.pipeline is not available on Windows.
if sys.platform == 'win32':
    print('Windows platform is not supported for pipeline parallelism')
    sys.exit(0)
# The model below is split across two devices, so two GPUs are mandatory.
if torch.cuda.device_count() < 2:
    print('Need at least two GPU devices for this tutorial')
    sys.exit(0)
class Encoder(nn.Module):
    """Embedding + positional-encoding front end of the pipelined transformer.

    Sits on the first pipeline stage; converts batch-first token ids into
    scaled, position-encoded embeddings in (S, N, ninp) layout.
    """

    def __init__(self, ntoken, ninp, dropout=0.5):
        super(Encoder, self).__init__()
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.init_weights()

    def init_weights(self):
        # Uniform init in [-0.1, 0.1], matching the decoder's scheme.
        bound = 0.1
        self.encoder.weight.data.uniform_(-bound, bound)

    def forward(self, src):
        # The pipeline feeds batch-first tensors; the encoder wants (S, N).
        seq_first = src.t()
        embedded = self.encoder(seq_first) * math.sqrt(self.ninp)
        return self.pos_encoder(embedded)
class Decoder(nn.Module):
    """Final projection from hidden states to vocabulary logits.

    Runs on the last pipeline stage and permutes the output back to a
    batch-first layout, as required by the pipeline driver.
    """

    def __init__(self, ntoken, ninp):
        super(Decoder, self).__init__()
        self.decoder = nn.Linear(ninp, ntoken)
        self.init_weights()

    def init_weights(self):
        # Zero bias, uniform weights in [-0.1, 0.1].
        bound = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-bound, bound)

    def forward(self, inp):
        # (S, N, ninp) -> (N, S, ntoken): batch dimension first for output.
        logits = self.decoder(inp)
        return logits.permute(1, 0, 2)
######################################################################
# ``PositionalEncoding`` module injects some information about the
# relative or absolute position of the tokens in the sequence. The
# positional encodings have the same dimension as the embeddings so that
# the two can be summed. Here, we use ``sine`` and ``cosine`` functions of
# different frequencies.
class PositionalEncoding(nn.Module):
    """Adds sinusoidal position information to token embeddings.

    Even embedding dimensions carry sine waves and odd dimensions cosine
    waves of geometrically decreasing frequency, so ``x + pe`` encodes the
    absolute position of each token ("Attention Is All You Need").
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        inv_freq = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * inv_freq)
        table[:, 1::2] = torch.cos(positions * inv_freq)
        # Shape (max_len, 1, d_model) so it broadcasts over the batch axis.
        table = table.unsqueeze(0).transpose(0, 1)
        # Buffer (not a parameter): persisted with the module, never trained.
        self.register_buffer('pe', table)

    def forward(self, x):
        # Add the table entries for the first x.size(0) positions.
        out = x + self.pe[:x.size(0), :]
        return self.dropout(out)
######################################################################
# Load and batch data
# -------------------
#
######################################################################
# The training process uses Wikitext-2 dataset from ``torchtext``. The
# vocab object is built based on the train dataset and is used to numericalize
# tokens into tensors. Starting from sequential data, the ``batchify()``
# function arranges the dataset into columns, trimming off any tokens remaining
# after the data has been divided into batches of size ``batch_size``.
# For instance, with the alphabet as the sequence (total length of 26)
# and a batch size of 4, we would divide the alphabet into 4 sequences of
# length 6:
#
# .. math::
# \begin{bmatrix}
# \text{A} & \text{B} & \text{C} & \ldots & \text{X} & \text{Y} & \text{Z}
# \end{bmatrix}
# \Rightarrow
# \begin{bmatrix}
# \begin{bmatrix}\text{A} \\ \text{B} \\ \text{C} \\ \text{D} \\ \text{E} \\ \text{F}\end{bmatrix} &
# \begin{bmatrix}\text{G} \\ \text{H} \\ \text{I} \\ \text{J} \\ \text{K} \\ \text{L}\end{bmatrix} &
# \begin{bmatrix}\text{M} \\ \text{N} \\ \text{O} \\ \text{P} \\ \text{Q} \\ \text{R}\end{bmatrix} &
# \begin{bmatrix}\text{S} \\ \text{T} \\ \text{U} \\ \text{V} \\ \text{W} \\ \text{X}\end{bmatrix}
# \end{bmatrix}
#
# These columns are treated as independent by the model, which means that
# the dependence of ``G`` and ``F`` can not be learned, but allows more
# efficient batch processing.
#
import io
import torch
from torchtext.utils import download_from_url, extract_archive
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
# Download and unpack WikiText-2; extract_archive returns the member file paths.
test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url))
tokenizer = get_tokenizer('basic_english')
# Build the vocabulary from the training split only.
vocab = build_vocab_from_iterator(map(tokenizer,
                                      iter(io.open(train_filepath,
                                                   encoding="utf8"))))
def data_process(raw_text_iter):
    """Numericalise an iterable of raw text lines into one flat LongTensor.

    Lines that tokenize to nothing yield zero-length tensors, which are
    dropped before concatenation.
    """
    tensors = [torch.tensor([vocab[token] for token in tokenizer(item)],
                            dtype=torch.long)
               for item in raw_text_iter]
    nonempty = [t for t in tensors if t.numel() > 0]
    return torch.cat(tuple(nonempty))
train_data = data_process(iter(io.open(train_filepath, encoding="utf8")))
val_data = data_process(iter(io.open(valid_filepath, encoding="utf8")))
test_data = data_process(iter(io.open(test_filepath, encoding="utf8")))
# Batched data lives on GPU for the rest of the script.
device = torch.device("cuda")
def batchify(data, bsz):
    """Reshape a flat token stream into ``bsz`` parallel columns on ``device``.

    Trailing tokens that do not fill a whole column are discarded; columns
    are treated as independent sequences by the model.
    """
    nbatch = data.size(0) // bsz
    # Drop the remainder that would not fill every column.
    trimmed = data.narrow(0, 0, nbatch * bsz)
    columns = trimmed.view(bsz, -1).t().contiguous()
    return columns.to(device)
batch_size = 20
eval_batch_size = 10
# Columnise each split; evaluation uses a narrower batch.
train_data = batchify(train_data, batch_size)
val_data = batchify(val_data, eval_batch_size)
test_data = batchify(test_data, eval_batch_size)
######################################################################
# Functions to generate input and target sequence
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
######################################################################
# ``get_batch()`` function generates the input and target sequence for
# the transformer model. It subdivides the source data into chunks of
# length ``bptt``. For the language modeling task, the model needs the
# following words as ``Target``. For example, with a ``bptt`` value of 2,
# we'd get the following two Variables for ``i`` = 0:
#
# .. image:: ../_static/img/transformer_input_target.png
#
# It should be noted that the chunks are along dimension 0, consistent
# with the ``S`` dimension in the Transformer model. The batch dimension
# ``N`` is along dimension 1.
#
bptt = 35
def get_batch(source, i):
    """Slice one (input, target) pair of at most ``bptt`` tokens from ``source``.

    Targets are the inputs shifted one position ahead, flattened for the
    cross-entropy loss; the data tensor is transposed to batch-first layout
    as required by the pipeline.
    """
    seq_len = min(bptt, len(source) - 1 - i)
    window = source[i:i + seq_len]
    shifted = source[i + 1:i + 1 + seq_len]
    # Batch dimension first for pipeline parallelism.
    return window.t(), shifted.view(-1)
######################################################################
# Model scale and Pipe initialization
# -----------------------------------
#
######################################################################
# To demonstrate training large Transformer models using pipeline parallelism,
# we scale up the Transformer layers appropriately. We use an embedding
# dimension of 4096, hidden size of 4096, 16 attention heads and 12 total
# transformer layers (``nn.TransformerEncoderLayer``). This creates a model with
# **~1.4 billion** parameters.
#
# We need to initialize the `RPC Framework <https://pytorch.org/docs/stable/rpc.html>`__
# since Pipe depends on the RPC framework via `RRef <https://pytorch.org/docs/stable/rpc.html#rref>`__
# which allows for future expansion to cross host pipelining. We need to
# initialize the RPC framework with only a single worker since we're using a
# single process to drive multiple GPUs.
#
# The pipeline is then initialized with 8 transformer layers on one GPU and 8
# transformer layers on the other GPU.
#
# .. note::
# For efficiency purposes we ensure that the ``nn.Sequential`` passed to
# ``Pipe`` only consists of two elements (corresponding to two GPUs), this
# allows the Pipe to work with only two partitions and avoid any
# cross-partition overheads.
ntokens = len(vocab.stoi) # the size of vocabulary
emsize = 4096 # embedding dimension
nhid = 4096 # the dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 12 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 16 # the number of heads in the multiheadattention models
dropout = 0.2 # the dropout value
from torch.distributed import rpc
# Single-process RPC setup: Pipe relies on the RPC framework (via RRef)
# even when all pipeline stages live in one process.
tmpfile = tempfile.NamedTemporaryFile()
rpc.init_rpc(
    name="worker",
    rank=0,
    world_size=1,
    rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
        init_method="file://{}".format(tmpfile.name),
        # Specifying _transports and _channels is a workaround and we no longer
        # will have to specify _transports and _channels for PyTorch
        # versions >= 1.8.1
        _transports=["ibv", "uv"],
        _channels=["cuda_ipc", "cuda_basic"],
    )
)
num_gpus = 2
# Ceil division: number of transformer layers per GPU partition.
partition_len = ((nlayers - 1) // num_gpus) + 1
# Add encoder in the beginning.
tmp_list = [Encoder(ntokens, emsize, dropout).cuda(0)]
module_list = []
# Add all the necessary transformer blocks.
for i in range(nlayers):
    transformer_block = TransformerEncoderLayer(emsize, nhead, nhid, dropout)
    if i != 0 and i % (partition_len) == 0:
        # Partition boundary: seal the current stage and start a new one.
        module_list.append(nn.Sequential(*tmp_list))
        tmp_list = []
    # Stage index doubles as the CUDA device ordinal.
    device = i // (partition_len)
    tmp_list.append(transformer_block.to(device))
# Add decoder in the end.
tmp_list.append(Decoder(ntokens, emsize).cuda(num_gpus - 1))
module_list.append(nn.Sequential(*tmp_list))
from torch.distributed.pipeline.sync import Pipe
# Build the pipeline.
chunks = 8  # micro-batches each minibatch is split into
model = Pipe(torch.nn.Sequential(*module_list), chunks = chunks)
def get_total_params(module: torch.nn.Module):
    """Return the total number of scalar elements across all parameters of *module*."""
    return sum(p.numel() for p in module.parameters())
print ('Total parameters in model: {:,}'.format(get_total_params(model)))
######################################################################
# Run the model
# -------------
#
######################################################################
# `CrossEntropyLoss <https://pytorch.org/docs/master/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__
# is applied to track the loss and
# `SGD <https://pytorch.org/docs/master/optim.html?highlight=sgd#torch.optim.SGD>`__
# implements stochastic gradient descent method as the optimizer. The initial
# learning rate is set to 5.0. `StepLR <https://pytorch.org/docs/master/optim.html?highlight=steplr#torch.optim.lr_scheduler.StepLR>`__ is
# applied to adjust the learn rate through epochs. During the
# training, we use
# `nn.utils.clip_grad_norm\_ <https://pytorch.org/docs/master/nn.html?highlight=nn%20utils%20clip_grad_norm#torch.nn.utils.clip_grad_norm_>`__
# function to scale all the gradient together to prevent exploding.
#
criterion = nn.CrossEntropyLoss()
lr = 5.0 # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
import time
def train():
    """Run one training pass over ``train_data`` (50 batches max).

    Reads the module-level ``model``, ``optimizer``, ``scheduler``,
    ``criterion``, ``vocab``, ``bptt``, ``train_data`` and ``get_batch``;
    ``epoch`` is the loop variable of the surrounding epoch loop and is
    read here only for logging.
    """
    model.train() # Turn on the train mode
    total_loss = 0.
    start_time = time.time()
    ntokens = len(vocab.stoi)
    # Train only for 50 batches to keep script execution time low.
    nbatches = min(50 * bptt, train_data.size(0) - 1)
    for batch, i in enumerate(range(0, nbatches, bptt)):
        data, targets = get_batch(train_data, i)
        optimizer.zero_grad()
        # Since the Pipe is only within a single host and process the ``RRef``
        # returned by forward method is local to this node and can simply
        # retrieved via ``RRef.local_value()``.
        output = model(data).local_value()
        # Need to move targets to the device where the output of the
        # pipeline resides.
        loss = criterion(output.view(-1, ntokens), targets.cuda(1))
        loss.backward()
        # Clip the gradient norm to 0.5 to avoid exploding gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()
        total_loss += loss.item()
        log_interval = 10
        if batch % log_interval == 0 and batch > 0:
            # Report average loss/perplexity over the last log_interval batches.
            cur_loss = total_loss / log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | '
                  'lr {:02.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                    epoch, batch, nbatches // bptt, scheduler.get_lr()[0],
                    elapsed * 1000 / log_interval,
                    cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
def evaluate(eval_model, data_source):
    """Compute the average per-token loss of *eval_model* over *data_source*.

    Relies on the module-level ``vocab``, ``bptt``, ``criterion`` and
    ``get_batch``.  Only the first 50 batches are evaluated to keep the
    script's run time low.
    """
    eval_model.eval()  # switch off dropout etc. for evaluation
    vocab_size = len(vocab.stoi)
    # Evaluate only for 50 batches to keep script execution time low.
    limit = min(50 * bptt, data_source.size(0) - 1)
    loss_sum = 0.
    with torch.no_grad():
        for start in range(0, limit, bptt):
            data, targets = get_batch(data_source, start)
            # The Pipe forward returns an RRef local to this process.
            flat_logits = eval_model(data).local_value().view(-1, vocab_size)
            # Targets must live on the device holding the pipeline output.
            loss_sum += len(data) * criterion(flat_logits, targets.cuda(1)).item()
    return loss_sum / (len(data_source) - 1)
######################################################################
# Loop over epochs. Save the model if the validation loss is the best
# we've seen so far. Adjust the learning rate after each epoch.
best_val_loss = float("inf")
epochs = 3 # The number of epochs
best_model = None
for epoch in range(1, epochs + 1):
epoch_start_time = time.time()
train()
val_loss = evaluate(model, val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = model
scheduler.step()
######################################################################
# Evaluate the model with the test dataset
# -------------------------------------
#
######################################################################
# Apply the best model to check the result with the test dataset.
test_loss = evaluate(best_model, test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
######################################################################
# Output
# ------
#
######################################################################
#.. code-block:: py
#
# Total parameters in model: 1,847,087,215
# | epoch 1 | 10/ 50 batches | lr 5.00 | ms/batch 2387.45 | loss 42.16 | ppl 2036775646369743616.00
# | epoch 1 | 20/ 50 batches | lr 5.00 | ms/batch 2150.93 | loss 48.24 | ppl 891334049215401558016.00
# | epoch 1 | 30/ 50 batches | lr 5.00 | ms/batch 2155.23 | loss 34.66 | ppl 1125676483188404.62
# | epoch 1 | 40/ 50 batches | lr 5.00 | ms/batch 2158.42 | loss 38.87 | ppl 76287208340888368.00
# -----------------------------------------------------------------------------------------
# | end of epoch 1 | time: 119.65s | valid loss 2.95 | valid ppl 19.15
# -----------------------------------------------------------------------------------------
# | epoch 2 | 10/ 50 batches | lr 4.51 | ms/batch 2376.16 | loss 34.92 | ppl 1458001430957104.00
# | epoch 2 | 20/ 50 batches | lr 4.51 | ms/batch 2160.96 | loss 34.75 | ppl 1232463826541886.50
# | epoch 2 | 30/ 50 batches | lr 4.51 | ms/batch 2160.66 | loss 28.10 | ppl 1599598251136.51
# | epoch 2 | 40/ 50 batches | lr 4.51 | ms/batch 2160.07 | loss 20.25 | ppl 621174306.77
# -----------------------------------------------------------------------------------------
# | end of epoch 2 | time: 119.76s | valid loss 0.87 | valid ppl 2.38
# -----------------------------------------------------------------------------------------
# | epoch 3 | 10/ 50 batches | lr 4.29 | ms/batch 2376.49 | loss 13.20 | ppl 537727.23
# | epoch 3 | 20/ 50 batches | lr 4.29 | ms/batch 2160.12 | loss 10.98 | ppl 58548.58
# | epoch 3 | 30/ 50 batches | lr 4.29 | ms/batch 2160.05 | loss 12.01 | ppl 164152.79
# | epoch 3 | 40/ 50 batches | lr 4.29 | ms/batch 2160.03 | loss 10.63 | ppl 41348.00
# -----------------------------------------------------------------------------------------
# | end of epoch 3 | time: 119.76s | valid loss 0.78 | valid ppl 2.17
# -----------------------------------------------------------------------------------------
# =========================================================================================
# | End of training | test loss 0.69 | test ppl 1.99
# =========================================================================================
| [
"noreply@github.com"
] | noreply@github.com |
6dee1b5890f0ba2108624a4a64c8378c3f83a85a | 1f5d98c97ac9ff75b1d6b81f0a4a5110b05d4284 | /posts/api/views.py | a71629961d4f008f12bdb8916786235c9f681cc8 | [] | no_license | DukhDmytro/social_network | 70cdd4aeb1448fdbacce6d32f627b421b8614a8c | a8d563b17ffc90dc467c67150fd4f0e7aa5f3992 | refs/heads/master | 2022-12-12T21:04:17.354395 | 2020-03-03T13:34:45 | 2020-03-03T13:34:45 | 241,352,402 | 0 | 0 | null | 2022-12-08T03:38:56 | 2020-02-18T12:12:34 | Python | UTF-8 | Python | false | false | 1,764 | py | from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from posts.models import Post
from .serializers import PostSerializer
from .permissions import IsOwnerOrReadOnly
class PostViewSet(ModelViewSet):
    """CRUD endpoints for ``Post`` plus ``like``/``unlike`` actions.

    Posts are looked up by slug.  Mutating actions (update/partial_update/
    destroy) additionally require ownership; every other action only
    requires an authenticated user.
    """

    serializer_class = PostSerializer
    queryset = Post.objects.all()
    lookup_field = 'slug'
    pagination_class = PageNumberPagination

    def get_permissions(self):
        """Owner-only permissions for mutating actions, auth-only otherwise."""
        if self.action in ['update', 'partial_update', 'destroy']:
            self.permission_classes = IsAuthenticated, IsOwnerOrReadOnly
        else:
            self.permission_classes = IsAuthenticated,
        return super().get_permissions()

    def create(self, request, *args, **kwargs):
        """Create a post owned by the requesting user.

        Returns 201 with the serialized post on success, 400 with the
        validation errors otherwise.  (Bug fix: the original fell through
        and returned ``None`` for invalid payloads, which crashes the
        response cycle.)
        """
        serializer = PostSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(author=request.user)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True)
    def like(self, request, slug):
        """Record a like from the user, clearing any previous unlike."""
        post = get_object_or_404(Post, slug=slug)
        post.like.add(request.user)
        post.unlike.remove(request.user)
        post.save()
        return Response({'response': 'you like this post'}, status=status.HTTP_200_OK)

    @action(detail=True)
    def unlike(self, request, slug):
        """Record an unlike from the user, clearing any previous like."""
        post = get_object_or_404(Post, slug=slug)
        post.unlike.add(request.user)
        post.like.remove(request.user)
        post.save()
        return Response({'response': 'you unlike this post'}, status=status.HTTP_200_OK)
| [
"cowboybebop4991@gmail.com"
] | cowboybebop4991@gmail.com |
fc87922135a12612cfa4e49ab3c08ee5f38e1ebe | af69497f9f5fb62ee894646a1804494f167f25a7 | /genbci/run_wgan_SSVEP.py | b6cead987f96a4be0902f71c396b33af585bd50e | [] | no_license | dpstart/genbci | afa1cacff63080d12337b98e066e5ebb2c733c03 | aeccce878a5d6ba668f65d4d181874059fe1b8f9 | refs/heads/master | 2022-12-28T02:00:23.647306 | 2020-10-09T13:32:27 | 2020-10-09T13:32:27 | 260,474,268 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,280 | py | import argparse
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch
from genbci.generate.model import (
SSVEP_Discriminator as Discriminator,
SSVEP_Generator as Generator,
)
# from genbci.scripts import ssvep_sample
from genbci.util import init_torch_and_get_device, weights_init, get_exo_data
torch.set_num_threads(8)
parser = argparse.ArgumentParser()
parser.add_argument(
"--modelpath", type=str, default="models/", help="Path to dave model"
)
parser.add_argument(
"--n_epochs", type=int, default=5000, help="number of epochs of training"
)
parser.add_argument("--lr", type=float, default=0.0001, help="adam: learning rate")
parser.add_argument(
"--b1",
type=float,
default=0.1,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--b2",
type=float,
default=0.999,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--n_cpu",
type=int,
default=4,
help="number of cpu threads to use during batch generation",
)
parser.add_argument(
"--latent_dim", type=int, default=32, help="dimensionality of the latent space"
)
parser.add_argument(
"--img_size", type=int, default=28, help="size of each image dimension"
)
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument(
"--n_critic",
type=int,
default=5,
help="number of training steps for discriminator per iter",
)
parser.add_argument(
"--clip_value",
type=float,
default=0.01,
help="lower and upper clip value for disc. weights",
)
parser.add_argument(
"--sample_interval", type=int, default=200, help="interval between image samples"
)
parser.add_argument(
"--nz",
type=int,
default=64,
help="size of the latent z vector used as the generator input.",
)
opt = parser.parse_args()
opt.device = init_torch_and_get_device()
### Setting some defaults
opt.batch_size = 16
opt.dropout_level = 0.05
# opt.img_shape = (9, 1500)
opt.plot_steps = 250
opt.jobid = 2
opt.modelname = "ssvep_wgan%s"
if not os.path.exists(opt.modelpath):
os.makedirs(opt.modelpath)
# dataloader = torch.utils.data.DataLoader(
# dataset=ssvep_sample.dataset, batch_size=opt.batch_size, shuffle=True
# )
epochs_exo = get_exo_data(
"/Users/daniele/Desktop/thesis/library/genbci/ssvep/data/dataset-ssvep-exoskeleton",
plot=False,
)
data = epochs_exo.get_data()
labels = epochs_exo.events[:, 2] - 1
data = data[labels == 1, :, :]
labels = labels[labels == 1]
# Electrodes 2 and 3 should be O1 and O2 thus occipital
datatrain = torch.from_numpy(data[:, 1:3, :728]).float()
labels = torch.from_numpy(labels)
dataset = torch.utils.data.TensorDataset(datatrain, labels)
dataloader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=opt.batch_size, shuffle=True
)
def train_fn(dataloader, generator, discriminator, opt):
    """WGAN training loop.

    For every batch, the discriminator (critic) is trained on one real
    batch plus one freshly generated fake batch; the generator is updated
    only every ``opt.n_critic`` batches.  After each epoch ``eval_fn``
    plots diagnostics and checkpoints both models.
    """
    losses_d, losses_g = [], []
    for epoch in range(opt.n_epochs):
        for i, (real_imgs, _) in enumerate(dataloader):
            generator.train()
            discriminator.train()
            real_imgs = real_imgs.to(opt.device)
            # ---------------------
            # Train Discriminator
            # ---------------------
            # Sample noise for generator input
            z = torch.randn(opt.batch_size, opt.nz).to(opt.device)
            # Generate a batch of fake images
            fake_imgs = generator(z)
            # Let the discriminator judge and learn
            loss_real_d, loss_fake_d = discriminator.train_batch(real_imgs, fake_imgs)
            loss_d = loss_real_d + loss_fake_d
            losses_d.append(loss_d)
            # Train the generator every n_critic steps
            if i % opt.n_critic == 0:
                z = torch.randn(opt.batch_size, opt.nz).to(opt.device)
                loss_g = generator.train_batch(z, discriminator)
                losses_g.append(loss_g)
                print(
                    "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
                    % (epoch, opt.n_epochs, i, len(dataloader), loss_d, loss_g)
                )
        eval_fn(
            dataloader, generator, discriminator, epoch, opt, losses_d, losses_g
        )
def eval_fn(dataloader, generator, discriminator, epoch, opt, losses_d, losses_g):
    """Plot diagnostics every ``opt.plot_steps`` epochs and checkpoint models.

    Diagnostics: mean FFT amplitude spectrum of generated windows vs. the
    real training data (frequency axis built with d=1/250.0, i.e. a 250 Hz
    sampling rate is assumed), a sheet of 10 fake samples (first channel
    only) and both loss curves.  All figures go under ``opt.modelpath``;
    the discriminator/generator weights are saved on every call.
    """
    generator.eval()
    discriminator.eval()
    if epoch % opt.plot_steps == 0:
        freqs_tmp = np.fft.rfftfreq(dataset.tensors[0].shape[2], d=1 / 250.0)
        # Compute FFT frequencies
        train_fft = np.fft.rfft(dataset.tensors[0], axis=2)
        # Compute FFT on training data
        train_amps = np.abs(train_fft).mean(axis=1).mean(axis=0)
        # Noise for generator.  Bug fix: train_fn samples the latent vector
        # with torch.randn (standard normal); evaluating with torch.rand
        # (uniform on [0, 1)) fed the generator a distribution it was never
        # trained on, skewing the plotted spectra.
        z = torch.randn(opt.batch_size, opt.nz).to(opt.device)
        # Get a batch of fake data and compute FFT
        batch_fake = generator(z)
        fake_fft = np.fft.rfft(batch_fake.data.cpu().numpy(), axis=2)
        fake_amps = np.abs(fake_fft).mean(axis=1).mean(axis=0)
        plt.figure()
        plt.plot(freqs_tmp, np.log(fake_amps), label="Fake")
        plt.plot(freqs_tmp, np.log(train_amps), label="Real")
        plt.title("Frequency Spectrum")
        plt.xlabel("Hz")
        plt.legend()
        plt.savefig(
            os.path.join(
                opt.modelpath, opt.modelname % opt.jobid + "_fft_%d.png" % epoch
            )
        )
        plt.close()
        batch_fake = batch_fake.data.cpu().numpy()
        plt.figure(figsize=(10, 10))
        for i in range(10):
            plt.subplot(10, 1, i + 1)
            # Working with 2 channels, plot only first one. A bit ugly.
            plt.plot(batch_fake[i, 0, ...].squeeze())
            plt.xticks((), ())
            plt.yticks((), ())
        plt.subplots_adjust(hspace=0)
        plt.savefig(
            os.path.join(
                opt.modelpath, opt.modelname % opt.jobid + "_fakes_%d.png" % epoch
            )
        )
        plt.close()
        plt.figure(figsize=(10, 15))
        plt.plot(np.asarray(losses_d))
        plt.title("Loss Discriminator")
        plt.savefig(
            os.path.join(
                opt.modelpath, opt.modelname % opt.jobid + "loss_disc_%d.png" % epoch
            )
        )
        plt.close()
        plt.figure(figsize=(10, 15))
        plt.plot(np.asarray(losses_g))
        plt.title("Loss generator")
        plt.savefig(
            os.path.join(
                opt.modelpath, opt.modelname % opt.jobid + "loss_gen_%d.png" % epoch
            )
        )
        plt.close()
        # Checkpoint both networks (inside the plot interval, as before).
        discriminator.save_model(
            os.path.join(opt.modelpath, opt.modelname % opt.jobid + ".disc")
        )
        generator.save_model(
            os.path.join(opt.modelpath, opt.modelname % opt.jobid + ".gen")
        )
# Initialize generator and discriminator
discriminator = Discriminator()
discriminator.apply(weights_init)
discriminator.train_init()
discriminator.to(opt.device)
generator = Generator(opt.nz)
generator.apply(weights_init)
generator.train_init()
generator.to(opt.device)
train_fn(dataloader, generator, discriminator, opt)
| [
"danielepaliotta96@gmail.com"
] | danielepaliotta96@gmail.com |
8716e411d7d36e6f03e049d7e4f758924e44cc44 | 169d68e02baceb04ce672c7771db351f71a1e47e | /untitled1/venv/Scripts/easy_install-script.py | bf6482875831339e7b62872ab6bf7d1f6f64f1fc | [] | no_license | yycarry1994/Pycharm_Project | 056959b2b14c171d1d5d87388ce0bd9bc97bd7d5 | da0d6176cccc365554aa46d47f33e7e1120e6e33 | refs/heads/master | 2020-06-28T10:12:29.711669 | 2019-08-02T09:23:41 | 2019-08-02T09:23:41 | 200,204,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #!C:\Users\86181\PycharmProjects\untitled1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"yangbin-3@thunisoft.com"
] | yangbin-3@thunisoft.com |
601e04d3f95736775c8e3eee23c2ea0fc2a6192b | 216ddf61c5be758efde2b50fa476ada5354aced5 | /galaxy/gen_test.py | dddbbbb2c8f96cf24df4b8d0981a9c43604dbf60 | [] | no_license | cameronfabbri/ICGANs | 4600020238d6884b710ea0b035b84e86c73705f1 | d6be1a3e752959754be1dbf8af2ead8f75048b37 | refs/heads/master | 2021-01-16T18:11:38.596295 | 2017-11-26T22:35:16 | 2017-11-26T22:35:16 | 100,050,914 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | '''
Generates a dataset of encodings from real images using the trained encoder.
'''
import matplotlib.pyplot as plt
from tqdm import tqdm
from matplotlib.pyplot import cm
import scipy.misc as misc
import tensorflow as tf
import tensorflow.contrib.layers as tcl
import cPickle as pickle
import numpy as np
import argparse
import random
import ntpath
import glob
import time
import sys
import cv2
import os
sys.path.insert(0, '../ops/')
from tf_ops import *
import data_ops
from nets import *
if __name__ == '__main__':
    # CLI: only the checkpoint directory is mandatory; the rest has defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument('--CHECKPOINT_DIR', required=True,help='checkpoint directory',type=str)
    parser.add_argument('--DATASET', required=False,help='The DATASET to use', type=str,default='celeba')
    parser.add_argument('--DATA_DIR', required=False,help='Directory where data is', type=str,default='./')
    parser.add_argument('--OUTPUT_DIR', required=False,help='Directory to save data', type=str,default='./')
    parser.add_argument('--ACTIVATION', required=False,help='Activation function', type=str,default='lrelu')
    a = parser.parse_args()
    CHECKPOINT_DIR = a.CHECKPOINT_DIR
    DATASET = a.DATASET
    DATA_DIR = a.DATA_DIR
    OUTPUT_DIR = a.OUTPUT_DIR
    ACTIVATION = a.ACTIVATION
    # Ensure the output directory exists.
    # NOTE(review): the bare except silently swallows every failure here,
    # not just "directory already exists".
    try: os.makedirs(OUTPUT_DIR)
    except: pass
    # placeholders for data going into the network
    global_step = tf.Variable(0, name='global_step', trainable=False)
    images = tf.placeholder(tf.float32, shape=(1, 64, 64, 3), name='images')
    # Encoder graph: maps a single 64x64 RGB image to its latent vector.
    encoded = encZ(images, ACTIVATION)
    saver = tf.train.Saver(max_to_keep=1)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init)
    # restore previous model if there is one
    ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
    if ckpt and ckpt.model_checkpoint_path:
        print "Restoring previous model..."
        try:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored"
        except:
            print "Could not restore model"
            pass
    print 'Loading data...'
    # images and annots: _, __
    train_images, train_annots, test_images, test_annots, paths = data_ops.load_galaxy(DATA_DIR)
    test_len = len(test_annots)
    print 'test num:',test_len
    info = {}
    # want to write out a file with the image path and z vector
    # Encode each test image one at a time (placeholder batch size is 1).
    for p,img,label in tqdm(zip(paths, test_images, test_annots)):
        img = data_ops.normalize(img)
        batch_images = np.expand_dims(img, 0)
        encoding = sess.run([encoded], feed_dict={images:batch_images})[0][0]
        info[p] = [encoding, label]
    # write out dictionary to pickle file
    p = open(OUTPUT_DIR+'data.pkl', 'wb')
    data = pickle.dumps(info)
    p.write(data)
    p.close()
| [
"cameronfabbri@gmail.com"
] | cameronfabbri@gmail.com |
4fed9fbfe9b53e716ee5f6989ea66c9f22c91f8b | 2c311a5ce7dd08fd56de05dfcbcf2717406e12a3 | /examples/bootswatch_example/bootswatch_example/urls.py | 21479c51d35af7cf6f0a144ed704deb4bde8fc7a | [
"BSD-2-Clause"
] | permissive | nschlemm/django-themeswitch | b2e40c067fa008cb32f02a6114b2c374e644a1cc | 371495b05abe14fddc2cabc9468f4fb2aedf77f7 | refs/heads/master | 2021-01-22T10:19:36.535062 | 2014-04-08T15:19:48 | 2014-04-08T15:19:48 | 17,511,440 | 11 | 1 | null | 2014-03-07T16:19:16 | 2014-03-07T11:00:24 | Python | UTF-8 | Python | false | false | 192 | py | from django.conf.urls import patterns, include, url
# URL configuration for the bootswatch example project.
urlpatterns = patterns(
    '',
    # Theme-switching endpoints provided by django-themeswitch.
    url(r'', include('themeswitch.urls')),
    # Landing page.
    url(r'^$', 'bootswatch_example.views.home', name='home'),
)
| [
"moritz@tibidat.com"
] | moritz@tibidat.com |
c8ae13ad78da181556b71e88b5de8e8f6e4b91ab | 38e622a5daafbf115c88e7a1def650e14eedc455 | /apps/users/adminx.py | 15a5d9dfdfcc6058c2809c0e8ee1c43dee8e9da1 | [] | no_license | sunysir/Django-1.10-xadmin-mooc | aa66ae39c4204d24cb472a706382b606b48f9b19 | 4188590daedff52d34e2155c78e68735f5cdf690 | refs/heads/master | 2020-03-21T15:57:25.423422 | 2018-07-29T14:28:36 | 2018-07-29T14:28:36 | 138,742,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # _*_ encoding: utf-8 _*_
import xadmin
from users.models import Banner, EmailVerifyRecord
from xadmin import views
__author__ = 'suny'
__date__ = '2018/7/20 16:27'
class BaseSetting(object):
    # Per-view xadmin settings: enable the theme switcher and allow picking
    # themes from the online Bootswatch catalogue.
    enable_themes = True
    use_bootswatch = True
class GlobalSetting(object):
    # Site-wide admin chrome: page title, footer text, and a collapsible
    # ("accordion") left-hand menu.
    site_title = '慕学在线后台管理系统'
    site_footer = '慕学在线网'
    menu_style = 'accordion'
class EmailVerifyRecordAdmin(object):
    # Changelist configuration for email verification codes: visible
    # columns, sidebar filters, and searchable fields.
    list_display = ['code', 'email', 'send_type', 'send_time']
    list_filter = ['code', 'email', 'send_type', 'send_time']
    search_fields = ['code', 'email', 'send_type']
class BannerAdmin(object):
    # Changelist configuration for carousel banners.
    # NOTE(review): 'titile' mirrors a (presumably misspelled) field name on
    # the Banner model -- renaming it here alone would break; it needs a
    # coordinated model rename + migration.
    list_display = ['titile', 'image', 'url', 'index', 'add_time']
    list_filter = ['titile', 'image', 'url', 'index', 'add_time']
    search_fields = ['titile', 'image', 'url', 'index']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| [
"694190253@qq.com"
] | 694190253@qq.com |
94fa74e2b0d75f11f63daaf66cc8d206618c6999 | a9dd70520385b155d2f929127616d496fd89cd53 | /onecollect.py | 4ccf00cf62c39d3e08e3b1e5a884cf93f6fa30d1 | [] | no_license | singer0503/auto_show_mac_address | ce6917e1d05fab40f3d2a9c1017003eb30096580 | 9cce22d167a734335de04dde65d07359c8b8a587 | refs/heads/main | 2023-01-01T14:26:07.620655 | 2020-10-20T10:07:51 | 2020-10-20T10:07:51 | 305,604,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,659 | py | import time,multiprocessing,os
import paramiko #Note: 提供 ssh 功能模組
import re #Noet: 正則表達式模組
import sys #Noet: 檢測錯誤模組
import traceback #Noet: 檢測錯誤模組
try:
from config import * #Note: 把 config.py 讀進來
except:
hosts = ''
username = ''
password = ''
cmds = ''
stdmore = re.compile(r"-[\S\s]*[Mm]ore[\S\s]*-")
hostname_endcondition = re.compile(r"\S+[#>\]]\s*$")
class ssh_comm(object):
    """Interactive SSH session to a network device CLI.

    Opens a paramiko shell, learns the device prompt (hostname) from the
    first prompt line, and then drives commands through the shell while
    handling ``--More--`` style pagination and Y/N confirmation prompts.
    """
    def __init__(self,address,username,password,port=22):
        """Connect to *address* and learn the device prompt into ``self.hostname``."""
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #Note: auto-accept unknown host keys instead of prompting yes/no when the host is missing from known_hosts
        print('ssh_comm connection...')
        self.client.connect(address, port=port, username=username, password=password, timeout=10, look_for_keys=True,allow_agent=False)
        self.shell = self.client.invoke_shell()
        # Poll until the device banner/prompt arrives on the channel.
        while True:
            time.sleep(0.5)
            if self.shell.recv_ready() or self.shell.recv_stderr_ready():
                break
        self.shell.recv(4096).decode('utf-8')  # drain the banner; content unused
        self.shell.send('\n')
        output = self.shell.recv(4096).decode('utf-8')
        output = output  # no-op; kept as-is (likely leftover)
        # Keep reading until a full prompt line appears, then take the bare
        # hostname from it (stripping the prompt characters <, >, [, ], #).
        while True:
            if hostname_endcondition.findall(output):
                self.hostname = hostname_endcondition.findall(output)[0].strip().strip('<>[]#')
                break
            while True:
                time.sleep(0.1)
                if self.shell.recv_ready() or self.shell.recv_stderr_ready():
                    break
            output += self.shell.recv(4096).decode('utf-8')
    def recv_all(self,interval,stdjudge,stdconfirm):
        """Read shell output until the prompt or a pager line appears.

        interval   -- polling sleep between reads, in seconds
        stdjudge   -- substring marking a confirmation question ('' = none)
        stdconfirm -- reply sent when *stdjudge* is seen (e.g. 'Y')

        Returns the raw text received; stops early on a ``--More--`` pager
        line so the caller can page through (see send_command).
        """
        # A line ending with the learned prompt means the command finished.
        endcondition = re.compile(r"%s\S*[#>\]]\s*$"%self.hostname)
        while True:
            time.sleep(interval)
            if self.shell.recv_ready() or self.shell.recv_stderr_ready():
                break
        output = self.shell.recv(99999).decode('utf-8')
        # Automatically answer confirmation questions (e.g. 'Y/N').
        if (stdjudge != '') and (stdjudge in output):
            self.shell.send(stdconfirm+'\n')
        while True:
            # Stop on a pager line ('--More--') or on the prompt.
            if stdmore.findall(output.split('\n')[-1]):
                break
            elif endcondition.findall(output):
                break
            while True:
                time.sleep(interval)
                if self.shell.recv_ready() or self.shell.recv_stderr_ready():
                    break
            output += self.shell.recv(99999).decode('utf-8')
        return output
    def send_command(self,command_interval,command,stdjudge,stdconfirm):
        """Send one command line and return everything it printed.

        Commands containing 'hostname'/'sysname' are assumed to change the
        device prompt, so the cached prompt is re-learned instead of reading
        the full output.  For other commands, '--More--' pagination is driven
        by sending a space until the prompt returns.
        """
        command += "\n"
        self.shell.send(command)
        if ('hostname' in command) or ('sysname' in command):
            while True:
                time.sleep(0.5)
                if self.shell.recv_ready() or self.shell.recv_stderr_ready():
                    break
            stdout = self.shell.recv(4096).decode('utf-8')
            # Re-learn the prompt from the last matching line of output.
            self.hostname = hostname_endcondition.findall(stdout)[-1].strip().strip('<>[]#')
        else:
            stdout = self.recv_all(interval=command_interval,stdjudge=stdjudge,stdconfirm=stdconfirm)
            data = stdout.split('\n')
            # Page through '--More--' output by sending a space each time.
            while stdmore.findall(data[-1]):
                self.shell.send(" ")
                tmp = self.recv_all(interval=command_interval,stdjudge=stdjudge,stdconfirm=stdconfirm)
                data = tmp.split('\n')
                stdout += tmp
        return stdout
    def close(self):
        """Close the underlying SSH client."""
        if self.client is not None:
            self.client.close()
    def run(self,cmds,command_interval,stdjudge,stdconfirm):
        """Execute every non-blank line of *cmds*; return ('success', all output)."""
        stdout = ''
        rc = 'success'
        for cmd in cmds.split('\n'):
            if cmd.strip():
                stdout += self.send_command(command=cmd,command_interval=command_interval,stdjudge=stdjudge,stdconfirm=stdconfirm)
        return rc, stdout
def writeoutput(address,username,password,cmds):
    """Connect to *address*, run *cmds*, and save the output to a text file.

    Returns ``(address, status)`` where status is 'success' or
    'connection failed'.  Output is written to
    ``<hostname>/<hostname>-<timestamp>.txt`` under the current directory.
    """
    try:
        connection = ssh_comm(address=address, username=username, password=password, port=22)
    except Exception as e:
        error_class = e.__class__.__name__ # exception type name
        detail = e.args[0] # exception detail message
        cl, exc, tb = sys.exc_info() # current call stack
        lastCallStack = traceback.extract_tb(tb)[-1] # last frame of the call stack
        fileName = lastCallStack[0] # file where the error occurred
        lineNum = lastCallStack[1] # line number of the error
        funcName = lastCallStack[2] # function where the error occurred
        errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(fileName, lineNum, funcName, error_class, detail)
        print(errMsg)
        rc = 'connection failed'
        return address,rc
    # Auto-answer 'Y' whenever the device asks a 'Y/N' question.
    stdjudge = 'Y/N'
    stdconfirm = 'Y'
    rc,stdout = connection.run(cmds=cmds,command_interval=0.1,stdjudge=stdjudge,stdconfirm=stdconfirm)
    connection.close()
    hostname = connection.hostname.split('/')[-1].split(':')[-1]
    #Note: if needed, the hostname could instead be fetched via SNMP v2c:
    #hostname = os.popen('/usr/local/net-snmp/bin/snmpwalk -v 2c -c tcnw %s sysname -Oqv'%address).read().strip()
    #Note: create one directory per device hostname
    if not os.path.exists(hostname):
        os.makedirs(hostname)
    filename = hostname+'-'+time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))
    #Note: write the collected output to a timestamped txt file
    with open ('%s/%s.txt'%(hostname,filename),'w') as f:
        f.write(stdout)
    return address,rc
#Note: 主程式呼叫 main
def main(username,password,hosts,cmds):
    """Interactively fill in missing settings, then collect from all hosts.

    Empty config values trigger a console prompt.  Hosts are processed in
    parallel (at most 5 worker processes); each host's output is written to
    a per-host file by ``writeoutput`` and a status line is printed per host.
    """
    print('main('+username+','+password+','+hosts+','+cmds+')')
    # Bug fix: the rest of the file is Python 3 style (print() calls,
    # bytes.decode), where ``raw_input`` does not exist and raises
    # NameError.  A conditional expression only evaluates the chosen
    # branch, so this stays safe on Python 2 as well.
    prompt = raw_input if sys.version_info[0] == 2 else input
    if username == '':
        username = prompt('請輸入使用者名稱:')
    if password == '':
        password = prompt('請輸入密碼: ')
    if hosts == '':
        hosts = prompt('請輸入主機地址: ')
    if cmds == '':
        cmds = prompt('請輸入採集命令: ')
    host_list = hosts.split('\n')
    # Cap the pool at 5 concurrent worker processes.
    if len(host_list) < 5:
        processnum = len(host_list)
    else:
        processnum = 5
    pool = multiprocessing.Pool(processes=processnum)
    process = []
    for host in host_list:
        if host:
            process.append(pool.apply_async(writeoutput, (host.strip(),username,password,cmds)))
    pool.close()
    pool.join()
    for o in process:
        # Bug fix: writeoutput returns (address, status); the original
        # unpacked them as (rc, ip) and therefore printed them swapped.
        ip, rc = o.get()
        print('[ '+ip+' : '+rc+' ]')
#Note: 主程式入口
if __name__== '__main__':
main(username,password,hosts,cmds)
| [
"singer0503@gmail.com"
] | singer0503@gmail.com |
1ec736c4ee5667fdac6c32e4c815c223c8fd9563 | 183892abe5d5481d037e5568c4505f0fc3370a23 | /nn_sa.py | 125440347ced3cad9e15a827a21f423212b28fd8 | [] | no_license | nani67/SportsAnalytics_ML_DL | 0c921b384a6f6c1f0a4de57ff1a556979f4be310 | 3c0f492130324e813e33444e7ee1a197b2e5697c | refs/heads/master | 2020-08-28T10:49:39.629789 | 2019-10-27T12:47:27 | 2019-10-27T12:47:27 | 217,677,816 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import keras
from keras.models import Sequential
from keras.layers import Dense
import pandas as pd
import numpy as np
# Load the match data and split it into features and target.
dataset = pd.read_csv("sample_data.csv")
# Bug fix: the target column "FTR" must not remain in the feature matrix,
# otherwise the network is trained on the answer (target leakage).
X = dataset.drop(["Date", "Referee", "FTR"], axis=1)
Y = dataset["FTR"]
# Simple positional split: first 15 rows train, the rest test.
X_train = X[0:15]
X_test = X[15:]
Y_train = Y[0:15]
Y_test = Y[15:]
# Two-layer feed-forward network.  The input width follows the actual
# feature count instead of a hard-coded 20, so column changes cannot
# silently break the model.
model = Sequential()
model.add(Dense(units=16, activation='relu', kernel_initializer='uniform',
                input_dim=X_train.shape[1]))
model.add(Dense(units=1, activation='relu', kernel_initializer='uniform'))
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
history = model.fit(X_train, Y_train, epochs=300, batch_size=50)
import matplotlib.pyplot as plt
# Accuracy curve over training epochs.
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Loss curve over training epochs.
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
| [
"pnani18dec@gmail.com"
] | pnani18dec@gmail.com |
c9baa64f4b82950354c16fb73ca806ec1d27be5f | a9c3be4041c2a93fcdf3407a03ca33ceb839dc12 | /Hostel Management Project/hostel_mgmt/right_frame.py | 25d8a4c86dbe4ae8f65cebd1bd6f2edf351c440e | [] | no_license | Soham-Chatterjee/Hostel_mgmt | 082bcbeb76fe06ddf606b517c8d690ebb7184014 | 8225181ff8cc1fbcf6c9cf35087d9bd6123ce5eb | refs/heads/main | 2023-02-20T17:15:36.119611 | 2021-01-23T16:25:35 | 2021-01-23T16:25:35 | 317,414,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | import tkinter
from hostel_mgmt import root
from PIL import Image, ImageTk
from hostel_mgmt.design import right_frame
def on_enter(btn, color="bisque"):
    """<Enter> handler: highlight *btn* by switching its background to *color*.

    The default keeps the original hover color; the new parameter
    generalizes the handler for other widgets/palettes.
    """
    btn['bg'] = color
def on_leave(btn, color="bisque3"):
    """<Leave> handler: restore *btn*'s background to the idle *color*.

    The default keeps the original idle color; the new parameter
    generalizes the handler for other widgets/palettes.
    """
    btn['bg'] = color
image_load = Image.open("./Fee Chart.png")
image_render = ImageTk.PhotoImage(image_load)
pos = {'x': 12, 'btn1': 10, 'btn2': 60, 'btn3': 110, 'btn4': 160, 'btn5': 210, 'btn6': 260, 'btn7':310}
# Create the five sidebar buttons.  All share the same look; only the label
# differs.  (Bug fix: the fifth button was labelled "Button 4".)
btn_list = [
    tkinter.Button(
        right_frame, text="Button %d" % idx, bg="bisque3",
        activebackground="bisque3", relief="flat",
        font=("Courier New", 12), cursor="hand2", width=26)
    for idx in range(1, 6)
]
# Keep the individual names for any external users of this module.
btn1, btn2, btn3, btn4, btn5 = btn_list

# Fee chart image below the buttons.
img = tkinter.Label(right_frame, image=image_render)
img.image = image_render  # keep a reference so Tk does not garbage-collect it
img.config(bg='bisque4')
img.place(x=5, y=435)

# Hover highlighting for every button.  The default argument pins each
# button to its own handler; a plain ``lambda event: on_enter(b)`` would
# late-bind ``b`` to the last loop value.
for b in btn_list:
    b.bind("<Enter>", lambda event, b=b: on_enter(b))
    b.bind("<Leave>", lambda event, b=b: on_leave(b))
| [
"noreply@github.com"
] | noreply@github.com |
3d9f60873d750960d9d30489e80d15c58a53b641 | 152d5204595c4f38be21d85907af54012fb3fece | /python/A1014280203/0/0.py | d1aa0de267004157929207e14a6341ce63895ea3 | [] | no_license | Fayostyle/spiderstudy | 09f7dc0792c64083cbdb7e5b19a025dc05d5d1f2 | 6687e9a192f83f125d532b5df898790acc47f965 | refs/heads/master | 2022-11-30T10:45:21.260739 | 2019-09-19T07:31:50 | 2019-09-19T07:31:50 | 132,615,459 | 0 | 0 | null | 2022-11-22T00:37:43 | 2018-05-08T13:47:01 | HTML | UTF-8 | Python | false | false | 252 | py | from PIL import Image, ImageFont, ImageDraw
image = Image.open('0.png')
w, h = image.size
font = ImageFont.truetype('arial.ttf', 50)
draw = ImageDraw.Draw(image)
draw.text((4*w/5, h/5), '5', fill=(255, 10, 10), font=font)
image.save('0.0.png', 'png')
| [
"phuang5945@163.com"
] | phuang5945@163.com |
5c4f26641289e95cd612cadb24473c07e4a66f3e | 94551b3b0513f4f2b3930d9c28b36de079fac487 | /config_param_creator.py | 47c0c32a11d039deac4b9fd6858cc6a93e1a3bf1 | [] | no_license | KWiecko/su2_gui_pub | 92b0f10f0a208c57cad62b216b0f1d1e4e1e1566 | 8d781f1a3c1c231527c47f1289277a7851a64329 | refs/heads/master | 2020-07-13T07:19:52.893502 | 2019-09-25T07:36:55 | 2019-09-25T07:36:55 | 205,030,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,470 | py | import pyforms
from pyforms.basewidget import BaseWidget
from pyforms.controls import ControlButton, ControlBase
from su2_basic_widget import SU2BasicWidget
from su2_config_creator import SU2Config
from config_param_creator_ctrl import ConfigParamCreatorCtrl
class ConfigParamCreator(SU2BasicWidget):
    """Pyforms widget for defining a new SU2 config parameter.

    This class is a thin view: all behavior (building the group boxes,
    wiring the buttons, writing the parameter into the config) lives in
    the ConfigParamCreatorCtrl created in __init__.  The properties below
    only expose the widget's controls so the controller can reach them.
    """
    # Leftover notes on the base-class constructor signature:
    # label = '', initial_max_width = None, initial_max_height = None,
    # initial_min_width = None, initial_min_height = None)
    # @property
    # def param_name_ctrl(self) -> ControlBase:
    #     return self._param_name_ctrl
    #
    # @param_name_ctrl.setter
    # def param_name_ctrl(self, new_val: ControlBase):
    #     self._param_name_ctrl = new_val
    # Group box holding the 'parameter name' control.
    @property
    def param_name_ctrl_grpbx(self) -> ControlBase:
        return self._param_name_ctrl_grpbx
    @param_name_ctrl_grpbx.setter
    def param_name_ctrl_grpbx(self, new_val: ControlBase):
        self._param_name_ctrl_grpbx = new_val
    # @property
    # def allwd_vals_ctrl(self) -> ControlBase:
    #     return self._allwd_vals_ctrl
    #
    # @allwd_vals_ctrl.setter
    # def allwd_vals_ctrl(self, new_val: ControlBase):
    #     self._allwd_vals_ctrl = new_val
    # Group box holding the 'allowed values' control.
    @property
    def allwd_vals_ctrl_grpbx(self) -> ControlBase:
        return self._allwd_vals_ctrl_grpbx
    @allwd_vals_ctrl_grpbx.setter
    def allwd_vals_ctrl_grpbx(self, new_val: ControlBase):
        self._allwd_vals_ctrl_grpbx = new_val
    # @property
    # def default_val_ctrl(self) -> ControlBase:
    #     return self._default_val_ctrl
    #
    # @default_val_ctrl.setter
    # def default_val_ctrl(self, new_val: ControlBase):
    #     self._default_val_ctrl = new_val
    # Group box holding the 'default value' control.
    @property
    def default_val_ctrl_grpbx(self) -> ControlBase:
        return self._default_val_ctrl_grpbx
    @default_val_ctrl_grpbx.setter
    def default_val_ctrl_grpbx(self, new_val: ControlBase):
        self._default_val_ctrl_grpbx = new_val
    # @property
    # def tooltip_ctrl(self) -> ControlBase:
    #     return self._tooltip_ctrl
    #
    # @tooltip_ctrl.setter
    # def tooltip_ctrl(self, new_val: ControlBase):
    #     self._tooltip_ctrl = new_val
    # Group box holding the 'tooltip' control.
    @property
    def tooltip_ctrl_grpbx(self) -> ControlBase:
        return self._tooltip_ctrl_grpbx
    @tooltip_ctrl_grpbx.setter
    def tooltip_ctrl_grpbx(self, new_val: ControlBase):
        self._tooltip_ctrl_grpbx = new_val
    # The config-file creator widget this param creator is attached to.
    @property
    def ctrld_cfg_f_creator(self) -> object:
        return self._ctrld_cfg_f_creator
    @ctrld_cfg_f_creator.setter
    def ctrld_cfg_f_creator(self, new_val: object):
        self._ctrld_cfg_f_creator = new_val
    # Controller that owns all behavior for this widget.
    @property
    def config_field_creator_ctr(self) -> ConfigParamCreatorCtrl:
        return self._config_field_creator_ctr
    @config_field_creator_ctr.setter
    def config_field_creator_ctr(self, new_val: ConfigParamCreatorCtrl):
        self._config_field_creator_ctr = new_val
    # Button that commits the new parameter.
    @property
    def set_param_button(self) -> ControlButton:
        return self._set_param_button
    @set_param_button.setter
    def set_param_button(self, new_val: ControlButton):
        self._set_param_button = new_val
    # Button that dismisses the dialog without changes.
    @property
    def cancel_button(self) -> ControlButton:
        return self._cancel_button
    @cancel_button.setter
    def cancel_button(self, new_val: ControlButton):
        self._cancel_button = new_val
    def __init__(
            self, tabs_ctrl: object, label='Config param creator',
            initial_max_width: int = 400,
            initial_max_height: int = 700, initial_min_width: int = 200,
            initial_min_height: int = 500,
            su2_cfg_obj: SU2Config = None,  # {'example_sect': {}},
            des_cfg_section: str = 'INPUT_OUTPUT_INFORMATION'):
        """Build the widget and hand control to ConfigParamCreatorCtrl.

        :param tabs_ctrl: parent tabs controller the new param is added to
        :param label: window title
        :param su2_cfg_obj: config being edited; a fresh SU2Config is
            created when None is passed
        :param des_cfg_section: config section the new parameter goes into
        """
        super(ConfigParamCreator, self).__init__(
            label=label, initial_max_width=initial_max_width,
            initial_max_height=initial_max_height,
            initial_min_width=initial_min_width,
            initial_min_height=initial_min_height)
        if not su2_cfg_obj:
            print('SU2 cfg was not found')
            su2_cfg_obj = SU2Config()
            # NOTE(review): input() blocks this (GUI) thread waiting for
            # console input — looks like leftover debugging; confirm before
            # shipping.
            input('SU2 cfg was not found')
        self.config_field_creator_ctr = \
            ConfigParamCreatorCtrl(
                su2_cfg_obj=su2_cfg_obj, des_cfg_section=des_cfg_section,
                ctrld_cfg_f_creator=self, tabs_ctrl=tabs_ctrl)
if __name__ == '__main__':
    # Manual-testing entry point: launch the creator as a standalone pyforms app.
    pyforms.start_app(ConfigParamCreator, geometry=(400, 500, 500, 500))
    # test_cfc = ConfigFieldCreator()
"konrad.wiecko@gmail.com"
] | konrad.wiecko@gmail.com |
49d91b038609858a956b8fc002568e272efc77f6 | f7c3b1f65cc4d6dba1dc9dcf70f523497a14d791 | /mujoco/tf_commons/ops.py | ff3b576def5ff27fcc5369b938fb8a9e04891c09 | [
"MIT"
] | permissive | hiwonjoon/ICML2019-TREX | b9cac1ac5d97b22374a92f2f3cf5d8956cdb2482 | 44f92b61ca6c79ac22d468382d4f2fbee164fb7a | refs/heads/master | 2021-06-16T15:52:33.325054 | 2021-04-10T04:57:24 | 2021-04-10T04:57:24 | 184,654,702 | 76 | 24 | MIT | 2020-01-28T22:11:19 | 2019-05-02T21:36:24 | Python | UTF-8 | Python | false | false | 19,966 | py | import tensorflow as tf
class Conv2d(object) :
    """Plain 2-D convolution layer: trainable kernel + bias via tf.nn.conv2d."""

    def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2,
                 stddev=0.02, data_format='NCHW',padding='SAME') :
        with tf.variable_scope(name):
            assert data_format in ('NCHW', 'NHWC')
            self.w = tf.get_variable(
                'w', [k_h, k_w, input_dim, output_dim],
                initializer=tf.truncated_normal_initializer(stddev=stddev))
            self.b = tf.get_variable(
                'b', [output_dim], initializer=tf.constant_initializer(0.0))
        # Batch and channel strides stay 1; position depends on the layout.
        if data_format == 'NCHW':
            self.strides = [1, 1, d_h, d_w]
        else:
            self.strides = [1, d_h, d_w, 1]
        self.data_format = data_format
        self.padding = padding

    def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
        # Callers may substitute their own kernel/bias (e.g. shared or EMA weights).
        kernel = self.w if w is None else w
        bias = self.b if b is None else b
        if self.data_format == 'NCHW':
            conv = tf.nn.conv2d(input_var, kernel,
                                use_cudnn_on_gpu=True, data_format='NCHW',
                                strides=self.strides, padding=self.padding)
        else:
            conv = tf.nn.conv2d(input_var, kernel, data_format='NHWC',
                                strides=self.strides, padding=self.padding)
        return tf.nn.bias_add(conv, bias, data_format=self.data_format, name=name)

    def get_variables(self):
        """Expose the trainable variables of this layer."""
        return {'w': self.w, 'b': self.b}
class WeightNormConv2d(object):
    """2-D convolution with weight normalization.

    The kernel is parameterized as w = g * v / ||v||.  ``g`` and ``b`` are
    created filled with NaN and are initialized lazily by a data-dependent
    step on the first forward pass (see __call__).
    """
    def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2,
                 stddev=0.02, data_format='NHWC',padding='SAME',epsilon=1e-9) :
        with tf.variable_scope(name) :
            # Only channels-last is supported by this implementation.
            assert data_format == 'NHWC'
            self.v = tf.get_variable('v', [k_h, k_w, input_dim, output_dim],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
            # NaN acts as an "uninitialized" sentinel; tf.is_nan(g) in
            # __call__ decides whether the data-dependent init must run.
            self.g = tf.get_variable('g',[output_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.b = tf.get_variable('b',[output_dim],
                                     initializer=tf.constant_initializer(float('nan')))
        self.strides = [1, d_h, d_w, 1]
        self.padding = padding
        self.epsilon = epsilon
    def __call__(self,input_var,name=None,**kwargs) :
        def _init():
            # Data-dependent init: pick g and b so the pre-activations of
            # this first batch have roughly zero mean / unit variance.
            v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,2])
            t = tf.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC')
            mu,var = tf.nn.moments(t,axes=[0,1,2])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
        # The actual conv must run after the (possible) initialization.
        with tf.control_dependencies(init_ops):
            w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2])
            return tf.nn.bias_add(
                tf.nn.conv2d(input_var, w,data_format='NHWC',
                             strides=self.strides, padding=self.padding),
                self.b,data_format='NHWC',name=name)
    def get_variables(self):
        #TODO: self.v should be l2-normalized or not? / currently not.
        return {'v':self.v,'b':self.b,'g':self.g}
class DepthConv2d(object) :
    """Depthwise 2-D convolution.

    Each input channel is convolved with its own ``channel_multiplier``
    filters, producing input_dim * channel_multiplier output channels.
    """
    def __init__(self,name,input_dim,channel_multiplier,k_h=4,k_w=4,d_h=2,d_w=2,
                 stddev=0.02, data_format='NCHW', padding='SAME') :
        with tf.variable_scope(name) :
            assert(data_format == 'NCHW' or data_format == 'NHWC')
            self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
            # One bias per (input channel, multiplier) pair.
            self.b = tf.get_variable('b',[input_dim*channel_multiplier], initializer=tf.constant_initializer(0.0))
        if( data_format == 'NCHW' ) :
            self.strides = [1, 1, d_h, d_w]
        else :
            self.strides = [1, d_h, d_w, 1]
        self.data_format = data_format
        self.padding = padding
    def __call__(self,input_var,name=None,**xargs) :
        return tf.nn.bias_add(
                    tf.nn.depthwise_conv2d(input_var, self.w,
                                           data_format=self.data_format,
                                           strides=self.strides, padding=self.padding),
                    self.b,data_format=self.data_format,name=name)
    def get_variables(self):
        # Added for consistency: every other layer class in this module
        # exposes its trainable variables through get_variables().
        return {'w':self.w,'b':self.b}
class Conv3d(object) :
    """3-D convolution (NDHWC layout) with SAME padding and a bias term."""

    def __init__(self,name,input_dim,output_dim,k_t=2,k_h=4,k_w=4,d_t=1,d_h=1,d_w=1,
                 stddev=0.02, data_format='NDHWC') :
        with tf.variable_scope(name):
            assert data_format == 'NDHWC'
            self.w = tf.get_variable(
                'w', [k_t, k_h, k_w, input_dim, output_dim],
                initializer=tf.truncated_normal_initializer(stddev=stddev))
            self.b = tf.get_variable(
                'b', [output_dim], initializer=tf.constant_initializer(0.0))
            self.strides = [d_t, d_h, d_w]

    def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
        # Caller may substitute its own kernel/bias (weight sharing).
        kernel = self.w if w is None else w
        bias = self.b if b is None else b
        out = tf.nn.convolution(input_var, kernel,
                                strides=self.strides,
                                data_format='NDHWC',
                                padding='SAME')
        return tf.nn.bias_add(out, bias, name=name)

    def get_variables(self):
        """Expose the trainable variables of this layer."""
        return {'w': self.w, 'b': self.b}
class DilatedConv3D(object) :
    """3-D convolution with dilation.

    Spatial (h, w) dims are symmetrically padded by half the kernel size
    and the convolution runs with VALID padding, so h/w sizes are
    preserved.  NOTE(review): the temporal axis is NOT padded, so the
    output shrinks along t for k_t > 1 — confirm that is intended.
    """
    def __init__(self,name,input_dim,output_dim,k_t=2,k_h=3,k_w=3,d_t=2,d_h=1,d_w=1,
                 stddev=0.02, data_format='NDHWC') :
        with tf.variable_scope(name) :
            assert(data_format == 'NDHWC')
            self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
            self.strides = [1,1,1]
            self.dilates = [d_t, d_h, d_w]
    def __call__(self,input_var,name=None) :
        k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
        # Symmetric padding on h and w only (dims 2 and 3 of NDHWC).
        _t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
        return tf.nn.bias_add(
            tf.nn.convolution(_t, self.w,
                              strides=self.strides, dilation_rate=self.dilates,
                              padding='VALID'),
            self.b,name=name)
class Linear(object) :
    """Fully-connected layer (x @ W + b); inputs of rank > 2 are flattened."""

    def __init__(self,name,input_dim,output_dim,stddev=0.02) :
        with tf.variable_scope(name):
            self.w = tf.get_variable(
                'w', [input_dim, output_dim],
                initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable(
                'b', [output_dim], initializer=tf.constant_initializer(0.0))

    def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
        # Callers may substitute their own weight/bias tensors.
        weight = self.w if w is None else w
        bias = self.b if b is None else b
        if input_var.shape.ndims > 2:
            # Collapse every non-batch axis before the matmul.
            flat_dim = tf.reduce_prod(tf.shape(input_var)[1:])
            return tf.matmul(tf.reshape(input_var, [-1, flat_dim]), weight) + bias
        return tf.matmul(input_var, weight) + bias

    def get_variables(self):
        """Expose the trainable variables of this layer."""
        return {'w': self.w, 'b': self.b}
class WeightNormLinear(object):
    """Fully-connected layer with weight normalization.

    Weight is parameterized as w = g * v / ||v||; ``g`` and ``b`` start as
    NaN sentinels and receive a data-dependent initialization on the first
    forward pass (see __call__).
    """
    def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) :
        with tf.variable_scope(name) :
            self.v = tf.get_variable('v',[input_dim, output_dim],
                                     initializer=tf.random_normal_initializer(stddev=stddev))
            # NaN = "not yet initialized"; checked via tf.is_nan below.
            self.g = tf.get_variable('g',[output_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.b = tf.get_variable('b',[output_dim],
                                     initializer=tf.constant_initializer(float('nan')))
        self.epsilon = epsilon
    def __call__(self,input_var,name=None,**kwargs) :
        if( input_var.shape.ndims > 2 ) :
            # Flatten all non-batch axes before the matmul.
            dims = tf.reduce_prod(tf.shape(input_var)[1:])
            input_var = tf.reshape(input_var,[-1,dims])
        def _init():
            # Data-dependent init: scale/shift so first-batch outputs are
            # approximately zero-mean / unit-variance.
            v_norm = tf.nn.l2_normalize(self.v,axis=0)
            t = tf.matmul(input_var,v_norm)
            mu,var = tf.nn.moments(t,axes=[0])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
        with tf.control_dependencies(init_ops):
            w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
            return tf.matmul(input_var,w)+self.b
    def get_variables(self):
        #TODO: self.v should be l2-normalized or not? / currently not.
        return {'v':self.v,'b':self.b,'g':self.g}
class SymPadConv2d(object): #Resize and Convolution(upsacle by 2)
    """Upsample by 2 (nearest-neighbour resize) then apply a stride-1
    convolution with symmetric edge padding — an alternative to transposed
    convolution for upscaling."""

    def __init__(self,name,input_dim,output_dim,
                 k_h=3,k_w=3,stddev=0.02) :
        # Odd kernels keep the padded convolution size-exact.
        assert k_h % 2 == 1 and k_w % 2 == 1, 'kernel size should be odd numbers to ensure exact size'
        with tf.variable_scope(name):
            self.w = tf.get_variable(
                'w', [k_h, k_w, input_dim, output_dim],
                initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable(
                'b', [output_dim], initializer=tf.constant_initializer(0.0))
        self.padding = [[0, 0], [k_h // 2, k_h // 2], [k_w // 2, k_w // 2], [0, 0]]

    def __call__(self,input_var,name=None,**kwargs):
        _, h, w, _ = input_var.shape.as_list()
        upsampled = tf.image.resize_nearest_neighbor(input_var, [h * 2, w * 2])
        padded = tf.pad(upsampled, self.padding, mode='SYMMETRIC')
        # NHWC because resize_nearest_neighbor is channels-last only
        # (so cudnn's NCHW fast path cannot be used here).
        conv = tf.nn.conv2d(padded, self.w, data_format='NHWC',
                            strides=[1, 1, 1, 1], padding="VALID")
        return tf.nn.bias_add(conv, self.b, data_format='NHWC', name=name)

    def get_variables(self):
        """Expose the trainable variables of this layer."""
        return {'w': self.w, 'b': self.b}
class WeightNormSymPadConv2d(object): #Resize and Convolution(upsacle by 2)
    """Weight-normalized variant of SymPadConv2d: nearest-neighbour x2
    upsample, symmetric padding, then a stride-1 WeightNormConv2d."""

    def __init__(self,name,input_dim,output_dim,
                 k_h=3,k_w=3,stddev=0.02) :
        assert k_h % 2 == 1 and k_w % 2 == 1, 'kernel size should be odd numbers to ensure exact size'
        with tf.variable_scope(name):
            self.conv2d = WeightNormConv2d('conv', input_dim, output_dim, k_h, k_w, 1, 1,
                                           data_format='NHWC', padding='VALID')
            self.padding = [[0, 0], [k_h // 2, k_h // 2], [k_w // 2, k_w // 2], [0, 0]]

    def __call__(self,input_var,name=None,**kwargs):
        _, h, w, _ = input_var.shape.as_list()
        upsampled = tf.image.resize_nearest_neighbor(input_var, [h * 2, w * 2])
        padded = tf.pad(upsampled, self.padding, mode='SYMMETRIC')
        return self.conv2d(padded)

    def get_variables(self):
        """Delegate to the wrapped weight-normalized convolution."""
        return self.conv2d.get_variables()
class TransposedConv2d(object):
    """Fractionally-strided ("deconv") 2-D convolution for upsampling.

    Note the kernel layout required by conv2d_transpose is
    [k_h, k_w, out_dim, input_dim] — output channels before input channels.
    """

    def __init__(self,name,input_dim,out_dim,
                 k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') :
        with tf.variable_scope(name):
            self.w = tf.get_variable(
                'w', [k_h, k_w, out_dim, input_dim],
                initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable(
                'b', [out_dim], initializer=tf.constant_initializer(0.0))
        self.data_format = data_format
        if data_format == 'NCHW':
            self.strides = [1, 1, d_h, d_w]
        else:
            self.strides = [1, d_h, d_w, 1]

    def __call__(self,input_var,name=None,**xargs):
        # conv2d_transpose needs an explicit output shape; build it
        # dynamically so variable batch sizes work.
        in_shape = tf.shape(input_var)
        out_channels = tf.shape(self.b)[0]
        if self.data_format == 'NCHW':
            out_shape = tf.stack([in_shape[0], out_channels,
                                  in_shape[2] * self.strides[2],
                                  in_shape[3] * self.strides[3]])
        else:
            out_shape = tf.stack([in_shape[0],
                                  in_shape[1] * self.strides[1],
                                  in_shape[2] * self.strides[2],
                                  out_channels])
        deconv = tf.nn.conv2d_transpose(input_var, self.w, output_shape=out_shape,
                                        data_format=self.data_format,
                                        strides=self.strides, padding='SAME')
        return tf.nn.bias_add(deconv, self.b, data_format=self.data_format, name=name)

    def get_variables(self):
        """Expose the trainable variables of this layer."""
        return {'w': self.w, 'b': self.b}
class WeightNormTransposedConv2d(object):
    """Transposed ("deconv") 2-D convolution with weight normalization.

    w = g * v / ||v|| with the norm taken over [k_h, k_w, input_dim]
    (axes 0,1,3 — conv2d_transpose kernels are [k_h,k_w,out,in]).  ``g``
    and ``b`` start as NaN and get a data-dependent init on first call.
    """
    def __init__(self,name,input_dim,out_dim,
                 k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NHWC',epsilon=1e-9) :
        with tf.variable_scope(name) :
            # Only channels-last is supported by this implementation.
            assert data_format == 'NHWC'
            self.v = tf.get_variable('v', [k_h, k_w, out_dim, input_dim],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
            # NaN = "not yet initialized" sentinel (see require_init below).
            self.g = tf.get_variable('g',[out_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.b = tf.get_variable('b',[out_dim],
                                     initializer=tf.constant_initializer(float('nan')))
        self.strides = [1, d_h, d_w, 1]
        self.epsilon = epsilon
    def __call__(self,input_var,name=None,**kwargs) :
        # Explicit dynamic output shape (NHWC): spatial dims scale by stride.
        shapes = tf.shape(input_var)
        shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])
        def _init():
            # Data-dependent init: scale/shift so first-batch outputs are
            # approximately zero-mean / unit-variance.
            v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
            t = tf.nn.conv2d_transpose(input_var,v_norm,
                                       output_shape=shapes,
                                       strides=self.strides,
                                       padding='SAME',
                                       data_format='NHWC')
            mu,var = tf.nn.moments(t,axes=[0,1,2])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
        with tf.control_dependencies(init_ops):
            # g broadcast along axis 2 (the out-channel axis of v).
            w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
            return tf.nn.bias_add(
                tf.nn.conv2d_transpose(input_var,w,
                                       output_shape=shapes,
                                       strides=self.strides,
                                       padding='SAME',
                                       data_format='NHWC'),
                self.b,data_format='NHWC',name=name)
    def get_variables(self):
        #TODO: self.v should be l2-normalized or not? / currently not.
        return {'v':self.v,'b':self.b,'g':self.g}
class LayerNorm():
    """Layer normalization over the given axes, with optional re-scaling
    (gamma) and re-centering (beta) when out_dim is provided."""
    def __init__(self,name,axis,out_dim=None,epsilon=1e-7,data_format='NHWC') :
        """
        out_dim: Recentering by adding bias again.
                 The previous bias can be ignored while normalization.
                 (when you normalize over channel only)
        """
        assert data_format=='NCHW' or data_format=='NHWC'
        # Normalizing over a single axis only makes sense with an affine
        # (gamma/beta) transform afterwards, hence out_dim is required then.
        assert len(axis) != 1 or (len(axis) == 1 and out_dim != None)
        """
        TODO: Track Moving mean and variance, and use this statistics.
        with tf.variable_scope(name):
            self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
            self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
        """
        if out_dim is not None:
            with tf.variable_scope(name) :
                # NOTE(review): gamma's [1,1,1,out_dim] shape broadcasts over
                # the channel axis only for NHWC; for NCHW the multiply in
                # __call__ would mis-broadcast — confirm NCHW is never used
                # together with out_dim.
                self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0))
        else:
            self.gamma = None
            self.beta = None
        self.axis = axis
        self.epsilon = epsilon
        self.data_format = data_format
        self.name = name
    def __call__(self,input_var,**kwargs) :
        mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
        ret = (input_var - mean) / tf.sqrt(var+self.epsilon)
        if self.gamma is None :
            return ret
        else:
            return tf.nn.bias_add(ret*self.gamma,
                                  self.beta,data_format=self.data_format)
    def get_variables(self):
        return {'gamma':self.gamma,'beta':self.beta} if self.gamma is not None else {}
class InstanceNorm():
    """Instance normalization: normalize every (sample, channel) slice over
    its spatial axes. No learnable affine parameters."""

    def __init__(self,name,format='NCHW',epsilon=1e-5) :
        assert format in ('NCHW', 'NHWC')
        # The spatial axes depend on where the channel dimension lives.
        if format == 'NCHW':
            self.axis = [2, 3]
        else:
            self.axis = [1, 2]
        self.epsilon = epsilon
        self.name = name

    def __call__(self,input_var) :
        mu, sigma_sq = tf.nn.moments(input_var, self.axis, keep_dims=True)
        return (input_var - mu) / tf.sqrt(sigma_sq + self.epsilon)
class BatchNorm(object):
    """Batch normalization wrapper around tf.layers.batch_normalization.

    The gamma/beta/moving_* variables are created eagerly in __init__
    under scope ``name/bn`` so that __call__ can re-enter the same scope
    with reuse=True — this lets the layer be applied multiple times (e.g.
    weight sharing) against one set of variables.
    """
    def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) :
        self.momentum = momentum
        self.epsilon = epsilon
        self.axis = axis
        self.center=center
        self.scale=scale
        with tf.variable_scope(name) as scope:
            with tf.variable_scope('bn') :
                self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0))
                self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
                self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
            # Keep the scope object so __call__ can re-enter it with reuse.
            self.scope = scope
    def __call__(self,input_var,is_training,**xargs) :
        with tf.variable_scope(self.scope) :
            # reuse=True: bind to the variables created in __init__ above.
            return tf.layers.batch_normalization(
                input_var,
                axis=self.axis,
                momentum=self.momentum,
                epsilon=self.epsilon,
                center=self.center,
                scale=self.scale,
                training=is_training,
                reuse=True,
                name='bn')
    """
    ---Do NOT forget to add update_ops dependencies for your loss function.---
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,tf.get_default_graph().get_name_scope())
    #And, do not make any scope inside map_fn, since scope.name will not work...(it is corrupted by map_fn.)
    print(update_ops)
    with tf.control_dependencies(update_ops):
    """
    def get_variables(self):
        return {}
class Lrelu(object):
    """Leaky ReLU activation: max(x, leak * x)."""

    def __init__(self,leak=0.2,name='lrelu') :
        self.leak = leak
        self.name = name

    def __call__(self, x, **kwargs) :
        scaled = self.leak * x
        return tf.maximum(x, scaled, name=self.name)

    def get_variables(self):
        # Activations have no trainable parameters.
        return {}
class ResidualBlock() :
    """Residual block: conv -> norm -> non-linearity -> conv, wrapped in an
    additive skip connection (channel count is preserved)."""

    def __init__(self,name,filters,filter_size=3,non_linearity=Lrelu,normal_method=InstanceNorm) :
        self.conv_1 = Conv2d(name+'_1',filters,filters,filter_size,filter_size,1,1)
        self.normal = normal_method(name+'_norm')
        self.nl = non_linearity()
        self.conv_2 = Conv2d(name+'_2',filters,filters,filter_size,filter_size,1,1)

    def __call__(self,input_var) :
        branch = self.conv_1(input_var)
        branch = self.nl(self.normal(branch))
        # Identity shortcut added to the transformed branch.
        return input_var + self.conv_2(branch)
| [
"hi.wonjoon@gmail.com"
] | hi.wonjoon@gmail.com |
6e42e56cfec64fcecc126ae040f1964ab67867ca | adde969450333a4a459e7122521eb20fea9659f7 | /python-flask/openapi_server/test/test_default_controller.py | 16f79d5d61294677c3ddcdee117e032d41c016dd | [] | no_license | mishin/money-tracker-api-docs | 49c480765ecaacd8ff448fa7d264de590239b572 | 9b0f03fd054f54f136d1d28f2d20ecec9515210a | refs/heads/master | 2022-04-12T23:14:33.557855 | 2019-12-07T15:08:19 | 2019-12-07T15:08:19 | 271,111,873 | 1 | 0 | null | 2020-06-09T21:18:06 | 2020-06-09T21:18:05 | null | UTF-8 | Python | false | false | 2,819 | py | # coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.expense_request import ExpenseRequest # noqa: E501
from openapi_server.models.expense_response import ExpenseResponse # noqa: E501
from openapi_server.models.expenses_response import ExpensesResponse # noqa: E501
from openapi_server.models.not_found import NotFound # noqa: E501
from openapi_server.models.result_response import ResultResponse # noqa: E501
from openapi_server.test import BaseTestCase
class TestDefaultController(BaseTestCase):
    """Integration test stubs for the generated DefaultController.

    Fixes over the raw codegen output: the generator emitted unquoted Java
    ``Date`` reprs (``Tue Oct 01 09:00:00 JST 2019`` — a syntax error) and
    an unquoted ``2019-10`` (which Python evaluates to the integer 2009);
    both are now proper string literals.
    """

    def test_add_expense(self):
        """Test case for add_expense

        adds a expense
        """
        expense_request = {
            "date": "2019-10-22T00:00:00.000+0000",
            "amount": 100,
            "content": "おにぎり",
        }
        headers = {
            'Content-Type': 'application/json',
        }
        response = self.client.open(
            '/v1/expenses',
            method='POST',
            headers=headers,
            data=json.dumps(expense_request),
            content_type='application/json')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_get_expense(self):
        """Test case for get_expense

        gets a expense
        """
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/v1/expenses/{expense_id}'.format(expense_id='expense_id_example'),
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_get_expenses(self):
        """Test case for get_expenses

        gets expenses
        """
        # The generated stub contained raw Java Date reprs here; use the
        # corresponding ISO-8601 strings (JST offset) instead.
        query_string = [('beginDate', '2019-10-01T00:00:00+09:00'),
                        ('endDate', '2019-10-31T00:00:00+09:00')]
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/v1/expenses',
            method='GET',
            headers=headers,
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_get_result(self):
        """Test case for get_result

        gets Result
        """
        headers = {
            'Accept': 'application/json',
        }
        # result_id must be the string '2019-10' (the bare expression
        # 2019-10 would silently evaluate to the integer 2009).
        response = self.client.open(
            '/v1/results/{result_id}'.format(result_id='2019-10'),
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly: python test_default_controller.py
    unittest.main()
| [
"kawakawaryuryu@hotmail.co.jp"
] | kawakawaryuryu@hotmail.co.jp |
6cc605df33d06fc2f8575460a8beca5e972c9fe6 | f65b633d0760e20ef5e0066be10aa18168f5659e | /documents/views.py | 448b7e5a09e37e05fa8e98adc18b108ace1694e2 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | boxed/curia | a2b5ce5feda114bac1637de112b20026dabee5ae | c19f12f77b570b180acf4ec1ee05ea77b87b5fc9 | refs/heads/master | 2021-03-12T22:53:36.748160 | 2014-10-20T16:56:45 | 2014-10-20T16:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,834 | py | import re
from datetime import datetime
from django.contrib.auth.views import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.simplejson import dumps
from django.contrib.auth.models import User,Group
from django.utils.encoding import smart_unicode
from curia.documents.models import Document, Version
from curia.shortcuts import *
from curia import *
from django.utils.translation import ugettext as _
from sets import Set
from curia.labels import get_labels
from curia.labels.models import Label
from curia.labels import handle_labels, mark_labels_as_deleted
# helper
def get_latest(document_id):
    """Return the newest Version of the given document, or None if it has none.

    Bug fix: indexing an empty queryset raises IndexError, not
    Version.DoesNotExist, so the original ``except Version.DoesNotExist``
    never fired and callers got an uncaught IndexError instead of None.
    """
    try:
        return Version.objects.filter(document=document_id).order_by('-id')[0]
    except IndexError:
        return None
def validate_wiki_links(owner_user, owner_group, form, contents_name = 'contents'):
    """Resolve ``[title]`` / ``[owner;title]`` wiki links in a form field.

    Each link must match exactly one non-deleted Document; unresolved links
    are reported via form.errors[contents_name].  When every link resolves,
    the returned contents has each ``[...]`` replaced by an HTML anchor.
    Note: even on errors the (unmodified) contents string is returned.
    """
    from django.utils.encoding import smart_unicode
    contents = smart_unicode(form.data[contents_name])
    links = list(re.finditer(r'(\[(.*?)\])', contents))
    errors = []
    link_targets = {}
    # examples of use:
    # [images/groups/1/sets/3]
    for link in links:
        title = link.groups()[1]
        if ';' in title:
            # 'owner;title' form: the prefix is a group name, or failing
            # that, a username.
            group_name, title = title.split(u';')
            group = get_objects_from(Group, name=group_name)
            if len(group) == 1:
                owner_group = group[0]
            else:
                user = get_objects_from(User, username=group_name)
                if len(user) == 1:
                    # NOTE(review): owner_group is not reset here, so the
                    # lookup below still filters by the previously active
                    # group as well — confirm that is intended.
                    owner_user = user[0]
                else:
                    errors.append(_('%s is not a valid group or user name') % group_name)
                    continue
        documents = get_objects_from(Document, owner_user=owner_user, owner_group=owner_group, title=title, deleted=False)
        if len(documents) != 1:
            errors.append(_('Could not find document %s') % link.groups()[1])
        else:
            link_targets[link.groups()[1]] = documents[0]
    if len(errors) != 0:
        form.errors[contents_name] = errors
    else:
        # replace from the end as to not change the string in a way that interferes with the following replace operation
        links.reverse()
        for link in links:
            target = link_targets[link.groups()[1]]
            contents = contents.replace(link.groups()[0], '<a href="'+target.get_absolute_url()+'">'+smart_unicode(target)+'</a>')
    return contents
# views
def version_response(request, v):
    """Render a single document version with its parent document and owner."""
    context = {
        'version': v,
        'document': v.document,
        'owner': get_owner(v.document),
    }
    return render_to_response(request, 'documents/version.html', context)
def view_latest(request, document_id):
    """Show the newest version of a document; 404 if it has no versions.

    Bug fix: the None check must run before the access check — the original
    dereferenced ``v.document`` first, so a document with no versions raised
    AttributeError instead of Http404.
    """
    v = get_latest(document_id)
    if v is None:
        raise Http404
    check_access(request.user, obj=v.document, command='view')
    return version_response(request, v)
def view_version(request, version_id, document_id):
    """Show one specific stored version of a document."""
    version = get_object_or_404_and_check_access(
        request, Version, pk=version_id, command='view')
    check_access(request.user, obj=version.document, command='view')
    # document_id is currently not cross-checked against version.document.id.
    return version_response(request, version)
def view_version_list(request, document_id):
    """List every stored version of a document."""
    document = get_object_or_404_and_check_access(
        request, Document, pk=document_id, command='view')
    versions = Version.objects.filter(document=document_id)
    return render_to_response(request, 'documents/version_list.html',
                              {'version_list': versions,
                               'document': Document.objects.get(pk=document_id)})
def add_document(request):
    """Create a new document (or presentation) owned by a user or a group.

    Expects ``group_id`` or ``user_id`` in the request; the matching owner
    is resolved (with access checks) before the form is processed.

    Bug fix: the presentation-title branch referenced an undefined name
    ``group`` (NameError at runtime) and re-fetched the owner group; it now
    uses the owners already resolved above.
    """
    is_presentation = get_boolean(request,'is_presentation')
    owner_group = None
    owner_user = None

    class DocumentForm(django.forms.Form):
        title = django.forms.CharField(max_length=1024, label=_('Title'))
        #labels = django.forms.CharField(required=False, label=_('Labels'))
        contents = django.forms.CharField(required=False, widget = django.forms.Textarea, label=_('Contents'))

    group_id = get_integer(request,'group_id')
    user_id = get_integer(request,'user_id')

    if group_id:
        owner_group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='add')
        check_access(request.user, obj=owner_group, command='add document')
    else:
        owner_user = get_object_or_404_and_check_access(request, User, pk=user_id, command='add')
        check_access(request.user, obj=owner_user, command='add document')

    if request.POST:
        form = DocumentForm(request.POST)
        if form.is_valid():
            # Create the document under the resolved owner.
            if owner_group is not None:
                document = Document.objects.create(owner_group=owner_group, owner_user=owner_user, is_presentation=is_presentation)
            else:
                document = Document.objects.create(owner_user=owner_user, is_presentation=is_presentation)

            if document.is_presentation:
                # Presentations get an auto-generated title from the owner.
                if owner_group is None:
                    title = owner_user.username + 's Presentation'
                else:
                    title = owner_group.name + 's Presentation'
            else:
                title = form.cleaned_data['title']

            new_version = Version(document=document,title=title, contents=strip_p(form.cleaned_data['contents']), owner=request.user)
            new_version.save()

            #Handle the labels
            #handle_labels(request,document)

            # Presentations live on the owner's page, so redirect there.
            if document.is_presentation:
                if document.owner_group:
                    return HttpResponseRedirect(document.owner_group.get_absolute_url())
                else:
                    return HttpResponseRedirect(document.owner_user.get_absolute_url())
            return HttpResponseRedirect(document.get_absolute_url())
    else:
        form = DocumentForm()
    return render_to_response(request, 'documents/add.html', {'form':form})
def edit_document(request, document_id, is_creating=False):
    """Edit an existing document, creating a new Version on save.

    Concurrent-edit protection: the form carries the id of the version the
    user started from (``edit_version``); if the document gained a newer
    version meanwhile, the form is re-displayed with an error so the user
    can review before saving again.
    """
    group_id = get_integer(request, 'group_id')
    document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='edit')
    user = request.user
    class DocumentForm(django.forms.Form):
        # Presentations have auto-generated titles, so no title field then.
        if not document.is_presentation:
            title = django.forms.CharField(max_length=1024, label=_('Title'))
        #labels = django.forms.CharField(required=False, label=_('Labels'))
        contents = django.forms.CharField(required=False, widget = django.forms.Textarea, label=_('Contents'))
        edit_version = django.forms.IntegerField(widget = django.forms.HiddenInput, required=True)
    if request.POST:
        form = DocumentForm(request.POST)
        if int(request.POST['edit_version']) != document.get_latest_version().id:
            # Someone else saved while the user was editing: rebuild the
            # form against the new head version and surface an error.
            post = request.POST.copy()
            post['edit_version'] = document.get_latest_version().id
            form = DocumentForm(post)
            form.errors['contents'] = [_('Document was changed after you began editing it, please review the changes and then press save again')]
        if form.is_valid():
            #Handle the labels
            #handle_labels(request,document)
            #Handle the document
            if not document.is_presentation:
                if form.cleaned_data.has_key('title'):
                    title = form.cleaned_data['title']
                else:
                    title = document.get_latest_version().title
            else:
                # Possessive auto-title; avoid "s's" for names ending in s.
                if user.first_name.endswith('s'):
                    title=user.first_name+' presentation'
                else:
                    title = user.first_name+'s presentation'
            new_version = Version(document=document,title=title, contents=strip_p(form.cleaned_data['contents']), owner=request.user)
            new_version.save()
            if request.external:
                # Keep the external-homepage menu entry's title in sync.
                from curia.homepage.models import MenuItem
                try:
                    menu = MenuItem.objects.get(content_type=get_content_type(document), object_id=document.id)
                    menu.title = title
                    menu.save()
                except MenuItem.DoesNotExist:
                    pass
            if document.is_presentation:
                if document.owner_group:
                    return HttpResponseRedirect(document.owner_group.get_absolute_url())
                else:
                    return HttpResponseRedirect(document.owner_user.get_absolute_url())
            return HttpResponseRedirect(document.get_absolute_url())
    else:
        # Initial GET: pre-fill the form from the current head version.
        latest_version = document.get_latest_version()
        form = DocumentForm(initial={'title': latest_version.title, 'contents': latest_version.contents, 'edit_version':latest_version.id})
    return render_to_response(request, 'documents/edit.html', {'form':form, 'document':document})
def delete_document(request, document_id):
    """Delete a document (and its external menu item, if one exists);
    returns the deleted id as JSON."""
    document = get_object_or_404_and_check_access(
        request, Document, pk=document_id, command='delete')

    from curia import delete_objects
    delete_objects(document)

    if request.external:
        from curia.homepage.models import MenuItem
        try:
            entry = MenuItem.objects.get(
                content_type=get_content_type(document), object_id=document.id)
            entry.delete()
        except MenuItem.DoesNotExist:
            # No menu entry for this document — nothing to remove.
            pass

    return HttpResponse(dumps(document_id, ensure_ascii=False),
                        content_type="text/json; charset=UTF-8")
def view_documents_of_user(request, user_id):
    """List a user's personal (non-presentation, non-deleted) documents."""
    user = get_object_or_404_and_check_access(request, User, pk=user_id, command='view')
    documents = get_objects_from(Document, deleted=False, owner_user=user,
                                 owner_group=None, is_presentation=False)
    return render_to_response(request, 'documents/document_list.html',
                              {'owner': user, 'objects': documents, 'type': 'users'})
def view_documents_of_group(request, group_id=None):
    """List a group's non-presentation documents; defaults to the current
    community when no group id is given."""
    if group_id is None:
        group = get_current_community()
    else:
        group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='view')
    documents = get_objects_from(Document, deleted=False, owner_group=group,
                                 is_presentation=False)
    return render_to_response(request, 'documents/document_list.html',
                              {'owner': group, 'objects': documents, 'type': 'groups'})
def revert_to_version(request, document_id, version_id):
old_version = Version.objects.get(pk = version_id)
document = Document.objects.get(pk = document_id)
new_version = Version(document=document,title=old_version.title, contents=old_version.contents, owner=request.user)
new_version.save()
return version_response(request, new_version)
| [
"boxed@killingar.net"
] | boxed@killingar.net |
0c61ce225d80072549a004ed2591a718c5672896 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_8/models/target_get_response.py | ac033a0a864676d3ec597b61877bb7714e0e01c8 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,220 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.8, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class TargetGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[Target]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.Target]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[Target]): A list of target objects.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `TargetGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TargetGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TargetGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
419e7e0ab8339ade64f7a829744f7147b5e4a0f3 | 72e3cb4dbc09423433040bf0ef56c07b8539c136 | /6/7.py | c39351cdf3d3ae53bf24f4ced3a453606d6ef94b | [] | no_license | refanr/2020 | 50cefe20519f2693d2133181a718046734f2ca3d | 51ae536f21f0c5b20dc067180d54392a09b3d5cb | refs/heads/master | 2023-01-04T21:38:08.327450 | 2020-10-20T14:48:30 | 2020-10-20T14:48:30 | 292,705,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | my_int = int(input('Give me an int >= 0: '))
# Fill in the missing code
working_int = my_int
quotient = 1
bin_str = ''
while quotient > 0:
if working_int % 2:
bin_str += '1'
else:
bin_str += '0'
quotient = working_int // 2
working_int = quotient
bin_str = bin_str[::-1]
print("The binary of {} is {}".format(my_int,bin_str)) | [
"refan@Reynirs-Air.lan"
] | refan@Reynirs-Air.lan |
bb4bfb31dbd4a51179b6d3da13cc9b01c3d47928 | a691ae93f31f7fac56027c9f5205731ea1d63c66 | /euler/euler1/euler1.py | b6bc320e2955d56cfc1ca2eb988f97a3f83d111c | [] | no_license | KnoxMakers/CodingWorkshops | c5cbb4ac0dd8abd7a68efc3b8eb3ad1f6b107658 | 12a0698f31d05c4e63d964dd3f9bc134086636e5 | refs/heads/master | 2021-01-19T07:56:27.199119 | 2018-12-05T23:57:33 | 2018-12-05T23:57:33 | 7,244,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | #!/usr/bin/python
x = range(1,1000)
woot = 0
for i in x:
if not i%3 or not i%5:
print i, "is a multiple of 3 or 5"
#woot += i
woot = woot + i
print "Total:",woot
| [
"relative.prime@gmail.com"
] | relative.prime@gmail.com |
6fe67275ed3c74ae869826a226fcfe2957de6f5b | 0aeb4ce84990053523a95b7d260bb89ea8958de4 | /Python_Labs/Remote File Inclusion/app/RFI.py | 5dba120b7f28b83d3f554958710598fa90ba9d66 | [] | no_license | Helmanso/Lplatform_All_Labs | 52474256b73662edf3e72c2be75345ac79162764 | 963438e7f0f55130a2241bd7124b8b9c3563277a | refs/heads/master | 2022-12-08T02:43:10.359270 | 2020-08-29T14:11:48 | 2020-08-29T14:11:48 | 288,520,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from flask import Flask, request, url_for, render_template, redirect
import datetime, requests, os, validators
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
app = Flask(__name__, static_url_path='/static', template_folder='.', static_folder='static')
app.config['DEBUG'] = True
@app.route("/")
def start():
return render_template("index.html")
@app.route("/cmd", methods=['POST'])
def cmd():
filename = request.form['filename']
try:
if "http" not in str(urlparse(filename).scheme):
host = request.url[:-4]
filename = host+"/static/" + filename
result = eval(requests.get(filename).text)
return render_template("index.html", result=result)
else:
result = eval(requests.get(filename).text)
return render_template("index.html", result=result)
except Exception:
return render_template("index.html", result="Unexpected error during the execution of the predefined command.")
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html")
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=True)
| [
"hakimel0047@gmail.com"
] | hakimel0047@gmail.com |
7ea45cbd4ae13f3f6d4ca94d080968135b93965b | af9c3ca608bd3c0b9aa664756fad5f4da495a163 | /second.py | af911857c52401f822e540b0d1b302e42872dfc8 | [] | no_license | 1186141415/COVID-19 | 6f3e397fa73f1b6e845a8db69c02300aa5157e7e | 52f4788b563dae776059a621125e605051175cdf | refs/heads/master | 2022-09-27T16:58:27.788021 | 2020-06-06T04:21:54 | 2020-06-06T04:21:54 | 255,866,776 | 1 | 3 | null | 2020-06-06T03:23:58 | 2020-04-15T09:23:55 | Jupyter Notebook | UTF-8 | Python | false | false | 10,964 | py | import pandas as pd
# 导入matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
# 读取数据
today_world = pd.read_csv("today_world_2020_04_03.csv")
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# 更改列名
today_world.rename(columns=name_dict,inplace=True) # inplace参数判断是否在原数据上进行修改
# 缺失值处理
today_world['当日现存确诊'] = today_world['累计确诊']-today_world['累计治愈']-today_world['累计死亡']
print(today_world.head())
print(today_world.info())# 查看数据基本信息
print(today_world.describe())# 默认只计算数值型特征的统计信息
# 计算缺失值比例
today_world_nan = today_world.isnull().sum()/len(today_world)
# 转变为百分数
print(today_world_nan.apply(lambda x: format(x, '.1%')) )
# 计算病死率,且保留两位小数
today_world['病死率'] = (today_world['累计死亡']/today_world['累计确诊']).apply(lambda x: format(x, '.2f'))
# 将病死率数据类型转换为float
today_world['病死率'] = today_world['病死率'].astype('float')
# 根据病死率降序排序
today_world.sort_values('病死率',ascending=False,inplace=True)
print(today_world.head(10))# 显示病死率前十国家
# 将国家名称设为索引
today_world.set_index('名称',inplace=True)
print(today_world.head(3))
print(today_world.loc['中国'])#可以通过传入列表获取指定国家的数据
# 查看当前累计确诊人数前十国家
world_top10 = today_world.sort_values(['累计确诊'],ascending=False)[:10]
world_top10 = world_top10[['累计确诊','累计死亡','病死率']]
print(world_top10)
#绘图准备
plt.rcParams['font.sans-serif']=['SimHei'] #正常显示中文
plt.rcParams['figure.dpi'] = 120 #设置所有图片的清晰度
# 绘制条形图
world_top10.sort_values('累计确诊').plot.barh(subplots=True,layout=(1,3),sharex=False,
figsize=(7,4),legend=False,sharey=True)
plt.tight_layout() #调整子图间距
plt.show()
# 读取数据
today_province = pd.read_csv("today_province_2020_04_03.csv")
# 创建中文列名字典
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# 更改列名
today_province.rename(columns=name_dict,inplace=True) # inplace参数是否在原对象基础上进行修改
print(today_province.head())
print(today_province.info())# 查看数据基本信息
print(today_province.describe())# 查看数值型特征的统计量
# 计算各省当日现存确诊人数
today_province['当日现存确诊'] = today_province['累计确诊']-today_province['累计治愈']-today_province['累计死亡']
# 将各省名称设置为索引
today_province.set_index('名称',inplace=True)
print(today_province.info())
# 查看全国新增确诊top10的地区,new_top6 就是指代新增确诊的10个地区
new_top6 = today_province['当日新增确诊'].sort_values(ascending=False)[:10]
print(new_top6)
# 绘制条形图和饼图
fig,ax = plt.subplots(1,2,figsize=(10,5))
new_top6.sort_values(ascending=True).plot.barh(fontsize=10,ax=ax[0])
new_top6.plot.pie(autopct='%.1f%%',fontsize=10,ax=ax[1])
plt.ylabel('')
plt.title('全国新增确诊top10地区',size=15)
plt.show()
# 查看全国现存确诊人数top10的省市
store_top10 = today_province['当日现存确诊'].sort_values(ascending=False)[:10]
print(store_top10)
# 绘制条形图
store_top10.sort_values(ascending=True).plot.barh(fontsize=10)
plt.title('全国现存确诊top10地区',size=15)
plt.show()
#全国历史数据探索性分析
# 读取数据
alltime_china = pd.read_csv("alltime_China_2020_04_03.csv")
# 创建中文列名字典
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# 更改列名
alltime_china.rename(columns=name_dict,inplace=True)
print(alltime_china.head())
print(alltime_china.info())
print(alltime_china.describe())
# 缺失值处理
# 计算当日现存确诊人数
alltime_china['当日现存确诊'] = alltime_china['累计确诊']-alltime_china['累计治愈']-alltime_china['累计死亡']
# 删除更新时间一列
alltime_china.drop(['更新时间','当日新增重症'],axis=1,inplace=True)
print(alltime_china.info())
# 将日期改成datetime格式
alltime_china['日期'] = pd.to_datetime(alltime_china['日期'])
# 设置日期为索引
alltime_china.set_index('日期',inplace=True) # 也可使用pd.read_csv("./input/alltime_China_2020_03_27.csv",parse_dates=['date'],index_col='date')
print(alltime_china.index)
# 举例
print(alltime_china.loc['2020-01'])
# 时间序列数据绘制折线图
fig, ax = plt.subplots(figsize=(8,4))
alltime_china.plot(marker='o',ms=2,lw=1,ax=ax)
ax.xaxis.set_major_locator(dates.MonthLocator()) #设置间距
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) #设置日期格式
fig.autofmt_xdate() #自动调整日期倾斜
# 图例位置调整
plt.legend(bbox_to_anchor = [1,1])
plt.title('全国新冠肺炎数据折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.show()
# 时间序列数据绘制折线图
fig, ax = plt.subplots(figsize=(8,4))
alltime_china['当日新增确诊'].plot(ax=ax, style='-',lw=1,color='c',marker='o',ms=3)
ax.xaxis.set_major_locator(dates.MonthLocator()) #设置间距
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) #设置日期格式
fig.autofmt_xdate() #自动调整日期倾斜
plt.title('全国新冠肺炎新增确诊病例折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.show()
#世界各国历史数据探索性分析
# 读取数据
alltime_world = pd.read_csv("alltime_world_2020_04_04.csv")
# 创建中文列名字典
name_dict = {'date':'日期','name':'名称','id':'编号','lastUpdateTime':'更新时间',
'today_confirm':'当日新增确诊','today_suspect':'当日新增疑似',
'today_heal':'当日新增治愈','today_dead':'当日新增死亡',
'today_severe':'当日新增重症','today_storeConfirm':'当日现存确诊',
'total_confirm':'累计确诊','total_suspect':'累计疑似',
'total_heal':'累计治愈','total_dead':'累计死亡','total_severe':'累计重症',
'total_input':'累计输入病例','today_input':'当日输入病例'}
# 更改列名
alltime_world.rename(columns=name_dict,inplace=True)
print(alltime_world.head())
print(alltime_world.info())# 查看数据基本信息
print(alltime_world.describe())
# 将日期一列数据类型变为datetime
alltime_world['日期'] = pd.to_datetime(alltime_world['日期'])
# 计算当日现存确诊
alltime_world['当日现存确诊'] = alltime_world['累计确诊']-alltime_world['累计治愈']-alltime_world['累计死亡']
print(alltime_world.info())
# 查看唯一值,可使用len()查看个数
alltime_world['名称'].unique()
# 统计每天有多少国家出现疫情,即哪20天疫情出现的最多
alltime_world['日期'].value_counts().head(20)
# 设置日期索引
alltime_world.set_index('日期',inplace=True)
# 3月31日数据统计不完全,我们将其删除
alltime_world = alltime_world.loc[:'2020-03-31']
# groupby创建层次化索引
data = alltime_world.groupby(['日期','名称']).mean()
print(data.head())
# 提取部分数据
data_part = data.loc(axis=0)[:,['中国','日本','韩国','美国','意大利','英国','西班牙','德国']]
print(data_part.head())
# 将层级索引还原
data_part.reset_index('名称',inplace=True)
print(data_part.head())
# 绘制多个国家的累计确诊人数折线图
fig, ax = plt.subplots(figsize=(8,4))
data_part['2020-02':].groupby('名称')['累计确诊'].plot(legend=True,marker='o',ms=3,lw=1)
ax.xaxis.set_major_locator(dates.MonthLocator()) #设置间距
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) #设置日期格式
fig.autofmt_xdate() #自动调整日期倾斜
plt.title('各国新冠肺炎累计确诊病例折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.legend(bbox_to_anchor = [1,1])
plt.show()
# 绘制各国新增确诊人数折线图
fig, ax = plt.subplots(figsize=(8,4))
data_part['2020-03':'2020-03-29'].groupby('名称')['当日新增确诊'].plot(legend=True,marker='o',ms=3,lw=1)
ax.xaxis.set_major_locator(dates.MonthLocator()) #设置间距
ax.xaxis.set_major_formatter(dates.DateFormatter('%b')) #设置日期格式
fig.autofmt_xdate() #自动调整日期倾斜
plt.title('各国新冠肺炎新增确诊病例折线图',size=15)
plt.ylabel('人数')
plt.grid(axis='y')
plt.box(False)
plt.legend(bbox_to_anchor = [1,1])
plt.show()
#绘制日本新冠肺炎疫情折线图
japan = alltime_world[alltime_world['名称']=='日本']
fig, ax = plt.subplots(figsize=(8,4))
japan['累计确诊'].plot(ax=ax, fontsize=10, style='-',lw=1,color='c',marker='o',ms=3,legend=True)
ax.set_ylabel('人数', fontsize=10)
ax1 = ax.twinx()
ax1.bar(japan.index, japan['当日新增确诊'])
ax1.xaxis.set_major_locator(dates.DayLocator(interval = 5))
ax1.xaxis.set_major_formatter(dates.DateFormatter('%b %d'))
ax1.legend(['当日新增确诊'],loc='upper left',bbox_to_anchor=(0.001, 0.9))
plt.grid(axis='y')
plt.box(False)
plt.title('日本新冠肺炎疫情折线图',size=15)
plt.show()
| [
"1186141415@qq.com"
] | 1186141415@qq.com |
c99a631a7811780613dfb8c523090511e72b47ed | 7e2198e218bbec882cce3d2db246e3e41db70f51 | /getIamgeStatsPytorch.py | 54ba8af415ee43a98282091f7ed4e3272c26d3be | [
"MIT"
] | permissive | adityag6994/pytorch_ssd_training | 72445d1150f0115aac4c978b6cf9c28c1e4cdab4 | 404f3cbef815e314337ec2c1b4f06a2403a7ce03 | refs/heads/master | 2023-05-08T21:13:34.538207 | 2021-05-25T02:06:06 | 2021-05-25T02:06:06 | 368,198,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | import torch.utils.data
from datasets import *
from utils import *
# Data parameters
data_folder = 'data/rafeeq/' # folder with data files
keep_difficult = True # use objects considered difficult to detect?
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Learning parameters
batch_size = 1 # batch size
workers = 1 # number of workers for loading data in the DataLoader
# Custom dataloaders
train_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
# find mean variance of dataset
mean, std = zzzf_mean_and_std(train_loader)
# mean, std = xwkuang_mean_and_std(train_loader)
print(mean, std)
## test hard | zzzf || xwkuang
# tensor([0.5881, 0.5617, 0.4820])
# tensor([0.2968, 0.3004, 0.2938])
## train
# mean = [0.4898, 0.4867, 0.4050]
# std = [0.2774, 0.2832, 0.2501] | [
"agupta@neurala.com"
] | agupta@neurala.com |
d1e5565535aa6454248bea2abdc3892524a84c42 | 36c95182453f97bc5587ac8f5f763641b9739dce | /src/video/urls.py | a32614ee211a03c78eb98a09f2c48450c2ca6018 | [] | no_license | DreamfoxTeam/MyDream.API.Video | ea8d4194ccbc2f4f247645cf116070bc44b4c4b8 | 72abd0258a8e0a46cc337210b32c69720f81a858 | refs/heads/master | 2020-04-05T02:21:45.808768 | 2018-11-25T13:37:15 | 2018-11-25T13:37:15 | 156,474,229 | 0 | 0 | null | 2018-11-25T13:37:16 | 2018-11-07T01:44:14 | Python | UTF-8 | Python | false | false | 825 | py | """video URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
urlpatterns = [
url(r'^', include('api.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"msgrubler@gmail.com"
] | msgrubler@gmail.com |
36fd9544ce7e5544264fcd2e31e7b8e4bac2fe1b | 70caddd0199b026b9fae11bc8da4bbc875b3a737 | /test1/sentence_test.py | 1b40fd4ee85bc018354c8ad8ba5cfc080b841880 | [] | no_license | zzxzzg/python | 997e36fc9e8676dfb108082d273c03eb40eb2fae | d58d142ea5d5fb7f1197cf2981ece85909ddb0ba | refs/heads/master | 2020-06-13T12:39:53.612822 | 2016-12-20T10:35:18 | 2016-12-20T10:35:18 | 75,379,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #! /usr/bin/env python3
# 使用 elif替代 else if ,去掉大括号,使用缩进来分割代码块
# if condition_1:
# statement_block_1
# elif condition_2:
# statement_block_2
# else:
# statement_block_3
#注意冒号!!!
# while 判断条件:
# 语句
#另外,在Python中没有do..while循环。
# while还提供了else的语句配对,当不满足条件的时候运行else(只运行一次)
count = 0
while count < 5:
print (count, " 小于 5")
count = count + 1
else:
print (count, " 大于或等于 5")
# Python for循环可以遍历任何序列的项目,如一个列表或者一个字符串。
# for循环的一般格式如下:
# for <variable> in <sequence>:
# <statements>
# else:
# <statements>
#如果你需要遍历数字序列,可以使用内置range()函数 for i in range(5) for i in range(5,9)
# Python pass是空语句,是为了保持程序结构的完整性。
# pass 不做任何事情,一般用做占位语句,如下实例
while True:
pass
| [
"xiaobanma2292@gmail.com"
] | xiaobanma2292@gmail.com |
20f03c830a4454b43d9ab80e86ffc4f75dbfde63 | 370090ab025824b5d38618a6c845fb5f293efe38 | /manage.py | 070e489cdb724e330a0ddd103793cf82b814ab50 | [
"MIT"
] | permissive | Antavio/Moringa-Project-Awards | 4bcb1d325226cc9dcd239556c085e8d8d47491ab | 941d6ca0e8978cbb54bb8858ac02bed34fd73306 | refs/heads/master | 2021-09-08T04:06:14.895597 | 2019-05-29T07:37:46 | 2019-05-29T07:37:46 | 188,390,930 | 0 | 1 | MIT | 2021-09-08T01:00:56 | 2019-05-24T09:12:00 | Python | UTF-8 | Python | false | false | 812 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Project_Awards.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"njuguna13@gmail.com"
] | njuguna13@gmail.com |
072edfb2aef8eea1bcec97a890b4d476ea0fb2ba | e3fbb84151f39c3e576bd857a94256e9ecfc2817 | /0x07-python-test_driven_development/tests/6-max_integer_test.py | 73b68c8c52f56476a1d8122a16c47d872dda234f | [] | no_license | Myduzo/holbertonschool-higher_level_programming | 1a6ed5eb499b16f968aed4522c464e5c9f89a41e | aee7350473a8f740f6eb8e3f1abbb359d88cae0f | refs/heads/master | 2022-12-19T18:59:29.672269 | 2020-09-24T15:18:24 | 2020-09-24T15:18:24 | 259,302,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
def test_list(self):
"""Empty list test"""
self.assertEqual(max_integer([5, 0, 2, 9]), 9)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([-5, -1, 0]), 0)
self.assertEqual(max_integer([-2, -3, -7]), -2)
self.assertEqual(max_integer([3]), 3)
self.assertEqual(max_integer([-4]), -4)
self.assertEqual(max_integer([1.5, 3.9, 4.25, 2.7]), 4.25)
def test_empty(self):
"""Empty list test"""
self.assertIsNone(max_integer([]), None)
def test_error(self):
with self.assertRaises(TypeError):
max_integer(7)
with self.assertRaises(TypeError):
max_integer(None)
with self.assertRaises(TypeError):
max_integer([4, "", 1, 6])
with self.assertRaises(TypeError):
max_integer([4, "x", 1, 6])
| [
"sahli.youssef@outlook.com"
] | sahli.youssef@outlook.com |
b91ee62ab15974dcb724ceeb00d00689410e332f | 7bf1dc58ba0884ed957efdb5459ae44851b2b36e | /practice_450/greedy/33_rearrange_characters.py | 858f75ba242070202848f0f4f1146c91f0ceea28 | [] | no_license | ksaubhri12/ds_algo | 672260f07f41bcfc33f8ac23a64085a1f27ab4a5 | 46505b89371cae3321f48609dd755c7e5cfed302 | refs/heads/master | 2023-05-12T08:37:06.789111 | 2023-05-03T03:06:49 | 2023-05-03T03:06:49 | 211,793,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | def rearrange_characters(input_string: str):
n = len(input_string)
dict_value = {}
for i in range(0, n):
if input_string[i] in dict_value:
dict_value[input_string[i]] = dict_value[input_string[i]] + 1
else:
dict_value[input_string[i]] = 1
sorted_count_list = sorted(dict_value, key=dict_value.get, reverse=True)
i = 0
start = 0
char_list = list(input_string)
while len(sorted_count_list) > 0:
char = sorted_count_list.pop(0)
count = dict_value[char]
if count > n / 2:
return -1
start = start + 1
for k in range(0, count):
char_list[i] = char
i = i + 2
if i >= n:
i = 1
return ''.join(char_list)
if __name__ == '__main__':
print(rearrange_characters('geeksforgeeks'))
print(rearrange_characters('bbbbb'))
print(rearrange_characters('kkk'))
| [
"kalpesh@getvokal.com"
] | kalpesh@getvokal.com |
5738640214bb8de04103ec491a2e51acea88a2f5 | 4182189a2624d4aa36619915b7dba42fc5ecdaad | /xxxRekt/urls.py | e179cc2dd01d6dffeacffb8380cffad8d245d708 | [] | no_license | HackerProjects/lindahearts | 3b74e187176404b587bb55b03301f285966d393f | 37492d88ebd28d65b225125b9c75547cb8687a10 | refs/heads/master | 2020-03-21T15:34:51.570455 | 2018-06-26T11:08:07 | 2018-06-26T11:08:07 | 138,721,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django.contrib import admin
from django.urls import path, re_path, include
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
from . import settings
urlpatterns = [
path('admin/', admin.site.urls),
re_path(r'', include('home.urls'))
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"cjhoussem@gmail.com"
] | cjhoussem@gmail.com |
5c8fe94773b018fec08156dff5a0e0e776f1b94c | 159a08aeb28fecfaeaa85cd253d1435255aad466 | /HW13/HW13.py | 6a47e237671b35e83c0ac065e22a314261858c4b | [
"MIT"
] | permissive | ji3g4aunaun/assignment-speech-recognition | ee5220fb8588fee11316d63545935f562a1531c1 | 794907a417d054477812c1f50695312601eae929 | refs/heads/master | 2020-03-24T07:33:05.884142 | 2018-07-25T20:49:48 | 2018-07-25T20:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import numpy as np
from dspBox import str2ndar
obs1 = str2ndar(open('observation1.txt', 'r').read())
obs2 = str2ndar(open('observation2.txt', 'r').read())
obs3 = str2ndar(open('observation3.txt', 'r').read())
# 助教附的oberservation3.txt跟上次一樣沒有換行,所以只會讀到49個狀態,如果加了換行可以讀到50個。
a1 = np.array([[0.2, 0.7, 0.1], [0.1, 0.2, 0.7], [0.7, 0.1, 0.2]])
b1 = np.array([[0.5, 0.4, 0.1], [0.7, 0.2, 0.1], [0.7, 0.1, 0.2]])
pi1 = np.array([0.7, 0.2, 0.1])
a2 = np.array([[0.7, 0.2, 0.1], [0.3, 0.6, 0.1], [0.1, 0.2, 0.7]])
b2 = np.array([[0.1, 0.8, 0.1], [0.2, 0.7, 0.1], [0.4, 0.5, 0.1]])
pi2 = np.array([0.1, 0.7, 0.2])
a3 = np.array([[0.2, 0.7, 0.1], [0.6, 0.3, 0.1], [0.2, 0.7, 0.1]])
b3 = np.array([[0.1, 0.2, 0.7], [0.2, 0.2, 0.6], [0.3, 0.1, 0.6]])
pi3 = np.array([0.2, 0.2, 0.6])
obs, a ,b, pi = [obs1, obs2, obs3], [a1, a2, a3], [b1, b2, b3], [pi1, pi2, pi3]
# -------------------
for obsi in range(3): # Index of observation
size = len(obs[obsi])
print("\nobser" + str(obsi + 1))
for mi in range(3): # Index of model
# =====viterbi algorithm=====
p = np.zeros((size, 3)) # possibility
s = np.zeros((size, 3)) # max state
p[0] = [pi[mi][state] * b[mi][state, obs[obsi][0]] for state in range(3)]
s[0] = [state for state in range(3)]
for i in range(1, size):
for state in range(3):
evaluate = [p[i-1, from_] * a[mi][from_, state] * b[mi][state, obs[obsi][i]] for from_ in range(3)]
p[i, state] = np.max(evaluate)
s[i, state] = np.argmax(evaluate)
seq = np.zeros((size), dtype=int)
seq[-1] = np.argmax(p[-1])
for i in range(size-2, -1, -1):
seq[i] = s[i+1, seq[i+1]]
print("viterbi max state sequence", np.array_str(seq, 100))
print('model_{:d} probability:{:.6e}'.format(mi+1, np.max(p[-1])))
| [
"jason860421@gmail.com"
] | jason860421@gmail.com |
048f1454876a4040818f971ff05431b5505e1f2b | 4e31845d08ae3d8a525ce67bb1abab271e037eca | /FirefoxBotAD/fb.sikuli/sikuli.puller.v1.1/puller.py | f1a63ecf8fb0612ea8c16c81a1b1770b513c721d | [] | no_license | AdCrushMedia/sikuli | d1a17b814ec5b80c6107828dd0d8c2c89ae24995 | 7dd03f7653a3ecef25eb1ceb1970baca4e4cfd9b | refs/heads/master | 2021-06-15T22:08:46.487573 | 2017-05-02T19:23:33 | 2017-05-02T19:23:33 | 73,114,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,196 | py | import os
import sys
import json
import time
import getopt
import shutil
import ctypes
import zipfile
import hashlib
import paramiko
import functools
import subprocess
from collections import defaultdict
# Tags used in changes.json to mark how each file differs between versions.
_NEW_ = "*new*"  # file added in this version
_UPD_ = "*upd*"  # file modified in this version
_DEL_ = "*del*"  # file removed in this version (deleted after deploy)
# Well-known extension / metadata file names of the package format.
_ZIP_ = '.zip'
_JSON_ = '.json'
_PKGVER_ = 'version'+ _JSON_
_CHANGES_ = 'changes'+ _JSON_
_FILE_MAP_ = 'filemap'+ _JSON_
_SETTINGS_ = 'settings'+ _JSON_
# All local paths are anchored at the directory the script was launched
# from, so the puller must be started from its own install directory.
__home = os.getcwd()
__temp_path = os.path.join(__home, 'temp')
__pkgver_path = os.path.join(__home, _PKGVER_)
__settings_path = os.path.join(__home, _SETTINGS_)
class AllowAnythingPolicy(paramiko.MissingHostKeyPolicy):
    """Host-key policy that silently accepts every unknown server key.

    NOTE(review): this disables host-key verification entirely, which
    permits man-in-the-middle attacks; acceptable only on trusted networks.
    """

    def missing_host_key(self, client, hostname, key):
        # Returning None tells paramiko to proceed with the connection.
        return None
def sftp_transcb(filepath, trans, spinner, bytes_so_far, bytes_total):
    """Progress callback for paramiko SFTP transfers.

    Rewrites the current console line (trailing '\\r') with the transfer
    direction, the remote file name, one spinner frame and a byte counter.

    filepath      -- remote path being transferred (only basename is shown)
    trans         -- short label such as 'put' or 'get'
    spinner       -- iterator/generator yielding spinner characters
    bytes_so_far  -- bytes transferred so far
    bytes_total   -- total bytes expected
    """
    # next(spinner) instead of spinner.next(): the .next() method is
    # Python-2-only, while the next() builtin works on 2.6+ and 3.x.
    sys.stdout.write('> sftp %s: %r %s (%d/%d) \r' %
        (trans, os.path.basename(filepath), next(spinner), bytes_so_far, bytes_total))
    sys.stdout.flush()
def sftp_rexists(sftp, path):
    """Return True if *path* exists on the remote SFTP host.

    Uses stat() as the existence probe.  errno 2 (ENOENT) means the path
    is missing; any other IOError (e.g. permission denied) is treated as
    "exists", matching the original behaviour.  Non-IOError exceptions
    propagate to the caller.
    """
    try:
        sftp.stat(path)
    except IOError as e:  # "except IOError, e" was Python-2-only syntax
        return e.errno != 2
    return True
def sftp_upload(sftp, local_path, remote_path):
    """Upload *local_path* to *remote_path* over SFTP with a progress line.

    After the transfer, compares remote and local file sizes and returns
    True when they match, False otherwise.
    """
    spinner = spinning_cursor()
    # Bind the fixed callback arguments; paramiko supplies (so_far, total).
    cb = functools.partial(sftp_transcb, remote_path, 'put', spinner)
    remote_stat = sftp.put(local_path, remote_path, callback=cb, confirm=True)
    sys.stdout.write("\n")
    local_stat = os.stat(local_path)
    if remote_stat.st_size == local_stat.st_size:
        # Single parenthesized argument keeps this print valid on both
        # Python 2 (statement) and Python 3 (function).
        print('> transfer complete! remote file last modified: %s\n' % (
            time.ctime(remote_stat.st_mtime)))
        return True
    else:
        print('> An error occured during the transfer..')
        return False
def sftp_download(sftp, remote_path, local_path):
    """Download *remote_path* to *local_path* over SFTP with a progress line.

    Returns True on success, False on any failure (the broad catch is
    deliberate: a failed download is reported, not fatal).
    """
    spinner = spinning_cursor()
    # Bind the fixed callback arguments; paramiko supplies (so_far, total).
    cb = functools.partial(sftp_transcb, remote_path, 'get', spinner)
    try:
        sftp.get(remote_path, local_path, callback=cb)
        sys.stdout.write("\n")
        return True
    except Exception:  # "except Exception, e" was Py2-only; e was unused
        print("\n> An error occured during donwload")
        return False
def ask(question):
    """Prompt *question* on stdout and loop until a yes/no answer is typed.

    Returns True for yes, False for no.  A bare Enter (empty string)
    counts as yes, since raw_input returns '' for Enter.
    """
    affirmative = set(['yes', 'y', 'ye', ''])
    negative = set(['no', 'n'])
    sys.stdout.write(question + ' [Y/n] ')
    while True:
        answer = raw_input().lower()
        if answer in affirmative:
            return True
        if answer in negative:
            return False
        sys.stdout.write('Please respond with [Y/n] ')
def Mbox(title, text, style=1):
    """Show a native Windows message box and return the button code.

    Thin wrapper around user32.MessageBoxA (Windows only).

    style selects the button set:
        0: OK                       1: OK | Cancel (default)
        2: Abort | Retry | Ignore   3: Yes | No | Cancel
        4: Yes | No                 5: Retry | No
        6: Cancel | Try Again | Continue
    """
    user32 = ctypes.windll.user32
    return user32.MessageBoxA(0, text, title, style)
def spinning_cursor():
    """Generator that yields the spinner frames | / - \\ forever."""
    frames = '|/-\\'
    index = 0
    while True:
        yield frames[index % len(frames)]
        index += 1
def checksum(fname):
    """Return the hex MD5 digest of the file at *fname*.

    Reads in 4 KiB chunks so arbitrarily large files fit in memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as fh:
        while True:
            block = fh.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def get_hasver(sftp, rmt_ver_path):
    """Return True when the remote version file exists, else False."""
    # sftp_rexists already returns a bool, so the result maps directly.
    return sftp_rexists(sftp, rmt_ver_path)
def rmvfile(filepath):
    """Delete *filepath* if it is an existing regular file; otherwise no-op."""
    if not os.path.isfile(filepath):
        return
    os.remove(filepath)
def rmvdir_contents(dirpath):
    """Delete every file and subdirectory directly inside *dirpath*.

    The directory itself is kept.  Failures on individual entries
    (e.g. read-only files on Windows) are reported and skipped so the
    sweep continues.
    """
    for entry in os.listdir(dirpath):
        file_path = os.path.join(dirpath, entry)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:  # "except Exception, e" was Py2-only syntax
            print("> READ ONLY folder cant be deleted")
            # Fixed "%S": an invalid format character that made this
            # handler raise ValueError instead of printing the path.
            print("> %s" % file_path)
def reset():
    """Restore a clean state for the puller.

    Removes the local version marker, empties the configured watch
    folder (read from settings.json) and clears the temp directory.
    """
    rmvfile(__pkgver_path)
    with open(__settings_path) as settings_file:
        settings = json.load(settings_file)
    watch_folder = settings['watch_folder']
    rmvdir_contents(watch_folder)
    rmvdir_contents(__temp_path)
def extract(zipath, dest):
    """Extract the zip archive at *zipath* into directory *dest*.

    Returns True on success, False on any extraction failure.
    """
    filename = os.path.basename(zipath)
    print("> Extracting package '%s'" % filename)
    try:
        # The with-statement closes the archive; the explicit z.close()
        # the original carried was redundant.
        with zipfile.ZipFile(zipath, "r") as z:
            z.extractall(dest)
        return True
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print("> Error on zip extract. \n>'%s'" % filename)
        return False
def check_integrity(watch_folder):
    """Verify the MD5 of every file listed in filemap.json under *watch_folder*.

    Matching files advance a spinner on one console line; mismatches are
    printed as "Bad checksum".  Files listed in the map but missing
    locally are silently skipped (original behaviour).  Purely a report:
    nothing is returned or repaired.
    """
    print('# Checking for file integrity')
    spinner = spinning_cursor()
    filemap_path = os.path.join(watch_folder, _FILE_MAP_)
    # json.load replaces the value wholesale, so the dead
    # "fmap = defaultdict(dict)" pre-assignment was removed.
    with open(filemap_path) as fmout:
        fmap = json.load(fmout)
    for path, csum in fmap['map'].items():
        target = os.path.join(watch_folder, path)
        if not os.path.isfile(target):
            continue
        if checksum(target) == csum:
            # next(spinner) works on Py2.6+ and Py3; .next() was Py2-only.
            sys.stdout.write('> %s %s\r' % (csum, next(spinner)))
            sys.stdout.flush()
            time.sleep(0.002)  # slow the spinner enough to be visible
        else:
            print("> Bad checksum: %s %s" % (csum, path))
    print("\n> done..")
def checkploy(tempver_path, watch_folder):
    """Check a staged version package then deploy it into *watch_folder*.

    Steps: read the change-set and file map from *tempver_path*, unzip the
    package into a scratch dir and verify every mapped file's checksum,
    then re-extract the package into *watch_folder* and delete any file the
    change-set tags for removal.  Returns True on success, False on any
    checksum or extraction failure.
    """
    changes = defaultdict(dict)
    fmap = defaultdict(dict)
    changes_path = os.path.join(tempver_path, _CHANGES_)
    with open(changes_path) as fileout:
        changes = json.load(fileout)
    # Package zip is named after the change-set's overall checksum.
    csum = changes['checksum']
    filemap_path = os.path.join(tempver_path, _FILE_MAP_)
    with open(filemap_path) as fmout:
        fmap = json.load(fmout)
    pkgzip_path = os.path.join(tempver_path, csum + _ZIP_)
    extr_dest = os.path.join(tempver_path, csum)
    if not os.path.exists(extr_dest):
        os.makedirs(extr_dest)
    if extract(pkgzip_path, extr_dest):
        print '> Checking for file integrity at temp level'
        # NOTE: the loop variable deliberately reuses the name `csum`,
        # shadowing the package checksum read above (not used again here).
        for path, csum in fmap['map'].items():
            target = os.path.join(extr_dest, path)
            if os.path.isfile(target):
                localcsum = checksum(target)
                same = localcsum == csum
                if same:
                    sys.stdout.write('> %s\r' % csum)
                    sys.stdout.flush()
                else:
                    # Any single mismatch aborts the deployment.
                    print "> Bad checksum: local: %s >> %s %s" % (csum, localcsum, path)
                    return False
        print '> checksum passed.. '
        # Scratch extraction dir is no longer needed.
        rmvdir_contents(extr_dest)
        os.rmdir(extr_dest)
    print "\n# Deploying version package"
    if not extract(pkgzip_path, watch_folder):
        return False
    # Apply deletions tagged in the change-set.
    for path, tag in changes['pkg'].items():
        if tag == _DEL_:
            abspath = os.path.join(watch_folder, path)
            rmvfile(abspath)
    return True
def deploy_pkg(pkgpath, watch_folder, version):
    """Unpack a downloaded version zip, deploy it, and record the new version.

    Returns False if staging extraction or checkploy() fails, True once the
    version marker file has been written.
    """
    base_name = os.path.basename(pkgpath)
    stem = os.path.splitext(base_name)[0]
    staging_dir = os.path.join(__temp_path, stem)
    if not os.path.exists(staging_dir):
        os.makedirs(staging_dir)
    if not extract(pkgpath, staging_dir):
        return False
    if not checkploy(staging_dir, watch_folder):
        return False
    # Persist the newly deployed version number.
    marker = defaultdict(dict)
    marker['version'] = version
    with open(__pkgver_path, 'w') as handle:
        json.dump(marker, handle)
    print("> Package deployed. Current version: %d\n" % version)
    return True
def analyze(watch_folder, ver):
    """Apply the change-set already staged inside *watch_folder* for *ver*.

    Validates the change-set's version and package zip, extracts the zip in
    place, removes files tagged for deletion, then deletes the zip and the
    change-set file.  Returns False on validation failure, otherwise None.
    """
    changes = defaultdict(dict)
    changes_path = os.path.join(watch_folder, _CHANGES_)
    with open(changes_path) as fileout:
        changes = json.load(fileout)
    if ver != changes['version']:
        print('> package version not the same [%d.zip]' % ver)
        return False
    zipname = changes['checksum'] + _ZIP_
    pkgzip = os.path.join(watch_folder, zipname)
    if not os.path.isfile(pkgzip):
        print("> package %s not found in '%s.zip'" % (changes['checksum'], ver))
        return False
    # BUG FIX: extract() takes exactly (zipath, dest); the original passed a
    # third argument and would always raise TypeError here.
    extract(pkgzip, watch_folder)
    rmvfile(pkgzip)
    print('> root folder: %s' % watch_folder)
    for path, tag in changes['pkg'].items():
        print("> %s %s" % (tag, path))
        if tag == _DEL_:
            abspath = os.path.join(watch_folder, path)
            rmvfile(abspath)
    rmvfile(changes_path)
def run_cmd(exe, arg):
    """Launch *exe* with a single argument, without waiting for it to exit.

    If *exe* is not an existing file, a settings hint is printed instead.
    """
    print("> running defined command..")
    if os.path.isfile(exe):
        # Fire-and-forget: the Popen handle was never used, so the unused
        # local `p` from the original is dropped.
        subprocess.Popen([exe, arg])
    else:
        print("> Cant find defined 'exe_to_run' path")
        print('> Please check your settings')
# --- main
def main_puller():
    """Pull and deploy every pending version package from the SFTP server.

    Reads connection details and paths from the JSON settings file, finds
    the current local version, then downloads/deploys consecutive version
    zips (N+1, N+2, ...) until one is missing remotely, prompting the user
    once via a message box before the first download.  Finally launches the
    configured executable.
    """
    _pkgver = defaultdict(dict)
    if not os.path.isfile(__settings_path):
        print '> Cant find required file \'%s\'' %(_SETTINGS_)
        sys.exit()
    # -- load settings variables
    with open(__settings_path) as settings_file:
        _settings = json.load(settings_file)
    _address = _settings['ftp']['address']
    _username = _settings['ftp']['username']
    _password = _settings['ftp']['password']
    # NOTE(review): _keep_pkg is read but never used below -- confirm intent.
    _keep_pkg = _settings['keep_pkg']
    _ftp_home = _settings['ftp_home']
    _app_title = _settings['app_title']
    _watch_folder = _settings['watch_folder']
    _exe_to_run = _settings['exe_to_run']
    _exe_argument = _settings['exe_argument']
    if not os.path.exists(_watch_folder):
        # Bad configuration: tell the user both on stdout and via a box.
        print "> Defined watch folder not found!"
        print "> %s" % _watch_folder
        print "\n> Please check your settings"
        errmsg = "> Defined watch folder not found!\n> %s\n> Please check your settings" % _watch_folder
        Mbox( "Settings Problem", str(errmsg), 0)
        sys.exit()
    if not os.path.exists(__temp_path):
        os.makedirs(__temp_path)
    if not os.path.isfile(__pkgver_path):
        # First run: no marker file means version 0 (everything is new).
        print "> Cant find '%s'. Assumed first pull setting version to 0" % _PKGVER_
        _pkgver['version'] = 0
    else:
        with open(__pkgver_path) as pkgver_file:
            _pkgver = json.load(pkgver_file)
    print "# Current version: %d" % _pkgver['version']
    user_ok = False
    box_title = "Package Update for '%s'" % _app_title
    box_msg = "An update has been detected. Click 'OK' to download or 'Cancel' to ingore."
    print '# Connecting to server \'%s\'' % _address
    client = paramiko.SSHClient()
    # Accepts any host key -- no host verification (trusted network assumed).
    client.set_missing_host_key_policy(AllowAnythingPolicy())
    client.connect(_address, username=_username, password=_password)
    _sftp = client.open_sftp()
    print '# Checking for updates..'
    version = _pkgver['version']
    while True:
        # Probe for the next sequential version zip on the server.
        version += 1
        verzip = str(version)+ _ZIP_
        remote_verpkg = os.path.join(_ftp_home, verzip)
        if not get_hasver(_sftp, remote_verpkg):
            print '> No updates detected..'
            break
        else:
            if not user_ok:
                # Ask once; Mbox returns 1 for OK.
                if Mbox( str(box_title), box_msg) == 1:
                    user_ok = True
                else:
                    sys.exit()
            pkgver_zip = os.path.join(__temp_path, verzip)
            if not sftp_download(_sftp, remote_verpkg, pkgver_zip):
                sys.exit()
            if not deploy_pkg(pkgver_zip, _watch_folder, version):
                sys.exit()
    _sftp.close()
    client.close()
    # Launch the configured application after updating.
    run_cmd(_exe_to_run, _exe_argument)
def print_help():
    """Print the command-line usage summary."""
    print('puller.py [-h]')
def main(argv):
    """Parse command-line options and dispatch to the requested action."""
    try:
        opts, args = getopt.getopt(argv, "hxp", ['help', 'reset', 'pull'])
    except getopt.GetoptError:
        # BUG FIX: the original fell through after a bad option and then hit
        # a NameError because `opts` was never bound; exit explicitly with
        # the conventional usage-error status instead.
        print_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_help()
            sys.exit()
        elif opt in ("-x", "--reset"):
            reset()  # for debugging only
            sys.exit()
        elif opt in ("-p", "--pull"):
            print('')
            main_puller()
if __name__ == "__main__":
main(sys.argv[1:]) | [
"matthew.best@toptal.com"
] | matthew.best@toptal.com |
5a346858f79c757e13d5570ec88addd3bfc652ac | 0ae6e24fa95b8a22d9fe9d36f8786cdc4adbceac | /tests/test_api.py | 1c95830966214ef48771a702eba86858d69f7bfe | [] | no_license | lopezjimenezjesus/flasktaskr | cd0c407d7dec0920a1bf2eabb39f1b40ea04fa03 | f7167092a0a99969a730c3d80290ce0919de8ddb | refs/heads/master | 2022-11-10T00:31:22.165929 | 2016-12-18T01:15:41 | 2016-12-18T01:15:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | import os
import unittest
from datetime import date
from project import app, db
from project._config import basedir
from project.models import Task
# Filename of the throwaway SQLite database these tests create and drop.
TEST_DB = 'test.db'
class APITest(unittest.TestCase):
    """Integration tests for the read-only JSON API under api/v1/tasks.

    Each test gets a fresh SQLite database (created in setUp, dropped in
    tearDown).  The deprecated ``assertEquals`` alias (removed in Python
    3.12) has been replaced with ``assertEqual`` throughout.
    """

    #######################
    # setup and tear down #
    #######################

    def setUp(self):
        """Configure the app for testing and create a fresh database."""
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        app.config['DEBUG'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
            os.path.join(basedir, TEST_DB)
        self.app = app.test_client()
        db.create_all()
        self.assertEqual(app.debug, False)

    # executed after each test
    def tearDown(self):
        """Drop all tables so tests stay independent."""
        db.session.remove()
        db.drop_all()

    ##################
    # helper methods #
    ##################

    def add_tasks(self):
        """Insert two known tasks used as fixtures by the endpoint tests."""
        db.session.add(
            Task(
                "Run around in circles",
                date(2015, 10, 5),
                '10',
                date(2015, 10, 5),
                1,
                1
            )
        )
        db.session.commit()
        db.session.add(
            Task(
                "Purchase Real Python",
                date(2016, 2, 23),
                10,
                date(2016, 2, 7),
                1,
                1
            )
        )
        db.session.commit()

    def login(self, name, password):
        """POST credentials to the login form."""
        return self.app.post('/', data=dict(
            name=name, password=password), follow_redirects=True)

    def register(self, name, email, password, confirm):
        """POST a new account to the registration form."""
        return self.app.post(
            'register/',
            data=dict(name=name, email=email, password=password,
                      confirm=confirm),
            follow_redirects=True
        )

    #########
    # tests #
    #########

    def test_collection_endpoint_returns_correct(self):
        """The collection endpoint lists all tasks as JSON."""
        self.register('Michael', 'michael@realpython.com',
                      'python', 'python')
        self.login('Michael', 'python')
        self.add_tasks()
        response = self.app.get('api/v1/tasks/',
                                follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.mimetype, 'application/json')
        self.assertIn(b'Run around in circles', response.data)
        self.assertIn(b'Purchase Real Python', response.data)

    def test_resource_endpoint_returns_correct_data(self):
        """A single-resource request returns only the matching task."""
        self.register('Michael', 'michael@realpython.com',
                      'python', 'python')
        self.login('Michael', 'python')
        self.add_tasks()
        response = self.app.get('api/v1/tasks/2', follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.mimetype, 'application/json')
        self.assertIn(b'Purchase Real Python', response.data)
        self.assertNotIn(b'Run around in circles', response.data)

    def test_invalid_resource_endpoint_returns_error(self):
        """Requesting a non-existent task id yields a JSON 404."""
        self.register('Michael', 'michael@realpython.com',
                      'python', 'python')
        self.login('Michael', 'python')
        self.add_tasks()
        response = self.app.get('api/v1/tasks/209', follow_redirects=True)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response.mimetype, 'application/json')
        self.assertIn(b'Element does not exists', response.data)
# Run the API test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"gesusjl@gmail.com"
] | gesusjl@gmail.com |
33cb9493d7550749594d94675bba7dd68faf783d | d92cd1bce2af9d86e83a732a26419be0d76ae8eb | /demo_final/mvc/controllers/alumnos/delete.py | b4f77f02691bb3d8c541d728780cb34b2d8396fd | [] | no_license | MariaFernandaReyesLopez/demo_web_page | fd0973d0cc362dc3cc24784c0fdf10cb58154e2a | 40879f2265c068fed226b4bc560efd975d56974d | refs/heads/master | 2022-11-29T13:03:08.334216 | 2020-08-13T21:18:04 | 2020-08-13T21:18:04 | 287,380,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import web
import mvc.models.alumnos as alumnos
# Single shared data-access object for the alumnos table.
model_alumnos = alumnos.Alumnos()
# Renderer for the alumnos view templates, wrapped in the shared base template.
render = web.template.render("mvc/views/alumnos/", base="template")
class Delete():
    """web.py controller that confirms and performs deletion of an Alumno row."""

    def GET(self, id_alumno):
        """Render the delete-confirmation page for the given primary key."""
        try:
            # view() returns a sequence of rows; [0] raises IndexError when
            # the id does not exist, which falls into the error branch.
            result = model_alumnos.view(id_alumno)[0]
            return render.delete(result)  # renderizando delete.html -> render delete.html
        except Exception as e:
            print(e)
            return "Error"

    def POST(self, id_alumno):
        """Delete the row identified by the hidden form field, then redirect."""
        try:
            form = web.input()
            id_alumno = form.id_alumno  # hidden field overrides the URL capture
            model_alumnos.delete(id_alumno)
        except Exception as e:
            print(e)
            return "Error"
        # BUG FIX: web.seeother() builds a redirect *exception*; it must be
        # raised to actually send the 303.  The original bare call was a
        # no-op and the handler returned None.  Raised outside the try so
        # the redirect is not swallowed by the except branch.
        raise web.seeother('/alumnos_list')
| [
"noreply@github.com"
] | noreply@github.com |
d9e0220046732ed6ddb31fb31a5d9bdbd1e08ecf | a28df6dc7901e0799ddbcdd43dc17f1f966e5eb5 | /interview_preperation_kit/arrays/array_manipulation/editorial.py | 586037ec6827623696fb06fc01fac1bee8c27853 | [] | no_license | jlucasldm/hackerrank | ede7aafa0131171a358c1601a0ccb26da4f3b5dc | 3189c3b9844eaff5873f4d4cf6c94aaf3b88b864 | refs/heads/master | 2023-07-30T22:18:30.166508 | 2021-09-12T20:15:00 | 2021-09-12T20:15:00 | 396,453,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | def arrayManipulation(n, queries):
arr = [0] * (n+1)
# add the value at first index
# subtract the value at last index + 1
for q in queries:
start, end, amt = q
arr[start-1] += amt
arr[end] -= amt
# max value and running sum
mv = -1
running = 0
for a in arr:
running += a
if running > mv:
mv = running
return mv | [
"jlucas.ldm@gmail.com"
] | jlucas.ldm@gmail.com |
dc764acc87abd223ec64716f693f78733a9ce009 | acdbaf53db693f63313ee5811b15c810b6f2df60 | /backend/test_flaskr.py | 763983588893e2501ddf8728dcbbe0672423c72d | [] | no_license | fanchenjie/Trivia | 5de5482b163496f6126cadd51cae8b4b797a4499 | 6fe9fc8175fd6d651a582f40a1ad67cbf68425e8 | refs/heads/main | 2023-01-15T11:26:08.678506 | 2020-11-20T23:10:32 | 2020-11-20T23:10:32 | 302,809,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,213 | py | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
    """This class represents the trivia test case"""
    # NOTE(review): these tests assume a local Postgres database named
    # "triviatest" reachable as xiaofan@localhost:5432 and pre-seeded data
    # (e.g. question id 11 exists, category 2 has unseen questions) --
    # confirm before running elsewhere.
    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "triviatest"
        self.database_path = "postgres://{}/{}".format('xiaofan@localhost:5432', self.database_name)
        setup_db(self.app, self.database_path)
        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()
        # Fixture payload for POST /questions.
        self.new_question = {
            'question':'question1',
            'answer':'answer1',
            'difficulty':1,
            'category':1
        }
        # edit to test search by term
        self.search_term_with_result = {
            'searchTerm':'question'
        }
        # self.total_questions_with_search_term = 6
        self.search_term_without_result = {
            'searchTerm':'xxxxxxxxxx'
        }
        # edit these to test delete
        # res = self.client().post('/questions', json = self.new_question)
        # id = json.loads(res.data)['question_id']
        self.exist_question_ID_to_delete = 11
        self.non_exist_question_ID_to_delete = 1000
        # edit these to test non_valid page
        self.non_valid_page = 1000
        # edit these to test get question by category
        self.exist_category_ID = 1
        # edit these to test post quiz type and previous question
        self.quiz_type_previous_questions = {
            'quiz_category':{'id':2},
            'previous_questions':[16]
        }
    def tearDown(self):
        """Executed after each test"""
        pass
    """
    TODO
    Write at least one test for each test for successful operation and for expected errors.
    """
    # GET /categories returns the full category map.
    def test_get_categories(self):
        res = self.client().get('/categories')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(data['categories']))
    # get questions
    def test_get_paginated_questions(self):
        res = self.client().get('/questions?page=1')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['total_questions'])
        self.assertTrue(len(data['questions']))
        self.assertTrue(len(data['categories']))
        self.assertFalse(data['current_category'])
    def test_404_sent_requesting_beyond_valid_page(self):
        res = self.client().get('/questions?page={}'.format(self.non_valid_page))
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'],'Not Found')
    # search by term
    def test_search_question_by_searchTerm_with_result(self):
        res = self.client().post('/questions/searchTerm?page=1', json=self.search_term_with_result)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(data['success'])
        self.assertTrue(len(data['questions']))
        self.assertTrue(data['total_questions'])
        self.assertFalse(data['current_category'])
        self.assertTrue(data['search_term'])
        # self.assertEqual(data['total_questions'], self.total_questions_with_search_term)
    def test_search_question_by_searchTerm_without_result(self):
        res = self.client().post('/questions/searchTerm?page=1', json=self.search_term_without_result)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(data['success'])
        self.assertEqual(len(data['questions']), 0)
        self.assertFalse(data['current_category'])
        self.assertTrue(data['search_term'])
        self.assertEqual(data['total_questions'], 0)
    def test_400_bad_search_post_without_body(self):
        res = self.client().post('/questions/searchTerm')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 400)
        self.assertFalse(data['success'])
        self.assertEqual(data['message'], 'Bad Request')
    # get questions by category
    def test_get_questions_by_category(self):
        res = self.client().get('/categories/{}/questions'.format(self.exist_category_ID))
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(data['success'])
        self.assertTrue(len(data['questions']))
        self.assertTrue(data['total_questions'])
        # self.assertTrue(data['current_category'])
        self.assertFalse(data['search_term'])
    def test_404_questions_by_category_beyond_valid_page(self):
        res = self.client().get('/categories/{}/questions?page={}'.format(self.exist_category_ID, self.non_valid_page))
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'],'Not Found')
    # quizzes
    def test_post_quiz_type_previous_questions(self):
        res = self.client().post('/quizzes', json=self.quiz_type_previous_questions)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(data['success'])
        self.assertTrue(data['question'])
    def test_400_bad_quiz_post_without_body(self):
        res = self.client().post('/quizzes')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 400)
        self.assertFalse(data['success'])
        self.assertEqual(data['message'], 'Bad Request')
    # post question
    def test_post_question(self):
        res = self.client().post('/questions', json=self.new_question)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(data['success'])
    def test_400_bad_post_request_without_body(self):
        res = self.client().post('/questions')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 400)
        self.assertFalse(data['success'])
        self.assertEqual(data['message'], 'Bad Request')
    # delete question
    # def test_delete_question_by_id(self):
    #     res = self.client().delete('/questions/{}'.format(self.exist_question_ID_to_delete))
    #     data = json.loads(res.data)
    #     self.assertEqual(res.status_code, 200)
    #     self.assertEqual(data['success'], True)
    def test_404_if_question_does_not_exist(self):
        res = self.client().delete('/questions/{}'.format(self.non_exist_question_ID_to_delete))
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'],'Not Found')
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main() | [
"fanchenjie@seattleu.edu"
] | fanchenjie@seattleu.edu |
2f43fcb6f336490af1bbc1d36b02d53bc911db08 | 78cff47ad6e4c8dc600249915aff2a3c54b723a2 | /AntiAlias.py | c2d25ee7bdcaf3239d2397ab1d5d6b0e1b66e246 | [] | no_license | willstem/Research_Tools | c6e7e526863abbe048a1c973430211a2bfa36b5b | 42e5dbed7ed786e212a1115974c2235de26cf63a | refs/heads/master | 2020-03-22T05:57:43.184438 | 2018-07-03T15:17:08 | 2018-07-03T15:17:08 | 139,602,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 30 11:24:23 2018
@author: will
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import butter, lfilter
def cosine(t, amp, freq, phase = 0):
    """Cosine of amplitude *amp* and frequency *freq* (Hz) sampled at *t*."""
    angular_freq = 2 * np.pi * freq
    return amp * np.cos(angular_freq * t + phase)
def view_section(datax, datay, samp_density, osc_win, Nosc = 10):
    """Return the leading slice of (datax, datay) covering the first *Nosc*
    oscillations, given the total sample count and oscillations per window."""
    samples_per_period = samp_density / osc_win
    cutoff = int(samples_per_period * Nosc)
    return datax[:cutoff], datay[:cutoff]
def no_off_spectrum(sig, t):
    """One-sided FFT of *sig* with its DC offset removed.

    Returns (freqs, spectrum) from np.fft.rfft of the de-meaned signal; the
    frequency axis is derived from the sample spacing t[1] - t[0].
    """
    f = np.fft.rfftfreq(len(sig), d = t[1]-t[0])
    # BUG FIX: the original did `sig -= np.mean(sig)`, mutating the caller's
    # array in place (and raising a casting error for integer dtypes);
    # subtract into a fresh array instead.
    centered = sig - np.mean(sig)
    y = np.fft.rfft(centered)
    return f, y
class anti_alias:
    """Helper for anti-alias band-pass filtering a sampled signal.

    Wraps a time axis (*tdata*) and signal (*ydata*) and exposes a
    Butterworth band-pass (scipy.signal butter/lfilter) plus simple
    shift/decimation utilities.  aa_filt is unfinished (see author note).
    """
    def __init__(self, tdata, ydata):
        # Arrays are stored as-is; no copies are made.
        self.ydata = ydata
        self.tdata = tdata
    def butter_bandpass(self, lowcut, highcut, fs, order=5):
        # Design Butterworth band-pass coefficients (b, a).  lowcut/highcut
        # are in the same units as fs and are normalized by Nyquist (fs/2)
        # as scipy's butter() expects.
        nyq = 0.5*fs
        low = lowcut/nyq
        high = highcut/nyq
        b, a = butter(order, [low, high], btype='band')
        return b, a
    def butter_bandpass_filter(self, lowcut, highcut, fs, order=5):
        # Apply the band-pass design above to the stored signal.
        b, a = self.butter_bandpass(lowcut, highcut, fs, order=order)
        y = lfilter(b, a, self.ydata)
        return y
    def sig_shift(self, freq):
        # NOTE(review): returns tdata scaled by freq (a phase ramp in
        # cycles); presumably the start of a frequency-shift step -- confirm.
        return self.tdata*freq
    def sample(self, samp_f):
        # Decimate: keep every samp_f-th sample of the stored signal.
        return self.ydata[0::samp_f]
    def aa_filt(self, freq, fs):
        # Band-pass +/-10% around freq, then (eventually) shift and sample.
        yfilt = self.butter_bandpass_filter(freq - 0.1*freq, freq + 0.1*freq, fs)
        yshift = self.sig_shift(freq)
        #STILL NEED TO FINISH THIS!! MAKE SURE YOU DON'T MIX UP X's and Y's
def main():
    """Demo: show aliasing of a 50 Hz cosine sampled below the Nyquist rate,
    in both the time domain and the (shifted) frequency domain."""
    #define some parameters
    Amp = 2.
    freq = 50.
    # NOTE(review): nyq is computed but never used below.
    nyq = 2*freq
    window = 100
    osc_win = window*freq #number of oscillations in the window
    samp_dense = 1e6
    # NOTE(review): np.linspace's `num` must be an integer on modern numpy;
    # samp_dense/samp_sparse are floats here -- confirm the numpy version.
    t = np.linspace(0, window, samp_dense)
    y = cosine(t, Amp, freq)
    t_sec, y_sec = view_section(t, y, samp_dense, osc_win)
    #samp_sparse: deliberately below 2 samples/oscillation to force aliasing
    eps = 0.01*osc_win
    samp_sparse = 1.9*osc_win
    #samp_sparse = 2*osc_win - eps
    ts = np.linspace(0, window, samp_sparse)
    ys = cosine(ts, Amp, freq)
    ts_sec, ys_sec = view_section(ts, ys, samp_sparse, osc_win)
    #Now use anti-aliasing BPF before sampling
    T = 1/freq
    fs = samp_dense/T
    # NOTE(review): filt is created but never used (aa_filt is unfinished).
    filt = anti_alias(t, y)
    #Take some FFTs
    fd, yd = no_off_spectrum(y, t)
    # NOTE(review): this rebinds fs and ys, shadowing the sample rate and
    # sparse signal computed above.
    fs, ys = no_off_spectrum(ys, ts)
    #plot formatting
    plt.rc('text', usetex = False)
    plt.rc('font', family = 'serif')
    plt.rc('font', size = 22)
    plt.rc('axes', linewidth = 2)
    plt.rc('lines', linewidth = 3)
    plt.rc('legend', fontsize = 16)
    plt.rc('figure', figsize = (10, 6))
    plt.rc('lines', markersize = 15)
    #plots: dense signal vs aliased sparse samples
    plt.figure(1)
    plt.plot(t_sec, y_sec)
    plt.plot(ts_sec, ys_sec, '.', color = 'red')
    plt.ylim(-2*Amp, 2*Amp)
    #plot FFTs (sparse spectrum shifted so the alias lines up with -freq)
    plt.figure(2)
    plt.plot(fd, abs(yd)/max(abs(yd)))
    plt.plot((fs-1.9*freq)*-1, abs(ys)/max(abs(ys)))
    plt.xlim(-freq-eps, freq+eps)
    plt.ylim(-0.5, 1.5)
if __name__ == "__main__":
main() | [
"will.d.stem@gmail.com"
] | will.d.stem@gmail.com |
fd282217f2a3abbd3385cb2e72791deea7441668 | 771ed599c431f7cc05b4b90895cea24dac3895ba | /utils/edit_misc.py | d3baf8250121fd7a05b544a6510ad407f7a5672b | [] | no_license | baaaad/coco-edit | 8e5395a52f1f1b85db62393c2ac2ab1d00230c7a | b70e36bf05be5af16923f5ca0b8cce91ec9c29d1 | refs/heads/main | 2023-07-14T23:40:54.684513 | 2021-08-13T07:22:57 | 2021-08-13T07:22:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,978 | py | import numpy as np
def edit_distance(sent1, sent2):
    """Build the DP table for insert/delete edit distance from sent1 to sent2.

    dp[i][j] is the minimum number of insertions and removals needed to turn
    the first i tokens of sent1 into the first j tokens of sent2 (matching
    tokens cost nothing; there is no substitution operation).  The full table
    is returned so callers can backtrack through it.
    """
    rows, cols = len(sent1), len(sent2)
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    # Base cases: transforming to/from an empty prefix.
    for i in range(rows + 1):
        dp[i][0] = i          # remove all i tokens
    for j in range(cols + 1):
        dp[0][j] = j          # insert all j tokens
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if sent1[i - 1] == sent2[j - 1]:
                # Tokens match: carry the diagonal cost forward.
                dp[i][j] = dp[i - 1][j - 1]
            else:
                # Cheaper of inserting sent2[j-1] or removing sent1[i-1].
                dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j])
    return dp
def sent2edit(sent1, sent2):
    """Backtrack through the edit-distance table to produce an edit script.

    Returns a list of operations that rewrites sent1 into sent2: 'KEEP',
    'DEL', or an inserted token (the token itself stands for the insert op).
    The script is built right-to-left and reversed before returning.
    """
    dp = edit_distance(sent1, sent2)
    edits = []
    # NOTE(review): `pos` collects table costs alongside each op but is
    # never returned or used -- confirm whether it can be dropped.
    pos = []
    m, n = len(sent1), len(sent2)
    while m != 0 or n != 0:
        curr = dp[m][n]
        if m==0: #have to insert all here
            while n>0:
                # NOTE(review): dp[1][n-1] looks suspicious; the row-0 base
                # case would be dp[0][n-1] -- only affects `pos`, not edits.
                left = dp[1][n-1]
                edits.append(sent2[n-1])
                pos.append(left)
                n-=1
        elif n==0:
            # Only deletions remain.
            while m>0:
                top = dp[m-1][n]
                edits.append('DEL')
                pos.append(top)
                m -=1
        else: # we didn't reach any special cases yet
            diag = dp[m-1][n-1]
            left = dp[m][n-1]
            top = dp[m-1][n]
            if sent2[n-1] == sent1[m-1]: # keep
                edits.append('KEEP')
                pos.append(diag)
                m -= 1
                n -= 1
            elif curr == top+1: # NOTE(review): comment said "INSERT preferred
                # before DEL" but this branch emits DEL -- deletion is taken
                # whenever it explains the current cost.
                edits.append('DEL')
                pos.append(top) # (sent2[n-1])
                m -= 1
            else: #insert
                edits.append(sent2[n - 1])
                pos.append(left) # (sent2[n-1])
                n -= 1
    # Ops were collected from the end of the sentences; restore left-to-right.
    edits = edits[::-1]
    # replace the keeps at the end to stop, this helps a bit with imbalanced classes (KEEP,INS,DEL,STOP)
    # for i in range(len(edits))[::-1]: #reversely checking
    #     if edits[i] == 'KEEP':
    #         if edits[i-1] =='KEEP':
    #             edits.pop(i)
    #         else:
    #             edits[i] = 'STOP'
    #             break
    # if edits == []: # do we learn edits if input and output are the same?
    #     edits.append('STOP') #in the case that input and output sentences are the same
    return edits
def edit2sent(sent, edits, last=False):
    """Apply an edit script to *sent* and return the rebuilt sentence.

    Ops: 'KEEP' copies the current source token, 'DEL' skips it, 'STOP'
    halts early, and any other string is inserted verbatim.  Whatever the
    cursor has not consumed when editing stops is appended unchanged.
    (*last* is accepted for interface compatibility but unused.)
    """
    if not edits or not sent:
        # Nothing to apply (or nothing to edit): hand back the input as-is.
        return sent
    rebuilt = []
    cursor = 0  # count of KEEP/DEL ops applied, i.e. source tokens consumed
    for op in edits:
        if cursor >= len(sent):
            # Source exhausted: remaining ops (including inserts) are
            # ignored, matching the original behaviour.
            continue
        if op == "KEEP":
            rebuilt.append(sent[cursor])
            cursor += 1
        elif op == "DEL":
            cursor += 1
        elif op == "STOP":
            break
        else:
            rebuilt.append(op)
    # Copy everything the cursor never reached.
    rebuilt.extend(sent[cursor:])
    return rebuilt
def decode_edit(p_pos, p_edit, p_wins, p_wsub):
    '''
    Greedy-decode one edit per batch element from the model's probability
    tensors: the edit label, the target position, and the insert/substitute
    word predicted at that position.
    '''
    batch = p_wins.shape[0]
    rows = np.arange(batch)
    # Most probable position and edit label per batch element.
    pos = p_pos.argmax(axis=-1)
    edit = p_edit.argmax(axis=-1)
    # Word predictions are per-position; pick the one at the chosen position.
    wins = p_wins.argmax(axis=-1)[rows, pos]
    wsub = p_wsub.argmax(axis=-1)[rows, pos]
    return edit, pos, wins, wsub
def main():
    #this prints an example of doing minimum editing distance
    # Demo: build the DP table for two tokenized sentences, derive the edit
    # script, and confirm applying it reproduces... (prints, does not assert).
    sent1 = "Military experts say the line between combat is getting blurry .".split()
    sent2 = "Military experts say war is changing .".split()
    A = edit_distance(sent1, sent2)
    # Pretty-print the DP table, 4 columns per cell.
    print('\n'.join([''.join(['{:4}'.format(item) for item in row])
          for row in A]))
    B = sent2edit(sent1, sent2)
    print(B)
    print(edit2sent(sent1,B))
if __name__ == '__main__':
main() | [
"mwb@zju.edu.cn"
] | mwb@zju.edu.cn |
298c5334b8cb8458e240533cc78ba90bc20dd8b5 | 5964e11c3d1ea543e139c6a4a66751a611fd0ac5 | /test/test_forest_delete.py | 63d8f63b4002dea380136151df3c75eb74080640 | [
"Apache-2.0"
] | permissive | codycollier/booster | 333a60caeb4df554d49d1cd8251fc4c58b57c76c | 8639d74cc98c4276fba281bbe244795e8f44a833 | refs/heads/master | 2020-04-16T17:42:04.494735 | 2019-07-12T03:25:21 | 2019-07-12T03:25:21 | 2,858,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,131 | py | #!/usr/bin/env python
import unittest
import boostertest
class TestForestDelete(boostertest.BoosterTestCase):
    """ Test the forest-delete action.

    BUG FIX throughout: the original asserted ``err.find(...) != 1``, which
    is vacuously true (str.find returns -1 when absent, otherwise an index);
    the intended check is ``!= -1``, i.e. the substring IS present.
    """

    def setUp(self):
        """ Set the action and other commonly used fixture data """
        self.params = {}
        self.params['action'] = "forest-delete"
        self.params['forest-name'] = "pinecone-a"
        self.params['delete-data'] = "true"
        # collect forest names for later teardown
        self.teardown_forests = []

    def tearDown(self):
        """ Remove items from server created during tests """
        params = {}
        params['action'] = "forest-delete"
        params['delete-data'] = "true"
        for forest in self.teardown_forests:
            params['forest-name'] = forest
            response, body = self.booster.request(params)
            # 404 is fine: the test may already have deleted the forest.
            self.assertTrue(response.status in (404, 200))

    def test_basic_forest_deletion_results_in_200(self):
        """ A successful forest deletion should result in 200 """
        # create the forest
        params = {}
        params['action'] = "forest-create"
        params['forest-name'] = "firs"
        params['host-name'] = "localhost"
        params['data-directory'] = ""
        self.teardown_forests.append(params['forest-name'])
        response, body = self.booster.request(params)
        err = response.get("x-booster-error", "none")
        self.assertEqual(response.status, 201)
        self.assertEqual(err, "none")
        # delete and assert
        params = self.params
        params['forest-name'] = "firs"
        response, body = self.booster.request(params)
        err = response.get("x-booster-error", "none")
        self.assertEqual(response.status, 200)
        self.assertEqual(err, "none")

    def test_delete_nonexistent_forest_results_in_404(self):
        """ Attempting to delete a non-existent forest should return 404 """
        params = self.params
        params['forest-name'] = "no-such-forest-exists-here"
        response, body = self.booster.request(params)
        err = response.get("x-booster-error", "")
        self.assertEqual(response.status, 404)
        self.assertTrue(err.find("does not exist") != -1)

    def test_empty_forest_name_results_in_404(self):
        """ A forest-delete with empty forest-name value should result in 404 """
        params = self.params
        params['forest-name'] = ""
        response, body = self.booster.request(params)
        err = response.get("x-booster-error", "none")
        self.assertEqual(response.status, 404)
        self.assertTrue(err.find("Forest '' does not exist") != -1)

    def test_delete_forest_with_no_forest_name_results_in_400(self):
        """ A forest-delete with missing forest-name should result in 400 """
        params = self.params
        del params['forest-name']
        response, body = self.booster.request(self.params)
        err = response.get("x-booster-error", "")
        self.assertEqual(response.status, 400)
        self.assertTrue(err.find("valid set of arguments was not provided") != -1)
unittest.main()
| [
"cody@telnet.org"
] | cody@telnet.org |
ab7a78b9db6f60371ee1fac74f8b8411ff23aa43 | a179d2abea58ee4d987bf05729a5e7df727af3cd | /instaclone/settings.py | 6e13a5e35aa00ba74ca16a19dd70fe50c0cb34ee | [
"MIT"
] | permissive | Derrick-Nyongesa/instagram-clone | ced05a4c334c9e95e96bec9a3883b448c5fa95c6 | 2f3c018c33aa440160401f0c1878a2670f2f0081 | refs/heads/main | 2023-05-14T01:32:36.211904 | 2021-05-26T13:42:26 | 2021-05-26T13:42:26 | 369,403,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,796 | py | """
Django settings for instaclone project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
from decouple import config, Csv
import cloudinary
import cloudinary.uploader
import cloudinary.api
import django_heroku
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment via python-decouple so it never lives in the repo.
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'instagram',
    'bootstrap3',
    'cloudinary'
]
# WhiteNoise is listed first so it can serve static files before Django's
# own middleware runs.
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'instaclone.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]
WSGI_APPLICATION = 'instaclone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# MODE=dev uses a local Postgres; anything else reads DATABASE_URL (Heroku).
if config('MODE')=="dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': config('DB_HOST'),
            'PORT': '',
        }
    }
# production
else:
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }
    db_from_env = dj_database_url.config(conn_max_age=500)
    DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/accounts/login/'
#AUTH_PROFILE_MODULE = 'accounts.Profile'
# NOTE(review): decouple's config() returns *strings* unless cast= is given,
# and the string "False" is truthy -- confirm EMAIL_USE_TLS / EMAIL_PORT
# should use cast=bool / cast=int respectively.
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
# Cloudinary credentials for media uploads, also from the environment.
cloudinary.config(
    cloud_name = config("CLOUDINARY_NAME"),
    api_key = config("CLOUDINARY_KEY"),
    api_secret = config("CLOUDINARY_SECRET")
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Apply Heroku-specific settings (db, static files, logging) on top.
django_heroku.settings(locals())
"nyongesaderrick@gmail.com"
] | nyongesaderrick@gmail.com |
acd62ee375dff59ca59e70a02bc32374c22f0693 | 02f25642ee890ef749e29ead0009309519412906 | /serverCS.py | 58c4bc85c86c48f3550e3d72e54eb0ded880163f | [] | no_license | romantikatara/Analytics_Model_Deployment | c753da09b8df26f6a20780316fbcf5f36668a4cf | 6e70c3cc8fe095a97350483ec7e3321a4f8967d5 | refs/heads/master | 2020-07-30T19:05:05.015626 | 2019-09-23T14:14:12 | 2019-09-23T14:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | # Membuat API dari model Machine Learning dengan menggunakan flask
# Ingin mengeload model .pkl ke suatu server
'''
Code ini mengambil data JSON dengan POST request suatu tampilan prediksi menggunakan model yang telah di-load.
Kemudian mengeluarkan hasil dengan format JSON.
'''
# Import libraries
import numpy as np
from flask import Flask, request, jsonify
import pickle
app = Flask(__name__)
# Load Model Random Forest
# De-serialization
model = pickle.load(open('modelcreditscoring.pkl','rb'))
@app.route('/api',methods=['POST'])  # accepts POST requests on /api
def predict():
    """Score one applicant sent as JSON and return the prediction as JSON."""
    # Parse the request body as JSON regardless of its Content-Type header.
    payload = request.get_json(force=True)
    # Assemble the feature vector in the exact order the model was trained on.
    features = np.array([payload['AGE'],
                         payload['MARRIAGE'],
                         payload['PAY_1'],
                         payload['PAY_2'],
                         payload['PAY_3']])
    prediction = model.predict([features])
    # jsonify needs a plain Python int, not a numpy integer scalar.
    return jsonify(int(prediction[0]))
if __name__ == '__main__':
app.run(port=5000, debug=True) | [
"noreply@github.com"
] | noreply@github.com |
ce74370ba73cd08e5d80892ce3594a66fbe400e2 | 1496fd578daaf13bebccd68057c455b74481b8de | /week3/hangman/ps3_hangman.py | 89ffb62c683272716b7e0678921ac1462f6ae894 | [] | no_license | leahfrye/MITx-6.00.1x | 274c58a6d7ca4c64dbd1bda5c2f102012f4a4f17 | 41e26ded3606b83b21998777ff32cf862e2b7f1d | refs/heads/master | 2021-09-03T14:43:04.654759 | 2018-01-09T21:43:45 | 2018-01-09T21:43:45 | 115,056,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | py | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = "C:\projects\mitCourse6.00.1x\week3\hangman\words.txt"
def loadWords():
    """
    Returns a list of valid words read from WORDLIST_FILENAME. Words are
    strings of lowercase letters; the whole list is expected on the first
    line of the file, separated by whitespace.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print("Loading word list from file...")
    # Use a context manager so the file handle is always closed
    # (the original opened it and never closed it).
    with open(WORDLIST_FILENAME, 'r') as inFile:
        # line: string -- the entire word list lives on the first line
        line = inFile.readline()
    # wordlist: list of strings, split on any whitespace
    wordlist = line.split()
    print(" ", len(wordlist), "words loaded.")
    return wordlist
def chooseWord(wordlist):
    """
    wordlist (list): list of words (strings)

    Picks one entry of wordlist uniformly at random and returns it.
    """
    picked = random.choice(wordlist)
    return picked
wordlist = loadWords()
# end of helper code
# -----------------------------------
def isWordGuessed(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: boolean, True if all the letters of secretWord are in lettersGuessed;
      False otherwise
    '''
    # Idiomatic membership test instead of rebuilding the masked word string
    # and comparing it: every letter of the secret word must have been guessed.
    # all() over an empty word is True, matching the original's behavior.
    return all(letter in lettersGuessed for letter in secretWord)
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores that represents
      what letters in secretWord have been guessed so far.
    '''
    # Reveal each letter that was guessed; mask the rest with an underscore.
    return "".join(c if c in lettersGuessed else "_" for c in secretWord)
def getAvailableLetters(lettersGuessed):
    '''
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters that represents what letters have not
      yet been guessed.
    '''
    # Keep every lowercase letter that has not been guessed yet,
    # preserving alphabetical order.
    return "".join(c for c in string.ascii_lowercase if c not in lettersGuessed)
def hangman(secretWord):
    '''
    secretWord: string, the secret word to guess.
    Starts up an interactive game of Hangman.
    * At the start of the game, let the user know how many
      letters the secretWord contains.
    * Ask the user to supply one guess (i.e. letter) per round.
    * The user should receive feedback immediately after each guess
      about whether their guess appears in the computers word.
    * After each round, you should also display to the user the
      partially guessed word so far, as well as letters that the
      user has not yet guessed.
    Follows the other limitations detailed in the problem write-up.
    '''
    # FILL IN YOUR CODE HERE...
    ## Add correct number of spaces to lettersGuessed list
    letterLength = len(secretWord)
    correctGuesses = []
    incorrectGuesses = []
    print("Welcome to the game Hangman!")
    print("I am thinking of a word that is " + str(letterLength) + " letters long")
    print("-------------")
    # NOTE(review): only wrong guesses decrement `turn`, and winning never
    # breaks out of the loop, so after a win the prompt keeps reappearing
    # until 8 wrong guesses accumulate -- confirm whether a `break` was intended.
    turn = 8;
    while turn > 0:
        print("You have " + str(turn) + " guesses left")
        # Available letters exclude both correct and incorrect past guesses.
        print("Available letters: " + getAvailableLetters(correctGuesses + incorrectGuesses))
        guess = input("Please guess a letter: ")
        guess = guess.lower()
        wordSoFar = getGuessedWord(secretWord, correctGuesses)
        if isWordGuessed(secretWord, correctGuesses):
            print("Congratulations, you won!")
        else:
            if guess in incorrectGuesses or guess in correctGuesses:
                # Repeated guess: no penalty, just show the current progress.
                print("Oops! You've already guessed that letter: " + wordSoFar)
            elif guess not in secretWord:
                print("Oops! That letter is not in my word: " + wordSoFar);
                turn -= 1
                incorrectGuesses.append(guess)
            else:
                correctGuesses.append(guess)
                # Recompute the masked word now that the guess was accepted.
                wordSoFar = getGuessedWord(secretWord, correctGuesses)
                print("Good guess: " + wordSoFar)
                if isWordGuessed(secretWord, correctGuesses):
                    print("-------------")
                    print("Congratulations, you won!")
                    print("-------------")
    # Out of turns: any remaining underscore means the word was never completed.
    if "_" in wordSoFar:
        print("Sorry, you ran out of guesses. The word was " + secretWord + ".")
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
secretWord = chooseWord(wordlist).lower()
hangman(secretWord)
| [
"lefrye@nps.edu"
] | lefrye@nps.edu |
b2db820fc500dacc8d7771ff92eaa649fc902760 | cb28de2d461cb7f099ca8b61bb5b2015dff0c776 | /spyder_scripts/jan_workshop/ex4.py | 27323317afdf37ef6f16d56f2e58bb6d7681a0bc | [] | no_license | chuymtz/python_scripts | 04082241ae3e3c3a4772aa45f6d667e665f7200c | 6b06a44671ddbafd073764e50bab9117ee72d2cc | refs/heads/master | 2021-01-17T17:37:03.486857 | 2016-06-11T03:11:22 | 2016-06-11T03:11:22 | 60,887,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # -*- coding: utf-8 -*-
"""
Python Workshop 1/30/2015
Exercise 4
1. Write a function that returns its GC-content when a sequence is given
GC_Content = (#G + #C) / length(seq)
2. SeqForEX4.txt contains 5 different DNA sequences with its corresponding
headers, find a way to read in the 5 sequences alone and print them out,
respectively
3. Calculate the GC-content for all 5 sequences
* Find the sequence with the highest GC-content, write its ID from header
and its coresponding GC-content to a file named 'result.txt'
@author: Yuan
"""
| [
"mtzgtzjesus@gmail.com"
] | mtzgtzjesus@gmail.com |
6bcc678d37afb5abbd9da779e9cdb18e702c0092 | 5558ba2dafd38d659f417948109f22958cedaa2a | /test_main_page.py | 06c543b2291f231191f27b780cd84f001d658f6e | [] | no_license | NikitaSivolobov/LastTask | ff7d78ea94ff40911b1149c59d7a5af9c86afcff | 074b63bd8fe58aa39e58f1e1972beefe69f4a349 | refs/heads/master | 2023-07-08T23:03:34.473619 | 2021-08-05T20:30:46 | 2021-08-05T20:30:46 | 389,947,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | from .pages.main_page import MainPage
from .pages.login_page import LoginPage
# import time
# updated the function from the assignment
def test_guest_can_go_to_login_page(browser):
    # Open the main page, follow its login link, and verify we landed
    # on a page that passes every login-page check.
    url = "http://selenium1py.pythonanywhere.com/"
    main_page = MainPage(browser, url)
    main_page.open()
    main_page.go_to_login_page()
    login_page = LoginPage(browser, browser.current_url)
    login_page.should_be_login_page()
def test_guest_should_see_login_link(browser):
    # The main page must expose a link to the login page.
    main_page = MainPage(browser, "http://selenium1py.pythonanywhere.com/")
    main_page.open()
    main_page.should_be_login_link()
def test_guest_should_be_login_url(browser):
    # The login page's URL itself must look like a login URL.
    login_page = LoginPage(browser, "http://selenium1py.pythonanywhere.com/accounts/login/")
    login_page.open()
    login_page.should_be_login_url()
def test_guest_should_be_login_form(browser):
    # The login page must render the login form.
    login_page = LoginPage(browser, "http://selenium1py.pythonanywhere.com/accounts/login/")
    login_page.open()
    login_page.should_be_login_form()
def test_guest_should_be_register_form(browser):
    # The login page must also render the registration form.
    login_page = LoginPage(browser, "http://selenium1py.pythonanywhere.com/accounts/login/")
    login_page.open()
    login_page.should_be_register_form()
| [
"nikita1314@yandex.ru"
] | nikita1314@yandex.ru |
a8aed15c162e96815fa1961c2bdd1299d9c3454d | a29a73de4df917da642adec96286d7ed3b2a0a42 | /TankDPPO/kankan.py | 0315d142c18ecb16f26c20009d72880e4e7f967d | [] | no_license | tankche1/Learn-To-Run | 9f0546f2d2c74cf18879579a3ccb2aeb3bea2765 | 27a48c8e1ec5864ab58caa9df4098a1089641cc0 | refs/heads/master | 2021-03-24T11:07:15.949621 | 2017-10-18T14:43:41 | 2017-10-18T14:43:41 | 101,266,609 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,281 | py | """
A simple version of OpenAI's Proximal Policy Optimization (PPO). [https://arxiv.org/abs/1707.06347]
Distributing workers in parallel to collect data, then stop worker's roll-out and train PPO on collected data.
Restart workers once PPO is updated.
The global PPO updating rule is adopted from DeepMind's paper (DPPO):
Emergence of Locomotion Behaviours in Rich Environments (Google Deepmind): [https://arxiv.org/abs/1707.02286]
View more on my tutorial website: https://morvanzhou.github.io/tutorials
Dependencies:
tensorflow r1.3
gym 0.9.2
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym, threading, queue
EP_MAX = 1000
EP_LEN = 200
N_WORKER = 4 # parallel workers
GAMMA = 0.9 # reward discount factor
A_LR = 0.0001 # learning rate for actor
C_LR = 0.001 # learning rate for critic
MIN_BATCH_SIZE = 64 # minimum batch size for updating PPO
UPDATE_STEP = 5 # loop update operation n-steps
EPSILON = 0.2 # for clipping surrogate objective
GAME = 'Pendulum-v0'
S_DIM, A_DIM = 3, 1 # state and action dimension
class PPO(object):
    """Proximal Policy Optimization learner (DPPO-style global updater).

    Builds one TF1 graph holding a critic (value head), an actor ``pi`` and a
    non-trainable copy ``oldpi`` used for the clipped-ratio surrogate objective.
    """
    def __init__(self):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
        # critic
        l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
        self.v = tf.layers.dense(l1, 1)
        self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        self.advantage = self.tfdc_r - self.v
        self.closs = tf.reduce_mean(tf.square(self.advantage))
        self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
        # actor
        pi, pi_params = self._build_anet('pi', trainable=True)
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
        self.sample_op = tf.squeeze(pi.sample(1), axis=0)  # operation of choosing action
        self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
        self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        # ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
        ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
        surr = ratio * self.tfadv  # surrogate loss
        self.aloss = -tf.reduce_mean(tf.minimum(  # clipped surrogate objective
            surr,
            tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
        self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
        self.sess.run(tf.global_variables_initializer())
    def update(self):
        """Updater thread body: wait for a full batch from the workers, sync
        oldpi <- pi, then run UPDATE_STEP actor and critic optimization steps."""
        global GLOBAL_UPDATE_COUNTER
        while not COORD.should_stop():
            if GLOBAL_EP < EP_MAX:
                UPDATE_EVENT.wait()  # wait until get batch of data
                self.sess.run(self.update_oldpi_op)  # copy pi to old pi
                data = [QUEUE.get() for _ in range(QUEUE.qsize())]  # collect data from all workers
                data = np.vstack(data)
                # Rows are [state | action | discounted return], split by column.
                s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
                adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
                # update actor and critic in a update loop
                [self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
                [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
                UPDATE_EVENT.clear()  # updating finished
                GLOBAL_UPDATE_COUNTER = 0  # reset counter
                ROLLING_EVENT.set()  # set roll-out available
    def _build_anet(self, name, trainable):
        """Build a Gaussian policy net under variable scope *name*.

        Returns (Normal distribution, variables collected from that scope).
        """
        with tf.variable_scope(name):
            l1 = tf.layers.dense(self.tfs, 200, tf.nn.relu, trainable=trainable)
            # tanh output scaled by 2 -- mean is bounded to [-2, 2].
            mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
            sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params
    def choose_action(self, s):
        """Sample one action for state *s*, clipped to [-2, 2]."""
        s = s[np.newaxis, :]
        a = self.sess.run(self.sample_op, {self.tfs: s})[0]
        return np.clip(a, -2, 2)
    def get_v(self, s):
        """Return the critic's scalar value estimate for state *s*."""
        if s.ndim < 2: s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]
class Worker(object):
    """Roll-out thread: runs episodes in its own env copy and pushes
    (state, action, discounted-return) batches to the shared queue."""
    def __init__(self, wid):
        self.wid = wid
        self.env = gym.make(GAME).unwrapped
        self.ppo = GLOBAL_PPO
    def work(self):
        """Collect trajectories until the coordinator stops.

        Pauses while the global PPO is updating and drops any transitions
        gathered under the stale policy before resuming.
        """
        global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
        while not COORD.should_stop():
            s = self.env.reset()
            ep_r = 0
            buffer_s, buffer_a, buffer_r = [], [], []
            # NOTE(review): `done` from env.step is ignored below, so every
            # episode always runs the full EP_LEN steps -- confirm intended.
            for t in range(EP_LEN):
                if not ROLLING_EVENT.is_set():  # while global PPO is updating
                    ROLLING_EVENT.wait()  # wait until PPO is updated
                    buffer_s, buffer_a, buffer_r = [], [], []  # clear history buffer, use new policy to collect data
                a = self.ppo.choose_action(s)
                s_, r, done, _ = self.env.step(a)
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append((r + 8) / 8)  # normalize reward, find to be useful
                s = s_
                ep_r += r
                GLOBAL_UPDATE_COUNTER += 1  # count to minimum batch size, no need to wait other workers
                if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                    # Bootstrap from the critic's value of the last state, then
                    # back up discounted returns over the buffered rewards.
                    v_s_ = self.ppo.get_v(s_)
                    discounted_r = []  # compute discounted reward
                    for r in buffer_r[::-1]:
                        v_s_ = r + GAMMA * v_s_
                        discounted_r.append(v_s_)
                    discounted_r.reverse()
                    bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
                    buffer_s, buffer_a, buffer_r = [], [], []
                    QUEUE.put(np.hstack((bs, ba, br)))  # put data in the queue
                    if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                        ROLLING_EVENT.clear()  # stop collecting data
                        UPDATE_EVENT.set()  # globalPPO update
                    if GLOBAL_EP >= EP_MAX:  # stop training
                        COORD.request_stop()
                        break
            # record reward changes, plot later
            if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
            else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
            GLOBAL_EP += 1
            print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPO()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # not update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue() # workers putting data in this queue
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and test
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode'); plt.ylabel('Moving reward'); plt.ion(); plt.show()
env = gym.make('Pendulum-v0')
while True:
s = env.reset()
for t in range(300):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0]
| [
"15307130191@fudan.edu.cn"
] | 15307130191@fudan.edu.cn |
1ba1793bbfb48e97c4458d7b42bef536b6f33c05 | 6a604f32b7c5b69f12ac37339f55cef9b2a9e90e | /venv/Scripts/easy_install-3.7-script.py | b774706563c77f039089db0f2cf71b11b836aa09 | [] | no_license | ithekozub/telegram_bot | d6bc2663e15a4910ed254c78dc92721cb6146c1c | feeec2bc1a2a63f35052ed3cf43771f59c232c88 | refs/heads/master | 2023-04-01T03:50:14.555471 | 2021-04-02T12:18:37 | 2021-04-02T12:18:37 | 353,803,156 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 462 | py | #!"F:\Рабочий Диск\CRYPTO_BOT\venv\Scripts\python.exe" -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script wrapper: do not edit by hand.
if __name__ == '__main__':
    # Strip any "-script.py(w)" / ".exe" suffix so argv[0] matches the
    # console-script name the entry point expects.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"ithekozub@inbox.ru"
] | ithekozub@inbox.ru |
ee760f49be6ef89ccf152093972a09626ddee30f | 6d0eb72dfbc165b6129d955b9fe5e4a1d8b1681e | /rules/file_dialogue.py | ca404e0aed146b539dd034ef7b3ef3ab64df350a | [] | no_license | seekM/.caster | 4ee55974e75ef8cab04b21866959664d551e14a5 | f36152e6d60a2b9a486493c3271d204388b1bfaf | refs/heads/master | 2020-05-17T14:31:23.156785 | 2019-06-18T22:02:17 | 2019-06-18T22:02:17 | 183,766,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from dragonfly import (AppContext, Dictation, Grammar, IntegerRef, Key, MappingRule,
Pause, Repeat, Text)
from dragonfly.actions.action_mimic import Mimic
from castervoice.lib import control, settings
from castervoice.lib.dfplus.additions import IntegerRefST
from castervoice.lib.dfplus.merge import gfilter
from castervoice.lib.dfplus.merge.mergerule import MergeRule
from castervoice.lib.dfplus.state.short import R
class FileDialogueRule(MergeRule):
    """Voice commands for driving Windows open/save file dialogues:
    folder navigation plus focus targets reached by tabbing from the
    address bar (Ctrl+L). Spoken forms include German alternatives
    ("dateiname", "dateityp") for the filename / file-type fields."""
    pronunciation = "file dialogue"
    mapping = {
        # Folder navigation (Alt + arrow keys); repeatable via the <n> extra.
        "get up [<n>]":
            R(Key("a-up"))*Repeat(extra="n"),
        "get back [<n>]":
            R(Key("a-left"))*Repeat(extra="n"),
        "get forward [<n>]":
            R(Key("a-right"))*Repeat(extra="n"),
        "new folder":
            R(Key("cs-n")),
        # Focus targets: Ctrl+L lands on the address bar, then Tab cycles
        # through the dialogue's widgets in a fixed order.
        "address bar":
            R(Key("c-l")),
        "search":
            R(Key("c-l, tab")),
        "left pane":
            R(Key("c-l, tab:3")),
        "center pane":
            R(Key("c-l, tab:4")),
        "sort":
            R(Key("c-l, tab:5")),
        "organize":
            R(Key("c-l, tab:2")),
        "(dateiname | filename)":
            R(Key("c-l, tab:6")),
        "(dateityp | file type)":
            R(Key("c-l, tab:7")),
    }
    # <n>: repeat count for the navigation commands, 1..10 (default 1).
    extras = [IntegerRefST("n", 1, 10)]
    defaults = {
        "n": 1,
    }
dialogue_names = [
"open",
"ffnen",
"speichern",
"select",
]
context = AppContext(title="save")
for name in dialogue_names:
context = context | AppContext(title=name)
grammar = Grammar("FileDialogue", context=context)
# Bug fix: the original ran gfilter on one instance but then registered a
# brand-new, unfiltered FileDialogueRule -- filter the same instance we add.
rule = FileDialogueRule(name="filedialogue")
gfilter.run_on(rule)
grammar.add_rule(rule)
grammar.load()
| [
"seekm@users.noreply.github.com"
] | seekm@users.noreply.github.com |
9a4c8ab6eb3dcedee5f481ceb64bf03bf5dc1c2c | b1710da529375a794ef2d0552bea3e57096cb06f | /contacts/models.py | f78f435bd243fcd2efe89c3fb62b93151cdd529c | [] | no_license | gaganbr4/django_Real_Estate | 7aab601fc8f1052f7dfe348ca7104237a28eb063 | bb4640955f9190a6e90207eb5ae5a8d5c95ff3dc | refs/heads/master | 2022-12-01T02:22:40.618282 | 2019-07-17T12:34:23 | 2019-07-17T12:34:23 | 197,345,104 | 1 | 0 | null | 2022-11-22T04:08:02 | 2019-07-17T08:10:43 | CSS | UTF-8 | Python | false | false | 527 | py | from django.db import models
from datetime import datetime
class Contact(models.Model):
    """An inquiry submitted about a property listing."""
    listing = models.CharField(max_length=200)  # listing title -- presumably denormalized; verify against caller
    listing_id = models.IntegerField()  # id of the listing the inquiry refers to
    name = models.CharField(max_length=200)
    email = models.CharField(max_length=200)
    phone = models.CharField(max_length=200)
    message = models.CharField(max_length=200)  # NOTE(review): 200 chars may truncate long messages; TextField may be intended
    contact_date = models.DateTimeField(default=datetime.now, blank=True)
    user_id = models.IntegerField(blank=True)  # NOTE(review): blank without null=True -- confirm how anonymous submissions are stored
    def __str__(self):
        """Return the contact's name as the model's display string."""
        return self.name
| [
"gaganbr4@gmail.com"
] | gaganbr4@gmail.com |
354e85d57c3ff0baac11c37c47ff1eb7448d1ca6 | 4ff078b6df0b6441ae2759b6c9713bc60d00e073 | /vertical flip along matrix axis/vertical_flip_H.py | c5ff174bfbf2e31d804005ab739ee3a60d3ba3e3 | [] | no_license | MLBott/python-fundamentals-student | b18ae425fde24befaa14f66078c3e9983c888289 | fe4efa6666b07203e3e0749596b587a30a80acae | refs/heads/master | 2020-12-02T00:40:57.314215 | 2020-01-20T03:30:48 | 2020-01-20T03:30:48 | 230,832,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | def flip_vertical_axis(matrix):
c = len(matrix) - 1
r = len(matrix) - 1
temp = 0
o = 0
while o <= r:
i = 0
while i <= r / 2:
temp = matrix[o][i]
matrix[o][i] = matrix[o][c - i]
matrix[o][c - i] = temp
i = i + 1
o = o + 1
return matrix
my_matrix = [1,2,3],[4,5,6],[7,8,9]
print(flip_vertical_axis(my_matrix))
# def flip_vertical_axis(matrix):
# for i in range(len(matrix)):
# matrix[i] = matrix[i][::-1]
# my_matrix = [1,2,3],[4,5,6],[7,8,9]
flip_vertical_axis(my_matrix)
| [
"noreply@github.com"
] | noreply@github.com |
76de75eab7bff110c78485c517c7eac9d8ff0041 | 7a1c34276bf178d1d56cfb05c5d6346f48ba07b6 | /partial_circuits/blkhash_circuit_for_4_blocks_of_4_pixels.py | 4b767fbef7a7e983547b51c0bfa6b9251e549bde | [] | no_license | timashana/Secure-MPPH | d2e7f4fede22260ac28b4ae5e168953cdeccd204 | 005bd1c612db2bcd0f77d0e135f9adac41932802 | refs/heads/master | 2020-03-10T14:11:17.739030 | 2018-11-07T09:09:55 | 2018-11-07T09:09:55 | 129,419,750 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,918 | py | '''
Anastasiia Timashova github.com/timashana
This is a tester program to evaluate BLKHASH part of the circuit by creating
the libscapi format circuit for calculating block-mean-hash of
4 blocks of 4 pixels that can be controlled manually (every pixel value is hardcoded)
'''
from hash_circuit_functions import *
def writeCircuitFile(num_of_gates, parties, p2_wires, gateList, output):
    """Write the circuit description in libscapi format to MPPH.txt.

    Layout: gate count, party count, party 1's section (the two constant
    wires 0 and 1), party 2's section (one wire per input bit), the output
    wires, then the gate list.
    """
    # Party 2 supplies one wire per bit: blocks * pixels * bits-per-pixel.
    input_size = len(p2_wires) * len(p2_wires[0]) * len(p2_wires[0][0])
    with open('MPPH.txt', 'w') as f:
        # Header plus party 1's input section (always exactly 2 wires).
        f.write(f"{num_of_gates}\n{parties}\n1 2\n\n")
        for wire in range(2):
            f.write(f"{wire}\n")
        # Party 2's input section: wire labels start right after 0 and 1.
        f.write(f"2 {input_size}\n\n")
        for wire in range(2, input_size + 2):
            f.write(f"{wire}\n")
        # Output wire labels.
        f.write(f"\n{len(output)}\n\n")
        for wire in output:
            f.write(f"{wire}\n")
        f.write('\n\n')
        # One gate description per line.
        for gate in gateList:
            f.write(f"{gate}\n")
def writePartyOneInputFile():
    """Write party one's inputs to disk: the count (2) followed by the
    constant 0 and 1 wires, one value per line."""
    lines = ["2"] + [str(bit) for bit in range(2)]
    with open('MPPHPartyOneInputs.txt', 'w') as out:
        out.write("\n".join(lines) + "\n")
def writePartyTwoInputFile(p2):
    """Write party two's inputs to disk: the bit count followed by every
    bit of *p2*, flattened in block/pixel/bit order, one per line."""
    input_size = len(p2) * len(p2[0]) * len(p2[0][0])
    with open('MPPHPartyTwoInputs.txt', 'w') as out:
        out.write(f"{input_size}\n")
        for block in p2:
            for pixel in block:
                for bit in pixel:
                    out.write(f"{bit}\n")
l=[] #list to store the libscapi-formatted gates
zero=0 # 0-wire
one=1 # 1-wire
gates = 0 # keep track of the number of gates in the circuit
curr_wire = 2 # keep track of the last used label
''' MANUAL BLOCK ASSIGNMENT TO CONTROL EVERY PIXEL'''
'''all numbers are represented in [LSB...MSB] format
for example, 123 -> [3, 2, 1]'''
A_1 = [0, 0, 0, 0, 0, 0, 0, 0]
B_1 = [0, 0, 0, 0, 0, 0, 0, 0]
C_1 = [0, 0, 0, 0, 0, 0, 0, 0]
D_1 = [0, 0, 0, 0, 0, 0, 0, 0]
A_1_labels = [i for i in range(curr_wire, curr_wire + len(A_1))]
curr_wire += len(A_1)
B_1_labels = [i for i in range(curr_wire, curr_wire + len(B_1))]
curr_wire += len(B_1)
C_1_labels = [i for i in range((curr_wire), curr_wire + len(C_1))]
curr_wire += len(C_1)
D_1_labels = [i for i in range((curr_wire), curr_wire + len(D_1))]
curr_wire += len(D_1)
# 1 block of 4 pixels
block_1 = [A_1, B_1, C_1, D_1]
A_2 = [0, 0, 0, 0, 0, 0, 0, 0]
B_2 = [1, 0, 1, 0, 1, 0, 1, 0]
C_2 = [0, 0, 0, 0, 0, 0, 0, 0]
D_2 = [1, 0, 1, 0, 1, 0, 1, 0]
A_2_labels = [i for i in range(curr_wire, curr_wire + len(A_2))]
curr_wire += len(A_2)
B_2_labels = [i for i in range(curr_wire, curr_wire + len(B_2))]
curr_wire += len(B_2)
C_2_labels = [i for i in range((curr_wire), curr_wire + len(C_2))]
curr_wire += len(C_2)
D_2_labels = [i for i in range((curr_wire), curr_wire + len(D_2))]
curr_wire += len(D_2)
# 1 block of 4 pixels
block_2 = [A_2, B_2, C_2, D_2]
A_3 = [1, 1, 1, 1, 1, 1, 1, 1]
B_3 = [0, 1, 0, 1, 0, 1, 0, 1]
C_3 = [1, 1, 1, 1, 1, 1, 1, 1]
D_3 = [0, 1, 0, 1, 0, 1, 0, 1]
A_3_labels = [i for i in range(curr_wire, curr_wire + len(A_3))]
curr_wire += len(A_3)
B_3_labels = [i for i in range(curr_wire, curr_wire + len(B_3))]
curr_wire += len(B_3)
C_3_labels = [i for i in range((curr_wire), curr_wire + len(C_3))]
curr_wire += len(C_3)
D_3_labels = [i for i in range((curr_wire), curr_wire + len(D_3))]
curr_wire += len(D_3)
# 1 block of 4 pixels
block_3 = [A_3, B_3, C_3, D_3]
A_4 = [1, 1, 1, 1, 1, 1, 1, 1]
B_4 = [1, 1, 1, 1, 1, 1, 1, 1]
C_4 = [1, 1, 1, 1, 1, 1, 1, 1]
D_4 = [1, 1, 1, 1, 1, 1, 1, 1]
A_4_labels = [i for i in range(curr_wire, curr_wire + len(A_4))]
curr_wire += len(A_4)
B_4_labels = [i for i in range(curr_wire, curr_wire + len(B_4))]
curr_wire += len(B_4)
C_4_labels = [i for i in range((curr_wire), curr_wire + len(C_4))]
curr_wire += len(C_4)
D_4_labels = [i for i in range((curr_wire), curr_wire + len(D_4))]
curr_wire += len(D_4)
# 1 block of 4 pixels
block_4 = [A_4, B_4, C_4, D_4]
# 4 blocks as above
blocks = [block_1[::], block_2[::], block_3[::], block_4[::]]
# labels of the first block
nums = [A_1_labels, B_1_labels, C_1_labels, D_1_labels]
# the other 3 blocks' labels follow right after each other
blocks_labels = [nums[::], [[i + len(nums)*len(nums[0]) for i in j] for j in nums], [[i + len(nums)*len(nums[0])*2 for i in j] for j in nums], [[i + len(nums)*len(nums[0])*3 for i in j] for j in nums]]
# Build the per-block mean subcircuit, then hash the means into the result wires.
means, curr_wire, gates = ALLMEANS(blocks, zero, one, curr_wire, gates, l)
result, curr_wire, gates = BLKHASH(means, one, curr_wire, gates, l)
# Emit the circuit description and both parties' input files.
writeCircuitFile(gates, 2, blocks_labels, l, result)
writePartyOneInputFile()
writePartyTwoInputFile(blocks)
| [
"atimash000@citymail.cuny.edu"
] | atimash000@citymail.cuny.edu |
ec491a079245a58e14d42601f0fca085af3e714e | 12738d4511f72cf8ac2f8a63df635f74698d5f63 | /configs/fpn_resnext50_32x4d_cityscapes_2gpu_f.py | 77269bafb8deda39295a9c1c77835019749bb104 | [
"MIT"
] | permissive | ternaus/iglovikov_segmentation | 5731bbd0cbf7125bdc121e7357820b8bec8297ae | 5a9463031e5da7c2cf34c967a4f2657416c11bd2 | refs/heads/master | 2020-08-09T12:26:44.779660 | 2019-12-15T21:59:42 | 2019-12-15T21:59:42 | 214,087,415 | 18 | 1 | MIT | 2020-04-03T02:16:36 | 2019-10-10T04:35:44 | Python | UTF-8 | Python | false | false | 3,589 | py | # Cityscapes. 2 x TeslaV100
from pathlib import Path
import albumentations as albu
import cv2
import segmentation_models_pytorch as smp
import torch
from torch.optim import Adam
from src.loss import CCE
ignore_index = 255
num_classes = 19
encoder_type = "resnext50_32x4d"
preprocess_parameters = smp.encoders.get_preprocessing_params(encoder_type)
mean = preprocess_parameters["mean"]
std = preprocess_parameters["std"]
num_gpu = 2
num_samples = None
train_parameters = dict(
lr=0.001,
train_batch_size=8 * num_gpu,
val_batch_size=num_gpu,
fp16=False,
num_epochs=300,
height_crop_size=512,
width_crop_size=512,
ignore_index=ignore_index,
tta=None, # can be None, d4 or lr
downsample_mask_factor=None, # can be 4 for FPN
)
if train_parameters["downsample_mask_factor"] is not None:
    # Bug fix: the original tested `not (crop_size / factor)`, which is only
    # falsy when the quotient is 0, so the divisibility check could never
    # fire. Use the modulus to actually verify divisibility.
    if train_parameters["height_crop_size"] % train_parameters["downsample_mask_factor"] != 0:
        raise ValueError(
            f"Height crop size ({train_parameters['height_crop_size']}) "
            f"should be divisible by the downsample_mask_factor "
            f"({train_parameters['downsample_mask_factor']})"
        )
    if train_parameters["width_crop_size"] % train_parameters["downsample_mask_factor"] != 0:
        raise ValueError(
            f"Width crop size ({train_parameters['width_crop_size']}) "
            f"should be divisible by the downsample_mask_factor"
            f"({train_parameters['downsample_mask_factor']})"
        )
    final_upsampling = None
else:
    # No mask downsampling: let the segmentation head upsample 4x itself.
    final_upsampling = 4
model = smp.FPN(
encoder_type,
encoder_weights="imagenet",
classes=num_classes,
activation=None,
final_upsampling=final_upsampling,
dropout=0.5,
decoder_merge_policy="cat",
)
pad_factor = 64
imread_library = "cv2" # can be cv2 or jpeg4py
optimizer = Adam(
[
{"params": model.decoder.parameters(), "lr": train_parameters["lr"]},
# decrease lr for encoder in order not to permute
# pre-trained weights with large gradients on training start
{"params": model.encoder.parameters(), "lr": train_parameters["lr"] / 100},
],
weight_decay=1e-3,
)
normalization = albu.Normalize(mean=mean, std=std, p=1)
train_augmentations = albu.Compose(
[
albu.RandomSizedCrop(
min_max_height=(
int(0.5 * (train_parameters["height_crop_size"])),
int(2 * (train_parameters["height_crop_size"])),
),
height=train_parameters["height_crop_size"],
width=train_parameters["width_crop_size"],
w2h_ratio=1.0,
p=1,
),
albu.ShiftScaleRotate(rotate_limit=20, scale_limit=0, p=0.5, mask_value=ignore_index),
albu.RandomBrightnessContrast(p=0.5),
albu.RandomGamma(p=0.5),
albu.HueSaturationValue(p=0.5),
albu.HorizontalFlip(p=0.5),
normalization,
],
p=1,
)
val_augmentations = albu.Compose(
[
albu.PadIfNeeded(
min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
),
normalization,
],
p=1,
)
test_augmentations = albu.Compose([normalization], p=1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200, 250, 280], gamma=0.1)
train_image_path = Path("data/train/images")
train_mask_path = Path("data/train/masks")
val_image_path = Path("data/val/images")
val_mask_path = Path("data/val/masks")
loss = CCE(ignore_index=ignore_index)
callbacks = []
logdir = f"runs/2gpu_{model.name}_f/baseline"
| [
"viglovikov@lyft.com"
] | viglovikov@lyft.com |
0768aef21a8d1b52a028c4de5676b7188709d19b | fb71813276a917fef7914093c0757ded24c38bc2 | /scripts/step6.py | f9337d84f8d69324d4613ab844d8db70677bfe25 | [] | no_license | epimedai/flowers | 38f02c1977700b98e562d0da9d1a5f7ae06466da | 54fd016f9740500110a9b968c56121ae8c034860 | refs/heads/master | 2020-03-27T13:31:07.616364 | 2019-11-06T19:03:31 | 2019-11-06T19:03:31 | 146,614,046 | 18 | 14 | null | 2019-11-06T19:03:33 | 2018-08-29T14:40:41 | Jupyter Notebook | UTF-8 | Python | false | false | 1,976 | py | import matplotlib.pyplot as plt
# Fine-tune a frozen VGG19 backbone with a small dense head to classify
# 17 flower categories from directory-structured image data.
import numpy as np
from keras.applications.vgg19 import decode_predictions, \
    preprocess_input, VGG19
from keras.engine import Model
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
# Fixed seed for reproducible augmentation/shuffling.
np.random.seed(1234)
# If you run into memory errors, try reducing this
batch_size = 32
# Training data: VGG19 preprocessing plus light augmentation.
train_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                     horizontal_flip=True,
                                     zoom_range=.2,
                                     rotation_range=30)
train_batches = train_generator.flow_from_directory('flowers/train',
                                                    target_size=(224, 224),
                                                    batch_size=batch_size)
# Validation data: preprocessing only, no augmentation.
val_generator = ImageDataGenerator(
    preprocessing_function=preprocess_input)
val_batches = val_generator.flow_from_directory('flowers/val',
                                                target_size=(224, 224),
                                                batch_size=batch_size)
# Invert the class->index mapping into an index->label list.
indices = train_batches.class_indices
labels = [None] * 17
for key in indices:
    labels[indices[key]] = key
# Headless VGG19 with global max pooling; ImageNet weights, all layers frozen.
pretrained = VGG19(include_top=False, input_shape=(
    224, 224, 3), weights='imagenet', pooling='max')
for layer in pretrained.layers:
    layer.trainable = False
# New classification head: 128-unit ReLU layer, dropout, 17-way softmax.
inputs = pretrained.input
outputs = pretrained.output
hidden = Dense(128, activation='relu')(outputs)
dropout = Dropout(.3)(hidden)
preds = Dense(17, activation='softmax')(dropout)
model = Model(inputs, preds)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=1e-4), metrics=['acc'])
model.fit_generator(train_batches,
                    epochs=100,
                    validation_data=val_batches,
                    steps_per_epoch=len(train_batches),
                    validation_steps=len(val_batches))
| [
"estenleonardsen@gmail.com"
] | estenleonardsen@gmail.com |
0ee934977a97f8ccfc74f34b846ea3150a594458 | 954f61df1242e7a512a7084a64084c097c612724 | /python_old/prepare_data_from_scratch.py | 4b6a9ea31a850f6e64be524056d7f765f86bcc64 | [] | no_license | ssh352/Speculation-and-volatility-of-cryptocurrencies | 4af4025d85e309db291d53e50410d0f102d146c0 | 9827996a4fb38b137a7708dd9c4ed9db22fc4830 | refs/heads/master | 2021-04-07T14:05:06.603601 | 2018-09-13T21:36:45 | 2018-09-13T21:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py |
# NOTE(review): this script assumes 'pd' (pandas) and 'prepare_df' are
# imported/defined earlier in the original file; neither is visible here.
df=pd.read_csv('all-crypto-currencies/crypto-markets.csv')
df.head()
list(df)
# Keep only the columns needed downstream.
df=df[['date','name','volume','close']]
# Restrict to Bitcoin rows only.
df=df[df['name']=='Bitcoin']
# Drop the first 243 rows -- presumably an early low-activity period; confirm.
df=df.iloc[243:,:]
res_table=prepare_df(df)
# NOTE(review): bare 'res' is a no-op expression and raises NameError if
# 'res' was never defined -- looks like leftover notebook output.
res
# res_table=res_table.iloc[50:,:]
res_table.to_csv('df_prepared_only_btc.csv',header=True,index=True)
"minasyanvaagn@mail.ru"
] | minasyanvaagn@mail.ru |
2e7a7f063587cd0d743af42a293e11b777285e59 | b9bcb18f8ae5a75390518136400a3e52a4bc5678 | /checkjson.py | 46732aad08103015872180b71a5d152a582cc4c9 | [] | no_license | tarunlochib/python-automation- | 093bb414ede1d2cdeb974a2bc55dcc9ead4bb59a | 5aeddc76a399bee3a2bc1421888f9e409696bea3 | refs/heads/main | 2022-12-31T09:35:02.603621 | 2020-10-20T05:39:50 | 2020-10-20T05:39:50 | 305,601,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import os
import sys
import json
# Validate that the file named by the first CLI argument contains parseable JSON.
if len(sys.argv) > 1:
    if os.path.exists(sys.argv[1]):
        try:
            # The with-statement closes the handle even if parsing fails
            # (the original leaked the handle and shadowed the 'file' builtin).
            with open(sys.argv[1], "r") as handle:
                json.load(handle)
            print("Valid JSON!")
        except ValueError as err:
            # json raises ValueError (JSONDecodeError) on malformed input;
            # report it instead of crashing with a traceback.
            print(sys.argv[1] + " is not valid JSON: " + str(err))
    else:
        print(sys.argv[1] + " not found")
else:
    print("usage: checkjson.py <file>")
| [
"tarun@simpleisbetterthancomplex.com"
] | tarun@simpleisbetterthancomplex.com |
99fbc74ec5207bb5eea3ee53f31b234166d342b6 | 5f8b7bf78073fbc2b15fe947e9988cdab511d5ef | /PFA/cms/urls.py | 944efb779a0a309a48126e7b7c9b66c74dd76654 | [] | no_license | jordanfaure/PFA | 4284c7a84a5ddc1c3fb67b553e94826eed4e78f5 | be3d9dd6c2bc26f90ad13ec4180f03af46f640ed | refs/heads/master | 2020-06-01T17:39:38.472704 | 2014-01-27T09:36:55 | 2014-01-27T09:36:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | #-*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# URL routes for the cms app, in the Django <= 1.9 "patterns" style: views
# are named by dotted string, resolved relative to the 'cms' prefix.
urlpatterns = patterns('cms',
    url(r'^$', 'views.home'),
)
| [
"thomas.miele@epitech.eu"
] | thomas.miele@epitech.eu |
690dee5b0190d88a4aa134d46dc443c1979b1bb0 | 282c06d6dd365003d7ec266c9732da5becffb368 | /test_darknet.py | 5de747795f0c8047f75502f312267203d6febdb0 | [] | no_license | thanakorn/yolov3_pytorch | e4f1a8ce10b705b688ea4e86c76289f107d2b44b | c59c6bddf8eea655e11202c589c452a4bac53ded | refs/heads/main | 2022-12-19T23:37:24.930939 | 2020-10-17T17:19:09 | 2020-10-17T17:19:09 | 304,828,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | import cv2 as cv
import numpy as np
import torch
from models.darknet import parse_cfg, create_modules, Darknet
from torch.autograd import Variable
def get_test_input():
    """Read 'dog-cycle-car.png' and convert it to a 1x3x416x416 float tensor.

    The image is resized to the network input size, channel-reordered from
    OpenCV's BGR to RGB, moved to channel-first layout, batched and scaled
    to [0, 1] before being wrapped for the forward pass.
    """
    img = cv.imread("dog-cycle-car.png")
    img = cv.resize(img, (416,416)) #Resize to the input dimension
    img_ = img[:,:,::-1].transpose((2,0,1)) # BGR -> RGB | H X W C -> C X H X W
    img_ = img_[np.newaxis,:,:,:]/255.0 #Add a channel at 0 (for batch) | Normalise
    img_ = torch.from_numpy(img_).float() #Convert to float
    img_ = Variable(img_) # Convert to Variable (no-op on PyTorch >= 0.4)
    return img_

if __name__ == '__main__':
    # Build the network from the YOLOv3 config and run one forward pass on
    # the sample image, printing the raw detection tensor and its shape.
    model = Darknet('cfg/yolov3.cfg')
    # print(model)
    test_input = get_test_input()
    detection = model(test_input)
    print(detection.size())
    print(detection)
print(detection) | [
"thanakorn.panyapiang@outlook.com"
] | thanakorn.panyapiang@outlook.com |
5ca7400dd101f54f05f252c458a3444c85fe7629 | bfb10fede6f32fd6e91bac5068bf4417495b7d90 | /ImageProcessor/ImageFilter.py | 7041f3d156046b26aa83c995b911b598b3ab73e4 | [] | no_license | lennyhits/fungal-detector | 23feab1d268c6208dd22a51d971434f6f0e9371c | d4b07843a443db8ff95240c8d064ef5b8f5c3748 | refs/heads/master | 2020-03-26T15:28:22.069244 | 2015-05-17T09:34:30 | 2015-05-17T09:34:30 | 35,759,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | ''' This file holds the functionality for image filtering and pre-processing '''
import cv2
from skimage import data, filter
import numpy as np
import os
import re
def skimage_filter_technique(image_path):
    # NOTE(review): despite the name (and the skimage import at module
    # level), no total-variation/denoising filter is applied here -- the
    # image is returned exactly as read by OpenCV. Confirm whether the
    # filtering step was intentionally disabled.
    img2 = cv2.imread(image_path, True)
    tv_filter = img2
    return tv_filter
def getJpgImages(imgPath):
    """Return full paths of the *.jpg files directly inside imgPath.

    Only the top level of the directory is scanned (no recursion).
    """
    images = next(os.walk(imgPath))[2]
    imgs = []
    for image in images:
        # BUG FIX: the original pattern r'\S+.jpg' left the dot unescaped
        # and was unanchored, so names like 'xcjpg' or 'a.jpg.bak' matched.
        if re.search(r'\S+\.jpg$', image):
            imgs.append(os.path.join(imgPath, image))
    return imgs
def calculateHistograms(images, bins):
    """Return an (N, bins) array: one channel-0 histogram row per image path.

    Each image is read with OpenCV and its first-channel histogram over the
    full 0-255 range is bucketed into `bins` bins.
    """
    numOfImages = len(images)
    imageData = np.zeros((numOfImages, bins))
    for imageIndex in range(numOfImages):
        img = cv2.imread(images[imageIndex])
        # BUG FIX: removed the unused 'img2 = skimage_filter_technique(...)'
        # call, which re-read every image from disk and discarded the result.
        hist = cv2.calcHist([img], [0], None, [bins], [0, 256])
        imageData[imageIndex, :] = hist.transpose()
    return imageData
def get_histograms(disease_train_path, healthy_train_path, disease_test_path,
                   healthy_test_path=None):
    """Compute histogram feature matrices for the train/test image sets.

    Returns a dict of float32 arrays keyed by trainingDiseasedData,
    trainingHealthyData, testDiseasedData and testHealthyData.

    healthy_test_path defaults to disease_test_path, which preserves the
    original behavior -- the original sourced *both* test sets from the
    diseased directory, almost certainly a bug. Pass an explicit healthy
    test path to get correctly separated test data.
    """
    bins = 50  # length of each feature vector
    if healthy_test_path is None:
        healthy_test_path = disease_test_path
    trainingDiseasedImages = getJpgImages(disease_train_path)
    trainingHealthyImages = getJpgImages(healthy_train_path)
    testDiseasedImages = getJpgImages(disease_test_path)
    testHealthyImages = getJpgImages(healthy_test_path)
    trainingDiseasedData = calculateHistograms(trainingDiseasedImages, bins).astype(np.float32)
    trainingHealthyData = calculateHistograms(trainingHealthyImages, bins).astype(np.float32)
    testDiseasedData = calculateHistograms(testDiseasedImages, bins).astype(np.float32)
    testHealthyData = calculateHistograms(testHealthyImages, bins).astype(np.float32)
    return {"trainingDiseasedData": trainingDiseasedData, "trainingHealthyData": trainingHealthyData,
            "testDiseasedData": testDiseasedData, "testHealthyData": testHealthyData}
def define_data_classes(trainingDiseasedData, trainingHealthyData, testDiseasedData, testHealthyData):
    """Build (N, 1) float32 label vectors for each data set.

    Convention (per the original comment): Healthy = class 0, Diseased = class 1.
    """
    #Healthy - class 0, Diseased - class - 1
    trainingDiseasedClasses = np.ones((len(trainingDiseasedData), 1)).astype(np.float32)
    trainingHealthyClasses = np.zeros((len(trainingHealthyData), 1)).astype(np.float32)
    testDiseasedClasses = np.ones((len(testDiseasedData), 1)).astype(np.float32)
    # BUG FIX: healthy test labels were np.ones(), contradicting the
    # "Healthy - class 0" convention used for the training labels.
    testHealthyClasses = np.zeros((len(testHealthyData), 1)).astype(np.float32)
    return {"trainingDiseasedClasses": trainingDiseasedClasses, "trainingHealthyClasses": trainingHealthyClasses,
            "testDiseasedClasses": testDiseasedClasses, "testHealthyClasses": testHealthyClasses}
def concatenate_data(trainingHealthyData, trainingHealthyClasses, trainingDiseasedData, trainingDiseasedClasses,
                     testDiseasedData, testDiseasedClasses, testHealthyData, testHealthyClasses):
    """Vertically stack healthy and diseased samples (healthy rows first).

    Returns a dict with combined 'trainingData'/'trainingClasses' and
    'testData'/'testClasses' arrays, keeping features and labels aligned.
    """
    return {
        "trainingData": np.vstack((trainingHealthyData, trainingDiseasedData)),
        "trainingClasses": np.vstack((trainingHealthyClasses, trainingDiseasedClasses)),
        "testData": np.vstack((testHealthyData, testDiseasedData)),
        "testClasses": np.vstack((testHealthyClasses, testDiseasedClasses)),
    }
def save_data_to_folder(dataFolderPath, dataClasses):
    """Persist the train/test arrays to <dataFolderPath>imageData.npz.

    NOTE: the folder path is concatenated directly, so callers are expected
    to pass a path that ends with a separator. Always returns True.
    """
    archive_path = dataFolderPath + 'imageData.npz'
    np.savez(
        archive_path,
        train=dataClasses['trainingData'],
        train_labels=dataClasses['trainingClasses'],
        test=dataClasses['testData'],
        test_labels=dataClasses['testClasses'],
    )
    return True
"joachimhits@gmail.com"
] | joachimhits@gmail.com |
54b6d27f371e43575fbcd045778475cebc4e124c | 74b2f9658f3fd47aaf255febf2852263c6ec19e0 | /takerest/src/helpers/test-data-gen/src/lib/vendors/pairs/main/create_allpairs.py | d7ea9dd782cca38036dc02fe37ad0bb06554a910 | [] | no_license | upworka0/restio | bf47db136e884c72b061962e3973546a4bdd78c7 | 9f22fc6513fefd6c98738b0ea016abfec7c437cb | refs/heads/master | 2023-03-05T00:23:12.243382 | 2021-10-01T01:49:01 | 2021-10-01T01:49:01 | 203,429,128 | 1 | 0 | null | 2023-03-01T19:12:22 | 2019-08-20T18:07:32 | JavaScript | UTF-8 | Python | false | false | 322 | py | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
from pairwisepy import AllPairs
import json
import sys
import logging
import ast
# Parse the test parameters passed as a Python-literal string on the CLI.
parameters = ast.literal_eval(sys.argv[1])
# Generate the pairwise (all-pairs) combinations of the parameter values.
pairwise = AllPairs(parameters)
# Index each generated case and emit the whole set as JSON on stdout.
results = {i:pairs for i, pairs in enumerate(pairwise)}
print(json.dumps(results))
"upworka0@gmail.com"
] | upworka0@gmail.com |
7f37aced58730055c3055c0db0f91adbadd4ae55 | 965f3998f443d457b6b28e3c24bade38f72c108f | /Test_ist_so_cool.py | 82353da3f5fc6d9bea7ffd7dc2f3efd15bd6e6b3 | [] | no_license | Micha040/FirstTryPython | d2bddde70c5917b9e6a88ecf5787d74cedbe13cd | 9da5341c9dbc1069374831ac195c2ee7107d3007 | refs/heads/master | 2021-01-19T16:43:18.471511 | 2017-04-17T08:14:51 | 2017-04-17T08:14:51 | 88,284,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import turtle
# Simple turtle-graphics doodle: draw a line, spin through several left
# turns, then trace three more sides with 90-degree corners.
t = turtle.Pen()
t.forward(100)
t.left(100)
t.left(90)
t.left(170)
t.left(90)
t.forward(100)
t.left(90)
t.forward(100)
t.left(90)
t.forward(100)
t.left(90)
"mkroeger.hh@gmail.com"
] | mkroeger.hh@gmail.com |
23a26d9a374dd4a953f78591e47dd7cde12e1ad0 | aee1e843ca826ac92271151bbe8fd21828c8b044 | /apps/get_quotes/models.py | 6b5b66644f144987c4670ac573671ceafb69896d | [] | no_license | sullvantes/ajonestwitbot | 5312f199745372b6c399d519f7ed6a585305ee86 | dc0f23f3d7011a461c0777fd8de9a79518a0138b | refs/heads/master | 2022-12-09T13:06:47.466932 | 2017-08-04T15:05:19 | 2017-08-04T15:05:19 | 99,273,128 | 0 | 0 | null | 2022-12-08T00:42:25 | 2017-08-03T20:39:23 | Python | UTF-8 | Python | false | false | 6,410 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
#from tagulous.models import SingleTagField, TagField
import tagulous
import bcrypt
import re
from datetime import datetime
EMAIL_REGEX = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
'''
class UserManager(models.Manager):
def register_validator(self, user_input):
errors = {}
input_email = user_input['email']
check_email_list = User.objects.filter(email = input_email)
if len(check_email_list) <> 0:
errors['existing'] = 'This email is already registered. Log in.'
if len(user_input['first_name']) < 2:
errors['first_name'] = 'First name must be at least 2 characters long'
if not (user_input['first_name']).isalpha() or not (user_input['last_name']).isalpha():
errors['name_chars'] = 'Name fields can only contain letters of the alphabet'
if len(user_input['last_name']) < 2:
errors['last_name'] = 'Last name must be at least 2 characters long'
if len(user_input['password']) < 8:
errors['password'] = 'Password must be at least 8 characters long'
if not EMAIL_REGEX.match(user_input['email']):
errors['email'] = 'Email syntax not valid.'
return errors
def login_validator(self, user_input):
errors = {}
input_email = user_input['email']
input_password = user_input['password']
check_user = User.objects.filter(email = input_email)
if len(check_user) == 0:
errors['not_registered'] = 'Email not in system. Register first before attempting login.'
if not bcrypt.checkpw(input_password.encode(), check_user[0].password.encode()):
errors['wrong_password'] = 'Incorrect password. Please try again.'
return errors
class User(models.Model):
first_name = models.CharField(max_length = 255)
last_name = models.CharField(max_length = 255)
email = models.CharField(max_length = 255, unique = True)
password = models.CharField(max_length = 45)
#address = models.CharField(max_length = 255)
#city = models.CharField(max_length = 45)
#state = models.CharField(max_length = 45)
#zip_code = models.CharField(max_length = 45)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
#objects = UserManager()
'''
class MyTagModel(tagulous.models.TagModel):
    # Custom tag model so tag fields can share a single tag table and a
    # common autocomplete endpoint.
    class TagMeta:
        # Name of the URL route that serves tag autocomplete suggestions.
        autocomplete_view = 'tags_autocomplete'
class QuoteManager(models.Manager):
    """Manager providing validation helpers for Quote submissions."""

    def quote_validator(self, user_input):
        """Validate the 'quote' field length.

        Returns a dict of error messages keyed by 'quote_length'; the dict
        is empty when the quote length is acceptable.
        """
        errors = {}
        submitted = user_input['quote']
        if len(submitted) > 124:
            errors['quote_length'] = 'The quote is too long. This is twitter.'
        if len(submitted) < 5:
            errors['quote_length'] = "That's no quote, that's far too short."
        return errors
class Quote(models.Model):
    # The quote text itself (length is validated separately in QuoteManager).
    quote = models.CharField(max_length = 255)
    # Free-form tags backed by the custom MyTagModel.
    tags = tagulous.models.TagField(to=MyTagModel)
    #created_by = models.ForeignKey(User, related_name = 'quotes')
    # Whether a moderator has approved this quote for posting.
    approved = models.BooleanField(default = False)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    objects=QuoteManager()
# class followed(models.Model):
# screenname=models.CharField(max_length = 255)
# quotes=
# created_at = models.DateTimeField(auto_now_add = True)
# updated_at = models.DateTimeField(auto_now = True)
'''
class Venue(models.Model):
title = models.CharField(max_length = 255)
address = models.CharField(max_length = 255)
city = models.CharField(max_length = 45)
state = models.CharField(max_length = 45)
zip_code = models.CharField(max_length = 45)
seating_map = models.CharField(max_length = 255)
banner = models.CharField(max_length = 255, default = '')
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Event(models.Model):
title = models.CharField(max_length = 255)
venue = models.ForeignKey(Venue, related_name = 'events')
performers = models.ForeignKey(Performer, related_name = 'events', default="")
start_time = models.DateTimeField(default=datetime.now)
popularity = models.FloatField(default = 0.0)
average_price = models.FloatField(null = True)
banner = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Listing(models.Model):
seller = models.ForeignKey(User, related_name = 'listings')
event = models.ForeignKey(Event,related_name = 'listings')
zone = models.CharField(max_length = 255)
section = models.CharField(max_length = 255)
row = models.CharField(max_length = 255, null=True)
tickets_for_sale = models.IntegerField(default = 0)
delivery_method = models.CharField(max_length = 255, default='Electronic')
price_value=models.FloatField(default = 0.0)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Ticket(models.Model):
listing = models.ForeignKey(Listing, related_name = 'tickets')
seat = models.IntegerField(default = 0)
price = models.FloatField(default = 0.0)
sold = models.BooleanField(default = False)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class CreditCard(models.Model):
user = models.ForeignKey(User, related_name = 'credit_cards')
name_on_card = models.CharField(max_length = 255)
number = models.CharField(max_length = 45)
expiration = models.CharField(max_length = 16)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Transaction(models.Model):
buyer = models.ForeignKey(User, related_name = 'transactions')
listing = models.ForeignKey(Listing, related_name = 'transactions')
tickets_bought = models.IntegerField(default = 0)
credit_card = models.ForeignKey(CreditCard, related_name = 'transactions')
total = models.FloatField(default = 0.0)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
'''
| [
"sullyut@gmail.com"
] | sullyut@gmail.com |
a3efe365054faff24943537ffee1d595dbee5184 | 3af020fd309cde6b08e8f3b8944a80d4c674eaf4 | /tracker.py | 3a4458a6aa5a50b50c3110b7641468366d44b5a2 | [] | no_license | ismailkasarci/atv-tracking | 3b89d6cdc26c9477be7dd1dbbe022b449bc473f3 | 786c448c78bbb04ffe1d7fa831d8080d6f88497f | refs/heads/master | 2023-08-30T10:23:21.601516 | 2021-11-13T10:12:23 | 2021-11-13T10:12:23 | 351,635,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | import math
class EuclideanDistTracker:
    """Assign persistent integer IDs to bounding boxes across frames.

    Each incoming box is matched to the nearest previously-seen object
    center (Euclidean distance below a fixed threshold); unmatched boxes
    receive a brand-new ID, and IDs absent from the current frame are
    forgotten.
    """

    def __init__(self):
        # Maps object id -> last known (cx, cy) center position.
        self.center_points = {}
        # Next id to hand out; increments once per brand-new object.
        self.id_count = 0

    def update(self, objects_rect):
        """Match (x, y, w, h) rects to tracked objects.

        Returns a list of [x, y, w, h, object_id] entries for this frame.
        """
        tracked = []
        for x, y, w, h in objects_rect:
            # Integer center of the box.
            cx = (x + x + w) // 2
            cy = (y + y + h) // 2
            # Find the first previously-seen center within the threshold.
            matched_id = None
            for obj_id, (px, py) in self.center_points.items():
                if math.hypot(cx - px, cy - py) < 2000:
                    matched_id = obj_id
                    break
            if matched_id is None:
                # No existing object is close enough: register a new one.
                matched_id = self.id_count
                self.id_count += 1
            self.center_points[matched_id] = (cx, cy)
            tracked.append([x, y, w, h, matched_id])
        # Forget centers whose ids were not seen in this frame.
        self.center_points = {
            entry[4]: self.center_points[entry[4]] for entry in tracked
        }
        return tracked
| [
"ismail.kasarci@gmail.com"
] | ismail.kasarci@gmail.com |
9a40f68069d810351a46713b9be977a322a86944 | 192b302220d33da72b8ee12f5d51b67d90108a15 | /super_movie_picker/urls.py | 8f015dd58bbca83a340c63600d1670edf7582176 | [] | no_license | greenewk/movie_picker | 9eb3b12bf45e81b7dd16b6590b189bbf71a86c3a | 40915abd18b8fe0181c73fe392dcfb26998744c1 | refs/heads/master | 2021-06-26T17:46:44.054377 | 2020-01-24T14:12:38 | 2020-01-24T14:12:38 | 228,456,534 | 0 | 0 | null | 2021-06-02T00:48:32 | 2019-12-16T19:08:41 | JavaScript | UTF-8 | Python | false | false | 883 | py | """super_movie_picker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from movie_picker import views
# Root URLconf: the Django admin site plus the movie_picker app's routes
# mounted under /movie_picker/ with their own URL namespace.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('movie_picker/', include('movie_picker.urls', namespace='movie_picker')),
]
| [
"greenew.k@gmail.com"
] | greenew.k@gmail.com |
196ce871fe293b80a739a6da8ff8edb36a5e0ec0 | b509bf60f6218caf522edbd19c18c24958268968 | /tehtava5_1.py | e61f107774080ac479c87cb6a43f5f6c1641f483 | [] | no_license | lketoja/PythonKurssiAalto | fd23b54d3fac74368bd43db784623215987cc64b | 180fd32c66a630ded1cf90d73a7a19c7abfce0d7 | refs/heads/master | 2020-06-06T08:18:47.897176 | 2020-02-27T17:39:43 | 2020-02-27T17:39:43 | 192,685,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | '''
Created on 17.6.2019
@author: Lotta Ketoja
'''
def main():
    """Read product prices, print each discounted price and the total.

    Prices under 50 eur get a 10% discount, others 30%. Prompts and
    output are in Finnish, unchanged from the original.
    """
    lkm = int(input("Kuinka monen tuotteen hinnat annat?\n"))
    hinnat = []
    for _ in range(lkm):
        hinnat.append(float(input("Anna seuraavan tuotteen hinta (eur).\n")))
    print("Alennetut hinnat")
    RAJA = 50.0
    PIENI_ALENNUS = 10.0
    SUURI_ALENNUS = 30.0
    summa = 0
    for hinta in hinnat:
        # Cheap items get the small discount, expensive ones the large one.
        alennus = PIENI_ALENNUS if hinta < RAJA else SUURI_ALENNUS
        alennettu_hinta = (100 - alennus) / 100 * hinta
        print("{:.2f} eur".format(alennettu_hinta))
        summa += alennettu_hinta
    print("Alennetut hinnat yhteensa: {:.2f} eur.".format(summa))
main() | [
"lotta.ketoja@gmail.com"
] | lotta.ketoja@gmail.com |
5197e27e5292042c0deb17e850c425d7d76d9e58 | f1d07f934c302f207673411960e3fc075ca86777 | /client.py | 6806c5f3ca2f565840d0b7b821fb0315a59dcab6 | [] | no_license | insa-vn/bang | ab52fea0045908e178b1339877098d169d49339d | d349d9029dbd6fc458870bf8b4660bb9115f524a | refs/heads/master | 2021-01-10T10:21:44.325190 | 2016-02-29T21:46:58 | 2016-02-29T21:46:58 | 52,547,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | from socketIO_client import SocketIO, LoggingNamespace
def on_response(*args):
    # Ack callback invoked with the server's reply; first arg is the message.
    print('Response from Bang server: ' + args[0])
# Connect to the local Bang server, emit one event, and wait briefly for acks.
with SocketIO('localhost', 5000, LoggingNamespace) as socketIO:
    socketIO.emit('hoang sida', {'message': 'Lieu hoang co bi benh sida ? '}, on_response)
    socketIO.wait_for_callbacks(seconds=1)
| [
"danghaian168@gmail.com"
] | danghaian168@gmail.com |
abdc48b6b1a077d05328750e05db9c729762934c | b2f8146e3331a035445a964e534e2dde2604b33f | /rigBuilder/source/rigBuilder/face/faceIO.py | d4f79e1643cd42885583a1e64367dc9fe153330c | [] | no_license | timm-gitHub/fbRepo | f2f584d84329f614d20ef64c1666ec54b9849f69 | f85dceef672538c9273ffdbc360f0764a026345a | refs/heads/master | 2016-09-05T15:11:07.083941 | 2013-11-05T00:40:28 | 2013-11-05T00:40:28 | 13,265,053 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 16,605 | py | '''
Created on 18/09/2013
@author: caleb.bell
faceIO.py is home to the Data Input and Output functions used by the face rig
builder. Thes include functions for importing Preference and Set Driven Keyframe
data. Both of these data types are exported in a JSON file format, so there are
generic JSON import and export functions in here too.
'''
import json
import os
import re
import sys
import types
import maya.cmds
import maya.mel
import pymel.core
SDK_ANIMCURVE_TYPES = ['animCurveUA', 'animCurveUL', 'animCurveUT', 'animCurveUU']
#===============================================================================
# JSON import/export wrappers
#===============================================================================
def importJSON(filePath, verbose=True):
    """Read and return JSON data from filePath.

    Returns False (not an exception) when the path does not exist, to
    match the original calling convention.
    """
    if not os.path.exists(filePath):
        if verbose:
            sys.stdout.write("File path '%s' does not exist.\n" % filePath)
        return False
    if verbose:
        sys.stdout.write("Reading JSON data from file '%s'.\n" % filePath)
    # BUG FIX: the with-statement guarantees the file handle is closed even
    # if json.load raises (the original leaked the handle in that case).
    with open(filePath, 'r') as f:
        data = json.load(f)
    return data
def exportJSON(data, filePath, verbose=True):
    """Serialize data to filePath as sorted, indented JSON; return filePath."""
    # BUG FIX: 'open' replaces the Python-2-only 'file()' builtin, and the
    # with-statement closes the handle even if serialization fails.
    with open(filePath, 'w') as f:
        json.dump(data, f, indent=4, sort_keys=True)
    if verbose:
        sys.stdout.write(">> Data written to '%s'.\n" % filePath)
    return filePath
#===============================================================================
# Attribute import/export wrappers (Preference Data).
#===============================================================================
def importAttributeData(node, filePath, verbose=True):
    """Load attribute values from a JSON file and apply them to `node`."""
    return setAttributeData(importJSON(filePath, verbose), node, verbose)
def exportAttributeData(node, filePath, verbose=True, **kwargs):
    """Write `node`'s attribute values (filtered via listAttr kwargs) to a JSON file."""
    return exportJSON(getAttributeData(node, **kwargs), filePath, verbose)
#===============================================================================
# Attribute data getters/setters.
#===============================================================================
def getAttributeData(node, **kwargs):
    ''' This function will iterate over attributes on a node (based upon filters
    passed in as keyword arguments of maya's listAttr command) and export the
    numeric attribute values. Returns a dict of attribute name -> value. '''
    # Selecting a non-existent node makes Maya raise a clear error up front,
    # rather than failing later inside listAttr/getAttr.
    if not maya.cmds.objExists(node):
        maya.cmds.select(node)
    data = dict()
    # Capture the current value of every attribute matching the filters.
    for attr in maya.cmds.listAttr(node, **kwargs):
        data[attr] = maya.cmds.getAttr('%s.%s' % (node, attr))
    return data
def setAttributeData(data, node, verbose=True):
    ''' This function will iterate over keys in a dictionary which correspond
    with attribute names and attempt to set the attribute values. Attributes
    that are missing, locked, or fed by incoming connections (directly or via
    their parent attribute) are skipped. Returns True and displays progress
    in Maya's main progress bar while it works. '''
    # NOTE: types.DictType and dict.iteritems below are Python 2 APIs --
    # this module targets Maya's Python 2 interpreter.
    # Selecting a non-existent node makes Maya raise a clear error up front.
    if not maya.cmds.objExists(node):
        maya.cmds.select(node)
    if not isinstance(data, types.DictType):
        raise ValueError('Data must be a dictionary.')
    if verbose:
        sys.stdout.write("Setting attribute data on node '%s'...\n" % node)
    # Begin progress Bar
    gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar')
    status = "Setting attribute data on node '%s'...\n" % node
    maya.cmds.progressBar(gMainProgressBar, edit=True, beginProgress=True,
        isInterruptable=False, status=status, maxValue=len(data))
    # Iterate.
    for attr, value in data.iteritems():
        # Progress bar step.
        maya.cmds.progressBar(gMainProgressBar, edit=True, step=1)
        # First check that the attribute exists.
        if not maya.cmds.attributeQuery(attr, n=node, ex=True):
            continue
        # Then see if it's locked.
        if maya.cmds.getAttr('%s.%s' % (node, attr), lock=True):
            continue
        # Check to see if it has a parent attribute, and if so if that attribute
        # has an incoming connection.
        parentAttr = maya.cmds.attributeQuery(attr, n=node, lp=True)
        if parentAttr:
            cons = maya.cmds.listConnections('%s.%s' % (node, parentAttr[0]),
                s=True, d=True, scn=True)
            # Skip if we find the parent attribute has connections.
            if cons:
                if verbose:
                    sys.stdout.write("Parent attribute of '%s' has an incoming "
                        "connection. Skipping...\n" % attr)
                continue
        # Check for input connections.
        cons = maya.cmds.listConnections('%s.%s' % (node, attr), s=True, d=False,
            p=False, scn=True)
        if cons:
            if verbose:
                sys.stdout.write("Attribute '%s' has an incoming connection."
                    " Skipping...\n" % attr)
            continue
        # Set the attribute value.
        maya.cmds.setAttr('%s.%s' % (node, attr), value)
    if verbose:
        sys.stdout.write("Set attribute data complete.\n")
    maya.cmds.progressBar(gMainProgressBar, edit=True, endProgress=True)
    return True
#===============================================================================
# Driven Key getters/setters.
#===============================================================================
def getDrivenKeysData(attribute):
    """Capture set-driven-key data for `attribute`.

    Returns a list with one dict per driving animCurve: a single entry for
    a direct animCurve connection, or several when curves are combined
    through a blendWeighted node. Each dict is keyed by the driver
    attribute name and stores the keyframe count, infinity settings, and
    per-key value/tangent data. Returns an empty list when the attribute
    does not exist or is not driven by set-driven-key curves.
    """
    def doIt(attribute):
        # Collect the SDK data for a single animCurve feeding `attribute`.
        result = dict()
        # Get the name of the driver attribute. It will be the key for the driven
        # key data, so that in the event there is multiple drivers things will nest
        # nicely.
        currentDriver = maya.cmds.setDrivenKeyframe(attribute, q=True, cd=True)[0]
        result[currentDriver] = dict()
        # Get the keyframe count. This will be stored as well as used as the range
        # value for the iterator when capturing driven key data.
        keyframeCount = maya.cmds.keyframe(attribute, q=True, kc=True)
        result[currentDriver]['keyframeCount'] = keyframeCount
        # Get pre-infinity status.
        result[currentDriver]['preInfinity'] = maya.cmds.setInfinity(
            attribute, q=True, pri=True)[0]
        # Get the post-Infinity status.
        result[currentDriver]['postInfinity'] = maya.cmds.setInfinity(
            attribute, q=True, poi=True)[0]
        # Get the rest of the data, indexed per keyframe.
        result[currentDriver]['drivenValue'] = [maya.cmds.keyframe(
            attribute, q=True, index=(j, j), vc=True)[0] for j in range(keyframeCount)]
        # Get the driver values.
        result[currentDriver]['driverValue'] = [maya.cmds.keyframe(
            attribute, q=True, index=(j, j), fc=True)[0] for j in range(keyframeCount)]
        # Get the in angles.
        result[currentDriver]['inAngle'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), ia=True)[0] for j in range(keyframeCount)]
        # Get the in tangent types.
        result[currentDriver]['inTangentType'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), itt=True)[0] for j in range(keyframeCount)]
        # Get the in weight values.
        result[currentDriver]['inWeight'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), iw=True)[0] for j in range(keyframeCount)]
        # Get the tangent lock status
        result[currentDriver]['lock'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), l=True)[0] for j in range(keyframeCount)]
        # Get the out angle values.
        result[currentDriver]['outAngle'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), oa=True)[0] for j in range(keyframeCount)]
        # Get the out tangent types.
        result[currentDriver]['outTangentType'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), ott=True)[0] for j in range(keyframeCount)]
        # Get the out weight values.
        result[currentDriver]['outWeight'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), ow=True)[0] for j in range(keyframeCount)]
        # Get the weight lock status
        result[currentDriver]['weightLock'] = [maya.cmds.keyTangent(
            attribute, q=True, index=(j, j), wl=True)[0] for j in range(keyframeCount)]
        # See if the animation curve has weighted tangents.
        result[currentDriver]['weightedTangents'] = maya.cmds.keyTangent(
            attribute, q=True, wt=True)[0]
        return result
    # Declare the variable we'll return.
    data = list()
    # Check that the attribute exists.
    if not maya.cmds.objExists(attribute):
        return data
    # Check and see if the attribute is driven by animCurves.
    drivers = maya.cmds.setDrivenKeyframe(attribute, q=True, dr=True)
    if drivers == [u'No drivers.']: # Ugliest code ever.
        # BUG FIX: was 'return data()' -- calling a list raises TypeError.
        return data
    else:
        if len(drivers) == 1: # This should be just an animCurve.
            data.append(doIt(attribute))
        else: # This is blendWeighted option.
            bwNode = maya.cmds.listConnections(attribute,
                s=True, d=False, p=False, scn=True, t='blendWeighted')
            if not bwNode: # Huh? This pretty much can't happen.
                # BUG FIX: was 'return data()'.
                return data
            # Get ALL the source connections to the blendWeighted node.
            # NOTE(review): listConnections can return None when there are no
            # connections, which would make the loop below raise -- confirm
            # whether that state is reachable here.
            cons = maya.cmds.listConnections(bwNode[0], s=True, d=False,
                p=False, scn=True)
            for con in cons:
                if not maya.cmds.nodeType(con) in SDK_ANIMCURVE_TYPES:
                    # BUG FIX: was 'return data()'.
                    return data
            # Iterate over the inputs of the blendWeighted node.
            for i in range(maya.cmds.getAttr('%s.input' % bwNode[0], size=True)):
                data.append(doIt('%s.input[%d]' % (bwNode[0], i)))
    return data
def setDrivenKeysData(attribute, data, verbose=True):
    """Rebuild set-driven-key (SDK) animation on `attribute` from captured data.

    `data` is a list (as produced by the matching getDrivenKeysData capture)
    of one-entry dicts mapping a driver attribute name to its animCurve data.
    Existing SDK curves on the attribute are deleted and recreated; any other
    incoming connection causes the attribute to be skipped.

    Returns True on success, False when the attribute was skipped.
    Raises ValueError if `data` is not a list.
    """
    # NOTE: types.ListType / the .keys()[0] access below are Python 2 only
    # (Maya pre-2017 era code).
    if not isinstance(data, types.ListType):
        raise ValueError('Data must be a list.')
    # Run some checks on the driven attribute.
    if not maya.cmds.objExists(attribute):
        if verbose:
            sys.stdout.write(
                "Attribute '%s' does not exist. Skipping...\n" % attribute
            )
        return False
    # Check to see if the driven attribute has a parent attribute, and if so
    # if that attribute has an incoming connection.
    parentAttr = maya.cmds.attributeQuery(attribute.split('.', 1)[-1],
        n=attribute.split('.', 1)[0], lp=True)
    if parentAttr:
        cons = maya.cmds.listConnections('%s.%s' % (attribute.split('.', 1)[0],
            parentAttr[0]), s=True, d=True, scn=True)
        # Skip if we find the parent attribute has connections.
        if cons:
            if verbose:
                sys.stdout.write(
                    "Parent attribute of '%s' has an incoming connection."
                    " Skipping...\n" % attribute
                )
            return False
    # Check for pre-existing connections to the driven attr.
    if maya.cmds.listConnections(attribute, s=True, d=False):
        # Check and see if it's driven keys.
        drivers = maya.cmds.setDrivenKeyframe(attribute, q=True, dr=True)
        if drivers == [u'No drivers.']: # Ugliest code ever.
            if verbose:
                sys.stdout.write(
                    "Attribute '%s' has an incoming connection."
                    " Skipping...\n" % attribute
                )
            return False
        # If it is driven keys we need to kill them so that they can be
        # rebuilt from the data.
        else:
            if len(drivers) == 1: # This should be just an animCurve.
                maya.cmds.delete(maya.cmds.listConnections(attribute,
                    s=True, d=False, p=False, scn=True, t='animCurve'))
            else: # This is blendWeighted option.
                bwNode = maya.cmds.listConnections(attribute,
                    s=True, d=False, p=False, scn=True, t='blendWeighted')
                if not bwNode: # Huh? This pretty much can't happen.
                    if verbose:
                        sys.stdout.write(
                            "A pre-condition of this function rebuilding SDK"
                            " data has failed on attribute '%s'."
                            " Skipping...\n" % attribute
                        )
                    return False
                # Get ALL the source connections to the blendWeighted node.
                cons = maya.cmds.listConnections(bwNode, s=True, d=False,
                    p=False, scn=True)
                for con in cons:
                    # Only pure SDK animCurves may feed the blendWeighted
                    # node, otherwise we refuse to touch it.
                    if not maya.cmds.nodeType(con) in SDK_ANIMCURVE_TYPES:
                        if verbose:
                            sys.stdout.write(
                                "A pre-condition of this function rebuilding SDK"
                                " data has failed on attribute '%s'."
                                " Skipping...\n" % attribute
                            )
                        return False
                # Delete the animCurves.
                maya.cmds.delete(cons)
    # Now we can build the SDK's
    for item in data:
        # Each item is a one-entry dict: {driverAttr: animCurveData}
        # (Python 2 .keys()/.values() return lists, so [0] is valid).
        driverAttr = item.keys()[0]
        if not maya.cmds.objExists(driverAttr):
            if verbose:
                sys.stdout.write(
                    "Driver attribute '%s' does not exist. Skipping...\n" % (
                        driverAttr)
                )
            continue
        animCurveData = item.values()[0]
        for i in range(animCurveData['keyframeCount']):
            # Create the driven keyframe.
            maya.cmds.setDrivenKeyframe(attribute, cd=driverAttr,
                dv=animCurveData['driverValue'][i],
                v=animCurveData['drivenValue'][i],)
            # Set tangent information.
            maya.cmds.keyTangent(attribute, edit=True, index=(i, i),
                ia=animCurveData['inAngle'][i],
                itt=animCurveData['inTangentType'][i],
                oa=animCurveData['outAngle'][i],
                ott=animCurveData['outTangentType'][i],
                l=animCurveData['lock'][i])
            # Set tangent weights (if applicable).
            if animCurveData['weightedTangents']:
                # Set the weighted tangents status (This only needs doing once).
                if not i:
                    maya.cmds.keyTangent(attribute, edit=True, wt=True)
                # Set the weight lock status and the in and out weights.
                maya.cmds.keyTangent(attribute, edit=True, index=(i, i),
                    wl=animCurveData['weightLock'][i],
                    iw=animCurveData['inWeight'][i],
                    ow=animCurveData['outWeight'][i])
    return True
def getSceneDrivenKeysData():
    """Capture set-driven-key data for every driven attribute in the scene.

    Returns a dict mapping each driven attribute (plug string) to the data
    returned by getDrivenKeysData() for it.
    """
    data = dict()
    # Iterate over all the driven key anim curve types in the scene.
    for animCurve in maya.cmds.ls(typ=SDK_ANIMCURVE_TYPES):
        drivenAttr = maya.cmds.listConnections('%s.o' % animCurve, s=False,
            d=True, p=True, scn=False)
        # This should never trigger, but just in case...
        if not drivenAttr:
            continue
        # Blend-weighted SDKS need an extra step to find the driven attribute.
        if maya.cmds.objectType(drivenAttr[0].split('.', 1)[0], i='blendWeighted'):
            drivenAttr = maya.cmds.listConnections('%s.o' %
                drivenAttr[0].split('.', 1)[0], s=False, d=True, p=True, scn=False)
            # This should never trigger, but just in case...
            if not drivenAttr:
                continue
        # We need to check that we aren't doubling up the data capture,
        # because the basis for our data capture is EVERY SDK animCurve,
        # so there is a chance we could look at the same blendWeighted
        # node multiple times.
        if drivenAttr[0] in data:
            continue
        # Capture the data from the drivenAttr.
        data[drivenAttr[0]] = getDrivenKeysData(drivenAttr[0])
    return data
def setSceneDrivenKeyData(data, verbose=True):
    """Apply captured SDK data to every driven attribute it describes.

    `data` maps driven attribute names to the per-attribute lists consumed
    by setDrivenKeysData(). Always returns True; raises ValueError when
    `data` is not a dictionary.
    """
    if not isinstance(data, types.DictType):
        raise ValueError('Data must be a dictionary.')
    # Each key is a driven attribute; rebuild its driven keys from the value.
    for drivenAttr, sdkData in data.items():
        setDrivenKeysData(drivenAttr, sdkData, verbose)
    return True
#===============================================================================
# SDK import/export wrappers.
#===============================================================================
def importSceneDrivenKeysData(filePath, verbose=True):
    """Load SDK data from the JSON file at `filePath` and apply it to the scene."""
    return setSceneDrivenKeyData(importJSON(filePath, verbose), verbose)
def exportSceneDrivenKeysData(filePath, verbose=True):
    """Capture the whole scene's SDK data and write it as JSON to `filePath`."""
    return exportJSON(getSceneDrivenKeysData(), filePath, verbose)
| [
"caleb.bell+github@gmail.com"
] | caleb.bell+github@gmail.com |
e9165e98cf0d6b7f222062b1e491eee7993fdf66 | c128a8a0d206ea6c2db1e764b8578ae44eb61f7b | /main.py | 9509ea434b45e3faf138a395e7381c4da2369d0e | [] | no_license | jc2/cluster_client | 044652f389c62bad0ff95ec8f07527fc89422a65 | 8cd6488b0363ca3b6ab7c1086dcd2b614c12f4ca | refs/heads/master | 2023-06-01T21:08:59.902903 | 2021-06-15T23:45:43 | 2021-06-15T23:45:43 | 376,964,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,303 | py | import asyncio
import logging
from enum import Enum
from node import CreateGroup, DeleteGroup, NodeActionState
logging.basicConfig(filename='/tmp/run.log', format='%(asctime)s - %(levelname)s: %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class CoroutineState(Enum):
    """Outcome of a Coroutine.run() pass over the cluster nodes."""
    STATE_FETCHED = 1  # NOTE(review): never assigned in run(); presumably "statuses fetched" — confirm
    DONE = 2  # the action was applied successfully on every targeted node
    ROLLED_BACK = 3  # the action failed and the rollback succeeded
    ERROR = 4  # status fetch or the rollback itself failed; manual check needed
class Coroutine():
    """Orchestrates one cluster-wide action (create/delete group) across nodes.

    run() fetches each node's status, applies the action to the READY nodes,
    and rolls the change back on the DONE nodes if any node failed.
    """
    # Maps the CLI action name to the node-action class that implements it.
    actions = {
        "create_group": CreateGroup,
        "delete_group": DeleteGroup
    }
    def __init__(self, action, nodes, group):
        self.group = group
        class_ = self.actions.get(action)
        if not class_:
            # TODO: Handle this exception in a better way
            raise Exception()
        # TODO: Implement a creational pattern
        self.tasks = [class_(node) for node in nodes]
        # Final CoroutineState set by run(); stays None if no task was READY.
        self.status = None
    def run(self):
        """Execute the action on all READY nodes, rolling back on failure.

        Sets self.status to DONE, ROLLED_BACK or ERROR. NOTE(review): when no
        task is READY, status is left as None — confirm that is intended.
        """
        loop = asyncio.get_event_loop()
        # 1. Get status of all nodes
        result = loop.run_until_complete(self._get_status(self.tasks))
        errors = [i for i in result if isinstance(i, Exception)]
        if errors:
            logger.error("Unable to get current status")
            self.status = CoroutineState.ERROR
            return
        task_to_run = [task for task in self.tasks if task.status == NodeActionState.READY]
        # 2. Run the desired action
        if task_to_run:
            result = loop.run_until_complete(self._forward(task_to_run))
            # NOTE(review): assumes failed results expose .last_attempt (retry
            # library style) — confirm against the node module's exceptions.
            errors = [str(i.last_attempt.exception()) for i in result if isinstance(i, Exception)]
            if errors:
                logger.error(errors)
                logger.warning("Unable to perform updates. Rolling back")
                task_to_rollback = [task for task in self.tasks if task.status == NodeActionState.DONE]
                # 2.1 Rollback in case of errors
                result = loop.run_until_complete(self._backward(task_to_rollback))
                errors = [str(i.last_attempt.exception()) for i in result if isinstance(i, Exception)]
                if errors:
                    self.status = CoroutineState.ERROR
                    logger.error(errors)
                    logger.critical("Error, Rollback failed, a manual check is needed")
                else:
                    self.status = CoroutineState.ROLLED_BACK
                    logger.info("Rollback was successful")
            else:
                self.status = CoroutineState.DONE
                logger.info("Done")
    async def _get_status(self, tasks):
        """Query every task's current node status concurrently; exceptions are returned, not raised."""
        result = await asyncio.gather(
            *[asyncio.create_task(task.get_current_status(self.group)) for task in tasks],
            return_exceptions=True
        )
        return result
    async def _forward(self, tasks):
        """Apply the action on all given tasks concurrently; exceptions are returned, not raised."""
        result = await asyncio.gather(
            *[asyncio.create_task(task.forward(self.group)) for task in tasks],
            return_exceptions=True
        )
        return result
    async def _backward(self, tasks):
        """Undo the action on all given tasks concurrently; exceptions are returned, not raised."""
        result = await asyncio.gather(
            *[asyncio.create_task(task.backward(self.group)) for task in tasks],
            return_exceptions=True
        )
        return result
if __name__ == '__main__':
    import argparse
    import json
    import sys
    # CLI: <action> <group_name> <node_file>, validated below before running.
    parser = argparse.ArgumentParser(description='Cluster API: For creating groups and beyond :D')
    parser.add_argument('action', type=str, nargs='?', choices=['create_group', 'delete_group'],
                        help='Action to perform: create_group or delete_group')
    parser.add_argument('group_name', type=str, nargs='?',
                        help='Group name')
    parser.add_argument('node_file', nargs='?', type=argparse.FileType('r'),
                        help="Json file with a list of string (nodes urls)")
    args = parser.parse_args()
    # NOTE(review): all three args are optional (nargs='?'); omitting node_file
    # leaves args.node_file as None and the read below raises AttributeError
    # instead of a usage message — confirm whether that is intended.
    try:
        nodes = json.loads(args.node_file.read())
    except json.decoder.JSONDecodeError:
        sys.exit("File can not be readed")
    if not isinstance(nodes, list):
        sys.exit("Root element must be a list")
    if any([not isinstance(node, str) for node in nodes]):
        sys.exit("Nodes must be a string")
    # Deduplicate node urls with set() before handing them to the Coroutine.
    c = Coroutine(args.action, set(nodes), args.group_name)
    c.run()
| [
"juancamiloceron@gmail.com"
] | juancamiloceron@gmail.com |
94de0d26045a93e111fa60999c0e6ffc8c94c834 | 6da006544407166029abae9c692d734975bd893a | /bert_layer.py | 642abe685ad1ecc5c0a888efcee9407f73ca5d3b | [] | no_license | petrjanda/bert-playground | b7563aa03cec03145b47e93447afa43da74dd9cd | 16d569dba49796a3459d7a7255e3663f7d742876 | refs/heads/master | 2020-05-19T19:44:25.280480 | 2019-05-06T11:57:29 | 2019-05-06T11:57:29 | 185,187,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import backend as K
class BertLayer(tf.keras.layers.Layer):
    """Keras layer wrapping a TF-Hub BERT module (TF1-style hub.Module).

    Exposes BERT's pooled output and makes the last `n_fine_tune_layers`
    module variables trainable for fine-tuning.
    """
    def __init__(self, bert_path, n_fine_tune_layers=10, **kwargs):
        # How many of the module's trailing variables to fine-tune.
        self.n_fine_tune_layers = n_fine_tune_layers
        self.trainable = True
        # Dimensionality of the pooled output (BERT-base hidden size).
        self.output_size = 768
        self._bert_path = bert_path
        super(BertLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        self.bert = hub.Module(self._bert_path,
                               trainable=self.trainable,
                               name="{}_module".format(self.name)
                               )
        trainable_vars = self.bert.variables
        # Remove unused layers
        trainable_vars = [var for var in trainable_vars if not "/cls/" in var.name]
        # Select how many layers to fine tune
        trainable_vars = trainable_vars[-self.n_fine_tune_layers :]
        # Add to trainable weights
        for var in trainable_vars:
            self._trainable_weights.append(var)
        # Everything else stays frozen (non-trainable).
        for var in self.bert.variables:
            if var not in self._trainable_weights:
                self._non_trainable_weights.append(var)
        super(BertLayer, self).build(input_shape)
    def call(self, inputs):
        # inputs: [input_ids, input_mask, segment_ids], each cast to int32
        # as required by the BERT "tokens" signature.
        inputs = [K.cast(x, dtype="int32") for x in inputs]
        input_ids, input_mask, segment_ids = inputs
        bert_inputs = dict(
            input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids
        )
        result = self.bert(inputs=bert_inputs, signature="tokens", as_dict=True)[
            "pooled_output"
        ]
        return result
    def compute_output_shape(self, input_shape):
        # (batch, 768) — only the pooled vector is returned.
        return (input_shape[0], self.output_size)
"petrjandacz@gmail.com"
] | petrjandacz@gmail.com |
5cd901ed36eb82cabb4c257cfe13ed0271cb4cef | 80ff29422e987f17cbb06211907c555d713c881f | /MADE/ALGORITHMS/WEEK6_D.py | fd332a0afb0a419716f1ebed1802744db3174cc5 | [] | no_license | be-y-a/smth | 8780f362305ddb4d25834b921459fb4c9115ab5f | 2ee632b629f68aca49642322b28c8487ce24fee9 | refs/heads/master | 2023-01-10T16:08:25.030319 | 2020-11-02T21:48:10 | 2020-11-09T10:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | def levenstein(a, b):
n, m = len(a), len(b)
if n > m:
a, b = b, a
n, m = m, n
current = range(n + 1)
for i in range(1, m + 1):
prev, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete, change = prev[j] + 1, current[j - 1] + 1, prev[j - 1]
if a[j - 1] != b[i - 1]:
change += 1
current[j] = min(add, delete, change)
return current[n]
# Read two strings from stdin (one per line) and print their edit distance.
print(levenstein(input(), input()))
"mvvm@yandex-team.ru"
] | mvvm@yandex-team.ru |
ec36c2f9e3113a532ed0280c0dfc2e3a0f942acd | 2e38c69121b4ee829972a4cf5845b15d9b0eca25 | /python/python_base/网易云音乐下载.py | 935f86a974b19da381f0867179ed2fcd6a0d857b | [] | no_license | steadyrays/usermaster | 92eda1783994cadd023fcdc9afd31eb2f6bcb95b | 2849cbbc23df700d0387f48f36b7c726954a10c5 | refs/heads/master | 2022-07-01T18:02:45.811761 | 2020-05-10T15:45:18 | 2020-05-10T15:45:18 | 262,750,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py |
#保存图片、音乐、视频文件用二进制格式wb
#单首音乐下载
# import requests
# url='http://music.163.com/song/media/outer/url?id=1369798757'
# headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'}
# response=requests.get(url,headers=headers)
# data=response.content
# with open('芒种.mp3','wb')as f:
# f.write(data)
#批量下载
import requests
from lxml import etree
url='https://music.163.com//discover/toplist?id=3778678'
headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
response=requests.get(url,headers=headers)
data=response.content.decode('utf-8')
#初始化
html=etree.HTML(data)
#歌曲名
name=html.xpath('//ul[@class="f-hide"]/li/a/text()')
id=html.xpath('//ul[@class="f-hide"]/li/a/@href')
print(id)
# for i in range(200):
# id_all=id[i].split('=')[1]
# url_all='http://music.163.com/song/media/outer/url?id={}'.format(id_all)
# res=requests.get(url_all,headers=headers)
# datas=res.content
# with open(r"D:\pycharm\pycharm_dm\a课程资料\网易云音乐\{}.mp3".format(name[i]),'wb') as f:
# f.write(datas)
# print('下载<%s>完成'%name[i])
| [
"877243985@qq.om"
] | 877243985@qq.om |
d5c6978e92c337050ab5f5f1908a4ef11c57304f | d9ec3b14a34e1cc7df2bbb51cb83d0e9c2fec0e5 | /Lista1/exercicio4.py | 1b6400cc559289daba3d890f05d4ae5e0b8ba13c | [] | no_license | nevesjf/python | ffb2838efc96cdb51aeb903c169c7ca15b454799 | 1269cc9cc0b24fbf6bc48e1258dd1bf3847f22bb | refs/heads/master | 2020-04-10T23:55:16.177841 | 2018-03-25T21:22:03 | 2018-03-25T21:22:03 | 124,309,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #EXERCICIO4
vlrSalario = float(input("Valor do salario: "))
pctAumento = int(input("Porcentagem de aumento: "))
novoSalario = vlrSalario+vlrSalario*pctAumento/100
print("Novo salário é: ", novoSalario) | [
"nevesjf@uol.com.br"
] | nevesjf@uol.com.br |
d00094432e33d5323e9928793d41a57003c88524 | 35d67bf16427d421c986602bc50b98b94b3328be | /Python/chapter15/21.py | 159cf75fde0da58c20ef62d953bb182510fcdb5d | [] | no_license | xiaoy/Exercise | 74eefe6458d202ea6310d958374cbb761d4a7ad5 | c89b7beac3ae7508cd00d81e215b7e00e4306d85 | refs/heads/master | 2021-06-12T04:40:54.555509 | 2021-05-16T09:03:22 | 2021-05-16T09:03:22 | 5,648,724 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # use this program to verify the month
import re
re_str = r".+(Jan|Feb|Mar|Jan|Jul|Apr|Sep|Oct|Nov)"
f = open("data.txt", 'r')
for line in f:
print re.match(re_str, line).groups()[0]
f.close()
| [
"zgwulongfei@gmail.com"
] | zgwulongfei@gmail.com |
7fa2949cf1cd3bc986e1801d3d60ef78650ba85e | 8186514b510a801863229e3f9711c0c657e727e5 | /assembly/0427/explore_qt/22/2.py | 1995344ca3c4e4d6ee83bf7b963ca016295d0b6c | [] | no_license | masknugget/mypyqt | 274b2cbbf66c04927453815248f9c1bc5e65ca17 | b86a49e4b8c7c8c3d8546ce1b49f8f3bb6332307 | refs/heads/main | 2023-08-17T13:30:11.451066 | 2021-09-27T14:14:54 | 2021-09-27T14:14:54 | 355,904,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QTextEdit, QPushButton, \
QGridLayout
class Demo(QWidget):
    """Small form demonstrating the five QInputDialog static getters.

    One button per field; each button opens the matching input dialog and
    writes the accepted value into the line edit / text edit next to it.
    """
    def __init__(self):
        super(Demo, self).__init__()
        # One trigger button per input-dialog flavour.
        self.name_btn = QPushButton('Name', self)
        self.gender_btn = QPushButton('Gender', self)
        self.age_btn = QPushButton('Age', self)
        self.score_btn = QPushButton('Score', self)
        self.info_btn = QPushButton('Info', self)
        # All buttons route into one handler; the lambda passes the sender.
        self.name_btn.clicked.connect(lambda: self.open_dialog_func(self.name_btn))
        self.gender_btn.clicked.connect(lambda: self.open_dialog_func(self.gender_btn))
        self.age_btn.clicked.connect(lambda: self.open_dialog_func(self.age_btn))
        self.score_btn.clicked.connect(lambda: self.open_dialog_func(self.score_btn))
        self.info_btn.clicked.connect(lambda: self.open_dialog_func(self.info_btn))
        # Display widgets for the collected values.
        self.name_line = QLineEdit(self)
        self.gender_line = QLineEdit(self)
        self.age_line = QLineEdit(self)
        self.score_line = QLineEdit(self)
        self.info_textedit = QTextEdit(self)
        # Grid: buttons in column 0, their output widgets in column 1.
        self.g_layout = QGridLayout()
        self.g_layout.addWidget(self.name_btn, 0, 0, 1, 1)
        self.g_layout.addWidget(self.name_line, 0, 1, 1, 1)
        self.g_layout.addWidget(self.gender_btn, 1, 0, 1, 1)
        self.g_layout.addWidget(self.gender_line,1, 1, 1, 1)
        self.g_layout.addWidget(self.age_btn, 2, 0, 1, 1)
        self.g_layout.addWidget(self.age_line, 2, 1, 1, 1)
        self.g_layout.addWidget(self.score_btn, 3, 0, 1, 1)
        self.g_layout.addWidget(self.score_line, 3, 1, 1, 1)
        self.g_layout.addWidget(self.info_btn, 4, 0, 1, 1)
        self.g_layout.addWidget(self.info_textedit, 4, 1, 1, 1)
        self.setLayout(self.g_layout)
    def open_dialog_func(self, btn):
        """Open the QInputDialog matching `btn`; store the value if accepted (ok)."""
        if btn == self.name_btn: # 1
            name, ok = QInputDialog.getText(self, 'Name Input', 'Please enter the name:')
            if ok:
                self.name_line.setText(name)
        elif btn == self.gender_btn: # 2
            gender_list = ['Female', 'Male']
            gender, ok = QInputDialog.getItem(self, 'Gender Input', 'Please choose the gender:', gender_list, 0, False)
            if ok:
                self.gender_line.setText(gender)
        elif btn == self.age_btn:
            age, ok = QInputDialog.getInt(self, 'Age Input', 'Please select the age:')
            if ok:
                self.age_line.setText(str(age))
        elif btn == self.score_btn:
            score, ok = QInputDialog.getDouble(self, 'Score Input', 'Please select the score:')
            if ok:
                self.score_line.setText(str(score))
        else:
            info, ok = QInputDialog.getMultiLineText(self, 'Info Input', 'Please enter the info:')
            if ok:
                self.info_textedit.setText(info)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, show the widget, enter the loop.
    app = QApplication(sys.argv)
    demo = Demo()
    demo.show()
    sys.exit(app.exec_())
"946883098@qq.com"
] | 946883098@qq.com |
bce6b6017de354154b0a2b7fd4412e1132861e58 | 6d00353112578165aebcc620e60229ec40e846ee | /challenges/pythagorean-triplet-in-array.py | ac8bf1b7e3584a9ec632081d74320233a1b6bbc8 | [] | no_license | dkp1903/daily-coding-problem | 955c310778d299bb729fe6f4c21ee056edc260cd | 2b83fd91c486bf097d4f7755677c14cc3e564013 | refs/heads/master | 2020-05-27T06:26:13.884888 | 2019-05-27T00:24:08 | 2019-05-27T00:24:08 | 188,521,411 | 1 | 0 | null | 2019-05-25T04:42:25 | 2019-05-25T04:42:25 | null | UTF-8 | Python | false | false | 795 | py | """
#282
Netflix
Given an array of integers, determine whether it contains a Pythagorean triplet.
Recall that a Pythogorean triplet (a, b, c) is defined by the equation a2+ b2= c2.
"""
import random, math
def has_pythagorean_triplet(arr):
arr.sort()
squares = [num*num for num in arr]
for ind in range(len(arr)-1, -1, -1):
for i in range(ind):
for j in range(i+1, ind):
if squares[i]+squares[j] == squares[ind]:
print(int(math.sqrt(squares[i])), int(math.sqrt(squares[j])), int(math.sqrt(squares[ind])))
return True
ind -= 1
return False
def main():
arr = [random.randint(1, 1000) for _ in range(1000)]
print(has_pythagorean_triplet(arr))
if __name__ == "__main__":
main() | [
"subsr97@gmail.com"
] | subsr97@gmail.com |
c7c39f7ebb7d0abd8f20c798147d7c8d43e3b0bc | 3afa8265220cd0968262a6b54d66a0dbb5a759ef | /webserver/front/admin.py | dfaa356e2e3a8112b74c4f3086a4436692e92caf | [] | no_license | SacriJuts/4PROJ | 71d15661f74c8a8234e480dd8c7b73f32001cb13 | b4c628f730ec2b82b27dde94d948c752bfc05860 | refs/heads/master | 2022-11-03T17:47:08.085395 | 2020-06-14T15:26:44 | 2020-06-14T15:26:44 | 265,799,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.contrib import admin
# Register your models here.
from django.contrib.auth.admin import UserAdmin
from .models import User, Cart, Product, Shelf
admin.site.register(User, UserAdmin)
admin.site.register(Cart)
admin.site.register(Product)
admin.site.register(Shelf) | [
"jutslegameur@gmail.com"
] | jutslegameur@gmail.com |
bb650d1bf5cd61f76b2df5950229d9dafb96e643 | fcc7e2459ea6cab4cb7a61f007c26539871064b1 | /pgrm36.py | b42ea7d22884815972d2b03940bdef0aa7fc2b17 | [] | no_license | ChandanaBasavaraj/python-code | 95a3fb01c8202e8fbea868697c32c55161b78665 | 90b51c2bfe4b2c516d344e218f92279a81afaf44 | refs/heads/master | 2020-03-27T23:26:06.222622 | 2018-09-04T08:53:46 | 2018-09-04T08:53:46 | 147,316,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | '''code to get username and password'''
def main():
login()
def login():
username="harshita"
password="Harshita@96"
print("Enter username : ")
answer1=input()
print("Enter password : ")
answer2=input()
if answer1==username and answer2==password:
print("Welcome - Access Granted")
else:
print("Access Denied")
main()
| [
"noreply@github.com"
] | noreply@github.com |
963c21dcb3fda320cc53ce7e08d427b37c2d8aea | 6a2b1b1d6092a8d2492a6677b6fd19d27b0f461f | /08-Python-DataTypes/Tuples/02-create-one-element-tuple.py | 05fa335a53400e9ea8a0525d7b35a9f3a2482310 | [] | no_license | Uttam1982/PythonTutorial | 3cfbe237199e048967502f3d0c1936f2b878cb87 | 8e28cc5c4be5826a011059db66f6952871248c82 | refs/heads/master | 2022-12-17T18:47:28.397383 | 2020-09-22T08:55:23 | 2020-09-22T08:55:23 | 288,524,784 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # Creating a tuple with one element is a bit tricky.
# 1. Having one element within parentheses is not enough.
# 2. We will need a trailing comma to indicate that it is, in fact, a tuple.
my_tuple = ("python")
print("tuple without trailing comma: ",type(my_tuple)) # <class 'str'>
#Creating a tuple having one element
my_tuple = ("python",)
print("tuple with trailing comma: ",type(my_tuple)) # <class 'tuple'>
## Parentheses is optional
my_tuple = "python",
print("Parentheses is optional: ",type(my_tuple)) # <class 'tuple'>
| [
"uttampat@gmail.com"
] | uttampat@gmail.com |
3ca31b3d92fb01a00afe5d42039144c994edc0ad | 1a8e511d2609f7b5b70a07d758664aed643763b0 | /defines.py | c293428b936aa76d44ae92a66e27726e5410acea | [] | no_license | MaxDuong/IG-Graph-API-Access-Token | aecdd2c81c67f11fa298a272c80311e510c26b07 | 5c4a694df176356084e8b49efdb334708a87d2cf | refs/heads/main | 2023-02-02T13:45:04.843036 | 2020-12-20T08:05:10 | 2020-12-20T08:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | import requests
import json
def getCreds():
    """Return the configuration dictionary used for the Graph API calls.

    NOTE(review): the access token and client secret are hard-coded below;
    they belong in environment variables or a secrets store, not in source.
    """
    creds = {
        'access_token': 'EAAHHaCP5zx0BAAp2Iv8GsRCmwchFHExO0GZBhCOZBk5svujnkT9vEBRk9UDeHcsHKMzkwz4nn3h0wQbXG2AHvwyZC8C0AUjA7P5bOAABZBP91XKBAu1KT5ZB7TuRBtcYCeovv4fX5WXDhFPfnXr6CJx962x1cXr6hLtLx7Dql2QZDZD',
        'client_id': '500725070810909',  # client id from facebook app IG Graph API Test
        'client_secret': 'edfcda20c1d776ad7757555972ea2654',  # client secret from facebook app
        'graph_domain': 'https://graph.facebook.com/',  # base domain for api calls
        'graph_version': 'v8.0',  # version of the api we are hitting
    }
    # Base endpoint with domain and version, e.g. https://graph.facebook.com/v8.0/
    creds['endpoint_base'] = creds['graph_domain'] + creds['graph_version'] + '/'
    creds['debug'] = 'no'  # debug mode for api call
    creds['page_id'] = '118580610011877'  # users page id
    creds['instagram_account_id'] = '17841404636746484'  # ig-user-id
    creds['ig_username'] = 'duonghoanvu1'  # ig username
    return creds
def makeApiCall(url, endpointParams, debug='no'):
    """GET `url` with `endpointParams` and package the JSON response.

    Returns a dict holding the request URL, the parameters (raw and
    pretty-printed) and the decoded JSON payload (raw and pretty-printed).
    When `debug` is the string 'yes' the response is also printed to the cli.
    """
    http_response = requests.get(url, endpointParams)
    parsed = json.loads(http_response.content)
    response = {
        'url': url,
        'endpoint_params': endpointParams,
        'endpoint_params_pretty': json.dumps(endpointParams, indent=4),
        'json_data': parsed,
        'json_data_pretty': json.dumps(parsed, indent=4),
    }
    if debug == 'yes':
        displayApiCallData(response)
    return response
def displayApiCallData(response):
    """Pretty-print a makeApiCall() response dict to the cli."""
    # Each section is a title line (with a leading blank line) then the value.
    sections = (
        ("\nURL: ", 'url'),
        ("\nEndpoint Params: ", 'endpoint_params_pretty'),
        ("\nResponse: ", 'json_data_pretty'),
    )
    for title, key in sections:
        print (title)
        print (response[key])
"noreply@github.com"
] | noreply@github.com |
12b59c10509167df99a13ed9eb017a489626d74b | f2a2a07c145398b29a033344b5764153e743a167 | /source/doublylinkedlist.py | 8010b016013c766d0dde8df1c80bd161c5807a3f | [] | no_license | kaichimomose/CS-2-Tweet-Generator | d6e751b4337dbfbf1184b2f425268bb0406383f3 | faa356a3ac3dbecaa81342e16222cfc375e54712 | refs/heads/master | 2021-05-08T03:19:52.600531 | 2018-01-10T02:25:49 | 2018-01-10T02:25:49 | 108,192,717 | 0 | 0 | null | 2017-10-24T22:55:44 | 2017-10-24T22:55:44 | null | UTF-8 | Python | false | false | 7,085 | py | #!python
class DoublyNode(object):
    """A single node of a doubly linked list: payload plus next/prev links."""

    def __init__(self, data):
        """Store `data` in an unlinked node (both neighbour links are None)."""
        self.data = data
        self.next = None  # following node, or None at the tail
        self.prev = None  # preceding node, or None at the head

    def __repr__(self):
        """Return a debug representation such as DoublyNode('A')."""
        return 'DoublyNode(%r)' % (self.data,)
class DoublyLinkedList(object):
    """Doubly linked list with head and tail pointers.

    Iterating the list yields its DoublyNode objects (not their data); the
    iterator cursor is stored on the list itself, so only one traversal can
    be in flight at a time.
    """

    def __init__(self, items=None):
        """Initialize this linked list and append the given items, if any."""
        self.head = None  # First node
        self.tail = None  # Last node
        # Append given items
        if items is not None:
            for item in items:
                self.append(item)

    def __str__(self):
        """Return a formatted string representation of this linked list."""
        items = ['({!r})'.format(item) for item in self.items()]
        return '[{}]'.format(' <-> '.join(items))

    def __repr__(self):
        """Return a string representation of this linked list."""
        # Kept as 'LinkedList(...)' (not 'DoublyLinkedList') so the output of
        # existing callers and doctest-style checks does not change.
        return 'LinkedList({!r})'.format(self.items())

    def __iter__(self):
        """Reset the shared traversal cursor to the head and return self."""
        self.node = self.head
        return self

    def __next__(self):
        """Return the next node in the traversal, or raise StopIteration."""
        if self.node is None:
            raise StopIteration
        node = self.node
        self.node = self.node.next  # advance the cursor
        return node

    def items(self):
        """Return a list (dynamic array) of all items in this linked list.
        Best and worst case running time: O(n) for n items in the list."""
        return [node.data for node in self]

    def is_empty(self):
        """Return a boolean indicating whether this linked list is empty."""
        return self.head is None

    def length(self):
        """Return the length of this linked list by traversing its nodes.
        Running time: O(n) — every node is visited once."""
        count = 0
        for _ in self:
            count += 1
        return count

    def append(self, item):
        """Insert the given item at the tail of this linked list.
        Running time: O(1) — the tail pointer gives direct access."""
        node = DoublyNode(item)
        if self.tail is None:  # empty list: node becomes both ends
            self.head = node
            self.tail = node
        else:
            self.tail.next = node
            node.prev = self.tail
            self.tail = node

    def prepend(self, item):
        """Insert the given item at the head of this linked list.
        Running time: O(1) — the head pointer gives direct access."""
        node = DoublyNode(item)
        if self.is_empty():
            self.head = node
            self.tail = node
        else:
            self.head.prev = node
            node.next = self.head
            self.head = node

    def find(self, quality):
        """Return the first item satisfying the given quality function, or None.
        Best case running time: O(1) when the head matches.
        Worst case running time: O(n) when the match is at the tail or absent."""
        for node in self:
            if quality(node.data):
                return node.data
        return None

    def delete(self, item):
        """Delete the first node whose data equals item, or raise ValueError.
        Best case running time: O(1) when the item is at the head.
        Worst case running time: O(n) when the item is at the tail or absent.

        Fixed: removing the head previously left the new head's `prev`
        pointing at the deleted node; both neighbour links are now repaired.
        """
        node = self.head
        while node is not None:
            if node.data == item:
                if node.prev is None:  # deleting the head
                    self.head = node.next
                else:
                    node.prev.next = node.next
                if node.next is None:  # deleting the tail
                    self.tail = node.prev
                else:
                    node.next.prev = node.prev
                return
            node = node.next
        raise ValueError('Item not found: {}'.format(item))

    def replace(self, item, new_item):
        """Replace the first occurrence of item with new_item, or raise
        ValueError when item is not present. Running time: O(n)."""
        for node in self:
            if node.data == item:
                node.data = new_item
                return
        raise ValueError('Item not found: {}'.format(item))
def test_doubly_linked_list():
    """Interactive smoke test: exercises append, replace and delete, printing
    the list, head, tail and length after each stage."""
    dll = DoublyLinkedList()
    print('list: {}'.format(dll))
    print('\nTesting append:')
    for item in ['A', 'B', 'C']:
        print('append({!r})'.format(item))
        dll.append(item)
        print('list: {}'.format(dll))
    print('head: {}'.format(dll.head))
    print('tail: {}'.format(dll.tail))
    print('length: {}'.format(dll.length()))
    # Replace the head value and then restore it.
    dll.replace('A', 'D')
    print('list: {}'.format(dll))
    print('head: {}'.format(dll.head))
    print('tail: {}'.format(dll.tail))
    print('length: {}'.format(dll.length()))
    dll.replace('D', 'A')
    print('list: {}'.format(dll))
    print('head: {}'.format(dll.head))
    print('tail: {}'.format(dll.tail))
    print('length: {}'.format(dll.length()))
    # Enable this after implementing delete method
    delete_implemented = True
    if delete_implemented:
        print('\nTesting delete:')
        # Delete middle, then tail, then the remaining single node.
        for item in ['B', 'C', 'A']:
            print('delete({!r})'.format(item))
            dll.delete(item)
            print('list: {}'.format(dll))
        print('head: {}'.format(dll.head))
        print('tail: {}'.format(dll.tail))
        print('length: {}'.format(dll.length()))
if __name__ == '__main__':
    test_doubly_linked_list()  # run the smoke test when executed directly
| [
"30785433+kaichimomose@users.noreply.github.com"
] | 30785433+kaichimomose@users.noreply.github.com |
ac633c6eea08f47d069923c69bcb99584e0012dd | 87a4749af8bec681072cf13e362efad537b5ca65 | /sel.py | e2b97709b9966aee1fe78862294b2ea4cebb41ef | [] | no_license | connorschwartz/ODROID-Process-Migration-Data | 21ca8be2b912a664d9dc9e2e498cf4ceece5cd9b | 05b19dd699111d0fca1dcd799b7762ae76e15cdd | refs/heads/master | 2020-09-30T05:13:38.301243 | 2020-02-24T21:41:38 | 2020-02-24T21:41:38 | 227,210,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,314 | py | import collections
import textwrap
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import staleness_of
from xvfbwrapper import Xvfb
import sys
import json
import datetime
# "Commented-out" (bare string literal) list of local on-device BBench page
# copies, kept for reference; the remote mirror list below is the one in use.
'''
addressArray = [
'file:///home/odroid/bbench-3.0/sites/amazon/www.amazon.com/index.html',
'file:///home/odroid/bbench-3.0/sites/bbc/www.bbc.co.uk/index.html',
'file:///home/odroid/bbench-3.0/sites/cnn/www.cnn.com/index.html',
'file:///home/odroid/bbench-3.0/sites/craigslist/newyork.craigslist.org/index.html',
'file:///home/odroid/bbench-3.0/sites/ebay/www.ebay.com/index.html',
'file:///home/odroid/bbench-3.0/sites/espn/espn.go.com/index.html',
'file:///home/odroid/bbench-3.0/sites/google/www.google.com/index.html',
'file:///home/odroid/bbench-3.0/sites/msn/www.msn.com/index.html',
'file:///home/odroid/bbench-3.0/sites/slashdot/slashdot.org/index.html',
'file:///home/odroid/bbench-3.0/sites/twitter/twitter.com/index.html',
'file:///home/odroid/bbench-3.0/sites/youtube/www.youtube.com/index.html']
'''
# Remote mirrors of the benchmark pages; addressArray[i] corresponds to
# sites[i] below. NOTE(review): the disabled local list above has an extra
# 'espn' entry, so only this 10-entry pairing is aligned.
addressArray = [
'http://tucunare.cs.pitt.edu:8080/amazon/www.amazon.com/',
'http://tucunare.cs.pitt.edu:8080/bbc/www.bbc.co.uk/',
'http://tucunare.cs.pitt.edu:8080/cnn/www.cnn.com/',
'http://tucunare.cs.pitt.edu:8080/craigslist/newyork.craigslist.org/',
'http://tucunare.cs.pitt.edu:8080/ebay/www.ebay.com/',
'http://tucunare.cs.pitt.edu:8080/google/www.google.com/',
'http://tucunare.cs.pitt.edu:8080/msn/www.msn.com/',
'http://tucunare.cs.pitt.edu:8080/slashdot/slashdot.org/',
'http://tucunare.cs.pitt.edu:8080/twitter/twitter.com/',
'http://tucunare.cs.pitt.edu:8080/youtube/www.youtube.com/'
]
# Short site names used as keys in the results dict written by runTest().
sites = [
'amazon',
'bbc',
'cnn',
'craigslist',
'ebay',
'google',
'msn',
'slashdot',
'twitter',
'youtube'
]
class PageLoadTimer:
    """Pulls the W3C Navigation Timing metrics out of a Selenium-driven page."""

    def __init__(self, driver):
        self.driver = driver
        # JS snippet returning the browser's performance.timing object.
        self.jscript = textwrap.dedent("""
            var performance = window.performance || {};
            var timings = performance.timing || {};
            return timings;
            """)
    def inject_timing_js(self):
        """Execute the timing snippet in the current page and return the raw dict."""
        return self.driver.execute_script(self.jscript)
    def get_event_times(self):
        """Return an OrderedDict mapping event name -> (epoch, offset).

        The run start is the earliest non-zero timestamp; offsets that would
        be negative (zero / never-fired timestamps) are clamped to 0.
        """
        timings = self.inject_timing_js()
        ordered_events = ('startTimestamp', 'navigationStart', 'fetchStart',
                          'domainLookupStart', 'domainLookupEnd', 'connectStart',
                          'connectEnd', 'secureConnectionStart', 'requestStart',
                          'responseStart', 'responseEnd', 'domLoading',
                          'domInteractive', 'domContentLoadedEventStart',
                          'domContentLoadedEventEnd', 'domComplete',
                          'loadEventStart', 'loadEventEnd')
        # Earliest real timestamp marks the start of the run; zeros are
        # placeholders for events that never happened.
        start = min(value for value in timings.values() if value != 0)
        timings['startTimestamp'] = start
        events = collections.OrderedDict()
        for name in ordered_events:
            if name in timings:
                epoch = timings[name]
                delta = epoch - start
                events[name] = (epoch, delta if delta >= 0 else 0)
        return events
def saveResults(iters, sitesused, timestamps):
    """Persist one benchmark run to ``output.json`` in the working directory.

    The file holds a single JSON object with the iteration count, the list of
    site names exercised, and the per-site timing results.
    """
    payload = {"iterations": iters, "sites": sitesused, "timestamps": timestamps}
    with open('output.json', 'w') as jsonFile:
        json.dump(payload, jsonFile)
def runTest(sitesused,iters,addresses):
    """Load each address `iters` times in headless Chromium and collect the
    navigation-timing events for every load.

    Returns a dict mapping site name -> list of OrderedDicts (one per load),
    in the order produced by PageLoadTimer.get_event_times().
    """
    results = dict(zip(sitesused,[[] for site in sitesused])) # empty results dict
    with Xvfb() as xvfb:
        # `xvfb` itself is never referenced, but the context manager keeps the
        # virtual X display alive for the lifetime of the browser.
        options = webdriver.ChromeOptions()
        options.add_argument('--ignore-certificate-errors')
        options.add_argument("--test-type")
        options.binary_location = "/usr/bin/chromium-browser"
        driver = webdriver.Chrome(chrome_options=options)
        # NOTE(review): averagingCoeff is computed but never used -- leftover
        # from an averaging scheme? Confirm before deleting.
        averagingCoeff = 1.0/iters
        for i in range(iters):
            for index,address in enumerate(addresses):
                print("loading address: " + address)
                driver.get(address)
                timer = PageLoadTimer(driver)
                results[sitesused[index]].append(timer.get_event_times())
        return results
if __name__ == '__main__':
    # Usage: sel.py <site index into `sites`/`addressArray`> <iteration count>
    site_num = int(sys.argv[1])
    iterations = int(sys.argv[2])
    # Right now, we only test on one site at a time
    siteused = [ sites[site_num] ]
    addressused = [ addressArray[site_num] ]
    saveResults(iterations,siteused,runTest(siteused,iterations,addressused))
| [
"ces133@pitt.edu"
] | ces133@pitt.edu |
b0e2af4e4d675713ffc95e2005e39ebb9196bccb | 2b1448085c5ad44e78772dde1dcc2fae9cc4c3cc | /botorch/models/converter.py | 35da4a3d8d21b48c62c2098e7a129b871f4e43c0 | [
"MIT"
] | permissive | leelasd/botorch | 47fa0ff9c5f6c534ecfcba59f5b1bf52eea0d62e | c48bfc822940ee8a6e5e2604d4ff282033dbe892 | refs/heads/master | 2022-12-17T04:42:41.591444 | 2020-09-10T23:45:05 | 2020-09-10T23:46:41 | 294,561,185 | 1 | 0 | MIT | 2020-09-11T01:19:36 | 2020-09-11T01:19:35 | null | UTF-8 | Python | false | false | 8,088 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for converting between different models.
"""
from __future__ import annotations
from copy import deepcopy
import torch
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import FixedNoiseGP, HeteroskedasticSingleTaskGP
from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from torch.nn import Module
def _get_module(module: Module, name: str) -> Module:
"""Recursively get a sub-module from a module.
Args:
module: A `torch.nn.Module`.
name: The name of the submodule to return, in the form of a period-delinated
string: `sub_module.subsub_module.[...].leaf_module`.
Returns:
The requested sub-module.
Example:
>>> gp = SingleTaskGP(train_X, train_Y)
>>> noise_prior = _get_module(gp, "likelihood.noise_covar.noise_prior")
"""
current = module
if name != "":
for a in name.split("."):
current = getattr(current, a)
return current
def _check_compatibility(models: ModelListGP) -> None:
    """Check if a ModelListGP can be converted to a batched model.

    Validations run in a fixed order; the first failing check raises, so
    callers may rely on the exception type/message of the earliest violation.

    Raises:
        UnsupportedError: If sub-module types, model types, output counts or
            training inputs differ across the sub-models.
        NotImplementedError: For heteroskedastic models or custom likelihoods,
            which are not supported yet.
    """
    # check that all submodules are of the same type
    for modn, mod in models[0].named_modules():
        mcls = mod.__class__
        if not all(isinstance(_get_module(m, modn), mcls) for m in models[1:]):
            raise UnsupportedError(
                "Sub-modules must be of the same type across models."
            )
    # check that each model is a BatchedMultiOutputGPyTorchModel
    if not all(isinstance(m, BatchedMultiOutputGPyTorchModel) for m in models):
        raise UnsupportedError(
            "All models must be of type BatchedMultiOutputGPyTorchModel."
        )
    # TODO: Add support for HeteroskedasticSingleTaskGP
    if any(isinstance(m, HeteroskedasticSingleTaskGP) for m in models):
        raise NotImplementedError(
            "Conversion of HeteroskedasticSingleTaskGP is currently unsupported."
        )
    # TODO: Add support for custom likelihoods
    if any(getattr(m, "_is_custom_likelihood", False) for m in models):
        raise NotImplementedError(
            "Conversion of models with custom likelihoods is currently unsupported."
        )
    # check that each model is single-output
    if not all(m._num_outputs == 1 for m in models):
        raise UnsupportedError("All models must be single-output.")
    # check that training inputs are the same
    if not all(
        torch.equal(ti, tj)
        for m in models[1:]
        for ti, tj in zip(models[0].train_inputs, m.train_inputs)
    ):
        raise UnsupportedError("training inputs must agree for all sub-models.")
def model_list_to_batched(model_list: ModelListGP) -> BatchedMultiOutputGPyTorchModel:
    """Convert a ModelListGP to a BatchedMultiOutputGPyTorchModel.

    Args:
        model_list: The `ModelListGP` to be converted to the appropriate
            `BatchedMultiOutputGPyTorchModel`. All sub-models must be of the same
            type and have the shape (batch shape and number of training inputs).

    Returns:
        The model converted into a `BatchedMultiOutputGPyTorchModel`.

    Raises:
        UnsupportedError: If the sub-models are incompatible (see
            `_check_compatibility`), or differ in fidelity parameters,
            hyperparameter scalars, or tensor shapes.

    Example:
        >>> list_gp = ModelListGP(gp1, gp2)
        >>> batch_gp = model_list_to_batched(list_gp)
    """
    models = model_list.models
    _check_compatibility(models)
    # if the list has only one model, we can just return a copy of that
    if len(models) == 1:
        return deepcopy(models[0])
    # construct inputs: all models share train_X; outputs are stacked along the
    # last dimension to form the multi-output training targets
    train_X = deepcopy(models[0].train_inputs[0])
    train_Y = torch.stack([m.train_targets.clone() for m in models], dim=-1)
    kwargs = {"train_X": train_X, "train_Y": train_Y}
    if isinstance(models[0], FixedNoiseGP):
        kwargs["train_Yvar"] = torch.stack(
            [m.likelihood.noise_covar.noise.clone() for m in models], dim=-1
        )
    if isinstance(models[0], SingleTaskMultiFidelityGP):
        init_args = models[0]._init_args
        if not all(
            v == m._init_args[k] for m in models[1:] for k, v in init_args.items()
        ):
            raise UnsupportedError("All models must have the same fidelity parameters.")
        kwargs.update(init_args)
    # construct the batched GP model
    batch_gp = models[0].__class__(**kwargs)
    # split the state dict into tensor-valued and scalar-valued entries
    tensors = {n for n, p in batch_gp.state_dict().items() if len(p.shape) > 0}
    scalars = set(batch_gp.state_dict()) - tensors
    input_batch_dims = len(models[0]._input_batch_shape)
    # ensure scalars agree (TODO: Allow different priors for different outputs)
    for n in scalars:
        v0 = _get_module(models[0], n)
        if not all(torch.equal(_get_module(m, n), v0) for m in models[1:]):
            raise UnsupportedError("All scalars must have the same value.")
    # ensure dimensions of all tensors agree
    for n in tensors:
        shape0 = _get_module(models[0], n).shape
        if not all(_get_module(m, n).shape == shape0 for m in models[1:]):
            raise UnsupportedError("All tensors must have the same shape.")
    # now construct the batched state dict: tensors are stacked along a new
    # batch dimension, except `active_dims`, which is shared across outputs
    scalar_state_dict = {
        s: p.clone() for s, p in models[0].state_dict().items() if s in scalars
    }
    tensor_state_dict = {
        t: (
            torch.stack(
                [m.state_dict()[t].clone() for m in models], dim=input_batch_dims
            )
            if "active_dims" not in t
            else models[0].state_dict()[t].clone()
        )
        for t in tensors
    }
    batch_state_dict = {**scalar_state_dict, **tensor_state_dict}
    # load the state dict into the new model
    batch_gp.load_state_dict(batch_state_dict)
    return batch_gp
def batched_to_model_list(batch_model: BatchedMultiOutputGPyTorchModel) -> ModelListGP:
    """Convert a BatchedMultiOutputGPyTorchModel to a ModelListGP.

    Args:
        batch_model: The `BatchedMultiOutputGPyTorchModel` to be converted to a
            `ModelListGP`.

    Returns:
        The model converted into a `ModelListGP`.

    Raises:
        NotImplementedError: For `HeteroskedasticSingleTaskGP` inputs.

    Example:
        >>> train_X = torch.rand(5, 2)
        >>> train_Y = torch.rand(5, 2)
        >>> batch_gp = SingleTaskGP(train_X, train_Y)
        >>> list_gp = batched_to_model_list(batch_gp)
    """
    # TODO: Add support for HeteroskedasticSingleTaskGP
    if isinstance(batch_model, HeteroskedasticSingleTaskGP):
        raise NotImplementedError(
            "Conversion of HeteroskedasticSingleTaskGP currently not supported."
        )
    batch_sd = batch_model.state_dict()
    # split the state dict into tensor-valued and scalar-valued entries
    tensors = {n for n, p in batch_sd.items() if len(p.shape) > 0}
    scalars = set(batch_sd) - tensors
    input_bdims = len(batch_model._input_batch_shape)
    models = []
    # build one single-output model per batch output by selecting the i-th
    # slice of every batched tensor (shared `active_dims` is copied whole)
    for i in range(batch_model._num_outputs):
        scalar_sd = {s: batch_sd[s].clone() for s in scalars}
        tensor_sd = {
            t: (
                batch_sd[t].select(input_bdims, i).clone()
                if "active_dims" not in t
                else batch_sd[t].clone()
            )
            for t in tensors
        }
        sd = {**scalar_sd, **tensor_sd}
        kwargs = {
            "train_X": batch_model.train_inputs[0].select(input_bdims, i).clone(),
            "train_Y": batch_model.train_targets.select(input_bdims, i)
            .clone()
            .unsqueeze(-1),
        }
        if isinstance(batch_model, FixedNoiseGP):
            noise_covar = batch_model.likelihood.noise_covar
            kwargs["train_Yvar"] = (
                noise_covar.noise.select(input_bdims, i).clone().unsqueeze(-1)
            )
        if isinstance(batch_model, SingleTaskMultiFidelityGP):
            kwargs.update(batch_model._init_args)
        model = batch_model.__class__(**kwargs)
        model.load_state_dict(sd)
        models.append(model)
    return ModelListGP(*models)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
d6f59a69819f4cf1e01f0c222031046166e565de | a743df23910cfcb6ac81da926474603aeb1cab32 | /.i3/bing_images.py | 7bc26d58f4658700eba5624e48d42625a3112bb6 | [
"MIT"
] | permissive | DenisPalchuk/dotfiles | 1252601f848e776715fe7835d135123add9fc53e | 3191a0fc7aa1cf8a1482bfd4c93feb42a8fb9deb | refs/heads/master | 2020-05-02T18:53:26.738092 | 2019-03-28T06:48:52 | 2019-03-28T06:48:52 | 178,142,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | import requests
import json
import os
def main():
    """Fetch today's Bing image-of-the-day and set it as the desktop wallpaper."""
    response = requests.get(
        'http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US')
    data = json.loads(response.text)
    # The archive endpoint answers {"images": [{"url": "/th?id=...", ...}]}.
    url_to_image = data["images"][0]['url']
    file_path = download_file("http://bing.com" + url_to_image)
    # Apply the downloaded file as the background via feh.
    # (Removed a stray no-op `0` expression statement that followed this call.)
    os.system("feh --bg-scale " + file_path)
def download_file(url):
    """Stream *url* into current_image_python.jpg next to this script.

    Returns:
        The absolute path of the written image file.
    """
    # NOTE the stream=True parameter; the `with` block releases the HTTP
    # connection when done (the original never closed the response).
    with requests.get(url, stream=True) as response:
        file_system_path = os.path.dirname(
            os.path.realpath(__file__)) + "/current_image_python.jpg"
        # `image_file` instead of `file`, which shadowed the builtin name.
        with open(file_system_path, 'wb') as image_file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    image_file.write(chunk)
    return file_system_path
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"palchukdenis@gmail.com"
] | palchukdenis@gmail.com |
a3063e76e55f195357aeabfba8c153ef2363fa5c | edbaabe49217ac3626fe285d9301f0583d02b617 | /task7.py | 3e48bc5b05ab442dc4d00c6dab246d01d21e252e | [] | no_license | Durga944/web_scraping | 7da8c4b5a14b6033fb3714e1fca4f98721b66c3a | bebbe59fbda9ba554182e34ab5458f1ab22d433a | refs/heads/main | 2023-08-05T04:17:30.069262 | 2021-09-25T10:40:10 | 2021-09-25T10:40:10 | 410,244,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | from task5 import*
def analyse_movies_directors(movie_list):
    """Count how many movies each director has in *movie_list*.

    Writes the {director: count} mapping to task7.json and pretty-prints it.
    The original implementation compared every movie against every other one
    (O(n^2)); a single Counter pass produces the same mapping in O(n).

    Keys are built as str(movie["director"])[2:-2], which strips the "['...']"
    wrapping -- assumes each "director" value is a one-element list of one
    director name, as produced by task5 (TODO confirm).
    """
    from collections import Counter
    director_dict = dict(Counter(
        str(movie["director"])[2:-2] for movie in movie_list
    ))
    with open("task7.json", "w+") as language_data:
        json.dump(director_dict, language_data, indent=4)
    pprint(director_dict)
analyse_movies_directors(movie_data)
| [
"noreply@github.com"
] | noreply@github.com |
3667c8f1f8d45c41f552e9abe2d97e7838ac9395 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc083/B/answers/896331_rin1120.py | b9ceb3808d88e51ac154a5487becbe592cfa4936 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | N, A, B = map(int, input().split())
# Sum every number in [1, N] whose decimal digit sum lies within [A, B].
ans = 0
for num in range(1, N + 1):
    digit_total = sum(int(ch) for ch in str(num))
    if A <= digit_total <= B:
        ans += num
print(ans) | [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
e3dd198e14372fd22f803e0d441d2904fea3e65c | d24ba8ee62d040be0da52d4e322290843d213393 | /BookShare/app/test.py | e8e4501edbfdca9372ce4d6ee251d20003849b2b | [] | no_license | ShuQiongZheng/COMP2011-Coursework2 | f0ec07e8b5431f91f912db5dce707ab3dadba297 | bfce332d7fdbdde4a534b047a503f2ef60c411d3 | refs/heads/master | 2023-02-13T16:59:46.427339 | 2019-12-10T10:44:47 | 2019-12-10T10:44:47 | 227,087,277 | 0 | 0 | null | 2023-02-02T05:11:21 | 2019-12-10T10:12:01 | Python | UTF-8 | Python | false | false | 2,777 | py | import unittest
from app import app
class MyTestCase(unittest.TestCase):
    """HTTP-level tests for the BookShare Flask app via app.test_client().

    Several cases deliberately submit odd or over-length field values to
    exercise form validation; they assert only on the HTTP status code.
    """
    def test_login(self):
        """POST a small, plausible login form; the page renders with 200."""
        with app.test_client() as client:
            response = client.post('/login/', data=dict( # Use the test client to simulate sending web requests
                username='A',
                password='123456'
            ), follow_redirects=True)
            assert response.status_code == 200 # The assertion is 200
    def test_login_second(self):
        """POST unusual credentials; the app still answers 200 (form re-rendered)."""
        with app.test_client() as client:
            response = client.post('/login/', data=dict( # Use the test client to simulate sending web requests
                username='12345687@@@@@',
                password='55555555555555555'
            ), follow_redirects=True)
            assert response.status_code == 200 # The assertion is 200
    # Test the registry to determine the response and status codes returned
    def test_register(self):
        """Registration with a short username/password pair."""
        with app.test_client() as client:
            response = client.post('/register/', data=dict(
                username='abc',
                password='1234'
            ), follow_redirects=True)
            assert response.status_code == 200
    def test_register_second(self): # Test the registry to determine the response and status codes returned
        """Registration with an over-length username (validation path)."""
        with app.test_client() as client:
            response = client.post('/register/', data=dict(
                username='sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss',
                password='1234'
            ), follow_redirects=True)
            assert response.status_code == 200
    # Test the post access /my_book/ to determine the response and status code returned
    def test_post_mybook(self):
        """Create a book entry with small field values."""
        with app.test_client() as client:
            response = client.post('/my_book/', data=dict(
                title='1234',
                authdescripitionor='1234',
                author="abc",
                math=True
            ), follow_redirects=True)
            assert response.status_code == 200
    def test_post_mybook_second(self):
        """Create a book entry with an over-length title (validation path)."""
        with app.test_client() as client:
            response = client.post('/my_book/', data=dict(
                title='sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss',
                authdescripitionor='1234',
                author="abc",
                math=True
            ), follow_redirects=True)
            assert response.status_code == 200
    # test get method to get new
    def test_get_mybook(self):
        """An unauthenticated GET of /my_book/ redirects (302), presumably to login."""
        with app.test_client() as client:
            response = client.get('/my_book/')
            assert response.status_code == 302
if __name__ == '__main__':
unittest.main() | [
"noreply@github.com"
] | noreply@github.com |
89cd4ca057d69b4c1e05d0a821256293352b855f | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/_codeforces/1467_c.py | a74cb1c4230efe5766c5cfc9695586b0a0b3e910 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Skeleton solver: consumes the input layout but computes nothing yet."""
    from pprint import pprint
    import sys
    input = sys.stdin.readline

    def read_case():
        # One query: three sizes on the first line, then three data lines.
        n1, n2, n3 = map(int, input().split())
        piles = [list(map(int, input().split())) for _ in range(3)]

    q = int(input())
    for _ in range(q):
        read_case()
class TestClass(unittest.TestCase):
    """Feeds sample input to resolve() via stdin and compares stdout."""
    def assertIO(self, input, output):
        """Run resolve() with *input* on stdin; assert stdout equals *output*."""
        # Swap the real streams for StringIO buffers for the duration of the run.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        # [:-1] drops the trailing newline produced by print().
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_input_1(self):
        """First sample case from the problem statement."""
        print("test_input_1")
        input = """2 4 1
1 2
6 3 4 5
5"""
        output = """20"""
        self.assertIO(input, output)
    def test_input_2(self):
        """Second sample case from the problem statement."""
        print("test_input_2")
        input = """3 2 2
7 5 4
2 9
7 1"""
        output = """29"""
        self.assertIO(input, output)
    def test_input_3(self):
        # Placeholder case: fails until real data replaces "xxx".
        print("test_input_3")
        input = """xxx"""
        output = """xxx"""
        self.assertIO(input, output)
    def test_input_4(self):
        # Placeholder case: fails until real data replaces "xxx".
        print("test_input_4")
        input = """xxx"""
        output = """xxx"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"kanai@wide.ad.jp"
] | kanai@wide.ad.jp |
a6f904efba10e1fbab2f31b9138a1a593af8144f | 23de9f92e95b0428bebb4152d96374352eba1f4b | /combine_pdf.py | 9ba50dbb19e21bda93716d932d351982b6c6bee3 | [] | no_license | iccowan/pdf_combiner | 340a2d8e70fe365b8dc94fb3a5db6bf4e992858b | de36eae78078b475eefff8f1eda0b927ed521868 | refs/heads/master | 2020-05-18T09:48:09.442232 | 2019-04-30T21:52:33 | 2019-04-30T21:52:33 | 184,336,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | #!/usr/bin/env python
from PyPDF2 import PdfFileReader, PdfFileWriter
def pdf_cat(input_files, output_stream):
    """Concatenate the pages of *input_files* (PDF paths, in order) into a
    single PDF written to the *output_stream* path.

    Raises whatever open()/PyPDF2 raise for missing or malformed files; the
    input handles are always closed.
    """
    input_streams = []
    try:
        # First open all the files, then produce the output file, and
        # finally close the input files. This is necessary because
        # the data isn't read from the input files until the write
        # operation. Thanks to
        # https://stackoverflow.com/questions/6773631/problem-with-closing-python-pypdf-writing-getting-a-valueerror-i-o-operation/6773733#6773733
        for input_file in input_files:
            input_streams.append(open(input_file, 'rb'))
        writer = PdfFileWriter()
        for reader in map(PdfFileReader, input_streams):
            for n in range(reader.getNumPages()):
                writer.addPage(reader.getPage(n))
        # `with` guarantees the output handle is flushed and closed --
        # the original opened it and never closed it.
        with open(output_stream, 'wb') as output_file:
            writer.write(output_file)
    finally:
        for f in input_streams:
            f.close()
def main():
    """Prompt for two input PDFs plus an output name, then merge them."""
    # Gather the file names from the user, in the same order as before.
    first = input("Enter the name of the first file to combine: ")
    second = input("Enter the name of the second file to combine: ")
    output = input("Enter the name of the output file (no extension): ") + ".pdf"
    pdf_cat([first, second], output)
main()
| [
"ian@cowanemail.com"
] | ian@cowanemail.com |
528943be9743fda9bd8b1ce58150965fe2446877 | c34ec26602207dc070b03fdb501c5abc5b7a2b89 | /board/models.py | 018910fd3d66c7237ae72b1536436da5191bb2c4 | [] | no_license | mcandidier/citrus | 80bb9630a7e7434a452378a54128a94f46e0dba2 | cb7d0d2a0e5c1512fa23adf84b71a69d33225ab7 | refs/heads/master | 2023-01-10T14:24:11.494600 | 2019-10-24T08:03:18 | 2019-10-24T08:03:18 | 213,924,555 | 0 | 0 | null | 2023-01-05T23:18:07 | 2019-10-09T13:21:15 | TypeScript | UTF-8 | Python | false | false | 399 | py | from django.db import models
from django.contrib.auth.models import User
class Board(models.Model):
    """A board owned by a single user."""
    name = models.CharField(max_length=128)
    # Reverse accessor: user.boards; deleting the user deletes their boards.
    owner = models.ForeignKey(User, related_name='boards', on_delete=models.CASCADE)
    date_created = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    def __str__(self):
        # BUG FIX: the original returned the literal string '{self.name}'
        # because the f-prefix was missing from the string.
        return f'{self.name}'
| [
"mannycandidier@gmail.com"
] | mannycandidier@gmail.com |
006e8135fa1fa82728fac868d73e4071f1a79e68 | e5740540f6409b63d09a51aea243917583b3f172 | /migrations/versions/4cfda2af8664_.py | 7b8c3ab24b99b4bfd61458baaa4de710f1836d45 | [] | no_license | SER-401-Team-36/football-app | 9e13f59b663a97653352964f1600382a53c716a4 | 58de57ba6fa25ded2c261b8745d62e1bc6cb1c30 | refs/heads/main | 2023-04-18T11:23:55.538702 | 2021-04-23T16:39:51 | 2021-04-23T16:39:51 | 304,128,123 | 0 | 0 | null | 2021-04-22T01:56:08 | 2020-10-14T20:28:51 | Python | UTF-8 | Python | false | false | 1,429 | py | """empty message
Revision ID: 4cfda2af8664
Revises: 9b77973248af
Create Date: 2021-02-19 05:23:32.738161
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4cfda2af8664'
down_revision = '9b77973248af'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: create the player_draft_user join table and make draft.user_id required."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('player_draft_user',
    sa.Column('player_id', sa.Integer(), nullable=False),
    sa.Column('draft_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('time_created', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
    sa.Column('time_updated', sa.DateTime(timezone=True), nullable=True),
    sa.ForeignKeyConstraint(['draft_id'], ['draft.id'], ),
    sa.ForeignKeyConstraint(['player_id'], ['player.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('player_id', 'draft_id')
    )
    # Tighten the existing column after the table exists.
    op.alter_column('draft', 'user_id',
           existing_type=sa.INTEGER(),
           nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert: restore draft.user_id to nullable and drop player_draft_user."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('draft', 'user_id',
           existing_type=sa.INTEGER(),
           nullable=True)
    op.drop_table('player_draft_user')
    # ### end Alembic commands ###
| [
"jborden36@gmail.com"
] | jborden36@gmail.com |
0e2463c6cb9d8e330322439d06736eb909665ebf | 792b3b5eddf4e070ec1fc99fb799914a332803cf | /rgsim/gui/building_panel.py | 73432b5060877769812e0331d06012c11e6353a4 | [] | no_license | verdesmarald/rgsim | 981db4367850e5cab5f574637c000229702425d1 | 6ee277d3da7c4fc67fefd075e316b8fcd2dc20ae | refs/heads/master | 2023-02-08T18:00:25.508225 | 2020-12-25T03:31:50 | 2020-12-25T03:31:50 | 324,280,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | from __future__ import annotations
from typing import TYPE_CHECKING
import wx # type: ignore
from . import shared
if TYPE_CHECKING:
from ..entities import building
class BuildingPanel(wx.Panel):
    """wx panel showing one button per building in the shared game state."""

    def __init__(self, *args, **kwargs):
        """Build the panel; all args are forwarded to wx.Panel."""
        super().__init__(*args, **kwargs)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.border = wx.BoxSizer()
        # Wrap the inner sizer with a 5px border on all sides.
        self.border.Add(self.sizer, 1, wx.ALL | wx.EXPAND, 5)
        for building_state in shared.game_state.buildings.values():
            # NOTE(review): self.button is reassigned each iteration, so it
            # ends up referencing only the last button -- confirm intentional.
            self.button = wx.Button(self, label=str(f'{building_state.building.name}: {building_state.owned}'))
            self.sizer.Add(self.button)
        self.SetSizerAndFit(self.border)
| [
"jmbungard@gmail.com"
] | jmbungard@gmail.com |
063c20aca5f58c1c231ec9832567e20e23ea98ba | 0ec6c2eeff4c01bd8bf9316eabf35bafd98233f4 | /chat/pusher_integration.py | 628348ae8c9b576114d3dbf8f015fc550db3efdd | [] | no_license | michaeltingley/bw | d013e0043132e9b3885beb6b96519a6665f6a619 | bd44a53d1a4dd936099374f73e0cd049784b2d38 | refs/heads/master | 2023-01-05T06:29:53.555597 | 2016-01-20T05:10:43 | 2016-01-20T05:10:43 | 49,931,111 | 0 | 0 | null | 2022-12-26T19:47:02 | 2016-01-19T06:18:57 | JavaScript | UTF-8 | Python | false | false | 1,576 | py | """Contains functionality for interfacing with the Pusher module."""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseBadRequest, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from pusher import Pusher
# Module-level Pusher client, configured from Django settings at import time.
PUSHER = Pusher(
    app_id=settings.PUSHER_APP_ID,
    key=settings.PUSHER_KEY,
    secret=settings.PUSHER_SECRET
)
def notify_conversation_updated(user, conversation):
    """Push a 'conversation updated' event to the user's private channel.

    Args:
        user (User): The user that should be notified.
        conversation (Conversation): The conversation that has been updated.
    """
    channel_name = 'private-participant-' + user.username
    payload = conversation.as_dict()
    PUSHER.trigger(channel_name, 'conversation updated', payload)
@login_required(login_url='/chat/login/')
@csrf_exempt
def authenticate(request):
    """Authenticate a Pusher private-channel subscription.

    Args:
        POST['channel_name'] (str): The channel the user is trying to subscribe
            to, in the form 'private-<resource>-<resource_id>'.
        POST['socket_id'] (str): The socket the subscription is being made to.

    Returns:
        JsonResponse with the Pusher auth signature, or HttpResponseBadRequest
        when a user tries to subscribe to someone else's participant channel.
    """
    # maxsplit=2 keeps hyphenated resource ids (e.g. usernames containing '-')
    # intact; a plain split('-') raised ValueError on the 3-way unpack.
    _, resource, resource_id = request.POST['channel_name'].split('-', 2)
    if (resource == 'participant'
            and request.user.username != resource_id):
        return HttpResponseBadRequest(
            'User not permitted to subscribe to participant updates'
        )
    return JsonResponse(PUSHER.authenticate(
        channel=request.POST['channel_name'],
        socket_id=request.POST['socket_id']
    ))
| [
"michaelalantingley@gmail.com"
] | michaelalantingley@gmail.com |
adbeff76935cbd7b2290404a3caf4ecbd26075b6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_motion.py | 19c9b83026d27cb66cfde07dd09ad7733cf6dde8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py |
#calss header
class _MOTION():
def __init__(self,):
self.name = "MOTION"
self.definitions = [u'the act or process of moving, or a particular action or movement: ', u'a polite way of referring to the process of getting rid of solid waste from the body, or the waste itself: ', u'a formal suggestion made, discussed, and voted on at a meeting: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cfc5f818426bd33c48bb186b2b69820dad51cf02 | f87febb0a299605a3b09250c750273af0729cd29 | /__init__.py | d41574b679c29eab33c404bb9f923842c709cd37 | [
"MIT"
] | permissive | Anxiko/feld | 677184f26e1b251a6246ae0ef3ecd4a0fe4dd1b7 | 88f92a0583e6f346ad6009e91c14a05d5310fb50 | refs/heads/master | 2022-11-04T05:34:24.036919 | 2020-06-22T21:54:16 | 2020-06-22T21:58:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from .feld import Feld
| [
"Kindos7@gmail.com"
] | Kindos7@gmail.com |
9fb11faad70e9d3527ef6937bba12ee7d6eb1ca9 | 0774c3faafc4ea49085b0be0b91f4609ca817460 | /Common/config.py | 9fa418c9b4cf626395cacfcf9c352fbcd07170e2 | [] | no_license | zh-sky/AutoFrame | 6398c3ebb7a9c4ae09d4f689f55c31bbbd41d8cf | 7fae0123682ff16f86b6330f88bcce7012822419 | refs/heads/master | 2022-04-25T01:23:18.399699 | 2020-04-28T09:56:57 | 2020-04-28T09:56:57 | 259,596,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | import configparser
from Common import logger
"""
封装解析文件方法
"""
class Config:
    """Thin wrapper around configparser that persists every mutation back to
    the INI file it was loaded from."""

    def __init__(self, path):
        """Load the INI file at *path* (a missing file yields an empty config)."""
        # Parser instance holding the parsed sections.
        self.config = configparser.ConfigParser()
        # Path the configuration is re-written to after every mutation.
        self.path = path
        self.config.read(path)

    def read(self, section=None):
        """Return the key/value pairs of *section* as a plain dict.

        Logs and returns None when the section is missing or no section
        name was supplied.
        """
        if section is not None:
            if section not in self.config.sections():
                logger.error('%s不在配置文件里' % section)
                return
            res = {}
            for key, value in self.config[section].items():
                res[key] = value
            return res
        else:
            logger.warning('请输入想要读取的分组名称')

    def add(self, section, value):
        """Add (or overwrite) *section* with the mapping *value* and persist.

        Invalid argument types are logged and ignored.
        """
        # isinstance (rather than type(x) == T) also accepts subclasses.
        if isinstance(section, str) and isinstance(value, dict):
            self.config[section] = value
            # `with` guarantees the handle is closed; the original passed an
            # open() result directly to write() and leaked the handle.
            with open(self.path, 'w') as config_file:
                self.config.write(config_file)
        else:
            logger.error('请输入字符串类型的key和字典类型的value')

    def delete_section(self, section):
        """Remove *section* from the configuration and persist the change."""
        try:
            self.config.remove_section(section)
        except KeyError:
            # NOTE(review): ConfigParser.remove_section returns False for a
            # missing section rather than raising KeyError, so this handler
            # looks unreachable; kept for backward compatibility.
            logger.error('输入的分组名%s不在配置文件里' % section)
        with open(self.path, 'w') as config_file:
            self.config.write(config_file)

    def delete_option(self, section, option):
        """Remove a single *option* from *section* and persist the change."""
        if section in self.config.sections():
            if option in self.config[section].keys():
                self.config.remove_option(section, option)
                with open(self.path, 'w') as config_file:
                    self.config.write(config_file)
            else:
                logger.error('错误的键名%s' % option)
        else:
            logger.error('输入的分组名%s不在配置文件里' % section)
if __name__ == '__main__':
    # Ad-hoc smoke test. NOTE(review): this mutates conf.ini in the current
    # directory (delete_section rewrites the file) -- run with care.
    config = Config('conf.ini')
    config.read('Mysql')
    config.delete_section('haha')
| [
"18310226539@163.com"
] | 18310226539@163.com |
d516750fb5a1a90fc4d562b94cf5101053ef800c | 51121dfadd85b220448b00383eaa7cff221427df | /Code/scrapy/shiyanlou/shiyanlou/models.py | 8f0d64de0e80064e72fb7032d8e9bdeb65b0701a | [] | no_license | lilywang20119/shiyanlou | 62f5ef85736608aacf76207b1b5de6bdcc32c258 | 0fc29461c2e0132abce2b66b58bbd6e705f8e172 | refs/heads/master | 2020-04-11T08:30:38.873166 | 2019-03-12T00:15:49 | 2019-03-12T00:15:49 | 161,645,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | #!/usr/bin/env python3
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column,Integer,String
from sqlalchemy import Date
# Engine pointing at the local MySQL `shiyanlou` database (utf8 charset).
engine = create_engine('mysql+mysqldb://root@localhost:3306/shiyanlou?charset=utf8')
# Declarative base class that the ORM models below inherit from.
Base = declarative_base()
class Course(Base):
    """ORM model for a scraped course record."""
    __tablename__ = 'courses'
    id = Column(Integer,primary_key=True)
    name = Column(String(64),index=True)
    description = Column(String(1024))
    # Course category label; indexed for filtering. (Shadows builtin `type`,
    # but only as a class attribute / column name.)
    type = Column(String(64),index=True)
    # Number of students enrolled in the course.
    students = Column(Integer)
class User(Base):
    """ORM model for a scraped user profile."""
    __tablename__ = 'users'
    id = Column(Integer,primary_key=True)
    name = Column(String(64),index=True)
    # Account type label (shadows builtin `type`, column name only).
    type = Column(String(64))
    join_date = Column(Date)
    level = Column(Integer,index=True)
    status = Column(String(64),index=True)
    school_job =Column(String(64))
    # Number of courses the user has studied.
    learn_courses_num = Column(Integer)
if __name__ == '__main__':
Base.metadata.create_all(engine) | [
"noreply@github.com"
] | noreply@github.com |
bd332698a29bfa43f6e4bbd49ff2f89aacf70b72 | aed1fdbad787e252c19b6fa1e7328014df02feda | /Module-03-Getting-Deeper-02/Example03_Multiple_Inheritance/example01_a_skeleton.py | 45e97ebe9039dd875b58f698a1dacad4ab00a9fe | [
"MIT"
] | permissive | CodingGearsCourses/Python-OO-Programming-Fundamentals | 35d6aac715a42e668355eed4b503da8bac92d876 | 5516c935aefae98dcd20a77ea98b41865d44675c | refs/heads/main | 2023-02-21T22:28:14.323831 | 2021-01-28T04:16:19 | 2021-01-28T04:16:19 | 333,641,074 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # https://www.globaletraining.com/
# Simple Inheritance
class ParentClass1:
    """Minimal base class used to demonstrate simple inheritance."""
    pass
class ChildClass1(ParentClass1):
    """Empty subclass of ParentClass1 used to illustrate the MRO."""
    pass
def main():
    """Demonstrate isinstance() across an inheritance chain and print the MRO."""
    instance = ChildClass1()

    # An instance satisfies isinstance() for its own class and every base class.
    print(isinstance(instance, ChildClass1))
    print(isinstance(instance, ParentClass1))

    # Method Resolution Order (MRO)
    print("Method Resolution Order ".center(40, "-"))
    for entry in ChildClass1.mro():
        print(entry)
if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    main()
"training@codinggears.io"
] | training@codinggears.io |
49f443bca89a578d3f8a95d298ab5d11a2d23640 | b2d091ac795fffa63b00a517b4b56ff3da75df28 | /home/admin.py | 90d7a155be98dfcfc4f494f076ddb0626258fd93 | [
"MIT"
] | permissive | Qvineox/accessControl_app | d553ef1159ef140dc1a7a6bd7f222559b15fd4ae | 4d3f451ad21e8727a3c3b73a2defc95549da8b27 | refs/heads/main | 2023-05-06T07:54:04.198735 | 2021-05-25T14:00:20 | 2021-05-25T14:00:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | from django.contrib import admin
from .models import Entry
class AuthorAdmin(admin.ModelAdmin):
    """Placeholder admin class with default ModelAdmin behaviour.

    NOTE(review): not registered with any model in this module — presumably
    registered elsewhere or left over; verify it is still needed.
    """
    pass
@admin.register(Entry)
class AdminResources(admin.ModelAdmin):
    """Django-admin configuration for Entry (access-request) records."""
    # Columns shown on the change-list page.
    list_display = ('id', 'author', 'executor', 'status', 'created_at', 'granted_at', 'expired_at', 'resource')
    # Sidebar filters on the change-list page.
    list_filter = ('status', 'author')
    # Edit-form layout. Section titles are Russian UI strings (kept verbatim):
    # "Access agents", "Validity dates", "Request status".
    fieldsets = (
        ('Агенты доступа', {
            'fields': ('author', 'executor', 'resource')
        }),
        ('Даты действия', {
            'fields': ('granted_at', 'expired_at')
        }),
        ('Статус заявки', {
            'fields': ['status']
        }),
    )
| [
"yarlrusman@gmail.com"
] | yarlrusman@gmail.com |
178d3eff0f1328a1e00b87c67db329fa0c82a193 | ed46d5e9a31c1d0afb094839e1f54dd7ba38e942 | /cutkum/ck_model.py | 3766fca53738eb443cb3ccf4ff075e3e36d94301 | [
"MIT"
] | permissive | punch872/cutkum | 7f0aee56619b774d6a73dee06cdec01df1196a56 | da59520f12cd0ba8c21c774d0cd76e6db63df112 | refs/heads/master | 2021-04-28T17:49:05.886524 | 2018-02-17T09:07:53 | 2018-02-17T09:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,375 | py | #!/usr/bin/env python
#
# Pucktada Treeratpituk (https://pucktada.github.io/)
# License: MIT
# 2017-05-01
#
# A recurrent neural network model (LSTM) for thai word segmentation
import logging
import re
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.rnn as rnn
from . import char_dictionary
def load_settings(sess):
    """Pull the stored hyper-parameter constants and inference tensors out of
    a graph previously imported under the 'prefix' name scope.

    Returns a pair (model_settings, model_vars): the first maps config names
    to their evaluated values, the second maps placeholder/output names to
    the corresponding graph tensors.
    """
    graph = tf.get_default_graph()

    config_names = ('cell_sizes', 'num_layers', 'input_classes', 'label_classes',
                    'learning_rate', 'l2_regularization', 'cell_type', 'direction')
    # Each config was exported as a tf.constant named 'prefix/<key>'.
    model_settings = {
        key: sess.run(graph.get_tensor_by_name('prefix/%s:0' % key))
        for key in config_names
    }

    model_vars = {
        'inputs': graph.get_tensor_by_name('prefix/placeholder/inputs:0'),
        'seq_lengths': graph.get_tensor_by_name('prefix/placeholder/seq_lengths:0'),
        'keep_prob': graph.get_tensor_by_name('prefix/placeholder/keep_prob:0'),
        'probs': graph.get_tensor_by_name('prefix/batch_probs:0'),
    }
    return model_settings, model_vars
def load_graph(model_file):
    """Deserialize a frozen-graph protobuf file and import it into a new
    tf.Graph under the name scope 'prefix'; return that graph.
    """
    # Parse the serialized GraphDef from disk.
    with tf.gfile.GFile(model_file, "rb") as fh:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fh.read())

    # Import the parsed definition into a fresh default graph; all imported
    # ops are prefixed with 'prefix/'. Omitted kwargs keep their defaults.
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='prefix')
    return graph
def load_model2(sess, meta_file, checkpoint_file):
    """Restore a checkpointed model from its meta-graph, then collect the
    saved config values and placeholder tensors needed for inference.

    Returns (model_settings, model_vars) in the same shape as load_settings().
    """
    saver = tf.train.import_meta_graph(meta_file, clear_devices=True)
    saver.restore(sess, checkpoint_file)

    # Evaluated config constants, keyed by tensor name without the ':0' suffix.
    model_settings = {
        tensor.name.split(':')[0]: sess.run(tensor)
        for tensor in tf.get_collection('configs')
    }

    model_vars = dict()
    for placeholder in tf.get_collection('placeholders'):
        # Placeholder names look like '<scope>/<name>:<index>'; keep the middle part.
        scope, name, _ = re.split('[:/]', placeholder.name)
        model_vars[name] = placeholder
    model_vars['probs'] = tf.get_collection('probs')[0]
    return model_settings, model_vars
class CkModel:
    """Cutkum model: a stacked (optionally bidirectional) recurrent neural
    network (LSTM / RNN) for Thai word segmentation, built as a TF-1.x
    static graph.

    `model_settings` must supply: 'cell_size' (per-layer cell sizes; indexed
    per layer in _inference), 'num_layers', 'input_classes', 'label_classes',
    'learning_rate', 'l2_regularization', 'cell_type' ('lstm'/'gru'/other),
    and 'direction' (1 = forward only, 2 = bidirectional).
    """
    def __init__(self, model_settings):
        """Store hyper-parameters and export them as graph constants so that
        a restored meta-graph (see load_model2/load_settings) can recover them."""
        logging.info('...init WordSegmentor')
        # NOTE(review): the dict key read here is 'cell_size' while the exported
        # constant below is named "cell_sizes" (and load_settings() reads back
        # 'cell_sizes') — verify callers supply the matching key.
        self.cell_sizes = model_settings["cell_size"] # list of cell_size, same length as num_layers
        #self.num_unroll = model_settings["num_unroll"]
        self.num_layers = model_settings["num_layers"]
        self.input_classes = model_settings['input_classes']
        self.label_classes = model_settings['label_classes']
        self.learning_rate = model_settings['learning_rate']
        self.l2_regularization = model_settings['l2_regularization'] # 0.1
        self.cell_type = model_settings['cell_type']
        self.direction = model_settings['direction']
        # Export each hyper-parameter into the 'configs' collection.
        tf.add_to_collection('configs', tf.constant(self.cell_sizes, name="cell_sizes"))
        #tf.add_to_collection('configs', tf.constant(self.num_unroll, name="num_unroll"))
        tf.add_to_collection('configs', tf.constant(self.num_layers, name="num_layers"))
        tf.add_to_collection('configs', tf.constant(self.input_classes, name="input_classes"))
        tf.add_to_collection('configs', tf.constant(self.label_classes, name="label_classes"))
        tf.add_to_collection('configs', tf.constant(self.learning_rate, name="learning_rate"))
        tf.add_to_collection('configs', tf.constant(self.l2_regularization, name="l2_regularization"))
        tf.add_to_collection('configs', tf.constant(self.cell_type, name="cell_type"))
        tf.add_to_collection('configs', tf.constant(self.direction, name="direction"))
        # Global step is advanced manually via increment_global_step_op.
        self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.increment_global_step_op = tf.assign(self.global_step, self.global_step+1)
    def _create_placeholders(self):
        """Declare the time-major input/output/length/dropout placeholders and
        register them in the 'placeholders' collection for later restore."""
        logging.info('...create placeholder')
        with tf.name_scope("placeholder"):
            # (time, batch, in)
            self.inputs = tf.placeholder(tf.float32, (None, None, self.input_classes), name="inputs")
            # (time, batch, out)
            self.outputs = tf.placeholder(tf.float32, (None, None, self.label_classes), name="outputs")
            # [batch]
            self.seq_lengths = tf.placeholder(tf.int32, [None], name="seq_lengths")
            #self.fw_state = tf.placeholder(tf.float32, [self.num_layers, 2, None, self.cell_sizes], name="fw_state")
            #self.bw_state = tf.placeholder(tf.float32, [self.num_layers, 2, None, self.cell_sizes], name="bw_state")
            self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        tf.add_to_collection('placeholders', self.inputs)
        tf.add_to_collection('placeholders', self.outputs)
        tf.add_to_collection('placeholders', self.seq_lengths)
        #tf.add_to_collection('placeholders', self.fw_state)
        #tf.add_to_collection('placeholders', self.bw_state)
        tf.add_to_collection('placeholders', self.keep_prob)
    def _inference(self):
        """Build the recurrent stack and per-timestep softmax outputs.

        Sets self.logits / self.probs (time-major, label_classes wide) and
        stores the final RNN state in self.states; self.probs is also added
        to the 'probs' collection for restore-time lookup."""
        logging.info('...create inference')
        #fw_state_list = tf.unstack(self.fw_state, axis=0)
        #fw_state_tuple = tuple(
        #    [tf.contrib.rnn.LSTMStateTuple(fw_state_list[idx][0], fw_state_list[idx][1])
        #    for idx in range(self.num_layers)])
        #bw_state_list = tf.unstack(self.bw_state, axis=0)
        #bw_state_tuple = tuple(
        #    [tf.contrib.rnn.LSTMStateTuple(bw_state_list[idx][0], bw_state_list[idx][1])
        #    for idx in range(self.num_layers)])
        # Forward cell stack: one (dropout-wrapped) cell per layer.
        fw_cells = list()
        for i in range(0, self.num_layers):
            if (self.cell_type == 'lstm'):
                cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
            elif (self.cell_type == 'gru'):
                # change to GRU
                # NOTE(review): this branch still constructs an LSTMCell;
                # rnn.GRUCell looks intended, but swapping it would invalidate
                # existing checkpoints — confirm before changing.
                cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
            else:
                cell = rnn.BasicRNNCell(num_units=self.cell_sizes[i])
            cell = rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
            fw_cells.append(cell)
        self.fw_cells = rnn.MultiRNNCell(fw_cells, state_is_tuple=True)
        if (self.direction == 2): # bidirectional
            print('bidirectional')
            # Backward cell stack, mirroring the forward one.
            bw_cells = list()
            for i in range(0, self.num_layers):
                if (self.cell_type == 'lstm'):
                    cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
                elif (self.cell_type == 'gru'):
                    # change to GRU
                    # NOTE(review): same as above — still an LSTMCell.
                    cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
                else:
                    cell = rnn.BasicRNNCell(num_units=self.cell_sizes[i])
                cell = rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
                bw_cells.append(cell)
            self.bw_cells = rnn.MultiRNNCell(bw_cells, state_is_tuple=True)
        if (self.direction == 1):
            rnn_outputs, states = tf.nn.dynamic_rnn(
                self.fw_cells,
                self.inputs,
                #initial_state=fw_state_tuple,
                sequence_length=self.seq_lengths,
                dtype=tf.float32, time_major=True)
        else: # self.direction = 2
            # bidirectional rnn
            outputs, states = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=self.fw_cells,
                cell_bw=self.bw_cells,
                #initial_state_fw=fw_state_tuple,
                #initial_state_bw=bw_state_tuple,
                dtype=tf.float32,
                sequence_length=self.seq_lengths,
                inputs=self.inputs, time_major=True)
            # Concatenate forward/backward outputs along the feature axis.
            rnn_outputs = tf.concat(outputs, axis=2)
        # project output from rnn output size to OUTPUT_SIZE. Sometimes it is worth adding
        # an extra layer here.
        self.projection = lambda x: layers.linear(x,
            num_outputs=self.label_classes, activation_fn=tf.nn.sigmoid)
        self.logits = tf.map_fn(self.projection, rnn_outputs, name="logits")
        self.probs = tf.nn.softmax(self.logits, name="probs")
        self.states = states
        tf.add_to_collection('probs', self.probs)
    def _create_loss(self):
        """Masked softmax cross-entropy loss plus L2 weight penalty, and a
        padding-aware accuracy metric (padded timesteps are excluded)."""
        logging.info('...create loss')
        with tf.name_scope("loss"):
            # shape=[Time * Batch, label_classes]
            outputs_flat = tf.reshape(self.outputs, [-1, self.label_classes])
            logits_flat = tf.reshape(self.logits, [-1, self.label_classes])
            # calculate the losses shape=[Time * Batch]
            losses = tf.nn.softmax_cross_entropy_with_logits(
                labels=outputs_flat, logits=logits_flat)
            # create mask [Time * Batch] where 0: padded, 1: not-padded
            mask = outputs_flat[:,0]
            mask = tf.abs(tf.subtract(mask, tf.ones_like(mask)))
            # mask the losses
            masked_losses = mask * losses
            # L2 over all trainable variables except biases and names tagged "noreg".
            l2_reg = self.l2_regularization
            l2 = l2_reg * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
                if not ("noreg" in tf_var.name or "Bias" in tf_var.name))
            self.losses = masked_losses + l2
            # Mean loss is normalized by the number of non-padded entries.
            self.num_entries = tf.reduce_sum(mask)
            self.mean_loss = tf.reduce_sum(masked_losses) / self.num_entries
            # accuracy
            correct_pred = tf.cast(tf.equal(tf.argmax(outputs_flat, 1), tf.argmax(logits_flat, 1)), tf.float32)
            mask_correct_pred = mask * correct_pred
            self.accuracy = tf.reduce_sum(mask_correct_pred) / self.num_entries
    def _create_optimizer(self):
        """Adam optimizer on mean_loss with global-norm gradient clipping (1.0).

        NOTE(review): self.global_step is not passed to apply_gradients (the
        version that did is commented out), so the step counter only advances
        via increment_global_step_op."""
        logging.info('...create optimizer')
        with tf.name_scope("train"):
            #self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.mean_loss, global_step=self.global_step)
            max_gradient_norm = 1.0
            params = tf.trainable_variables()
            gradients = tf.gradients(self.mean_loss, params)
            clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
            #self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\
            #    .apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\
                .apply_gradients(zip(clipped_gradients, params))
    def _create_summary(self):
        """Scalar summaries for mean loss and accuracy, merged into summary_op."""
        logging.info('...create summary')
        tf.summary.scalar("mean_loss", self.mean_loss)
        tf.summary.scalar("accuracy", self.accuracy)
        self.summary_op = tf.summary.merge_all()
    def build_graph(self):
        """Assemble the full graph: placeholders -> RNN -> loss -> optimizer -> summaries."""
        self._create_placeholders()
        self._inference()
        self._create_loss()
        self._create_optimizer()
        self._create_summary()
if __name__ == '__main__':
    # Smoke-test: build the segmentation graph with a small configuration.
    print('create word segmentor model')

    # Fix: the module imports `char_dictionary` (not the class), so the bare
    # name CharDictionary() was a NameError — qualify it with the module.
    char_dict = char_dictionary.CharDictionary()

    # MODEL — every key below is read by CkModel.__init__; the original demo
    # omitted 'l2_regularization'/'cell_type'/'direction' (KeyError) and
    # passed a scalar cell size although _inference indexes cell_sizes[i].
    num_layers = 3
    model_settings = dict()
    model_settings['num_unroll'] = 12
    model_settings['num_layers'] = num_layers
    model_settings['cell_size'] = [64] * num_layers  # one cell size per layer
    model_settings['input_classes'] = char_dict.num_char_classes() + 1
    model_settings['label_classes'] = char_dict.num_label_classes() + 1
    model_settings['learning_rate'] = 0.001 # Initial learning rate
    model_settings['l2_regularization'] = 0.01
    model_settings['cell_type'] = 'lstm'    # 'lstm' / 'gru' / basic RNN
    model_settings['direction'] = 1         # 1 = forward-only, 2 = bidirectional

    model = CkModel(model_settings)
    model.build_graph()
| [
"pucktada@highgarden.local"
] | pucktada@highgarden.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.