blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57f18d2d17a7cb6eca7d70d8d978cc510de04737 | 818462196c483ce25ceb84cdab386d4a5a5db131 | /customusers/migrations/0001_initial.py | a62390d5e27e1136f0d4e40625cf26c3f140b41c | [] | no_license | denn-is-njeruh/DjangoMutltipleUsers | 0239c09fd80cc630513428d6e8ac84fa051a8190 | a6f0d58183b01efe626e33842fa62388e08cb61a | refs/heads/main | 2023-08-21T21:15:32.523655 | 2021-10-05T05:33:12 | 2021-10-05T05:33:12 | 413,668,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # Generated by Django 3.2.7 on 2021-10-05 04:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the custom ``User`` model.

    Auto-generated by Django's ``makemigrations``; do not edit field
    definitions here after it has been applied anywhere.
    """

    # First migration in this app's history.
    initial = True

    dependencies = [
        # Needs auth's Group/Permission tables for the M2M fields below.
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Hashed password, as managed by django.contrib.auth.
                ('password', models.CharField(max_length=128, verbose_name='password')),
                # Unique email — presumably the login identifier; confirm
                # against the model's USERNAME_FIELD.
                ('email', models.EmailField(max_length=254, unique=True)),
                ('name', models.CharField(blank=True, max_length=254, null=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                # Stamped automatically at row creation.
                ('date_joined', models.DateTimeField(auto_now_add=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                # Meta flag carried over from the abstract base model.
                'abstract': False,
            },
        ),
    ]
| [
"dennis.njeru@student.moringaschool.com"
] | dennis.njeru@student.moringaschool.com |
8a2150267801d8dbbb8be912f431c32e54340802 | 55a661e7f50d326b45f1419b694d6b93a218c70f | /code/layers.py | 0026ea81113086bfdeb76239594c99f87d9d0c41 | [] | no_license | pearsonlab/gbds | 9e3f918b5e2ed6e312c87f9a91f2a0fd3c50f8cf | ddccec69e774d867548b5db2cb0db9e670450955 | refs/heads/master | 2021-01-22T18:50:39.810108 | 2017-06-08T21:02:10 | 2017-06-08T21:02:10 | 85,121,733 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,276 | py | """
The MIT License (MIT)
Copyright (c) 2017 Shariq Iqbal
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import theano
import lasagne
import theano.tensor as T
import numpy as np
class DLGMLayer(lasagne.layers.Layer):
    """
    One stochastic layer of a Deep Latent Gaussian Model, following
    "Stochastic Backpropagation and Approximate Inference in Deep
    Generative Models" (Rezende, Mohamed & Wierstra, 2014).

    incoming (Lasagne Layer): preceding layer in DLGM
    num_units (int): number of output units in this layer
    srng (theano RandomState): random number generator
    rec_nets (dict of lasagne NNs): networks parameterizing the
        recognition (encoding) model: 'mu_net', 'u_net', 'unc_d_net'
    k (float): regularization strength on the generative weights
    output_layer (bool): if True, this is the final layer of the DLGM and
        noise handling in get_output_for differs (see there)
    extra_noise (float): std of optional extra noise added to the
        recognition-model sample
    """
    def __init__(self, incoming, num_units, srng, rec_nets, k,
                 output_layer=False, extra_noise=0.01,
                 param_init=lasagne.init.Normal(0.01),
                 nonlinearity=lasagne.nonlinearities.rectify,
                 **kwargs):
        super(DLGMLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[1]
        self.srng = srng
        self.num_units = num_units
        self.output_layer = output_layer
        self.extra_noise = extra_noise

        # Initialize generative/decoding Parameters
        self.W = self.add_param(param_init, (num_inputs, num_units),
                                name='W')
        self.b = self.add_param(param_init, (num_units,), name='b')
        self.unc_G = self.add_param(param_init, (num_units, num_units),
                                    name='unc_G')
        # Softplus keeps the diagonal positive; the strictly lower triangle
        # is left unconstrained, so G acts as a lower-triangular factor.
        self.G = (T.diag(T.nnet.softplus(T.diag(self.unc_G))) +
                  T.tril(self.unc_G, k=-1))
        self.nonlinearity = nonlinearity

        # regularization term on generative weights
        self.k = k

        # Load recognition/encoding Parameters
        self.mu_net = rec_nets['mu_net']
        self.u_net = rec_nets['u_net']
        self.unc_d_net = rec_nets['unc_d_net']

        # Register the recognition networks' parameters on this layer so
        # they are returned by lasagne.layers.get_all_params and trained.
        rec_params = (lasagne.layers.get_all_params(self.mu_net) +
                      lasagne.layers.get_all_params(self.u_net) +
                      lasagne.layers.get_all_params(self.unc_d_net))
        for param in rec_params:
            self.add_param(param, param.shape.eval())

    def calculate_xi(self, postJ):
        """
        Calculate the latent sample xi for a batch, given samples of J from
        the posterior. Stores batch_mu/batch_u/batch_unc_d and the derived
        per-sample covariance statistics (batch_Tr_C, batch_ld_C, batch_R)
        plus the reparameterized sample batch_xi as symbolic attributes.
        """
        # get output of rec model
        self.batch_mu = lasagne.layers.get_output(self.mu_net, inputs=postJ)
        self.batch_u = lasagne.layers.get_output(self.u_net, inputs=postJ)
        self.batch_unc_d = lasagne.layers.get_output(self.unc_d_net,
                                                     inputs=postJ)
        # add extra dim to batch_u, so it gets treated as column vectors when
        # iterated over
        self.batch_u = self.batch_u.reshape(
            (self.batch_u.shape[0], self.batch_u.shape[1], 1))

        def get_cov(u, unc_d):
            # convert output of rec model to rank-1 covariance matrix
            # C = (D + u u^T)^{-1} via Sherman–Morrison.
            # use softplus to get positive constrained d, minimum of -15
            # since softplus will turn low numbers into 0, which become NaNs
            # when inverted
            d = T.nnet.softplus(T.maximum(unc_d, -15))
            D_inv = T.diag(1.0 / d)
            eta = 1.0 / (u.T.dot(D_inv).dot(u) + 1.0)
            C = D_inv - eta * D_inv.dot(u).dot(u.T).dot(D_inv)
            Tr_C = T.nlinalg.trace(C)
            ld_C = T.log(eta) - T.log(d).sum()  # eq 20 in DLGM
            # coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))
            # simplified coefficient below is more stable as u -> 0
            # original coefficient from paper is above
            coeff = eta / (1 + T.sqrt(eta))
            # R is a factor with R R^T = C, used for reparameterized sampling.
            R = T.sqrt(D_inv) - coeff * D_inv.dot(u).dot(u.T).dot(T.sqrt(D_inv))
            return Tr_C, ld_C, R

        # Map get_cov over the batch dimension.
        (self.batch_Tr_C, self.batch_ld_C, self.batch_R), _ = theano.scan(
            fn=get_cov, outputs_info=None, sequences=[self.batch_u,
                                                      self.batch_unc_d])
        # xi = mu + R * eps, eps ~ N(0, I) (reparameterization trick).
        self.batch_xi = (self.batch_mu +
                         T.batched_dot(self.batch_R,
                                       self.srng.normal(
                                           (self.batch_R.shape[0],
                                            self.num_units))))

    def get_ELBO(self, length):
        """
        Get this layer's contribution to the ELBO.
        length (theano symbolic int): length of current batch
        Requires calculate_xi to have been called first (uses batch_mu,
        batch_Tr_C, batch_ld_C).
        """
        # KL divergence between posterior and N(0,1) prior.
        # NOTE(review): the sqrt around the summed squared means is unusual —
        # the standard Gaussian KL uses sum(mu**2) directly (eq. 18 in the
        # DLGM paper); confirm the sqrt is intentional.
        KL_div = 0.5 * (T.sqrt((self.batch_mu**2).sum(axis=1)).sum() +
                        self.batch_Tr_C.sum() - self.batch_ld_C.sum() -
                        length)
        # L2-style penalty on generative weight norms, scaled by 1/k.
        weight_reg = ((0.5 / self.k) *
                      T.sqrt((self.W**2).sum()) *
                      T.sqrt((self.G**2).sum()))
        return -(weight_reg + KL_div)

    def get_output_for(self, input, add_noise=False, use_rec_model=False,
                       **kwargs):
        """
        Forward pass: nonlinearity(input) @ W + b, plus the noise term xi @ G.

        use_rec_model: take xi from the recognition model (calculate_xi must
            have been called); otherwise draw xi ~ N(0, I).
        add_noise: add extra_noise-scaled Gaussian noise to the recognition
            sample; for the output layer, also suppresses the xi @ G term
            (see branch below).
        """
        activation = self.nonlinearity(input).dot(self.W) + self.b

        if use_rec_model:
            # use sample from rec model
            xi = self.batch_xi
            if add_noise:  # additional noise
                xi += self.extra_noise * self.srng.normal(self.batch_xi.shape)
        else:
            # pure random input
            xi = self.srng.normal((input.shape[0], self.num_units))

        # Hidden layers always inject xi @ G; the output layer injects it
        # only when add_noise is False. Per the original comment: "we want
        # the mean when training, so don't add noise to output of last
        # layer when training."
        if not self.output_layer:
            activation += T.dot(xi, self.G)
        elif not add_noise:
            activation += T.dot(xi, self.G)
        return activation

    def get_output_shape_for(self, input_shape):
        # Batch size is preserved; feature dimension becomes num_units.
        return (input_shape[0], self.num_units)
class PKBiasLayer(lasagne.layers.Layer):
    """
    Adds stochastic, mode-dependent biases to its input.

    Each of ``num_biases`` bias vectors is drawn from a learned diagonal
    Gaussian (mean ``m``, std ``exp(log_s)``); ``mode`` selects (and
    weights) which biases are active via a dot product.

    Default modes:
        0: normal, no biases added
        1: saline and DLPFC, bias 0 is added
        2: saline and DMPFC, bias 1 is added
        3: muscimol and DLPFC, biases 0 and 2 are added
        4: muscimol and DMPFC, biases 1 and 3 are added
    """

    def __init__(self, incoming, srng, params, param_init=lasagne.init.Normal(0.01),
                 num_biases=4, **kwargs):
        super(PKBiasLayer, self).__init__(incoming, **kwargs)
        n_inputs = self.input_shape[1]
        self.srng = srng
        self.mode = T.zeros(num_biases)
        # Laplace prior scale, cast to the configured float width.
        self.k = np.cast[theano.config.floatX](params['k'])

        bias_shape = (num_biases, n_inputs)
        self.m = self.add_param(param_init, bias_shape, name='m')
        self.log_s = self.add_param(param_init, bias_shape, name='log_s')
        # Optimize log_s unconstrained; exp keeps the std strictly positive.
        self.s = T.exp(self.log_s)

        self.draw_biases()
        self.draw_on_every_output = True

    def set_mode(self, mode):
        """Select which bias vectors are active."""
        self.mode = mode

    def draw_biases(self):
        """Resample the bias vectors via the reparameterization trick."""
        eps = self.srng.normal(self.s.shape)
        self.biases = self.m + eps * self.s

    def get_ELBO(self, nbatches):
        """
        Contribution of these biases to the ELBO, normalized by the number
        of batches in the dataset.
        """
        # Laplace(0, k) log-density of the sampled biases ...
        log_prior = -(T.abs_(self.biases) / self.k + T.log(2 * self.k)).sum()
        # ... plus the Gaussian posterior entropy term.
        entropy = T.log(self.s).sum()
        return (log_prior + entropy) / nbatches

    def get_output_for(self, input, **kwargs):
        if self.draw_on_every_output:
            self.draw_biases()
        weights = self.mode.astype(theano.config.floatX).reshape((1, -1))
        return input + weights.dot(self.biases)
class PKRowBiasLayer(lasagne.layers.Layer):
    """
    This layer draws different biases (depending on the mode)
    from a normal distribution, then adds them to the input.
    This layer has sparsity at the row level (a gamma precision per bias
    row), instead of the individual sparsity of the PKBiasLayer.
    Default modes are as follows:
        0: normal, no biases added
        1: saline and DLPFC, bias 0 is added
        2: saline and DMPFC, bias 1 is added
        3: muscimol and DLPFC, biases 0 and 2 are added
        4: muscimol and DMPFC, biases 1 and 3 are added
    """
    def __init__(self, incoming, srng, params, param_init=lasagne.init.Normal(0.01),
                 num_biases=4, **kwargs):
        super(PKRowBiasLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[1]
        self.mode = T.zeros(num_biases)
        self.srng = srng
        # parameters on gamma prior over row precisions
        self.a = np.cast[theano.config.floatX](params['a'])  # shape
        self.b = np.cast[theano.config.floatX](params['b'])  # rate
        # learnable posterior parameters
        # normal dist over biases
        self.mu = self.add_param(param_init, (num_biases, num_inputs),
                                 name='mu')
        self.unc_sig = self.add_param(param_init, (num_biases, num_inputs),
                                      name='unc_sig')
        # gamma over rows (one precision per bias row, broadcast over cols)
        self.alpha = theano.shared(value=self.a * np.ones((num_biases, 1)),
                                   name='alpha', broadcastable=[False, True])
        self.beta = theano.shared(value=self.b * np.ones((num_biases, 1)),
                                  name='beta', broadcastable=[False, True])
        # Closed-form coordinate update for alpha: a + num_inputs/2.
        # NOTE(review): += turns self.alpha from a shared variable into a
        # symbolic expression — confirm that is intended.
        self.alpha += (num_inputs / 2.0)
        # standard deviation will always be positive but optimization over
        # unc_sig can be unconstrained
        self.sigma = T.nnet.softplus(self.unc_sig)
        self.draw_biases()
        self.draw_on_every_output = True

    def set_mode(self, mode):
        # Select which bias rows are active (see class docstring).
        self.mode = mode

    def draw_biases(self):
        # Reparameterized sample: gamma = mu + sigma * eps, eps ~ N(0, I).
        self.gamma = self.mu + self.srng.normal(self.sigma.shape) * self.sigma

    def coord_update(self):
        # Closed-form coordinate-ascent update for the gamma rate parameter.
        self.beta = self.b + 0.5 * (self.mu**2 + self.sigma**2).sum(axis=1,
                                                                    keepdims=True)
        # Keep the (num_biases, 1) column broadcastable over input columns.
        self.beta = T.addbroadcast(self.beta, 1)

    def get_ELBO(self, nbatches):
        """
        Return the contribution to the ELBO for these biases
        Normalized by nbatches (number of batches in dataset)
        """
        self.coord_update()
        # Expected log density of biases under the N(0, 1/precision) prior
        # with the gamma posterior over precision (E[log tau] = psi - log beta).
        ELBO = (-0.5 * (self.mu**2 + self.sigma**2) * (self.alpha / self.beta) +
                0.5 * (T.psi(self.alpha) - T.log(self.beta)) -
                0.5 * T.log(2 * np.pi)).sum()
        # Expected log density of the gamma prior on precisions.
        ELBO += ((self.a - 1) * (T.psi(self.alpha) - T.log(self.beta)) -
                 self.b * (self.alpha / self.beta) + self.a * T.log(self.b) -
                 T.gammaln(self.a)).sum()
        # entropy of the Gaussian posterior over biases ...
        ELBO += (0.5 * T.log(2 * np.pi) + 0.5 + T.log(self.sigma)).sum()
        # ... and of the gamma posterior over precisions.
        ELBO += (self.alpha - T.log(self.beta) + T.gammaln(self.alpha) +
                 (1 - self.alpha) * T.psi(self.alpha)).sum()
        return ELBO / nbatches

    def get_output_for(self, input, **kwargs):
        if self.draw_on_every_output:
            self.draw_biases()
        # Weight the sampled bias rows by the current mode vector.
        act_biases = self.mode.astype(theano.config.floatX).reshape((1, -1)).dot(self.gamma)
        return input + act_biases
| [
"shariqiqbal2810@users.noreply.github.com"
] | shariqiqbal2810@users.noreply.github.com |
3a58a95ff9ca04c8a9228ff5e3822cfa35a2e6dc | 3ccc43f582471af88c90d52bcba0142186d9829e | /src/sorter.py | 3cfc650ed0c98ddf50f57f4e7b863c29b968c8e6 | [] | no_license | sd2017/typer | bc6dded8acfff5c773b854686a2f81c478db5da3 | e0e1e1604598f8d5be492b6b646c02421e809e1c | refs/heads/master | 2021-05-03T12:19:44.672193 | 2016-10-17T00:51:50 | 2016-10-17T00:51:50 | 70,138,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | import linecache
import strategy
import strategy_eng
import logger
import logging
class Sorter:
    """Reads lines from a text file and buckets aligned words by their
    strategy-computed index into a shared dictionary of sets."""

    def __init__(self, loggeri, filename, dicti, strategy):
        self.logger = loggeri
        self.filename = filename
        self.dicta = dicti
        # Pre-seeded sample line, used until line_read() loads a real one.
        self.line = "the most noticeable members of the Reform Club, though he seemed"
        self.strategy = strategy
        self.linecache = linecache
        self.linenum = 1

    def line_read(self):
        """Load the current line from the file and advance the cursor."""
        self.line = self.linecache.getline(self.filename, self.linenum)
        self.linenum += 1

    def line_to_dictionary(self):
        """Split the current line; index each aligned word into dicta."""
        for token in self.line.split(self.strategy.seperator):
            aligned = self.strategy.align(token)
            bucket = self.strategy.index(aligned)
            # Words the strategy cannot index are dropped.
            if bucket is not None:
                self.dicta.setdefault(bucket, set()).add(aligned)

    def sort(self, num):
        """Consume the next `num` lines of the file."""
        for _ in range(num):
            self.line_read()
            self.line_to_dictionary()

    def info(self):
        """Log the accumulated dictionary and its keys."""
        self.logger.log(logging.INFO, self.dicta)
        self.logger.log(logging.INFO, self.dicta.keys())
if __name__=="__main__":
    # Demo entry point: builds a Sorter over the English strategy and indexes
    # the pre-seeded default line (no file lines are read here).
    # NOTE(review): logger.Loggerf() vs logger.Logger() — confirm both names
    # exist in the logger module and that the mismatch is intended.
    sorter=Sorter(logger.Loggerf(),"../data/words_80day10.txt",{},strategy_eng.StrategyEng(logger.Logger()))
    sorter.line_to_dictionary()
    sorter.info()
"sd2017@walla.co.il"
] | sd2017@walla.co.il |
bc8fbb48a26fbbb57765c7ee73f93db6fb9cad11 | a05fdeb2958b16640fd98120a1898610b9a88467 | /ResNet_tf2/source/network_models/resnet50.py | da9bf7270713d57796d2af4c889a4cc3feadc453 | [] | no_license | kawayoshiHirota/endscope_diagnosis | 15bd6197512736a207010db2c4adb864d03e4735 | 02006f4db309bfb301707ddb0f67ad391d9fa885 | refs/heads/main | 2023-03-08T15:04:19.595681 | 2021-02-24T05:01:40 | 2021-02-24T05:01:40 | 338,940,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,572 | py | # -*- coding: utf-8 -*-
import os
import warnings
from tensorflow.keras import models
from tensorflow.keras import backend
from tensorflow.keras import layers
import tensorflow.keras.utils as keras_utils
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2)):
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def ResNet50(input_tensor=None,
input_shape=None,
pooling=None,
classes=3,
**kwargs):
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
x = layers.Conv2D(64, (7, 7),
strides=(2, 2),
padding='valid',
kernel_initializer='he_normal',
name='conv1')(x)
x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = layers.Activation('relu')(x)
x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='fc1000')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='resnet50')
return model
| [
"shohei@hirota-mbp.local"
] | shohei@hirota-mbp.local |
7860766fce7868311fe335a79413268f3035d2e4 | 20670e0ee8673317d90e22f9c99a230c5c810877 | /interview/urls.py | ddccc499cdf4513f32930b56b7519b17c5b9912e | [] | no_license | SameerRokade/video-interview-platform | a98871d831508c18e7a314ecc12c00861d3ecc8f | c4d832fd1163daea787fbfa9f0a95d3fdf3f1cba | refs/heads/master | 2023-06-17T06:06:17.078647 | 2021-07-14T22:49:29 | 2021-07-14T22:49:29 | 386,064,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('record/', views.record, name='record'),
path('ajaxHandler/<int:num_question>/', views.getQuestionAjaxHandler, name='ajaxHandler'),
path('create-interview/', views.createInterview, name='create-interview'),
path('finish-interview/', views.interview_is_finished, name='finish-interview'),
path('success-interview/', views.success_interview, name='success-interview'),
] | [
"sameerrokade5@gmail.com"
] | sameerrokade5@gmail.com |
cde14b6cc471a81ced9162a70e2bbfd34f16cf3e | 2be837d9b230753853cc5bb1ef8339196462f4be | /manage.py | a11d0815cdd16e20353ceae1ed6608d63558369d | [] | no_license | DensonNgumo/deenze-music | 0b2747c8698f317bb5bdb1d73db1c44094a6eddc | 3ae3d1ad600f718a25b0e0599d555dcc344b58ae | refs/heads/master | 2021-01-02T22:40:55.627175 | 2017-08-07T17:20:09 | 2017-08-07T17:20:09 | 99,366,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before running any command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "deenze.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch the command-line arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"ngumohd@gmail.com"
] | ngumohd@gmail.com |
5bd0e53ba54a4a57cf01aa9a0f830f42c969bd2f | e08e7bb643b81899d261bbdada63754eb32da2e8 | /demos/helloworld/jinja2-support/main.py | fff720215c30da226ea87bdea8861ee34f58e750 | [
"Apache-2.0"
] | permissive | tao12345666333/app-turbo | 95baa0e0d7f7172183591c2bc177efc9ae0e1b37 | 8717ba5631e47c476e277c3a897d85b5a93f9082 | refs/heads/master | 2020-12-25T04:45:26.575354 | 2016-12-11T15:35:12 | 2016-12-11T15:35:12 | 31,700,837 | 0 | 0 | null | 2015-03-05T07:14:08 | 2015-03-05T07:14:08 | null | UTF-8 | Python | false | false | 478 | py | #-*- coding:utf-8 -*-
from tornado.options import define, options
import tornado.options
import setting
import turbo.register
import turbo.app
#uncomment this to init state manager: store
#import store

# Register the turbo application (routes and settings) at import time.
turbo.register.register_app(setting.SERVER_NAME, setting.TURBO_APP_SETTING, setting.WEB_APPLICATION_SETTING, __file__, globals())

# HTTP listen port; overridable with --port on the command line.
define("port", default=8888, type=int)

if __name__ == '__main__':
    # Parse tornado command-line options, then start the server loop.
    tornado.options.parse_command_line()
    turbo.app.start(options.port)
"zhyq0826@gmail.com"
] | zhyq0826@gmail.com |
b32f8c39c81c04f7be2b22b229a7687ece7a70fd | ccbb20f765ed4c4804af9e9141710e9eb8c36567 | /news/urls.py | 4f2ddc1cb65e9932f0e26bc8194a0b74c41bb51c | [] | no_license | sabyrbekov/newsblog | 7f9d84877f8ff6739e2f1b0c9b3b10b285b1568b | 014527cc09fc6cc753dd777ab71f597819a9e57b | refs/heads/master | 2023-01-30T01:24:23.769452 | 2020-12-06T18:44:37 | 2020-12-06T18:44:37 | 319,101,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | """news URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): blogs and users are both mounted at the root prefix;
    # Django tries includes in order, so overlapping routes resolve to
    # blogs first — confirm there are no unintended collisions.
    path('', include('blogs.urls')),
    path('', include('users.urls')),
    # Built-in auth views (login, logout, password reset, ...).
    path('accounts/', include('django.contrib.auth.urls')),
]
| [
"aza10k@mail.ru"
] | aza10k@mail.ru |
9e1564d5ecf85014989676a983939906ad716413 | a709c22084dd15d0e162416569d811986cb52d3b | /solutions/problem20.py | 71bc3b960dcd686a8540796bf8800fe39e37e1a6 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | wy/ProjectEuler | 1fe137364293d373fb2e3738bcaa01969fc2d601 | 842d45994fe76ef0ea70dbe6b6f2a559078f45f3 | refs/heads/master | 2020-09-16T06:49:36.345278 | 2017-06-27T22:03:33 | 2017-06-27T22:03:33 | 94,484,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # coding: utf8
# Author: Wing Yung Chan (~wy)
# Date: 2017
import math
def digit_sum(number):
    """Return the sum of the decimal digits of ``number``.

    Generalizes the original flat script (which was hard-coded to 100!)
    so the digit sum of any non-negative integer can be computed.
    """
    return sum(int(digit) for digit in str(number))


if __name__ == '__main__':
    # Project Euler 20: sum of the digits of 100!.
    print(digit_sum(math.factorial(100)))
| [
"wingyungchan@gmail.com"
] | wingyungchan@gmail.com |
22731541dd107b93c3c9efbc5cf7a570dc5ca82e | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/pointcloud/hoverlabel/font/_color.py | ab79906bd35243f454c553b46a4a5012a256ba50 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 473 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``pointcloud.hoverlabel.font.color`` property.

    Auto-generated by the plotly.py code generator; manual edits will be
    lost on regeneration.
    """

    def __init__(
        self, plotly_name="color", parent_name="pointcloud.hoverlabel.font", **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below are overridable through kwargs via pop().
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
"noreply@github.com"
] | hugovk.noreply@github.com |
b1e8dff10bd7f06b4f82282a4a65779bd9215537 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/hamming/ccca6c36f7464721986b3e6214962018.py | 73c19fda905d9bdfc0b57e3963e8c92516bc8d4d | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 239 | py | def distance(strand1, strand2):
if len(strand1) != len(strand2):
raise Exception('length mismatch', len(strand1), len(strand2))
hd = 0
for i in xrange(len(strand1)):
if strand1[i] != strand2[i]:
hd += 1
return hd
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
66f4ca605b876deb7eac52d014d5a6b74acf04b6 | 6da1a1ca56acf3d45c3f96939fc4d299c0c571f1 | /dependencies/punica/cli/tool_cmd.py | e9bee5ce703b616c97a05599d7d25512dd357ca4 | [] | no_license | OnyxPayDev/SmartContractDemo | 87b510c85abba35dd564e747426274d8028e956c | 907aee784367bb339bb8a081f074bef5f4815d12 | refs/heads/master | 2020-08-15T12:05:58.858948 | 2019-10-15T19:02:58 | 2019-10-15T19:02:58 | 215,338,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import click
from punica.tool.tool import Tool
from .main import main
@main.group('tool', invoke_without_command=True)
@click.pass_context
def tool_cmd(ctx):
    """
    Data format conversion tool
    """
    # invoke_without_command=True makes click run this body even when no
    # subcommand is given; in that case print a hand-rolled usage summary.
    if ctx.invoked_subcommand is None:
        print('Usage: punica tool [OPTIONS] COMMAND [ARGS]...')
        print('')
        print(' ', 'Data format conversion tool.')
        print()
        print('Options:')
        print(' ', '-h, --help Show this message and exit.')
        print()
        print('Commands:')
        print(' ', 'decryptprivatekey decrypt privatekey')
        print(' ', 'transform transform data')
    else:
        # A subcommand was named; click dispatches to it after this returns.
        pass
@tool_cmd.command('transform')
@click.option('--addresstohex', nargs=1, type=str, default='', help='transform address to hex.')
@click.option('--stringtohex', nargs=1, type=str, default='', help='transform string to hex.')
@click.option('--hexreverse', nargs=1, type=str, default='', help='hex string reverse.')
@click.option('--inttohex', nargs=1, type=int, default=0, help='transform int to hex.')
def transform_cmd(addresstohex, stringtohex, hexreverse, inttohex):
    """
    transform data

    Exactly one conversion option is applied (first match wins); with no
    option set, a usage summary is printed.
    """
    if addresstohex != '':
        Tool.address_to_hex(addresstohex)
    elif stringtohex != '':
        Tool.str_to_hex(stringtohex)
    elif hexreverse != '':
        Tool.hex_reverse(hexreverse)
    elif inttohex != 0:
        # NOTE(review): 0 is both the default and a legal value, so
        # `--inttohex 0` falls through to the usage text — confirm intended.
        Tool.num_to_hex(inttohex)
    else:
        print('Usage: punica tool transform [OPTIONS]')
        print('')
        print(' ', 'transform data.')
        print()
        print('Options:')
        print(' ', '--addresstohex TEXT transform address to hex.')
        print(' ', '--stringtohex TEXT transform string to hex.')
        print(' ', '--hexreverse TEXT hex string reverse.')
        # Fixed: this option is registered as --inttohex; the usage text
        # previously advertised --numtohex, which click would reject.
        print(' ', '--inttohex INTEGER transform int to hex.')
        print(' ', '-h, --help Show this message and exit.')
        print()
@tool_cmd.command('decryptprivatekey')
@click.option('--key', nargs=1, type=str, default='', help='encrypted private key.')
@click.option('--address', nargs=1, type=str, default='', help='address.')
@click.option('--salt', nargs=1, type=str, default='', help='salt.')
@click.option('--n', nargs=1, type=int, default=16384, help='n.')
@click.option('--password', nargs=1, type=str, default='', help='password.')
def decryptprivatekey_cmd(key, address, salt, n, password):
    """
    decrypt privatekey
    """
    # No options supplied at all -> print usage and exit.
    # (and binds tighter than or: (key=='' and address=='' and salt=='')
    #  or (n==0 and password=='') — kept as in the original.)
    if key == '' and address == '' and salt == '' or n == 0 and password == '':
        print('Usage: punica tool decryptprivatekey [OPTIONS]')
        print('')
        print(' ', 'decrypt privatekey')
        print()
        print('Options:')
        print(' ', '--key TEXT encrypted private key.')
        print(' ', '--address TEXT address.')
        print(' ', '--salt TEXT salt.')
        print(' ', '--n TEXT n.')
        print(' ', '--password TEXT password.')
        print(' ', '-h, --help Show this message and exit.')
        print()
        return
    # Validate required options one by one, reporting the first missing
    # one — same order and messages as the original copy-pasted checks.
    for opt_name, opt_value in (('key', key), ('address', address),
                                ('salt', salt), ('password', password)):
        if opt_value == '':
            print('Error:')
            print("{0} should not be ''".format(opt_name))
            return
    Tool.decrypt_private_key(key, address, salt, n, password)
| [
"5020980+DancingAxolotl@users.noreply.github.com"
] | 5020980+DancingAxolotl@users.noreply.github.com |
0ddbf86f3bedb7bcc299a23d41f190b92dc242af | bc2c2d63ac18dfa6b5171ff97ad6b88f647dc282 | /mininet/wifi/util.py | 00b9511e070c1f3f235e10d02d277a5b7d286e1e | [
"LicenseRef-scancode-x11-stanford"
] | permissive | MatthiasEckhart/mininet-wifi | ca8cadccb62db7ce6221ab0dcf4af7a79a93e74e | 95392e59f82e1380730b0b3f4e375a04839316ce | refs/heads/master | 2020-03-16T00:52:39.895729 | 2018-05-11T09:17:26 | 2018-05-11T09:17:26 | 132,427,457 | 1 | 2 | null | 2018-05-07T08:03:36 | 2018-05-07T08:03:36 | null | UTF-8 | Python | false | false | 1,014 | py | "Utility functions for Mininet-WiFi"
from mininet.util import retry
def moveIntfNoRetry(intf, dstNode, printError=False):
    """Move interface to node, without retrying.
       intf: string, interface
       dstNode: destination Node
       printError: if true, print error
       Returns True for wireless nodes (Station/Car) and for APs whose
       interface name does not contain 'eth'; otherwise returns None."""
    from mininet.wifi.node import Station, Car, AP
    is_wireless_node = isinstance(dstNode, (Station, Car))
    is_wireless_ap = isinstance(dstNode, AP) and 'eth' not in str(intf)
    if is_wireless_node or is_wireless_ap:
        # Nothing is actually moved for wireless destinations; just
        # report success.
        return True
def moveIntf(intf, dstNode, printError=True,
             retries=3, delaySecs=0.001):
    """Move interface to node, retrying on failure.
    intf: string, interface
    dstNode: destination Node
    printError: if true, print error"""
    from mininet.wifi.node import AP
    # Access points are left alone; only non-AP nodes get the move.
    if isinstance(dstNode, AP):
        return
    retry(retries, delaySecs, moveIntfNoRetry, intf, dstNode,
          printError=printError)
| [
"ramonreisfontes@gmail.com"
] | ramonreisfontes@gmail.com |
7bacefcd596348b1f5cbe3fa92de6644588f16e7 | a1f1e51fd8c75775f6f28f578e0510cd5acefdd2 | /messaging/migrations/0012_auto_20200916_0702.py | 5161b2ade8cd722c03986ad1450215265f815b0b | [] | no_license | shantanu-tomar/teamit | 31740563bb5a10c2f32644d07c9fe9789fc10bd3 | 17fec26466485b8795c8e5a981d1a00ce0ced6af | refs/heads/master | 2022-12-21T07:12:49.485986 | 2020-09-28T12:04:18 | 2020-09-28T12:04:18 | 296,505,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # Generated by Django 3.0.8 on 2020-09-16 07:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Relaxes Message.recepient and Message.sender to nullable FKs that
    # are set to NULL when the referenced user is deleted (SET_NULL),
    # so messages survive account removal.  NOTE(review): "recepient"
    # matches the existing model field spelling; renaming it would need
    # its own migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('messaging', '0011_auto_20200916_0603'),
    ]
    operations = [
        migrations.AlterField(
            model_name='message',
            name='recepient',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='recepient', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='message',
            name='sender',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sender', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"shantanu.tomar94@gmail.com"
] | shantanu.tomar94@gmail.com |
094fb1998b4fb1a04b1860e17d4d7bcda5a15b28 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0848+249/sdB_PG_0848+249_lc.py | 3419530c1b53949ad19342e48dd7c6716eb727b3 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
    # Single gAperture photometry extraction for sdB_PG_0848+249:
    # NUV band, 30 s time bins, 0.00556 deg aperture with a background
    # annulus, written straight to the light-curve CSV (overwrite=True
    # clobbers any previous output).
    gAperture(band="NUV", skypos=[132.907667,24.697419], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_0848+249 /sdB_PG_0848+249_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
80ca757742ecfdb8fa368be024eac91a254813d2 | 023ef60f3ab5e97f436def9cbb5b62472223796e | /todo/models.py | 1f691bb9e485cb0eb221f45447315a6c9a871770 | [] | no_license | Sahouaneyassine/ToDo | 4cf4a537d47e25c8c6c7b5a7aaf3cf7ce732c587 | e657f9f2c5a28186b68130d0fb42a5a97b7f227d | refs/heads/main | 2023-02-17T11:52:29.313699 | 2021-01-06T23:44:08 | 2021-01-06T23:44:08 | 327,454,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Todo(models.Model):
    # A single to-do item belonging to one user.
    title=models.CharField(max_length=100)
    memo=models.TextField(blank=True)  # optional free-form notes
    created=models.DateTimeField(auto_now_add=True)  # set once on insert
    datecompleted=models.DateTimeField(null=True,blank=True)  # null while the item is open
    important=models.BooleanField(default=False)
    user=models.ForeignKey(User,on_delete=models.CASCADE)  # todos are deleted with their owner
| [
"sahouaneyassine1999@gmail.com"
] | sahouaneyassine1999@gmail.com |
4880c6673cf71e0c7ee5ecb34afce54a4736b043 | 48156b85839d832ecfe8fdf0a0e17b5ebf8b9460 | /75.findPeak.py | 1672766c60cae18647eadcca3b6c3ce2ede0c597 | [] | no_license | Aissen-Li/lintcode | 7dc2564fcec20667f073d9219fe049808c5de625 | 4d2a717956a75197ce1dfa1094cdd5ab3a1d2004 | refs/heads/master | 2020-11-28T16:43:21.760691 | 2020-01-14T15:08:45 | 2020-01-14T15:08:45 | 229,871,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class Solution:
"""
@param A: An integers array.
@return: return any of peek positions.
"""
def findPeak(self, A):
if A[1] > A[2]:
return 1
if A[len(A) - 2] > A[len(A) - 3]:
return len(A) - 2
start, end = 0, len(A) - 1
while start + 1 < end:
mid = (start + end) // 2
if A[mid] > A[mid + 1] and A[mid] > A[mid - 1]:
return mid
if A[mid] < A[mid + 1]:
start = mid + 1
else:
end = mid - 1
return start if A[start] >= A[end] else end
| [
"aissen_f@163.com"
] | aissen_f@163.com |
085e7bbd3a6c1d51945a3a15cfab6d8854d292a9 | 3a8edf3c09ff69dc654a6a3a3af2c6f5058ab3e7 | /C4_BT13.3.py | a7aaa33f23a82f4289c2444803ee29223626c362 | [] | no_license | Huynhngocthanh/shinzerorin | f05375966ebe86b314e260d1c2e3dd986d0fc234 | 5cb0baf715ba48331d3abeeb28f29c784a475440 | refs/heads/main | 2023-04-17T22:18:04.552717 | 2021-04-26T04:17:00 | 2021-04-26T04:17:00 | 314,985,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | import os,random
import string
t = input('Nhập tên thư mục: ') #Em muốn tạo thư mục trực tiếp trên python
path = 'C:\\Users\\MyPC\\Documents\\'
os.chdir(path)
os.mkdir(t)
file_name = input("Nhập tên file dữ liệu: ") #Em muốn tạo tên file trực tiếp trên python
n = int(input("Nhập tổng số lượng file với dung lượng 1MB-1024MB: ")) #vì 1MB=>1024KB nên số lượng file nằm từ khoảng 2->1048 files
i = 1
for i in range(n):
path1 = path + t
os.chdir(path1)
i = str(i)
f = open(file_name + i + '.txt','w+')
f.seek(1024*1000-1)
f.write(random.choice(string.ascii_lowercase))
if f.seek(0):
os.remove(f) #Lọc file 0KB
| [
"noreply@github.com"
] | Huynhngocthanh.noreply@github.com |
81e8042a40de433fce29be36bc546150bd69ec66 | 87e60b0504be11c6997f1b20b72e9428cc128342 | /python/cowbells/data/tqplot.py | 5ca6659a29428f017baacabacc79523cfcbe6ff4 | [] | no_license | brettviren/cowbells | 70a85856fdfc54526c847f115d5dc01ec85ec215 | 1ceca86383f4f774d56c3f159658518242875bc6 | refs/heads/master | 2021-01-10T18:44:41.531525 | 2014-04-09T15:17:29 | 2014-04-09T15:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,644 | py | #!/usr/bin/env python
'''
Make some plots from the TQ tree.
'''
import ROOT
import math
def liberate(tobj):
    # Tell ROOT not to own (and therefore not to auto-delete) this
    # object, so it lives as long as the Python reference does.
    ROOT.SetOwnership(tobj,0)
    return tobj
def draw_stats(pad, h, fitnum=111):
    """Draw histogram h on pad and, when ROOT created a stats box for
    it, configure which statistics and fit parameters it shows."""
    h.Draw()
    pad.Modified()
    pad.Update()
    box = h.FindObject("stats")
    if not box:
        return
    box.SetOptStat(1110)
    box.SetOptFit(fitnum)
    return
class Plots(object):
    # %-format cut template: accept only events whose tmin on channel
    # %(chn)d is within +/-30 ticks of 1530 -- the window where the LED
    # pulse is expected.
    expected_led_time = "abs(tmin[%(chn)d]-1530) < 30"
    def __init__(self, tree, canvas = None, pdffile = 'tqplot.pdf'):
        # tree: the TQ TTree to draw from; a default canvas is created
        # on demand when none is supplied.
        self.tree = tree
        self.pdffile = pdffile
        if not canvas:
            canvas = ROOT.TCanvas("tqtree","tqtree debug", 0,0, 1000, 700)
        self.canvas = canvas
    def cprint(self,extra=''):
        # Append the current canvas to the multi-page PDF.  extra is
        # '[' / ']' to open/close the file per ROOT's print protocol.
        self.canvas.Print('%s%s'%(self.pdffile,extra), 'pdf')
    def do_twoXtwo(self, what, chn=0):
        # Draw each named branch for channel chn in its own log-y pad of
        # a 2x2 canvas.  (The loop variable deliberately reuses `what`.)
        self.canvas.Clear()
        self.canvas.Divide(2,2)
        for count, what in enumerate(what):
            pad = self.canvas.cd(count+1)
            pad.SetLogy(True)
            self.tree.Draw("%s[%d]"%(what,chn))
        return
    def do_minmax(self, chn=0):
        # Per-event charge/time extrema.
        self.do_twoXtwo(['qmin','qmax','tmin','tmax'], chn)
    def do_stats(self, chn=0):
        # Per-event pedestal statistics.
        self.do_twoXtwo(['avg','mean','rms','sigma'], chn)
    def do_sumn(self, chn=0):
        # Above-threshold sample counts and sums at 3 and 4 sigma.
        self.do_twoXtwo(['n3','n4','sum3','sum4'], chn)
    def do_34(self, chn=0, maxq=400, opt="", logy=True, fit=(25,100)):
        # Pedestal-subtracted charge (n*mean - sum) at 3/4 sigma
        # thresholds: raw draws on the top row, binned histograms with an
        # optional Gaussian fit over the `fit` range on the bottom row.
        self.canvas.Clear()
        self.canvas.Divide(2,2)
        todraw = "n%(nsig)d[%(chn)d]*mean[%(chn)d] -sum%(nsig)d[%(chn)d]"
        for count,nsig in enumerate([3,4]):
            pad = self.canvas.cd(count+1)
            pad.SetLogy(logy)
            self.tree.Draw(todraw%locals(),"",opt)
        for count,nsig in enumerate([3,4]):
            pad = self.canvas.cd(count+3)
            pad.SetLogy(logy)
            h = liberate(ROOT.TH1F("spe%d"%nsig,'sum(ADC) >%d sigma above ped'%nsig,maxq,0,maxq))
            self.tree.Draw(todraw%locals()+">>spe%d"%nsig,"",opt)
            if fit:
                h.Fit("gaus","","", *fit)
            h.Draw()
            pad.Modified()
            pad.Update()
            stats = h.FindObject("stats")
            if stats:
                stats.SetOptStat(1110)
                stats.SetOptFit(111)
            continue
        return
    def do_34_50(self, chn=0, opt="", logy=True):
        # Zoomed (0-50 ADC) version of do_34 with the fit disabled.
        self.do_34(chn=chn, maxq=50, opt=opt, logy=logy,fit=None)
    def do_34vEntry(self, chn=0):
        # Pedestal-subtracted charge vs. entry number, to spot drifts
        # over the run; the bottom row restricts to the 0-400 window.
        self.canvas.Clear()
        self.canvas.Divide(2,2)
        measure = "n%(nsig)d[%(chn)d]*mean[%(chn)d]-sum%(nsig)d[%(chn)d]"
        for count,nsig in enumerate([3,4]):
            pad = self.canvas.cd(count+1)
            m = measure % locals()
            m += ':Entry$'
            c = ""
            print m
            self.tree.Draw(m,c,'colz')
        for count,nsig in enumerate([3,4]):
            pad = self.canvas.cd(count+3)
            m = measure % locals()
            c = "%s > 0 && %s < 400" % (m,m)
            m += ':Entry$'
            print m
            print c
            self.tree.Draw(m,c,'colz')
        return
    def do_fit(self, chn=0):
        # Fitted pedestal mean/sigma distributions, plus their evolution
        # with entry number (drawn COLZ).
        self.canvas.Clear()
        self.canvas.Divide(2,2)
        toplot = "mean[%(chn)d] sigma[%(chn)d] mean[%(chn)d]:Entry$ sigma[%(chn)d]:Entry$"
        toplot = toplot % locals()
        for count,what in enumerate(toplot.split()):
            pad = self.canvas.cd(count+1)
            opt = ""
            if 'Entry$' in what:
                opt = "COLZ"
            self.tree.Draw(what,"",opt)
            continue
        return
    def _fit_pe(self, chn=0, cuts=None,
                spe=(60,110), dpe=(115,220), tpe=(225,350), qmeas = 'qpeak'):
        '''
        Fit single/double PE peak of qpeak.

        Sequentially fits Gaussians to the 1/2/3 photo-electron peaks of
        the `qmeas` spectrum (each fitted component is subtracted before
        the next fit), then refits all three together, and prints the
        implied mean photo-electron count of the source.
        '''
        if cuts is None:
            cuts = self.expected_led_time
        nbins, minq, maxq = 500, 0, 500
        cuts = cuts%locals()
        what = "%(qmeas)s[%(chn)d]"%locals()
        h = liberate(ROOT.TH1F('hqpeak', "%s {%s}" % (qmeas, cuts,), nbins, minq, maxq))
        self.tree.Draw('%s >> hqpeak'%what, cuts)
        # 1 PE: fit in the spe window, then extend the function over the
        # full range so it can be subtracted bin-by-bin below.
        pe1 = liberate(h.Clone())
        pe1.Fit("gaus","L","",*spe)
        fit1 = pe1.GetFunction("gaus")
        fit1.SetRange(minq,maxq)
        fit1.SetLineColor(2)
        # 2 PE: fit the residual after removing the 1 PE component.
        pe2 = liberate(h.Clone())
        pe2.Add(fit1, -1)
        pe2.Fit("gaus","L","",*dpe)
        fit2 = pe2.GetFunction("gaus")
        fit2.SetRange(spe[0],maxq)
        fit2.SetLineColor(4)
        # 3 PE: fit the residual after removing the 2 PE component.
        pe3 = liberate(h.Clone())
        pe3.Add(fit2, -1)
        pe3.Fit("gaus","L","",*tpe)
        fit3 = pe3.GetFunction("gaus")
        #fit3.SetRange(dpe[0],maxq)
        fit3.SetLineColor(6)
        # Combined triple-Gaussian fit seeded from the individual fits.
        pe123 = liberate(h.Clone())
        dfit = liberate(ROOT.TF1("dfit","gaus(0)+gaus(3)+gaus(6)",10,tpe[1]))
        for ind in range(3):
            dfit.SetParameter(ind, fit1.GetParameter(ind))
            dfit.SetParameter(ind+3,fit2.GetParameter(ind))
            dfit.SetParameter(ind+6,fit3.GetParameter(ind))
        pe123.Fit(dfit,"L","",10,maxq)
        dfit = pe123.GetFunction("dfit")
        dfit.SetRange(10,maxq)
        dfit.SetLineColor(7)
        # One pad per fitting stage.
        self.canvas.Clear()
        self.canvas.Divide(2,2)
        pad = self.canvas.cd(1)
        draw_stats(pad, pe1)
        pad = self.canvas.cd(2)
        draw_stats(pad, pe2)
        pad = self.canvas.cd(3)
        draw_stats(pad, pe3)
        pad = self.canvas.cd(4)
        draw_stats(pad, pe123, 111111111)
        # With a1, a2 the areas of the 1/2 PE peaks, the source's mean PE
        # count is estimated as 2*a2/a1.
        a1 = fit1.Integral(minq,maxq)
        if not a1:
            print 'No fit 1'
            return
        a2 = fit2.Integral(minq,maxq)
        c1 = fit1.GetParameter(0)
        c2 = fit2.GetParameter(0)
        mu1 = fit1.GetParameter(1)
        mu2 = fit2.GetParameter(1)
        mupe = 2.0*a2/a1
        print 'Mean <PE> of source = 2*%.1f/%.1f = %.3f' %(a2,a1,mupe)
        # NOTE(review): mu2mu1_frac guards against mu1 == 0 but the print
        # below recomputes mu2/mu1 directly, which would raise
        # ZeroDivisionError in that case -- confirm intent.
        mu2mu1_frac = 0
        if mu1: mu2mu1_frac = mu2/mu1
        print 'Ratio of PE2/PE1: %.1f/%.1f = %.3f (~2?)' % (mu2,mu1,mu2/mu1)
        if mupe > 0:
            print 'Prob 0PE: %.3f' % (math.exp(-1*mupe),)
        return
    def do_pe_fits(self, chn=0, cuts = None):
        # Run the PE fit for each charge measure, one PDF page each.
        for qmeas in ['qpeak','qpeaks3','qpeaks4','qpeaks5','qwin']:
            self._fit_pe(chn=chn,qmeas=qmeas,cuts=cuts)
            self.cprint()
            continue
        return
    def do_interspersed_led_cuts(self):
        # Cuts to select LEDs interspersed with cosmic muon triggers
        self.canvas.Clear()
        self.canvas.Divide(2,2)
        pad = self.canvas.cd(1)
        pad.SetLogy(True)
        self.tree.Draw("mean[2]-qmin[2]","mean[2]-qmin[2]<1000")
        pad = self.canvas.cd(2)
        pad.SetLogy(True)
        self.tree.Draw("qnpeaks[0]","mean[2]-qmin[2]<100")
        pad = self.canvas.cd(3)
        pad.SetLogy(True)
        self.tree.Draw("tmin[0]","mean[2]-qmin[2]<100 && qnpeaks[0] == 1")
        pad = self.canvas.cd(4)
        pad.SetLogy(False)
        self.tree.Draw("qpeak[0]")
    def all(self, chn = 0):
        # Produce the full multi-page PDF: '[' opens the file, each do_*
        # page is appended in turn, and ']' closes it.
        self.cprint('[')
        for what in [
            'minmax','stats','fit','sumn',
            '34','34_50', '34vEntry',
            ]:
            meth = getattr(self, 'do_%s' % what)
            meth(chn)
            self.cprint()
        self.do_interspersed_led_cuts()
        self.cprint()
        self.do_pe_fits(chn)
        self.cprint(']')
if __name__ == '__main__':
    # Usage: tqplot.py <rootfile> [<pdffile>]
    import sys
    fp = ROOT.TFile.Open(sys.argv[1])
    tree = fp.Get("tq")
    try:
        pdf = sys.argv[2]
    except IndexError:
        pdf = None
        # NOTE(review): passing None here overrides Plots' default
        # 'tqplot.pdf' and makes cprint() build a 'None...' filename --
        # confirm whether the class default was intended instead.
    p = Plots(tree, pdffile=pdf)
    p.all()
| [
"bv@bnl.gov"
] | bv@bnl.gov |
32356ca667142349328e148f40149adb4f120c1a | 639494a7ac8fcd9f7cd00533669b1888cc626cd2 | /src/scripts/addDocument.py | c660da2de42009c234896cbc4889d1dfd83cb325 | [
"BSD-2-Clause"
] | permissive | kstaken/Syncato | 17fb859b1e0b7a8373b919f0e1a8d12b916881e3 | 860822e08b08b88b749961a1d61be902ce3b8ea3 | refs/heads/master | 2020-04-05T22:48:28.727982 | 2013-05-19T05:49:27 | 2013-05-19T05:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | #!/usr/bin/env python
#
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2003 Kimbro Staken. All rights reserved.
#
# Adds a new document to the database
import libxml2
import sys
sys.path.append('lib')
import WeblogUtil
# Command-line arguments: host, base URL of the target collection,
# credentials, and the path of the document file to upload.
if (len(sys.argv) != 6):
    print "Usage: addDocument.py host baseURL username password content"
    sys.exit()
host = sys.argv[1]
base = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
document = sys.argv[5]
content = open(document).read()
# POST the document body to the collection URL; HTTP 201 means created,
# and the new document's URL is returned in the Location header.
result = WeblogUtil.runRequest(host, username, password, "POST", base, content)
if (result.status == 201):
    print "Document URL: " + result.getheader("Location")
else:
    print str(result.status) + " " + result.reason
| [
"kstaken@kstaken.com"
] | kstaken@kstaken.com |
da20b663295dc181cdefdec4059f904a810546e2 | f86a8b55b6e983e694a453300b2b9a58806f73a0 | /src/embeddings.py | eeca4ddc3a4e7fd6719d4c030664cdcd1d780a17 | [
"MIT"
] | permissive | ArturPrzybysz/ReutersRNNClassifier | d56479fab4ef61d02a7705734007941c1a2e46a4 | fba90eccdb73832c4f8057ed43aa23e025f944d3 | refs/heads/master | 2020-05-07T10:22:44.572242 | 2019-04-15T12:13:58 | 2019-04-15T12:13:58 | 180,415,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | import csv
import numpy as np
import pandas as pd
from ReutersRNNClassifier.src.config import MAX_NUM_WORDS
def embeddings(glove_model_path: str, max_num_words: int = None):
    """Load GloVe word vectors and build an embedding matrix.

    Parameters
    ----------
    glove_model_path : str
        Path to a space-separated GloVe vector file (one word per line,
        followed by its vector components).
    max_num_words : int, optional
        Vocabulary size to reserve rows for.  Defaults to the
        project-wide MAX_NUM_WORDS, so existing callers are unchanged.

    Returns
    -------
    (numpy.ndarray, pandas.DataFrame)
        The (max_num_words + 1, emb_dim) embedding matrix -- row i holds
        the i-th vector from the file, remaining rows stay zero -- and
        the raw vector table indexed by word.
    """
    if max_num_words is None:
        max_num_words = MAX_NUM_WORDS
    words = pd.read_csv(glove_model_path, sep=" ", index_col=0, header=None, quoting=csv.QUOTE_NONE)
    vocab_len = max_num_words + 1
    emb_dim = words.iloc[0].shape[0]
    emb_matrix = np.zeros((vocab_len, emb_dim))
    # Fix: the original iterated over the full vocab_len - 1 range and
    # raised IndexError whenever the vector file held fewer words.
    n_vectors = min(vocab_len - 1, len(words))
    for i in np.arange(n_vectors):
        emb_matrix[i, :] = words.iloc[i]
    # Fix: report the number of vectors actually loaded, not the
    # (fixed) number of matrix rows.
    print('Found %s word vectors.' % n_vectors)
    return emb_matrix, words
| [
"prz.artur@gmail.com"
] | prz.artur@gmail.com |
eb93998c886f3467fdcd99b8c83ff8a9276b0aa1 | 74a1145f20945311d6223d92a5affac688a97622 | /Attraction/enums/DangerLevel.py | ffd0de398f91ddc10b8c89cba3e9e222469707f4 | [] | no_license | OrestSrasiv2001/Lab11 | 844073e91f7c351d50d9091d6f2c4ec2c53a8d21 | 78a68ab0f1b8a9361c4ac35be2c30d0abdc2b5b3 | refs/heads/master | 2020-06-01T03:00:46.705667 | 2019-06-06T16:03:27 | 2019-06-06T16:03:27 | 190,607,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from enum import Enum
class DangerLevel(Enum):
    """Ordered danger levels for an attraction, lowest to highest."""
    # Fix: the original had trailing commas on MIDDLE and HIGH, which
    # made their values the tuples (1,) and (2,) instead of plain ints,
    # inconsistent with LOW and MAXIMUM.
    LOW = 0
    MIDDLE = 1
    HIGH = 2
    MAXIMUM = 3
"stasiv.orest@gmail.com"
] | stasiv.orest@gmail.com |
94b494245f7b4c4498e64b18adc55b39bbe3f559 | 592e01558a8dafca01b143c2f063a196edc0ea77 | /05-django/02-required/Ninja_Gold_project/manage.py | 397915e9e628ee279674e531fcbb3be6d3e1c96d | [] | no_license | Python-November-2018/nshredz | 899ccb975cbb6779771f0e3555f82235a16556a6 | ad840b570546dc036eb135c5edb1a9de58e8b1f8 | refs/heads/master | 2020-04-03T22:34:31.629095 | 2018-12-18T21:01:58 | 2018-12-18T21:01:58 | 155,607,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for this project; points
    # Django at the project settings module before dispatching the
    # requested management command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Ninja_Gold_project.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"44245934+nick77-s@users.noreply.github.com"
] | 44245934+nick77-s@users.noreply.github.com |
6bbd854d7aa3af2c54a8a772e46e58e79118d2ad | 6857f021aebab3e5be9b6bb2b917026034739465 | /EasyLogistics/settings/base.py | 2a053944d4bff7b58b7a5b273fefb97cedeeb81b | [] | no_license | gauravgs06/EasyLogistics | 4e798c5878e96fce833caceaba0b9f1d9e965de4 | 509734971f11f2a222c4230ee61ae7fba3efdb6a | refs/heads/master | 2022-02-02T10:54:26.480448 | 2019-07-21T09:01:34 | 2019-07-21T09:01:34 | 167,551,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | """
Django settings for EasyLogistics project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
from .secrets import SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'EasyLogistics.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'EasyLogistics.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"gauravsharmags06@gmail.com"
] | gauravsharmags06@gmail.com |
cd7a6e39bddcd867989015fc0c40cc09c18bc796 | d86e9d59784097a7262fa9337585a36bd58a6d29 | /cvxbenchmarks/lib/data/epsilon/epopt/problems/hinge_l2.py | 41b8b42fd08be15cf32527c0526e7dc334f6548e | [] | no_license | nishi951/cvxbenchmarks | 2ae36e75c42c8bd35fafac98bad5d9d88168bd68 | 932141d8e4e929860011bf25c41e941e2f8fbd76 | refs/heads/master | 2021-01-11T07:23:32.260811 | 2018-09-15T22:23:14 | 2018-09-15T22:23:14 | 62,177,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """Standard SVM, i.e.. hinge loss w/ l2 regularization."""
from epopt.problems import problem_util
import cvxpy as cp
import epopt as ep
import numpy as np
import scipy.sparse as sp
def create(**kwargs):
    """Build the standard SVM primal (hinge loss + l2) as a cvxpy problem.

    kwargs are forwarded to problem_util.create_classification, which
    supplies the data matrix A and the label vector b.
    """
    A, b = problem_util.create_classification(**kwargs)
    lam = 1  # fixed l2 regularization weight
    x = cp.Variable(A.shape[1])
    f = ep.hinge_loss(x, A, b) + lam*cp.sum_squares(x)
    return cp.Problem(cp.Minimize(f))
| [
"nishimuramarky@yahoo.com"
] | nishimuramarky@yahoo.com |
87a1365fd6a9f6ccca02348485d3a70abebd022f | 34a26b713021f15d94d416b9728bac50d283ed5f | /interno_pymedigital-9.0/sale_order_invoice_amount/models/sale_order.py | 7dc71e3de0680f62a283f98ad86d6675e8ad4e35 | [] | no_license | Brahim820/odoo-1 | 7641b2a0ef411fb3b82f806a11e88b9880875a46 | d8ee18a7dc467ff250113a0a3df3fcf1e876b321 | refs/heads/master | 2020-04-07T10:58:09.517097 | 2018-11-19T16:56:45 | 2018-11-19T16:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,690 | py | # -*- encoding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.tools import float_is_zero
from openerp.exceptions import UserError
class SaleOrder(models.Model):
    _inherit = 'sale.order'
    @api.multi
    def action_invoice_create_from_amount(self, grouped=False, amount=0):
        """Create (partial) invoices for the given monetary amount.

        Walks each order's lines (smallest remaining quantity first) and
        invoices as much quantity as `amount` pays for; any surplus is
        invoiced as an advance payment against not-yet-invoiced quantity.
        Returns the list of created invoice ids.
        """
        if amount == 0:
            raise UserError(_('The amount to invoice should be greater than cero.'))
        for order in self:
            # Invoices are grouped per order, or per (partner, currency).
            group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)
            inv_obj = self.env['account.invoice']
            invoices = {}
            for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice):
                if group_key not in invoices:
                    inv_data = order._prepare_invoice()
                    invoice = inv_obj.create(inv_data)
                    invoices[group_key] = invoice
                elif group_key in invoices:
                    # Reusing an invoice: append this order's references.
                    vals = {}
                    if order.name not in invoices[group_key].origin.split(', '):
                        vals['origin'] = invoices[group_key].origin + ', ' + order.name
                    if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', '):
                        vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
                    invoices[group_key].write(vals)
                # NOTE(review): `line.discount or 0.0 / 100.0` binds as
                # `line.discount or (0.0 / 100.0)`, i.e. the percentage
                # is never divided by 100 -- confirm the intended formula.
                discount = 1 - (line.discount or 0.0 / 100.0)
                if line.price_unit > 0 and discount < 100:
                    paid_qty = amount / (line.price_unit * discount)
                else:
                    paid_qty = line.product_uom_qty
                # Invoice the lesser of what is owed and what was paid for.
                to_invoice = 0
                if line.qty_to_invoice > 0:
                    if paid_qty >= line.qty_to_invoice:
                        to_invoice = line.qty_to_invoice
                    else:
                        to_invoice = paid_qty
                name = line.name + ' desde ' + str(round(line.qty_invoiced, 2)) + ' a ' + str(
                    round(line.qty_invoiced + to_invoice, 2)) + ' de ' + str(round(line.product_uom_qty, 2))
                line.invoice_line_create_from_amount(invoices[group_key].id, to_invoice, name)
                amount -= to_invoice * line.price_unit
            if amount > 0:
                # Leftover money: invoice it as an advance payment against
                # quantities not yet invoiced, cheapest residual first.
                discount = 1 - (line.discount or 0.0 / 100.0)
                lines = order.order_line.filtered(lambda l: l.product_uom_qty - l.qty_invoiced > 0)
                for line in lines.sorted(
                        key=lambda l: (l.product_uom_qty - l.qty_invoiced) * l.price_unit):
                    if line.price_unit > 0 and discount < 100:
                        paid_qty = amount / (line.price_unit * discount)
                    else:
                        paid_qty = line.product_uom_qty
                    residual_qty = line.product_uom_qty - line.qty_invoiced
                    to_invoice = 0
                    if residual_qty > 0:
                        if round(paid_qty, 5) > round(residual_qty, 5):
                            to_invoice = residual_qty
                        else:
                            to_invoice = paid_qty
                    name = ' Pago anticipado: ' + line.name + ' desde ' + str(round(line.qty_invoiced, 2)) + ' a ' + str(
                        round(line.qty_invoiced + to_invoice, 2)) + ' de ' + str(round(line.product_uom_qty, 2))
                    line.invoice_line_create_from_amount(invoices[group_key].id, to_invoice, name)
                    amount -= to_invoice * line.price_unit
            if not invoices:
                raise UserError(_('There is no invoicable line.'))
            for invoice in invoices.values():
                if not invoice.invoice_line_ids:
                    raise UserError(_('There is no invoicable line.'))
                # If invoice is negative, do a refund invoice instead
                if invoice.amount_untaxed < 0:
                    invoice.type = 'out_refund'
                    for line in invoice.invoice_line_ids:
                        line.quantity = -line.quantity
                # Use additional field helper function (for account extensions)
                for line in invoice.invoice_line_ids:
                    line._set_additional_fields(invoice)
                # Necessary to force computation of taxes. In account_invoice, they are triggered
                # by onchanges, which are not triggered when doing a create.
                invoice.compute_taxes()
            # TODO: move this computation into the main function, to avoid
            # problems with functions that call super, as in the
            # l10n_ec_sri_sale module.
            resx = [inv.id for inv in invoices.values()]
            invx = self.env['account.invoice'].browse(resx)
            for i in invx:
                i.compute_sri_invoice_amounts()
        return [inv.id for inv in invoices.values()]
class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'
    @api.multi
    def invoice_line_create_from_amount(self, invoice_id, qty, name):
        """
        Create an invoice line. The quantity to invoice can be positive (invoice) or negative
        (refund).
        :param name: char
        :param invoice_id: integer
        :param qty: float quantity to invoice
        """
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        for line in self:
            # Skip quantities that round to zero at the UoM precision.
            if not float_is_zero(qty, precision_digits=precision):
                vals = line._prepare_invoice_line(qty=qty)
                # Override the generated description and link back to the SO line.
                vals.update({'name': name, 'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
                self.env['account.invoice.line'].create(vals)
| [
"ceduardodch@gmail.com"
] | ceduardodch@gmail.com |
d6412d262b12115aff9e4f9c44ad0910e0fa69e8 | b6d1a5740a4931c216a9f4e4a602e2c817c7270b | /72.py | 5f8e11e8fa55efaf8ef4dc6f7d466824400db848 | [] | no_license | Kavinkumaran/pri-codekata | 9645087019d4b972770d41af47f2fcf235b79464 | 1ffc2b228ed6b15829f4c28bba8aee0ecab53fa9 | refs/heads/master | 2020-06-19T04:33:34.604445 | 2019-07-10T18:43:16 | 2019-07-10T18:43:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | s=input()
chars = set(s)
# Prints "yes" exactly when the input contains both '0' and '1' and no
# other characters; everything else prints "no".
# NOTE(review): an all-'0' or all-'1' string yields "no" here -- if the
# task is "is this a binary string", a subset check may be intended.
if chars == {"1", "0"}:
    print("yes")
else:
    print("no")
"noreply@github.com"
] | Kavinkumaran.noreply@github.com |
f412ca0a2c5181ff18128b3529882d3a0380882b | 8fe1c908807f53fa489a9fae0afcec79b24e9aeb | /run.py | a3397d3e7b3c7c4b9dd45a0a4c21ce0a196e280e | [] | no_license | satwik77/Simple_LoginApp | 2fe8973e86b128b3a4b71149606149389416c6b4 | af8c4a08ef909985127fb6267a8701bbeb7ae288 | refs/heads/master | 2021-01-10T04:04:01.857690 | 2016-04-14T14:51:13 | 2016-04-14T14:51:13 | 55,737,211 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | #!flask/bin/python
from login_app import login_app
# Start Flask's built-in development server; debug=True enables the
# auto-reloader and interactive debugger (not for production use).
login_app.run(debug=True)
| [
"satwik55@gmail.com"
] | satwik55@gmail.com |
8dfbfec39bab4550e6585a3ead46beabf5f8759b | 70543b9ba43e3026b10d2e5b85589c5f3adbfff5 | /recommendation_engine/content_retrieval_v4.py | 7249f90634668352da79545511716e17fd0fe13c | [] | no_license | gitw3ll/content_recommendation_engine | 2ceb4256b398f01e135509d2380a0fd7548ac29f | 8624beacf470ebd5ad96313430b34c426c2672d9 | refs/heads/master | 2023-03-01T13:46:23.135697 | 2021-01-03T16:25:04 | 2021-01-03T16:25:04 | 279,657,049 | 0 | 0 | null | 2020-10-31T21:54:41 | 2020-07-14T17:59:07 | Jupyter Notebook | UTF-8 | Python | false | false | 2,617 | py | import numpy as np
import sqlite3
import dask.dataframe as dd
from collections import deque
class ContentRetrieval:
    """Recommends content to a user based on what the users with the
    most similar wellness scores have consumed."""
    def __init__(self):
        # Relative weight of each interaction type when scoring content.
        self.event_type_strength = {
            'VIEW': 1.0,
            'LIKE': 2.0,
            'BOOKMARK': 3.0,
            'COMMENT CREATED': 4.0,
            'FOLLOW': 5.0,
        }
        self.articles_df = None
        self.interactions_df = None
        # Fix: this was initialised as `score_data`, but every method
        # reads/writes `scores_data`, leaving a dead attribute; use one
        # name throughout.
        self.scores_data = None
        self.content_data = None
    def load_preprocess(self, database_directory):
        """Load the content, interaction and wellness-score tables from
        the SQLite database at `database_directory`, and build the merged,
        deduplicated per-(user, content) interaction-strength table in
        `self.content_data`.
        """
        conn = sqlite3.connect(database_directory)
        # NOTE(review): dask's read_sql_table documents a SQLAlchemy URI,
        # not a DB-API connection object -- confirm this works as-is.
        self.articles_df = dd.read_sql_table('content_data', conn)
        self.interactions_df = dd.read_sql_table('user_data', conn)
        df = self.articles_df.merge(self.interactions_df, on='content_id')
        df['eventStrength'] = df['event_type'].apply(lambda x: self.event_type_strength[x])
        df = df.drop_duplicates()
        # Aggregate interaction strength per (user, content, title).
        df = df.groupby(['person_id', 'content_id', 'title']).sum().reset_index()
        df['title'] = df['title'].astype('category')
        df['person_id'] = df['person_id'].astype('category')
        df['content_id'] = df['content_id'].astype('category')
        # Dense integer codes used by retrieve_content for its lookups.
        df['personId'] = df['person_id'].cat.codes
        df['contentId'] = df['content_id'].cat.codes
        self.content_data = df
        # Load user wellness score data.
        self.scores_data = dd.read_sql_table('scores_data', conn)
    def retrieve_content(self, user_id, wellness_metric, similar_users=3, content_per_user=5):
        """Return a flat list of titles: up to `content_per_user` per
        similar user, drawn from the `similar_users` users whose
        `wellness_metric` score is closest to `user_id`'s, excluding any
        candidate list identical to the user's own history.
        """
        user_content = list(self.content_data.loc[self.content_data['personId'] == user_id]['title'])
        user_content = deque([user_content])
        user_score = self.scores_data.at[user_id, wellness_metric]
        # Rows sorted by |score - user_score|; the user may be included.
        closest_emotional = self.scores_data.iloc[(self.scores_data[wellness_metric] - user_score).abs().argsort()[:similar_users]]
        closest_ids_list = list(closest_emotional['personId'])
        similar_content = deque([])
        for num in closest_ids_list:
            similar_content.append(list(self.content_data.loc[self.content_data['personId'] == num]['title'][:content_per_user]))
        # Drop candidate lists that exactly match the user's own history.
        for content in user_content:
            if content in similar_content:
                similar_content.remove(content)
        similar_content = np.concatenate(similar_content, axis=0).tolist()
        return similar_content
"noreply@github.com"
] | gitw3ll.noreply@github.com |
d5458814021de425558b79919347e857df1cc131 | b9da4c5de88d3f965b2d39f4174678e886d91c15 | /russian/posts_crawler.py | 51e935d248f124e4f5dd6a952d74bb01f4477abe | [
"MIT"
] | permissive | snail-fuji/troll2vec | b35e1aed88adf3e2e94f5feb5e875bb91cb79f47 | 3c77d52db56c2a4e9cab7249db7c906925f562ee | refs/heads/master | 2023-08-03T02:18:38.084553 | 2019-03-11T01:08:08 | 2019-03-11T01:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | import scrapy
import pdb
from tqdm import tqdm
PAGES = 1000
class PikabuSpider(scrapy.Spider):
    """Crawls the newest-posts feed of pikabu.ru, yielding one item
    (url, title, tags) per story card."""
    name = 'pikabu'
    # One listing-page request for each of pages 0 .. PAGES-1.
    start_urls = ['https://pikabu.ru/new?page=%d' % page for page in range(PAGES)]
    def parse(self, response):
        """Extract every story found on a single listing page."""
        for story in response.css('.stories-feed__container > .story'):
            item = {
                'url': story.css('a.story__title-link ::attr(href)').get(),
                'title': story.css('a.story__title-link ::text').get(),
                'tags': story.css('a.tags__tag ::text').extract(),
            }
            yield item
| [
"belchikov1996@gmail.com"
] | belchikov1996@gmail.com |
efb56228f8ae67a595e421d815297f1e01d08b2f | 60381c941ec5394ec255d5873a2bf0d662a12b52 | /24.py | 56ba76acabe17fff6f161b10c7eb2213950ca281 | [] | no_license | lee000000/leetcodePractice | 16fc154111a0f5561da8e3a7e9c1442bd5124567 | 552e2673d8b3022f6a4e8df234332c9ed653779e | refs/heads/master | 2020-01-23T21:31:56.135550 | 2017-01-02T20:49:33 | 2017-01-02T20:49:33 | 74,700,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | '''
24. Swap Nodes in Pairs
Given a linked list, swap every two adjacent nodes and return its head.
For example,
Given 1->2->3->4, you should return the list as 2->1->4->3.
Your algorithm should use only constant space. You may not modify the values
in the list, only nodes itself can be changed.
'''
from ListNode import *
from pprint import pprint
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Use the Solution instance itself as a dummy head: its `next`
        # attribute always points at the (possibly new) first node.
        anchor = self
        anchor.next = head
        # Rewire two nodes at a time:
        #   anchor -> first -> second -> tail
        # becomes
        #   anchor -> second -> first -> tail
        while anchor.next and anchor.next.next:
            first = anchor.next
            second = first.next
            tail = second.next
            anchor.next = second
            second.next = first
            first.next = tail
            anchor = first
        return self.next
def test():
    # Build 1->2->3->4->5->6, swap adjacent pairs, and print the result
    # (expected order: 2 1 4 3 6 5).
    a = [1, 2, 3, 4, 5, 6]
    head = List_to_Link(a).head
    sl = Solution()
    b = sl.swapPairs(head)
    Link_to_List(b).print_list()
if __name__ == "__main__":
    test()
| [
"licheng@utexas.edu"
] | licheng@utexas.edu |
24673abaadfc0876b37e11c910ea20939b0461a1 | e7823c85962f7b7b08339cbcf7aa05de422c0fe2 | /td/credentials.py | 05a309651f83069bd35e17910b624d0130bdcbe5 | [
"MIT"
] | permissive | Aftermath213/td-ameritrade-api | 1a8a4a63b98b2fef1543ef24b069de90f1ef9612 | e5132f13c883d9bd6d15f282662f548467b6ef55 | refs/heads/master | 2023-09-02T23:44:40.935626 | 2021-10-31T18:59:17 | 2021-10-31T18:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,506 | py | import json
import urllib
import pathlib
import webbrowser
from typing import Union
from datetime import datetime
from urllib.parse import parse_qs
from urllib.parse import urlparse
import requests
class TdCredentials():
"""
### Overview
----
TD Ameritrade uses an oAuth protocol
to authenticate it's users. The `TdCredential`
object helps the user manage the credentials to ensure
the are properly authenticated.
"""
def __init__(
self,
client_id: str,
redirect_uri: str,
credential_dict: dict = None,
credential_file: Union[str, pathlib.Path] = None
) -> None:
"""Initializes the `TdCredential` object."""
self._access_token = ''
self._refresh_token = ''
self._scope = []
self._token_type = ''
self._expires_in = 0
self._refresh_token_expires_in = 0
self._is_expired = True
self._client_id = client_id
self._redirect_uri = redirect_uri
self._refresh_token_expiration_time = 0
self._access_token_expiration_time = 0
self.resource_url = 'https://api.tdameritrade.com/'
self.version = 'v1/'
self.token_endpoint = 'oauth2/token'
self.authorization_url = 'https://auth.tdameritrade.com/auth?'
self.authorization_code = ""
self._loaded_from_file = False
self._file_path = ""
if credential_file:
if isinstance(credential_file, pathlib.Path):
credential_file = credential_file.resolve()
self._loaded_from_file = True
self._file_path = credential_file
self.from_credential_file(file_path=credential_file)
elif credential_dict:
self.from_credential_dict(token_dict=credential_dict)
else:
self.from_workflow()
@property
def redirect_uri(self) -> str:
"""Returns the user's redirect URI.
### Returns
----
str
The User's redirect URI.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.redirect_uri
"""
return self._redirect_uri
@property
def client_id(self) -> str:
"""Returns the Client ID.
### Returns
----
str
The users Client Id.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.client_id
"""
return self._client_id
@property
def access_token(self) -> str:
"""Returns the Access token.
### Returns
----
str
A valid Access Token.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.access_token
"""
return self._access_token
@property
def refresh_token(self) -> str:
"""Returns the Refresh token.
### Returns
----
str
A valid Refresh Token.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.refresh_token
"""
return self._refresh_token
@property
def refresh_token_expiration_time(self) -> datetime:
"""Returns when the Refresh Token will expire.
### Returns
----
datetime
The date and time of the refresh token
expiration.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.refresh_token_expiration_time
"""
return self._refresh_token_expiration_time
@property
def is_refresh_token_expired(self) -> bool:
"""Specifies whether the current Refresh Token is expired
or not.
### Returns
----
bool
`True` if the Refresh Token is expired,
`False` otherwise.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.is_refresh_token_expired
"""
exp_time = self.refresh_token_expiration_time.timestamp() - 20
now = datetime.now().timestamp()
return bool(exp_time < now)
def from_token_dict(self, token_dict: dict) -> None:
"""Converts a token dicitonary to a `TdCredential`
object.
### Parameters
----
token_dict : dict
A dictionary containing all the
original token details.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.from_dict(
token_dict={
'access_token': '',
'refresh_token': ',
'scope': '',
'expires_in': 0,
'refresh_token_expires_in': 0,
'token_type': ''
}
)
"""
self._access_token = token_dict.get('access_token', '')
self._refresh_token = token_dict.get('refresh_token', '')
self._scope = token_dict.get('scope', [])
self._token_type = token_dict.get('token_type', '')
self._expires_in = token_dict.get('expires_in', 0)
self._refresh_token_expires_in = token_dict.get(
'refresh_token_expires_in',
0
)
self._refresh_token_expiration_time = token_dict.get(
'refresh_token_expiration_time', 0
)
self._access_token_expiration_time = token_dict.get(
'access_token_expiration_time', 0
)
# Calculate the Refresh Token expiration time.
if isinstance(self._refresh_token_expiration_time, str):
self._refresh_token_expiration_time = datetime.fromisoformat(
self._refresh_token_expiration_time
)
elif isinstance(self._refresh_token_expiration_time, float):
self._refresh_token_expiration_time = datetime.fromtimestamp(
self._refresh_token_expiration_time
)
else:
self._calculate_refresh_token_expiration(
expiration_secs=self._refresh_token_expires_in
)
# Calculate the Access Token Expiration Time.
if isinstance(self._access_token_expiration_time, str):
self._access_token_expiration_time = datetime.fromisoformat(
self._access_token_expiration_time
)
elif isinstance(self._access_token_expiration_time, float):
self._access_token_expiration_time = datetime.fromtimestamp(
self._access_token_expiration_time
)
else:
self._calculate_access_token_expiration(
expiration_secs=self._expires_in,
)
self.validate_token()
def to_token_dict(self) -> dict:
"""Converts the TdCredential object
to a dictionary object.
### Returns
----
dict
A dictionary containing all the
original token details.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.to_dict()
"""
token_dict = {
'access_token': self._access_token,
'refresh_token': self._refresh_token,
'scope': self._scope,
'expires_in': self._expires_in,
'refresh_token_expires_in': self._refresh_token_expires_in,
'token_type': self._token_type,
'refresh_token_expiration_time': self.refresh_token_expiration_time.isoformat(),
'access_token_expiration_time': self.access_token_expiration_time.isoformat(),
}
return token_dict
def _calculate_refresh_token_expiration(self, expiration_secs: int) -> None:
"""Calculates the number of seconds until the refresh token
expires.
### Parameters
----
expiration_secs : int
The number of seconds until expiration.
"""
expiration_time = datetime.now().timestamp() + expiration_secs
self._refresh_token_expiration_time = datetime.fromtimestamp(
expiration_time
)
def _calculate_access_token_expiration(self, expiration_secs: int) -> None:
"""Calculates the number of seconds until the access token
expires.
### Parameters
----
expiration_secs : int
The number of seconds until expiration.
"""
expiration_time = datetime.now().timestamp() + expiration_secs
self._access_token_expiration_time = datetime.fromtimestamp(
expiration_time
)
@property
def access_token_expiration_time(self) -> datetime:
"""Returns when the Access Token will expire.
### Returns
----
datetime
The date and time of the access token
expiration.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.access_token_expiration_time
"""
return self._access_token_expiration_time
@property
def is_access_token_expired(self) -> bool:
"""Specifies whether the current Access Token is expired
or not.
### Returns
----
bool
`True` if the Access Token is expired,
`False` otherwise.
### Usage
----
>>> td_credential = TdCredentials()
>>> td_credential.is_access_token_expired
"""
exp_time = self.access_token_expiration_time.timestamp() - 20
now = datetime.now().timestamp()
return bool(exp_time < now)
def from_workflow(self) -> None:
"""Grabs an Access toke and refresh token using
the oAuth workflow.
### Usage
----
>>> td_credentials = TdCredentials(
client_id=client_id,
redirect_uri=redirect_uri,
credential_file='config/td_credentials.jsonc'
)
>>> td_credentials.from_workflow()
"""
self.grab_authorization_code()
token_dict = self.exchange_code_for_token(return_refresh_token=True)
self.from_token_dict(token_dict=token_dict)
def from_credential_file(self, file_path: str) -> None:
"""Loads the credentials for a JSON file that is formatted
in the correct fashion.
### Parameters
file_path : str
The location of the credentials file.
"""
with open(file=file_path, mode='r', encoding='utf-8') as token_file:
token_dict = json.load(fp=token_file)
self.from_token_dict(token_dict=token_dict)
def to_credential_file(self, file_path: Union[str, pathlib.Path]) -> None:
"""Takes the token dictionary and saves it to a JSON file.
### Parameters
----
file_path : Union[str, pathlib.Path]
The file path to the credentials file.
### Usage
----
>>> td_credentials.to_token_file(
file_path='config/td_credentials.json'
)
"""
if isinstance(file_path, pathlib.Path):
file_path = file_path.resolve()
with open(file=file_path, mode='w+', encoding='utf-8') as token_file:
json.dump(obj=self.to_token_dict(), fp=token_file, indent=2)
def from_credential_dict(self, token_dict: dict) -> None:
"""Loads the credentials from a token dictionary.
### Parameters
----
token_dict : dict
The token dictionary with the required
authentication tokens.
### Usage
----
### Example 1
----
You don't necessairly need the `refresh_token_expiration_time` or the
`access_token_expiration_time` because they can be calculated using the
`access_token` key and `refresh_token`.
>>> td_credentials.from_credential_dict(
token_dict={
"access_token": "YOUR_ACCESS_TOKEN",
"refresh_token": "YOUR_REFRESH_TOKEN"
"scope": "PlaceTrades AccountAccess MoveMoney",
"expires_in": 1800,
"refresh_token_expires_in": 7776000,
"token_type": "Bearer",
"refresh_token_expiration_time": "2021-07-08T17:38:07.973982",
"access_token_expiration_time": "2021-04-09T18:08:07.973982"
}
)
### Example 2
----
You don't necessairly need the `refresh_token_expiration_time` or the
`access_token_expiration_time` because they can be calculated using the
`access_token` key and `refresh_token`.
>>> # This just is another way of sending it through.
>>> td_credentials.from_credential_dict(
token_dict={
"access_token": "YOUR_ACCESS_TOKEN",
"refresh_token": "YOUR_REFRESH_TOKEN"
"scope": "PlaceTrades AccountAccess MoveMoney",
"expires_in": 1800,
"refresh_token_expires_in": 7776000,
"token_type": "Bearer"
}
)
"""
self.from_token_dict(token_dict=token_dict)
self.validate_token()
def grab_authorization_code(self) -> None:
"""Generates the URL to grab the authorization code."""
data = {
"response_type": "code",
"redirect_uri": self.redirect_uri,
"client_id": self.client_id + "@AMER.OAUTHAP"
}
# url encode the data.
params = urllib.parse.urlencode(data)
# build the full URL for the authentication endpoint.
url = self.authorization_url + params
webbrowser.open(url=url)
code_url = input("Please Paste the Authorization Code Here: ")
query = urlparse(url=code_url)
parse_code = parse_qs(qs=query.query)
self.authorization_code = parse_code['code'][0]
def exchange_code_for_token(self, return_refresh_token: bool) -> dict:
"""Access token handler for AuthCode Workflow.
### Overview
----
This takes the authorization code parsed from
the auth endpoint to call the token endpoint
and obtain an access token.
### Parameters
----
return_refresh_token: bool
If set to `True`, will request a refresh token in
the request. Otherwise, will only request an access
token along.
### Returns
----
dict :
The token dictionary with the content.
"""
# Define the parameters of our access token post.
data = {
'grant_type': 'authorization_code',
'client_id': self.client_id + '@AMER.OAUTHAP',
'code': self.authorization_code,
'redirect_uri': self.redirect_uri
}
if return_refresh_token:
data['access_type'] = 'offline'
# Make the request.
response = requests.post(
url="https://api.tdameritrade.com/v1/oauth2/token",
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
data=data
)
if response.ok:
return response.json()
raise requests.HTTPError()
def grab_access_token(self) -> dict:
"""Refreshes the current access token.
This takes a valid refresh token and refreshes
an expired access token. This is different from
exchanging a code for an access token.
### Returns
----
dict:
The dictionary contain all the token
info.
"""
# build the parameters of our request
data = {
'client_id': self.client_id,
'grant_type': 'refresh_token',
'access_type': 'offline',
'refresh_token': self.refresh_token
}
# Make the request.
response = requests.post(
url="https://api.tdameritrade.com/v1/oauth2/token",
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
data=data
)
if response.ok:
return response.json()
raise requests.HTTPError()
def validate_token(self) -> None:
"""Validates the access token and refresh token.
### Overview
----
A TD Ameritrade Access token is only valid for 30 minutes,
and a TD Ameritrade Refresh token is only valid for 90 days.
When an access token expires, a new one is retrieved using the
refresh token. If the refresh token is expired the oAuth workflow
starts again.
"""
if self.is_refresh_token_expired:
print("Refresh Token Expired, initiating oAuth workflow...")
self.from_workflow()
if self.is_access_token_expired:
print("Access Token Expired, refreshing access token...")
token_dict = self.grab_access_token()
self.from_token_dict(token_dict=token_dict)
if self._loaded_from_file:
self.to_credential_file(file_path=self._file_path)
| [
"alexreed1192@gmail.com"
] | alexreed1192@gmail.com |
0aa840636aa5d3f237b7344f2e83e0a8f9147d66 | 3c279bde31a5fde7599d529cead82662e4173462 | /finndex/util/dateutil.py | 6ca4d066237bd564f138980075af0e510e4e1d51 | [
"MIT"
] | permissive | FinnitoProductions/finndex | 77eea27cdeaf85076d5b1baf13526e8409e002b8 | 777c985ae7ec082284c27dc5e928bdd998bcc962 | refs/heads/master | 2022-03-03T20:02:03.660374 | 2020-12-14T19:24:51 | 2020-12-14T19:24:51 | 194,348,404 | 0 | 1 | MIT | 2022-01-21T19:57:44 | 2019-06-29T00:43:26 | Jupyter Notebook | UTF-8 | Python | false | false | 1,119 | py | '''
Contains utility functions providing an extension to Python's datetime.
'''
import datetime
import pytz
__author__ = "Finn Frankis"
__copyright__ = "Copyright 2019, Crypticko"
DESIRED_DATE_FORMAT = "%Y-%m-%d"
DESIRED_TIME_ZONE = pytz.timezone("US/Pacific")
'''
Converts a timestamp (represented as a string) in one date format into another date format. Returns
the newly formatted date as a string.
A list of acceptable date format characters can be found at the following link.
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
'''
def convertTimestamp(timestamp, initialFormat, desiredFormat):
return datetime.datetime.strptime(timestamp, initialFormat).strftime(desiredFormat)
'''
A generator function (inclusive on both endpoints) to iterate through every date or datetime in a given range.
'''
def dateRange(startDate, endDate):
for i in range(int((endDate - startDate).days) + 1):
yield startDate + datetime.timedelta(i)
'''
Retrieves the current time and date in Pacific time.
'''
def getCurrentDateTime():
return datetime.datetime.now(DESIRED_TIME_ZONE)
| [
"finn@teachmy.com"
] | finn@teachmy.com |
d76399a2a5ee7d97c620a67a442b0bc91905f22e | 9a88d758e94da3c24e81157bb1aa9862e9346b03 | /urls.py | f34132f34626b6046e654b70b9e1a3122aff7706 | [] | no_license | Gokulk123/E-Learning | adc04efbd64d2a12ca0b3a68f616e36a78a0b619 | 2a8193fd9bbfdc70f971d9bda10585a4bff3fd19 | refs/heads/master | 2022-09-12T18:19:10.450464 | 2020-05-29T08:28:56 | 2020-05-29T08:28:56 | 267,805,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,282 | py | """djangoproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from .djangoapp import views
from .download import views as downloadview
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.index,name="index"),
path('userReg/', views.userReg, name="userReg"),
path('facultyReg/', views.facultyReg, name="facultyReg"),
path('userlogin/', views.userlogin, name="userlogin"),
path('facultylogin/', views.facultylogin, name="facultylogin"),
path('adminlogin/', views.adminlogin, name="adminlogin"),
path('about/', views.about, name="about"),
path('userregistration/', views.usersignup, name="usersignup"),
path('facultyregistration/', views.facultysignup, name="facultysignup"),
path('admincredential/', views.admincredentialauth, name="admincredentialauth"),
path('userdetails/',views.userdetails,name="userdetails"),
path('adminhome/',views.adminhome,name="adminhome"),
path('userhome/',views.userhome,name="userhome"),
path('facultyhome/',views.facultyhome,name="facultyhome"),
path('facultydetails/',views.facultydetails,name="facultydetails"),
path('logout/',views.logout,name="logout"),
path('admincredential/',views.usercredentialauth,name="usercredentialauth"),
path('add_courses/',views.add_course,name="add_course"),
path('add_course_materials/',views.add_course_materials,name="add_course_materials"),
path('select_course/',views.select_course,name="select_course"),
path('select_course_materials/',views.select_course_materials,name="select_course_materials"),
path('ask_question/',views.ask_question,name="ask_question"),
path('view_reply/',views.view_reply,name="view_reply"),
path('view_question/',views.view_question,name="view_question"),
path('responsing/',views.responsing,name="responsing"),
path('usercredential/', views.usercredentialauth, name="usercredentialauth"),
path('userlogout/',views.userlogout,name="userlogout"),
path('facultycredential/', views.facultycredentialauth, name="facultycredentialauth"),
path('facultylogout/',views.facultylogout,name="facultylogout"),
path('add_values/',views.add_values,name="add_values"),
path('assign_course/',views.assign_course,name="assign_course"),
path('course_value/',views.course_value,name="course_value"),
path('view_course/',views.view_course,name="view_course"),
path('ask_question/',views.ask_question,name="ask_question"),
path('save_doubts/',views.save_doubts,name="save_doubts"),
path('view_assigned_courses/', views.view_assigned_courses, name="view_assigned_courses"),
path('view_users_doubts/', views.view_users_doubts, name="view_users_doubts"),
path('reply/', views.reply, name="reply"),
path('save_reply/', views.save_reply, name="save_reply"),
path('all_reply/', views.all_reply, name="all_reply"),
path('view_solution/', views.view_solution, name="view_solution"),
path('add_materials/', views.add_materials, name="add_materials"),
path('generateKey/', views.generateKey, name="generateKey"),
path('material_save/', views.material_save, name="material_save"),
path('add_email/', views.add_email, name="add_email"),
path('send_mail/', views.send_mail, name="send_mail"),
path('read_keys/', views.read_keys, name="read_keys"),
path('login/', downloadview.login, name="login"),
path('downloadresoure/',downloadview.authenticate_and_download,name="authenticate_and_download"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"gokulkrishnanml@gmail.com"
] | gokulkrishnanml@gmail.com |
6436176ee36f61be4e18fceb8292042e2a8cd3bd | ccbb7fb8fda4d936e765263f05a435058b397bd9 | /src/guiltytargets/ppi_network_annotation/model/__init__.py | ab512ca4a78d001836dbb692256bd93d16deee04 | [
"MIT"
] | permissive | GuiltyTargets/guiltytargets | 5a5d3ba9e45867a64c81a91529ae6689f8be447f | c20a5cae6c9cc71c2ca73080a862abe986bc34c0 | refs/heads/master | 2022-02-13T03:30:49.705239 | 2021-12-22T12:51:20 | 2021-12-22T12:51:20 | 154,318,881 | 10 | 5 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # -*- coding: utf-8 -*-
"""Package that includes classes for data models."""
from .attribute_network import AttributeNetwork # noqa: F401
from .filtered_network import FilteredNetwork # noqa: F401
from .gene import Gene # noqa: F401
from .labeled_network import LabeledNetwork # noqa: F401
from .network import Network # noqa: F401
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
34ea6aa1267f976a48d3711fbeae24feee76691c | a74cabbe1b11fc8ef575ea86f2543cd95db78ec9 | /python_program/q1769_Minimum_Number_of_Operations_to_Move_All_Balls_to_Each_Box.py | f0c1cbca670d01a831baba6505718f6aabeab348 | [] | no_license | tszandy/leetcode | 87e3ccf291b2879637d2d8238935a455b401a78a | f1f4361541dcffbb291285663c8820d7ffb37d2f | refs/heads/master | 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,932 | py | from typing import List
from collections import Counter,defaultdict
from math import *
from functools import reduce
import numpy as np
from heapq import *
class Solution:
def minOperations(self, boxes: str) -> List[int]:
num1sAhead = 0
num1sBehind = 0
initialDiff = 0
res = []
for num in boxes:
if num == "1":
num1sAhead += 1
for i, num in enumerate(boxes):
if num == "1":
initialDiff += i
for i, num in enumerate(boxes):
if i == 0:
res.append(initialDiff)
if num == "1":
num1sAhead -= 1
num1sBehind += 1
else:
res.append(res[i-1] - num1sAhead + num1sBehind)
if num == "1":
num1sAhead -= 1
num1sBehind += 1
return res
def minOperations_2(self, boxes: str) -> List[int]:
n = len(boxes)
list_1 = np.array(list(map(lambda x:int(x),boxes)))[:,None]
list_2 = np.array(range(n))
result = np.abs((list_2[:,None]-list_2[None,:])*list_1).sum(axis = 0)
return list(result)
def minOperations_1(self, boxes: str) -> List[int]:
n = len(boxes)
return_list = [0]*n
for i in range(n):
count_move = 0
for j,e in enumerate(boxes):
count_move+=abs((j-i)*int(e))
return_list[i] = count_move
return return_list
sol = Solution()
# input
boxes = "110"
# output
output = sol.minOperations(boxes)
# answer
answer = [1,1,3]
print(output, answer, answer == output)
# input
boxes = "001011"
# output
output = sol.minOperations(boxes)
# answer
answer = [11,8,5,4,3,4]
print(output, answer, answer == output)
# input
boxes = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000"
# output
output = sol.minOperations(boxes)
# answer
answer = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]
print(output, answer, answer == output) | [
"444980834@qq.com"
] | 444980834@qq.com |
f320a663594ea9a837f11ecad07ff612adf1e65b | c9b33b67e3b5b562f696502d7a8c72afc07e7392 | /tests/test_client.py | ad8aaf2b5c319bf3be2233a12da4d42ff046d0dd | [
"MIT"
] | permissive | GinnyGaga/20171202flasky | 09abe186736da990881b6e1c548ea040a73dda82 | 298787c1f54b9ece8048fd359d56044716ffa345 | refs/heads/master | 2021-08-23T01:49:38.835610 | 2017-11-27T15:54:26 | 2017-11-27T15:54:26 | 112,824,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | import unittest
import re
from flask import url_for
from app import create_app,db
from app.models import User,Role
class FlaskClientTestCase(unittest.TestCase):
    """End-to-end tests that drive the application through the Flask test client."""
    def setUp(self):
        # Build a fresh 'testing' app, push its context, and create a clean
        # database (with the default roles) before every test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()
        # use_cookies=True so the session (login state) survives across requests.
        self.client = self.app.test_client(use_cookies=True)
    def tearDown(self):
        # Drop everything so tests stay independent of each other.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    def test_home_page(self):
        # Anonymous visitors should be greeted as "Stranger" on the index page.
        response = self.client.get(url_for('main.index'))
        self.assertTrue('Stranger' in response.get_data(as_text=True))
    def test_register_and_login(self):
        # Simulate registering a new account.
        response = self.client.post(url_for('auth.register'),data={
            'email':'john@example.com',
            'username':'john',
            'password':'cat',
            'password2':'cat'
        })
        # A successful registration redirects (302) to the login page.
        self.assertTrue(response.status_code == 302)
        # Log in with the newly registered account.
        response = self.client.post(url_for('auth.login'),data={
            'email':'john@example.com',
            'password':'cat'},
            follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertTrue(re.search('Hello,john!', data))
        self.assertTrue('You have not confirmed your account yet' in data)
        # Send the account-confirmation token.
        user=User.query.filter_by(email='john@example.com').first()
        token = user.generate_confirmation_token()
        response = self.client.get(url_for('auth.confirm',token=token),follow_redirects=True)
        data=response.get_data(as_text=True)
        self.assertTrue('You have confirmed your account' in data)
        # Log out.
        response = self.client.get(url_for('auth.logout'),
            follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertTrue('You have been logged out' in data)
| [
"mo@example.com"
] | mo@example.com |
3f7145a11f4c1d019d782a5fae6848a3d4d3f507 | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /nova/api/openstack/compute/contrib/rescue.py | 7bf815a37979d0e68811a4baac694cc8f191f500 | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 3,310 | py | # Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = exts.extension_authorizer('compute', 'rescue')
class RescueController(wsgi.Controller):
    """Controller that mixes rescue/unrescue actions into the servers resource."""
    def __init__(self, *args, **kwargs):
        super(RescueController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
    def _get_instance(self, context, instance_id):
        # Translate a missing instance into a 404 for the API caller.
        try:
            return self.compute_api.get(context, instance_id)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(msg)
    @wsgi.action('rescue')
    @exts.wrap_errors
    def _rescue(self, req, id, body):
        """Rescue an instance."""
        # NOTE(review): body['rescue'] raises KeyError when the 'rescue' key is
        # absent entirely; presumably wrap_errors turns that into an error
        # response — confirm.
        context = req.environ["nova.context"]
        authorize(context)
        # Use the caller-supplied admin password if one was provided,
        # otherwise generate a random one of the configured length.
        if body['rescue'] and 'adminPass' in body['rescue']:
            password = body['rescue']['adminPass']
        else:
            password = utils.generate_password(FLAGS.password_length)
        instance = self._get_instance(context, id)
        try:
            self.compute_api.rescue(context, instance,
                rescue_password=password)
        except exception.InstanceInvalidState as state_error:
            # Instance is in a state that cannot be rescued -> 409 Conflict.
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'rescue')
        return {'adminPass': password}
    @wsgi.action('unrescue')
    @exts.wrap_errors
    def _unrescue(self, req, id, body):
        """Unrescue an instance."""
        context = req.environ["nova.context"]
        authorize(context)
        instance = self._get_instance(context, id)
        try:
            self.compute_api.unrescue(context, instance)
        except exception.InstanceInvalidState as state_error:
            # Instance is in a state that cannot be unrescued -> 409 Conflict.
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'unrescue')
        # 202 Accepted: the unrescue proceeds asynchronously.
        return webob.Response(status_int=202)
class Rescue(exts.ExtensionDescriptor):
    """Instance rescue mode"""
    # Extension metadata used by the OpenStack extensions framework.
    name = "Rescue"
    alias = "os-rescue"
    namespace = "http://docs.openstack.org/compute/ext/rescue/api/v1.1"
    updated = "2011-08-18T00:00:00+00:00"
    def get_controller_extensions(self):
        # Attach the rescue/unrescue actions to the core 'servers' resource.
        controller = RescueController()
        extension = exts.ControllerExtension(self, 'servers', controller)
        return [extension]
| [
"dkang@isi.edu"
] | dkang@isi.edu |
c4b7bae3a9e4612d8b544cb4149dab7657f8aa0c | 0a2e83d03cbe7633cce6e6fc1d6ce114f0d507a1 | /CourseSchedule.py | 90f452e1c0e3b72f0b297a16b0e4dbeff194fc49 | [] | no_license | Pr0Vlad/Coding-Problems | 69fe8bddb2e44aa747791ebf4d9ec782ba54031c | a00f5464e06abdc39bce6304868776f7ae390d00 | refs/heads/master | 2022-11-24T07:29:52.816561 | 2020-07-27T22:19:25 | 2020-07-27T22:19:25 | 269,507,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | #eachcourse has pre reqs and need to figure out how to take them
#we can store all the data as a graph and the edges go to the next class
#if there is a cycle you cant make the schedule because if a pre req for class 1 is class 2 and other way around that will have a cycle
#first option is to use dfs for checking a cycle we can do a depth limited seach deleting a node once we know it doesnt have a cycle and making time complexity O(n)
class Solution:
    """Course-schedule feasibility: courses can be taken iff the prerequisite
    graph contains no cycle."""

    def Cycle(self, graph, course, visited, visitedlist):
        """Return True if a cycle is reachable from `course`.

        `visited` holds the courses on the current DFS path (grey set);
        `visitedlist` memoizes finished results (black set), so each course
        is fully explored at most once.
        """
        if course in visitedlist:
            return visitedlist[course]
        if course in visited:
            # Back edge to a course on the current DFS path -> cycle.
            return True
        if course not in graph:
            # Course has no prerequisites: nothing to recurse into.
            return False
        visited.add(course)
        ret = False
        for neighbor in graph[course]:
            if self.Cycle(graph, neighbor, visited, visitedlist):
                ret = True
                break
        visited.remove(course)
        visitedlist[course] = ret
        return ret

    def classes(self, amountCourses, preReqs):
        """Return True if all `amountCourses` courses can be scheduled given
        `preReqs`, a list of [course, prerequisite] pairs."""
        graph = {}
        for preReq in preReqs:
            graph.setdefault(preReq[0], []).append(preReq[1])
        # Share one memo dict across all starting courses: the original code
        # rebuilt it for every course, redoing work and making the scan
        # O(V*(V+E)) in the worst case instead of O(V+E).
        memo = {}
        for course in range(amountCourses):
            if self.Cycle(graph, course, set(), memo):
                return False
        return True
print(Solution().classes(2, [[1, 0]]))
print(Solution().classes(2, [[1, 0], [0, 1]])) | [
"noreply@github.com"
] | Pr0Vlad.noreply@github.com |
c7a1755f0e7fbc0d4edee7b813130bfb252193cf | 2acf64fca88200f4a4ada46f5da4f96702bafa06 | /stubs/facebook_business/adobjects/hotelroom.pyi | 0c74f95680b203626a30e5473345113f0c61cb3b | [] | no_license | vlab-research/adopt | bf6cdbfb751f7d85674e3925b207639e7d9d92c4 | 66347b00996e26910290e4fdb883e4231cc614af | refs/heads/master | 2023-04-12T12:16:23.061861 | 2021-05-18T14:17:01 | 2021-05-18T14:17:01 | 278,025,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | pyi | from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject as AbstractCrudObject
from facebook_business.adobjects.abstractobject import AbstractObject as AbstractObject
from facebook_business.adobjects.objectparser import ObjectParser as ObjectParser
from facebook_business.api import FacebookRequest as FacebookRequest
from facebook_business.typechecker import TypeChecker as TypeChecker
from typing import Any, Optional
class HotelRoom(AbstractCrudObject):
def __init__(self, fbid: Optional[Any] = ..., parent_id: Optional[Any] = ..., api: Optional[Any] = ...) -> None: ...
class Field(AbstractObject.Field):
applinks: str = ...
base_price: str = ...
currency: str = ...
description: str = ...
id: str = ...
images: str = ...
margin_level: str = ...
name: str = ...
room_id: str = ...
sale_price: str = ...
url: str = ...
def api_delete(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def api_get(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def api_update(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def get_pricing_variables(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
| [
"nandanmarkrao@gmail.com"
] | nandanmarkrao@gmail.com |
c9e9c0a25d752fb2e2355dea5265a18f0927289f | 444e82743a5fbfdadbab3b9725d4e790ef015e0b | /ABSSetup.py | fdb4bdc297217230bd988ccce8f960aa87071dfb | [] | no_license | Mamietti/ABS | 31c471b664f88eebee6e96a66ebd3cf827b2fd58 | 747ea6c38536aa85aba9d326bf0ef9648b5bad81 | refs/heads/master | 2021-01-15T13:36:24.567754 | 2018-03-14T16:25:08 | 2018-03-14T16:25:08 | 99,679,965 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | '''
This is the list of default addons which we consider the absolute basics
'''
default_addons_list = [
'English (South African) Language Pack',
'English (GB) Language Pack',
'Application Update Service Helper',
'Pocket',
'Web Compat',
'Site Deployment Checker',
'Default',
'Ubuntu Modifications',
'Multi-process staged rollout',
'Disable Prefetch',
'Disable TLS Certificate Transparency'
]
'''
These are the addons we want the user to have if they're not going with the basics
'''
safety_addons_list = [
'AdBlocker Ultimate'
]
| [
"mauri.miettinen13@gmail.com"
] | mauri.miettinen13@gmail.com |
f3c4b37ede8d850a373064ba90b5ab31a2185e64 | 80522f7edca55f7d85c0e0578f484b4af765b042 | /Session4/Exercises/starter/Question1.py | 7f77f97e7e91eb3b4951219aac6aefbd3e8c7d9e | [] | no_license | georgiedignan/she_codes_python | 607f7aa2c0903ac77e56336b63350afc561b185a | 99a6c07eac74dff2a1b210f3c9664f2f88c1469b | refs/heads/main | 2023-07-07T09:22:42.045543 | 2021-08-08T09:06:18 | 2021-08-08T09:06:18 | 383,382,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import csv
# with open("colours_20_simple.csv",mode="r") as csv_file:
# csv_reader = csv.reader(csv_file)
# #print(csv_reader)
# headers = next(csv_reader)
# for line in headers:
# print(f"{line[0]} {line[1]} {line[2]}")
with open("colours_20_simple.csv",mode="r") as csv_file:
csv_reader = csv.reader(csv_file)
#print(csv_reader)
for i,line in enumerate(csv_reader):
if i != 0:
print(f"{line[0]} {line[1]} {line[2]}")
| [
"georgiedignan@gmail.com"
] | georgiedignan@gmail.com |
ba1e83684db87779aa34544d18e4959accdff376 | aec66866bfb377d66b9f85883b79c1d5070a96a8 | /mysite/blog/models.py | 1e0b3b36c5263bb91e0a41894147d43c77db649a | [] | no_license | royrob21/my-first-blog | 07432a7c3ecee4381a5ed987be34ef7ce971f622 | d50d7208928c7e1b45e2c7c5fb9a527fdf6b00a9 | refs/heads/master | 2023-06-30T15:27:18.289703 | 2021-08-02T07:46:00 | 2021-08-02T07:46:00 | 391,602,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post authored by a site user."""
    # Deleting the author also deletes their posts (CASCADE).
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        # Stamp the publication time and persist the change.
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
| [
"roberto.roba@virgilio.it"
] | roberto.roba@virgilio.it |
1bda6fd6e7271cebb0d5a3ec0f810bf5ba116d12 | 386d1b6557f4cbaf20794cd222f3b7b8598ef6a6 | /data/clean_data/A1/18.py | 165e341e4a20acdb7adcf06e11bc7e769b947482 | [] | no_license | woowei0102/code2pro | 3baf86985f911264362963c503f12d20bdc1f89f | 0b16c62a1cb9053ab59edd7a52e1b3b39fdf66dc | refs/heads/main | 2023-06-28T23:09:23.998798 | 2021-07-13T11:49:27 | 2021-07-13T11:49:27 | 385,585,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | class Account:
def __init__(self, name):
self.name = name
self._balance = 0
    def deposit(self, amount):
        self._balance = self._balance + amount
        # NOTE(review): the message interpolates self._balance (the balance
        # *after* the deposit), not `amount`, so it reads "deposited
        # NT$<new balance>" — confirm which value was intended.
        print('{}存了NT${:,.0f}元.'.format(self.name,self._balance))
def withdraw(self, amount):
if amount < self._balance:
self._balance = self._balance - amount
print('{}提了NT${:,.0f}元.'.format(self.name,self._balance))
else:
print('{}的存款不足.'.format(self.name))
def show(self):
print('{}餘額NT${:,.0f}元.'.format(self.name,self._balance))
userA = Account("Jack")
userA.withdraw(1000)
userA.deposit(5000)
userA.withdraw(1000)
userA.show()
| [
"54971984+woowei0102@users.noreply.github.com"
] | 54971984+woowei0102@users.noreply.github.com |
ba164fd970f555f659c1acf2f986cfdf267ee658 | 4a66b8cd4e2fdeead33665303ab4d7f961402995 | /models/layers/layer_normalization.py | 4f89aae97ce336ef4d29f165b2dc748ec96d9591 | [] | no_license | SJ-moon/Transformer | e5e1f45f08f42e512dd471ddddaf97b2afa0f45c | be6f3b48a734dfc13b9e55a300eb1f8887ec96d3 | refs/heads/master | 2023-08-15T02:23:34.556503 | 2021-09-14T15:04:37 | 2021-09-14T15:04:37 | 406,413,884 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | import torch
from torch import nn as nn
class LayerNorm(nn.Module):
    """Layer normalization (Ba et al., 2016) over the last dimension.

    Normalizes each sample to zero mean / unit variance across its feature
    dimension, then applies a learned elementwise affine transform.
    """
    def __init__(self, d_model, eps=1e-12):
        super(LayerNorm, self).__init__()
        # nn.Parameter registers gamma/beta so they show up in .parameters()
        # and receive gradients.
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps
    def forward(self, x):
        # Use the biased variance (unbiased=False) and add eps *inside* the
        # sqrt, matching the LayerNorm definition and torch.nn.LayerNorm.
        # The previous code used the unbiased std (divides by d_model-1) and
        # added eps to the std itself, which deviates from the standard
        # formulation.
        mean = x.mean(-1, keepdim=True)
        var = x.var(-1, unbiased=False, keepdim=True)
        out = (x - mean) / torch.sqrt(var + self.eps)
        return self.gamma * out + self.beta
"seungjunee@DESKTOP-NBNJANH.localdomain"
] | seungjunee@DESKTOP-NBNJANH.localdomain |
d6de7da64fe8278c4dcc7e25bc1fdf741e82efa8 | d7e9bf5d59343f9ea1670fc529e1afa8fdcbf337 | /Section-04/create_tables.py | 2cd9ed6868b3e5aafebbbf768358599456b3f6fa | [] | no_license | tyday/solid-guacamole | 2610985f3156d44144cf40dd65b040898fb8c159 | f1a1544ae831c18c2acf558afdf8a1d4c9991152 | refs/heads/master | 2020-05-05T09:01:56.946260 | 2019-04-14T17:49:13 | 2019-04-14T17:50:21 | 179,888,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | import sqlite3
# Open (or create) the SQLite database file in the working directory.
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
# Schema bootstrap: IF NOT EXISTS makes the table creation idempotent.
create_table = "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username text, password text)"
cursor.execute(create_table)
create_table = "CREATE TABLE IF NOT EXISTS items (name text, price real)"
cursor.execute(create_table)
# NOTE(review): this seed INSERT is not idempotent — rerunning the script
# adds a duplicate ('test', 10.99) row each time. Confirm that is intended.
cursor.execute("INSERT INTO items VALUES ('test', 10.99)")
connection.commit()
connection.close() | [
"tyrday@gmail.com"
] | tyrday@gmail.com |
02eff9f9d7f73775338d09a8547e254b76580d7d | d04b04735026e8ec27a9210a42c0ffdafa7a75a7 | /venv/bin/flask | c8e56c713c9df5382b3040c2afa5fd1d77a9c0e1 | [] | no_license | iamaayush98/ChatterBotAPI | 4732cdf8e78c1c0d12f4fa8695db0e28ff1f4c61 | f43ae130e65d3458c3121b4a5b52f9de2f356c93 | refs/heads/master | 2020-08-26T17:00:28.647918 | 2019-10-23T19:12:12 | 2019-10-23T19:12:12 | 217,080,609 | 0 | 0 | null | 2019-10-23T19:12:13 | 2019-10-23T14:40:17 | null | UTF-8 | Python | false | false | 260 | #!/home/fake_batman_/PycharmProjects/ChatterBotAPI/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"harshitmah98@gmail.com"
] | harshitmah98@gmail.com | |
ec8bf2a34021126680723c6e854594b55fffde82 | c085f61a0f9da8ccd2f56ab9142799a4dcfd1052 | /bindings/pydeck/examples/path_layer.py | 161fb47737afa6902c05f29604126911794705cb | [
"MIT",
"Apache-2.0"
] | permissive | visgl/deck.gl | 148ec752c02cf6d0a35d7e11dbb44b5e341553c2 | 1c4f9a99596b3f913426f7d5df3d7e831b4e99c0 | refs/heads/master | 2023-09-01T00:08:12.457341 | 2023-08-29T22:19:23 | 2023-08-29T22:19:23 | 48,030,204 | 3,929 | 1,012 | MIT | 2023-09-14T17:42:01 | 2015-12-15T08:38:29 | TypeScript | UTF-8 | Python | false | false | 773 | py | """
PathLayer
=========
Locations of the Bay Area Rapid Transit lines.
"""
import pandas as pd
import pydeck as pdk
DATA_URL = "https://raw.githubusercontent.com/visgl/deck.gl-data/master/website/bart-lines.json"
df = pd.read_json(DATA_URL)
def hex_to_rgb(h):
    """Convert a hex color string such as "#7acbff" into an (r, g, b) tuple of ints."""
    digits = h.lstrip("#")
    return tuple(int(digits[pos:pos + 2], 16) for pos in (0, 2, 4))
df["color"] = df["color"].apply(hex_to_rgb)
view_state = pdk.ViewState(latitude=37.782556, longitude=-122.3484867, zoom=10)
layer = pdk.Layer(
type="PathLayer",
data=df,
pickable=True,
get_color="color",
width_scale=20,
width_min_pixels=2,
get_path="path",
get_width=5,
)
r = pdk.Deck(layers=[layer], initial_view_state=view_state, tooltip={"text": "{name}"})
r.to_html("path_layer.html")
| [
"noreply@github.com"
] | visgl.noreply@github.com |
448948bbf93501104da3edaacec3e43f8968c928 | 657ab1c997e11c43801960a21368e7167d1df344 | /SHOP/apps.py | 2b17bc9c8d7930b0a52eec1f06c3a850df8655f5 | [
"MIT"
] | permissive | EmilioMartinez22/Tienda-Django | e2916c9bd0f9d87fa904d172e24f6f695f900352 | 7a6dce1a178b019593b6892990866c520e5695a4 | refs/heads/main | 2023-06-16T08:50:57.432115 | 2021-07-06T16:13:30 | 2021-07-06T16:13:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from django.apps import AppConfig
class ShopConfig(AppConfig):
    """Django application configuration for the SHOP app."""
    # 64-bit implicit primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'SHOP'
| [
"83561976+Kevin-Busy@users.noreply.github.com"
] | 83561976+Kevin-Busy@users.noreply.github.com |
6a377949935a75af9eaadc89fad29c3b315a1549 | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/managedidentities/v1beta1/get_domain_iam_policy.py | fb18cbda25e88bff30ce2de666aa2916274a7222 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,609 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDomainIamPolicyResult',
'AwaitableGetDomainIamPolicyResult',
'get_domain_iam_policy',
'get_domain_iam_policy_output',
]
@pulumi.output_type
class GetDomainIamPolicyResult:
def __init__(__self__, bindings=None, etag=None, version=None):
if bindings and not isinstance(bindings, list):
raise TypeError("Expected argument 'bindings' to be a list")
pulumi.set(__self__, "bindings", bindings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bindings(self) -> Sequence['outputs.BindingResponse']:
"""
Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> str:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def version(self) -> int:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
class AwaitableGetDomainIamPolicyResult(GetDomainIamPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainIamPolicyResult(
bindings=self.bindings,
etag=self.etag,
version=self.version)
def get_domain_iam_policy(domain_id: Optional[str] = None,
options_requested_policy_version: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainIamPolicyResult:
"""
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
"""
__args__ = dict()
__args__['domainId'] = domain_id
__args__['optionsRequestedPolicyVersion'] = options_requested_policy_version
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:managedidentities/v1beta1:getDomainIamPolicy', __args__, opts=opts, typ=GetDomainIamPolicyResult).value
return AwaitableGetDomainIamPolicyResult(
bindings=__ret__.bindings,
etag=__ret__.etag,
version=__ret__.version)
@_utilities.lift_output_func(get_domain_iam_policy)
def get_domain_iam_policy_output(domain_id: Optional[pulumi.Input[str]] = None,
options_requested_policy_version: Optional[pulumi.Input[Optional[str]]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDomainIamPolicyResult]:
"""
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
"""
...
| [
"noreply@github.com"
] | 24601.noreply@github.com |
4d16cd37adbbc640904a7479f501b5411125f4d6 | 4afc520ad2173499b7a83d0d9deb34593823b163 | /check_postgres.py | 173ea598fa94ceea3965c3881523c46c1e0ea36d | [] | no_license | goncalomatos-ic/vector-api | a1caf6c76d0a17f18b94a69af0dd688ad31bc924 | cc26155c6fff1e740799ea6deaf20cbf6e771239 | refs/heads/master | 2023-01-10T05:57:55.005675 | 2020-10-30T14:25:34 | 2020-10-30T14:25:34 | 307,221,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | import time
# Wait for postgres to be up and running
#
# Improvement: try to connect to postgres
#
# Note: pinging the container is not enough because
# it doesn't ensure that postgres is running
time.sleep(10) | [
"goncalo.matos@indiecampers.com"
] | goncalo.matos@indiecampers.com |
d1bbb2ee724567016d3a3f4f29ed535e7a845f16 | 778ca26a77a2796d3f69f88d792cb92bd066d064 | /module_2/4_with_class/movie.py | 3e5b1c19e6af146ffb1406109d5a932bbf9f0963 | [
"MIT"
] | permissive | esterfogaca/acelera-dev-loadsmart-women | 20f2c6e1b003964d050e6d1e623b977ca47f1859 | 71aac82f3fe2667ee5acda1619ddf9c82b18319d | refs/heads/master | 2020-04-30T14:30:15.810234 | 2019-03-12T09:57:10 | 2019-03-15T01:25:47 | 176,892,528 | 0 | 0 | MIT | 2019-03-21T07:30:02 | 2019-03-21T07:30:01 | null | UTF-8 | Python | false | false | 831 | py | import time
import webbrowser
class Movie:
    """A movie record with console helpers for its metadata, trailer and votes."""

    def __init__(self, title, stars, director, trailer_url, votes):
        self.title = title
        self.stars = stars
        self.director = director
        self.trailer_url = trailer_url
        self.votes = votes

    def print_specifications(self):
        """Print title, stars and director, pausing two seconds between lines."""
        for label, value in (('\nMOVIE', self.title),
                             ('STARS', self.stars),
                             ('DIRECTOR', self.director)):
            print('{}: {}'.format(label, value))
            time.sleep(2)

    def show_trailer(self):
        """Open the trailer URL in the default web browser."""
        print(f'\nLoading {self.title} trailer...')
        webbrowser.open_new(self.trailer_url)
        time.sleep(10)

    def print_votes(self):
        """Print the title and its vote count, pausing two seconds between lines."""
        print(f'\n{self.title}')
        time.sleep(2)
        print(f"{self.votes} votes")
        time.sleep(2)
| [
"cmaiacd@gmail.com"
] | cmaiacd@gmail.com |
caa8266f63e9454a80ff08be34a5a07f072d0f01 | 98a359465e6e0620accede5b87b819aed663179d | /schol_library/migrations/0059_auto_20190922_1729.py | f4818e0050c290afbb640c3b9136a5ea6ce4a2ed | [] | no_license | mustavfaa/back-end | 88f8674bd6c2f8d0c4984a2a3d34f2aece3ec8d1 | 6635e8f504c7a7ba9709121b4dd8d5ccecdf05ca | refs/heads/main | 2023-08-15T10:48:03.461138 | 2021-09-27T15:26:03 | 2021-09-27T15:26:03 | 410,938,832 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # Generated by Django 2.2 on 2019-09-22 11:29
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schol_library', '0058_auto_20190922_1144'),
]
operations = [
migrations.AddField(
model_name='requestedition',
name='checkid',
field=models.BooleanField(blank=True, default=False, verbose_name='статус'),
),
migrations.AlterField(
model_name='checkidrequestedition',
name='date_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 22, 17, 29, 19, 729564), null=True, verbose_name='время просмотра'),
),
migrations.AlterField(
model_name='requestedition',
name='date_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 22, 17, 29, 19, 728450), verbose_name='время заявки'),
),
]
| [
"72229762+mustavfaa@users.noreply.github.com"
] | 72229762+mustavfaa@users.noreply.github.com |
51acb83d0318a202b1d0631ef8efa17b80816cf3 | 39a9281201d1fc2c5dfe01beb1d2c64805df1e61 | /PYTHON-MUNDO 2/CONVERSAO_TO_BIN_HEX_OCT.py | 0bc47e0d5afef427b494e82e1638bcc08e86010c | [] | no_license | patrickmalg/aulas-python | 1b456efb1c4fcf039ec4cfca530c67aacf2b4587 | ee4b0ec01f270c3b3a318eb13b20e67e9fa1440f | refs/heads/master | 2020-04-23T02:59:49.879284 | 2019-05-09T18:36:11 | 2019-05-09T18:36:11 | 170,863,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | num = int(input("Digite um número inteiro: "))
print("""Escolha uma das bases para conversão:
[1] BINÁRIO
[2] OCTAL
[3] HEXADECIMAL""")
opçao = int(input("Opção: "))
if opçao == 1:
print("{} convertido para BINÁRIO é igual a {}".format(num, bin(num)[2:]))
elif opçao == 2:
print("{} convertido para OCTAL é igual a {}".format(num, oct(num)[2:]))
elif opçao == 3:
print("{} convertido para HEXADECIMAL é igual a {}".format(num, hex(num)[2:]))
else:
print("Opção inválida. Tente novamente.") | [
"patrickmoreira2001@hotmail.com"
] | patrickmoreira2001@hotmail.com |
56ebc3cd0c2af8321e6569378d04115e236581c5 | 2d8519f34fff220e854ec625bed06443d5f50d2f | /book/settings.py | f397a597814e7d94492210c656896dfafa1eb756 | [] | no_license | oneway-fullstack/book-management | a40a9fc5debe54d3c5304334568fa0b47e5b167e | 4985ae067bc873b2ca113b2d8978e8a17083a945 | refs/heads/main | 2023-04-14T05:22:20.110122 | 2021-04-21T12:02:10 | 2021-04-21T12:02:10 | 360,101,616 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,393 | py | """
Django settings for book project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from corsheaders.defaults import default_headers
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
FRONTEND_DIR = os.path.join(BASE_DIR, 'frontend')
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-*&t+$n+_kj^7*#^z#xu0zri!yr!d3@k^@d^vnf2rg+ko!ooxhg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
"*"
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webpack_loader',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'corsheaders',
'api',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
}
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'book.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'book.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATICFILES_DIRS = [os.path.join(BASE_DIR, "site_static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
APPEND_SLASH = False
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': DEBUG,
'BUNDLE_DIR_NAME': '/bundles/', # must end with slash
'STATS_FILE': os.path.join(FRONTEND_DIR, 'webpack-stats.json'),
}
}
CORS_ALLOW_ALL_ORIGINS=True
CORS_ORIGIN_WHITELIST = [
'http://localhost:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8080',
]
CORS_ALLOW_HEADERS = list(default_headers) + [
'x-token',
] | [
"guang.jin@chatterize.com"
] | guang.jin@chatterize.com |
3c801ed688a7770f9e2393cb09ac8a8de8a49b2a | 0c83baa22afa84347cc319e0cab214ff779e3254 | /01_fyyur/starter_code/app.py | 8777009175793e3adf6fc9d0e2bd6243990dc656 | [] | no_license | rahulaVT/FullStackNanoDegree | 52c8380bf6e6f050955f1bc1e3797bfc6ffba132 | 2b7b6042f8c804637efcba85554abd6518b96eed | refs/heads/master | 2022-07-02T19:23:46.698131 | 2020-05-18T01:26:43 | 2020-05-18T01:26:43 | 261,893,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,936 | py | #----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
# Standard library
import json
import logging
import re
import sys
from logging import Formatter, FileHandler
from operator import itemgetter

# Third-party
import babel
import dateutil.parser
from flask import Flask, render_template, request, Response, flash, redirect, url_for, jsonify
from flask import abort
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_wtf import Form

# Local
from forms import *
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
moment = Moment(app)  # flask-moment: client-side datetime rendering in templates
app.config.from_object('config')  # loads config.py (DB URI, secrets, DEBUG, ...)
db = SQLAlchemy(app)
migrate = Migrate(app, db)  # enables `flask db ...` migration commands
# TODO: connect to a local postgresql database
#----------------------------------------------------------------------------#
# Models.
#----------------------------------------------------------------------------#
class Genre(db.Model):
    # Lookup table of genre names; linked to venues through the
    # venue_genre association table declared below.
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String)
# Association table for the many-to-many Venue <-> Genre relationship.
venue_genre = db.Table('venue_genre',
    db.Column('genre_id',db.Integer, db.ForeignKey(Genre.id),primary_key=True),
    db.Column('venue_id',db.Integer, db.ForeignKey("Venue.id"),primary_key=True))
class Venue(db.Model):
    """A performance venue that artists can book shows at."""
    __tablename__ = 'Venue'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    city = db.Column(db.String(120))
    state = db.Column(db.String(120))
    address = db.Column(db.String(120))
    # Stored digits-only (dashes stripped on input, re-added for display).
    phone = db.Column(db.String(120))
    image_link = db.Column(db.String(500))
    facebook_link = db.Column(db.String(120))
    # Many-to-many: a venue has several genres, a genre belongs to many venues.
    genres = db.relationship('Genre', secondary=venue_genre,
                             backref=db.backref('venues', lazy=True))
    website = db.Column(db.String(120))
    seeking_talent = db.Column(db.Boolean, default=False)
    seeking_description = db.Column(db.String(120))
    # One-to-many: Show rows reference this venue; the backref also makes
    # show.venue available (as well as venue.shows).
    shows = db.relationship('Show', backref='venue', lazy=True)

    def __repr__(self):
        # Fixed: previously returned the literal string "venue" (the intended
        # f-string was commented out, and itself wrapped in quotes).
        return f'<Venue {self.id} {self.name}>'
# TODO: implement any missing fields, as a database migration using Flask-Migrate
class Artist(db.Model):
    """A performing artist that can be booked for shows at venues."""
    __tablename__ = 'Artist'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    city = db.Column(db.String(120))
    state = db.Column(db.String(120))
    # Phone number; presumably stored digits-only like Venue.phone — TODO confirm.
    phone = db.Column(db.String(120))
    # NOTE(review): unlike Venue (many-to-many Genre rows), genres here is a
    # single string column, so multiple genres must be encoded in one string.
    genres = db.Column(db.String(120))
    image_link = db.Column(db.String(500))
    facebook_link = db.Column(db.String(120))
class Show(db.Model):
    """A booking linking an Artist to a Venue at a specific start time."""
    __tablename__ = 'Show'

    id = db.Column(db.Integer, primary_key=True)
    # Start time is required; defaults to creation time (UTC).
    start_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # Foreign keys reference tablename.pk.
    artist_id = db.Column(db.Integer, db.ForeignKey('Artist.id'), nullable=False)
    venue_id = db.Column(db.Integer, db.ForeignKey('Venue.id'), nullable=False)

    def __repr__(self):
        # Fixed: the original referenced bare `artist_id`/`venue_id` (missing
        # `self.`), which raised NameError whenever repr() was taken.
        return '<Show {} {} artist_id={} venue_id={}>'.format(
            self.id, self.start_time, self.artist_id, self.venue_id)
# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.
#----------------------------------------------------------------------------#
# Filters.
#----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
    """Jinja filter: parse a datetime string and render it for display.

    `format` may be 'full', 'medium', or a raw babel pattern string.
    """
    parsed = dateutil.parser.parse(value)
    if format == 'full':
        pattern = "EEEE MMMM, d, y 'at' h:mma"
    elif format == 'medium':
        pattern = "EE MM, dd, y h:mma"
    else:
        # Anything else is passed straight through to babel.
        pattern = format
    return babel.dates.format_datetime(parsed, pattern)

app.jinja_env.filters['datetime'] = format_datetime
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def index():
  """Render the home page."""
  return render_template('pages/home.html')
# Venues
# ----------------------------------------------------------------
@app.route('/venues')
def venues():
    """Venues overview: venues grouped by (city, state).

    Each venue entry carries the number of upcoming shows, i.e. shows whose
    start_time lies in the future.
    """
    all_venues = Venue.query.all()
    now = datetime.now()

    # Single pass: bucket venues by their (city, state) location.
    # (Replaces the original O(locations x venues) rescan.)
    grouped = {}
    for venue in all_venues:
        grouped.setdefault((venue.city, venue.state), []).append(venue)

    data = []
    # Sort locations by state, then city — same order as the original
    # itemgetter(1, 0) sort.
    for city, state in sorted(grouped, key=lambda loc: (loc[1], loc[0])):
        venues_list = []
        for venue in grouped[(city, state)]:
            num_upcoming = sum(
                1 for show in Show.query.filter_by(venue_id=venue.id).all()
                if show.start_time > now
            )
            venues_list.append({
                "id": venue.id,
                "name": venue.name,
                "num_upcoming_shows": num_upcoming
            })
        data.append({
            "city": city,
            "state": state,
            "venues": venues_list
        })
    return render_template('pages/venues.html', areas=data)
@app.route('/venues/search', methods=['POST'])
def search_venues():
    """Case-insensitive partial-name search over venues.

    E.g. "Hop" matches "The Musical Hop"; "Music" matches both
    "The Musical Hop" and "Park Square Live Music & Coffee".
    """
    search_term = request.form.get('search_term', '').strip()
    # Use filter + ilike (not filter_by) for a case-insensitive LIKE search.
    matches = Venue.query.filter(Venue.name.ilike('%' + search_term + '%')).all()

    now = datetime.now()
    venue_list = []
    for venue in matches:
        num_upcoming = sum(
            1 for show in Show.query.filter_by(venue_id=venue.id).all()
            if show.start_time > now
        )
        venue_list.append({
            "id": venue.id,
            "name": venue.name,
            "num_upcoming_shows": num_upcoming
        })

    response = {
        "count": len(matches),
        "data": venue_list
    }
    return render_template('pages/search_venues.html', results=response,
                           search_term=search_term)
@app.route('/venues/<int:venue_id>')
def show_venue(venue_id):
    """Venue detail page: venue info plus its past and upcoming shows."""
    venue = Venue.query.get(venue_id)
    if not venue:
        # Unknown id (e.g. hand-typed URL): redirect home instead of erroring.
        return redirect(url_for('index'))

    genres = [genre.name for genre in venue.genres]

    past_shows = []
    upcoming_shows = []
    now = datetime.now()
    for show in venue.shows:
        # assumes show.artist resolves to the Artist row — TODO confirm; the
        # Artist model visible here declares no backref for it.
        show_info = {
            "artist_id": show.artist_id,
            "artist_name": show.artist.name,
            "artist_image_link": show.artist.image_link,
            "start_time": format_datetime(str(show.start_time))
        }
        # A show starting exactly "now" lands in neither bucket, matching the
        # original strict comparisons.
        if show.start_time > now:
            upcoming_shows.append(show_info)
        elif show.start_time < now:
            past_shows.append(show_info)

    # Re-insert dashes for display; phone is stored digits-only.
    # Guard against empty/short values so slicing can't produce garbage
    # (the original sliced unconditionally).
    phone = venue.phone or ''
    if len(phone) == 10:
        phone = phone[:3] + '-' + phone[3:6] + '-' + phone[6:]

    data = {
        "id": venue_id,
        "name": venue.name,
        "genres": genres,
        "address": venue.address,
        "city": venue.city,
        "state": venue.state,
        "phone": phone,
        "website": venue.website,
        "facebook_link": venue.facebook_link,
        "seeking_talent": venue.seeking_talent,
        "seeking_description": venue.seeking_description,
        "image_link": venue.image_link,
        "past_shows": past_shows,
        "past_shows_count": len(past_shows),
        "upcoming_shows": upcoming_shows,
        "upcoming_shows_count": len(upcoming_shows)
    }
    return render_template('pages/show_venue.html', venue=data)
# Create Venue
# ----------------------------------------------------------------
@app.route('/venues/create', methods=['GET'])
def create_venue_form():
  """Render the blank new-venue form."""
  form = VenueForm()
  return render_template('forms/new_venue.html', form=form)
@app.route('/venues/create', methods=['POST'])
def create_venue_submission():
    """Persist a new Venue (and any new Genres) from the submitted form."""
    form = VenueForm()
    # Validate before doing any work; bounce back to the form with errors
    # flashed if anything is off (the original parsed all fields first).
    if not form.validate():
        flash(form.errors)
        return redirect(url_for('create_venue_form'))

    name = form.name.data.strip()
    city = form.city.data.strip()
    state = form.state.data
    address = form.address.data.strip()
    phone = re.sub(r'\D', '', form.phone.data)  # store digits only
    genres = form.genres.data  # e.g. ['Alternative', 'Classical', 'Country']
    seeking_talent = True if form.seeking_talent.data == 'Yes' else False
    seeking_description = form.seeking_description.data.strip()
    image_link = form.image_link.data.strip()
    website = form.website.data.strip()
    facebook_link = form.facebook_link.data.strip()

    error = False
    try:
        venue = Venue(name=name, city=city, state=state, address=address,
                      phone=phone, seeking_talent=seeking_talent,
                      seeking_description=seeking_description,
                      image_link=image_link, website=website,
                      facebook_link=facebook_link)
        for genre in genres:
            # Reuse an existing Genre row if one exists, else create it.
            fetch_genre = Genre.query.filter_by(name=genre).one_or_none()
            if fetch_genre:
                venue.genres.append(fetch_genre)
            else:
                new_genre = Genre(name=genre)
                db.session.add(new_genre)
                venue.genres.append(new_genre)
        db.session.add(venue)
        db.session.commit()
    except Exception:
        error = True
        db.session.rollback()
        print(sys.exc_info())  # sys is now imported at module level
    finally:
        db.session.close()

    if not error:
        flash('Venue ' + name + ' was successfully listed!')
    else:
        # Fixed: the original never reported failures to the user.
        flash('An error occurred. Venue ' + name + ' could not be listed.')
    return render_template('pages/home.html')
@app.route('/venues/<venue_id>', methods=['DELETE'])
def delete_venue(venue_id):
    """Delete a venue; returns JSON consumed by the front-end script."""
    venue = Venue.query.get(venue_id)
    if not venue:
        # Nothing with that id (forged or stale request): just go home.
        return redirect(url_for('index'))

    # Keep the name around — the ORM object is unusable after the delete.
    venue_name = venue.name
    error_on_delete = False
    try:
        db.session.delete(venue)
        db.session.commit()
    except Exception:
        error_on_delete = True
        db.session.rollback()
    finally:
        db.session.close()

    if error_on_delete:
        flash('An error occurred deleting venue ' + venue_name)
        print("Error in delete_venue()")
        # `abort` is imported from flask at module level (it wasn't before,
        # so this line used to raise NameError instead of a clean 500).
        abort(500)
    flash('Successfully removed venue ' + venue_name)
    return jsonify({
        'deleted': True,
        'url': url_for('venues')
    })
# Artists
# ----------------------------------------------------------------
@app.route('/artists')
def artists():
    """List all artists (id + name), alphabetically.

    Replaces the hard-coded mock data with a real DB query (resolves TODO).
    """
    data = [
        {"id": artist.id, "name": artist.name}
        for artist in Artist.query.order_by(Artist.name).all()
    ]
    return render_template('pages/artists.html', artists=data)
@app.route('/artists/search', methods=['POST'])
def search_artists():
    """Case-insensitive partial-name search over artists.

    E.g. "A" matches "Guns N Petals", "Matt Quevado" and "The Wild Sax Band";
    "band" matches only "The Wild Sax Band". (Resolves TODO; mirrors
    search_venues.)
    """
    search_term = request.form.get('search_term', '').strip()
    matches = Artist.query.filter(Artist.name.ilike('%' + search_term + '%')).all()

    now = datetime.now()
    artist_list = []
    for artist in matches:
        num_upcoming = sum(
            1 for show in Show.query.filter_by(artist_id=artist.id).all()
            if show.start_time > now
        )
        artist_list.append({
            "id": artist.id,
            "name": artist.name,
            "num_upcoming_shows": num_upcoming
        })

    response = {
        "count": len(matches),
        "data": artist_list
    }
    return render_template('pages/search_artists.html', results=response,
                           search_term=search_term)
@app.route('/artists/<int:artist_id>')
def show_artist(artist_id):
    """Artist detail page.

    TODO: still serves hard-coded mock data; a real implementation needs
    Artist columns (website, seeking_venue, ...) that the current model lacks.
    """
    data1 = {
        "id": 4,
        "name": "Guns N Petals",
        "genres": ["Rock n Roll"],
        "city": "San Francisco",
        "state": "CA",
        "phone": "326-123-5000",
        "website": "https://www.gunsnpetalsband.com",
        "facebook_link": "https://www.facebook.com/GunsNPetals",
        "seeking_venue": True,
        "seeking_description": "Looking for shows to perform at in the San Francisco Bay Area!",
        "image_link": "https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80",
        "past_shows": [{
            "venue_id": 1,
            "venue_name": "The Musical Hop",
            "venue_image_link": "https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60",
            "start_time": "2019-05-21T21:30:00.000Z"
        }],
        "upcoming_shows": [],
        "past_shows_count": 1,
        "upcoming_shows_count": 0,
    }
    data2 = {
        "id": 5,
        "name": "Matt Quevedo",
        "genres": ["Jazz"],
        "city": "New York",
        "state": "NY",
        "phone": "300-400-5000",
        "facebook_link": "https://www.facebook.com/mattquevedo923251523",
        "seeking_venue": False,
        "image_link": "https://images.unsplash.com/photo-1495223153807-b916f75de8c5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=334&q=80",
        "past_shows": [{
            "venue_id": 3,
            "venue_name": "Park Square Live Music & Coffee",
            "venue_image_link": "https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80",
            "start_time": "2019-06-15T23:00:00.000Z"
        }],
        "upcoming_shows": [],
        "past_shows_count": 1,
        "upcoming_shows_count": 0,
    }
    data3 = {
        "id": 6,
        "name": "The Wild Sax Band",
        "genres": ["Jazz", "Classical"],
        "city": "San Francisco",
        "state": "CA",
        "phone": "432-325-5432",
        "seeking_venue": False,
        "image_link": "https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80",
        "past_shows": [],
        "upcoming_shows": [{
            "venue_id": 3,
            "venue_name": "Park Square Live Music & Coffee",
            "venue_image_link": "https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80",
            "start_time": "2035-04-01T20:00:00.000Z"
        }, {
            "venue_id": 3,
            "venue_name": "Park Square Live Music & Coffee",
            "venue_image_link": "https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80",
            "start_time": "2035-04-08T20:00:00.000Z"
        }, {
            "venue_id": 3,
            "venue_name": "Park Square Live Music & Coffee",
            "venue_image_link": "https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80",
            "start_time": "2035-04-15T20:00:00.000Z"
        }],
        "past_shows_count": 0,
        "upcoming_shows_count": 3,
    }
    # Fixed: the original indexed [0] on the filtered list, which raised
    # IndexError for any artist_id other than 4/5/6. Fall back to home.
    matches = [d for d in (data1, data2, data3) if d['id'] == artist_id]
    if not matches:
        return redirect(url_for('index'))
    return render_template('pages/show_artist.html', artist=matches[0])
# Update
# ----------------------------------------------------------------
@app.route('/artists/<int:artist_id>/edit', methods=['GET'])
def edit_artist(artist_id):
  """Render the edit form for an artist.

  NOTE(review): still serves hard-coded mock data; the TODO below (populate
  the form from the Artist with this id) is unimplemented, and the current
  Artist model lacks the website/seeking fields this mock exposes.
  """
  form = ArtistForm()
  artist={
    "id": 4,
    "name": "Guns N Petals",
    "genres": ["Rock n Roll"],
    "city": "San Francisco",
    "state": "CA",
    "phone": "326-123-5000",
    "website": "https://www.gunsnpetalsband.com",
    "facebook_link": "https://www.facebook.com/GunsNPetals",
    "seeking_venue": True,
    "seeking_description": "Looking for shows to perform at in the San Francisco Bay Area!",
    "image_link": "https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80"
  }
  # TODO: populate form with fields from artist with ID <artist_id>
  return render_template('forms/edit_artist.html', form=form, artist=artist)
@app.route('/artists/<int:artist_id>/edit', methods=['POST'])
def edit_artist_submission(artist_id):
  """Handle the artist edit form POST.

  NOTE(review): stub — nothing is persisted yet (see TODO); it just
  redirects back to the artist page.
  """
  # TODO: take values from the form submitted, and update existing
  # artist record with ID <artist_id> using the new attributes
  return redirect(url_for('show_artist', artist_id=artist_id))
@app.route('/venues/<int:venue_id>/edit', methods=['GET'])
def edit_venue(venue_id):
    """Render the edit form for an existing venue, pre-filled from the DB."""
    venue = Venue.query.get(venue_id)
    if not venue:
        # Unknown id: bail out to the home page.
        return redirect(url_for('index'))

    # Pre-populate the WTForm from the model instance.
    form = VenueForm(obj=venue)
    genres = [genre.name for genre in venue.genres]
    venue_data = {
        "id": venue_id,
        "name": venue.name,
        "genres": genres,
        "address": venue.address,
        "city": venue.city,
        "state": venue.state,
        "phone": venue.phone,
        "website": venue.website,
        "facebook_link": venue.facebook_link,
        "seeking_talent": venue.seeking_talent,
        "seeking_description": venue.seeking_description,
        "image_link": venue.image_link
    }
    return render_template('forms/edit_venue.html', form=form, venue=venue_data)
@app.route('/venues/<int:venue_id>/edit', methods=['POST'])
def edit_venue_submission(venue_id):
    """Apply the submitted edit form to venue <venue_id>."""
    form = VenueForm()
    # Redirect back to the edit form if validation fails.
    if not form.validate():
        flash(form.errors)
        return redirect(url_for('edit_venue', venue_id=venue_id))

    name = form.name.data.strip()
    city = form.city.data.strip()
    state = form.state.data
    address = form.address.data.strip()
    phone = re.sub(r'\D', '', form.phone.data)  # digits only
    genres = form.genres.data
    seeking_talent = True if form.seeking_talent.data == 'Yes' else False
    seeking_description = form.seeking_description.data.strip()
    image_link = form.image_link.data.strip()
    website = form.website.data.strip()
    facebook_link = form.facebook_link.data.strip()

    error_in_edit = False
    try:
        # Fetch the original venue object and update its fields.
        venue = Venue.query.get(venue_id)
        venue.name = name
        venue.city = city
        venue.state = state
        venue.address = address
        venue.phone = phone
        venue.seeking_talent = seeking_talent
        venue.seeking_description = seeking_description
        venue.image_link = image_link
        venue.website = website
        venue.facebook_link = facebook_link

        # Rebuild the genre list from scratch, get-or-creating Genre rows.
        venue.genres = []
        for genre in genres:
            fetch_genre = Genre.query.filter_by(name=genre).one_or_none()
            if fetch_genre:
                venue.genres.append(fetch_genre)
            else:
                new_genre = Genre(name=genre)
                db.session.add(new_genre)
                venue.genres.append(new_genre)
        db.session.commit()
    except Exception:
        error_in_edit = True
        db.session.rollback()
    finally:
        db.session.close()

    if not error_in_edit:
        flash('Venue ' + name + ' was successfully updated!')
        return redirect(url_for('show_venue', venue_id=venue_id))
    flash('An error occurred. Venue ' + name + ' could not be updated.')
    print("Error in edit_venue_submission()")
    abort(500)  # abort is imported from flask at module level now
# Create Artist
# ----------------------------------------------------------------
@app.route('/artists/create', methods=['GET'])
def create_artist_form():
  """Render the blank new-artist form."""
  form = ArtistForm()
  return render_template('forms/new_artist.html', form=form)
@app.route('/artists/create', methods=['POST'])
def create_artist_submission():
    """Persist a new Artist from the submitted form (resolves TODOs).

    Artist.genres is a single string column, so the selected genres are
    stored comma-joined.
    """
    form_data = request.form
    name = form_data.get('name', '').strip()
    error = False
    try:
        artist = Artist(
            name=name,
            city=form_data.get('city', '').strip(),
            state=form_data.get('state', ''),
            phone=re.sub(r'\D', '', form_data.get('phone', '')),  # digits only
            genres=','.join(form_data.getlist('genres')),
            image_link=form_data.get('image_link', '').strip(),
            facebook_link=form_data.get('facebook_link', '').strip(),
        )
        db.session.add(artist)
        db.session.commit()
    except Exception:
        error = True
        db.session.rollback()
    finally:
        db.session.close()

    if not error:
        flash('Artist ' + name + ' was successfully listed!')
    else:
        flash('An error occurred. Artist ' + name + ' could not be listed.')
    return render_template('pages/home.html')
# Shows
# ----------------------------------------------------------------
@app.route('/shows')
def shows():
    """List every show with its venue and artist details (resolves TODO)."""
    data = []
    for show in Show.query.all():
        # Look the rows up explicitly: Artist declares no backref for shows,
        # so show.artist is not guaranteed here.
        venue = Venue.query.get(show.venue_id)
        artist = Artist.query.get(show.artist_id)
        data.append({
            "venue_id": show.venue_id,
            "venue_name": venue.name if venue else None,
            "artist_id": show.artist_id,
            "artist_name": artist.name if artist else None,
            "artist_image_link": artist.image_link if artist else None,
            "start_time": format_datetime(str(show.start_time))
        })
    return render_template('pages/shows.html', shows=data)
@app.route('/shows/create')
def create_shows():
  """Render the blank new-show form."""
  # renders form. do not touch.
  form = ShowForm()
  return render_template('forms/new_show.html', form=form)
@app.route('/shows/create', methods=['POST'])
def create_show_submission():
    """Persist a new Show from the submitted form (resolves TODOs)."""
    error = False
    try:
        show = Show(
            artist_id=request.form['artist_id'],
            venue_id=request.form['venue_id'],
            start_time=request.form['start_time'],
        )
        db.session.add(show)
        db.session.commit()
    except Exception:
        error = True
        db.session.rollback()
    finally:
        db.session.close()

    if not error:
        flash('Show was successfully listed!')
    else:
        flash('An error occurred. Show could not be listed.')
    return render_template('pages/home.html')
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page."""
    return render_template('errors/404.html'), 404
@app.errorhandler(500)
def server_error(error):
    """Render the custom 500 page."""
    return render_template('errors/500.html'), 500
# When not running in debug mode, also log INFO and above to error.log.
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
# Script entry point: runs the Flask development server on the default port.
if __name__ == '__main__':
    app.run()
# Or specify port manually (reads PORT from the environment; needs `import os`):
'''
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
'''
| [
"rahula@vt.edu"
] | rahula@vt.edu |
eb129243a035487b54c5721c6288ed1cc40cdb22 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/models-library/src/models_library/services_access.py | 9e121fad95a0a38e9c550ccca50cfff86227dfc2 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 666 | py | """Service access rights models
"""
from pydantic import BaseModel, Field
from pydantic.types import PositiveInt
# Type alias: group ids are positive integers.
GroupId = PositiveInt
class ServiceGroupAccessRights(BaseModel):
    """Access flags a single group holds on a service."""
    execute_access: bool = Field(
        default=False,
        description="defines whether the group can execute the service",
    )
    write_access: bool = Field(
        default=False, description="defines whether the group can modify the service"
    )
class ServiceAccessRights(BaseModel):
    """Per-group access rights of a service (None when not specified)."""
    access_rights: dict[GroupId, ServiceGroupAccessRights] | None = Field(
        None,
        alias="accessRights",
        description="service access rights per group id",
    )
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
aa4d37db1784f9b47b8a48004ec42075076b27bb | 65576a4a827f08d4c8d950b418c03f93a0ee5f04 | /python-program/lcs.py | 59457d90e6fe1e959bcd9e11ac1de6f78d479d0a | [] | no_license | ayuranjan/ARESNAL | ea5dff1e43a7d6ff53250091091e2a7921f9dab5 | db2664a3db01d681368de1ffa91e20c87b62eb53 | refs/heads/master | 2022-11-12T05:19:55.265418 | 2020-06-27T00:11:13 | 2020-06-27T00:11:13 | 138,479,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # Dynamic Programming implementation of LCS problem
def lcs(X, Y):
    """Return the length of the Longest Common Subsequence of X and Y.

    Classic bottom-up dynamic programming: L[i][j] holds the LCS length of
    X[:i] and Y[:j], so the answer is L[m][n]. O(m*n) time and space.
    """
    m = len(X)
    n = len(Y)
    # (m+1) x (n+1) table, pre-filled with 0 so row/column 0 (the empty
    # prefix) needs no special-casing inside the loops.
    # NOTE: Python-2-only `xrange` replaced by `range` so this also runs on
    # Python 3; behavior is unchanged.
    L = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                L[i][j] = L[i - 1][j - 1] + 1
            else:
                L[i][j] = max(L[i - 1][j], L[i][j - 1])
    return L[m][n]
#end of function lcs
# Driver program to test the above function
# (Python 2 print-statement syntax; expected output: "Length of LCS is  4")
X = "AGGTAB"
Y = "GXTXAYB"
print "Length of LCS is ", lcs(X, Y)
# This code is contributed by Nikhil Kumar Singh(nickzuck_007)
| [
"ayushranjancr7@gmail.comgit config --global user.name Ayushgit config --global user.email ayushranjancr7@gmail.com"
] | ayushranjancr7@gmail.comgit config --global user.name Ayushgit config --global user.email ayushranjancr7@gmail.com |
425e6ebef4b89f2b0fc514000916eb5d1fa1d4c7 | e06802b9d35d0c1f9ed8046b512af8bfb4366232 | /algorithm_practices/ABC/ABC_20200301/A.py | 5debf79959a5247b30030c2fcc89dec30e333316 | [] | no_license | oyuchangit/Competitive_programming_exercises | e12ae437d1f86138f94e52d8c6d6f952d0c07539 | 4e8210b77dae0a889dd5a4c1fe3bb09727eb0466 | refs/heads/master | 2021-07-08T15:50:42.073220 | 2020-12-28T08:51:59 | 2020-12-28T08:51:59 | 220,625,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | N = int(input())
if N % 2 == 0:
print(N // 2)
else:
print(N //2 + 1) | [
"oyuwantto@gmail.com"
] | oyuwantto@gmail.com |
729f18e269f677d40622fcf61aebbc76d5b43238 | 63bbe2f37dc3f9dd11e31c5f0dbcaee362bf069f | /weixitest/media.py.bak | cf7082c78c5eee1883235b19742ab2ef67444fe3 | [] | no_license | yuxuefendou/weixin | a3809e86e066a5c14abf38c663834ccf2e4a43cb | 73ebd84c0d8b68214d89cf1a53597544e926b8fc | refs/heads/master | 2021-04-06T12:02:38.724598 | 2018-03-09T09:07:56 | 2018-03-09T09:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | bak | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Created on 14:03 2017/11/4
@author: acer
'''
from basic import Basic
import urllib2
import poster.encode
from poster.streaminghttp import register_openers
class Media(object):
    """Uploads temporary media assets to the WeChat (weixin) media API.

    Python 2 code: relies on urllib2 and the third-party `poster` library
    for streaming multipart/form-data uploads.
    """
    def __init__(self):
        # Install poster's streaming HTTP handlers into urllib2.
        register_openers()
    # Upload an image/media file (original comment: 上传图片).
    # NOTE(review): "uplaod" is a typo for "upload", but it is the public
    # name callers use (see __main__ below); renaming would break them.
    def uplaod(self, accessToken, filePath, mediaType):
        openFile = open(filePath, "rb")  # NOTE(review): never closed explicitly
        param = {'media': openFile}
        postData, postHeaders = poster.encode.multipart_encode(param)
        postUrl = "https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=%s" % (accessToken, mediaType)
        request = urllib2.Request(postUrl, postData, postHeaders)
        urlResp = urllib2.urlopen(request)
        print urlResp.read()
# Manual smoke test: upload a local image using a fresh access token.
if __name__ == '__main__':
    myMedia = Media()
    accessToken = Basic().get_access_token()
    filePath = "D:/Pythonwork/weixin/weixitest/test.jpg" # fill in the actual local path (original comment: 请安实际填写)
    mediaType = "image"
myMedia.uplaod(accessToken, filePath, mediaType) | [
"1131419557@qq.com"
] | 1131419557@qq.com |
5431703d1c4fa12874ad6fb9cb4a6c792be79bb7 | 0809ea2739d901b095d896e01baa9672f3138825 | /ORMproject1/testApp/migrations/0002_proxyemployee_proxyemployee2.py | 947459ac9771a7ef22b74ac0159c4d06da01f56a | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 3.0.5 on 2020-04-30 05:40
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: registers two proxy models,
    ``ProxyEmployee`` and ``ProxyEmployee2``, over ``testApp.Employee``.

    Proxy models reuse the existing employee table, so this migration only
    updates Django's model bookkeeping and makes no schema change.
    """

    dependencies = [
        ('testApp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProxyEmployee',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('testApp.employee',),
        ),
        migrations.CreateModel(
            name='ProxyEmployee2',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('testApp.employee',),
        ),
    ]
| [
"djangopython1988@gmail.com"
] | djangopython1988@gmail.com |
a3cbe14666f7b53f829f52bf78e3d94f8cbf88ac | 0e96b335079815d176e96cf6e903f899f3ade24f | /Ecommerce-site1/site1/store/migrations/0002_productmdl_image.py | 2d6bec9bbeb42170788f77cd4ff5042e3910a7ae | [] | no_license | sudarshannkarki/sda-project | ad2c0a1bed410754b078a53936ff8c11f9f4b26a | b2b17296269028149131ce273d098f575d8cf260 | refs/heads/master | 2022-12-18T14:46:19.429488 | 2020-09-29T11:31:50 | 2020-09-29T11:31:50 | 293,024,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.0.7 on 2020-08-21 07:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional (nullable, blankable)
    ``image`` ImageField to ``store.ProductMdl``.
    """

    dependencies = [
        ('store', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='productmdl',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
| [
"sudarshannkarki@gmail.com"
] | sudarshannkarki@gmail.com |
798728848c3880f0e9c27998ada62af9f5d61bd4 | 85ce509476109c756e26cc7f49298ab42d16ad9b | /openapi_client/models/overpayment.py | f4b2f95c29bf122789f2b3b01090b824d443cf23 | [] | no_license | 33N-Ltd/Xero-Python-SDK | 0ea3388fa94a1becc45a0bd3a7e0687ed64aeba7 | b31cbd34e68e5dce31394c7c57601c39997214ea | refs/heads/master | 2020-06-28T01:20:32.424682 | 2019-08-21T07:55:40 | 2019-08-21T07:55:40 | 200,104,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,682 | py | # coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Overpayment(object):
    """Overpayment model (originally generated by OpenAPI Generator,
    https://openapi-generator.tech).

    ``openapi_types`` maps attribute name -> attribute type and
    ``attribute_map`` maps attribute name -> JSON key.  The ~400 lines of
    generated per-attribute property boilerplate are replaced by
    ``_install_properties`` below; external behavior -- getters, setters,
    the ValueError raised for invalid ``type``/``status`` values, and
    ``to_dict``/``to_str``/``__repr__``/equality -- is unchanged.  The
    Python 2 compatibility shim ``six.iteritems`` is replaced by native
    dict iteration.
    """

    openapi_types = {
        'type': 'str',
        'contact': 'Contact',
        'date': 'date',
        'status': 'str',
        'line_amount_types': 'LineAmountTypes',
        'line_items': 'list[LineItem]',
        'sub_total': 'float',
        'total_tax': 'float',
        'total': 'float',
        'updated_date_utc': 'datetime',
        'currency_code': 'CurrencyCode',
        'overpayment_id': 'str',
        'currency_rate': 'float',
        'remaining_credit': 'float',
        'allocations': 'list[Allocation]',
        'payments': 'list[Payment]',
        'has_attachments': 'bool',
        'attachments': 'list[Attachment]'
    }

    attribute_map = {
        'type': 'Type',
        'contact': 'Contact',
        'date': 'Date',
        'status': 'Status',
        'line_amount_types': 'LineAmountTypes',
        'line_items': 'LineItems',
        'sub_total': 'SubTotal',
        'total_tax': 'TotalTax',
        'total': 'Total',
        'updated_date_utc': 'UpdatedDateUTC',
        'currency_code': 'CurrencyCode',
        'overpayment_id': 'OverpaymentID',
        'currency_rate': 'CurrencyRate',
        'remaining_credit': 'RemainingCredit',
        'allocations': 'Allocations',
        'payments': 'Payments',
        'has_attachments': 'HasAttachments',
        'attachments': 'Attachments'
    }

    # Attributes whose setters validate against a closed set of values
    # (same sets as the generated per-attribute setters).
    _enum_values = {
        'type': ["RECEIVE-OVERPAYMENT", "SPEND-OVERPAYMENT"],
        'status': ["AUTHORISED", "PAID", "VOIDED"],
    }

    def __init__(self, type=None, contact=None, date=None, status=None,
                 line_amount_types=None, line_items=None, sub_total=None,
                 total_tax=None, total=None, updated_date_utc=None,
                 currency_code=None, overpayment_id=None, currency_rate=None,
                 remaining_credit=None, allocations=None, payments=None,
                 has_attachments=None, attachments=None):  # noqa: E501
        """Overpayment - a model defined in OpenAPI"""
        # Snapshot the keyword arguments before creating any locals.
        supplied = {k: v for k, v in locals().items() if k != 'self'}
        # Every attribute gets a private backing field, exactly as in the
        # generated original.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Assign only values the caller actually supplied; assignment goes
        # through the properties, so enum validation still runs.
        for attr, value in supplied.items():
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Overpayment):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _install_properties(cls):
    """Create a standard getter/setter property for every OpenAPI attribute.

    Replaces the generated per-attribute boilerplate.  Setters for attributes
    listed in ``cls._enum_values`` raise ValueError on values outside the
    allowed set, with the same message as the generated code.
    """
    def make_property(name):
        private = '_' + name
        allowed = cls._enum_values.get(name)

        def getter(self):
            return getattr(self, private)

        def setter(self, value):
            if allowed is not None and value not in allowed:
                raise ValueError(
                    "Invalid value for `{0}` ({1}), must be one of {2}"  # noqa: E501
                    .format(name, value, allowed)
                )
            setattr(self, private, value)

        getter.__doc__ = "Gets the {0} of this Overpayment.".format(name)
        setter.__doc__ = "Sets the {0} of this Overpayment.".format(name)
        return property(getter, setter)

    for name in cls.openapi_types:
        setattr(cls, name, make_property(name))
    return cls


_install_properties(Overpayment)
| [
"michael@33n.co.uk"
] | michael@33n.co.uk |
20c69bda08acefab9be2eee8b98a21f97815c495 | 2596d930832260faf2514a35663744340ffecd29 | /functions/diagnose_name_gender_attraction_health.py | 9b709e39c12d8bb4e6c62ebddedeb31f134cb226 | [] | no_license | fuggla/fc-aqua-fish | 971580cc90deae4efa8c599beb732b6e0f10cc12 | 566198fe9a5257851118382233d3ad120ef5ceed | refs/heads/master | 2023-08-31T13:18:36.376163 | 2020-07-30T18:07:48 | 2020-07-30T18:07:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py |
import arcade
def diagnose_name_gender_attraction_health(fish_list, info_list):
    # Render each fish's status next to its sprite.  Each entry of
    # ``info_list`` presumably holds [name, gender, attraction, health]
    # for the fish at the same index -- TODO confirm against the caller.
    # ``zip`` stops at the shorter list, matching the original
    # min-length index loop.
    for sprite, info in zip(fish_list, info_list):
        x = sprite.center_x
        y = sprite.center_y
        # Name + gender above the sprite; the last two fields share the
        # sprite position but use different text anchors.
        arcade.draw_text(str(info[0]) + " " + str(info[1]), x, y + 24, arcade.color.BLACK, 18)
        arcade.draw_text(str(info[2]), x, y, arcade.color.BLACK, 18)
        arcade.draw_text(str(info[3]), x, y, arcade.color.BLACK, 18, anchor_x="left", anchor_y="top")
| [
"john_nordstrand@hotmail.com"
] | john_nordstrand@hotmail.com |
f3f75af6d875f307aaf0c5dd59ebde978c2efb5d | 19101bf9478c585f73540f1962494a0315ccd0a6 | /ax/models/tests/test_alebo_initializer.py | 8c37f09dce95a0e81f510189ed5551873bcd1268 | [
"MIT"
] | permissive | liusulin/Ax | 4ca1dcaa34f129d25faa2f52a8094b5f6e399eba | 850b6975b7c7f9960ad5461e71d0304b2670232a | refs/heads/main | 2023-07-14T01:02:38.044397 | 2021-08-18T15:34:06 | 2021-08-18T15:35:11 | 397,664,102 | 1 | 0 | MIT | 2021-08-18T16:16:10 | 2021-08-18T16:16:09 | null | UTF-8 | Python | false | false | 1,047 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.models.random.alebo_initializer import ALEBOInitializer
from ax.utils.common.testutils import TestCase
class ALEBOSobolTest(TestCase):
    def testALEBOSobolModel(self):
        """Exercise ALEBOInitializer: stored projection, gen(), nsamp limit."""
        proj = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        expected_q = np.linalg.pinv(proj) @ proj
        box = [(-1.0, 1.0)] * 3

        # Constructing the model should store the projection Q = pinv(B) B.
        model = ALEBOInitializer(B=proj)
        self.assertTrue(np.allclose(expected_q, model.Q))

        # gen() must return points of the right shape, inside the box, and
        # lying in the embedding subspace (i.e. fixed points of Q).
        points, _weights = model.gen(5, bounds=box)
        self.assertEqual(points.shape, (5, 3))
        self.assertTrue(points.min() >= -1.0)
        self.assertTrue(points.max() <= 1.0)
        self.assertTrue(np.allclose(expected_q @ points.T, points.T))

        # With only one candidate sample available, asking for two must fail.
        limited = ALEBOInitializer(B=proj, nsamp=1)
        with self.assertRaises(ValueError):
            limited.gen(2, bounds=box)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
66f2df3cf8c49c743f988bcbdddae4207bad389c | c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71 | /Algorithms/Medium/105. Construct Binary Tree from Preorder and Inorder Traversal/answer.py | bec4709a6dc30054d5688961993bb42736c611cf | [
"Apache-2.0"
] | permissive | kenwoov/PlayLeetCode | b2fdc43d799c37683a9efdc31c4df159cf553bf5 | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | refs/heads/master | 2022-12-17T05:54:22.775972 | 2020-09-26T14:08:43 | 2020-09-26T14:08:43 | 214,839,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from typing import List
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """LeetCode 105: rebuild a binary tree from preorder + inorder traversals."""

    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Return the root of the unique tree with the given traversals.

        The original sliced both lists and called ``list.index`` at every
        level, which is O(n^2) time and space.  This version precomputes a
        value -> inorder-index map and consumes ``preorder`` left to right:
        O(n) time, O(n) extra space.  Assumes the node values are distinct,
        as the problem guarantees.
        """
        pos = {val: i for i, val in enumerate(inorder)}
        self._pre_idx = 0  # index of the next root to consume from preorder

        def build(lo: int, hi: int) -> TreeNode:
            # Build the subtree whose inorder span is inorder[lo..hi].
            if lo > hi:
                return None
            root_val = preorder[self._pre_idx]
            self._pre_idx += 1
            root = TreeNode(root_val)
            mid = pos[root_val]
            # Preorder lists the root, then the whole left subtree, then the
            # right subtree -- so the left half must be built first.
            root.left = build(lo, mid - 1)
            root.right = build(mid + 1, hi)
            return root

        return build(0, len(inorder) - 1)
if __name__ == "__main__":
    # Smoke test: traversals of the tree 3 -> (9, 20 -> (15, 7)); prints the
    # root TreeNode's default repr, not the tree contents.
    s = Solution()
    result = s.buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    print(result)
| [
"kenwoov@outlook.com"
] | kenwoov@outlook.com |
b9cdb9a5157fe7ff4a82a7e5b176f97a6af89eee | 8494c17343d8918f83236b239275bde50c1490bc | /production/test/spamrgb.py | 40f236efec605c1e795e374e5ddc68849ba9511f | [
"MIT"
] | permissive | Blinkinlabs/EightByEight | 87084d0287abb9a6a69341173e134cfb3367344e | 9df6381c33987d6e1bdc88115bfc41287b6bc875 | refs/heads/master | 2020-04-10T01:41:39.113292 | 2019-09-18T18:03:21 | 2019-09-18T18:03:21 | 61,876,895 | 20 | 8 | null | 2016-11-22T11:01:33 | 2016-06-24T10:25:22 | Eagle | UTF-8 | Python | false | false | 1,195 | py | #!/usr/bin/python
# Spam RGB test pattern over UDP
import socket
import fcntl
import struct
import time
# Destination UDP port the LED matrix listens on.
UDP_PORT = 5453

# Test pattern: red, green, blue, white -- cycled one per second below.
colors = (
    (255,0,0),
    (0,255,0),
    (0,0,255),
    (255,255,255)
)
# from:
# https://www.quora.com/Which-Python-library-allows-us-to-specify-network-interface-to-connect-to-the-Internet
def get_ip_address(ifname):
    """Return the IPv4 address bound to interface *ifname* (e.g. "wlan0").

    Uses the SIOCGIFADDR ioctl (0x8915) on a throwaway datagram socket, so
    this only works on Linux.  The original leaked the socket; it is now
    always closed.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )
        # The address lives at bytes 20..23 of the returned ifreq struct.
        return socket.inet_ntoa(packed[20:24])
    finally:
        s.close()
def sendColor(color):
    """Broadcast one solid-color frame for the 8x8 RGB matrix over UDP.

    ``color`` is an (r, g, b) tuple of 0-255 ints.  Uses the module-level
    broadcast socket ``s`` and ``UDP_PORT``.
    NOTE(review): the packet is built as a Python 2 byte string via chr();
    under Python 3, sendto() would require bytes -- confirm before porting.
    """
    MESSAGE = ""
    # 8-byte header: "PLST" magic followed by a 4-byte blend time of zero.
    MESSAGE += chr(0x50) #Magic ("P")
    MESSAGE += chr(0x4c) # "L"
    MESSAGE += chr(0x53) # "S"
    MESSAGE += chr(0x54) # "T"
    MESSAGE += chr(0x00) # blend time
    MESSAGE += chr(0x00)
    MESSAGE += chr(0x00)
    MESSAGE += chr(0x00)
    # 64 pixels, 3 bytes each (R, G, B) -- every pixel gets the same color.
    for pixel in range(0,8*8):
        MESSAGE += chr(color[0])
        MESSAGE += chr(color[1])
        MESSAGE += chr(color[2])
    s.sendto(MESSAGE, ('<broadcast>', UDP_PORT))
# Bind to the wlan0 interface address and enable UDP broadcast on the socket.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((get_ip_address("wlan0"), 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

# Cycle through the test colors forever, one frame per second.
while True:
    for color in colors:
        sendColor(color)
        time.sleep(1)
| [
"matt@blinkinlabs.com"
] | matt@blinkinlabs.com |
8f24ccd8e70cbb75f418b0ad12133319c853e24c | ebb16386c12ad3286aabdf8d8cac452712a3b715 | /Lesson_1_Grishechkina/task_1.py | 90a4a3138fe81193f7a06e2b7c727a0928529eaa | [] | no_license | TatianaGrishechkina/python_DBPyQT | 77e9b8b4ca9945e415b36a31085bc95780000c51 | 73831af5cb58cf978744f80c01e35b7d1efd559b | refs/heads/master | 2023-04-14T20:10:02.402412 | 2021-04-20T06:34:30 | 2021-04-20T06:34:30 | 351,223,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | """
1. Написать функцию host_ping(), в которой с помощью утилиты ping
будет проверяться доступность сетевых узлов.
Аргументом функции является список, в котором каждый сетевой узел
должен быть представлен именем хоста или ip-адресом.
В функции необходимо перебирать ip-адреса и проверять
их доступность с выводом соответствующего сообщения
(«Узел доступен», «Узел недоступен»). При этом ip-адрес
сетевого узла должен создаваться с помощью функции ip_address().
"""
from ipaddress import ip_address
from subprocess import Popen, PIPE
# Hosts to check: a mix of names and IPs; 192.0.2.1 (TEST-NET-1) is expected to fail.
host_list = ['127.0.0.1', 'google.com', 'google.ru', 'ya.ru', '192.0.2.1']
def host_ping(my_list, timeout=1000, requests=1):
    """Ping every host in ``my_list`` with the system ``ping`` utility.

    :param my_list: list of host names or IP-address strings
    :param timeout: per-reply timeout passed to ping via ``-w``
                    (milliseconds on Windows)
    :param requests: number of echo requests, passed via ``-n``
    :return: dict mapping the two result categories to a newline-joined
             string of the checked hosts (not a list)
    """
    res = {'Пинг проходит': '', 'Пинг не проходит': ''}
    print('Начинаем проверочку: ')
    for host in my_list:
        try:
            network = ip_address(host)
        except ValueError:
            # Not an IP literal -- ping the host name as given.
            network = host
        # Pass the command as an argument list: a single string with
        # shell=False fails on POSIX (the whole string is taken as the
        # program name).  NOTE(review): -w/-n are Windows ping flags;
        # adjust for other operating systems.
        args = ['ping', str(network), '-w', str(timeout), '-n', str(requests)]
        my_ping = Popen(args, shell=False, stdout=PIPE, stderr=PIPE)
        # Drain the pipes so a large output cannot deadlock the child.
        my_ping.communicate()
        if my_ping.returncode == 0:
            print(f'{host} - Хост доступен!')
            res['Пинг проходит'] += f'{str(network)}\n'
        else:
            print(f'{host} - Хост недоступен!')
            res['Пинг не проходит'] += f'{str(network)}\n'
    return res
if __name__ == '__main__':
    # Manual check against the sample host list defined above.
    host_ping(host_list)
| [
"tatiana.grishechkina@amdocs.com"
] | tatiana.grishechkina@amdocs.com |
9ad1b7332ec84c019061f954c4bed86e46639726 | 5ecb0648f4585afd7cf0871f0876106100c77931 | /selenium_wework_main/page/main.py | df0a9960224fb620f595d9426ccb31dfb7b310de | [] | no_license | anranshixing/aaa | d97156b231c9509e2188729825858f8003035987 | d7c27dee9314b7c5aa10abdc79cec2a1f294b005 | refs/heads/main | 2023-08-24T18:06:51.510122 | 2021-11-05T10:00:54 | 2021-11-05T10:00:54 | 405,020,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium_wework_main.page.add_member import AddMember
class Main:
    """Page object for the WeCom (enterprise WeChat) admin frame main page."""

    def __init__(self):
        # The browser choice comes from the ``browser`` environment variable;
        # only Chrome is implemented.  Fail fast with a clear error instead of
        # leaving self._driver unset and crashing later with AttributeError.
        browser = os.getenv('browser')
        if browser == 'chrome':
            self._driver = webdriver.Chrome()
        else:
            raise ValueError(f"unsupported browser: {browser!r} (set browser=chrome)")
        self._driver.get('https://work.weixin.qq.com/wework_admin/frame')

    def goto_add_member(self):
        """Click the first 'add member' service entry and return its page object."""
        self._driver.find_element(By.CSS_SELECTOR, '.index_service_cnt_itemWrap:nth-child(1)').click()
        return AddMember(self._driver)
| [
"1553872085@qq.com"
] | 1553872085@qq.com |
ddb5cfc7fe57f3ccc7c6a72f40c8474f83f8f56d | 1c3ba582bf434264d0c6643307345475845c7750 | /5.机器学习/src/CardNumberRecognition/CNN_number.py | 7382a92c0881fd95e6940535de9162e53ffd9db7 | [] | no_license | amaris0508/cultivate | 5b4daf9514db8e6d88a73adc6b700643982742f7 | 10d950060bf3d963bf3b5f5db6812592b838253c | refs/heads/master | 2023-02-10T08:14:35.199223 | 2021-01-07T09:57:51 | 2021-01-07T09:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization
# Hyperparameters for the digit-classification CNN (input images 30x46 RGB).
filters = 32# number of convolution filters in the first layer
filter_size = 3# convolution kernel size (3x3)
pooling_size = 3
conv_layers = 2
dense_units = 256
dense_layers = 2
use_drop = True

my1stCNN = Sequential()
if (conv_layers == 1):
    my1stCNN.add(Conv2D(filters, (filter_size,filter_size), activation = 'relu', input_shape = (30,46,3)))
else:
    my1stCNN.add(Conv2D(filters, (filter_size,filter_size), activation = 'relu', input_shape = (30,46,3)))
    # NOTE(review): the extra conv layers hard-code 64 filters and ignore
    # the ``filters`` variable above -- confirm whether that is intended.
    for i in range(conv_layers -1):
        my1stCNN.add(Conv2D(64, (filter_size,filter_size), activation = 'relu'))
my1stCNN.add(MaxPooling2D(pool_size = (pooling_size,pooling_size)))
my1stCNN.add(Dropout(0.35))
my1stCNN.add(Flatten())
# Fully connected head: dense_layers blocks of Dense(+optional Dropout),
# then a 10-way softmax for the digit classes 0-9.
for i in range(dense_layers):
    my1stCNN.add(Dense(units = dense_units, activation = 'relu'))
    if (use_drop):
        my1stCNN.add(Dropout(0.5))
my1stCNN.add(Dense(units= 10,activation='softmax'))
print(my1stCNN.summary())

# optimizer, regularization
my1stCNN.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# training: stream images from directories, rescaled to [0, 1].
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
train_set = train_datagen.flow_from_directory('/Users/like/Downloads/data_set/train_set',
                                              target_size = (30,46),
                                              batch_size = 5,
                                              class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('/Users/like/Downloads/data_set/test_set',
                                            target_size = (30,46),
                                            batch_size = 5,
                                            class_mode = 'categorical')
# NOTE(review): fit_generator and the nb_epoch/nb_val_samples keyword names
# are Keras 1 era and removed in modern Keras -- use fit/epochs/validation_steps.
my1stCNN.fit_generator(train_set,
                       nb_epoch = 20,
                       steps_per_epoch = 400,
                       validation_data = test_set,
                       nb_val_samples = 300,
                       use_multiprocessing = True,
                       workers = 4)

##save architecture
#json_string = my1stCNN.to_json()
#open('my_model_architecture3.json','w').write(json_string)
##save weights
#my1stCNN.save_weights('my_model_weights3.h5')
| [
"624310862@qq.com"
] | 624310862@qq.com |
4ea33e6f46050d5ded98f694ae406241e7ca7d0b | aa0f42f0fe81239e542c52550110baed2380bc96 | /apps/operation/migrations/0001_initial.py | b84004eda21defec456c3eee8a3bfc6f897920d2 | [] | no_license | ChenHangKeep/Django-Education | eda281be2dcbf5bdf566981e1a94ff5fa21b8f82 | 05f815ecc4132dbb28aaded89c9fde93c15928e2 | refs/heads/master | 2020-04-22T15:43:30.756173 | 2019-02-22T04:22:55 | 2019-02-22T04:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-19 19:16
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ``operation`` app.

    Creates five tables: CourseComments, UserAsk, UserCourse, UserFavorite
    and UserMessage.  Foreign keys are added in a later migration; here the
    relations are plain integer/char fields.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CourseComments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comments', models.CharField(max_length=200, verbose_name='评论')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '课程评论',
                'verbose_name_plural': '课程评论',
            },
        ),
        migrations.CreateModel(
            name='UserAsk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='姓名')),
                ('mobile', models.CharField(max_length=11, verbose_name='手机')),
                ('course_name', models.CharField(max_length=50, verbose_name='课程名')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '用户咨询',
                'verbose_name_plural': '用户咨询',
            },
        ),
        migrations.CreateModel(
            name='UserCourse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '用户课程',
                'verbose_name_plural': '用户课程',
            },
        ),
        migrations.CreateModel(
            name='UserFavorite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fav_id', models.IntegerField(default=0, verbose_name='数据id')),
                ('fav_type', models.IntegerField(choices=[(1, '课程'), (2, '课程机构'), (3, '讲师')], default=1, verbose_name='收藏类型')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '用户收藏',
                'verbose_name_plural': '用户收藏',
            },
        ),
        migrations.CreateModel(
            name='UserMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.IntegerField(default=0, verbose_name='接收用户')),
                ('message', models.CharField(max_length=500, verbose_name='消息内容')),
                ('has_read', models.BooleanField(default=False, verbose_name='是否已读')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '用户消息',
                'verbose_name_plural': '用户消息',
            },
        ),
    ]
| [
"596560557@qq.com"
] | 596560557@qq.com |
4fd186ecb8de7d13fb1a560a5b7063dd55cf34c3 | b0856a2d66cc4c71705b8c16c169848070294cf6 | /graphValidTree.py | c267910cc1708fcf239eee741ff3637ac2e1b0d5 | [] | no_license | jfriend08/LeetCode | 9e378ff015edc3102a4785b0832cf0eeb09f5fc2 | f76d3cf2e7fd91767f80bd60eed080a7bad06e62 | refs/heads/master | 2021-01-21T19:28:25.354537 | 2016-01-15T04:53:11 | 2016-01-15T04:53:11 | 28,518,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | '''
Given n nodes labeled from 0 to n - 1 and a list of undirected edges (each edge is a
pair of nodes), write a function to check whether these edges make up a valid tree.
For example:
Given n = 5 and edges = [[0, 1], [0, 2], [0, 3], [1, 4]], return true.
Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]], return false.
Hint:
Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], what should your return? Is this case a valid tree?
According to the definition of tree on Wikipedia: "a tree is an undirected graph in which any two
vertices are connected by exactly one path. In other words, any connected graph without simple cycles
is a tree."
Note: you can assume that no duplicate edges will appear in edges. Since all edges are undirected,
[0, 1] is the same as [1, 0] and thus will not appear together in edges.
'''
class Solution(object):
    """LeetCode 261: check whether n nodes and undirected edges form a tree.

    A graph is a tree iff it is acyclic and connected.  ``validTree`` runs a
    single DFS from node 0 that fails on any cycle, then verifies that the
    DFS reached every node.
    """

    def makeMap(self, n, edges, linkMap):
        """Fill ``linkMap`` (a dict) with an adjacency list for nodes 0..n-1."""
        for node in range(n):
            linkMap[node] = []
        for n1, n2 in edges:
            linkMap[n1].append(n2)
            linkMap[n2].append(n1)

    def isValidTravel(self, parent, node, linkMap, visited):
        """DFS from ``node``; return False as soon as a cycle is detected.

        ``parent`` is the node we arrived from; its back-edge is skipped so
        it is not mistaken for a cycle (the problem guarantees no duplicate
        edges, so the skip cannot hide a parallel-edge cycle).
        """
        visited[node] = True
        for nei in linkMap[node]:
            if nei == parent:
                continue
            if nei in visited:
                # Reached an already-visited node by a second path: cycle.
                return False
            if not self.isValidTravel(node, nei, linkMap, visited):
                return False
        return True

    def validTree(self, n, edges):
        """Return True iff the graph is a valid tree.

        Handles n == 0 (vacuously a tree when there are no edges) instead of
        crashing with KeyError on the DFS start node, as the original did.
        """
        if n == 0:
            return not edges
        linkMap, visited = {}, {}
        self.makeMap(n, edges, linkMap)
        acyclic = self.isValidTravel(None, 0, linkMap, visited)
        # Connected iff the single DFS from node 0 reached every node.
        return acyclic and len(visited) == n
# return True
# Ad-hoc smoke tests (Python 2 `print` statement syntax).
sol = Solution()
# Connected and acyclic -> expect True
n, edges = 5, [[0, 1], [0, 2], [0, 3], [1, 4]]
print sol.validTree(n, edges)
# Contains the cycle 1-2-3-1 -> expect False
n, edges = 5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]
print sol.validTree(n, edges)
# Acyclic but disconnected (two components) -> expect False
n, edges = 5, [[0, 1], [1, 2], [3, 4]]
print sol.validTree(n, edges) | [
"ys486@cornell.edu"
] | ys486@cornell.edu |
fff07f478d6c87c76f671d1ea906c7fc262cc018 | 01f7cfcae361edd7f5f530fa8d8ce44d42021b84 | /Classification/Decision Tree Classification/Python/decision_tree_classification.py | 5100f69795467adbe6505e9e1062ed012e3257cb | [] | no_license | chh14025/ClassAssignments | dec4fb0b4a3f928b7e9f1d7aeee9d4200d086bb1 | 49bb54f0fc6ec097f248c9025b24c3a2b0e511ac | refs/heads/master | 2022-08-02T06:45:57.707092 | 2020-06-03T03:14:01 | 2020-06-03T03:14:01 | 268,873,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 16:05:22 2020
@author: s.p.

Decision Tree classification of the Social_Network_Ads dataset: predict a
purchase (column 4) from Age and EstimatedSalary (columns 2-3).
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NOTE: this file was exported from a notebook. The IPython magic
# "%matplotlib inline" is not valid Python in a plain .py script (it is a
# SyntaxError), so it has been removed.

dataset = pd.read_csv('Social_Network_Ads.csv')
x = dataset.iloc[:, 2:4].values
# Use a 1-D target (iloc[:, 4], not 4:): the (n, 1) column vector produced by
# the slice makes sklearn emit a DataConversionWarning and breaks the
# boolean masks used for the scatter plot below.
y = dataset.iloc[:, 4].values

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)

# Feature scaling is not required for decision trees, but it makes the
# high-resolution decision-boundary grid below much faster to generate.
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
x_train = sc_x.fit_transform(x_train)
x_test = sc_x.transform(x_test)

from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)

# Confusion matrix (rows: actual class, columns: predicted class).
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# The bare `cm` expression only displayed in the notebook; print it here.
print(cm)

# Visualise the decision regions on the (scaled) training set: evaluate the
# classifier on a dense grid and overlay the training points.
from matplotlib.colors import ListedColormap
x_set, y_set = x_train, y_train
x1, x2 = np.meshgrid(np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01))
plt.contourf(x1, x2, classifier.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape))
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Decision Tree Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| [
"64234764+chh14025@users.noreply.github.com"
] | 64234764+chh14025@users.noreply.github.com |
d1676107aebbd8f6a8e915250251ab1170737d58 | 66a9c0e23af1fab7f3c0b2f0cd6b8c6ac060b1d7 | /models/image_segmentation/tensorflow/maskrcnn/inference/fp32/coco.py | b9813e3e51326ca1273d56c770bca83f18cab6b1 | [
"Apache-2.0"
] | permissive | hekaplex/resnet_dl | ea289864b330bfa74996444d0325f1a062feae59 | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | refs/heads/master | 2023-04-15T06:03:18.696578 | 2021-05-05T14:18:13 | 2021-05-05T14:18:13 | 364,602,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,454 | py | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
# Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import time
import numpy as np
import subprocess
from pdb import set_trace as bp
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
from config import Config
import utils
import model as modellib
# Path to trained weights file
# (resolved from the MOUNT_EXTERNAL_MODELS_SOURCE environment variable;
# a missing variable raises KeyError at import time)
COCO_MODEL_PATH = os.path.join(os.environ["MOUNT_EXTERNAL_MODELS_SOURCE"], "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
# (MOUNT_BENCHMARK must likewise be set in the environment)
DEFAULT_LOGS_DIR = os.path.join(os.environ["MOUNT_BENCHMARK"], "common/tensorflow/logs")
# Dataset year used when --year is not given on the command line.
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    # (Effective batch size is presumably GPU_COUNT * IMAGES_PER_GPU as
    # computed by the base Config class -- confirm there.)
    IMAGES_PER_GPU = 2
    # Uncomment to train on 8 GPUs (default is 1)
    # GPU_COUNT = 8
    # Number of classes (including background)
    NUM_CLASSES = 1 + 80  # COCO has 80 classes
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
    """Matterport-style Dataset backed by MS-COCO annotations.

    Wraps pycocotools' COCO API: registers classes and images through the
    base utils.Dataset helpers and converts COCO polygon/RLE annotations
    into per-instance binary masks.
    """
    def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
                  class_map=None, return_coco=False, auto_download=False):
        """Load a subset of the COCO dataset.
        dataset_dir: The root directory of the COCO dataset.
        subset: What to load (train, val, minival, valminusminival)
        year: What dataset year to load (2014, 2017) as a string, not an integer
        class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
            different datasets to the same class ID.
        return_coco: If True, returns the COCO object.
        auto_download: Automatically download and unzip MS-COCO images and annotations
        """
        if auto_download is True:
            self.auto_download(dataset_dir, subset, year)
        coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
        # "minival"/"valminusminival" have their own annotation files but
        # share the val-year image directory on disk.
        if subset == "minival" or subset == "valminusminival":
            subset = "val"
        image_dir = "{}/{}{}".format(dataset_dir, subset, year)
        # Load all classes or a subset?
        if not class_ids:
            # All classes
            class_ids = sorted(coco.getCatIds())
        # All images or a subset?
        if class_ids:
            image_ids = []
            for id in class_ids:
                image_ids.extend(list(coco.getImgIds(catIds=[id])))
            # Remove duplicates
            image_ids = list(set(image_ids))
        else:
            # All images
            image_ids = list(coco.imgs.keys())
        # Add classes
        for i in class_ids:
            self.add_class("coco", i, coco.loadCats(i)[0]["name"])
        # Add images
        for i in image_ids:
            self.add_image(
                "coco", image_id=i,
                path=os.path.join(image_dir, coco.imgs[i]['file_name']),
                width=coco.imgs[i]["width"],
                height=coco.imgs[i]["height"],
                annotations=coco.loadAnns(coco.getAnnIds(
                    imgIds=[i], catIds=class_ids, iscrowd=None)))
        if return_coco:
            return coco
    def auto_download(self, dataDir, dataType, dataYear):
        """Download the COCO dataset/annotations if requested.
        dataDir: The root directory of the COCO dataset.
        dataType: What to load (train, val, minival, valminusminival)
        dataYear: What dataset year to load (2014, 2017) as a string, not an integer
        Note:
            For 2014, use "train", "val", "minival", or "valminusminival"
            For 2017, only "train" and "val" annotations are available
        """
        # Setup paths and file names
        if dataType == "minival" or dataType == "valminusminival":
            # Both subsets draw their images from the "val" image zip.
            imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
            imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
            imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
        else:
            imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
            imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
            imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
        # print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
        # Create main folder if it doesn't exist yet
        if not os.path.exists(dataDir):
            os.makedirs(dataDir)
        # Download images if not available locally
        if not os.path.exists(imgDir):
            os.makedirs(imgDir)
            print("Downloading images to " + imgZipFile + " ...")
            with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
                shutil.copyfileobj(resp, out)
            print("... done downloading.")
            print("Unzipping " + imgZipFile)
            with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
                zip_ref.extractall(dataDir)
            print("... done unzipping")
        print("Will use images in " + imgDir)
        # Setup annotations data paths
        annDir = "{}/annotations".format(dataDir)
        if dataType == "minival":
            annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
            annFile = "{}/instances_minival2014.json".format(annDir)
            annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
            unZipDir = annDir
        elif dataType == "valminusminival":
            annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
            annFile = "{}/instances_valminusminival2014.json".format(annDir)
            annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
            unZipDir = annDir
        else:
            annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
            annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
            annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
            unZipDir = dataDir
        # print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
        # Download annotations if not available locally
        if not os.path.exists(annDir):
            os.makedirs(annDir)
        if not os.path.exists(annFile):
            if not os.path.exists(annZipFile):
                print("Downloading zipped annotations to " + annZipFile + " ...")
                with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
                    shutil.copyfileobj(resp, out)
                print("... done downloading.")
            print("Unzipping " + annZipFile)
            with zipfile.ZipFile(annZipFile, "r") as zip_ref:
                zip_ref.extractall(unZipDir)
            print("... done unzipping")
        print("Will use annotations in " + annFile)
    def load_mask(self, image_id):
        """Load instance masks for the given image.
        Different datasets use different ways to store masks. This
        function converts the different mask format to one format
        in the form of a bitmap [height, width, instances].
        Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a COCO image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "coco":
            return super(CocoDataset, self).load_mask(image_id)
        instance_masks = []
        class_ids = []
        annotations = self.image_info[image_id]["annotations"]
        # Build mask of shape [height, width, instance_count] and list
        # of class IDs that correspond to each channel of the mask.
        for annotation in annotations:
            class_id = self.map_source_class_id(
                "coco.{}".format(annotation['category_id']))
            if class_id:
                m = self.annToMask(annotation, image_info["height"],
                                   image_info["width"])
                # Some objects are so small that they're less than 1 pixel area
                # and end up rounded out. Skip those objects.
                if m.max() < 1:
                    continue
                # Is it a crowd? If so, use a negative class ID.
                if annotation['iscrowd']:
                    # Use negative class ID for crowds
                    class_id *= -1
                    # For crowd masks, annToMask() sometimes returns a mask
                    # smaller than the given dimensions. If so, resize it.
                    # (Here the mask is replaced by an all-ones mask of the
                    # full image size rather than resized.)
                    if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
                        m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
                instance_masks.append(m)
                class_ids.append(class_id)
        # Pack instance masks into an array
        if class_ids:
            mask = np.stack(instance_masks, axis=2)
            class_ids = np.array(class_ids, dtype=np.int32)
            return mask, class_ids
        else:
            # Call super class to return an empty mask
            return super(CocoDataset, self).load_mask(image_id)
    def image_reference(self, image_id):
        """Return a link to the image in the COCO Website."""
        info = self.image_info[image_id]
        if info["source"] == "coco":
            return "http://cocodataset.org/#explore?id={}".format(info["id"])
        else:
            # NOTE(review): the super() result is not returned here, so
            # non-COCO images yield None -- confirm that is intended.
            super(CocoDataset, self).image_reference(image_id)
    # The following two functions are from pycocotools with a few changes.
    def annToRLE(self, ann, height, width):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: binary mask (numpy 2D array)
        """
        segm = ann['segmentation']
        if isinstance(segm, list):
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, height, width)
            rle = maskUtils.merge(rles)
        elif isinstance(segm['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, height, width)
        else:
            # rle
            rle = ann['segmentation']
        return rle
    def annToMask(self, ann, height, width):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann, height, width)
        m = maskUtils.decode(rle)
        return m
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange detection results to match the COCO submission format
    (http://cocodataset.org/#format).

    Emits one result dict per (image_id, detection) pair; callers in this
    file pass a single-element image_ids list per detected image.
    """
    # No detections at all -> nothing to report.
    if rois is None:
        return []

    results = []
    for image_id in image_ids:
        # One entry per detected instance.
        for i in range(rois.shape[0]):
            y1, x1, y2, x2 = np.around(rois[i], 1)
            results.append({
                "image_id": image_id,
                "category_id": dataset.get_source_class_id(class_ids[i], "coco"),
                # COCO expects [x, y, width, height].
                "bbox": [x1, y1, x2 - x1, y2 - y1],
                "score": scores[i],
                "segmentation": maskUtils.encode(np.asfortranarray(masks[:, :, i])),
            })
    return results
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, warmup=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    warmup: number of leading batches excluded from the timing statistics
    image_ids: optional explicit list of dataset image ids to evaluate
    Note: relies on the module-level `config` created in __main__ for
    BATCH_SIZE, and prints throughput statistics when done.
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids
    # Round the limit down to a whole number of batches.
    limit = int(limit/config.BATCH_SIZE)*config.BATCH_SIZE;
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        # image = dataset.load_image(image_id)
        # Only iterate at batch boundaries; each iteration below loads the
        # next BATCH_SIZE images starting at image_id.
        if (i%config.BATCH_SIZE!=0):
            continue;
        image_list=[];
        for j in range(0,config.BATCH_SIZE):
            print("i image_id",i+j, image_id+j)
            # NOTE(review): assumes image ids are consecutive integers
            # (image_id+j), which holds for sequentially added images.
            image = dataset.load_image(image_id+j)
            image_list.append(image)
        # Run detection
        t = time.time()
        # NOTE(review): [0] keeps only the first image's detections of the
        # batch, and only coco_image_ids[i:i+1] is scored below -- looks
        # intentional for throughput benchmarking, but accuracy numbers
        # then cover one image per batch. Confirm.
        r = model.detect(image_list, verbose=0)[0]
        t1 = time.time() - t
        #t_prediction += (time.time() - t)
        # Exclude the first `warmup` batches from the timing totals.
        if (i/config.BATCH_SIZE>=warmup):
            t_prediction += t1
        print("pred time:",i,t1)
        # Convert results to COCO format
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"], r["masks"])
        results.extend(image_results)
    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)
    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    print("Batch size: %d" % (config.BATCH_SIZE))
    print("Time spent per BATCH: %.4f ms" % (t_prediction / (len(image_ids)/config.BATCH_SIZE-warmup) * 1000))
    print("Total samples/sec: %.4f samples/s" % ((len(image_ids)/config.BATCH_SIZE-warmup) * config.BATCH_SIZE / t_prediction))
    print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on MS COCO.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' on MS COCO")
    parser.add_argument("--trainbs", required=False,
                        default=2,
                        metavar="<train batchsize>",
                        help="Batchsize to train (default=2)")
    parser.add_argument("--infbs", required=False,
                        default=1,
                        metavar="<inference batchsize>",
                        help="Batchsize to inference (default=1)")
    parser.add_argument("--num_intra_threads", required=False,
                        default=56,
                        metavar="<num intra threads>",
                        help="Num intra threads (default=56)")
    parser.add_argument("--num_inter_threads", required=False,
                        default=1,
                        metavar="<num inter threads>",
                        help="Num inter threads (default=1)")
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/coco/",
                        help='Directory of the MS-COCO dataset')
    parser.add_argument('--year', required=False,
                        default=DEFAULT_DATASET_YEAR,
                        metavar="<year>",
                        help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
    parser.add_argument('--model', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--cp', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    # NOTE(review): defaults and help text disagree for --nb (default 50,
    # help says 500) and --nw (default 5, help says 10); the help strings
    # look stale.
    parser.add_argument('--nb', required=False,
                        default=50,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    parser.add_argument('--nw', required=False,
                        default=5,
                        metavar="<image count>",
                        help='Images to use for evaluation warmup (default=10)')
    # NOTE(review): argparse's type=bool treats ANY non-empty string as
    # True, so "--download False" still enables downloading; only an empty
    # string parses as False.
    parser.add_argument('--download', required=False,
                        default=False,
                        metavar="<True|False>",
                        help='Automatically download and unzip MS-COCO files (default=False)',
                        type=bool)
    args = parser.parse_args()
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    print("Year: ", args.year)
    print("Logs: ", args.cp)
    print("Auto Download: ", args.download)
    # For pycocotools updates
    # NOTE(review): this rewrites the *installed* pycocotools source in the
    # user's site-packages via sed (unicode -> bytes, a Python 3 fix).
    # Invasive side effect; sed failures are silently ignored.
    ppath = subprocess.Popen(["python3", "-m", "site", "--user-site"],
                             stdout=subprocess.PIPE).communicate()[0].decode("utf-8")
    ppath = ppath[:-1] + "/pycocotools/coco.py"
    ret = subprocess.Popen(["sed", "-i", "s/unicode/bytes/", ppath],
                           stdout=subprocess.PIPE).communicate()[0]
    # Configurations
    if args.command == "train":
        class TrainConfig(CocoConfig):
            GPU_COUNT = 1
            IMAGES_PER_GPU = int(args.trainbs)
            STEPS_PER_EPOCH = int(args.nb)
        config = TrainConfig()
    else:
        class InferenceConfig(CocoConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = int(args.infbs)
            DETECTION_MIN_CONFIDENCE = 0
        config = InferenceConfig()
    # Thread-pool sizes consumed by the model/TF session setup.
    config.NUM_INTRA = int(args.num_intra_threads)
    config.NUM_INTER = int(args.num_inter_threads)
    config.display()
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.cp)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.cp)
    # Select weights file to load
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()[1]
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights
        model_path = model.get_imagenet_weights()
    else:
        model_path = args.model
    # Load weights
    print("Loading weights ", model_path)
    model.load_weights(model_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        # Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask RCNN paper.
        dataset_train = CocoDataset()
        dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
        dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
        dataset_train.prepare()
        # Validation dataset
        dataset_val = CocoDataset()
        dataset_val.load_coco(args.dataset, "minival", year=args.year, auto_download=args.download)
        dataset_val.prepare()
        # *** This training schedule is an example. Update to your needs ***
        # Training - Stage 1
        print("Training network heads")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=1, #40,
                    layers='heads', warmup=int(args.nw))
        # Training - Stage 2
        # Finetune layers from ResNet stage 4 and up
        print("Fine tune Resnet stage 4 and up")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=2, #120,
                    layers='4+', warmup=int(args.nw))
        # Training - Stage 3
        # Fine tune all layers
        print("Fine tune all layers")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE / 10,
                    epochs=3, #160,
                    layers='all', warmup=int(args.nw))
    elif args.command == "evaluate":
        # Validation dataset
        dataset_val = CocoDataset()
        coco = dataset_val.load_coco(args.dataset, "minival", year=args.year, return_coco=True, auto_download=args.download)
        dataset_val.prepare()
        print("Running COCO evaluation on {} images.".format(args.nb))
        evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.nb), warmup=int(args.nw))
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
| [
"archaeus@hekaplex.com"
] | archaeus@hekaplex.com |
88231ca16773294f42e2cf6d51ba1b8dc86895a1 | 25b81256057c9a2de014ab511e04703dc617f050 | /etl/census/census_features.py | 19b6e85b6775b9ed6933def9b874cd9390f5bb66 | [
"MIT"
] | permissive | conorhenley/cincinnati | 7b9b2fc6d13e49ad5e95a557cd79b28bd17f0565 | 5ca86a8a31099365188969493e0dd369b4faefc0 | refs/heads/master | 2021-01-13T06:50:18.403686 | 2016-05-26T20:21:12 | 2016-05-26T20:21:12 | 64,249,902 | 1 | 0 | null | 2016-07-26T19:51:03 | 2016-07-26T19:51:03 | null | UTF-8 | Python | false | false | 7,805 | py |
# coding: utf-8

# Build block-group-level census features (rates and densities) from the raw
# 2010 SF1 population/housing counts and write them to
# shape_files.census_features. Column codes are 2010 Census SF1 variables:
# H* = housing tables, P* = population tables (P012 = sex by age,
# P018 = households).

# In[1]:
from sqlalchemy import create_engine
import pandas as pd
from lib_cinci.config import main as config

user = config['db']['user']
password = config['db']['password']
host = config['db']['host']
database = config['db']['database']

engine = create_engine('postgresql://{user}:{password}@{host}:5432/{database}'.format(user=user, password=password, host=host, database=database))

# In[3]:
pop_housing_sql = """SELECT census.*, groups.area FROM shape_files.census_pop_housing as census
                    JOIN shape_files.census_blocks_groups as groups
                    on census.tract = groups.tract
                    and census.block_group = groups.blkgrp;"""

# In[4]:
pop_housing_raw = pd.read_sql_query(pop_housing_sql, con=engine)

# # Raw census data
# In[5]:
pop_housing_raw.head()

# # Calculating census features
# list of feature description and calculation can be found in folder docs/data_dictionaries
#
# features are calculated for each pair of census tract and block group
# In[5]:
# BUG FIX: the original dict listed 'rate_male_over_75' twice. The second
# occurrence used the FEMALE 75+ columns (P0120047-P0120049) and silently
# overwrote the correct male value, while 'rate_female_over_75' was missing.
# The second entry is now keyed 'rate_female_over_75'.
features = pd.DataFrame({ 'tract' : pop_housing_raw.tract,
    'block_group' : pop_housing_raw.block_group,
    'housing_density': pop_housing_raw.H0030001/pop_housing_raw.area,
    'rate_occupied_units': pop_housing_raw.H0030002/pop_housing_raw.H0030001,
    'rate_vacant_units': pop_housing_raw.H0030003/pop_housing_raw.H0030001,
    'rate_mortgage_or_loan' : pop_housing_raw.H0040002/pop_housing_raw.H0030001,
    'rate_renter_occupied' : pop_housing_raw.H0040004/pop_housing_raw.H0030001,
    'rate_for_rent' : pop_housing_raw.H0050002/pop_housing_raw.H0030001,
    'rate_white_householder' : pop_housing_raw.H0060002/pop_housing_raw.P0180001,
    'rate_black_householder' : pop_housing_raw.H0060003/pop_housing_raw.P0180001,
    'rate_native_householder' : (pop_housing_raw.H0060004+pop_housing_raw.H0060006)/pop_housing_raw.P0180001,
    'rate_asian_householder' : pop_housing_raw.H0060005/pop_housing_raw.P0180001,
    'rate_other_race_householder' : pop_housing_raw.H0060007/pop_housing_raw.P0180001,
    'rate_pop_occupied_units' : pop_housing_raw.H0100001/pop_housing_raw.P0010001,
    'rate_1_per_household' : pop_housing_raw.H0130002/pop_housing_raw.P0180001,
    'rate_2_per_household' : pop_housing_raw.H0130003/pop_housing_raw.P0180001,
    'rate_3_per_household' : pop_housing_raw.H0130004/pop_housing_raw.P0180001,
    'rate_4_per_household' : pop_housing_raw.H0130005/pop_housing_raw.P0180001,
    'rate_5_per_household' : pop_housing_raw.H0130006/pop_housing_raw.P0180001,
    'rate_6_per_household' : pop_housing_raw.H0130007/pop_housing_raw.P0180001,
    'rate_7_plus_per_household' : pop_housing_raw.H0130008/pop_housing_raw.P0180001,
    'rate_owner_occupied' : pop_housing_raw.H0140002/pop_housing_raw.H0030001,
    'rate_owner_occupied_white' : pop_housing_raw.H0140003/pop_housing_raw.H0140002,
    'rate_owner_occupied_black' : pop_housing_raw.H0140004/pop_housing_raw.H0140002,
    'rate_owner_occupied_native' : (pop_housing_raw.H0140005+pop_housing_raw.H0140007)/pop_housing_raw.H0140002,
    'rate_owner_occupied_asian' : pop_housing_raw.H0140006/pop_housing_raw.H0140002,
    'rate_owner_occupied_other_race' : pop_housing_raw.H0140008/pop_housing_raw.H0140002,
    'rate_renter_occupied_white' : pop_housing_raw.H0140011/pop_housing_raw.H0040004,
    'rate_renter_occupied_black' : pop_housing_raw.H0140012/pop_housing_raw.H0040004,
    'rate_renter_occupied_native' : (pop_housing_raw.H0140013+pop_housing_raw.H0140015)/pop_housing_raw.H0040004,
    'rate_renter_occupied_asian' : pop_housing_raw.H0140014/pop_housing_raw.H0040004,
    'rate_renter_occupied_other' : pop_housing_raw.H0140016/pop_housing_raw.H0040004,
    'rate_owner_occupied_hispanic' : pop_housing_raw.H0150004/pop_housing_raw.H0140002,
    #'rate_renter_occupied_hispanic' : pop_housing_raw.H0150005/pop_housing_raw.H0040004,
    'rate_owner_occupied_w_children' : pop_housing_raw.H0190003/pop_housing_raw.H0140002,
    'rate_owner_occupied_no_children' : pop_housing_raw.H0190004/pop_housing_raw.H0140002,
    'rate_renter_occupied_no_children' : 1-(pop_housing_raw.H0190006/pop_housing_raw.H0040004),
    'rate_renter_occupied_w_children' : pop_housing_raw.H0190006/pop_housing_raw.H0040004,
    'population_density' : pop_housing_raw.P0010001/pop_housing_raw.area,
    'rate_white_pop' : pop_housing_raw.P0030002/pop_housing_raw.P0010001,
    'rate_black_pop' : pop_housing_raw.P0030003/pop_housing_raw.P0010001,
    'rate_native_pop' : (pop_housing_raw.P0030006+pop_housing_raw.P0030004)/pop_housing_raw.P0010001,
    'rate_asian_pop' : pop_housing_raw.P0030005/pop_housing_raw.P0010001,
    'rate_other_race_pop' : pop_housing_raw.P0030007/pop_housing_raw.P0010001,
    'rate_pop_over_18' : pop_housing_raw.P0110001/pop_housing_raw.P0010001,
    'rate_male_under_18' : (pop_housing_raw.P0120003+pop_housing_raw.P0120004+pop_housing_raw.P0120005+pop_housing_raw.P0120006)/pop_housing_raw.P0010001,
    'rate_male_18_35' : pop_housing_raw[['P0120007','P0120008','P0120009','P0120010','P0120011','P0120012']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_male_35_50' : pop_housing_raw[['P0120013','P0120014','P0120015']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_male_50_75' : pop_housing_raw[['P0120016', 'P0120017', 'P0120018', 'P0120019', 'P0120020', 'P0120021', 'P0120022']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_male_over_75' : pop_housing_raw[['P0120023','P0120024','P0120025']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_female_under_18' : pop_housing_raw[['P0120027','P0120028','P0120029','P0120030']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_female_18_35' : pop_housing_raw[['P0120031', 'P0120032', 'P0120033', 'P0120034', 'P0120035', 'P0120036']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_female_35_50' : pop_housing_raw[['P0120037', 'P0120038', 'P0120039']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_female_50_75' : pop_housing_raw[['P0120040', 'P0120041', 'P0120042', 'P0120043', 'P0120044', 'P0120045', 'P0120046']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_female_over_75' : pop_housing_raw[['P0120047','P0120048','P0120049']].sum(axis=1)/pop_housing_raw.P0010001,
    'rate_households' : pop_housing_raw.P0180001/pop_housing_raw.H0030001})

# In[7]:
features

# In[10]:
features.to_sql('census_features', engine, schema='shape_files', if_exists='replace', index=False)
| [
"edu.blancas@gmail.com"
] | edu.blancas@gmail.com |
bd8e762db4466a9af6f76b507af09e77b0f9bb95 | 6d913a2fe43717d73e7e9122b9fbad973315a7b3 | /blog/migrations/0001_initial.py | d70c9f11c5d6a50f4e2c5140b5fbecdf084309ed | [] | no_license | k3nn1x/my-first-blog | b8cb4fb9e53a49d9f8bd4a12abe167363bb0000a | 0868c29983ff5575caf298fa4a8c52aa2aeb925f | refs/heads/master | 2016-09-14T07:05:26.295328 | 2016-06-13T12:05:51 | 2016-06-13T12:05:51 | 59,143,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-18 15:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the blog's Post table.
    # Applied migrations should not be hand-edited; add a new migration
    # for any further schema change.
    initial = True
    dependencies = [
        # Needed because the author field below points at the (swappable)
        # user model configured via AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to now; published_date stays empty
                # until the post is published.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting a user cascades to that user's posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"kennypedrique@gmail.com"
] | kennypedrique@gmail.com |
a0da45bec4db182816084ee19fbf9ac37ec705d9 | d93bd218582a25e7f87ef068ded78a39750a9c0d | /chapter_13/thirteenOne_settings.py | c8f7c6b4f888e009c23564a140fc2caf71f00f77 | [] | no_license | kelvDp/CC_python_cc_projects | e080ff681b81694a25e2d1474d0a224126f42cd4 | 7c49f8f05afa58c99979bf490f7bc4ff85a87167 | refs/heads/master | 2022-12-13T07:36:20.532977 | 2020-07-28T08:27:52 | 2020-07-28T08:27:52 | 281,634,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | class Settings:
"""Class to manage settings"""
def __init__(self):
"""Inits settings"""
self.screen_width = 1000
self.screen_height = 600
self.speed = 1.1
| [
"noreply@github.com"
] | kelvDp.noreply@github.com |
6c4a7e879ce82bb106ec665d968edc363888052d | 24798e941b1dc52f39d45fcafe9e7707d161e74e | /src/config.py | 12ade15a10614a5b15742fc861012dd58032178c | [] | no_license | IanMadlenya/pugg | a2aa1d12510090026fd806656da4447b262a4419 | 806642e1b37d8657db4f41a4a81bdfe878a28bba | refs/heads/master | 2021-01-25T09:14:33.671412 | 2013-03-30T01:48:25 | 2013-03-30T01:48:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | global MONGO_SERVER
global PUGG_DB
# Default connection settings, applied only when the names are not already
# defined (presumably so a caller can predefine them before this module's
# code runs -- the module-level `global` statements are otherwise no-ops).
try:
    MONGO_SERVER
except NameError:
    MONGO_SERVER = "127.0.0.1"
# BUG FIX: PUGG_DB now has its own guard. Previously it was assigned only
# when MONGO_SERVER was undefined, so predefining MONGO_SERVER alone left
# PUGG_DB undefined (NameError for any later user of PUGG_DB).
try:
    PUGG_DB
except NameError:
    PUGG_DB = "pugg_production"
| [
"natematias@gmail.com"
] | natematias@gmail.com |
d410f466a0f98bdf23b7fc68a30269a04ef74fca | a276798900d6f99d0dc79b8e4e66c23973ede4a9 | /blog/urls.py | a96decec9876c70c542bf8f64bd4f22291847321 | [] | no_license | Mgarchuk/MyBlog | ad73a06e008f80644b33fba795eb601dead21754 | a2890a347ab4b56f1e703053840c1cdd1dddb5ba | refs/heads/main | 2023-03-07T07:03:32.991927 | 2021-02-24T22:14:07 | 2021-02-24T22:14:07 | 342,049,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | from django.conf.urls import url
from django.urls import path
from django.views.generic import ListView
from blog.models import Post
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Routes for the blog app: public pages, post CRUD, photos, auth, and the
# emailed account-activation link.
urlpatterns = [
    url(r'^$', views.main_page, name='main_page'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    # Generic list view, newest posts first.
    path('posts/', ListView.as_view(queryset=Post.objects.all().order_by("-published_date"), template_name='blog/post_list.html')),
    path('edit_post/', views.edit_post, name='post_edit'),
    path('entry/', views.MyProjectLoginView.as_view(), name='entry_page'),
    path('register/', views.RegisterUserView, name='register_page'),
    path('create_post/', views.create_post, name='create_post'),
    path('update_post/<int:pk>', views.update_post, name='update_post'),
    path('delete_post/<int:pk>', views.delete_post, name='delete_post'),
    path('create_photo/', views.create_photo, name='create_photo'),
    path('logout/', views.MyProjectLogout.as_view(), name='logout'),
    path('photo_list/', views.photo_list, name='photo_list'),
    # uidb64/token pair consumed by the activation-email flow.
    url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.activate_account, name='activate'),
]
# Serve user-uploaded media through Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"mgarchuk6@gmail.com"
] | mgarchuk6@gmail.com |
b561022b7fd0c683ba9c07ba5381c7a55b8b49cd | bc599c9a404940fae21ed6b57edb7bb9dc04e71c | /test/base_test/graphics/baseScatter.py | 2bd2cb944a44d6477901a3cff545da2ae9d41870 | [] | no_license | jcarlosglx/SparkReport | c9b37a1419f113ea13341e6641ceb17056aeb7d0 | 9d6b044f037e8dfe583bcf76c51dd792ac1cc34a | refs/heads/master | 2023-08-11T16:04:28.393856 | 2021-09-21T23:06:08 | 2021-09-21T23:06:08 | 409,001,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from test.base_test.base_http.baseGetTest import BaseGetGeneralTest
from test.base_test.base_dimension.baseGraphicTwoDimensionTest import \
BaseGraphicTwoDimensionTest
from typing import List, Type
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
class BaseGetScatterTest(BaseGetGeneralTest, BaseGraphicTwoDimensionTest):
    """GET test for scatter graphics: sends the prepared JSON payload to the
    configured endpoint and compares the HTTP status with the expected one."""

    # Graphic type(s) this test exercises (consumed by the base classes).
    Graphics: List[str] = ["Scatter"]

    def test_get_scatter(self, get_app: Flask, get_db: Type[SQLAlchemy]):
        # reload_json() presumably refreshes self.JSON from the configured
        # graphics -- confirm in BaseGetGeneralTest.
        self.reload_json()
        response = get_app.test_client().get(
            f"{self.url_get}{self.endpoint_get}", json=self.JSON
        )
        # Persist the response for later inspection, then check the status.
        self.save_response_file(response)
        code_response = str(response.status_code)
        assert code_response == self.expect_status_get, self.print_error(code_response)
| [
"j.carlosglxg@gmail.com"
] | j.carlosglxg@gmail.com |
fc81e5b25c0c45e42a8c1089b51deb267ca9c1c1 | 72616f323d12c8a6ca2dea8f143c885790de8c61 | /test/test_oauth_flow.py | fcbf1497bd673b09a3b2047413332b7b116540a5 | [
"MIT"
] | permissive | jbn/brittle_wit_core | af9ac9d9bc5f2958805e4476f8c772d202fcee2d | c7c086af1d9e9b3833052fa77416a18405154213 | refs/heads/master | 2021-01-21T05:56:34.994069 | 2018-05-09T00:08:56 | 2018-05-09T00:08:56 | 101,930,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,775 | py | import pytest
from brittle_wit_core.oauth import (extract_access_token,
extract_request_token,
obtain_access_token,
obtain_request_token,
redirect_url,
AppCredentials)
def test_extract_request_token_bad_status():
    """Any non-200 HTTP status yields a (None, None) token pair."""
    token, secret = extract_request_token(999, "")
    assert token is None
    assert secret is None
def test_extract_request_token_good_status_bad_resp():
    """A 200 response with an unparsable body also yields (None, None)."""
    pair = extract_request_token(200, "")
    assert pair == (None, None)
def test_extract_request_token_good_status_good_resp():
    """A well-formed 200 body parses into the (token, secret) pair."""
    resp_body = ("oauth_token=a"
                 "&oauth_token_secret=b"
                 "&oauth_callback_confirmed=true")
    assert extract_request_token(200, resp_body) == ('a', 'b')
def test_redirect_url():
    """The request token is percent-encoded and appended as a query param."""
    expected = ("https://api.twitter.com/oauth/authenticate"
                "?oauth_token=hello%2Fworld")
    assert redirect_url("hello/world") == expected
def test_extract_access_token_bad_status():
    """Non-200 statuses produce no access token."""
    result = extract_access_token(999, "")
    assert result is None
def test_extract_access_token_bad_resp():
    """An empty 200 body cannot be parsed into token data."""
    result = extract_access_token(200, "")
    assert result is None
def test_extract_access_token_good_status_good_resp():
    """A well-formed 200 body round-trips into the expected field dict."""
    d = {'oauth_token': 'token',
         'oauth_token_secret': 'secret',
         'screen_name': 'techcrunch',
         'user_id': 42,
         'x_auth_expires': '0'}
    resp_body = "&".join("{}={}".format(k, v) for k, v in d.items())
    assert extract_access_token(200, resp_body) == d
@pytest.fixture
def app_cred():
    # Fixed demo credentials from Twitter's sign-in walkthrough, so the
    # signature test vectors below are reproducible.
    # See: https://dev.twitter.com/web/sign-in/implementing
    return AppCredentials("cChZNFj6T5R0TigYB9yd1w",
                          "L8qq9PZyRg6ieKGEKhZolGC0vJWLw8iEJ88DRdyOg")
def test_obtain_request_token(app_cred):
    """Known-answer test: with pinned timestamp/nonce/callback the generated
    oauth_signature must match the documented example value."""
    callback_url = "http://localhost/sign-in-with-twitter/"
    overrides = {'oauth_timestamp': "1318467427",
                 'oauth_callback': callback_url,
                 'oauth_nonce': "ea9ec8429b68d6b77cd5600adbbb0456"}
    _, headers = obtain_request_token(app_cred, callback_url, **overrides)
    expected_substr = 'oauth_signature="F1Li3tvehgcraF8DMJ7OyxO4w9Y%3D"'
    assert expected_substr in headers['Authorization']
def test_obtain_access_token(app_cred):
    """Known-answer test for the access-token request's oauth_signature."""
    # Sanity-check the fixture before relying on it.
    assert app_cred.key == "cChZNFj6T5R0TigYB9yd1w"
    tok = "NPcudxy0yU5T3tBzho7iCotZ3cnetKwcTIRlX0iwRl0"
    verifier = "uw7NjWHT6OJ1MpJOXsHfNxoAhPKpgI8BlYDhxEjIBY"
    overrides = {'oauth_timestamp': "1318467427",
                 'oauth_nonce': "a9900fe68e2573b27a37f10fbad6a755"}
    _, headers = obtain_access_token(app_cred, tok, verifier, **overrides)
    expected_substr = 'oauth_signature="eLn5QjdCqHdlBEvOogMeGuRxW4k%3D"'
    assert expected_substr in headers['Authorization']
| [
"jbn@abreka.com"
] | jbn@abreka.com |
70be044b2afc007606fdccb195846bb31d41a92a | 4d332c45578246847ef2cdcdeb827ca29ab06090 | /modules/Bio/SeqUtils/IsoelectricPoint.py | 817626b257eee1231c8ea29aca828e2ba2de2cf3 | [
"MIT"
] | permissive | prateekgupta3991/justforlearn | 616cc297a2a6119fa959b9337a5e91c77a11ebf7 | 3984c64063b356cf89003e17a914272983b6cf48 | refs/heads/master | 2021-03-12T22:09:12.184638 | 2014-01-28T10:37:07 | 2014-01-28T10:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /usr/share/pyshared/Bio/SeqUtils/IsoelectricPoint.py | [
"prateekgupta.3991@gmail.com"
] | prateekgupta.3991@gmail.com |
da2cb9526c79dd2e914a5982b37cc3c42794cf39 | c4157663e2aeaf553f64f9cd44540db6a718cdb8 | /Edurights/Edurights/settings.py | 919115c3ec432b8cf0efaba66766354af31427dc | [
"MIT"
] | permissive | AnshShrivastava/EduRights | dba60ae5e33f8e639b7244e94137a197bc8a55fe | 3bb66f168cfa1f012c805032ea192f218a779624 | refs/heads/main | 2023-02-27T04:09:19.825207 | 2021-02-01T16:53:08 | 2021-02-01T16:53:08 | 334,899,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | """
Django settings for Edurights project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '@4v2x+_(1i5q*1ksy2ox@a142mh1f6^2k76%5_#e-&+m44ky8z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: host validation is effectively relaxed only while DEBUG is on.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'UI'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Edurights.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Edurights.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATICFILES_DIRS = [
BASE_DIR / "static",
'/home/ansh/Desktop/EduRights/Edurights/static/', #the absolute path to your static fil
]
MEDIA_URL = '/media/'
| [
"anshshrivastava1705@gmail.com"
] | anshshrivastava1705@gmail.com |
c85cb0f32d51c4871c1b38ca50593c5a5e7ecd75 | b95f80c0c2e7700ed24248bb84f4ef02723e367c | /tests/k8s/test_discovery.py | 3bec8820ae816dfa8b80dda2036bf7231f9dce29 | [
"MIT"
] | permissive | tinyzimmer/kopf | b97faab3f396dc169ebe053c6b41d57d20756738 | 74c42a2acdf2a72446d290fa1f27b53ec5d43218 | refs/heads/master | 2022-12-04T17:51:30.648646 | 2020-08-30T00:23:18 | 2020-08-30T00:23:18 | 291,496,989 | 0 | 0 | MIT | 2020-08-30T15:26:12 | 2020-08-30T15:26:11 | null | UTF-8 | Python | false | false | 4,573 | py | import aiohttp.web
import pytest
from kopf.clients.discovery import discover, is_namespaced, is_status_subresource
from kopf.structs.resources import Resource
async def test_discovery_of_existing_resource(
        resp_mocker, aresponses, hostname):
    """discover() returns the resource's info dict when the API lists it."""
    res1info = {'name': 'someresources', 'namespaced': True}
    result = {'resources': [res1info]}
    # Serve the API-group listing from a mocked endpoint.
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources')
    info = await discover(resource=resource)
    assert info == res1info
async def test_discovery_of_unexisting_resource(
        resp_mocker, aresponses, hostname):
    """discover() returns None when the group exists but lacks the resource."""
    result = {'resources': []}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources')
    info = await discover(resource=resource)
    assert info is None
@pytest.mark.parametrize('status', [403, 404])
async def test_discovery_of_unexisting_group_or_version(
        resp_mocker, aresponses, hostname, status):
    """403/404 on the group listing is treated as 'no such resource' (None)."""
    list_mock = resp_mocker(return_value=aresponses.Response(status=status, reason="boo!"))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources')
    info = await discover(resource=resource)
    assert info is None
async def test_discovery_is_cached_per_session(
        resp_mocker, aresponses, hostname):
    """Discovery results -- including absence -- are cached for the session:
    the second mocked listing is never consulted once the first is cached."""
    res1info = {'name': 'someresources1', 'namespaced': True}
    res2info = {'name': 'someresources2', 'namespaced': True}
    # First registered response: only someresources1 exists.
    result = {'resources': [res1info]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    # Second registered response (would expose someresources2) must not be hit.
    result = {'resources': [res2info]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources1')
    info = await discover(resource=resource)
    assert info == res1info
    resource = Resource('some-group.org', 'someversion', 'someresources2')
    info = await discover(resource=resource)
    assert info is None  # cached as absent on the 1st call.
    resource = Resource('some-group.org', 'someversion', 'someresources1')
    info = await discover(resource=resource)
    assert info == res1info
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_namespaced(
        resp_mocker, aresponses, hostname, namespaced):
    """is_namespaced() reflects the 'namespaced' flag from the API listing."""
    res1info = {'name': 'someresources', 'namespaced': namespaced}
    result = {'resources': [res1info]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources')
    result = await is_namespaced(resource=resource)
    assert result == namespaced
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_status_subresource_when_not_a_subresource(
        resp_mocker, aresponses, hostname, namespaced):
    """Without a '<name>/status' entry in the listing the answer is False."""
    res1info = {'name': 'someresources', 'namespaced': namespaced}
    result = {'resources': [res1info]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources')
    result = await is_status_subresource(resource=resource)
    assert result is False  # an extra type-check: strictly False, not falsy
@pytest.mark.parametrize('namespaced', [True, False])
async def test_is_status_subresource_when_is_a_subresource(
        resp_mocker, aresponses, hostname, namespaced):
    """A '<name>/status' entry in the listing makes the answer True."""
    res1info = {'name': 'someresources', 'namespaced': namespaced}
    res1status = {'name': 'someresources/status', 'namespaced': namespaced}
    result = {'resources': [res1info, res1status]}
    list_mock = resp_mocker(return_value=aiohttp.web.json_response(result))
    aresponses.add(hostname, '/apis/some-group.org/someversion', 'get', list_mock)
    resource = Resource('some-group.org', 'someversion', 'someresources')
    result = await is_status_subresource(resource=resource)
    assert result is True  # an extra type-check: strictly True, not truthy
| [
"nolar@nolar.info"
] | nolar@nolar.info |
be5499ad00fe3147d9b4398c71f0d529e962e236 | d4386dbbeb95d6ebcd4fe17a6ecbfeabd4850a44 | /SocialNetwork/authentication/migrations/0003_auto_20190105_1848.py | 3162f5cfc7fa4227a1da658b43cdc3bff7b5ee46 | [] | no_license | abdulwahid24/socialnetwork | a20c7c78a96341c8c3dff46b3f1c7997daf56a47 | 778c4ba0b23647370a37b3b098f4398843fe29c7 | refs/heads/master | 2020-04-30T19:57:56.394788 | 2019-03-22T01:46:39 | 2019-03-22T01:46:39 | 177,053,119 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Generated by Django 2.0.8 on 2019-01-05 18:48
from django.db import migrations
class Migration(migrations.Migration):
    """Drop first_name/last_name from the custom user model (auto-generated)."""

    dependencies = [
        ('authentication', '0002_profile'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='user',
            name='first_name',
        ),
        migrations.RemoveField(
            model_name='user',
            name='last_name',
        ),
    ]
| [
"abdulwahid24@gmail.com"
] | abdulwahid24@gmail.com |
35275eeab4f24091188144ceabf10d79a583b99d | cec34868d9bb1740caf0b04004ebf128da03c445 | /dueros/directive/AudioPlayer/Control/NextButton.py | a56a35129de9c97a7da44c3b12e2ec3743a6d959 | [] | no_license | imzeali/dueros_mind_reader | 59c4bfc5976ce561d747c75bce69c672e29f31bd | e1e0b6275acbe7e95e7573c94ef0c561a31a42c8 | refs/heads/master | 2022-12-15T14:42:43.476167 | 2019-11-26T10:01:35 | 2019-11-26T10:01:35 | 170,806,962 | 8 | 2 | null | 2022-12-08T01:21:22 | 2019-02-15T05:30:28 | Python | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/env python2
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/5/28
"""
desc:pass
"""
from dueros.directive.AudioPlayer.Control.Button import Button
class NextButton(Button):
    """Audio-player control that skips to the next track."""

    def __init__(self):
        # 'NEXT' is the control name registered with the Button base class.
        super(NextButton, self).__init__('NEXT')
if __name__ == '__main__':
pass | [
"lizhi@eastwestec.com"
] | lizhi@eastwestec.com |
34ac7b227de0b5c86431e6551131dd49c60e99a1 | 1049985e1fce47ad36b74f8bd162d01ab1ff4580 | /trydjango1-11/src/Scripts/django-admin.py | ca9e6d1181ec0115a43f5a9d9e97084c3d4e0c58 | [] | no_license | Humphreychinedu/first-App-2017 | 8c89fb2aaa717b58cfd204029b3a363cad8e73df | f881879f0a13c1b95f970b8fd97ccf8544bf1b87 | refs/heads/master | 2022-11-23T06:46:27.674197 | 2017-11-24T22:10:54 | 2017-11-24T22:10:54 | 111,957,540 | 0 | 0 | null | 2022-11-16T18:58:01 | 2017-11-24T21:43:21 | Python | UTF-8 | Python | false | false | 174 | py | #!c:\users\toshiba\desktop\dev\trydjango1-11\src\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"mefendja.chinedu@gmail.com"
] | mefendja.chinedu@gmail.com |
63eb117df50510a881cad1cd17e8650e4c931d87 | 84a5c4c2e0977d42425771098f5f881c750da7f0 | /neomodel_constraints/fetcher/constraints/util.py | 3a08ca6379d4f6cab307f35957905429b97c95ac | [] | no_license | SSripilaipong/neomodel-constraints | 6c3023ba156275e48f5f7ebcbdd283ce8d41f9a1 | 4b91185ba9eec993c58e9ae770fd3d0e90f915ae | refs/heads/main | 2023-07-15T09:58:41.451631 | 2021-08-29T13:19:38 | 2021-08-29T13:19:38 | 390,312,509 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | from typing import List
from neomodel_constraints.constraint import ConstraintSet, TypeMapperAbstract
from .data import Neo4jConstraintQueryRecord
def convert_constraints_with_type_mapper(
raw: List[Neo4jConstraintQueryRecord],
type_mapper: TypeMapperAbstract
) -> ConstraintSet:
constraints = set()
for record in raw:
constraint_type = type_mapper.map(record.type_)
constraint = constraint_type.from_raw(record.dict())
constraints.add(constraint)
return ConstraintSet(constraints)
| [
"santhapon.s@siametrics.com"
] | santhapon.s@siametrics.com |
20f4e8a0618bd5f9a0a9beeb9b7851d0af09dcdc | 354b10475f3006b79a8ed78d784980c43efeb9d2 | /Marvellous_Infosystem_Assignment_2/Assingment2_7.py | 1d244667966bc1dcafdc726487190447c86417ea | [] | no_license | spd94/Marvellous_Infosystem_Python_ML_Assignments | 1c851f8e95dcdfa493b2015c64c96afaf3adf3ef | 1c2f578b27fed04e42cef47b97b657eb02888f4a | refs/heads/master | 2020-09-09T14:37:50.791208 | 2019-12-18T11:41:16 | 2019-12-18T11:41:16 | 221,473,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | def print_pattern(n):
i=0
print("Output :")
while(i<n):
j=0
k=1
while(j<n):
print(k,end=" ")
k+=1
j+=1
print("")
i+=1
print("Input :",end=" ")
x=int(input())
print_pattern(x) | [
"shrihari.dhanorkar@spit.ac.in"
] | shrihari.dhanorkar@spit.ac.in |
8b0fa897111dee9d44d7c7bcbc88c5c4ae1a6577 | c91478e1327dcd3e42d6280973ca59afcf1eb91a | /test/regex/corpus2sentences-p.py | a9676e43af45cdaaa377e5efd255204a3c92a8ca | [] | no_license | josefK128/varencoder_docs | 33fc545a83efa445b3549b579f8a212b6e0c2472 | ddaeb4ee8a2eea8bac15dd4eeb24365fa8116656 | refs/heads/master | 2022-12-13T00:59:34.881828 | 2020-09-09T17:09:19 | 2020-09-09T17:09:19 | 274,789,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,155 | py | # corpus2sentences.py
# reads corpus and builds dictionary {doc-index:[doc-sentences],...}
# filters unnecessary and error-causing period expressions such as:
# i.e, e.g, A. Taylor (A.)
# returns dictionary docs to filter_sentences.py
import os
import re
#closure vars for action() - free vars in action()
dbasepath = '../corpus/' #location of corpus file(s) for diagnostics
vbasepath = './corpus/' #location of corpus file(s) for vae run
corpusname = 'corpus0' #default corpus0
docs = {} #dictionary of documents from corpus
def corpus(corpusnm = 'corpus0'):
#identify corpusname as closure var, not local
global corpusname
print('\ncorpus2sentences.corpus(): setting corpusname to ' + corpusnm)
corpusname = corpusnm
def filter(regex, replace, s_pf, diagnostics=False):
#filter each text - eliminate i.e, e.g., A. Taylor
s = re.sub(regex, replace, s_pf) #filter by regex
result = re.subn(regex, replace, s_pf) #filter by regex
if diagnostics:
print('filtering detected ' + str(result[1]) + ' anomaly(ies)!!\n')
if s != s_pf:
print(s_pf + '\n\nreplaced by:\n\n' + s)
return s
def action(diagnostics=False):
#relative location of corpus relative to main file
# !!!! change to original !!!!
# if diagnostics == True:
# basepath = dbasepath + corpusname +'/' #relative to /encoder
# else:
# basepath = vbasepath + corpusname + '/' #relative to vae.py
basepath = vbasepath + corpusname + '/' #relative to vae.py
print('\n+++++++++++ corpus2sentences +++++++++++++++++++++')
print('corpus2sentences: basepath = ' + basepath)
index = 0 #document ordinal identifier - key of dict docs
for root, dirs, filenames in os.walk(basepath):
for fn in filenames:
if fn.endswith(".txt"):
with open(basepath+fn, "r") as f:
s = '' #string representation of file f
for line in f:
if not line.startswith('$'):
s += line.replace("\n"," ").lstrip()
#filter each doc - eliminate i.e, e.g., A. Taylor
print('\nfiltering text ' + fn )
rs = '\s([a-z,A-Z]\.)+,*'
regex = re.compile(rs)
s_pf = s
s = filter(regex, '', s_pf, diagnostics)
#split the file-string on '***' to form docs
a = s.split('***')
if diagnostics: print(str(len(a)) + ' documents')
for doc in a:
doc_sentences = [] #sentences associated with docs[doc]
sentences = doc.split('.') #sections of split doc string
if diagnostics:
print('\n\ndoc ' + str(index))
print(str(len(sentences)-1) + ' sentences:')
for sentence in sentences:
sentence = sentence.strip()
if(len(sentence) >0): #skip empty sentences exp last
if diagnostics: print(sentence +'\n')
doc_sentences.append(sentence) #add non-empty sent.
docs[index] = doc_sentences #sentence-list val for key index
index += 1
if diagnostics == True:
print('\n\n********************************************')
for index in docs:
print('%%%%%%%%%')
print('docs ' + str(index) + ' is ' + str(docs[index]))
else:
print('number of paragraphs extracted = ' + str(len(docs.values())))
for k,v in docs.items():
print('paragraph ' + str(k) + ' has ' + str(len(v)) + ' sentences')
return(docs)
if __name__ == "__main__":
print("corpus2sentences module running in diagnostics mode as __main__")
#action(True)
action(False) #quick check of non-diagnostics output in diagnoistics mode
else:
print("corpus2sentences module imported")
| [
"mark_rudolph@yahoo.com"
] | mark_rudolph@yahoo.com |
aa09a18e47cf459ca97ff6019aee653207fedacf | 47b3a72fa07b705a17b6156741f17fb99946250a | /ServiceTeam/ServiceTeam/settings.py | b8c3689710625fb2a85cad0e7d08834e336935f6 | [] | no_license | Kuber-Ietermagog/Service-Team | 15b65a6a5572c26362ada06b6edd61c6b710566c | d11e18fc4afb8c21f17b4d7df5491b74d099cd78 | refs/heads/master | 2020-05-04T21:48:30.293699 | 2019-04-30T10:04:58 | 2019-04-30T10:04:58 | 179,489,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | """
Django settings for ServiceTeam project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to version control; rotate it
# and read it from the environment for production.
SECRET_KEY = 'd5irg-7yl9k7w#j^l!a7s)gw4odm^j@&smf=l7ri@q6f=kr_tr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Production hostname on PythonAnywhere; DEBUG above should be off there.
ALLOWED_HOSTS = ['dimakoserviceteam.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
'clock_card',
'time_sheets'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ServiceTeam.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ServiceTeam.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = '/accounts/users'
LOGOUT_REDIRECT_URL = '/'
| [
"kuber.ietermagog@gmail.com"
] | kuber.ietermagog@gmail.com |
f9296a08deaa5d0ce4a8d34fad46b6f4e17d1cbd | 67cac71810c704596b02b58e56dfd7d825f75c13 | /function_expl.py | 03d3bf2346f243662c934dae6d86cd44d4598b40 | [] | no_license | manhlam/python-core | c3caf8011f176af0d6eb481566c20c568d1e6eaa | a26b952b3f273abc5fdace38d1be3e9509c4406e | refs/heads/master | 2022-11-18T10:59:50.423682 | 2020-07-21T02:46:29 | 2020-07-21T02:46:29 | 255,303,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | import math
print(math.pi)
print(math.e)
# round e to 2 digits after the decimal point
print(round(math.e, 2))
# print 1.9 rounded down (floor)
print(math.floor(1.9))
# print 2.1 rounded up (ceil)
print(math.ceil(2.1))
# truncate toward zero: drop the fractional part of a float
print(math.trunc(-1.9))
# print the factorial of 5
print(math.factorial(5))
# print the greatest common divisor of two integers
print(math.gcd(12, 18)) | [
"manhlamvepj7543@gmail.com"
] | manhlamvepj7543@gmail.com |
05033b6c1a6811a8241b7fbeb834a416dfd6694b | f7c83e064b497380314e542b1dbfdd62a2d77658 | /singleton_multithreading_metaclass.py | dbf4dec777d16e66e69f1cc4a7b7f4bda4539466 | [] | no_license | Pythonyte/Python_Short_Notes | 52c7947c4d077daecc8840f8c4b092513fd68768 | 9e91348191d97ab8416ad6c06d67c62953811872 | refs/heads/master | 2021-07-22T01:53:57.695489 | 2020-07-10T13:57:31 | 2020-07-10T13:57:31 | 75,210,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | import threading
lock = threading.Lock()


class Singleton(type):
    """Metaclass that turns every class using it into a thread-safe singleton."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Fast path: once the instance exists we can skip locking entirely.
        if cls not in cls._instances:
            with lock:
                # Double-checked locking: another thread may have created the
                # instance while this one was waiting on the lock, so test
                # again inside the critical section before constructing.
                if cls not in cls._instances:
                    cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
class SingletonClass(metaclass=Singleton):
    """Demo class: every SingletonClass() call yields the same shared instance."""
    pass
def testThread(num):
    """Worker: instantiate the singleton and print the shared object.

    ``num`` is the spawning loop's index; it is unused inside the worker.
    """
    print("Object value",SingletonClass())
if __name__ == '__main__':
    # Race five threads against singleton creation: despite concurrency,
    # every thread should print the very same instance. Threads are started
    # and not joined here.
    for i in range(5):
        t = threading.Thread(target=testThread, args=[i])
        t.start()
| [
"sumits2@webintensive.com"
] | sumits2@webintensive.com |
5926bf8b1d8f5f1cfca7194aa1b78be52247f472 | 4512420c8330a2b27c6604996edebf7996f8c645 | /gs_quant/analytics/datagrid/data_column.py | 16470870f6139ac680a06b6cbf31512c26b5a5ef | [
"Apache-2.0"
] | permissive | OfficialEasyQuant/gs-quant | 8dd9fb439e0bf8c0256169a85c21cfba6aaeb3fd | b28570be0371976205b6b4c1d88c3b95281a3dbf | refs/heads/master | 2023-04-08T12:12:41.510396 | 2021-04-23T21:57:45 | 2021-04-23T21:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from dataclasses import dataclass, asdict, fields
from typing import Dict, List
from gs_quant.analytics.core.processor import BaseProcessor
DEFAULT_WIDTH = 100
class RenderType:
    """Namespace of plain string constants naming how a column cell renders;
    the values appear verbatim in the serialized column format."""
    DEFAULT = 'default'
    HEATMAP = 'heatmap'
    BOXPLOT = 'boxplot'
    SCALE = 'scale'
@dataclass
class ColumnFormat:
    """Display formatting options for a DataColumn.

    renderType:    cell render style (one of the RenderType constants).
    precision:     decimal places shown.
    humanReadable: abbreviate large numbers when True.
    tooltip:       optional header hover text (effectively Optional[str];
                   omitted from serialized output when None).
    displayValues: whether cell values are shown.
    """
    renderType: RenderType = RenderType.DEFAULT
    precision: int = 2
    humanReadable: bool = True
    tooltip: str = None
    displayValues: bool = True

    @classmethod
    def from_dict(cls, dict_):
        """Build an instance from a dict, ignoring unknown keys."""
        class_fields = {f.name for f in fields(cls)}
        # Use cls (not the class name) so subclasses get instances of their
        # own type from this alternate constructor.
        return cls(**{k: v for k, v in dict_.items() if k in class_fields})
class DataColumn:
    """Base class for a grid column: a name, an optional processor that
    computes the column's values, and display formatting."""

    def __init__(self,
                 name: str,
                 processor: BaseProcessor = None,
                 *,
                 format_: ColumnFormat = None,
                 width: int = DEFAULT_WIDTH):
        """ DataColumn
        :param name: Name of the column
        :param processor: Processor to apply to the column for calculation
        :param format_: Formatting information for the column result
            (a fresh ColumnFormat is created when omitted)
        :param width: Size of the column in pixels when presented on the UI
        """
        self.name = name
        self.processor = processor
        # Create the default per instance: ColumnFormat is a mutable
        # dataclass, so a shared default instance evaluated once in the
        # signature would leak mutations between columns.
        self.format_ = ColumnFormat() if format_ is None else format_
        self.width = width

    def as_dict(self):
        """Serialize the column to a plain dict (tooltip omitted when None)."""
        format_ = asdict(self.format_)
        if format_['tooltip'] is None:
            del format_['tooltip']
        column = {
            'name': self.name,
            'format': format_,
            'width': self.width
        }
        processor = self.processor
        if processor:
            column['processorName'] = processor.__class__.__name__
            column.update(**processor.as_dict())
        return column

    @classmethod
    def from_dict(cls, obj: Dict, reference_list: List):
        """Rebuild a column (and its processor) from its dict form."""
        processor = BaseProcessor.from_dict(obj, reference_list)
        # Use cls so subclasses round-trip to their own type.
        return cls(name=obj['name'],
                   processor=processor,
                   format_=ColumnFormat.from_dict(obj.get('format', {})),
                   width=obj.get('width', DEFAULT_WIDTH))
| [
"noreply@github.com"
] | OfficialEasyQuant.noreply@github.com |
7688658bbde4b5698dc86820d79528f60b976b8e | 4acaa74999d4cb4bbe57aaf8d9c7b4b8c40c4b3d | /todo/tasks/migrations/0002_auto_20201012_2148.py | 03430b1a58bc2fb8c614ca4c53629461568def19 | [] | no_license | AndreRebelo/projeto-todo | 20a550858a9e059048921095404ba5021922564c | 44e501eb190ac4c3869d0a3687057c0dc164a334 | refs/heads/main | 2023-01-06T22:47:21.600178 | 2020-11-09T11:10:51 | 2020-11-09T11:10:51 | 308,009,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | # Generated by Django 3.1.2 on 2020-10-12 20:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: attaches each Task to an owning User
    # and converts the "done" field into a constrained choice field.

    dependencies = [
        # Requires the (swappable) user model and this app's initial schema.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tasks', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='user',
            # default=1 back-fills existing rows with the user whose pk is 1;
            # tasks are deleted together with their owner (CASCADE).
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
            # The default exists only for the back-fill; it is not kept on the model.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='task',
            name='done',
            field=models.CharField(choices=[('doing', 'Doing'), ('done', 'Done')], max_length=5),
        ),
    ]
| [
"71769528+AndreRebelo@users.noreply.github.com"
] | 71769528+AndreRebelo@users.noreply.github.com |
f51a66342976016246f44004594dec3297759626 | 44f60a52dd6864075171c24df4dd62da425cd3b6 | /vis/migrations/0008_auto_20150523_1123.py | 7467bfc2dfda786b7adc9e0abae79ed049ad5570 | [] | no_license | lukaprelic/Visdjango | 63c1af7391e984186da41d78f2e091728fa06ff1 | 9708e7a4f7cfff140b3801687c053b4872de3015 | refs/heads/master | 2021-01-17T12:21:13.547862 | 2018-09-19T22:25:36 | 2018-09-19T22:25:36 | 28,752,780 | 0 | 0 | null | 2018-09-19T21:40:39 | 2015-01-03T19:37:38 | JavaScript | UTF-8 | Python | false | false | 690 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated migration: adds a free-form "meta" text field to Visits
    # and refreshes the captured default on "last_visited".

    dependencies = [
        ('vis', '0007_auto_20150523_0005'),
    ]

    operations = [
        migrations.AddField(
            model_name='visits',
            name='meta',
            field=models.TextField(default=b'empty'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='visits',
            name='last_visited',
            # NOTE(review): this default is the fixed moment makemigrations ran,
            # not "now" at insert time — presumably auto_now/auto_now_add was
            # intended on the model; confirm before relying on this value.
            field=models.DateTimeField(default=datetime.datetime(2015, 5, 23, 11, 23, 7, 587000)),
            preserve_default=True,
        ),
    ]
| [
"luka.prelicads@gmail.com"
] | luka.prelicads@gmail.com |
74173ef5d6c8e8f1b2f1282a3ba50014aaf181af | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/experimental/terraform/lint/tfsec/register.py | da368f57c9b4d2296cb0466cd1219a8d9616b2f6 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 245 | py | # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.terraform.lint.tfsec.rules import rules as tfsec_rules
def rules():
    """Return the tfsec lint rules so Pants registers this backend's plumbing."""
    return tfsec_rules()
| [
"noreply@github.com"
] | pantsbuild.noreply@github.com |
ee5e410ecf0452265ebf968fe25bbe2adf0808ed | 029bf437976481b5debf3f717bd3fca3a7b3db90 | /recursion[1]/recursion1.py | 73a6097f4908d12ef1a140fa7f4a8a7d6650d60b | [] | no_license | cau-algorithm-study/Python-Data-Structure | 510e22709a1ee69a81a0ebfd9e9634b1b5d15f30 | 91722311123ca985f4c773851b7b212ffc7d21aa | refs/heads/master | 2020-06-15T05:46:19.654957 | 2019-07-08T08:19:20 | 2019-07-08T08:19:20 | 195,218,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,715 | py | # -*- coding:utf-8 -*-
'''
남태평양에 있는 어느 나라에 11개의 섬이 다음과 같이 다리로 연결되어 있다.
이 나라의 관광청에서는 관광객들이 11개의 섬들을 모두 방문할 수 있는, 순서가 다른 3개의 관광코스를 만들었다.
각 코스의 관광은 섬 H에서 시작한다. 관광청에서는 각 관광 코스의 방문 순서를 다음과 같은 규칙에 따라 만들었다.
H
/ \
F S
/ \ / \
U E Z K
/ / \
N A Y
\
T
'''
# Course A: on arriving at an island, always tour it first, then head to the
# left island; after every island on the left has been visited, continue to
# the right island (a pre-order traversal).
def A_course(n):
    """Print the pre-order tour starting at node ``n`` as 'NAME -> ' chunks."""
    if n is not None:  # PEP 8: compare with None via "is", not "!="
        print(n.name, '->', end=' ')  # tour island n first
        A_course(n.left)
        A_course(n.right)
# Course B: on arriving at an island, postpone touring it; first visit every
# island on the left, come back and tour this island, then continue to the
# right island (an in-order traversal).
def B_course(n):
    """Print the in-order tour starting at node ``n`` as 'NAME -> ' chunks."""
    if n is not None:  # PEP 8: compare with None via "is", not "!="
        B_course(n.left)
        print(n.name, '->', end=' ')  # tour n after its whole left side
        B_course(n.right)
# Course C: on arriving at an island, postpone touring it; visit every island
# on the left, then every island on the right, and only then come back and
# tour the postponed island (a post-order traversal).
def C_course(n):
    """Print the post-order tour starting at node ``n`` as 'NAME -> ' chunks."""
    if n is not None:  # PEP 8: compare with None via "is", not "!="
        C_course(n.left)
        C_course(n.right)
        print(n.name, '->', end=' ')  # tour n after both subtrees
class Node:
    """A binary-tree node: an island name plus its left/right neighbors."""

    def __init__(self, name, left=None, right=None):
        # Children default to None, i.e. no island in that direction.
        self.name, self.left, self.right = name, left, right
def map():  # NOTE: shadows the builtin map(); name kept for the existing caller below
    """Build the 11-island tour map and return the starting island H."""
    # Assemble the tree bottom-up, handing each node its children at
    # construction time instead of patching .left/.right afterwards.
    t = Node('T')
    a = Node('A', right=t)
    e = Node('E', left=a)
    n = Node('N')
    u = Node('U', left=n)
    f = Node('F', left=u, right=e)
    y = Node('Y')
    k = Node('K', right=y)
    z = Node('Z')
    s = Node('S', left=z, right=k)
    return Node('H', left=f, right=s)  # return the starting island
# Demo driver: build the island map once, then print all three tour orders,
# each starting from island H.
start = map()
print('A-코스:\t')
A_course(start)
print("")
print('B-코스:\t')
B_course(start)
print("")
print('C-코스:\t')
C_course(start)
"leehj8687@gmail.com"
] | leehj8687@gmail.com |
7dc9604087cf0915f612460be51b976ab915ce17 | 9ec687021cde3b162ae69eebae1d3738507e7d5f | /smart-lock/libs/exit_app.py | cd9ad9a2f08631fd677dc61de6212b49e1dfec96 | [
"MIT"
] | permissive | jorisvos/rasplock | 032d61198b7f10560e4350a3553baad635e4e022 | c22eb8a7f6d161ad75a7733650b5c718a06b35f1 | refs/heads/master | 2021-07-19T19:19:43.129132 | 2020-04-29T12:15:19 | 2020-04-29T12:15:19 | 148,496,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import variables.colors as c
from libs.clear import *
def exit(signal, frame):  # NOTE: shadows the builtin exit(); name kept for the handler registration
    """Signal handler: clear the terminal, print a goodbye, and terminate.

    :param signal: signal number delivered by the OS (unused)
    :param frame: current stack frame at delivery time (unused)
    """
    # Import sys explicitly instead of relying on it leaking in via
    # "from libs.clear import *", which is fragile.
    import sys
    clear()
    print(c.bcolors.OKGREEN + "Bye!" + c.bcolors.ENDC)
    sys.exit(0)
| [
"jorisvos037@gmail.com"
] | jorisvos037@gmail.com |
e0d0bff373d69b9455fd52b2ddecf9431c15390d | e95eb3b5332ba010669f921fe6ac22f85837da2a | /examples/analysis/parse_demo.py | 1470b8acd579aa33a07b0bd3b49fffc8f89cffa2 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | LABSN/expyfun | a5998722f09bfb08e3167d6309ce0d5d534b8b18 | f324eb8c65afa9530698f15ca058700518355a8f | refs/heads/main | 2023-08-05T13:06:15.026909 | 2023-07-25T19:07:03 | 2023-07-25T19:07:03 | 11,614,571 | 13 | 19 | BSD-3-Clause | 2023-07-25T19:07:05 | 2013-07-23T17:28:02 | Python | UTF-8 | Python | false | false | 733 | py | # -*- coding: utf-8 -*-
"""
============
Parsing demo
============
This example shows some of the functionality of ``read_tab``.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import ast
from expyfun.io import read_tab
print(__doc__)
data = read_tab('sample.tab')  # from simple_experiment
print('Number of trials: %s' % len(data))
keys = list(data[0].keys())
print('Data keys: %s\n' % keys)
# Walk the trials and, for each multi-tone trial, report the stimulus
# targets presented versus the keys the subject actually pressed.
for di, d in enumerate(data):
    if d['trial_id'][0][0] == 'multi-tone':
        print('Trial %s multi-tone' % (di + 1))
        # the tab file stores the target list as a Python-literal string
        targs = ast.literal_eval(d['multi-tone trial'][0][0])
        presses = [int(k[0]) for k in d['keypress']]
        print(' Targs: %s\n Press: %s' % (targs, presses))
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.