blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1d3a822683b19133ea27b9cc99ca006c2750548 | f44c40a6416b5e5d698fac0e8a0be45486dfb9ce | /remove_commit_test/settings.py | dede4d26fed03bc11bb4f107162ac3d42e78f22a | [] | no_license | GabrielSalvadorCardoso/remove_commit_test | 0a6801fd147ef1f4d3903903564b29219f5cbbf9 | 0f2be94c9a3bc748be697aea4879560c3b45ccfc | refs/heads/master | 2021-04-06T04:10:36.426334 | 2018-03-15T13:48:59 | 2018-03-15T13:48:59 | 125,292,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,138 | py | """
Django settings for remove_commit_test project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed in plain text; rotate it and load it
# from the environment (e.g. os.environ) before any non-toy deployment.
SECRET_KEY = 'b@=5=w44+l@#=o9$#**ie2w1hhe5t8%#68nvd&6o)zylxqi@oo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is on; must list served hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'commit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'remove_commit_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'remove_commit_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"gabrielcardoso95@gmail.com"
] | gabrielcardoso95@gmail.com |
be328abad4773ddd2b9332fbcc647a4305ee3b2e | d23761e4e5856ee15a61b20f05d3a1b09bb0186e | /Tema5/NeuralNetwork.py | d762ed802ef3922515e798775623587d93979849 | [] | no_license | StativaCamelia/InteligentaArticifiala | ff7962a477c8686455be2d8f5c1dbbf9b9543e5e | f73fbb6cc31279d1e4e7d8371eb7218f6972095e | refs/heads/main | 2023-02-15T12:57:45.505890 | 2021-01-11T18:10:55 | 2021-01-11T18:10:55 | 304,123,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | import math
import random
import numpy as np
def sigmoid_activation(z):
    """Element-wise logistic sigmoid, 1 / (1 + e^(-z))."""
    return np.reciprocal(1.0 + np.exp(np.negative(z)))
class NeuralNetwork:
    """Tiny hard-wired 2-2-1 multilayer perceptron trained by full-batch
    gradient descent on the four 2-bit inputs from get_inputs().

    `labels` is expected to hold one target per input (a 2-bit truth
    table, e.g. XOR); `learning_rate` and `number_of_epochs` control the
    gradient-descent loop in train().
    """
    def __init__(self, labels , learning_rate, number_of_epochs):
        self.no_of_epochs = number_of_epochs
        self.labels = labels
        self.learning_rate = learning_rate
        # Layer widths: 2 inputs -> 2 hidden units -> 1 output.
        self.layers_sizes = [2, 2, 1]
        self.weights = []
        self.biases = []
        self.initialize_biases()
        self.initialize_weights()
    def initialize_weights(self):
        # One (fan_out, fan_in) standard-normal matrix per layer pair.
        self.weights = [np.random.randn(self.layers_sizes[i], self.layers_sizes[i - 1]) for i in range(1, len(self.layers_sizes))]
        print(self.weights)
    def initialize_biases(self):
        # One (fan_out, 1) standard-normal column vector per non-input layer.
        self.biases = [np.random.randn(self.layers_sizes[i], 1) for i in range(1, len(self.layers_sizes))]
    def feed_forward(self, x):
        """Forward pass for one sample; returns (net_inputs, activations).

        NOTE(review): net inputs and post-input activations are appended
        transposed (row vectors) while the value propagated through the
        loop stays a column vector -- backward() depends on exactly this
        mixture of shapes, so it is kept as found.
        """
        activations, net_inputs = [], []
        activation_predecesor = x.reshape(2, 1)
        activations.append(activation_predecesor)
        for i in range(len(self.layers_sizes) - 1):
            net_input = np.dot(self.weights[i], activation_predecesor) + self.biases[i]
            net_inputs.append(net_input.T)
            activation_predecesor = sigmoid_activation(net_input)
            activations.append(activation_predecesor.T)
        return net_inputs, activations
    def get_output(self, x):
        # Plain forward pass that returns only the final activation.
        activation = x.reshape(2, 1)
        for i in range(len(self.layers_sizes) - 1):
            net_input = np.dot(self.weights[i], activation) + self.biases[i]
            activation = sigmoid_activation(net_input)
        return activation
    def error_last_layer(self, output, target):
        # Output-layer delta for squared error: sigma'(output)*(output-target).
        # NOTE(review): the derivative is applied to the activation here but
        # to the raw net input for the hidden layer in backward(); confirm
        # this asymmetry is intended.
        return self.sigmoid_derivative(output)*(output-target)
    def backward(self, net_inputs, activations, label):
        """Backpropagate one sample; returns (bias grads, weight grads).

        Written specifically for the two-layer case: index -1 is the
        output layer, index 0 the (single) hidden layer.
        """
        changes_w, changes_b = [np.zeros(w.shape) for w in self.weights], [np.zeros(b.shape) for b in self.biases]
        error = self.error_last_layer(activations[-1], label)
        changes_b[-1], changes_w[-1] = error, np.dot(error, activations[-2])
        # Hidden-layer delta: pull the error back through the output weights
        # and scale by sigma'(hidden net input).
        sd = self.sigmoid_derivative(net_inputs[0])
        error = np.dot(self.weights[-1].T, error) * sd.T
        changes_b[0], changes_w[0] = error, np.dot(error,activations[0].T)
        return changes_b, changes_w
    @staticmethod
    def sigmoid_derivative(z):
        # sigma'(z) = sigma(z) * (1 - sigma(z))
        return sigmoid_activation(z) * (1 - sigmoid_activation(z))
    def get_inputs(self):
        # The four 2-bit input vectors, in fixed order.
        return [np.array([0,0]), np.array([0,1]), np.array([1, 0]), np.array([1, 1])]
    def train(self):
        """Full-batch gradient descent for no_of_epochs epochs; prints the
        loss each epoch and the final thresholded predictions at the end."""
        inputs = self.get_inputs()
        epoch = 0
        while epoch < self.no_of_epochs:
            results = []
            delta_w, delta_b = [np.zeros(w.shape) for w in self.weights], [np.zeros(b.shape) for b in self.biases]
            for i in range(4):
                net_inputs, activations = self.feed_forward(inputs[i])
                changes_b, changes_w = self.backward(net_inputs, activations, self.labels[i])
                delta_w = [dw + nw for dw, nw in zip(delta_w, changes_w)]
                delta_b = [db + nb for db, nb in zip(delta_b, changes_b)]
                # Prediction recorded before this epoch's update is applied.
                results.append(self.get_output(inputs[i]))
            # Apply the gradients averaged over the 4 samples.
            self.weights = [w - nw * (self.learning_rate/4) for w, nw in zip(self.weights, delta_w)]
            self.biases = [b - nb * (self.learning_rate/4) for b, nb in zip(self.biases, delta_b)]
            epoch += 1
            error = self.loss(results, self.labels)
            print(error)
        self.accuracy()
    def loss(self, y, t):
        # Mean squared error between targets t and predictions y.
        return np.mean((np.array(t) - np.array(y)) ** 2)
    def accuracy(self):
        # NOTE(review): shuffling the fresh list returned by get_inputs()
        # has no effect; the results below use the unshuffled inputs.
        random.shuffle(self.get_inputs())
        results = [(x,0) if self.get_output(x) < 0.5 else (x, 1) for x in self.get_inputs()]
        print(results)
| [
"stativa50@gmail.com"
] | stativa50@gmail.com |
b8bfa7190ac7732df963c483ad04799f82c731a0 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4444130.3.spec | f8bf8bd0648b161563dc4f6fe37fbdf708b94ceb | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,013 | spec | {
"id": "mgm4444130.3",
"metadata": {
"mgm4444130.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 27709705,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 324,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 47,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.removed.fna.gz"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 27709708,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 324,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 50,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 4792,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 27709701,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 31592,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 324,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 13852190,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 127,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 21982661,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 323,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2554517,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 319,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2240922,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 319,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 860,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 45,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 40380,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 13319,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 11286,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 115277,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.sims.info"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 13440814,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 127,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 320061,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.mapping.stats"
},
"640.loadAWE.info": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/640.loadAWE.info"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 17236748,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 9177583,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 20882438,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 8220066,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 43369844,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 10924096,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 73878,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 5385181,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 6285381,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 8204500,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 405542954,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 981,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 160,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 118,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 816,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 2885,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 65,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 10201,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 16519,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 5335,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 1028,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22982,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 87,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 65997,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.species.stats"
}
},
"id": "mgm4444130.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4444130.3"
}
},
"raw": {
"mgm4444130.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4444130.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
c7b6a2ec061b9b5dc74dac8412e4a7def8329a9a | 83f60a11a6553f3a939171019ff89b417685ed6d | /cost_and_prediction_functions.py | 08936bc86ca67e3737ae652e87731c122f45358d | [] | no_license | echo66/pytorch-rec-sys | 046800f7897fc6a6351b33656a2d68a32a769139 | 1533031e6915e90ee2dcfad2964d1ce8f4b18b99 | refs/heads/master | 2021-01-19T10:11:09.135814 | 2017-04-23T16:07:41 | 2017-04-23T16:07:41 | 87,834,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | import torch
def compute_prediction(UFeats, IFeats):
    """Predicted rating matrix: dot products of user and item factor rows."""
    return UFeats @ IFeats.t()
def compute_mse(pred, target):
    """Mean squared error between predictions and targets."""
    squared_error = (pred - target) ** 2
    return squared_error.mean()
def compute_se(pred, target):
    """Sum of squared errors between predictions and targets."""
    squared_error = (pred - target) ** 2
    return squared_error.sum()
def compute_mabse(pred, target):
    """Mean absolute error between predictions and targets."""
    return (pred - target).abs().mean()
def compute_abse(pred, target):
    """Sum of absolute errors between predictions and targets."""
    return (pred - target).abs().sum()
def compute_regl2(tensor):
    """L2 regularisation term: sum of the element-wise squares."""
    return (tensor * tensor).sum()
def compute_mean_regl2(tensor):
    # Mean L2 regularisation term: average of the element-wise squares.
    mean_regl2 = torch.mean(tensor * tensor)
return mean_regl2 | [
"bruno.filipe.silva.dias@gmail.com"
] | bruno.filipe.silva.dias@gmail.com |
c3631a99cd59826b2a32a514017962e9496fff2f | f7c07caa1210d2a08e8433cdd854b1232efa88e3 | /Collection-Modules/Queue-Module/LIFO-Queue.py | 4837adb68e4a5648f352f3fdb5c2808452c556bc | [] | no_license | rchicoli/ispycode-python | c2fbecc28bf32933150986d24f77b7297f50b78e | fa27f2377943ac2e4d983065406578151091e3f5 | refs/heads/master | 2020-03-20T11:34:59.698618 | 2018-06-14T21:14:02 | 2018-06-14T21:14:02 | 137,407,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py |
# Queue demo: enqueue 1, 2, 3 and print them back out.
# NOTE(review): a garbled duplicate line ("q = Queueimport Queue", a syntax
# error from a pasted-twice header) was removed. The file is named
# "LIFO-Queue" but the code uses Queue.Queue, which is FIFO; that behaviour
# is kept unchanged so the printed order stays 1, 2, 3.
try:
    import queue  # Python 3 module name
except ImportError:
    import Queue as queue  # Python 2 fallback (original import)

q = queue.Queue()
q.put(1)
q.put(2)
q.put(3)
print(q.get())
print(q.get())
print(q.get())
| [
"rafaelchicoli@hotmail.com"
] | rafaelchicoli@hotmail.com |
502e0a6630abfde4fcea418ba76872c955a30e3c | a097e203714bb40fdb0e9b3d36977815597707a2 | /CombinationSum2.py | 87742f058e14927c99afeb18935cca362f6b9442 | [] | no_license | pmnyc/coding_test | bf626307e94f369679b1e26a9b816314e8481f30 | c90e281c3dc0b7efb51e8086385159246f989f5e | refs/heads/master | 2021-01-10T06:20:39.474458 | 2019-09-14T17:25:54 | 2019-09-14T17:25:54 | 48,257,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | """
Combination Sum II
Given a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.
Each number in C may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
Elements in a combination (a1, a2, … , ak) must be in non-descending order. (ie, a1 ≤ a2 ≤ … ≤ ak).
The solution set must not contain duplicate combinations.
For example, given candidate set 10,1,2,7,6,1,5 and target 8,
A solution set is:
[1, 7]
[1, 2, 5]
[2, 6]
[1, 1, 6]
"""
import os, sys
import numpy as np
class Solution(object):
    """Collects every unique combination of candidates C summing to T.

    Each candidate may be used at most once per combination; combinations
    are produced in non-descending order and accumulated in self.res.
    Candidates are assumed to be positive integers (per the problem text).
    """
    def __init__(self, C,T):
        # Sort a copy so duplicates are adjacent (needed for the sibling
        # skip below) and results come out non-descending; C is untouched.
        self.c = sorted(C)
        self.t = T
        self.res = []
    def getList(self):
        """Populate self.res with all unique combinations summing to self.t."""
        self.combineSum(self.c, [], self.t)
    def combineSum(self, candidates, cand, target):
        """DFS extending partial combination `cand` with picks from `candidates`.

        `target` is the remaining sum; a combination is recorded when it
        reaches exactly zero. (Debug print removed; duplicate suppression now
        skips equal siblings instead of scanning self.res on every hit.)
        """
        if target < 0:
            return
        if target == 0:
            self.res.append(cand[:])
            return
        for i, num in enumerate(candidates):
            # Skip equal values at the same recursion depth: this alone
            # guarantees no duplicate combinations are generated.
            if i > 0 and num == candidates[i - 1]:
                continue
            if num > target:
                break  # candidates sorted and positive: nothing later fits
            cand.append(num)
            self.combineSum(candidates[i + 1:], cand, target - num)
            cand.pop()
### test
candidate_pool = [10, 1, 2, 7, 6, 1, 5]
target_sum = 8
solver = Solution(candidate_pool, target_sum)
solver.getList()
solver.res  # bare expression: only visible in an interactive session
| [
"pmiori@gmail.com"
] | pmiori@gmail.com |
5ee05c4aa48322123557903e7a05f55eaddb36ec | b729ea7c029c67641c93e91ec954213b6758c5cb | /pyQt_function.py | 104fe0daa739a49fdc7cd874ef3e4bd1efd3f8d7 | [] | no_license | Codelegant92/CreditScoring | 5690493bcdb9601b3acec3adda53803794a87431 | a517f4b5b3caf72c241daefcbf49b488c49582b6 | refs/heads/master | 2021-01-17T12:24:40.626343 | 2017-07-12T09:13:25 | 2017-07-12T09:13:25 | 41,335,171 | 12 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py | __author__ = 'robin'
from PyQt4 import QtCore, QtGui
from creditScoring_UI import Ui_Dialog, _translate
from commonFunction import *
from decisionTree import decision_Tree
class Window(QtGui.QDialog):
    """Credit-scoring dialog: the user picks a training-data file, fills in
    20 applicant attributes, and gets a decision-tree prediction."""
    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Wire the two actions: file selection and classification.
        self.ui.fileButton.clicked.connect(self.openFileDialog)
        self.ui.pushButton.clicked.connect(self.classifier)
    def openFileDialog(self):
        """Let the user choose the training-data file.

        The full path is stored as the button text (classifier() reads it
        back from there); only ".../<basename>" is shown in the browser.
        """
        filenames = QtGui.QFileDialog.getOpenFileNames(self, "Open File", "/home/robin/Documents")
        if not filenames:
            # Dialog cancelled: keep the current selection instead of
            # raising IndexError on the empty list (original bug).
            return
        path = str(filenames[0])
        print(path)
        # rsplit replaces the original manual scan for the last '/',
        # which mis-sliced paths containing no slash at all.
        basename = path.rsplit('/', 1)[-1]
        self.ui.filePathBrowser.setText('.../' + basename)
        self.ui.fileButton.setText(_translate("Dialog", path, None))
    def classifier(self):
        """Collect the 20 form fields into a feature vector and classify it."""
        # NOTE(review): `np` is presumably brought in by
        # `from commonFunction import *` -- there is no numpy import here;
        # confirm before refactoring the star import away.
        testFeature = np.zeros(20)
        testFeature[0] = int(self.ui.comboBox_10.currentIndex())
        testFeature[1] = int(self.ui.spinBox.text())
        testFeature[2] = int(self.ui.comboBox_12.currentIndex())
        testFeature[3] = int(self.ui.comboBox_9.currentIndex())
        testFeature[4] = int(self.ui.spinBox_2.text())
        testFeature[5] = int(self.ui.comboBox_11.currentIndex())
        testFeature[6] = int(self.ui.comboBox_5.currentIndex())
        testFeature[7] = float(self.ui.doubleSpinBox.text())
        testFeature[8] = int(self.ui.comboBox_2.currentIndex())
        testFeature[9] = int(self.ui.comboBox_13.currentIndex())
        testFeature[10] = int(self.ui.spinBox_3.text())
        testFeature[11] = int(self.ui.comboBox_8.currentIndex())
        testFeature[12] = int(self.ui.spinBox_4.text())
        testFeature[13] = int(self.ui.comboBox_14.currentIndex())
        testFeature[14] = int(self.ui.comboBox_7.currentIndex())
        testFeature[15] = int(self.ui.spinBox_5.text())
        testFeature[16] = int(self.ui.comboBox_4.currentIndex())
        testFeature[17] = int(self.ui.spinBox_6.text())
        testFeature[18] = int(self.ui.comboBox_3.currentIndex())
        testFeature[19] = int(self.ui.comboBox_6.currentIndex())
        # Train on the selected file, classify, and display the result.
        filePath = self.ui.fileButton.text()
        trainFeature, trainLabel = read_GermanData20(filePath)
        predictedLabel = decision_Tree(trainFeature, trainLabel, testFeature)
        self.ui.textBrowser.setText(str(predictedLabel))
if __name__ == "__main__":
    # Start the Qt application and show the dialog until the user closes it.
    import sys
    qt_app = QtGui.QApplication(sys.argv)
    dialog = Window()
    dialog.show()
    sys.exit(qt_app.exec_())
| [
"robinchan1992@gmail.com"
] | robinchan1992@gmail.com |
774da8852253326d7bfda4448bf7270c23aee218 | 39640766120eb5e5757a0d474d0c1d56d94a0cce | /20220219-mocsctf/np-sha256/public/chall.py | 6a4c11eb2aaf0ca4a8034418d16ce8e92653ab4c | [] | no_license | samueltangz/ctf-archive-created | 7d7b5b4637415c633162b4c725245a412fafce49 | d4bf6abb532cda42b4796bf88b02de353608d166 | refs/heads/master | 2023-03-18T07:57:46.349924 | 2023-03-05T09:29:11 | 2023-03-05T09:29:11 | 124,827,870 | 11 | 2 | null | 2023-03-05T09:28:54 | 2018-03-12T03:18:13 | Python | UTF-8 | Python | false | false | 574 | py | import os
import sys
from sha256 import generate_hash
def main():
    """Read two hex-encoded messages from stdin and award the flag for a
    SHA-256 collision: distinct messages, each at least 64 bytes long,
    hashing to the same digest under the local generate_hash()."""
    def reject():
        print('No good!')
        sys.exit(-1)
    m1 = bytes.fromhex(input())
    m2 = bytes.fromhex(input())
    h1 = generate_hash(m1)
    h2 = generate_hash(m2)
    # Same checks, same order, same output as the original chain of ifs;
    # short-circuiting preserves the evaluation order exactly.
    if m1 == m2 or len(m1) < 64 or len(m2) < 64 or h1 != h2:
        reject()
    flag = os.environ.get('FLAG', 'MOCSCTF{REDACTED}')
    print(f'Good! {flag}')
# Entry-point guard: run the challenge only when executed as a script.
if __name__ == '__main__':
main() | [
"samueltangz613@gmail.com"
] | samueltangz613@gmail.com |
934c61e327a1a5a011bb982c8aa38c886f4d9397 | 3f08f43825e5a16e9788c91bdebd0134f6d56514 | /Breaking Bad/python.py | 08e8bac695d4e188f63821a36156c53c6ac067e3 | [] | no_license | delta/codegolf-inhott | 3a411c742545332943f4009d4d4076285c2d2964 | c6ee848e67c7074b7eeeb44ffbc1e84f115dafb7 | refs/heads/main | 2023-03-01T00:26:44.381965 | 2021-02-04T12:49:56 | 2021-02-04T12:49:56 | 333,818,744 | 0 | 0 | null | 2021-02-04T12:49:57 | 2021-01-28T16:34:32 | Java | UTF-8 | Python | false | false | 267 | py | (n,m,k),(x,s),a,b,c,d=(list(map(int,input().split()))for _ in" "*6)
# Code-golf solution; the six stdin lines are parsed on the line above.
# NOTE(review): problem semantics inferred from the code alone -- verify
# against the original contest statement.
# Append a fallback option (value x, threshold 0) to a/b and prepend
# sentinel zeros so c/d can be indexed 1-based by "count satisfied".
a+=[x];b+=[0];c=[0]+c;d=[0]+d
# Pair each a-value with its b-threshold, highest threshold first.
L=sorted(list(zip(a,b)),key=lambda x:-x[1])
# o: best (minimum) answer so far, initialised to the fallback cost n*x;
# j: pointer into d shared across iterations -- it only ever advances,
# which is sound because the thresholds bi are visited in decreasing order.
o,j=n*x,0
for ai,bi in L:
    # Advance j while the j-th d-prefix still fits within budget s.
    while j<=k and d[j]+bi<=s:j+=1
    if j==0:continue
    # Candidate answer: ai for each of the n-c[j-1] units not yet covered.
    t=ai*(n-c[j-1])
    o=min(o,t)
print(o) | [
"atharun05@gmail.com"
] | atharun05@gmail.com |
13f156fd389bcbf915701e7e2d50a80e78232162 | 829d144af6c88fe41c2df2fe40c11f5894222d9b | /ex079.py | 32488e691baa577763100d2dafc5d417ae21f69f | [] | no_license | davidbezerra405/PytonExercicios | 803d833aea8835cffc010bc5dbc67d0da5094a12 | 56a348f4042e2af68119e66f47049ca41c961cef | refs/heads/master | 2023-06-09T22:32:05.962091 | 2021-06-30T16:03:14 | 2021-06-30T16:03:14 | 376,387,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | valores = list()
# Reads integers until the user answers 'N', rejecting duplicates, then
# prints the distinct values in ascending order. (`valores` is created on
# the line just above this loop.)
while True:
    v = int(input('Digite um valor: '))
    if v not in valores:
        valores.append(v)
        print('Valor adicionado com sucesso...')
    else:
        print('Valor duplicado! Não vou adicionar...')
    op = ' '
    while op not in 'SN':
        # Only the first character of the answer matters; re-ask on empty
        # input instead of crashing with IndexError like the original
        # `...strip().upper()[0]` did when the user just pressed Enter.
        resposta = str(input('Deseja continuar [S/N]? ')).strip().upper()
        if resposta:
            op = resposta[0]
    if op == 'N':
        break
valores.sort()
print('-'*40)
print(f'Voce digitou os valores {valores}')
| [
"68335111+davidbezerra405@users.noreply.github.com"
] | 68335111+davidbezerra405@users.noreply.github.com |
8144fd14e1872d0b457d6e6d9fdb9385df733e9a | 2e65f2c71bd09c5f796ef8d590937c07e308001d | /src/troposphere_dns_certificate/__init__.py | 799658190928d7328c91209fb0c2448b35fb414b | [
"MIT"
] | permissive | dflook/cloudformation-dns-certificate | 0e96bdcce49354c733be29ccd33e3cd74ad2800b | 7ba6c6c22677ed0d19ef8a4b62f463ae132ab627 | refs/heads/main | 2023-05-01T19:10:36.586332 | 2023-04-26T22:09:16 | 2023-04-26T22:09:23 | 134,950,038 | 45 | 15 | MIT | 2023-04-23T17:31:05 | 2018-05-26T10:02:18 | Python | UTF-8 | Python | false | false | 916 | py | import wrapt
class TroposphereExtension:
    """Marker base class for pseudo-resources that expand into one or more
    plain troposphere resources when added to a template."""
    def add_extension(self, template, add_resource):
        """Expand this extension resource into *template*.

        Subclasses must override this and register their concrete
        troposphere resources through the *add_resource* callback.

        :param template: the template this resource is being added to
        :param add_resource: function to call to add each generated resource
        """
        raise NotImplementedError('This method should add standard troposphere resources to the template')
@wrapt.patch_function_wrapper('troposphere', 'Template.add_resource')
def wrapper(wrapped, instance, args, kwargs):
    """Route extension resources through their add_extension hook; forward
    everything else to the real Template.add_resource unchanged."""
    def _first_positional(resource):
        # Binding through a signature normalises (args, kwargs): the
        # resource is extracted whether it was passed positionally or by
        # keyword.
        return resource
    resource = _first_positional(*args, **kwargs)
    if isinstance(resource, TroposphereExtension):
        return resource.add_extension(instance, wrapped)
    return wrapped(*args, **kwargs)
| [
"daniel@flook.org"
] | daniel@flook.org |
a59908205ae08f7899a1ccb6ce0e05a20f6f9060 | fc0150b1fd6ba0efd6746a34ffa8cba01640d10e | /Python_3_Programming_January_and_July_2016/Lecture_1/Задача_3_Нарисувайте_квадрат.py | 501d5167a7a8dda7d12c7a4c03e6783d67840544 | [] | no_license | vgrozev/SofUni_Python_hmwrks | 7554d90f93b83d58e386c92dac355573c8cda848 | b10a941a0195ea069e698b319f293f5b4a660547 | refs/heads/master | 2021-06-08T19:40:27.009205 | 2019-11-24T17:19:31 | 2019-11-24T17:19:31 | 95,629,443 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | import turtle
# Draw a square whose side length is supplied by the user.
side = int(input("Please enter the lenght of the side: "))
turtle.speed('slow')
for _ in range(4):
    turtle.forward(side)
    turtle.right(90)
turtle.done()
| [
"vgrozev@gmail.com"
] | vgrozev@gmail.com |
2b4c3dab08d72b1941cb28ee51f4961a0e32414c | c5534423d44a2c1587e5ef5f4b893a78cd3b1cce | /Databricks/BedBricks/Spark-Programming-1.5.0-IL/Python/4.1 Streaming Query.py | 931d42474ae9210d99daf3d6754bcc83f9ccad08 | [] | no_license | adiazcan/databricks | 4fa964629ec1001f089e437fe1dbad238ac71ecb | 5e3506e701badee0bc32099f27b069ca66b36753 | refs/heads/master | 2022-03-23T13:22:07.166938 | 2022-03-11T07:48:46 | 2022-03-11T07:48:46 | 166,223,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,110 | py | # Databricks notebook source
# MAGIC
# MAGIC %md-sandbox
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 400px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Streaming Query
# MAGIC 1. Build streaming DataFrames
# MAGIC 1. Display streaming query results
# MAGIC 1. Write streaming query results
# MAGIC 1. Monitor streaming query
# MAGIC
# MAGIC ##### Classes
# MAGIC - DataStreamReader (<a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.streaming.DataStreamReader.html#pyspark.sql.streaming.DataStreamReader" target="_blank">Python</a>/<a href="http://spark.apache.org/docs/latest/api/scala/org/apache/spark/sql/streaming/DataStreamReader.html" target="_blank">Scala</a>)
# MAGIC - DataStreamWriter (<a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.streaming.DataStreamWriter.html#pyspark.sql.streaming.DataStreamWriter" target="_blank">Python</a>/<a href="http://spark.apache.org/docs/latest/api/scala/org/apache/spark/sql/streaming/DataStreamWriter.html" target="_blank">Scala</a>)
# MAGIC - StreamingQuery (<a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.streaming.StreamingQuery.html#pyspark.sql.streaming.StreamingQuery" target="_blank">Python</a>/<a href="http://spark.apache.org/docs/latest/api/scala/org/apache/spark/sql/streaming/StreamingQuery.html" target="_blank">Scala</a>)
# COMMAND ----------
# MAGIC %run ./Includes/Classroom-Setup
# COMMAND ----------
schema = "device STRING, ecommerce STRUCT<purchase_revenue_in_usd: DOUBLE, total_item_quantity: BIGINT, unique_items: BIGINT>, event_name STRING, event_previous_timestamp BIGINT, event_timestamp BIGINT, geo STRUCT<city: STRING, state: STRING>, items ARRAY<STRUCT<coupon: STRING, item_id: STRING, item_name: STRING, item_revenue_in_usd: DOUBLE, price_in_usd: DOUBLE, quantity: BIGINT>>, traffic_source STRING, user_first_touch_timestamp BIGINT, user_id STRING"
# Streaming read over the events directory using the schema string declared
# in the previous cell; maxFilesPerTrigger=1 makes each micro-batch consume
# a single input file.
df = (spark.readStream
      .schema(schema)
      .option("maxFilesPerTrigger", 1)
      .parquet(eventsPath)
     )
# COMMAND ----------
# MAGIC %md
# MAGIC ###  Build streaming DataFrames
# COMMAND ----------
df.isStreaming
# COMMAND ----------
from pyspark.sql.functions import col, approx_count_distinct, count
# Keep only events whose traffic source is "email", flag iOS/Android events
# as mobile, and project just the columns needed downstream.
emailTrafficDF = (df.filter(col("traffic_source") == "email")
                  .withColumn("mobile", col("device").isin(["iOS", "Android"]))
                  .select("user_id", "event_timestamp", "mobile")
                 )
# COMMAND ----------
# MAGIC %md
# MAGIC ###  Write streaming query results
# COMMAND ----------
# Locations for the query's fault-tolerance checkpoint and its parquet output.
checkpointPath = userhome + "/email_traffic/checkpoint"
outputPath = userhome + "/email_traffic/output"
# Start the stream: append-only parquet sink named "email_traffic_p",
# triggering a micro-batch every second.
devicesQuery = (emailTrafficDF.writeStream
                .outputMode("append")
                .format("parquet")
                .queryName("email_traffic_p")
                .trigger(processingTime="1 second")
                .option("checkpointLocation", checkpointPath)
                .start(outputPath)
               )
# COMMAND ----------
# MAGIC %md
# MAGIC ###  Monitor streaming query
# COMMAND ----------
devicesQuery.id
# COMMAND ----------
devicesQuery.status
# COMMAND ----------
devicesQuery.awaitTermination(5)
# COMMAND ----------
devicesQuery.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Coupon Sales Lab
# MAGIC Process and append streaming data on transactions using coupons.
# MAGIC 1. Read data stream
# MAGIC 2. Filter for transactions with coupons codes
# MAGIC 3. Write streaming query results to parquet
# MAGIC 4. Monitor streaming query
# MAGIC 5. Stop streaming query
# MAGIC
# MAGIC ##### Classes
# MAGIC - [DataStreamReader](http://spark.apache.org/docs/3.0.0/api/scala/org/apache/spark/sql/streaming/DataStreamReader.html)
# MAGIC - [DataStreamWriter](http://spark.apache.org/docs/3.0.0/api/scala/org/apache/spark/sql/streaming/DataStreamWriter.html)
# MAGIC - [StreamingQuery](http://spark.apache.org/docs/3.0.0/api/scala/org/apache/spark/sql/streaming/StreamingQuery.html)
# COMMAND ----------
schema = "order_id BIGINT, email STRING, transaction_timestamp BIGINT, total_item_quantity BIGINT, purchase_revenue_in_usd DOUBLE, unique_items BIGINT, items ARRAY<STRUCT<coupon: STRING, item_id: STRING, item_name: STRING, item_revenue_in_usd: DOUBLE, price_in_usd: DOUBLE, quantity: BIGINT>>"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1. Read data stream
# MAGIC - Use schema stored in **`schema`**
# MAGIC - Set to process 1 file per trigger
# MAGIC - Read from parquet with filepath stored in **`salesPath`**
# MAGIC
# MAGIC Assign the resulting DataFrame to **`df`**
# COMMAND ----------
# TODO
df = (spark.FILL_IN
)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ##### <img alt="Best Practice" title="Best Practice" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.3em" src="https://files.training.databricks.com/static/images/icon-blue-ribbon.svg"/> Check your work
# COMMAND ----------
assert df.isStreaming
assert df.columns == ['order_id', 'email', 'transaction_timestamp', 'total_item_quantity', 'purchase_revenue_in_usd', 'unique_items', 'items']
# COMMAND ----------
# MAGIC %md
# MAGIC ### 2. Filter for transactions with coupon codes
# MAGIC - Explode **`items`** field in **`df`**
# MAGIC - Filter for records where **`items.coupon`** is not null
# MAGIC
# MAGIC Assign the resulting DataFrame to **`couponSalesDF`**.
# COMMAND ----------
# TODO
couponSalesDF = (df.FILL_IN
)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ##### <img alt="Best Practice" title="Best Practice" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.3em" src="https://files.training.databricks.com/static/images/icon-blue-ribbon.svg"/> Check your work
# COMMAND ----------
schemaStr = str(couponSalesDF.schema)
assert "StructField(items,StructType(List(StructField(coupon" in schemaStr, "items column was not exploded"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3. Write streaming query results to parquet
# MAGIC - Configure streaming query to write out to parquet in "append" mode
# MAGIC - Set query name to "coupon_sales"
# MAGIC - Set a trigger interval of 1 second
# MAGIC - Set checkpoint location to **`couponsCheckpointPath`**
# MAGIC - Set output filepath to **`couponsOutputPath`**
# MAGIC
# MAGIC Assign the resulting streaming query to **`couponSalesQuery`**.
# COMMAND ----------
# TODO
couponsCheckpointPath = workingDir + "/coupon-sales/checkpoint"
couponsOutputPath = workingDir + "/coupon-sales/output"
couponSalesQuery = (couponSalesDF.FILL_IN
)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ##### <img alt="Best Practice" title="Best Practice" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.3em" src="https://files.training.databricks.com/static/images/icon-blue-ribbon.svg"/> Check your work
# COMMAND ----------
untilStreamIsReady("coupon_sales")
assert couponSalesQuery.isActive
assert len(dbutils.fs.ls(couponsOutputPath)) > 0
assert len(dbutils.fs.ls(couponsCheckpointPath)) > 0
assert "coupon_sales" in couponSalesQuery.lastProgress["name"]
# COMMAND ----------
# MAGIC %md
# MAGIC ### 4. Monitor streaming query
# MAGIC - Get ID of streaming query
# MAGIC - Get status of streaming query
# COMMAND ----------
# TODO
queryID = couponSalesQuery.FILL_IN
# COMMAND ----------
# TODO
queryStatus = couponSalesQuery.FILL_IN
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ##### <img alt="Best Practice" title="Best Practice" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.3em" src="https://files.training.databricks.com/static/images/icon-blue-ribbon.svg"/> Check your work
# COMMAND ----------
assert type(queryID) == str
assert list(queryStatus.keys()) == ['message', 'isDataAvailable', 'isTriggerActive']
# COMMAND ----------
# MAGIC %md
# MAGIC ### 5. Stop streaming query
# MAGIC - Stop the streaming query
# COMMAND ----------
# TODO
couponSalesQuery.FILL_IN
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ##### <img alt="Best Practice" title="Best Practice" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.3em" src="https://files.training.databricks.com/static/images/icon-blue-ribbon.svg"/> Check your work
# COMMAND ----------
assert not couponSalesQuery.isActive
# COMMAND ----------
# MAGIC %md
# MAGIC ### 6. Verify the records were written to a Parquet file
# COMMAND ----------
# TODO
# COMMAND ----------
display(spark.read.parquet(couponsOutputPath))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Classroom Cleanup
# MAGIC Run the cell below to clean up resources.
# COMMAND ----------
# MAGIC %run ./Includes/Classroom-Cleanup
# COMMAND ----------
| [
"adiazcan@hotmail.com"
] | adiazcan@hotmail.com |
ae3244d7da48f5c04df6e987d772a3c6d1135460 | 351fd341a6d49c730df1e50981b2d0bd3bfcefd4 | /loginapp/bin/easy_install-2.7 | 6d1678ce0d7ef3144cdd8827cf51109660fddc8a | [] | no_license | shilpasanthosh/flask-user-login-app | cd0936546ed82d4ec305a856320743e89a9abe0e | e9c1cdc76ad91feba4725668c3156bb7b03d99f4 | refs/heads/master | 2021-01-10T04:18:37.795489 | 2015-12-26T10:19:33 | 2015-12-26T10:19:33 | 48,606,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | 7 | #!/usr/share/nginx/loginapp/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools console-script shim: strip a trailing "-script.pyw"/".exe"
    # (Windows launcher artifacts) from argv[0], then delegate to
    # setuptools' easy_install entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"shilpa@marketcalls.in"
] | shilpa@marketcalls.in |
1c0195cd53762f132a0786eb1b8132648183af93 | 5615446932deb52748018186ad14e2de1b52ba03 | /chreval/EffectiveStem.py | 0c43f0e858edeca3897d96648aee56b518ad218d | [
"Apache-2.0"
] | permissive | plewczynski/looper | 7ed8ac4c51f4aa1e1508233019e52cba6a1f18a4 | 25cd3d4e12002e0fa382a035ea65e894697c1835 | refs/heads/master | 2022-10-22T20:03:41.286178 | 2020-06-10T15:36:06 | 2020-06-10T15:36:06 | 84,062,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,389 | py | #!/usr/bin/env python3
"""@@@
program: EffectiveStem.py
Classes: EffectiveStem
Functions:
creation date: 200511
last update: 200511
version: 0.0
EffectiveStem allows sharing the assessment of every stem in a
universal way. There may need to be things like sequence
dependence. Presently, that is not considered.
"""
import sys
class EffectiveStem(object):
    """Universal rules for deciding when two stems act as one effective stem.

    Two stems separated by a small internal gap can be treated as a single
    (effective) stem.  The criterion used here: the largest gap between the
    two stems must not exceed a cutoff (max_aa_gap for anti-parallel stems,
    max_pp_gap for parallel stems), and both stems must be at least as long
    as that gap.  Sequence dependence is presently not considered.

    Example (anti-parallel), gaps of 3 (p side) and 4 (q side), so the
    effective gap is 4; both stems must then be >= 4 long:

         _ _ _ _ _ /...\ _ _ _ _ _
        |_|_|_|_|_|     |_|_|_|_|_|
                  \..../
    """

    def __init__(self, max_aa_gap = 2, max_pp_gap = 2):
        # secondary structure stem parameters
        # (the defaults are the parameters used for chromatin)
        self.max_aa_gap = max_aa_gap  # maximum allowed gap, anti-parallel stems
        self.max_pp_gap = max_pp_gap  # maximum allowed gap, parallel stems

        # When True, reproduce the former strict behavior of Vienna and
        # Vienna2TreeNode: no effective stem of any kind is permitted.
        self.old_approach = False

        # verbose debugging output for the connectivity tests
        self.debug_ef = False
    #

    def is_connected_aaStem(self, slen1, ph1, qh1, slen2, pt2, qt2):
        """Decide whether two anti-parallel stems join into one effective stem.

        All parameters are integers:
          slen1, ph1, qh1 -- length and head pair (p, q) of stem 1
          slen2, pt2, qt2 -- length and tail pair (p, q) of stem 2

        Returns True when the largest strand gap separating the stems is
        within max_aa_gap and both stems are at least as long as that gap.
        """
        if self.old_approach:
            # Some older evaluation schemes in Vienna and Vienna2TreeNode
            # were strict and did not permit any effective stem of any kind.
            return False
        #

        is_connected = False
        dp12 = (pt2 - ph1 - 1)  # gap between the stems on the p strand
        dq12 = (qh1 - qt2 - 1)  # gap between the stems on the q strand

        # the effective separation is the larger of the two strand gaps
        pair_gap = max(dp12, dq12)

        if self.debug_ef:
            print ("Enter is_connected_aaStem:")
            print ("  slen1(%3d), ph1(%3d), qh1(%3d)" % (slen1, ph1, qh1))
            print ("  slen2(%3d), ph2(%3d), qh2(%3d)" % (slen2, pt2, qt2))
            print ("dp12 = %d, dq12 = %d ==>> pair_gap(%d) < max_aa_gap(%d)?" \
                   % (dp12, dq12, pair_gap, self.max_aa_gap))
        #

        # test 1: Is the gap itself less than the cutoff?
        if pair_gap > self.max_aa_gap:
            pair_gap = self.max_aa_gap
        else:
            is_connected = True
        #

        # test 2: if passed 1, then are both respective stem lengths
        # at least as long as the maximum gap itself?
        if slen1 >= pair_gap and slen2 >= pair_gap and is_connected:
            is_connected = True
        else:
            is_connected = False
        #

        if self.debug_ef:
            print ("slen1(%d) >= %d && slen2(%d) >= %d" \
                   % (slen1, pair_gap, slen2, pair_gap))
            # bug fix: this previously referenced the undefined name
            # "is_connect" and raised NameError when debug_ef was enabled
            print ("result: is_connected = ", is_connected)
        #

        return is_connected
    #

    def is_connected_ppStem(self, slen1, ph1, qh1, slen2, pt2, qt2):
        """Decide whether two parallel stems join into one effective stem.

        Same criterion as is_connected_aaStem but using max_pp_gap and the
        parallel-stem geometry (qt2 downstream of qh1).  Presently only used
        by chromatin, largely to satisfy the scanning routines in
        Vienna2TreeNode.
        """
        if self.old_approach:
            # Older evaluation schemes never permitted effective stems.
            return False
        #

        is_connected = False
        dp12 = float(pt2 - ph1 - 1)  # gap on the p strand
        dq12 = float(qt2 - qh1 - 1)  # gap on the q strand (parallel ordering)

        # the effective separation is the larger of the two strand gaps
        pair_gap = max(dp12, dq12)

        if self.debug_ef:
            print ("Enter is_connected_ppStem:")
            print ("  slen1(%3d), ph1(%3d), qh1(%3d)" % (slen1, ph1, qh1))
            print ("  slen2(%3d), ph2(%3d), qh2(%3d)" % (slen2, pt2, qt2))
            print ("dp12 = %d, dq12 = %d ==>> pair_gap(%d) < max_pp_gap(%d)?" \
                   % (dp12, dq12, pair_gap, self.max_pp_gap))
        #

        # test 1: Is the gap itself less than the cutoff?
        if pair_gap > self.max_pp_gap:
            pair_gap = self.max_pp_gap
        else:
            is_connected = True
        #

        # test 2: if passed 1, then are both respective stem lengths
        # at least as long as the maximum gap itself?
        if slen1 >= pair_gap and slen2 >= pair_gap and is_connected:
            is_connected = True
        else:
            is_connected = False
        #

        if self.debug_ef:
            print ("slen1(%d) >= %d && slen2(%d) >= %d" \
                   % (slen1, pair_gap, slen2, pair_gap))
            # bug fix: this previously referenced the undefined name
            # "is_connect" and raised NameError when debug_ef was enabled
            print ("result: is_connected = ", is_connected)
        #

        return is_connected
    #
#
#
def test0():
    """Smoke test: exercise both connected-stem predicates on fixed inputs."""
    es = EffectiveStem(8, 8)
    print (es.is_connected_aaStem(1, 8, 46, 4, 10, 45))
    print (es.is_connected_aaStem(4, 3, 46, 4, 10, 45))
    print (es.is_connected_ppStem(1, 8, 43, 4, 10, 45))
#
def main(cl):
    # Command-line entry point: echo the argument vector, then run the
    # built-in smoke test.
    print (cl)
    test0()
#
if __name__ == '__main__':
    # running the program
    main(sys.argv)
#
| [
"dawsonzhu@aol.com"
] | dawsonzhu@aol.com |
197885f0493a7e6edbb51992e02e56e33524a49e | 05425e4c3637928e9141b6c70957783f1a95dbb5 | /backend/kagoole/admin.py | 7fece1dd2a9642d3a02efab65d9955b5d9c1cb78 | [
"MIT"
] | permissive | Doarakko/kagoole | dc505d514988b56edcc7cec1a9f3d7238d8e5b15 | 5ff18d8bf1b05dfcc68e918461572645df583a28 | refs/heads/master | 2023-05-10T18:32:09.578731 | 2023-04-29T14:06:14 | 2023-04-29T14:06:14 | 190,133,623 | 11 | 2 | MIT | 2023-02-15T18:28:36 | 2019-06-04T05:12:55 | JavaScript | UTF-8 | Python | false | false | 176 | py | from django.contrib import admin
from reversion.admin import VersionAdmin
from .models import Solution
@admin.register(Solution)
class SolutionAdmin(VersionAdmin):
    # Register Solution in the Django admin using django-reversion's
    # VersionAdmin so admin edits are version-tracked.
    pass
| [
"2wua4nlyi@gmail.com"
] | 2wua4nlyi@gmail.com |
28a451889380139994d19d41449f1024a1657d39 | 6ff85b80c6fe1b3ad5416a304b93551a5e80de10 | /Python/Typing/ConvertingToInt.py | dc7abaeba61d61d2b38912ed04fadf88d3d3f1db | [
"MIT"
] | permissive | maniero/SOpt | c600cc2333e0a47ce013be3516bbb8080502ff2a | 5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3 | refs/heads/master | 2023-08-10T16:48:46.058739 | 2023-08-10T13:42:17 | 2023-08-10T13:42:17 | 78,631,930 | 1,002 | 136 | MIT | 2023-01-28T12:10:01 | 2017-01-11T11:19:24 | C# | UTF-8 | Python | false | false | 183 | py | print(int('12\n'))
print(int('\n123'))
print(int('1234 '))
print(int(' 1235'))
print(int('1236c'))
print(int('a1237'))
print(int('123 8'))
#https://pt.stackoverflow.com/q/347387/101
| [
"noreply@github.com"
] | noreply@github.com |
3be5741174e801d924eb566ff2df287a30b4998d | eae5ea21847e0d94fc94fe2cc838828e42e057d1 | /Dan/nlpStratifyd/lib/python3.7/site-packages/streamlit/errors.py | b7307c7981da7b08dfcc731741af9ccc34265b16 | [] | no_license | CheLena/Stratifyd_IS | 19061a326a1bbbd4ee50b769e9d77f83dd3c4f00 | f20bcf71ea5d547481423dff4deb5a4c50619278 | refs/heads/master | 2022-06-30T10:40:16.314372 | 2020-05-06T16:10:14 | 2020-05-06T16:10:14 | 238,253,482 | 0 | 0 | null | 2020-03-25T16:57:46 | 2020-02-04T16:37:09 | Python | UTF-8 | Python | false | false | 1,794 | py | # -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NoStaticFiles(Exception):
    """Raised when the static files needed to serve the app are unavailable."""
    pass
class S3NoCredentials(Exception):
    """Raised when S3 access is attempted without credentials configured."""
    pass
class NoSessionContext(Exception):
    """Raised when an operation requires a session context and none exists."""
    pass
class MarkdownFormattedException(Exception):
    """Instances of this class can use markdown in their messages, which will get
    nicely formatted on the frontend.
    """
    pass
class StreamlitAPIException(MarkdownFormattedException):
    """Base class for Streamlit API exceptions.
    An API exception should be thrown when user code interacts with the
    Streamlit API incorrectly. (That is, when we throw an exception as a
    result of a user's malformed `st.foo` call, it should be a
    StreamlitAPIException or subclass.)
    When displaying these exceptions on the frontend, we strip Streamlit
    entries from the stack trace so that the user doesn't see a bunch of
    noise related to Streamlit internals.
    """
    pass
class DuplicateWidgetID(StreamlitAPIException):
    """Raised when two widgets resolve to the same ID (user error)."""
    pass
class UnhashableType(StreamlitAPIException):
    """Raised when a value of an unhashable type is given to the hasher."""
    pass
class UserHashError(StreamlitAPIException):
    """Raised when a user-supplied hash function fails (user error)."""
    pass
class InternalHashError(MarkdownFormattedException):
    """Exception in Streamlit hashing code (i.e. not a user error)"""
    pass
| [
"danielmurph8@gmail.com"
] | danielmurph8@gmail.com |
cd7134c5d2fbe212572d24191802300ec0a4f75f | 22db256dfda252626877f52a130e4d0cdc1b9de5 | /python2_code.py | b492ffcc4d91c68d49e920ce240654e6d1d2048b | [] | no_license | khoipham152/github-example | 2f44d288350ef1572f6bc4c21ce5b88b46412080 | 10c575c7535d9f740c6ed208f67b6dc8cb9ce3de | refs/heads/master | 2021-05-21T08:23:32.951669 | 2020-04-03T03:31:42 | 2020-04-03T03:31:42 | 252,617,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print("second trial")
| [
"khoipham152@gmail.com"
] | khoipham152@gmail.com |
d0ad9224af16343b6541f99f97e46bd867f368f2 | 31427a8829edf517b5cff4f4294800ee3f2339a4 | /20200819/bs.py | 7a053030a5363b4ad66e2bb56b460622e5b15720 | [] | no_license | shlee16/cit_shl | 144463460e6919c5407a6f9b45fb97d33fd2f1ef | dcb74aac85d165484b679505ae3c15680bdd94b6 | refs/heads/master | 2022-11-30T20:01:51.247280 | 2020-08-19T08:07:20 | 2020-08-19T08:07:20 | 281,597,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # https://www.dataquest.io/blog/web-scraping-tutorial-python/
from bs4 import BeautifulSoup
import requests
# page = requests.get("http://forecast.weather.gov/MapClick.php?lat=37.7772&lon=-122.4168")
# page = requests.get("https://www.w3schools.com/python/ref_random_randint.asp")
# Fetch the Naver homepage and parse the raw HTML bytes with the built-in
# "html.parser" backend, then dump the whole parsed tree.
page = requests.get("https://www.naver.com/")
soup = BeautifulSoup(page.content, 'html.parser')
print(soup)
# seven_day = soup.find(id="seven-day-forecast")
# forecast_items = seven_day.find_all(class_="tombstone-container")
# print(forecast_items)
| [
"68633215+shlee16@users.noreply.github.com"
] | 68633215+shlee16@users.noreply.github.com |
7258b6ee34f09f4f049b06f3d02146f7bb2db513 | 3af3f16458e1afd3fcff53ec1e414951c351830f | /lockdownsite/GenerateOutput.py | 7e18cdce395cb12eed1c15af28904791f87ee050 | [] | no_license | lazyboy4943/lockdownhackathon2k21 | 3caf5b39143eda7d2420e133979c7606723985ab | ef891fc56e63c6b8aded56682f4af0988532ed5c | refs/heads/main | 2023-06-21T12:44:27.169598 | 2021-08-01T05:36:59 | 2021-08-01T05:36:59 | 391,536,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | import pickle
from math import e
def sigmoid(x):
    """Logistic activation: squash any real x into the open interval (0, 1)."""
    denominator = 1 + e ** -x
    return 1 / denominator
# Layer sizes of the network: Construction[0] is the input size and
# Construction[-1] the number of output classes (see Output below).
Construction = [15, 16, 50, 50, 16, 12]
# Load the trained weights/biases at import time.
# NOTE(review): pickle.load on an untrusted file can execute arbitrary
# code; only load weight files from a trusted source.
with open("WeightsAndBiases.txt", "rb") as f:
    WeightsBiases = pickle.load(f)
def Output(image):
    """Forward-propagate `image` through the network.

    Populates the module-level `Neurons` (post-sigmoid activations) and
    `TempSums` (pre-activation weighted sums) as side effects and returns
    the index of the most activated output neuron (the predicted class).

    :param image: sequence of Construction[0] input values
    :return: int index into the final layer, 0 .. Construction[-1]-1
    """
    global Neurons
    global TempSums
    Neurons = [[0 for i in range(Construction[n])] for n in range(1, len(Construction))]
    TempSums = [[0 for i in range(Construction[n])] for n in range(1, len(Construction))]
    for n in range(len(Construction) - 1):  # loops through the layers
        for i in range(Construction[n + 1]):  # loops through the output layer
            # Weighted sum of the previous layer (the raw image for layer 0).
            # Renamed from `sum` to stop shadowing the builtin.
            total = 0
            for j in range(Construction[n]):  # loops through the input layer
                total = total + (WeightsBiases[n][i][j] * (image[j] if n == 0 else Neurons[n - 1][j]))
            pre_activation = total + WeightsBiases[n][i][-1]  # add the bias term
            Neurons[n][i] = sigmoid(pre_activation)
            TempSums[n][i] = pre_activation
    # Index of the largest output activation; .index(max(...)) returns the
    # first occurrence on ties, matching the original manual scan.
    return Neurons[-1].index(max(Neurons[-1]))
| [
"noreply@github.com"
] | noreply@github.com |
376a10c12fcc8827ccfebe8c39ff8f4628f022cb | d75d1e73fb53aedad1572d61b6c8b4998525763e | /manage.py | cf92380841cc546053beb3dae34873d834bcebff | [] | no_license | GlenWong97/Django_Master | 23c82eae9516a5c65d64edda8a8745e4efe15889 | 971095497cf976666106083ea26d0b7b56cea1ee | refs/heads/master | 2022-01-22T17:28:23.643808 | 2019-07-18T07:53:16 | 2019-07-18T07:53:16 | 191,922,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point for the "brainstorm" project.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'brainstorm.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the two most common causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"shuzewong@Yahoo.com.sg"
] | shuzewong@Yahoo.com.sg |
3dc85d34f6ff9880c486e9e074f5df4ddf2ceaf1 | a7cf04f9f84d92e46dbc7654b93b2f2dee59b52b | /contacts/views.py | adf43c19ede620a7e3ba59d5333375d880013f97 | [] | no_license | Devlox/real_estate_project | e78d389562bd77012e2f3f6e6e0d44dd62416880 | ddeb835d14366c2aa60f0005e048b1f565a47ec2 | refs/heads/master | 2020-04-25T09:30:53.615673 | 2019-02-26T10:51:31 | 2019-02-26T10:51:31 | 172,677,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.mail import send_mail
from .models import Contact
# Handle an inquiry form submission for a property listing.
def contact (request):
    # NOTE(review): nothing is returned for non-POST requests, so a GET to
    # this view makes Django raise "view didn't return an HttpResponse" --
    # confirm this view is only ever routed for POST.
    if request.method == 'POST':
        listing_id = request.POST['listing_id']
        listing = request.POST['listing']
        name = request.POST['name']
        email = request.POST['email']
        phone = request.POST['phone']
        message = request.POST['message']
        user_id = request.POST['user_id']
        realtor_email = request.POST['realtor_email']
        # Check if user has made an inquiry already
        if request.user.is_authenticated:
            # Trust the session's user id over the posted value.
            user_id = request.user.id
            has_contacted = Contact.objects.all().filter(listing_id=listing_id, user_id=user_id)
            if has_contacted:
                messages.error(request, 'You have already made an inquiry for this listing')
                return redirect('/listings/'+listing_id)
        # NOTE(review): this local name shadows the view function `contact`.
        contact = Contact(listing=listing, listing_id=listing_id, name=name, email=email, phone=phone, message=message, user_id=user_id)
        contact.save()
        # Notify the listing's realtor by email.
        send_mail(
            'Property Listing Inquiry',
            'There has been an inquery for ' + listing + '. Log into the admin panel for more info',
            'muyambojohn1@gmail.com',
            [realtor_email, ''],
            fail_silently=False
        )
        messages.success(request, 'Your request has been submitted, a realtor will get back to you soon')
        return redirect('/listings/'+listing_id)
"muyambojohn1@gmail.com"
] | muyambojohn1@gmail.com |
094b2a37d54c7283bb751b9b9a6f85595937140e | 68a709e3a4cae897cf55e1ef90b132be7947ca8e | /Puntos extra/bam/data/models.py | db0b85878fa8f32b604e07729067c5c4aa1cad03 | [] | no_license | AngelloDavincii/BAM_EVALUACION | d37cd359523a4d4cd6189c59f70da4b900e73362 | ee45734ca46c3532eb3d86aa98f84d4531334833 | refs/heads/main | 2023-05-09T04:53:56.416541 | 2021-05-24T06:36:40 | 2021-05-24T06:36:40 | 369,895,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | from django.db import models
from django.forms import model_to_dict
# Create your models here.
class product(models.Model):
    # A sellable product: name, standard cost, manufacturing lead time.
    # NOTE(review): Django convention is CamelCase model names (Product).
    name = models.CharField(max_length=255, null=True)
    StandardCost = models.FloatField(null=True)
    daysToManufacture = models.IntegerField(null=True)
    def toJSON(self):
        # Serialize this instance to a plain dict for JSON responses.
        item=model_to_dict(self)
        return item
class detalle(models.Model):
    # Sales-order line item ("detalle" = detail in Spanish).
    SalesOrderID = models.IntegerField(null=True)
    OrderyQty = models.IntegerField(null=True)
    Product = models.ForeignKey(product, on_delete=models.SET_NULL, null=True)
    UnitPrice = models.FloatField(null=True)
    LineTotal = models.FloatField(null=True)
    def toJSON(self):
        item=model_to_dict(self)
        return item
class territory(models.Model):
    # Sales territory: a name plus its grouping label.
    name = models.CharField(max_length=50, null=True)
    group = models.CharField(max_length=50, null=True)
    def toJSON(self):
        item=model_to_dict(self)
        return item
class seller(models.Model):
    # A salesperson, referenced from sales.salesPerson.
    name = models.CharField(max_length=50, null=True)
    def toJSON(self):
        item=model_to_dict(self)
        return item
class sales(models.Model):
    # Sales-order header: date, customer id, seller, territory and total.
    date = models.DateTimeField(null=True)
    customer = models.IntegerField(null=True)
    salesPerson = models.ForeignKey(seller, on_delete = models.SET_NULL,null=True)
    territory = models.ForeignKey(territory, on_delete = models.SET_NULL,null=True)
    total = models.FloatField(null=True)
    def toJSON(self):
        # Serialize to a dict; the date is rendered as a 'YYYY-MM-DD' string.
        item=model_to_dict(self)
        item['date'] = self.date.strftime('%Y-%m-%d')
        return item
| [
"53918848+AngelloDavincii@users.noreply.github.com"
] | 53918848+AngelloDavincii@users.noreply.github.com |
ef9e0dffd76f0c55e89197746606a2d74bc66412 | 483f45b1d241d318c06842f250719e73b8c4dfe7 | /Ex085.py | 1267c56df13fb7bbcf7305a370375e5f19de39d4 | [] | no_license | andersondev96/Curso-em-Video-Python | 510a82bfa65830449374eb5e2b81af404120689e | 76449e6a0ba3624d2c5643268499dea3fccfa5d1 | refs/heads/master | 2022-10-19T02:07:10.967713 | 2020-06-14T23:57:02 | 2020-06-14T23:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """
Crie um programa onde o usuário possa digitar sete valores numéricos e
cadastre-os em uma lista única que mantenha separados os valores pares
e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.
"""
# Bucket 0 collects the even values, bucket 1 the odd ones.
num = [[], []]
for posicao in range(1, 8):
    valor = int(input(f'Digite o {posicao}º valor: '))
    if valor % 2 == 0:
        num[0].append(valor)
    else:
        num[1].append(valor)
num[0].sort()
num[1].sort()
print('-=' * 30)
print(f'Os valores pares digitados foram: {num[0]}')
print(f'Os valores ímpares digitados foram: {num[1]}')
"andersonfferreira13@hotmail.com"
] | andersonfferreira13@hotmail.com |
c51648b926a8dee6d3d08b2358b1f474d0c99a66 | af53b1ac86d4235103d446e205f37000e9d3a5ef | /Advanced Python Facebook Group/Ex1.py | 0b402d96bb61fe7286e7e768e9f4a9467c2ecf41 | [] | no_license | Lylio/python-sandbox | 0754501831eef05301f218bd6fa2d26311aa955b | ede1c0551bb54e5d771847533cc1dd9ac2abdd29 | refs/heads/master | 2023-07-10T08:42:54.934541 | 2021-08-02T12:27:18 | 2021-08-02T12:27:18 | 138,468,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | def test1(*wordlist):
for word in wordlist:
print(word + "YEAH ")
if __name__ == '__main__':
    fruits = ["apples", "oranges", "grapes"]
    # Bug fix: unpack the list so each fruit becomes its own *wordlist
    # argument. Passing the list itself made `word + "YEAH "` inside
    # test1 raise TypeError (list + str).
    test1(*fruits)
"cxz@gmx.com"
] | cxz@gmx.com |
69db7d43bcb2fc9f39d83c15cb3eded5e8788c97 | 9c4294271a405f13d35064da6e144c3baf0c71bd | /scripts/startup/bl_ui/properties_render.py | ce375eca894ad92b7982e30731f01d7d8ec12119 | [] | no_license | satishgoda/fluid-designer-scripts | 178ba9ab425fd8b02791f026eeba00d19bf4f4ea | ddccc5823c1ac09849c1d48dc2740a200cb40d84 | refs/heads/master | 2021-01-19T07:15:47.977416 | 2014-03-20T00:00:46 | 2014-03-20T00:01:06 | 18,070,299 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,648 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel
class RENDER_MT_presets(Menu):
    # Preset menu for render settings; entries come from the "render"
    # presets directory and are applied with script.execute_preset.
    bl_label = "Render Presets"
    preset_subdir = "render"
    preset_operator = "script.execute_preset"
    draw = Menu.draw_preset
class RENDER_MT_ffmpeg_presets(Menu):
    # Preset menu for FFMPEG output settings; presets are applied by
    # running them as Python files (script.python_file_run).
    bl_label = "FFMPEG Presets"
    preset_subdir = "ffmpeg"
    preset_operator = "script.python_file_run"
    draw = Menu.draw_preset
class RENDER_MT_framerate_presets(Menu):
    # Preset menu for frame-rate choices ("framerate" presets directory).
    bl_label = "Frame Rate Presets"
    preset_subdir = "framerate"
    preset_operator = "script.execute_preset"
    draw = Menu.draw_preset
class RenderButtonsPanel():
    """Mixin giving render panels their common location (Properties editor, Render context)."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "render"
    # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
    @classmethod
    def poll(cls, context):
        # Show the panel only when the scene's render engine is supported by it.
        scene = context.scene
        return scene and (scene.render.engine in cls.COMPAT_ENGINES)
class RENDER_PT_render(RenderButtonsPanel, Panel):
    """Top render panel: render/animation/audio buttons plus the display mode."""
    bl_label = "Render"
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        # Still / animation / audio-mixdown buttons in one aligned row.
        row = layout.row(align=True)
        row.operator("render.render", text="Render", icon='RENDER_STILL')
        row.operator("render.render", text="Animation", icon='RENDER_ANIMATION').animation = True
        row.operator("sound.mixdown", text="Audio", icon='PLAY_AUDIO')
        split = layout.split(percentage=0.33)
        split.label(text="Display:")
        row = split.row(align=True)
        row.prop(rd, "display_mode", text="")
        row.prop(rd, "use_lock_interface", icon_only=True)
class RENDER_PT_dimensions(RenderButtonsPanel, Panel):
    """Panel for output resolution, aspect ratio, frame range and frame rate."""
    bl_label = "Dimensions"
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    # Cache of the last (fps, fps_base, preset label) arguments and their result,
    # so the label string is not rebuilt on every redraw.
    _frame_rate_args_prev = None
    _preset_class = None
    @staticmethod
    def _draw_framerate_label(*args):
        """Return ``(label_text, show_framerate)`` for the given (fps, fps_base, preset label)."""
        # avoids re-creating text string each draw
        if RENDER_PT_dimensions._frame_rate_args_prev == args:
            return RENDER_PT_dimensions._frame_rate_ret
        fps, fps_base, preset_label = args
        if fps_base == 1.0:
            fps_rate = round(fps)
        else:
            fps_rate = round(fps / fps_base, 2)
        # TODO: Change the following to iterate over existing presets
        custom_framerate = (fps_rate not in {23.98, 24, 25, 29.97, 30, 50, 59.94, 60})
        if custom_framerate is True:
            fps_label_text = "Custom (%r fps)" % fps_rate
            show_framerate = True
        else:
            fps_label_text = "%r fps" % fps_rate
            show_framerate = (preset_label == "Custom")
        # Store both the inputs and the result for the next draw call.
        RENDER_PT_dimensions._frame_rate_args_prev = args
        RENDER_PT_dimensions._frame_rate_ret = args = (fps_label_text, show_framerate)
        return args
    @staticmethod
    def draw_framerate(sub, rd):
        """Draw the frame-rate preset menu, plus explicit fps fields for custom rates."""
        if RENDER_PT_dimensions._preset_class is None:
            RENDER_PT_dimensions._preset_class = bpy.types.RENDER_MT_framerate_presets
        args = rd.fps, rd.fps_base, RENDER_PT_dimensions._preset_class.bl_label
        fps_label_text, show_framerate = RENDER_PT_dimensions._draw_framerate_label(*args)
        sub.menu("RENDER_MT_framerate_presets", text=fps_label_text)
        if show_framerate:
            sub.prop(rd, "fps")
            sub.prop(rd, "fps_base", text="/")
    def draw(self, context):
        layout = self.layout
        scene = context.scene
        rd = scene.render
        # Preset menu with add/remove buttons.
        row = layout.row(align=True)
        row.menu("RENDER_MT_presets", text=bpy.types.RENDER_MT_presets.bl_label)
        row.operator("render.preset_add", text="", icon='ZOOMIN')
        row.operator("render.preset_add", text="", icon='ZOOMOUT').remove_active = True
        split = layout.split()
        # Left column: resolution, aspect ratio and border options.
        col = split.column()
        sub = col.column(align=True)
        sub.label(text="Resolution:")
        sub.prop(rd, "resolution_x", text="X")
        sub.prop(rd, "resolution_y", text="Y")
        sub.prop(rd, "resolution_percentage", text="")
        sub.label(text="Aspect Ratio:")
        sub.prop(rd, "pixel_aspect_x", text="X")
        sub.prop(rd, "pixel_aspect_y", text="Y")
        row = col.row()
        row.prop(rd, "use_border", text="Border")
        sub = row.row()
        sub.active = rd.use_border
        sub.prop(rd, "use_crop_to_border", text="Crop")
        # Right column: frame range, frame rate and time remapping.
        col = split.column()
        sub = col.column(align=True)
        sub.label(text="Frame Range:")
        sub.prop(scene, "frame_start")
        sub.prop(scene, "frame_end")
        sub.prop(scene, "frame_step")
        sub.label(text="Frame Rate:")
        self.draw_framerate(sub, rd)
        subrow = sub.row(align=True)
        subrow.label(text="Time Remapping:")
        subrow = sub.row(align=True)
        subrow.prop(rd, "frame_map_old", text="Old")
        subrow.prop(rd, "frame_map_new", text="New")
class RENDER_PT_antialiasing(RenderButtonsPanel, Panel):
    """Anti-aliasing sample count and reconstruction filter settings."""
    bl_label = "Anti-Aliasing"
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw_header(self, context):
        # Checkbox in the panel header toggles anti-aliasing on/off.
        rd = context.scene.render
        self.layout.prop(rd, "use_antialiasing", text="")
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        layout.active = rd.use_antialiasing
        split = layout.split()
        col = split.column()
        col.row().prop(rd, "antialiasing_samples", expand=True)
        sub = col.row()
        # Full-sample AA is incompatible with border rendering.
        sub.enabled = not rd.use_border
        sub.prop(rd, "use_full_sample")
        col = split.column()
        col.prop(rd, "pixel_filter_type", text="")
        col.prop(rd, "filter_size", text="Size")
class RENDER_PT_motion_blur(RenderButtonsPanel, Panel):
    """Sampled motion blur settings (hidden when full-sample AA is enabled)."""
    bl_label = "Sampled Motion Blur"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    @classmethod
    def poll(cls, context):
        # Sampled motion blur cannot be combined with full-sample AA.
        rd = context.scene.render
        return not rd.use_full_sample and (rd.engine in cls.COMPAT_ENGINES)
    def draw_header(self, context):
        rd = context.scene.render
        self.layout.prop(rd, "use_motion_blur", text="")
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        layout.active = rd.use_motion_blur
        row = layout.row()
        row.prop(rd, "motion_blur_samples")
        row.prop(rd, "motion_blur_shutter")
class RENDER_PT_shading(RenderButtonsPanel, Panel):
    """Global toggles for shading features (textures, shadows, SSS, env maps, raytracing)."""
    bl_label = "Shading"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        split = layout.split()
        col = split.column()
        col.prop(rd, "use_textures", text="Textures")
        col.prop(rd, "use_shadows", text="Shadows")
        col.prop(rd, "use_sss", text="Subsurface Scattering")
        col.prop(rd, "use_envmaps", text="Environment Map")
        col = split.column()
        col.prop(rd, "use_raytrace", text="Ray Tracing")
        col.prop(rd, "alpha_mode", text="Alpha")
class RENDER_PT_performance(RenderButtonsPanel, Panel):
    """Threading, tile size, memory saving and raytracing acceleration options."""
    bl_label = "Performance"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        split = layout.split()
        col = split.column(align=True)
        col.label(text="Threads:")
        col.row(align=True).prop(rd, "threads_mode", expand=True)
        sub = col.column(align=True)
        # Manual thread count is only editable in FIXED mode.
        sub.enabled = rd.threads_mode == 'FIXED'
        sub.prop(rd, "threads")
        col.label(text="Tile Size:")
        col.prop(rd, "tile_x", text="X")
        col.prop(rd, "tile_y", text="Y")
        col = split.column()
        col.label(text="Memory:")
        sub = col.column()
        # Saving render buffers conflicts with border rendering and full-sample AA.
        sub.enabled = not (rd.use_border or rd.use_full_sample)
        sub.prop(rd, "use_save_buffers")
        sub = col.column()
        sub.active = rd.use_compositing
        sub.prop(rd, "use_free_image_textures")
        sub.prop(rd, "use_free_unused_nodes")
        sub = col.column()
        sub.active = rd.use_raytrace
        sub.label(text="Acceleration structure:")
        sub.prop(rd, "raytrace_method", text="")
        if rd.raytrace_method == 'OCTREE':
            sub.prop(rd, "octree_resolution", text="Resolution")
        else:
            sub.prop(rd, "use_instances", text="Instances")
            sub.prop(rd, "use_local_coords", text="Local Coordinates")
class RENDER_PT_post_processing(RenderButtonsPanel, Panel):
    """Compositing/sequencer toggles, dither, field rendering and edge enhancement."""
    bl_label = "Post Processing"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        split = layout.split()
        col = split.column()
        col.prop(rd, "use_compositing")
        col.prop(rd, "use_sequencer")
        split.prop(rd, "dither_intensity", text="Dither", slider=True)
        layout.separator()
        split = layout.split()
        col = split.column()
        col.prop(rd, "use_fields", text="Fields")
        sub = col.column()
        sub.active = rd.use_fields
        sub.row().prop(rd, "field_order", expand=True)
        sub.prop(rd, "use_fields_still", text="Still")
        col = split.column()
        col.prop(rd, "use_edge_enhance")
        sub = col.column()
        sub.active = rd.use_edge_enhance
        sub.prop(rd, "edge_threshold", text="Threshold", slider=True)
        sub.prop(rd, "edge_color", text="")
class RENDER_PT_stamp(RenderButtonsPanel, Panel):
    """Options for metadata text stamped onto rendered images."""
    bl_label = "Stamp"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw_header(self, context):
        rd = context.scene.render
        self.layout.prop(rd, "use_stamp", text="")
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        layout.active = rd.use_stamp
        layout.prop(rd, "stamp_font_size", text="Font Size")
        row = layout.row()
        row.column().prop(rd, "stamp_foreground", slider=True)
        row.column().prop(rd, "stamp_background", slider=True)
        # Two columns of individual stamp-field toggles.
        split = layout.split()
        col = split.column()
        col.prop(rd, "use_stamp_time", text="Time")
        col.prop(rd, "use_stamp_date", text="Date")
        col.prop(rd, "use_stamp_render_time", text="RenderTime")
        col.prop(rd, "use_stamp_frame", text="Frame")
        col.prop(rd, "use_stamp_scene", text="Scene")
        col = split.column()
        col.prop(rd, "use_stamp_camera", text="Camera")
        col.prop(rd, "use_stamp_lens", text="Lens")
        col.prop(rd, "use_stamp_filename", text="Filename")
        col.prop(rd, "use_stamp_marker", text="Marker")
        col.prop(rd, "use_stamp_sequencer_strip", text="Seq. Strip")
        # Free-form note row; the text field follows the checkbox state.
        row = layout.split(percentage=0.2)
        row.prop(rd, "use_stamp_note", text="Note")
        sub = row.row()
        sub.active = rd.use_stamp_note
        sub.prop(rd, "stamp_note_text", text="")
class RENDER_PT_output(RenderButtonsPanel, Panel):
    """Output path, file-format settings and QuickTime-specific codec options."""
    bl_label = "Output"
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        image_settings = rd.image_settings
        file_format = image_settings.file_format
        layout.prop(rd, "filepath", text="")
        split = layout.split()
        col = split.column()
        # Overwrite/placeholder only apply to per-frame image output, not movies.
        col.active = not rd.is_movie_format
        col.prop(rd, "use_overwrite")
        col.prop(rd, "use_placeholder")
        split.prop(rd, "use_file_extension")
        layout.template_image_settings(image_settings, color_management=False)
        if file_format == 'QUICKTIME':
            quicktime = rd.quicktime
            split = layout.split()
            col = split.column()
            col.prop(quicktime, "codec_type", text="Video Codec")
            col.prop(quicktime, "codec_spatial_quality", text="Quality")
            # Audio
            col.prop(quicktime, "audiocodec_type", text="Audio Codec")
            if quicktime.audiocodec_type != 'No audio':
                split = layout.split()
                if quicktime.audiocodec_type == 'LPCM':
                    split.prop(quicktime, "audio_bitdepth", text="")
                split.prop(quicktime, "audio_samplerate", text="")
                split = layout.split()
                col = split.column()
                if quicktime.audiocodec_type == 'AAC':
                    col.prop(quicktime, "audio_bitrate")
                subsplit = split.split()
                col = subsplit.column()
                if quicktime.audiocodec_type == 'AAC':
                    col.prop(quicktime, "audio_codec_isvbr")
                col = subsplit.column()
                col.prop(quicktime, "audio_resampling_hq")
class RENDER_PT_encoding(RenderButtonsPanel, Panel):
    """FFmpeg container/codec, bitrate and audio settings for video output formats."""
    bl_label = "Encoding"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER'}
    @classmethod
    def poll(cls, context):
        # Only shown for FFmpeg-based output formats.
        rd = context.scene.render
        return rd.image_settings.file_format in {'FFMPEG', 'XVID', 'H264', 'THEORA'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        ffmpeg = rd.ffmpeg
        layout.menu("RENDER_MT_ffmpeg_presets", text="Presets")
        split = layout.split()
        split.prop(rd.ffmpeg, "format")
        # Container-dependent second field: codec, lossless toggle, or nothing.
        if ffmpeg.format in {'AVI', 'QUICKTIME', 'MKV', 'OGG'}:
            split.prop(ffmpeg, "codec")
        elif rd.ffmpeg.format == 'H264':
            split.prop(ffmpeg, "use_lossless_output")
        else:
            split.label()
        row = layout.row()
        row.prop(ffmpeg, "video_bitrate")
        row.prop(ffmpeg, "gopsize")
        split = layout.split()
        col = split.column()
        col.label(text="Rate:")
        col.prop(ffmpeg, "minrate", text="Minimum")
        col.prop(ffmpeg, "maxrate", text="Maximum")
        col.prop(ffmpeg, "buffersize", text="Buffer")
        col = split.column()
        col.prop(ffmpeg, "use_autosplit")
        col.label(text="Mux:")
        col.prop(ffmpeg, "muxrate", text="Rate")
        col.prop(ffmpeg, "packetsize", text="Packet Size")
        layout.separator()
        # Audio:
        if ffmpeg.format != 'MP3':
            layout.prop(ffmpeg, "audio_codec", text="Audio Codec")
        row = layout.row()
        row.prop(ffmpeg, "audio_bitrate")
        row.prop(ffmpeg, "audio_volume", slider=True)
class RENDER_PT_bake(RenderButtonsPanel, Panel):
    """Texture/vertex-colour baking options (Blender Internal and Game engines)."""
    bl_label = "Bake"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
    def draw(self, context):
        layout = self.layout
        rd = context.scene.render
        layout.operator("object.bake_image", icon='RENDER_STILL')
        layout.prop(rd, "bake_type")
        multires_bake = False
        # Set literal for membership test, consistent with the rest of this file.
        if rd.bake_type in {'NORMALS', 'DISPLACEMENT', 'DERIVATIVE', 'AO'}:
            layout.prop(rd, "use_bake_multires")
            multires_bake = rd.use_bake_multires
        if not multires_bake:
            # Regular (non-multires) bake options.
            if rd.bake_type == 'NORMALS':
                layout.prop(rd, "bake_normal_space")
            elif rd.bake_type in {'DISPLACEMENT', 'AO'}:
                layout.prop(rd, "use_bake_normalize")
            # col.prop(rd, "bake_aa_mode")
            # col.prop(rd, "use_bake_antialiasing")
            layout.separator()
            split = layout.split()
            col = split.column()
            col.prop(rd, "use_bake_to_vertex_color")
            sub = col.column()
            # Image-target options are irrelevant when baking to vertex colours.
            sub.active = not rd.use_bake_to_vertex_color
            sub.prop(rd, "use_bake_clear")
            sub.prop(rd, "bake_margin")
            sub.prop(rd, "bake_quad_split", text="Split")
            col = split.column()
            col.prop(rd, "use_bake_selected_to_active")
            sub = col.column()
            sub.active = rd.use_bake_selected_to_active
            sub.prop(rd, "bake_distance")
            sub.prop(rd, "bake_bias")
        else:
            # Multires bake options, with extras per bake type.
            split = layout.split()
            col = split.column()
            col.prop(rd, "use_bake_clear")
            col.prop(rd, "bake_margin")
            if rd.bake_type == 'DISPLACEMENT':
                col = split.column()
                col.prop(rd, "use_bake_lores_mesh")
            if rd.bake_type == 'AO':
                col = split.column()
                col.prop(rd, "bake_bias")
                col.prop(rd, "bake_samples")
            if rd.bake_type == 'DERIVATIVE':
                row = layout.row()
                row.prop(rd, "use_bake_user_scale", text="")
                sub = row.column()
                sub.active = rd.use_bake_user_scale
                sub.prop(rd, "bake_user_scale", text="User Scale")
# Register every class in this module when the file is executed directly
# (e.g. from Blender's text editor while developing the UI).
if __name__ == "__main__":  # only for live edit.
    bpy.utils.register_module(__name__)
| [
"dev.andrewpeel@gmail.com"
] | dev.andrewpeel@gmail.com |
96deacaa022403dffe2dd2829259ace88ddbdc65 | ffeb5db9b7ee23a2bee74ee22814c8f15efa31bf | /src/gb.py | 4b0d8a2cf0a6b8a2d659c39f7d0f3e11a533d342 | [] | no_license | itaizy/2019-nCoV-GB | 5dc8ece4bc2e707f6df707929c07639435c69319 | a299b335a8d0045b29dbd10d18dc806d7ed17c07 | refs/heads/master | 2021-07-13T15:38:10.346632 | 2020-04-15T12:29:19 | 2020-04-15T12:29:19 | 241,924,704 | 0 | 2 | null | 2021-03-12T11:32:56 | 2020-02-20T15:52:18 | JavaScript | UTF-8 | Python | false | false | 2,407 | py | # Encoding: utf-8
import os
import json
from urllib.request import urlretrieve  # NOTE(review): unused in this script -- kept for compatibility
from PIL import Image  # NOTE(review): unused in this script -- kept for compatibility
import numpy as np  # NOTE(review): unused in this script -- kept for compatibility

# --- National ("quanguo") summary for the GB page ---------------------------
# Context managers replace the manual open/close pairs so the files are closed
# even if json.load()/json.dump() raises.
with open('data/overall.json', encoding='utf-8') as f:
    user_dict = json.load(f)
print("加载入文件完成...")

# Cumulative ("stock") fields that must be present in the source data.
stocks = [
    'modifyTime',
    'currentConfirmedCount',
    'confirmedCount',
    'suspectedCount',
    'curedCount',
    'deadCount',
    'seriousCount'
]
# Daily-increment fields; these may be missing and then default to '-'.
increments = [
    'suspectedIncr',
    'currentConfirmedIncr',
    'confirmedIncr',
    'curedIncr',
    'deadIncr',
    'seriousIncr'
]

qg4gb = {key: user_dict[key] for key in stocks}
for key in increments:
    qg4gb[key] = user_dict.get(key, '-')

#####################################################
# Manual correction of the suspected-case count.
qg4gb['suspectedCount'] = 253
#####################################################

with open('data/qg4gb.json', 'w', encoding='utf-8') as f:
    json.dump(qg4gb, f)
print(qg4gb)
print('Done.')

# --- Hubei ("hb") daily data ------------------------------------------------
val = os.system('wget -O - https://api.inews.qq.com/newsqa/v1/query/pubished/daily/list?province=%E6%B9%96%E5%8C%97 > data/hb4gball.json')
with open('data/hb4gball.json', encoding='utf-8') as f:
    hball = json.load(f)

# Keep only the most recent day's record (was sorted(...)[0] with reverse=True).
hbdata = max(hball['data'], key=lambda item: item['date'])
# *************************************************************
# Manual corrections of the suspected-case figures.
hbdata['suspected_add'] = 0
hbdata['suspected'] = 34
hbdata['suspected_delete'] = 15
with open('data/hb4gb.json', 'w', encoding='utf-8') as f:
    json.dump(hbdata, f, ensure_ascii=False)
# *************************************************************

# --- Trend-chart images -----------------------------------------------------
# Map remote chart URLs to local image paths for the GB page.
img4gb = {}
qgtc = []
for img_url in user_dict['quanguoTrendChart']:
    kp = img_url['imgUrl']
    print(kp)
    qgtc.append('images/' + kp.split('/')[-1])
hbtc = []
for img_url in user_dict['hbFeiHbTrendChart']:
    kp = img_url['imgUrl']
    print(kp)
    hbtc.append('images/' + kp.split('/')[-1])
img4gb['qgtc'] = qgtc
img4gb['hbtc'] = hbtc
with open('data/img4gb.json', 'w', encoding='utf-8') as f:
    json.dump(img4gb, f)

# Download the images, then publish the generated assets to the build dir.
val = os.system('python3 ImgCrawler.py')
print('GB Done.' + str(val))
val = os.system('cp -r images/ ../build/gb')
val = os.system('cp data/qg4gb.json ../build/gb')
val = os.system('cp data/hb4gb.json ../build/gb')
val = os.system('cp data/img4gb.json ../build/gb')
"itaizy@163.com"
] | itaizy@163.com |
b81506553a8b79fbb1ec1d99cb84b9d4d4caf035 | e19cd51e901aa0e6172db6de224016c1924f3d8d | /scratches/transfer_learning.py | b2b51ec5057007a442101bb45e35e039cf4692ec | [] | no_license | simon2k/tensorflow_sandbox | e7bd0933663d7477eee9c17411db7fdcd58bd8b8 | 5b5ffea5e35559487af627eaf60f78687e8f5ba8 | refs/heads/main | 2023-02-14T23:22:53.261495 | 2021-01-10T22:04:56 | 2021-01-10T22:04:56 | 326,102,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,444 | py | import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import tensorflow as tf
from skimage.transform import resize
from sklearn.metrics import confusion_matrix
from tensorflow.keras.activations import relu, sigmoid
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D
from tensorflow.keras.losses import BinaryCrossentropy, SparseCategoricalCrossentropy
from tensorflow.keras.metrics import BinaryAccuracy
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import SGD, RMSprop
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, LearningRateScheduler
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.preprocessing.image import smart_resize
# Load the pre-split cats-vs-dogs arrays (images plus integer labels) from disk.
images_train = np.load('./assets/cats-vs-dogs-dataset/images_train.npy')
images_test = np.load('./assets/cats-vs-dogs-dataset/images_test.npy')
images_valid = np.load('./assets/cats-vs-dogs-dataset/images_valid.npy')
labels_train = np.load('./assets/cats-vs-dogs-dataset/labels_train.npy')
labels_test = np.load('./assets/cats-vs-dogs-dataset/labels_test.npy')
labels_valid = np.load('./assets/cats-vs-dogs-dataset/labels_valid.npy')
# Quick sanity checks on shapes and the set of label values
# (per the class_names below, 0 = Dog and 1 = Cat).
print('Train set shape: ', images_train.shape)
print('Train labels shape: ', labels_train.shape)
print('All unique train labels: ', np.unique(labels_train))
print('All unique test labels: ', np.unique(labels_test))
print('All unique valid labels: ', np.unique(labels_valid))
# Optional visualisation of 25 random training images (kept disabled).
# class_names = np.array(['Dog', 'Cat'])
# plt.figure(figsize=(25, 10))
#
# indices = np.random.choice(images_train.shape[0], size=25, replace=False)
#
# for n, i in enumerate(indices):
#     ax = plt.subplot(5, 5, n+1)
#     plt.imshow(images_train[i])
#     plt.title(class_names[labels_train[i]])
#     plt.axis('off')
def get_model(input_shape):
    """Build and compile the baseline CNN binary classifier.

    Three double-conv blocks (32, 64, 128 filters) with max-pooling,
    followed by a dense head ending in a single sigmoid unit.
    Compiled with RMSprop, binary cross-entropy and binary accuracy.
    """
    inputs = Input(shape=input_shape)
    x = inputs
    # Double the filter count in each conv/conv/pool block.
    for n_filters in (32, 64, 128):
        x = Conv2D(filters=n_filters, kernel_size=(3, 3), activation=relu, padding='SAME')(x)
        x = Conv2D(filters=n_filters, kernel_size=(3, 3), activation=relu, padding='SAME')(x)
        x = MaxPooling2D(pool_size=2)(x)
    x = Flatten()(x)
    x = Dense(units=128, activation=relu)(x)
    outputs = Dense(units=1, activation=sigmoid)(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=RMSprop(learning_rate=0.001),
        loss=BinaryCrossentropy(),
        metrics=[BinaryAccuracy()],
    )
    return model
# Train the custom benchmark CNN and plot its learning curves.
early_stopping_cb = EarlyStopping(patience=2, verbose=1)
benchmark_model = get_model(images_train[0].shape)
benchmark_model.summary()
# Keep only the best validation weights on disk.
model_checkpoint_cb = ModelCheckpoint(
    filepath='./assets/cats_vs_dogs_model/best_model_weights',
    save_best_only=True,
    save_weights_only=True,
    verbose=1
)
history_benchmark = benchmark_model.fit(
    x=images_train,
    y=labels_train,
    epochs=10,
    batch_size=32,
    validation_data=(images_valid, labels_valid),
    # callbacks=[early_stopping_cb, model_checkpoint_cb]
    callbacks=[model_checkpoint_cb]
)
# print(history_benchmark.history)
# Accuracy and loss curves for train vs validation.
plt.figure(figsize=(15, 5))
plt.suptitle('Custom classifier')
plt.subplot(121)
plt.plot(history_benchmark.history['binary_accuracy'])
plt.plot(history_benchmark.history['val_binary_accuracy'])
plt.title('Accuracy vs Epochs')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Validation'], loc='lower right')
plt.subplot(122)
plt.plot(history_benchmark.history['loss'])
plt.plot(history_benchmark.history['val_loss'])
plt.title('Loss vs Epochs')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['Train', 'Validation'], loc='upper right')
# Held-out evaluation of the benchmark model.
benchmark_test_loss, benchmark_test_accuracy = benchmark_model.evaluate(
    x=images_test,
    y=labels_test
)
print(f'Test loss: {np.round(benchmark_test_loss, 2)}')
print(f'Test accuracy: {np.round(benchmark_test_accuracy, 2)}')
# Pretrained MobileNetV2 (ImageNet weights) used as the transfer-learning base.
mobile_net_v2_model = MobileNetV2()
mobile_net_v2_model.summary()
def build_feature_extractor_model(model):
    """Truncate *model* at its 'global_average_pooling2d' layer.

    Returns a new Model mapping the original inputs to the pooled features,
    discarding the original classification head.
    """
    pooled_output = model.get_layer('global_average_pooling2d').output
    return Model(inputs=model.inputs, outputs=pooled_output)
# Build the MobileNetV2 feature extractor and show its architecture.
feature_extractor = build_feature_extractor_model(mobile_net_v2_model)
print('\nFeature extractor model:\n')
feature_extractor.summary()
def add_new_classifier_head(feature_extractor_model):
    """Stack a small dense binary-classification head onto the feature extractor."""
    layers = [
        feature_extractor_model,
        Dense(units=32, activation=relu),
        Dropout(rate=.5),
        Dense(units=1, activation=sigmoid),
    ]
    return Sequential(layers)
# Assemble the transfer-learning classifier and show its architecture.
pet_classifier_model = add_new_classifier_head(feature_extractor)
pet_classifier_model.summary()
def freeze_pretrained_weights(model):
    """Freeze the feature-extractor (first layer) so only the new head is trained."""
    # model.get_layer('model').trainable = False # - the name is assigned a suffix "_n"
    model.layers[0].trainable = False
# Freeze the pretrained base, then compile with the same settings as the benchmark.
freeze_pretrained_weights(pet_classifier_model)
pet_classifier_model.summary()
pet_classifier_model.compile(
    optimizer=RMSprop(learning_rate=0.001),
    loss=BinaryCrossentropy(),
    metrics=[BinaryAccuracy()]
)
def resize_images(images):
    """Resize every image to 224x224x3 (MobileNetV2 input) scaled to [0, 1].

    Anti-aliased resize preserving the original value range, truncated to
    int32 and then divided by 255.
    """
    resized = [
        resize(img, output_shape=(224, 224, 3), anti_aliasing=True,
               preserve_range=True).astype('int32') / 255.
        for img in images
    ]
    return np.array(resized)
# Resize all three splits to the MobileNetV2 input size.
images_valid_mnv2 = resize_images(images_valid)
# print('images_valid_mnv2.shape: ', images_valid_mnv2.shape)
# print('images_valid.shape: ', images_valid.shape)
images_train_mnv2 = resize_images(images_train)
images_test_mnv2 = resize_images(images_test)
# Optional visualisation of resized images (kept disabled).
# class_names = np.array(['Dog', 'Cat'])
# plt.figure(figsize=(25, 10))
# plt.title('Resized images to 224x224')
#
# indices = np.random.choice(images_valid_mnv2.shape[0], size=25, replace=False)
#
# for n, i in enumerate(indices):
#     ax = plt.subplot(5, 5, n + 1)
#     plt.imshow(images_valid_mnv2[i])
#     plt.title(class_names[labels_train[i]])
#     # plt.axis('off')
# Train the transfer-learning classifier and plot its learning curves.
pet_classifier_history = pet_classifier_model.fit(
    x=images_train_mnv2,
    y=labels_train,
    epochs=10,
    batch_size=64,
    validation_data=(images_valid_mnv2, labels_valid),
    # callbacks=[early_stopping_cb]
)
print('pet_classifier_history:\n', pet_classifier_history.history)
print('history_benchmark:\n', history_benchmark.history)
plt.figure(figsize=(15, 5))
plt.suptitle('Pet classifier model')
plt.subplot(121)
plt.plot(pet_classifier_history.history['binary_accuracy'])
plt.plot(pet_classifier_history.history['val_binary_accuracy'])
plt.title('Accuracy vs Epochs')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Validation'], loc='lower right')
plt.subplot(122)
plt.plot(pet_classifier_history.history['loss'])
plt.plot(pet_classifier_history.history['val_loss'])
plt.title('Loss vs Epochs')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['Train', 'Validation'], loc='upper right')
pet_benchmark_test_loss, pet_benchmark_test_accuracy = pet_classifier_model.evaluate(
    x=images_test_mnv2,
    y=labels_test
)
# BUGFIX: these prints previously reported the *benchmark* model's metrics.
print(f'[PetClass] Test loss: {np.round(pet_benchmark_test_loss, 2)}')
print(f'[PetClass] Test accuracy: {np.round(pet_benchmark_test_accuracy, 2)}')
# Compare both models
benchmark_train_accuracy = history_benchmark.history['binary_accuracy'][-1]
benchmark_val_accuracy = history_benchmark.history['val_binary_accuracy'][-1]
benchmark_train_loss = history_benchmark.history['loss'][-1]
benchmark_val_loss = history_benchmark.history['val_loss'][-1]
pet_benchmark_train_accuracy = pet_classifier_history.history['binary_accuracy'][-1]
pet_benchmark_val_accuracy = pet_classifier_history.history['val_binary_accuracy'][-1]
pet_benchmark_train_loss = pet_classifier_history.history['loss'][-1]
pet_benchmark_val_loss = pet_classifier_history.history['val_loss'][-1]
comparison = pd.DataFrame([
    ['Training loss', benchmark_train_loss, pet_benchmark_train_loss],
    ['Training accuracy', benchmark_train_accuracy, pet_benchmark_train_accuracy],
    ['Validation loss', benchmark_val_loss, pet_benchmark_val_loss],
    ['Validation accuracy', benchmark_val_accuracy, pet_benchmark_val_accuracy],
    ['Test loss', benchmark_test_loss, pet_benchmark_test_loss],
    # BUGFIX: this row previously repeated the test-loss values.
    ['Test accuracy', benchmark_test_accuracy, pet_benchmark_test_accuracy],
])
comparison.index = [''] * 6
print(comparison)
# Side-by-side confusion matrices at a 0.5 decision threshold.
plt.figure(figsize=(15, 5))
plt.suptitle('Confusion matrix comparison')
preds = benchmark_model.predict(images_test)
preds = (preds > .5).astype('int32')
cm = confusion_matrix(labels_test, preds)
df_cm = pd.DataFrame(cm, index=['Dog', 'Cat'], columns=['Dog', 'Cat'])
plt.subplot(121)
plt.title('Confusion matrix for benchmark model\n')
sns.heatmap(df_cm, annot=True, fmt='d', cmap='YlGnBu')
plt.ylabel('Prediction')
plt.xlabel('Ground truth')
preds = pet_classifier_model.predict(images_test_mnv2)
preds = (preds > .5).astype('int32')
cm = confusion_matrix(labels_test, preds)
df_cm = pd.DataFrame(cm, index=['Dog', 'Cat'], columns=['Dog', 'Cat'])
plt.subplot(122)
plt.title('Confusion matrix for pet classifier model (transfer learning)\n')
sns.heatmap(df_cm, annot=True, fmt='d', cmap='YlGnBu')
plt.ylabel('Prediction')
plt.xlabel('Ground truth')
plt.show()
"skieloch@gmail.com"
] | skieloch@gmail.com |
631ae50d7250ec87b0b5633600b070ac8fa460fd | 2bda4b8282f0d0866dd2319a5682d2f11634c1a5 | /test.py | e8f8b290b074278c811973ffdb4e59a00492df36 | [] | no_license | ColorfulCodes/Markov-Wisdom-Generator | 0e392594c248299f45cb56b17dec819b1fa49d69 | e68478bd728861e01a5391127e9b71e8725c365e | refs/heads/master | 2022-03-28T00:54:46.103761 | 2020-02-04T03:23:21 | 2020-02-04T03:23:21 | 72,307,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | import unittest
from Markov import run
class TestCode(unittest.TestCase):
    """Sanity checks on the length of the Markov wisdom generator's output."""
    # The generated text must fit in an SMS/tweet-sized message (at most 160 chars).
    def testwillWork(self):
        length = run()
        self.assertTrue(len(length) <=160)
    # NOTE(review): intentionally failing example -- it asserts an exact length
    # of 10, which the generator is not expected to produce.
    def testwillNotWork(self):
        length = run()
        self.assertTrue(len(length) ==10)
# Run the test suite when executed directly: ``python test.py``.
if __name__ == '__main__':
    unittest.main()
| [
"codescolorful@gmail.com"
] | codescolorful@gmail.com |
5bb331dee090ef3e3b573baecbe13ce7a57d91fe | de752ed33b779fcc6255508c7bd57570488ce4f9 | /DP and Recursion/matrix_chain_multiplication.py | 4ca1d5a988a676164a57bcdbd4f2c40601679bda | [] | no_license | agzuniverse/CompetitiveCoding | adbb8beda4247bb41fdcb7bc06c76c159a8b4145 | d6cdfe4ac5b6fb73a6a43808b985876f8eeb0061 | refs/heads/master | 2021-08-17T20:49:39.619400 | 2021-06-30T16:07:48 | 2021-06-30T16:07:48 | 163,753,001 | 8 | 4 | null | 2020-10-30T07:29:50 | 2019-01-01T17:10:54 | C++ | UTF-8 | Python | false | false | 1,129 | py | '''
Matrix Chain Multiplication problem
Given the dimensions of the matrix in an array P[0..n], calculate the minimum number of calculations required
to perform the multiplication of the matrices.
This solution runs in O(n^3) time.
The solution uses the fact that matrix multiplication is associative but the number of operations
can differ significantly depending on the order in which the multiplication is performed.
'''
import sys
def matrixChainMultiplication(p, n):
memo = [[sys.maxsize]*n for _ in range(n)]
# Cost is 0 when considering one matrix
for i in range(n):
memo[i][i] = 0
# z is the matrix chain length
for z in range(2, n):
# i and j are start and end points, with k varying with condition i<=k<j
for i in range(1, n-z+1):
j = i+z-1
for k in range(i, j):
currMin = memo[i][k] + memo[k+1][j] + p[i-1]*p[k]*p[j]
memo[i][j] = min(memo[i][j], currMin)
# The upper triangle elements are filled
return memo[1][n-1]
# Demo: three matrices with dimensions 1x2, 2x3 and 3x4 -> optimal cost 18.
dims = [1, 2, 3, 4]
print(matrixChainMultiplication(dims, len(dims)))
| [
"aswinganesh666@gmail.com"
] | aswinganesh666@gmail.com |
7a47afec56d847940c9f74ffe116a6034a5d26e3 | 70cfccc3c39556c92b58b4be27a296efc145010c | /cleaner.py | 7bf97cb31cf04650be15a2140d3cad60d08c4b8a | [] | no_license | ahouston/calibre-plugin-language-cleaner | 5e402bcc4c77fb1aafc29fc32433cf16bb0d2058 | fa6d7bc7dc4909f36dbd7aa67efd5862da162a05 | refs/heads/master | 2023-03-16T06:14:49.986858 | 2020-09-07T21:41:06 | 2020-09-07T21:41:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,864 | py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
import os
import sys
import re
def keep_case(sub, matchobj):
    """Return *sub* re-cased to mirror the word the regex matched.

    An all-caps match makes *sub* all-caps. Otherwise the case of (at most)
    the first two characters is copied over; when both of those are capitals
    the whole overlapping prefix of *sub* is upper-cased as well.
    """
    matched = matchobj.group(0)
    if matched.isupper():
        return sub.upper()
    leading_caps = 0
    # Copy the case of the first two characters of the matched word.
    for pos in range(min(2, len(sub), len(matched))):
        if matched[pos].isupper():
            leading_caps += 1
            sub = sub[:pos] + sub[pos].upper() + sub[pos + 1:]
    # Two leading capitals: upper-case the rest of the overlapping region too.
    if leading_caps > 1:
        for pos in range(min(len(sub), len(matched))):
            sub = sub[:pos] + sub[pos].upper() + sub[pos + 1:]
    return sub
def first_case(sub, matchobj):
    """Return *sub* with the matched word's leading/overall case applied.

    An all-caps match upper-cases *sub* entirely; otherwise only the case of
    the first character is copied from the match.
    """
    val = matchobj.group(0)
    if val.isupper():
        sub = sub.upper()
    else:
        try:
            # Copy only the first character's case
            # (previously a pointless ``for ii in range(1)`` loop).
            if val[0].isupper():
                sub = sub[0].upper() + sub[1:]
        except IndexError:
            # Empty match or empty replacement: report and return *sub* unchanged.
            # (Previously a bare ``except`` that hid every error type.)
            print("*"*60, "sub=", sub, "val=", val, "*"*60)
    return sub
def drop_first_match(sub, matchobj):
    ''' Drop first match, match case of first and return second '''
    # Group 1 is discarded; group 2 is returned, but its first alphabetic
    # character is upper-cased when group 1's first alphabetic character was.
    # NOTE(review): the ``sub`` parameter is only used in the error report.
    drop = matchobj.group(1)
    val = matchobj.group(2)
    try:
        for ii in range(len(drop)): # find first alpha in drop
            if drop[ii].isalpha():
                if drop[ii].isupper(): # uppercase, so copy to val
                    for jj in range(len(val)): # find first alpha in val
                        if val[jj].isalpha():
                            val = val[:jj] + val[jj].upper() + val[jj+1:]
                            break
                    break
    except:
        # NOTE(review): bare except -- dumps diagnostics and falls through
        # instead of propagating the error.
        print("*"*50, "error in drop_first_match")
        print(drop)
        print(val)
        print(str(sub))
        print(str(matchobj.groups()))
    return val
# Prepare two lists for different meanings of ass
# Each entry is (compiled pattern, replacement text, case-restoring function
# or False for a literal replacement).
dirty_a_list = [
    #########################################
    # dirtier ass
    #########################################
    # haul ass
    (re.compile(r'\b(move|haul)\Wass\b', re.I), "move fast", keep_case),
    # little ass
    (re.compile(r'little\W?ass\b', re.I), "little donkey", keep_case),
    (re.compile(r'little\W?asses\b', re.I), "little donkeys", keep_case),
    #your/own/etc. ass
    (re.compile(r'(?<=(.your|..own|...my|..our|..her|..his|.this|.that|..the|their|those|these|..its|..for)\W)ass\b', re.I), "rear", keep_case),
    (re.compile(r'(?<=(.your|..own|...my|..our|..her|..his|.this|.that|..the|their|those|these|..its|..for)\W)asses\b', re.I), "rears", keep_case),
    # asses
    (re.compile(r'\basses\b', re.I), "rears", keep_case),
    # ass
    (re.compile(r'\ban\Wass\b', re.I), "a jerk", keep_case),
    (re.compile(r'\bass\b', re.I), "rear", keep_case),
]
clean_a_list = [
    #########################################
    # cleaner ass
    #########################################
    # haul ass
    (re.compile(r'\bhaul\Wass\b', re.I), "move fast", keep_case),
    # asses
    (re.compile(r'\basses\b', re.I), "donkeys", keep_case),
    # ass
    (re.compile(r'\ban Ass\b'), "a Donkey", False), # C.S. Lewis
    (re.compile(r'\ban\Wass\b', re.I), "a donkey", keep_case),
    (re.compile(r'(?<!in\W)\bass\b', re.I), "donkey", keep_case),
]
# Pattern fragment matching common deity references; reused by lord_list below.
s_lord = '(god|jesus(\W?christ)?|christ)'
lord_list = [
# Thank God
(re.compile(r'thank( you\,?)? '+s_lord+r'\b', re.I), "thank goodness", first_case),
# My God
(re.compile(r'(?<!(..\bin|..\bis|..\bto|..\bof|from|..\bon)\W)my ' + \
s_lord+r's?\b(?! \w)', re.I), "my goodness", first_case),
# Oh God
(re.compile(r'\boh\,? '+s_lord+r'\b', re.I), "oh goodness", first_case),
# Good God
(re.compile(r'\bgood '+s_lord+r'\b', re.I), "goodness", first_case),
# name of God
(re.compile(r'\bname of '+s_lord+r'\b', re.I), "world", first_case),
# In God's name
(re.compile(r'(?<=where\W)\bin\W'+s_lord + \
'\W*s name', re.U+re.I), "in the world", first_case),
(re.compile(r'\bin '+s_lord+'\W*s name', re.U+re.I),
"for goodness sake", first_case),
# in God
(re.compile(r'\bin '+s_lord+r'\b', re.I), "in the lord", first_case),
# of God
#(re.compile(r'(?<!(.church|society|...time) )of '+s_lord+r'\b',re.I),"of heaven",first_case),
# to God
(re.compile(r'\bto '+s_lord+r'\b', re.I), "to heaven", first_case),
# by God
(re.compile(r'\bby '+s_lord+r'\b', re.I), "by the heavens", first_case),
# God knows (start of sentence, not start of sentence)
(re.compile(r'([^ ]|\. +)'+s_lord+' knows', re.I),
r"\1Heaven knows", False),
(re.compile(r''+s_lord+' knows', re.I), "heaven knows", False),
# For God's sake
(re.compile(r'\bfor '+s_lord+'\W*s sake', re.U+re.I),
"for goodness sake", first_case),
# Godforsaken
(re.compile(r'\b'+s_lord+'.?forsaken\b', re.I), "forsaken", keep_case),
# Godawful
(re.compile(r'\b'+s_lord+'.?awful\b', re.I), "forsaken", keep_case),
]
# Use if this book is likely to take Lord's name in vain
vain_lord_list = [
(re.compile(r'thanked '+s_lord+r'\b', re.I), "thanked heaven", first_case),
(re.compile(r'(?<=([\.?!,]\W\W|..\"|..”|..“|.\W\W))'+s_lord +
's?(?=[\.,?!])', re.U+re.I), "goodness", keep_case),
# Jesus and/or Christ
(re.compile(r'(?<!of )\bjesus(\W?(christ|almighty))?', re.I), "goodness", first_case),
(re.compile(r'(?<!of )(?<!jesus )christ\b', re.I), "goodness", keep_case),
# God
#(re.compile(r'(?<![Oo][Ff] )\bG[Oo][Dd]\b(?! ([Bb][Ll][Ee][Ss][Ss]|[Ss][Aa][Vv][Ee]))'),"goodness",keep_case),
]
# 3 element list: [search phrase, replace value, preserve case function]
re_list = [
#########################################
# Random stuff
#########################################
# Remove suggestive 'tits' with not suggestive belly
# don't do 'tit for tat', tit-tat-toe, or split tit-ular
(re.compile(r'\b[tT][iI][tT][sS]?\b(?! for)(?!-tat)(?!-ul)',
re.I), 'belly', keep_case),
# Slut is rude, replace with slightly better hussy
(re.compile(r'\bslut\b', re.I), 'hussy', keep_case),
(re.compile(r'\bsluts\b', re.I), 'hussies', keep_case),
# Change topless bar to just bar
(re.compile(r'topless\Wbar', re.I), 'bar', keep_case),
# Replace whore with woman (not always a good replacement)
# (re.compile(r'\bwhore\b',re.I),'woman',keep_case),
# (re.compile(r'\bwhores\b',re.I),'women',keep_case),
# Whorehouse becomes brothel
(re.compile(r'whorehouse', re.I), 'brothel', keep_case),
# Crap and crapper to 'use the toilet'
(re.compile(r'take\Wa\Wcrap(per)?', re.I), 'use the toilet', keep_case),
(re.compile(r'\bcrapper', re.I), 'toilet', keep_case),
# Crap and crapper to garbage
(re.compile(r'\bcrap\b', re.I), 'garbage', keep_case),
(re.compile(r'\bcrapped\b', re.I), 'wet', keep_case),
# Cock-up with mess-up
(re.compile(r'\bcock.?up\b', re.I), "mess up", keep_case),
# Cocksucker with sucker
(re.compile(r'\bcock.?(?=suc)', re.I), "", False),
# Cocker with idiot (but not cocker spaniel
(re.compile(r'\bcocker\b(?![ -]spani)', re.I), "idiot", keep_case),
# Cunt
(re.compile(r'\bcunt\b', re.I), 'groin', keep_case),
# Replace goddammit and dammit with 'dang it'
(re.compile(r'([^\.?!] *) Goddam([mn])', re.I), r'\1 goddam\2', False),
(re.compile(r'(?:gods?)?dammit', re.I), 'dang it', keep_case),
#########################################
# Replace ass and its varieties (see specific lists above, dirty_a_list and clean_a_list)
#########################################
# smart ass
(re.compile(r'smart\W?ass\b', re.I), "smart aleck", keep_case),
(re.compile(r'smart\W?asses\b', re.I), "smart alecks", keep_case),
# kiss ass
(re.compile(r'kissin[^\s]\Wass(es)?\b',
re.U+re.I), "kissing up", keep_case),
(re.compile(r'kiss.{1,6}ass(es)?\b', re.I), "fly a kite", keep_case),
# kick ass
(re.compile(r'kick\W?ass\b', re.I), "kick booty", keep_case),
(re.compile(r'kick\W?asses\b', re.I), "kick booties", keep_case),
# cover ... ass
(re.compile(r'(cover.{0,8} )ass\b', re.I), r"\1rear", False),
(re.compile(r'(cover.{0,8} )asses\b', re.I), r"\1rears", False),
# kick ... ass
(re.compile(r'(kick.{0,8} )ass\b', re.I), r"\1rear", False),
(re.compile(r'(kick.{0,8} )ass\b', re.I), r"\1rears", False),
# assed
(re.compile(r'\bassed\b', re.I), "ended", keep_case),
# jack/dumbass
(re.compile(r'(?<=bray like a )(jack|dumb)ass\b', re.I), "donkey", keep_case),
(re.compile(r'(jack|dumb)ass\b', re.I), "jerk", keep_case),
(re.compile(r'(jack|dumb)asses\b', re.I), "jerks", keep_case),
# asshole
(re.compile(r'an\Wasshole', re.I), "a jerk", keep_case),
(re.compile(r'asshole', re.I), "jerk", keep_case),
# horse's ass
(re.compile(r'horse[^\s]?s ?ass\b', re.U+re.I), "jerk", keep_case),
(re.compile(r'horse[^\s]?s ?asses\b', re.U+re.I), "jerks", keep_case),
#########################################
# Replace damn and its varieties
#########################################
# I'll be damned
(re.compile(r'be(\W+)(?:gods? *)?damned', re.I), r'be\1darned', False),
# Give a damn
(re.compile(r'give(\W+.{0,10}?)a(\W+)(?:gods? *)?damn',
re.I), 'care', keep_case),
(re.compile(
r'gives(\W+.{0,10}?)a(\W+)(?:gods? *)?damn', re.I), 'cares', keep_case),
# Damn near
(re.compile(r'(?:gods? *)?damn(\W+)near', re.I), 'nearly', keep_case),
# a damn. Worth a damn -> worth a cent (couldn't think of a better word)
(re.compile(r'((matters?|worth|of)\W+a\W+)(?:gods? *)?damn\b', re.I), r'\1cent', False),
# of the damned
(re.compile(r'(of\W*the\W*)(?:gods? *)?damned\b', re.I), r'\1cursed', False),
# Your damned word, a damn word, etc
(re.compile(r'(your|our|her|his|this|that|the|their|hose|these|for|so|some|one|one more|too)( +)(?:gods? *)?damn(?:ed)?\b(?!-)', re.I), r'\1', False),
# a damn
(re.compile(r'(?<=\b[aA] )(?:gods? *)?damn(?:ed)',
re.I), 'darn', keep_case),
# damned good, damn sure, etc (Clancy's favorites)
(re.compile(r'\b((?:gods? *)?damn(?:ed))(?:\W+)(sure|near|sight|good|much|hard|easy|big|little|glad|clever|mess|smart|fine|fool|right|thing|much|shame|nice|mean|bad|lucky|late|important)', re.I), '', drop_first_match),
(re.compile(r'\b((?:gods? *)?damn(?:ed))(?:\W+)well', re.I), 'darn well', keep_case),
# Religious damning
(re.compile(r'\b(?:gods? *)?damned', re.I), 'cursed', keep_case),
(re.compile(r'\b(?:gods? *)?damndest', re.I), 'very best', keep_case),
(re.compile(r'\b(?:gods? *)?damning', re.I), 'condemning', keep_case),
(re.compile(r'\b(?:gods? *)?damnable', re.I), 'condemning', keep_case),
(re.compile(r'\b(?:gods? *)?damnably', re.I), 'cursedly', keep_case),
(re.compile(r'\b(?:gods? *)?damnatory', re.I), 'condemning', keep_case),
(re.compile(r'\b(?:gods? *)?damnation', re.I), 'condemnation', keep_case),
# damn it
(re.compile(r', (?:gods? *)?damn it(?: all)?', re.I), '', keep_case),
(re.compile(r'((?:gods? *)?damn it(?: all)?, +)(.)', re.I), '', drop_first_match),
# a damn something, like "a damn nuisance"
(re.compile(r'\ba(\W+)(?:gods? *)?damn', re.I), r'a\1blasted', False),
# damn you/his/her/etc
(re.compile(r'\b(?:gods? *)?damn you to hell', re.I), 'curse you', keep_case),
(re.compile(r'\b(?:gods? *)?damn(?= (him|his|her|you|next|the|you))', re.I),
'curse', keep_case),
# Word by itself
(re.compile(r'\b(?:gods? *)?damn\b', re.I), 'dang', keep_case),
# Final catch-all
(re.compile(r'(?:gods? *)?damn', re.I), 'dang', keep_case),
#########################################
# Bitch
#########################################
# Son of a bitch
(re.compile(r's[UuOo]n(s)?([ -])?[OoUu][FfVv]([ -])?(a)?([ -])?bitch(e)?',
re.I), 'jerk', keep_case),
# verb
(re.compile(r'bitchin[^\s]', re.U+re.I), 'complaining', keep_case),
(re.compile(r'bitched', re.I), 'complained', keep_case),
(re.compile(r'bitche?(?=s? abo)', re.I), 'complain', keep_case),
(re.compile(r'(?<=(n([^\s]|o)t ))bitch',
re.U+re.I), 'complain', keep_case),
# A bitch
(re.compile(r's a bitch', re.I), 's tough', keep_case),
# Bitch by itself
(re.compile(r'\bbitch(e)?', re.I), 'jerk', keep_case),
#########################################
# Shit
#########################################
# bullshit
(re.compile(r'\b(bull|horse|dog|jack)(.)?shit', re.I), 'shit', keep_case),
# Holy shit
(re.compile(r'\bholy\W*shit', re.I), 'incredible', keep_case),
# exclamantion
(re.compile(r'(?<=oh, )shit\b', re.I), 'shoot', keep_case),
(re.compile(r'(?<=oh )shit\b', re.I), 'shoot', keep_case),
(re.compile(r'(?<!\w )shit!', re.I), 'shoot!', keep_case),
(re.compile(r'(?<=--)shit', re.I), 'shoot', keep_case),
# no shit
(re.compile(r'(?<=no\W)shit\b', re.I), 'kidding', keep_case),
# know shit
(re.compile(r'(?<=know\W)shit\b', re.I), 'squat', keep_case),
#shit-load, head, can, hole, pot
(re.compile(r'shit(.)?load', re.I), 'load', keep_case),
(re.compile(r'shit(.)?can', re.I), 'trash', keep_case),
(re.compile(r'shit(.)?pot', re.I), 'toilet', keep_case),
(re.compile(r'shit(.)?head', re.I), 'idiot', keep_case),
(re.compile(r'shit(.)?hole', re.I), 'pile of trash', keep_case),
# verb shittin'
(re.compile(r'shittin(?=[^\s])?', re.U+re.I), 'kiddin', keep_case),
# shitter
(re.compile(r'shitter', re.I), 'toilet', keep_case),
# shitty
(re.compile(r'shitty', re.I), 'nasty', keep_case),
# shit-filled
(re.compile(r'\Wshit(.)?fill(ed)?', re.I), '', keep_case),
# shit
(re.compile(r'(?<=ive a )shit', re.I), 'hoot', keep_case),
(re.compile(r'(?<=got )shit', re.I), 'nothing', keep_case),
(re.compile(r'(?<=\w )shit', re.I), 'trash', keep_case),
(re.compile(r'[S]hit(?=[,\.!?])', re.I), 'incredible', keep_case),
(re.compile(r'\bshit\b', re.I), 'rubbish', keep_case),
#########################################
# f-bomb
#########################################
# clean up script...
(re.compile(r'(m[OoUu]th[AaEe]r?)?fuck', re.I), 'zxsa', keep_case),
# clean up script...
(re.compile(r'(m[OoUu]th[AaEe]r?)?fook', re.I), 'zxsa', keep_case),
# f yourself
(re.compile(r'zxsa[\W]?yourself', re.I), "kill yourself", first_case),
# cluster f
(re.compile(r'cluster[\W]?zxsa', re.I), "massive failure", first_case),
# f your
(re.compile(r'zxsa[\W]?your', re.I), "bother your", first_case),
# f you
(re.compile(r'(?<!the[\W])zxsa[\W]?you', re.I), "forget you", first_case),
# you f up/with
(re.compile(r'(?<=you[\W])zxsa(?=[\W][UuWw])', re.I), "mess", first_case),
# f's
(re.compile(r'zxsas(?=\W(around|with|on\b|up\b|over|under|through))',
re.U+re.I), "messes", first_case),
# f'in
(re.compile(r'zxsa(?=(in[^\s]?|s)?\W(around|with|on\b|up\b|over|under|through))',
re.U+re.I), "mess", first_case),
# f'ing A
(re.compile(r'zxsain[^\s]? a\b', re.U+re.I), "unbelievable", first_case),
(re.compile(r' (zxsain[^\s]?(?: well)?)(\W*.)',
re.U+re.I), "", drop_first_match),
(re.compile(r'(zxsain[^\s]? (?:well)?)(\W*.)',
re.U+re.I), "", drop_first_match),
(re.compile(r'zxsain[^\s]?', re.U+re.I), "frigging", keep_case),
# f'er
(re.compile(r'zxsaer', re.I), "idiot", keep_case),
# f'it
(re.compile(r'zxsa\W?it', re.I), "phoo", keep_case),
# f your/his/her/etc
(re.compile(
r'zxsa(?=(ed)?\W(your|our|her|his|us|this|that|the\b|their|those|these|them|[^\s]em|for|a\b))', re.U+re.I), "harass", keep_case),
# f'ed
(re.compile(r'zxsaed', re.I), "messed", keep_case),
# f the
(re.compile(r'zxsa(?=[\W]the)', re.I), "forget", keep_case),
# the f
(re.compile(r'(?<=the[\W])zxsa\b', re.I), "heck", keep_case),
# verb
(re.compile(r'zxsa(?=\W(around|with|on\b|up\b|over|under|through))', re.I),
"mess", first_case),
(re.compile(r'(?<=to\W)zxsa', re.I), "mess", first_case),
# f, f ups
(re.compile(r'zxsa(\W?up)?', re.I), "idiot", keep_case),
#########################################
# dick
#########################################
# dick around
(re.compile(r'dick(?=(in[^\s])?\W(around|with|on\b|up\b|over|under|through))',
re.U+re.I), "mess", first_case),
# dickin['/g]
(re.compile(r'dick(?=(in[^\s][^o]))', re.U+re.I), "mess", keep_case),
#dickweed, dickhead
(re.compile(r'dick[WwHh]e[AaEe]d', re.I), "jerk", keep_case),
# know dick
(re.compile(r'(?<=[Kk]now )dick'), "squat", keep_case),
# dick on its own (toe is just sort of random...), not bird dickcissel
(re.compile(r'\bdick\b(?!-ciss)'), "toe", keep_case),
#########################################
# bastard
#########################################
(re.compile(r'\bbastard', re.I), "mongrel", keep_case),
#########################################
# hell
#########################################
# hellhound
(re.compile(r'\bhell\W?hound', re.I), 'demonhound', keep_case),
# hell-word (not helldiver bird)
(re.compile(r'\bhell(?=-[^oO])(?!-diver)', re.I), 'demon', keep_case),
# hell's bells
(re.compile(r'\bhell.{0,4}s?\W?bells?', re.I), 'by golly', keep_case),
# hell with
(re.compile(r'(to|the)\Whell\Wwith', re.I), 'forget', keep_case),
(re.compile(r'\bhell(?=\Wwith)', re.I), 'heck', keep_case),
# beats the hell out of
(re.compile(r'beats\Wthe\Whell\Wout\Wof', re.I), 'beats', keep_case),
# to hell
(re.compile(r'(?<=\bto\W)hell\b', re.I), 'perdition', keep_case),
# some hell
(re.compile(r'(?<=some\W)hell\b', re.I), 'trouble', keep_case),
# give/gave hell
(re.compile(r'(g[IiAa]ve.{0,7}\W)hell\b(?!\Wof)',
re.I), r'\1trouble', False),
# raise/raising hell
(re.compile(r'(rais[IiEe].{0,10}\W)hell\b', re.I), r'\1trouble', False),
#chance in hell
(re.compile(r'(?<=chance)( in) hell\b(\W*.)', re.I), '*removed*', drop_first_match),
#burn in hell
(re.compile(r'(?<=burn)( in) hell\b(\W*.)', re.I), '*removed*', drop_first_match),
# living hell
(re.compile(r'(?<=living\W)hell\b', re.I), 'prison', keep_case),
# for/etc the hell
(re.compile(r'(?<=(..for)\Wthe\W)hell\b', re.I), 'heck', keep_case),
# what the hell[.?!]
(re.compile(r'what\Wthe\Whell(?=[\.?!\,])',
re.I), 'what the heck', keep_case),
# (in) the hell
(re.compile(
r'(?: in)? (the)\Whell(?=[ \.?!\,])(?! in)(?! your)(?! out)(?! I\b)(?! of\b)(\W*.)', re.I), '*removed*', drop_first_match),
(re.compile(r'(?:in\W)?(the)\W+hell (?!in)(?!your)(?!out)(?!I\b)(?!of\b)(\W*.)',
re.I), '*removed*', drop_first_match),
#(re.compile(r'(?:\Win)?\W(the)\Whell\b(?=[ \.?!\,])(?! in)(\W*.)',re.I),'*removed*',drop_first_match),
# what/how/whatever/etc the hell
(re.compile(r'(?<=(..how|..for|where|.what|tever|..who)\Wthe\W)hell\b',
re.I), 'heck', keep_case),
# sure/busy/etc. as hell
(re.compile(r'(?<!known)( as) hell\b(\W*.)', re.I), '', drop_first_match),
# helluva
(re.compile(r'\bhelluva', re.I), 'heck of a', keep_case),
#way in hell
(re.compile(r'(?<=way) (in) hell\b(\W*.)', re.I), '', drop_first_match),
#what in hell
(re.compile(r'(?<=what) (in) hell\b(\W*.)', re.I), '', drop_first_match),
# but hell
(re.compile(r'(?<=but )hell\b', re.I), 'heck', keep_case),
# to be hell
(re.compile(r'(?<=to be )hell\b', re.I), 'terrible', keep_case),
# is/it's hell
(re.compile(r'(?<=is )hell\b', re.I), 'perdition', keep_case),
(re.compile(r'(?<=it[^\s]s )hell\b', re.U+re.I), 'perdition', keep_case),
#Aw, hell
(re.compile(r'(?<=Aw, )hell\b', re.I), 'heck', keep_case),
# catch hell
(re.compile(r'catch hell\b', re.I), 'get in trouble', keep_case),
(re.compile(r'caught hell\b', re.I), 'got in trouble', keep_case),
# as hell
(re.compile(r'sure as hell[ \,]', re.I), 'for sure', keep_case),
(re.compile(r'ed as hell\b', re.I), 'ed as could be', keep_case),
(re.compile(r'\bas hell[ \,]', re.I), 'as could be', keep_case),
# of hell
(re.compile(r'\bof hell\b', re.I), 'of torture', keep_case),
# all hell
(re.compile(r'\ball hell\b', re.I), 'all perdition', keep_case),
# hell was
(re.compile(r'\bhell(?= was)', re.I), 'heck', keep_case),
# hell to pay
(re.compile(r'\bhell(?= to pay)', re.I), 'heck', keep_case),
# bloody hell
(re.compile(r'(?<=bloody.)hell\b', re.I), 'heck', keep_case),
# dang hell
(re.compile(r'(?<=dang.)hell\b', re.I), 'heck', keep_case),
# like hell
(re.compile(r'(?<=(..look|looked|..hurt) )like hell\b', re.I), 'really bad', keep_case),
(re.compile(r'(?<=felt )like hell\b', re.I), 'like garbage', keep_case),
(re.compile(r'L[Ii][Kk][Ee]\W[Hh][Ee][Ll][Ll]'),
'not a chance', keep_case),
(re.compile(r'l[Ii][Kk][Ee]\W[Hh][Ee][Ll][Ll]'), 'like mad', keep_case),
# The hell I
(re.compile(r'the\Whell\WI\b', re.I), 'the heck I', keep_case),
# hell of/out/off ...
(re.compile(r'\bhell(?=\W(of\W|out|off\b|do\W|are\b))', re.I), 'heck', keep_case),
# hellish
(re.compile(r'\bhellish', re.I), 'unpleasant', keep_case),
# this/real hell (not followed by ?)
(re.compile(r'(?<=(this|real)\W)hell(\W?hole|\W?pit)?(?!\?)', re.I), 'pit', keep_case),
# hell's
(re.compile(r'\bhell[^\s]s', re.U+re.I), 'perditions\'s', keep_case),
# interjection hell (preceeded by . or " or --, etc, followed by ,
(re.compile(r'(?<=([\.?!,]\W\W|..\"|..”|..“|.\W\W))hell(?=[,!])',
re.U+re.I), 'heck', keep_case),
# >hell< shows up in html with italics or emphasis
(re.compile(r'\>hell\<', re.U+re.I), '>perdition<', keep_case),
]
#+ ass_list + lord_list
# Emit diagnostics about which rule sets get selected
DEBUG = True


def language_check(text):
    """Assemble the substitution rule list, tuned to this book's vocabulary.

    Two heuristics inspect the raw text: one decides whether the extra
    "Lord's name in vain" rules are needed, the other whether "ass" is used
    as an insult (dirty rules) or literally for donkeys (clean rules).
    """
    rules = re_list + lord_list
    # Heuristic 1: does the book take the Lord's name in vain?
    vain_pattern = ("(for Christ's sake!|Holy Christ!|Holy Jesus!|"
                    "for God's sake!|God almighty!|goddamn|fuck)")
    if re.search(vain_pattern, text, re.I):
        if DEBUG:
            print("Looks like book uses Lord's name in vain")
        rules += vain_lord_list
    elif DEBUG:
        print("Looks like book does not use Lord's name in vain")
    # Heuristic 2: "ass" as insult vs. donkey.
    donkey_pattern = ("(dumbass|asshole|smart ass|kick ass|ass kick|"
                      "ass handed|badass|cover.{0,5}ass)")
    if re.search(donkey_pattern, text):
        rules += dirty_a_list
        if DEBUG:
            print("Looks like book does not need the donkey treatment")
    else:
        rules += clean_a_list
        if DEBUG:
            print("Looks like book calls donkeys asses")
    return rules
'''
from functools import partial
import codecs
text = codecs.open('bad.txt', encoding='utf-8').read()
#if DEBUG:
# print(text)
# print("-"*40)
# print("-"*40)
output = ""
replacement_list = language_check(text)
output = ""
for line in text.split("\n"):
#Go through all elements of replacement_list
for search,sub,pcase in replacement_list:
if pcase: # Preserve case
line = search.sub(partial(pcase,sub),line)
else: # Don't preserve case
line = search.sub(sub,line)
output += line + "\n"
#if DEBUG:
# print(output)
codecs.open('clensed.txt','w', encoding='utf-8').write(output)
'''
| [
"username@example.com"
] | username@example.com |
ff27751426c9cf9cfea84220ce61f51bc471c742 | 3bed7d79ffdfeb0bb9cf830b0ad63df4056f4524 | /fdk_client/common/exceptions.py | 429cbf35090f3f744c4c337a5c4d93a79c8b4920 | [
"MIT"
] | permissive | gofynd/fdk-client-python | 3aea6d6e019e72e6bd87a99853243f4c31eb195e | 614d57855bb4c34dc3c1a204fb8b7b599b23e95e | refs/heads/main | 2023-08-21T16:58:23.710335 | 2023-07-04T11:30:26 | 2023-07-04T11:30:26 | 334,030,188 | 0 | 7 | MIT | 2023-06-26T13:36:21 | 2021-01-29T03:46:52 | Python | UTF-8 | Python | false | false | 1,206 | py | """Python code/sdk/common/exceptions.py."""
class FDKInvalidCredentialError(Exception):
    """Raised when the supplied credentials are invalid."""

    def __init__(self, message="Invalid Credentials"):
        """Store the (optionally customised) error message."""
        Exception.__init__(self, message)
class RequiredParametersError(Exception):
    """Raised when required parameters are not present."""

    def __init__(self, message="Required Parameters not present"):
        """Store the (optionally customised) error message."""
        Exception.__init__(self, message)
class FDKOAuthCodeError(Exception):
    """Raised for FDK OAuth code failures."""

    def __init__(self, message=""):
        """Store the (optionally empty) error message."""
        Exception.__init__(self, message)
class FDKClientValidationError(Exception):
    """Raised when FDK client-side validation fails."""

    def __init__(self, message=""):
        """Store the (optionally empty) error message."""
        Exception.__init__(self, message)
class FDKTokenIssueError(Exception):
    """Raised when an FDK access token cannot be issued."""

    # BUG FIX: the constructor was misspelled "__ini__", so it was never
    # invoked and the class silently fell back to Exception.__init__.
    def __init__(self, message=""):
        """Store the (optionally empty) error message."""
        super(FDKTokenIssueError, self).__init__(message)
| [
"jigar.dafda@gmail.com"
] | jigar.dafda@gmail.com |
259d88289570b1c7442b1fb0e9dbc2fcce8ee40b | 0feed51044426439cb475d2df6f6a82acf4a8b76 | /alphabet/G.py | 1c6542e82a943acf0a6c358ac8fd67a30d804b41 | [] | no_license | biswajitkundu93/pattern_all | c3f1a97881dfc9cb4895b52015e164c96ef017ff | 231468ac01fa7a21b5de6396a52b49c13f9583fc | refs/heads/master | 2023-04-11T06:11:26.984764 | 2021-04-21T13:05:40 | 2021-04-21T13:05:40 | 360,173,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | '''
*****
*
*
*
* ***
* *
*****
'''
def print_g(size=7):
    """Print an uppercase letter G drawn with asterisks.

    size: height of the letter in rows; the width is size - 1 columns.
         The default of 7 reproduces the original hard-coded output.
    """
    row_no = size
    col_no = size - 1
    star = "*"
    fix = row_no - (row_no // 2)     # row carrying the G's inner bar
    fix_col = (size // 2) + 1        # width of the inner bar
    for row in range(row_no):
        for col in range(col_no):
            # The glyph is the union of four strokes:
            left_edge = col == 0
            top_or_bottom = (row == 0 or row == row_no - 1) and col < col_no - 1
            inner_bar = (col_no - fix_col) < col < col_no and row == fix
            hook = col == col_no - 2 and row > fix
            if left_edge or top_or_bottom or inner_bar or hook:
                print(star, end="")
            else:
                print(end=" ")
        print()


# Preserve the original script behaviour: draw the default-size G on run.
print_g()
| [
"kundub591@gmail.com"
] | kundub591@gmail.com |
116084e8dd664efde68586528a71e504dbf7cc0e | 0aa40aa86a618f35c9e2884144664bc50885d569 | /portfolio-1/mywebsite/migrations/0001_initial.py | f131c067d0e979578c112fbf5494d16b2011c370 | [] | no_license | ananyasingh87/WebDev | fc55b524054e90d6692328a8e7cd107714d32eb4 | 50040491a5be9b7f3de8e2c93343ecf676dad580 | refs/heads/master | 2021-07-02T04:13:18.340869 | 2020-12-03T11:27:03 | 2020-12-03T11:27:03 | 201,674,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Generated by Django 2.2.17 on 2020-12-01 09:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); do not edit by hand.
    # First migration of this app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    # Creates the Project table: one row per portfolio entry.
    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                # Implicit auto-increment primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
                ('description', models.TextField()),
                # NOTE(review): upload_to='' stores images directly under
                # MEDIA_ROOT -- confirm this is intentional.
                ('image', models.ImageField(upload_to='')),
                ('link', models.TextField()),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
428433ab6774a930dd36d3c9dde55ce6668ba730 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_358/ch18_2020_09_30_10_56_20_265107.py | 29f3ce3fd91883bd0ce98bda1f5b2b3cadb47227 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py |
def verifica_idade(x):
    """Print which countries permit age ``x`` and return ``x`` unchanged."""
    if x > 21:
        mensagem = 'liberado EUA e BRASIl'
    elif x > 18:
        # The original "x>18<21" chained comparison reduces to x > 18 here,
        # because 18 < 21 is always true.
        mensagem = 'liberado BRASIL'
    else:
        mensagem = 'não esta liberado'
    print(mensagem)
    return x
| [
"you@example.com"
] | you@example.com |
65eb37da9be33214eeb691684c3c4e57aa346687 | a74274955856225165d05cbd26660f5a6055b94e | /10文件和异常/尝试s/10_190730.py | 1150f2fcca26e38636f831e65a8635eaf6fc71fc | [] | no_license | Vincent-Xin/learn_Python-Crash-Course | 9be5d1675d115f64133d242109ba94a056177d97 | e171be05ed3313c8ea196822e7c1c15cb298e8e7 | refs/heads/master | 2020-09-07T04:34:33.713445 | 2019-11-09T15:05:43 | 2019-11-09T15:05:43 | 220,656,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | #10-6逻辑
'''循环输入整数并求和,以及退出和提醒修正输入'''
def get_nums(n):
    """Interactively collect up to ``n`` integers; typing 'q' stops early.

    Invalid entries are rejected with a reminder and do not count toward
    ``n``.  Returns the list of accepted integers (possibly shorter than
    ``n`` when the user quits).
    """
    nums = []
    while len(nums) < n:
        # Prompt number is 1-based and counts only accepted entries.
        raw = input('number' + str(len(nums) + 1) + ':')
        if raw == 'q':
            break
        try:
            nums.append(int(raw))
        except ValueError:
            print('亲,这里建议你输入正确数字呢!')
    return nums
def add_nums(nums):
    """Print the total of ``nums`` when it holds at least two values."""
    if len(nums) >= 2:
        print(sum(nums))
# Driver: read batches of four numbers and print each batch's total.
# A short batch means the user typed 'q' inside get_nums, so stop.
while True:
    some_nums=get_nums(4)
    # fewer than 4 accepted numbers -> user quit early
    if(len(some_nums)<4):
        break
    add_nums(some_nums)
| [
"noreply@github.com"
] | noreply@github.com |
3a0fb01ccce767d1512dbe12156b64bd6108f048 | 1db1f823cde17671ea0a74da2d977a11f4765f2a | /FirstLogin/admin.py | d1d4d6fff5885e81407f22659a1e80a6a859a970 | [] | no_license | blacker-boy/web | f5c048b5a4999f99801a30cf5b3f272a2bf6dfb9 | 495ad68a67de1b6cf1b75cda2022cc51b7463cae | refs/heads/master | 2020-03-06T23:47:18.010736 | 2018-04-23T11:12:21 | 2018-04-23T11:12:21 | 127,141,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | #-*- coding: UTF-8 -*-
import web
import MySQLdb
urls=(
'/Adminlogin','Admin_Login',
'/Homepage','Home_page'
)
app=web.application(urls,globals())
class Admin_Login:
    """web.py handler for the admin login endpoint (/Adminlogin)."""

    def GET(self):
        # Serve the static login form.
        return web.seeother('/static/login_1.html', True)

    def POST(self):
        """Check the submitted admin id/password against the database.

        Returns 1 on a successful login and False otherwise (None if the
        database query itself fails), mirroring the original contract.
        """
        params = web.input()
        admin_id = str(params['admin_id'])
        password = str(params['password'])
        # NOTE(review): credentials appear to be stored and compared in
        # plain text -- they should be hashed; confirm before deploying.
        db = MySQLdb.connect("localhost", port=3306, user="root",
                             passwd="123456", db="admin_login",
                             charset='utf8')
        cursor = db.cursor()
        try:
            # BUG FIX: the original fetched every row and returned False on
            # the first mismatch, so only the table's first row was ever
            # checked.  A parameterized WHERE clause checks all rows and
            # avoids SQL injection.
            cursor.execute(
                'select * from adminlogin where ID_admin=%s and password=%s',
                (admin_id, password))
            if cursor.fetchone() is not None:
                return 1
            return False
        except Exception:
            db.rollback()
        finally:
            # BUG FIX: the original closed the cursor/connection after the
            # try block, so every early return leaked them.
            cursor.close()
            db.close()
class Home_page:
    """web.py handler serving the admin home page (/Homepage)."""

    def GET(self):
        destination = '/static/Homepage.html'
        return web.seeother(destination, True)
if __name__=='__main__':
app.run()
| [
"1391186209@qq.com"
] | 1391186209@qq.com |
c706371fac0234005089d5be699dd893f9fd48c1 | a6ccb1e49bd55782877934f06bd6f47c8cfd7ad0 | /models.py | 5b47ec44ca20695c1a612ba704821ba34800c057 | [] | no_license | acct252000/udacity_scalable_apps | adba67d9e77f963d8ed49c8fc1551e222b28a9aa | a507b813a2598b7666369a34f121d2905aec78fc | refs/heads/master | 2020-06-16T02:58:22.517202 | 2016-12-22T14:13:33 | 2016-12-22T14:13:33 | 75,251,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,122 | py | """models.py - This file contains the class definitions for the Datastore
entities used by the Game. Created by Christine Stoner 12-15-2016"""
__copyright__ = """
Copyright 2016 Christine Stoner
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import random
import logging
from datetime import date
from protorpc import messages
from collections import Counter
from google.appengine.ext import ndb
# Holds values for standard deck of cards
CARD_NUMBER_VALUES = ['A', '2', '3', '4', '5',
                      '6', '7', '8', '9', '10', 'J', 'Q', 'K']
# Hold suits for standard deck of cards
CARD_SUITS = ['hearts', 'diamonds', 'clubs', 'spades']
# Full 52-card deck as (suit, value) tuples in suit-major order, so a
# card's index is suit_index * 13 + value_index.
DECKOFCARDS = [(suit, value)
               for suit in CARD_SUITS
               for value in CARD_NUMBER_VALUES]
class User(ndb.Model):
    """User profile listing name and email of user
    Attributes:
        name: string property
        email: string property
    """
    # Display name; required for every profile.
    name = ndb.StringProperty(required=True)
    # Contact e-mail; optional, so it may be None.
    email = ndb.StringProperty()
class Game(ndb.Model):
"""Game object that lists data necessary for game
Attributes:
player_one_hand: text property holding comma separated
cardnumbers (0-51) of hand
player_two_hand: text property holding comma separated
cardnumbers (0-51) of hand
discard_pile: text property holding comma separated
cardnumbers(0-51) of discarded cards
undrawn_cards: text property holding comma separated
cardnumbers(0-51) of discarded cards
current_suit: text property holding lower case current suit of game
game_over: boolean property indicating if game is over
user_one: key property referencing User class
user_two: key property referencing User class
user_one_turn: boolean property indicating if user one turn
cancelled: boolean property indicating if game is cancelled
move: repeated field holding string tracking game history in format
user, action, card suit, card number
date: date property holding date created
computer_card: string property used for computer games, the string
number of the card selected by the computer to play
computer_crazy_suit: string property used for computer games, the
suit the computer has selected when playing an 8
game_message: string message used for messages from computer play
"""
player_one_hand = ndb.TextProperty(required=True)
player_two_hand = ndb.TextProperty(required=True)
discard_pile = ndb.TextProperty(required=True)
undrawn_cards = ndb.TextProperty(required=True)
current_suit = ndb.StringProperty(required=True)
game_over = ndb.BooleanProperty(required=True, default=False)
user_one = ndb.KeyProperty(required=True, kind='User')
user_two = ndb.KeyProperty(required=True, kind='User')
user_one_turn = ndb.BooleanProperty(required=True)
cancelled = ndb.BooleanProperty(required=True)
move = ndb.StringProperty(repeated=True)
date = ndb.DateProperty(required=True)
computer_card = ndb.StringProperty()
computer_crazy_suit = ndb.StringProperty()
game_message = ndb.StringProperty()
@classmethod
def new_game(cls, user_one, user_two):
"""Create a new game and save"""
cards = range(0, 52)
cards = map(str, cards)
random.shuffle(cards)
game = Game(user_one=user_one,
user_two=user_two,
player_one_hand=','.join(cards[0:7]),
player_two_hand=','.join(cards[7:14]),
discard_pile=cards[14],
current_suit=DECKOFCARDS[int(cards[14])][0],
undrawn_cards=','.join(cards[15:]),
# user_one_turn = bool(random.getrandbits(1)),
user_one_turn=True,
cancelled=False,
game_over=False,
date=date.today(),
computer_card='99',
computer_crazy_suit='none')
game.put()
return game
def to_text_list(cls, card_string):
"""convert string of card number(0-51) from computer play
into a list object representing card value
"""
# split string into integer list
card_value = DECKOFCARDS[int(card_string)]
# add text cards to list
logging.info(card_value)
# convert to list and return
return list(card_value)
def to_card_type(cls, card_string):
"""convert string of card numbers(0-51) into card values
from DECKOFCARDS and join in string to return in game form
"""
# split string into integer list
if card_string:
card_list = card_string.split(',')
card_list = map(int, card_list)
# add text cards to list
card_values = []
for card_number in card_list:
card = DECKOFCARDS[card_number]
card_string = '(' + card[0] + ',' + card[1] + ')'
card_values.append(card_string)
card_values_string = '*'.join(card_values)
else:
# returns blank line if no cards
card_values_string = card_string
return card_values_string
def to_cards(cls, card_string):
"""convert string of card numbers(0-51) into card values
from DECKOFCARDS add to list of card values to return
"""
# split string into integer list
card_list = card_string.split(',')
card_list = map(int, card_list)
# add text cards to list
card_values = []
for card_number in card_list:
card = DECKOFCARDS[card_number]
card_string = '(' + card[0] + ',' + card[1] + ')'
card_values.append(card_string)
return card_values
def to_string(cls, card):
"""convert string of card number into string of card value"""
card_number = DECKOFCARDS.index(card)
return str(card_number)
def card_in_hand(self, card_number, card_suit):
"""function to determine if card played is in player's hand"""
card_in_question = (card_suit, card_number)
# convert to card number and check if in hand
card_in_question_number = str(DECKOFCARDS.index(card_in_question))
if self.user_one_turn:
cards_held_list = self.player_one_hand.split(',')
else:
cards_held_list = self.player_two_hand.split(',')
if card_in_question_number in cards_held_list:
return True
else:
return False
def discard_card(self, user_one_turn, play_card_number, play_card_suit):
"""function to discard card from player's hand and change turn"""
# determine discarded card number
discarded_card = (play_card_suit, play_card_number)
discarded_card_number = DECKOFCARDS.index(discarded_card)
# add card to discarded pile
discarded_pile_list = self.discard_pile.split(',')
discarded_pile_list.insert(0, str(discarded_card_number))
self.discard_pile = ','.join(discarded_pile_list)
# update player hand and game history and cycle turn
if user_one_turn:
player_hand_list = self.player_one_hand.split(',')
player_hand_list.remove(str(discarded_card_number))
self.player_one_hand = ','.join(player_hand_list)
self.user_one_turn = False
user_name = self.user_one.get().name
else:
player_hand_list = self.player_two_hand.split(',')
player_hand_list.remove(str(discarded_card_number))
self.player_two_hand = ','.join(player_hand_list)
self.user_one_turn = True
user_name = self.user_two.get().name
game_move = [user_name, 'play', play_card_suit, play_card_number]
self.move.append(','.join(game_move))
self.put()
    def card_callback():
        """No-op default callback used by draw_card() when no follow-up
        action is required (two-human-player games).

        NOTE: defined in the class body but takes no ``self``; it is only
        ever called as a plain function (the default ``callback`` value).
        """
        # Placeholder body; the value is never read.
        # This line is intentionally assigned to zero
        callback_variable = 0
    def draw_card(self, user_one_turn, callback=card_callback):
        """Draw the top undrawn card into the acting player's hand.

        When the last undrawn card is taken, the discard pile (minus its
        face-up top card) is reshuffled into a new draw pile.  The move is
        appended to the history, the entity is saved, and *callback* is
        invoked - used to chain the computer's card selection after a draw.
        """
        # pop the top card of the draw pile
        undrawn_card_list = self.undrawn_cards.split(',')
        # drawing the final card triggers the reshuffle below
        reshuffle = False
        if len(undrawn_card_list) == 1:
            reshuffle = True
        drawn_card = undrawn_card_list.pop(0)
        # insert the drawn card at the front of the acting player's hand
        if user_one_turn is True:
            player_hand_list = self.player_one_hand.split(',')
            #player_hand_list.append(drawn_card)
            player_hand_list.insert(0,drawn_card)
            self.player_one_hand = ','.join(player_hand_list)
            user_name = self.user_one.get().name
        else:
            player_hand_list = self.player_two_hand.split(',')
            #player_hand_list.append(drawn_card)
            player_hand_list.insert(0,drawn_card)
            self.player_two_hand = ','.join(player_hand_list)
            user_name = self.user_two.get().name
        # rebuild the draw pile from the discard pile, keeping only the
        # current top discard face up
        if reshuffle is True:
            discard_pile_list = self.discard_pile.split(',')
            last_discard_card = discard_pile_list.pop(0)
            random.shuffle(discard_pile_list)
            self.undrawn_cards = ','.join(discard_pile_list)
            self.discard_pile = last_discard_card
        else:
            self.undrawn_cards = ','.join(undrawn_card_list)
        # history entry: "<player>,draw,<suit>,<value>"
        game_move = [user_name, 'draw', DECKOFCARDS[int(drawn_card)][0],
                     DECKOFCARDS[int(drawn_card)][1]]
        self.move.append(','.join(game_move))
        self.put()
        callback()
    def computer_play_card(self, callback=card_callback):
        """Select the computer's card (player two) and store the choice.

        Strategy: prefer playing from the most-common suit in hand that is
        legal (matches current_suit, or matches the top card's number),
        working down to the least-common suit; save an 8 ("crazy eight")
        as a last resort.  The result is written to self.computer_card
        ('99' if nothing is playable) and self.computer_crazy_suit, then
        *callback* is invoked.  Nothing is persisted here.
        """
        # number (rank) of the face-up discard
        current_number = DECKOFCARDS[int(self.discard_pile.split(',')[0])][1]
        # resolve the computer's hand into (suit, value) pairs
        computer_hand_list = self.player_two_hand.split(',')
        cards_in_hand = []
        for card_id in computer_hand_list:
            card = DECKOFCARDS[int(card_id)]
            cards_in_hand.append(card)
        # suits held, ordered by frequency (most common first)
        suits_held = Counter([x for (x, y) in cards_in_hand])
        suits = suits_held.most_common()
        # cards whose number matches the face-up discard (playable on rank)
        cards_matching_number = []
        for player_card in cards_in_hand:
            if player_card[1] == current_number:
                cards_matching_number.append(player_card)
        # ordinal suits; missing ranks fall back to the most common suit
        first_suit = suits[0][0]
        second_suit = first_suit
        third_suit = first_suit
        fourth_suit = first_suit
        selected_card = ('none', 'none')
        if len(suits) > 1:
            second_suit = suits[1][0]
        if len(suits) > 2:
            third_suit = suits[2][0]
        if len(suits) > 3:
            fourth_suit = suits[3][0]
        # Play by suit from most common suit down, excluding 8s (kept in
        # reserve).  Each branch keeps the LAST matching card found.
        if first_suit == self.current_suit:
            for card in cards_in_hand:
                if card[0] == first_suit and card[1] != '8':
                    selected_card = card
        elif first_suit in [x[0] for x in cards_matching_number]:
            for card in cards_matching_number:
                if card[1] == current_number and card[0] == first_suit:
                    selected_card = card
        elif second_suit == self.current_suit:
            for card in cards_in_hand:
                if card[0] == second_suit and card[1] != '8':
                    selected_card = card
        elif second_suit in [x[0] for x in cards_matching_number]:
            for card in cards_matching_number:
                if card[1] == current_number and card[0] == second_suit:
                    selected_card = card
        elif third_suit == self.current_suit:
            for card in cards_in_hand:
                if card[0] == third_suit and card[1] != '8':
                    selected_card = card
        elif third_suit in [x[0] for x in cards_matching_number]:
            for card in cards_matching_number:
                if card[1] == current_number and card[0] == third_suit:
                    selected_card = card
        elif fourth_suit == self.current_suit:
            for card in cards_in_hand:
                if card[0] == fourth_suit and card[1] != '8':
                    selected_card = card
        elif fourth_suit in [x[0] for x in cards_matching_number]:
            for card in cards_matching_number:
                if card[1] == current_number and card[0] == fourth_suit:
                    selected_card = card
        # no suit/number match: play an eight if one is held
        elif '8' in [x[1] for x in cards_in_hand]:
            for card in cards_in_hand:
                if card[1] == '8':
                    selected_card = card
        # no card to play
        else:
            selected_card = ('none', 'none')
        # record the choice on the model ('99'/'none' = must draw)
        if selected_card[0] == 'none':
            self.computer_crazy_suit = 'none'
            self.computer_card = '99'
        elif selected_card[1] == '8':
            # playing an eight: declare the most common suit held
            self.computer_crazy_suit = first_suit
            self.computer_card = self.to_string(selected_card)
        else:
            self.computer_crazy_suit = selected_card[0]
            self.computer_card = self.to_string(selected_card)
        callback()
    def computer_take_turn(self):
        """Complete the computer's turn: keep drawing until a playable card
        is selected, then discard it, ending the game if it was the
        computer's last card.

        NOTE(review): assumes computer_play_card() has already run once so
        that self.computer_card reflects the current hand; drawing invokes
        computer_play_card again via the draw_card callback.
        """
        while self.computer_card == '99':
            self.draw_card(False, self.computer_play_card)
        # discard card if one selected ('99' = still nothing playable)
        if self.computer_card != '99':
            self.current_suit = self.computer_crazy_suit
            computer_card_type = self.to_text_list(self.computer_card)
            # playing the last card ends the game with a computer win
            if len(self.player_two_hand.split(',')) < 2:
                self.discard_card(False, computer_card_type[1],
                                  computer_card_type[0])
                self.end_game(False)
                self.game_message = 'Game Over! Computer wins!'
            else:
                self.discard_card(False, computer_card_type[1],
                                  computer_card_type[0])
            # reset the sentinel for the next turn
            self.computer_card = '99'
def to_form(self, form_message=''):
"""Returns a GameForm representation of the Game"""
form = GameForm()
form.urlsafe_key = self.key.urlsafe()
form.user_one_name = self.user_one.get().name
form.user_two_name = self.user_two.get().name
form.player_one_hand = self.to_card_type(self.player_one_hand)
form.player_two_hand = self.to_card_type(self.player_two_hand)
form.discard_pile = self.to_card_type(self.discard_pile)
form.current_suit = self.current_suit
form.undrawn_cards = self.to_card_type(self.undrawn_cards)
form.user_one_turn = self.user_one_turn
form.game_over = self.game_over
form.cancelled = self.cancelled
form.date = str(self.date)
if self.game_message:
form.message = self.game_message
else:
form.message = form_message
return form
def to_history_form(self):
"""returns a history form representation of the game history"""
form = GameHistoryForm()
form.urlsafe_key = self.key.urlsafe()
form.user_one_name = self.user_one.get().name
form.user_two_name = self.user_two.get().name
form.date = str(self.date)
form.move = self.move
return form
def cancel_game(self):
"""cancels current game"""
self.cancelled = True
self.game_over = True
self.put()
def end_game(self, user_one_turn):
"""ends game when over"""
self.game_over = True
self.put()
if user_one_turn:
score = Score(winning_user=self.user_one,
losing_user=self.user_two, date=date.today())
else:
score = Score(winning_user=self.user_two,
losing_user=self.user_one, date=date.today())
# Add the game to the score 'board'
score.put()
class Score(ndb.Model):
    """Completed-game result that tracks winners and losers.

    Attributes:
        winning_user: Key of the winning User
        losing_user: Key of the losing User
        date: date the Game completed
    """
    winning_user = ndb.KeyProperty(required=True, kind='User')
    losing_user = ndb.KeyProperty(required=True, kind='User')
    date = ndb.DateProperty(required=True)
    def to_form(self):
        """Return a ScoreForm representation of this Score."""
        return ScoreForm(winning_user_name=self.winning_user.get().name,
                         losing_user_name=self.losing_user.get().name,
                         date=str(self.date))
# ---- protorpc message definitions (API request/response payloads) ----
# Field numbers are wire identifiers: never renumber or reuse them.
class UserForm(messages.Message):
    """UserForm for username and email information"""
    user_name = messages.StringField(1, required=True)
    email = messages.StringField(2, required=True)
class GameForm(messages.Message):
    """GameForm for outbound game state information"""
    urlsafe_key = messages.StringField(1, required=True)
    user_one_name = messages.StringField(2, required=True)
    user_two_name = messages.StringField(3, required=True)
    player_one_hand = messages.StringField(4, required=True)
    player_two_hand = messages.StringField(5, required=True)
    discard_pile = messages.StringField(6, required=True)
    current_suit = messages.StringField(7, required=True)
    undrawn_cards = messages.StringField(8, required=True)
    user_one_turn = messages.BooleanField(9, required=True)
    game_over = messages.BooleanField(10, required=True)
    cancelled = messages.BooleanField(11, required=True)
    date = messages.StringField(12, required=True)
    message = messages.StringField(13)
class GameHistoryForm(messages.Message):
    """GameHistoryForm for outbound game history information"""
    urlsafe_key = messages.StringField(1, required=True)
    user_one_name = messages.StringField(2, required=True)
    user_two_name = messages.StringField(3, required=True)
    date = messages.StringField(4, required=True)
    move = messages.StringField(5, repeated=True)
class NewGameForm(messages.Message):
    """Used to create a new game"""
    user_one_name = messages.StringField(1, required=True)
    user_two_name = messages.StringField(2, required=True)
class PlayCardForm(messages.Message):
    """Used to play a card in an existing game"""
    card_number = messages.StringField(1, required=True)
    card_suit = messages.StringField(2, required=True)
    crazy_suit = messages.StringField(3)
class ScoreForm(messages.Message):
    """ScoreForm for outbound Score information"""
    winning_user_name = messages.StringField(1, required=True)
    losing_user_name = messages.StringField(2, required=True)
    date = messages.StringField(3, required=True)
class UserRankingForm(messages.Message):
    """UserRankingForm for outbound user ranking information"""
    user_name = messages.StringField(1, required=True)
    wins = messages.IntegerField(2, required=True)
    losses = messages.IntegerField(3, required=True)
    games = messages.IntegerField(4, required=True)
    winning_percentage = messages.FloatField(5, required=True)
class ScoreForms(messages.Message):
    """Return multiple ScoreForms"""
    items = messages.MessageField(ScoreForm, 1, repeated=True)
class UserRankingForms(messages.Message):
    """Return multiple UserRankingForms"""
    items = messages.MessageField(UserRankingForm, 1, repeated=True)
class GameForms(messages.Message):
    """Return multiple GameForms"""
    items = messages.MessageField(GameForm, 1, repeated=True)
class StringMessage(messages.Message):
    """StringMessage-- outbound (single) string message"""
    message = messages.StringField(1, required=True)
| [
"acct252000@gmail.com"
] | acct252000@gmail.com |
7ddf2763ff35f675e8d16652a84d505283b5f102 | 5f0a1d552ca4d683a166982d40952631c58066b0 | /apps/c_test/test/test-114.py | ddb24bf9f10799163e355277bcd0545642bb41a8 | [] | no_license | tangmingming/daydayfresh | d1b559046215cc60e7f0d4d87744b4c51f298ffd | 95d59699f4a97756cf689eba958581380217ce84 | refs/heads/master | 2021-09-06T04:47:30.286033 | 2018-02-02T12:50:36 | 2018-02-02T12:50:36 | 113,733,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | import json
import requests
# Yunpian single-SMS send endpoint.
url = "https://sms.yunpian.com/v2/sms/single_send.json"
headers = {
    "Accept": "application/json",
    "charset": "utf-8"
}
# SECURITY(review): the API key and recipient phone number are hard-coded
# below; move them to environment variables / configuration before sharing
# or deploying this script, and rotate the exposed key.
data = {
    "apikey":"947bfcde6345fa588917fedf930dae2c",
    "mobile":"18516770799",
    "text":"【天天生鲜】您的验证码是1234。如非本人操作,请忽略本短信"
}
response = requests.post(url, headers=headers, data=data)
print(response.text)
"tangmingmingvip@163.com"
] | tangmingmingvip@163.com |
a40191aa9116827033cd459acae7e1f8e9939ad5 | 900be47870a62b8055b99edac9e1b0f6c6f0dc35 | /Tests/Python/registration_test.py | 58ae58a985fbaf5452ebbc476cc17ab188151cb4 | [] | no_license | simeks/pluto | e24fa1809eb06aeb8d41126402ab3825dae0f9d2 | 9f28fce900cbc8d7ebb56b79a9b6eaebb8141a67 | refs/heads/master | 2021-03-28T20:23:49.109098 | 2018-02-22T13:59:03 | 2018-02-22T13:59:03 | 75,561,384 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import image
import imageutils
import registration
# Registration engine parameters (block-wise graph-cut deformable
# registration over a 7-level image pyramid).
settings = {
    'step_size': 0.5,
    'regularization_weight': 0.05,
    'block_size': (12, 12, 12),
    'pyramid_level_min': 0,
    'pyramid_level_max': 6,
}
# NOTE(review): `imread`/`imwrite` (and `image`) are not imported in this
# chunk - presumably injected by the host environment; confirm before
# running stand-alone.
e = registration.RegistrationEngine('blocked_graph_cut', image.PixelType_Float32, settings)
# register channel 0 of chess2.png onto channel 0 of chess.png
i = imread('chess.png')
ir = imageutils.split_channels(i)[0]
i2 = imread('chess2.png')
i2r = imageutils.split_channels(i2)[0]
df = e.execute(ir, i2r)
# apply the resulting displacement field to the full-color image
out = registration.transform(i, df)
imwrite(out, 'out.png')
| [
"simon.ekstrom@gmail.com"
] | simon.ekstrom@gmail.com |
47919a0a9fec9f4cf0a311fce3417cc744500ce6 | c729a8da1d95e40e4d238fa0d98860e863b9c88a | /dump/Django_server/venv/bin/symilar | f30855ff2a00a2580f80757d47a3e2cf6a42cbb1 | [] | no_license | TrellixVulnTeam/flutter_django_5D89 | 971d2ccdeab045b582680e0e1bf0e2c303c95b98 | 62c928d2dcc6a04ea77ccf433782a14b43be543b | refs/heads/main | 2023-08-26T14:24:28.590062 | 2021-11-11T13:38:40 | 2021-11-11T13:38:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | #!/home/x038xx77/Desktop/DEV/New/flutter_django/Django_server/venv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for pylint's `symilar`
# (similar-code checker), created by the package installer.
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    # strip a trailing '-script.pyw' / '.exe' so argv[0] is the clean command name
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"x038xx77@gmail.com"
] | x038xx77@gmail.com | |
027dac08617c30b15472b1109ac2de7f1ddf1b03 | d63122a5352e209fb1e44a7ff7d508e477db829e | /userControl/browsing.py | 36e580f94576c7f99422f0fd59fdfadf8bef89cc | [] | no_license | adrienparis/mayaUC | e412dd24be70585ba1dfdb75b8472e17bd0a72bc | c5d0bf92e1a36450c9664e6f29ef726665835da1 | refs/heads/main | 2023-01-24T13:29:33.688875 | 2020-12-06T00:12:05 | 2020-12-06T00:12:05 | 318,320,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,538 | py | class Browsing(object):
class item():
def __init__(self, name, elem, image=None, info="", icon=None):
self.name = name
self.elem = elem
self.image = image
self.icon = icon
self.info = info
self.deep = 0
self.parent = None
self.displayElem = {}
self.selected = False
def setParent(self, parent):
if self.parent is not None:
self.parent.removeChildren(self)
parent.addChildren(self)
def addDisplayElem(self, dispElem, name):
self.displayElem[name] = dispElem
pass
def getDisplayElemBy(self, name):
return self.displayElem[name]
def selection(self, val):
self.selected = val
for d in self.displayElem.values():
d.selection(val)
def returnRoot(self):
if self.parent is not None:
return self.parent.returnRoot()
else:
return self
class folder(item):
def __init__(self, name, elem):
Browsing.item.__init__(self, name, elem)
self._childrens = []
self.icon = "arrowBottom"
self.isDeployed = True
self.area = None
self.selecteds = []
def deploying(self, val):
self.isDeployed = val
if val:
self.icon = "arrowBottom"
else:
self.icon = "arrowRight"
for e in self.displayElem.values():
e.icon = self.icon
e.refresh()
# self.displayElem.icon = self.icon
# self.displayElem.refresh()
self.area.visibility(self.isDeployed)
# cmds.formLayout(self.area, e=True, vis=self.isDeployed)
def deployingAll(self, val):
self.deploying(val)
for f in self._childrens:
if f.__class__ is Browsing.folder:
Browsing.folder.deployingAll(f, val)
def addChildren(self, child):
self._childrens.append(child)
child.parent = self
def removeChildren(self, child):
self._childrens.remove(child)
child.parent = None
def getAllParent(self):
if self.parent is None:
return "/"
return self.getAllParent() + "/" + self.name
def __init__(self):
# super(Browsing, self).__init__()
self.folders = {}
self.items = {}
self.root = Browsing.folder(".", None)
self.selecteds = []
self.multiSelect = True
self.addable = True
def deleteAllItemsFolders(self):
self.folders = {}
self.items = {}
self.root = Browsing.folder(".", None)
def addFolder(self, name, elem, parent=None):
f = Browsing.folder(name, elem)
if parent is None:
f.setParent(self.root)
f.deep = 1
else:
f.setParent(parent)
f.deep = f.parent.deep + 1
self.folders[elem] = f
return f
def addItem(self, name, elem, parent=None, image=None, info="", icon=None):
i = Browsing.item(name, elem, image=image, info=info, icon=icon)
if parent is None:
i.setParent(self.root)
else:
i.setParent(parent)
i.deep = i.parent.deep + 1
self.items[elem] = i
return i
def importBrows(self, imp):
self.root = imp.root
def _clickItem(self, item, displayElem, mod):
for t in item.returnRoot().selecteds:
if (mod != 1 or item.parent != t.parent or not self.multiSelect):
t.selection(False)
# t.displayElem.selection(False)
if mod != 1:
item.returnRoot().selecteds = []
if mod <= 1:
if item.selected:
item.selection(False)
item.returnRoot().selecteds.remove(item)
else:
item.selection(True)
item.returnRoot().selecteds.append(item)
# selection = [x.elem for x in self.selecteds if x.displayElem.selected]
self.runEvent("changeSelection", item.returnRoot().selecteds)
def select(self, selection, value):
'''display the lines in selection as selected in the tree
selection: the lines to be select
value: True to select
'''
pass
| [
"adrien.paris3@gmail.com"
] | adrien.paris3@gmail.com |
4ed1e67b7c0ac2f72bc91d45cacf40cceb25223e | d9da5aaf0a8af3d9887f201f7c3384764822cda9 | /makeinput.py | 91a64c8fa6b662c0c0ceb10d1047aadf1a18c084 | [] | no_license | sesameman/solve-tov-equation | 1e1d1637a164aea68fa85edcfc08c86665c99d4b | 5ecfcddffcf669e2a7025a76449b59b6858c4e2f | refs/heads/main | 2023-02-20T20:40:15.439035 | 2021-01-22T09:11:12 | 2021-01-22T09:11:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # !/usr/bin/python3.9
# author:kangjiayin
# time:\now?
# Build the list of pressure values needed as input.
import json
filepre='inipre.json'
pre=[]
# Sweep the pressure range (original comment said "0 to 2000", but
# 200000 steps of 0.005 actually yields values up to 1000.0 -- verify).
for i in range(200000):
    step=0.005
    new=round((i+1)*step,3)
    pre.append(new)
# Write the pressure list out as JSON.
with open(filepre,'w') as preData:
    json.dump(pre, preData)
# with open(filepre) as preData:
#     #json.dump(pre, preData)
#     aa=json.load(preData)
| [
"noreply@github.com"
] | noreply@github.com |
1555a5a3a6a222ed065c23732481e23d748ace99 | a34c3a310afaffbc5b028d85baf8597c58e1c5b9 | /quantstats/version.py | 3c56ef3e9b8017eddf7b253b8fcde3d5f05d27c8 | [
"Apache-2.0"
] | permissive | blackcherry88/quantstats | 15e4d8025935d4b7cb4f42a1514ddb5873fb8d93 | 7b33bf45bb6e9985ff73d507c895d7ac7bde1d8d | refs/heads/main | 2023-08-13T03:19:50.265058 | 2021-10-12T23:32:36 | 2021-10-12T23:32:36 | 416,522,542 | 0 | 0 | Apache-2.0 | 2021-10-12T23:00:33 | 2021-10-12T23:00:33 | null | UTF-8 | Python | false | false | 19 | py | version = "0.0.43"
| [
"ran@aroussi.com"
] | ran@aroussi.com |
acbe18b373d44340d6ef34b14fb63c8c9d4e63ef | 0e846526b43bae7e56b693b7269a68ae9d14d18c | /TTHAnalysis/cfg/run_hnlDisplaced_cfg.py | 9da94a4470ed6f348923f822c8595c5e06333cda | [] | no_license | lesenka/cmgtools-lite | c4909698fb3680f3aa6aab0c44fdfd17f0bf2e0d | dc19a8ac8892f82e6027bab7674fc5c77e628f9d | refs/heads/80X | 2021-01-20T04:36:18.519459 | 2018-07-10T14:50:13 | 2018-07-10T14:50:13 | 89,702,817 | 0 | 0 | null | 2018-07-10T14:50:14 | 2017-04-28T12:10:54 | Python | UTF-8 | Python | false | false | 6,504 | py |
##########################################################
## CONFIGURATION FOR HNL TREES ##
## skim condition: >= 2 loose leptons, no pt cuts or id ##
##########################################################
import PhysicsTools.HeppyCore.framework.config as cfg
import re
#-------- LOAD ALL ANALYZERS -----------
from CMGTools.TTHAnalysis.analyzers.susyCore_modules_cff import *
from PhysicsTools.HeppyCore.framework.heppy_loop import getHeppyOption
# Drop core analyzers not needed for the HNL displaced-lepton trees.
susyCoreSequence.remove(genHiggsAna)
susyCoreSequence.remove(genHFAna)
susyCoreSequence.remove(pdfwAna)
susyCoreSequence.remove(susyScanAna)
susyCoreSequence.remove(tauAna)
susyCoreSequence.remove(photonAna)
susyCoreSequence.remove(isoTrackAna)
#-------- SET OPTIONS AND REDEFINE CONFIGURATIONS -----------
# "test" heppy option counts as an interactive test only when non-numeric
# (purely numeric values select predefined test configurations below).
isTest = getHeppyOption("test",None) != None and not re.match("^\d+$",getHeppyOption("test"))
# Lepton Skimming: require at least 3 leptons (effectively no upper bound)
ttHLepSkim.minLeptons = 3
ttHLepSkim.maxLeptons = 999
# Run miniIso
lepAna.doMiniIsolation = True
lepAna.packedCandidates = 'packedPFCandidates'
lepAna.miniIsolationPUCorr = 'rhoArea'
lepAna.miniIsolationVetoLeptons = None # use 'inclusive' to veto inclusive leptons and their footprint in all isolation cones
lepAna.doIsolationScan = False
# Lepton Preselection
# (impact-parameter cuts deliberately opened up for displaced leptons)
# inclusive very loose muon selection
lepAna.inclusive_muon_pt = 5
lepAna.inclusive_muon_dxy = 99999
lepAna.inclusive_muon_dz = 99999
# loose muon selection
lepAna.loose_muon_dxy = 99999
lepAna.loose_muon_dz = 99999
lepAna.loose_muon_relIso = 1.0
# inclusive very loose electron selection
lepAna.inclusive_electron_id = ""
lepAna.inclusive_electron_pt = 5
lepAna.inclusive_electron_eta = 2.5
lepAna.inclusive_electron_dxy = 99999
lepAna.inclusive_electron_dz = 99999
# FIX: removed stray trailing commas that previously turned several of
# these scalar settings into one-element tuples (e.g. lostHits = (10.0,)).
lepAna.inclusive_electron_lostHits = 10.0
# loose electron selection
lepAna.loose_electron_id = ""
lepAna.loose_electron_pt = 5
lepAna.loose_electron_eta = 2.5
lepAna.loose_electron_dxy = 99999
lepAna.loose_electron_dz = 99999
lepAna.loose_electron_relIso = 1.0
lepAna.loose_electron_lostHits = 10.0
jetAna.copyJetsByValue = True # do not remove this
metAna.copyMETsByValue = True # do not remove this
# keep all jets/leptons: no lepton-jet cross-cleaning for this analysis
jetAna.cleanJetsFromLeptons=False
jetAna.cleanSelectedLeptons=False
jetAna.storeLowPtJets=False
jetAna.jetEtaCentral = jetAna.jetEta
# jet energy correction global tags: one for MC, per-run-range for data
jetAna.mcGT="Spring16_25nsV8_MC"
jetAna.dataGT = "Spring16_25nsV8BCD_DATA Spring16_25nsV8E_DATA Spring16_25nsV8F_DATA Spring16_25nsV8_DATA"
jetAna.runsDataJEC = [276811, 277420, 278802]
# add the secondary-vertex analyzer just before the core event analyzer
susyCoreSequence.insert(susyCoreSequence.index(ttHCoreEventAna),
                        ttHSVAna)
## tree configuration
from CMGTools.TTHAnalysis.analyzers.treeProducerHNL import *
# MC-truth lepton matching, inserted before the core event analyzer
susyCoreSequence.insert(susyCoreSequence.index(ttHCoreEventAna),
                        susyLeptonMatchAna)
leptonTypeSusyExtraLight.addVariables([
    NTupleVariable("mcUCSXMatchId", lambda x : x.mcUCSXMatchId if hasattr(x,'mcUCSXMatchId') else -1, mcOnly=True, help="MC truth matching a la UCSX"),
])
# electron MVA IDs (value 1 for muons so the branch is always filled)
leptonTypeSusy.addVariables([
    NTupleVariable("mvaIdSpring16HZZ", lambda lepton : lepton.mvaRun2("Spring16HZZ") if abs(lepton.pdgId()) == 11 else 1, help="EGamma POG MVA ID, Spring16, HZZ; 1 for muons"),
    NTupleVariable("mvaIdSpring16GP", lambda lepton : lepton.mvaRun2("Spring16GP") if abs(lepton.pdgId()) == 11 else 1, help="EGamma POG MVA ID, Spring16, GeneralPurpose; 1 for muons"),
])
## Tree Producer
treeProducer = cfg.Analyzer(
     AutoFillTreeProducer, name='treeProducerHNL',
     vectorTree = True,
     saveTLorentzVectors = False, # can set to True to get also the TLorentzVectors, but trees will be bigger
     defaultFloatType = 'F', # use Float_t for floating point
     globalVariables = hnl_globalVariables,
     globalObjects = hnl_globalObjects,
     collections = hnl_collections,
)
from CMGTools.RootTools.samples.samples_13TeV_RunIISummer16MiniAODv2 import *
#from CMGTools.RootTools.samples.samples_13TeV_DATA2016 import *
from CMGTools.HToZZ4L.tools.configTools import printSummary, configureSplittingFromTime, cropToLumi, prescaleComponents, insertEventSelector
# default dataset: powheg ttbar dileptonic sample
selectedComponents = [TTLep_pow]
#-------- SEQUENCE -----------
sequence = cfg.Sequence(susyCoreSequence+[
    treeProducer,
])
preprocessor = None
#-------- HOW TO RUN -----------
test = getHeppyOption('test')
if test == '1':
    # quick interactive test on a single local signal file
    #comp = selectedComponents[0]
    comp = cfg.MCComponent( files = ["/afs/cern.ch/work/m/mvit/public/HeavyNeutrino_trilepton_M-40_V-1e-05_2l_NLO/heavyNeutrino_100.root"], name="M-40_V-1e-05_2l_NLO" )
    comp.files = comp.files[:1]
    comp.splitFactor = 1
    comp.fineSplitFactor = 1
    selectedComponents = [ comp ]
## Auto-AAA: resolve sample files through xrootd unless running on CRAB
from CMGTools.RootTools.samples.autoAAAconfig import *
if not getHeppyOption("isCrab"):
    autoAAA(selectedComponents)
## output histogram
outputService=[]
from PhysicsTools.HeppyCore.framework.services.tfile import TFileService
output_service = cfg.Service(
      TFileService,
      'outputfile',
      name="outputfile",
      fname='treeProducerHNL/tree.root',
      option='recreate'
    )
outputService.append(output_service)
# print summary of components to process
printSummary(selectedComponents)
# the following is declared in case this cfg is used in input to the heppy.py script
from PhysicsTools.HeppyCore.framework.eventsfwlite import Events
from CMGTools.TTHAnalysis.tools.EOSEventsWithDownload import EOSEventsWithDownload
event_class = EOSEventsWithDownload if not preprocessor else Events
EOSEventsWithDownload.aggressive = 2 # always fetch if running on Wigner
if getHeppyOption("nofetch") or getHeppyOption("isCrab"):
    event_class = Events
    if preprocessor: preprocessor.prefetch = False
config = cfg.Config( components = selectedComponents,
                     sequence = sequence,
                     services = outputService,
                     preprocessor = preprocessor,
                     events_class = event_class)
| [
"lesya.shchutska@cern.ch"
] | lesya.shchutska@cern.ch |
d579b30d52e69dc20657216b704e6ec994f8b5c6 | 8904b28f9a0e4d7c2c3e4e1e67754464de7fc8ba | /Search/Find Peak Element.py | bad39f8d5f6bf43897cf2426a30fa35d740ce611 | [] | no_license | Chriszhangmw/LeetCode | 0b3f58470a51c360f5480df09251235faf3e836f | efe1d09e55812f8cb163e12ad333d134fadbb61a | refs/heads/master | 2020-08-04T00:43:11.856254 | 2020-01-29T22:23:57 | 2020-01-29T22:23:57 | 211,940,761 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | '''
A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞.
Example 1:
Input: nums = [1,2,3,1]
Output: 2
Explanation: 3 is a peak element and your function should return the index number 2.
Example 2:
Input: nums = [1,2,1,3,5,6,4]
Output: 1 or 5
Explanation: Your function can return either index number 1 where the peak element is 2,
or index number 5 where the peak element is 6.
'''
def method(nums):
    """Linearly scan *nums* and collect every interior peak index.

    A peak is an element strictly greater than both neighbours.  The
    original version only printed the result; the list is now also
    returned (printing is kept for backward compatibility).  Boundary
    peaks (index 0 / len-1) are not reported, matching the original
    behaviour.
    """
    res = []
    left = 1
    while left < len(nums) - 1:
        if nums[left] < nums[left - 1]:
            left += 1
            continue
        if nums[left] > nums[left + 1]:
            res.append(left)
            # the next element cannot be a peak: it is strictly smaller
            left += 2
        else:
            left += 1
    print(res)
    return res
def method2(nums):
    """Return the index of one peak element via binary search, O(log n).

    Fixes the original recursive version, which recursed on list slices
    and therefore returned an index relative to the sub-list rather than
    the original array (and copied the list at every level).
    """
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if nums[mid] > nums[mid + 1]:
            # descending here: a peak exists at mid or to its left
            hi = mid
        else:
            # ascending here: a peak exists strictly to the right
            lo = mid + 1
    return lo
# demo run: prints the index of one peak in the sample array
nums = [1,2,1,3,5,6,4]
print(method2(nums))
| [
"zhangmw_play@163.com"
] | zhangmw_play@163.com |
45b7420c1ea6cfe12fe8dc5473de99b96b622d11 | c004966e87cb9a8bef12990d156083831dc52fc8 | /Lists/add_list.py | 3338677c57de5959588538f2e1e2d2ae2a20500a | [] | no_license | EdwinTinoco/Python-Course | eef543ec9aa281b762d1a3d6e396555867e5a788 | 86aba504d63ec389cb734cfa66ea612414bb338d | refs/heads/master | 2021-01-16T11:16:58.567479 | 2020-08-05T14:39:42 | 2020-08-05T14:39:42 | 243,099,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | tags = ['python', 'development', 'tutorials', 'code']
# Nope -- this would overwrite the last element rather than append
# tags[-1] = 'Programming'
# In Place
# CAUTION: extend() iterates its argument, so extending with a *string*
# appends each character ('P', 'r', 'o', ...) as a separate element.
tags.extend('Programming')
tags.extend(['Programming']) # appends one new element at the end of the list
# New List
new_tags = tags + ['Programming']
print(new_tags)
print(tags)
"jaredlotic@gmail.com"
] | jaredlotic@gmail.com |
248bef5f88eb26138b9788d39d41f15cdc9e6200 | 4ae5e6f74955f30a8d6439d881d4c3fe40726a23 | /ask/qa/urls.py | 83bef41ef8f9a3d0efa997d089fd4af9e9c334ea | [] | no_license | gemetalreg/DjangoMailRu | da0e497b05723d1fd22e6905213edbc193e5f472 | 3075828dc7c50eb2d6d83dd093d32526459c6f66 | refs/heads/master | 2023-01-02T18:28:19.103164 | 2020-10-26T06:21:22 | 2020-10-26T06:21:22 | 104,564,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from django.conf.urls import url
from . import views
# NOTE(review): this second, Python-2-style implicit-relative import is
# redundant (handlers below are referenced through `views.`) and does not
# cover `views.ask`; consider removing it.
from views import test, fail404, one_question, new_questions, popular
urlpatterns = [
    url(r'^$', views.new_questions),
    url(r'^login/', views.test),
    url(r'^signup/', views.test),
    url(r'^question/(?P<qid>\d+)/', views.one_question, name='question'),
    url(r'^ask/', views.ask),
    url(r'^popular/', views.popular),
    url(r'^new/', views.test),
    url(r"^", views.fail404)  # catch-all: everything else -> 404 handler
]
| [
"itunereg@gmail.com"
] | itunereg@gmail.com |
da335c0cd13edba4b65ecf5d0d102ff3cec047ba | 01faa1318b24e2b0f0dd63abe1daa6df11f1e220 | /backend/smiles_21366/wsgi.py | 92397821d7f408a104036345985dc426681dbfbe | [] | no_license | crowdbotics-apps/smiles-21366 | 8c86f08b7fb10ec77dc4ba9bc09192b63443cba2 | 6d57fe1e1f9c5fd7a2a806734556638b1f536015 | refs/heads/master | 2022-12-28T17:24:06.222261 | 2020-10-11T18:00:08 | 2020-10-11T18:00:08 | 303,180,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for smiles_21366 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smiles_21366.settings')
# Module-level WSGI callable used by the application server.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1745a654482f682ea239168617904469693bff9f | ac20c187296d0a342f37ce2cdfeff3bd4a78557a | /lambda Functions/lf2.py | 475da9fa94ce708a295af68b6f2c72336705c4fc | [] | no_license | aj3087/AWS-Dining-Concierge-chatbot | efceb780af16a644f93c069a8b9d1fa925717829 | b23154d60afc5a5db0f9076ed45052d0aa6502ec | refs/heads/main | 2023-05-01T00:18:13.719282 | 2021-05-23T04:17:46 | 2021-05-23T04:17:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,848 | py | import boto3
import json
import requests
import random
from requests_aws4auth import AWS4Auth
def receiveMsgFromSqsQueue():
    """Poll the dining-suggestions SQS queue once and return the raw
    receive_message response (up to 5 messages, 10 s visibility timeout,
    no long polling)."""
    sqs = boto3.client('sqs')
    # NOTE(review): queue URL (including the AWS account id) is hard-coded;
    # prefer an environment variable.
    queue_url = 'https://sqs.us-east-1.amazonaws.com/060585368592/Queue1'
    response = sqs.receive_message(
        QueueUrl=queue_url,
        AttributeNames=['SentTimestamp'],
        MaxNumberOfMessages=5,
        MessageAttributeNames=['All'],
        VisibilityTimeout=10,
        WaitTimeSeconds=0
    )
    return response
# Returns the list of Business IDs of restaurants matching a cuisine.
def findRestaurantFromElasticSearch(cuisine):
    """Query the 'restaurants' Elasticsearch index for up to 1300
    restaurants whose 'cuisine' field matches *cuisine* and return
    their Business IDs (as strings)."""
    region = 'us-east-1'
    service = 'es'
    # SECURITY(review): AWS credentials are hard-coded (currently empty
    # strings); rely on the Lambda execution role / default credential
    # chain instead of embedding keys here.
    credentials = boto3.Session(aws_access_key_id="",
                                aws_secret_access_key="",
                                region_name="us-east-1").get_credentials()
    awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
    host = 'search-restaurants-rjc4ohjssdb6z36tig2yjp6i5q.us-east-1.es.amazonaws.com'
    index = 'restaurants'
    url = 'https://' + host + '/' + index + '/_search'
    # fetch up to 1300 matches in a single query (no random sampling here;
    # callers pick randomly from the returned list)
    query = {
        "size": 1300,
        "query": {
            "query_string": {
                "default_field": "cuisine",
                "query": cuisine
            }
        }
    }
    headers = { "Content-Type": "application/json" }
    response = requests.get(url,auth=awsauth, headers=headers, data=json.dumps(query))
    res = response.json()
    noOfHits = res['hits']['total']
    hits = res['hits']['hits']
    #print(noOfHits)
    #print(hits[0]['_id'])
    buisinessIds = []
    for hit in hits:
        buisinessIds.append(str(hit['_source']['Business ID']))
    #print(len(buisinessIds))
    return buisinessIds
# function returns detail of all resturantids as a list(working)
def getRestaurantFromDb(restaurantIds):
res = []
client = boto3.resource('dynamodb')
table = client.Table('yelp-restaurants')
for id in restaurantIds:
response = table.get_item(Key={'Business ID': id})
res.append(response)
return res
def getMsgToSend(restaurantDetails,message):
noOfPeople = message['MessageAttributes']['NoOfPeople']['StringValue']
date = message['MessageAttributes']['Date']['StringValue']
time = message['MessageAttributes']['Time']['StringValue']
cuisine = message['MessageAttributes']['Cuisine']['StringValue']
separator = ', '
resOneName = restaurantDetails[0]['Item']['name']
resOneAdd = separator.join(restaurantDetails[0]['Item']['address'])
resTwoName = restaurantDetails[1]['Item']['name']
resTwoAdd = separator.join(restaurantDetails[1]['Item']['address'])
resThreeName = restaurantDetails[2]['Item']['name']
resThreeAdd = separator.join(restaurantDetails[2]['Item']['address'])
msg = 'Hello! Here are my {0} restaurant suggestions for {1} people, for {2} at {3} : 1. {4}, located at {5}, 2. {6}, located at {7},3. {8}, located at {9}. Enjoy your meal!'.format(cuisine,noOfPeople,date,time,resOneName,resOneAdd,resTwoName,resTwoAdd,resThreeName,resThreeAdd)
return msg
def sendSMS(msgToSend,phoneNumber):
client = boto3.client("sns")
# sample phone number shown PhoneNumber="+12223334444"
client.publish(PhoneNumber = phoneNumber,Message=msgToSend)
def deleteMsg(receipt_handle):
sqs = boto3.client('sqs')
queue_url = 'https://sqs.us-east-1.amazonaws.com/060585368592/Queue1'
sqs.delete_message(QueueUrl=queue_url,
ReceiptHandle=receipt_handle
)
def lambda_handler(event, context):
# getting response from sqs queue
sqsQueueResponse = receiveMsgFromSqsQueue()
if "Messages" in sqsQueueResponse.keys():
for message in sqsQueueResponse['Messages']:
cuisine = message['MessageAttributes']['Cuisine']['StringValue']
restaurantIds = findRestaurantFromElasticSearch(cuisine)
# Assume that it returns a list of restaurantsIds
# call some random function to select 3 from the list
restaurantIds = random.sample(restaurantIds, 3)
restaurantDetails = getRestaurantFromDb(restaurantIds)
# now we have all required details to send the sms
# now we will create the required message using the details
msgToSend = getMsgToSend(restaurantDetails,message)
print(msgToSend)
# dont uncomment below line until required. There is max limit on msg
phoneNumber = message['MessageAttributes']['PhoneNumber']['StringValue']
if "+1" not in phoneNumber:
phoneNumber = '+1'+phoneNumber
sendSMS(msgToSend,phoneNumber)
#now delete message from queue
receipt_handle = message['ReceiptHandle']
deleteMsg(receipt_handle)
| [
"sowmya.nittala@gmail.com"
] | sowmya.nittala@gmail.com |
1aef21b8dcb84d2e04dca58eb2b3845d493b340b | c5b545ee6e0884fcfbf198497bc580830c08fb7f | /ciclos/ciclos9.py | 302f583566f514202d94b3c664041e5153b3dffa | [] | no_license | rubenalvarez98/Recuperalo | 9c772ca971fcabf4e1f5f9a8fc0caedf336a07b2 | c03d5ecbfdd6f8c2b7c33207e279f375a9adf5f6 | refs/heads/main | 2023-07-20T08:20:48.384930 | 2023-07-18T03:05:54 | 2023-07-18T03:05:54 | 349,874,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | numero = int(input("Ingrese un número para calcular su factorial: "))
factorial = 1
# Verificar si el número es negativo, cero o positivo
if numero < 0:
print("Solo numeros positivos")
elif numero == 0:
print("El factorial de 0 es 1.")
else:
for i in range(1, numero + 1):
factorial *= i
print("El factorial de", numero, "es", factorial)
| [
"noreply@github.com"
] | noreply@github.com |
0d3af189c999c81966b68412047b30a061b58994 | b3b066a566618f49ae83c81e963543a9b956a00a | /Unsupervised Learning in Python/02_Visualization with hierarchical clustering and t-SNE/08_t-SNE visualization of grain dataset.py | a7fd796bfaa4ac6fcdb158ffa94e3376e19f2bff | [] | no_license | ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020 | 666c4129c3f0b5d759b511529a365dfd36c12f1a | f3d20b788c8ef766e7c86c817e6c2ef7b69520b8 | refs/heads/master | 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null | UTF-8 | Python | false | false | 1,603 | py | '''
t-SNE visualization of grain dataset
In the video, you saw t-SNE applied to the iris dataset. In this exercise, you'll apply t-SNE to the grain samples data and inspect the resulting t-SNE features using a scatter plot. You are given an array samples of grain samples and a list variety_numbers giving the variety number of each grain sample.
INSTRUCTIONS
100XP
Import TSNE from sklearn.manifold.
Create a TSNE instance called model with learning_rate=200.
Apply the .fit_transform() method of model to samples. Assign the result to tsne_features.
Select the column 0 of tsne_features. Assign the result to xs.
Select the column 1 of tsne_features. Assign the result to ys.
Make a scatter plot of the t-SNE features xs and ys. To color the points by the grain variety, specify the additional keyword argument c=variety_numbers.
'''
# Import TSNE
from sklearn.manifold import TSNE
# Create a TSNE instance: model
model = TSNE(learning_rate=200)
# Apply fit_transform to samples: tsne_features
tsne_features = model.fit_transform(samples)
# Select the 0th feature: xs
xs = tsne_features[:,0]
# Select the 1st feature: ys
ys = tsne_features[:,1]
# Scatter plot, coloring by variety_numbers
plt.scatter(xs, ys, c=variety_numbers)
plt.show()
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================# | [
"Your-Email"
] | Your-Email |
78895d70380f80f6cdf233a4227ecd16e1366f47 | dc99fa1a0058aae3f765d2c01c3eefecc5ae7388 | /src/framat/__init__.py | cc7655655bb6224308bc5b4cb14062eb94c941de | [
"Apache-2.0"
] | permissive | Corentin1985/framat | a4cbeb47fa3573683907b6a6cb684c75aeec60d8 | 4177a95b4ed8d95a8330365e32ca13ac9ef24640 | refs/heads/master | 2023-05-08T23:11:01.516954 | 2021-06-03T18:33:20 | 2021-06-03T18:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | MODULE_NAME = 'FramAT'
from ._log import _plogger as log
from ._model import Model, Builtin
| [
"dettmann@kth.se"
] | dettmann@kth.se |
c2db51665a4a42adf5ad7c52339291548d358ab4 | 4a971163f1b3ed376913825a8e85bfd7122a16e2 | /forum/migrations/0004_auto_20210415_1307.py | 2fa64e1076d073cef6daf407404ebe85b1937f57 | [] | no_license | kifahnaim/djangoAimarena | a314b77e95b86290274721f95af8a036025447d3 | 0961afa7f8df1a15a9af22b04f908dbde4f880c3 | refs/heads/main | 2023-04-28T04:52:33.144912 | 2021-05-15T06:48:11 | 2021-05-15T06:48:11 | 367,449,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # Generated by Django 3.0.5 on 2021-04-15 13:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forum', '0003_comment_deleted'),
]
operations = [
migrations.AlterModelTable(
name='topic',
table=None,
),
migrations.CreateModel(
name='subtopic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_topic_title', models.CharField(max_length=120)),
('sub_topic_description', models.CharField(max_length=200)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topic_id', to='forum.topic')),
],
options={
'db_table': 'topic',
},
),
]
| [
"naimkifah@gmail.com"
] | naimkifah@gmail.com |
74558fd4430abbbd5dc2c6fe6384727804b8ca98 | 303694a68e924feab66a7edf2bf9c02b6dd89983 | /todo_project/todom/views.py | 1a991d1594a35527a50afda2578ed4f0a16fc85f | [] | no_license | mehranwafai/Todo-App | e3a9df35433a88884dac36e29c6529e0e57c18b3 | 32bfbdf681b8b0c95a5e43924b6a8afc6df4facc | refs/heads/master | 2022-05-28T20:20:58.614449 | 2020-05-04T18:08:54 | 2020-05-04T18:08:54 | 260,666,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import TodoItem
def todoview (request):
all_todo_items = TodoItem.objects.all()
return render(request,'todo.html',{'all_items' : all_todo_items })
def addTodo(request):
new_item = TodoItem(content = request.POST['content'])
new_item.save()
return HttpResponseRedirect('/todom/')
def deleteTodo(request, todo_id):
item_to_delete = TodoItem.objects.get(id=todo_id)
item_to_delete.delete()
return HttpResponseRedirect('/todom/')
| [
"a@hhh"
] | a@hhh |
df6e085b85aea5a18b3c8ad935106b7ab1fc2768 | 9b41bd4d829b7b4b5fc7ea2f375089793f34beb0 | /lib/googlecloudsdk/core/http_proxy.py | 9d0ab19cc662888bb1f6fb6514fed07f85a5da0e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | eyalev/gcloud | 20a596f9cbf7873eaea652a0b2ad080678f1598c | 421ee63a0a6d90a097e8530d53a6df5b905a0205 | refs/heads/master | 2020-12-25T14:48:11.142544 | 2016-06-22T08:43:20 | 2016-06-22T08:43:20 | 61,703,392 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to get an http proxy information."""
import urllib
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
import httplib2
def GetDefaultProxyInfo(method='http'):
"""Get ProxyInfo from environment.
This function is meant to mimic httplib2.proxy_info_from_environment, but get
the proxy information from urllib.getproxies instead. urllib can also get
proxy information from Windows Internet Explorer settings or MacOSX framework
SystemConfiguration.
Args:
method: protocol string
Returns:
httplib2 ProxyInfo object or None
"""
proxy_dict = urllib.getproxies()
proxy_url = proxy_dict.get(method, None)
if not proxy_url:
return None
pi = httplib2.proxy_info_from_url(proxy_url, method)
# The ProxyInfo object has a bypass_host method that takes the hostname as an
# argument and it returns 1 or 0 based on if the hostname should bypass the
# proxy or not. We could either build the bypassed hosts list and pass it to
# pi.bypass_hosts, or we can just replace the method with the function in
# urllib, and completely mimic urllib logic. We do the latter.
# Since the urllib.proxy_bypass _function_ (no self arg) is not "bound" to the
# class instance, it doesn't receive the self arg when its called. We don't
# need to "bind" it via types.MethodType(urllib.proxy_bypass, pi).
pi.bypass_host = urllib.proxy_bypass
return pi
def GetProxyProperties():
"""Get proxy information from cloud sdk properties in dictionary form."""
proxy_type_map = config.GetProxyTypeMap()
proxy_type = properties.VALUES.proxy.proxy_type.Get()
proxy_address = properties.VALUES.proxy.address.Get()
proxy_port = properties.VALUES.proxy.port.GetInt()
proxy_prop_set = len(filter(None, (proxy_type, proxy_address, proxy_port)))
if proxy_prop_set > 0 and proxy_prop_set != 3:
raise properties.InvalidValueError(
'Please set all or none of the following properties: '
'proxy/type, proxy/address and proxy/port')
if not proxy_prop_set:
return {}
proxy_user = properties.VALUES.proxy.username.Get()
proxy_pass = properties.VALUES.proxy.password.Get()
return {
'proxy_type': proxy_type_map[proxy_type],
'proxy_address': proxy_address,
'proxy_port': proxy_port,
'proxy_user': proxy_user,
'proxy_pass': proxy_pass,
}
def GetHttpProxyInfo():
"""Get ProxyInfo object or callable to be passed to httplib2.Http.
httplib2.Http can issue requests through a proxy. That information is passed
via either ProxyInfo objects or a callback function that receives the protocol
the request is made on and returns the proxy address. If users set the gcloud
properties, we create a ProxyInfo object with those settings. If users do not
set gcloud properties, we return a function that can be called to get default
settings.
Returns:
httplib2 ProxyInfo object or callable function that returns a Proxy Info
object given the protocol (http, https)
"""
proxy_settings = GetProxyProperties()
if proxy_settings:
return httplib2.ProxyInfo(
proxy_settings['proxy_type'],
proxy_settings['proxy_address'],
proxy_settings['proxy_port'],
proxy_user=proxy_settings['proxy_user'],
proxy_pass=proxy_settings['proxy_pass'])
return GetDefaultProxyInfo
| [
"eyalev@gmail.com"
] | eyalev@gmail.com |
eaa9965c1192d42b18600bdb6f41f2ae68fe3fcf | 817ff801938d25776b2564b3087c8a3c674da1a7 | /NUP153_Min_One/WT_Minimization/WT_5.py | d243521c696a6396a6864e7e0ae3d14778c5c4c7 | [] | no_license | yanghaobojordan/HIV1-Capsid | b22e21a9ad530ae11f128f409e298c5ab68871ee | f44f04dc9886e660c1fe870936c48e0e5bb5adc6 | refs/heads/main | 2023-04-09T01:27:26.626676 | 2021-04-23T18:17:07 | 2021-04-23T18:17:07 | 360,968,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def main():
filename=sys.argv[1]
pose=pose_from_pdb(filename)
test=Pose()
test.assign(pose)
scorefxn=get_fa_scorefxn()
dumpfile = 'Folding_WT_5.pdb'
txtfile = 'Folding_WT_5.txt'
newfile = open(txtfile, "w")
newfile.write(str(scorefxn(test)))
newfile.write('\n')
kT = 1
mc = MonteCarlo(test, scorefxn, kT)
min_mover = MinMover()
mm = MoveMap()
mm.set_bb(True)
mm.set_chi(True)
min_mover.movemap(mm)
min_mover.score_function(scorefxn)
min_mover.min_type("dfpmin")
min_mover.tolerance(0.001)
task_pack=standard_packer_task(test)
task_pack.restrict_to_repacking()
task_pack.or_include_current(True)
pack_mover=PackRotamersMover(scorefxn, task_pack)
for i in range(20):
pack_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Repacking Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
for i in range(1):
min_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Minimization Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
newfile.write(str(scorefxn(test)))
newfile.write('\n')
newfile.write('RMSD ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
newfile.close()
test.dump_pdb(dumpfile)
main()
| [
"yanghaobojordan@gmail.com"
] | yanghaobojordan@gmail.com |
ba53f557d96cdeecb70bddfb7bb783ac5e61cb62 | 3c57a4056566856acc62e5599e8a06dfccf6206b | /compact.py | 833cac1929939f04e88ed85c3ac8469793a1f375 | [] | no_license | LuiGGi629/zadania | 9668eff6b090738b5ecf5b5fecddcfff16a70882 | 1d068e1a9a0451304e3d40752e20db9d82f79bd3 | refs/heads/master | 2020-07-24T00:01:20.631768 | 2019-10-13T08:39:23 | 2019-10-13T08:39:23 | 207,744,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from itertools import groupby
def compact(iterable):
"""Return iterable with adjacent duplicate values removed."""
return (
item
for item, group in groupby(iterable)
# groupby function groups consecutive items
# in an iterable that are equivalent
)
| [
"wojtek.felix.zuber@gmail.com"
] | wojtek.felix.zuber@gmail.com |
bf9122934b6fe0917c4076faf466074d24464b46 | 7f2bb8cf028eb63568ebef6b18f527305ac4ca18 | /ui_terraindatamanipulator.py | 2a71039e09d0004ec08b5b63633bb88a05817fdc | [] | no_license | CCAlves/GSDCalculator-QGISPlugin | 1b342e9e692f4687b2fe445d037741a2472254a3 | 7fc12a2dc2a1654240e0b5949adf5c68502c3b4f | refs/heads/master | 2021-01-25T05:21:31.666908 | 2014-06-26T12:09:05 | 2014-06-26T12:09:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_terraindatamanipulator.ui'
#
# Created: Thu Apr 03 10:49:21 2014
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_TerrainDataManipulator(object):
def setupUi(self, TerrainDataManipulator):
TerrainDataManipulator.setObjectName(_fromUtf8("TerrainDataManipulator"))
TerrainDataManipulator.resize(400, 300)
self.buttonBox = QtGui.QDialogButtonBox(TerrainDataManipulator)
self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.retranslateUi(TerrainDataManipulator)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), TerrainDataManipulator.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), TerrainDataManipulator.reject)
QtCore.QMetaObject.connectSlotsByName(TerrainDataManipulator)
def retranslateUi(self, TerrainDataManipulator):
TerrainDataManipulator.setWindowTitle(_translate("TerrainDataManipulator", "TerrainDataManipulator", None))
| [
"caiocesar10@terra.com.br"
] | caiocesar10@terra.com.br |
1c2fba9705105a98ee24ad6354ee9388847d6538 | c87cdbaa2ad9938157a3f558d1c9b7f4bebd1091 | /lab1/server.py | bc3688140c181bf6ac063c3bc568df7186373682 | [] | no_license | vathsavi/computer-networks | e0f819a14c3758232f06a565d392527303be8163 | 3c53330f623389c70e633e02662a65f982165dd0 | refs/heads/master | 2020-12-18T14:10:47.083020 | 2020-01-21T18:32:56 | 2020-01-21T18:32:56 | 235,412,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | import socket # Import socket module
from ast import literal_eval
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) # Create a socket object
host = socket.gethostbyname(socket.gethostname()) # Get local machine name
port = 12345 # Reserve a port for your service.
s.bind((host, port)) # Bind to the port
print ('host ip', host)
s.listen(5) # Now wait for client connection.
while True:
c, addr = s.accept() # Establish connection with client.
print ('Got connection from', addr)
c.send(b'Thank you for connecting')
print(c.recv(1024))
l=c.recv(1024).decode('utf-8')
l=literal_eval(l)
check={}
dup=[]
for x in l:
if x not in check:
check[x]=1
else:
if check[x]==1:
dup.append(x)
temp = list(dict.fromkeys(l))
c.send(bytes(str(temp),'UTF-8'))
c.send(bytes(str(check ),'UTF-8'))
c.close() # Close the connection
| [
"noreply@github.com"
] | noreply@github.com |
8da8980b99393e3ccc23f3ef361ffcdbb41504a7 | c47c254ca476c1f9969f8f3e89acb4d0618c14b6 | /datasets/tensorflow-1.0.1/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py | 392309d543ed93d5cf2d53a76005052e6b3839ae | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | yijunyu/demo | 5cf4e83f585254a28b31c4a050630b8f661a90c8 | 11c0c84081a3181494b9c469bda42a313c457ad2 | refs/heads/master | 2023-02-22T09:00:12.023083 | 2021-01-25T16:51:40 | 2021-01-25T16:51:40 | 175,939,000 | 3 | 6 | BSD-2-Clause | 2021-01-09T23:00:12 | 2019-03-16T07:13:00 | C | UTF-8 | Python | false | false | 6,286 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data stored in a variable.
Run using bazel:
bazel run -c opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded_var
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded_var.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
"""Train MNIST for a number of epochs."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
with tf.name_scope('input'):
# Input data
images_initializer = tf.placeholder(
dtype=data_sets.train.images.dtype,
shape=data_sets.train.images.shape)
labels_initializer = tf.placeholder(
dtype=data_sets.train.labels.dtype,
shape=data_sets.train.labels.shape)
input_images = tf.Variable(
images_initializer, trainable=False, collections=[])
input_labels = tf.Variable(
labels_initializer, trainable=False, collections=[])
image, label = tf.train.slice_input_producer(
[input_images, input_labels], num_epochs=FLAGS.num_epochs)
label = tf.cast(label, tf.int32)
images, labels = tf.train.batch(
[image, label], batch_size=FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create the op for initializing variables.
init_op = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init_op)
sess.run(input_images.initializer,
feed_dict={images_initializer: data_sets.train.images})
sess.run(input_labels.initializer,
feed_dict={labels_initializer: data_sets.train.labels})
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# And then after everything is built, start the training loop.
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
# Save a checkpoint periodically.
if (step + 1) % 1000 == 0:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--num_epochs',
type=int,
default=2,
help='Number of epochs to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/data',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"y.yu@open.ac.uk"
] | y.yu@open.ac.uk |
018b1bfdd15dfa1af373f4e2756a60015f387971 | 212266251f6fad18d293dcd752240f90867319a0 | /library-fine-manager/finer.py | 6d6956c0cd03184f82c29920f7c4c7c653ae23b9 | [] | no_license | manojkumarmc/python-examples | b5e41a2c621ad1b2e54002f1c4b96c6b833eb455 | 2eb474f998d6212586a8a2a3723d7471ab565c08 | refs/heads/master | 2021-01-19T00:58:56.397735 | 2019-06-10T17:03:29 | 2019-06-10T17:03:29 | 38,770,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py |
class MyDate(object):
def __init__(self, d, m, y):
self.day = int(d)
self.month = int(m)
self.year = int(y)
def __lt__(self, obj):
ret_val = False
if self.year < obj.year:
ret_val = True
else:
if self.month < obj.month:
ret_val = True
elif self.month == obj.month:
if self.day < obj.day:
ret_val = True
else:
if self.day < obj.day:
ret_val = True
return ret_val
def __eq__(self, obj):
return self.year == obj.year and self.month == obj.month and self.day == obj.day
def same_calender_month(self, obj):
return self.year == obj.year and self.month == obj.month
def diff_days(self, obj):
return obj.day - self.day
def same_calender_year(self, obj):
return self.year == obj.year
def diff_years(self, obj):
return obj.year - self.year
def diff_months(self, obj):
return obj.month - self.month
actual_date = MyDate(2,7,2015)
expected_date = MyDate(1,2,2014)
if actual_date < expected_date or actual_date == expected_date:
print 'The fine is : INR 0'
elif expected_date.same_calender_month(actual_date):
print 'The fine is : INR ' + str(expected_date.diff_days(actual_date) * 15)
elif expected_date.same_calender_year(actual_date):
print 'The fine is : INR ' + str(expected_date.diff_months(actual_date) * 500)
else:
print 'The fine is : INR 1000'
| [
"manoj.kumar.myaliputhenpurayi@oracle.com"
] | manoj.kumar.myaliputhenpurayi@oracle.com |
35c84ac6379d534e50a7fa91f5600b2edbfab57b | 85ba50d40869d9ff32297a5e200302798e302317 | /manage.py | 4be7b17e10c1980647410181fb6c9493a0516def | [] | no_license | theshadowagent/dgap_mipt_site | a84c8fe7a6a9a232f89f07ccc7e9ff35ffb913f8 | e5da6a13104982089f55075c8d4b2f5953119a59 | refs/heads/master | 2021-08-11T12:35:36.181494 | 2017-11-13T18:01:37 | 2017-11-13T18:01:37 | 110,582,569 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dgap_site.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"shadowagent1st@gmail.com"
] | shadowagent1st@gmail.com |
d6822889e8d7f9096d7d9804978b9982b6c549be | 48138abce366dafa66878749aab172db5f7f4947 | /main.py | 86d116c5be974bba532485c4d60e232bd2214cd9 | [] | no_license | paper2project/DeepFinger | 9ed6a9bd1cee52d67c71fba1749a2ece3fb3ab65 | 4817aece492d289537924a43eb16e2741fcd2c75 | refs/heads/main | 2023-06-11T00:30:54.396339 | 2021-06-28T16:17:51 | 2021-06-28T16:17:51 | 380,778,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,199 | py | '''
This script handling the training process.
'''
import argparse
import math
import time
import os
from collections import Counter
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import CNN.Constants as Constants
from dataset import TranslationDataset, paired_collate_fn
from CNN.model import Net
import clustering
from util import UnifLabelSampler
import numpy as np
from data_processing import DataManager
from sklearn.metrics import balanced_accuracy_score
def train_epoch(model, training_data, optimizer, device):
''' Epoch operation in training phase'''
model.train()
total_loss = 0
n_total = 0
n_correct = 0
for batch in tqdm(
training_data, mininterval=2,
desc=' - (Training) ', leave=False):
src_seq,src_lbl = map(lambda x: x.to(device), batch)
optimizer.zero_grad()
pred = model(src_seq)
sample = (torch.max(pred, 1)[1] == src_lbl).sum().item()
n_correct += sample
n_total += src_lbl.shape[0]
loss = F.cross_entropy(pred, src_lbl)
loss.backward()
optimizer.step()
# note keeping
total_loss += loss.item()
train_acc = 100. * n_correct / n_total
print("training acc is: ", train_acc, "training loss is: ", total_loss)
return total_loss, train_acc
def eval_epoch(model, validation_data, device):
''' Epoch operation in evaluation phase '''
model.eval()
total_loss = 0
y_true = torch.LongTensor(0).to(device)
y_predict = torch.LongTensor(0).to(device)
with torch.no_grad():
for batch in tqdm(
validation_data, mininterval=2,
desc=' - (Validation) ', leave=False):
src_seq,src_lbl= [x.to(device) for x in batch]
pred = model(src_seq)
y_predict = torch.cat([y_predict, torch.max(pred, 1)[1]], 0)
y_true = torch.cat([y_true, src_lbl], 0)
loss = F.cross_entropy(pred, src_lbl)
total_loss += loss.item()
y_true = y_true.cpu().numpy().tolist()
y_predict = y_predict.cpu().numpy().tolist()
y_true_trans = np.array(y_true)
y_predict_trans = np.array(y_predict)
acc = balanced_accuracy_score(y_true_trans, y_predict_trans)
valid_acc = 100. * acc
print("validation acc is: ", valid_acc, "validation loss is: ", total_loss)
return y_true,y_predict,total_loss, valid_acc
def train(model, training_data, validation_data, testing_data, optimizer, device, opt):
''' Start training '''
log_train_file = None
log_valid_file = None
if opt.log:
log_train_file = opt.log + '.train.log'
log_valid_file = opt.log + '.valid.log'
print('[Info] Training performance will be written to file: {} and {}'.format(
log_train_file, log_valid_file))
with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
log_tf.write('epoch,loss,ppl,accuracy\n')
log_vf.write('epoch,loss,ppl,accuracy\n')
valid_accus = []
test_accus=[]
for epoch_i in range(opt.epoch):
print('[ Epoch', epoch_i, ']')
start = time.time()
train_loss, train_accu = train_epoch(
model, training_data, optimizer, device)
print(' - (Training) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
'elapse: {elapse:3.3f} min'.format(
ppl=math.exp(min(train_loss, 100)), accu=train_accu,
elapse=(time.time()-start)/60))
start = time.time()
_,_,valid_loss, valid_accu = eval_epoch(model, validation_data, device)
print(' - (Validation) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
'elapse: {elapse:3.3f} min'.format(
ppl=math.exp(min(valid_loss, 100)), accu=valid_accu,
elapse=(time.time()-start)/60))
valid_accus += [valid_accu]
start = time.time()
y_true,y_predict,test_loss, test_accu = eval_epoch(model, testing_data, device)
print(' - (Testing) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
'elapse: {elapse:3.3f} min'.format(
ppl=math.exp(min(test_loss, 100)), accu=test_accu,
elapse=(time.time()-start)/60))
test_accus += [test_accu]
## save test-pred-labels
with open(os.path.join(opt.save_pred_result,'label_test_pred'+str(opt.fold_num)+'_'+str(epoch_i)+'.txt'),'w') as f:
for i in y_true:
f.write(str(i)+' ')
f.write('\n')
for j in y_predict:
f.write(str(j)+' ')
## save model
model_state_dict = model.state_dict()
checkpoint = {
'model': model_state_dict,
'settings': opt,
'epoch': epoch_i,
'valid_accu': valid_accu,
'optimizer': optimizer}
if opt.save_model:
if opt.save_model_mode == 'all':
model_name = opt.save_model + \
'_fold_' + str(opt.fold_num) + \
'_epoch_' + str(epoch_i) + \
'_fs' + str(opt.feature_size) + \
'_ks' + str(opt.kernel_size) + \
'_es' + str(opt.embedding_size) + \
'_sl' + str(opt.max_word_seq_len) + \
'_dp' + str(opt.dropout) + \
'_accu_{accu:3.3f}'.format(accu=100 * valid_accu) + \
'.chkpt'
torch.save(checkpoint, model_name)
elif opt.save_model_mode == 'step':
if (epoch_i+1)%opt.save_model_epoch_step==0:
model_name = opt.save_model + \
'_fold_' + str(opt.fold_num) + \
'_epoch_' + str(epoch_i) + \
'_fs' + str(opt.feature_size) + \
'_ks' + str(opt.kernel_size) + \
'_es' + str(opt.embedding_size) + \
'_sl' + str(opt.max_word_seq_len) + \
'_dp' + str(opt.dropout) + \
'_accu_{accu:3.3f}'.format(accu=100 * valid_accu) + \
'.chkpt'
torch.save(checkpoint, model_name)
print(' - [Info] The checkpoint file has been updated.')
## save log
if log_train_file and log_valid_file:
with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
epoch=epoch_i, loss=train_loss,
ppl=math.exp(min(train_loss, 100)), accu=100*train_accu))
log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
epoch=epoch_i, loss=valid_loss,
ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu))
def main(opt):
## opt operate
os.environ['CUDA_VISIBLE_DEVICES'] = opt.CUDA_VISIBLE_DEVICES
opt.cuda = not opt.no_cuda
opt.kernel_size_list=[int(x) for x in opt.kernel_size.split(',')]
result_dir='_fs'+str(opt.feature_size)+\
'_ks'+str(opt.kernel_size)+\
'_es'+str(opt.embedding_size)+\
'_sl'+str(opt.max_word_seq_len)+\
'_dp'+str(opt.dropout)+\
'_cn'+str(opt.cluster_num)+\
'_eps'+str(opt.eps)+ \
'_ms' + str(opt.min_samples) + \
'_fn' + str(opt.fold_num) + \
'/'
opt.save_model=opt.save_model+opt.train_src.split('/')[-1]+'/'+result_dir
os.makedirs(opt.save_model,exist_ok=True)
opt.save_pred_result=opt.save_pred_result+opt.train_src.split('/')[-1]+'/'+result_dir
os.makedirs(opt.save_pred_result, exist_ok=True)
print(opt.save_pred_result)
for label_choose in range(opt.grained):
#========= Processing Dataset =========#
if(opt.data==''):
datamanager = DataManager(opt.train_src,opt.grained,label_choose,opt.max_word_seq_len,
opt.fold_num,opt.min_word_count,opt.save_preprocess_data)
data=datamanager.getdata()
# #========= Loading Dataset =========#
else:
data = torch.load(opt.data)
training_data, validation_data,testing_data = prepare_dataloaders(data, opt)
opt.src_vocab_size = training_data.dataset.src_vocab_size
print(opt)
# clustering algorithm to use
deepcluster = clustering.__dict__['Kmeans'](opt.cluster_num,opt.eps,opt.min_samples)
device = torch.device('cuda' if opt.cuda else 'cpu')
CNNnet=Net(opt.src_vocab_size,
opt.max_word_seq_len,
opt.embedding_size,
opt.feature_size,
opt.kernel_size_list,
opt.grained).to(device)
fd = int(CNNnet.top_layer.weight.size()[1])
print('fd:',fd)
latest_cluster_index={}
for iteration_i in range(opt.iteration):
print('[ iteration', iteration_i, ']')
CNNnet.top_layer = None
features = compute_features(training_data, CNNnet, len(data['train']['src']))
deepcluster.cluster(features,data['train']['src'], verbose=True)
train_dataset_cluster,cluster_label_list= clustering.cluster_assign(deepcluster.cluster_label_list,
data['train']['src'],iteration_i,latest_cluster_index)
with open(os.path.join(opt.save_pred_result,'{}_cluster_data.txt'.format(label_choose)),'w') as f:
for i,data_x in enumerate(data['origin']['train']):
f.write(str(cluster_label_list[i])+'\t'+str(data_x)+'\n')
latest_cluster_index=cluster_label_list
train_dataloader_cluster = prepare_dataloaders_cluster(train_dataset_cluster, opt, data)
# set last fully connected layer
CNNnet.top_layer = nn.Linear(fd, len(set(deepcluster.cluster_label_list)))
CNNnet.top_layer.weight.data.normal_(0, 0.01)
CNNnet.top_layer.bias.data.zero_()
CNNnet.top_layer.cuda()
learnrate = 5*1e-5
optimizer = optim.Adam(
filter(lambda x: x.requires_grad, CNNnet.parameters()), lr=learnrate,
betas=(0.9, 0.98), eps=1e-09)
# train(CNNnet, train_dataloader_cluster, optimizer, device, opt)
for i in range(opt.epoch):
train_loss, train_accu = train_epoch(
CNNnet, train_dataloader_cluster, optimizer, device)
print(label_choose,train_loss,train_accu)
def compute_features(dataloader, model, N):
model.eval()
# discard the label information in the dataloader级
for i, (input_tensor, _) in enumerate(dataloader):
input_var = torch.autograd.Variable(input_tensor.cuda(), volatile=True)
aux = model(input_var).data.cpu().numpy()
if i == 0:
features = np.zeros((N, aux.shape[1]), dtype='float32')
aux = aux.astype('float32')
if i < len(dataloader) - 1:
features[i * opt.batch_size: (i + 1) *opt.batch_size] = aux
else:
# special treatment for final batch
features[i * opt.batch_size:] = aux
print(features.shape)
return features
def prepare_dataloaders(data, opt):
# ========= Preparing DataLoader =========#
train_loader = torch.utils.data.DataLoader(
TranslationDataset(
src_word2idx=data['dict']['src'],
src_insts=data['train']['src'],
src_lbls=data['train']['lbl']),
num_workers=2,
batch_size=opt.batch_size,
collate_fn=paired_collate_fn)
valid_loader = torch.utils.data.DataLoader(
TranslationDataset(
src_word2idx=data['dict']['src'],
src_insts=data['valid']['src'],
src_lbls=data['valid']['lbl']),
num_workers=2,
batch_size=opt.batch_size,
collate_fn=paired_collate_fn)
test_loader = torch.utils.data.DataLoader(
TranslationDataset(
src_word2idx=data['dict']['src'],
src_insts=data['test']['src'],
src_lbls=data['test']['lbl']),
num_workers=2,
batch_size=opt.batch_size,
collate_fn=paired_collate_fn)
return train_loader, valid_loader, test_loader
def prepare_dataloaders_cluster(train_dataset_cluster,opt,data):
data_src=[data[0] for data in train_dataset_cluster]
data_lbl = [data[1] for data in train_dataset_cluster]
print('data_lbl',Counter(data_lbl))
train_dataloader_cluster = torch.utils.data.DataLoader(
TranslationDataset(
src_word2idx=data['dict']['src'],
src_insts=data_src,
src_lbls=data_lbl),
num_workers=2,
batch_size=opt.batch_size,
collate_fn=paired_collate_fn)
return train_dataloader_cluster
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-cluster_num', type=int, default=30)
parser.add_argument('-eps', type=float, default=0.1)
parser.add_argument('-min_samples', type=int, default=10)
parser.add_argument('-grained', type=int, default=8)
parser.add_argument('-train_src', default='')
parser.add_argument('-save_preprocess_data', default='')
parser.add_argument('-max_word_seq_len', type=int, default=32)
parser.add_argument('-min_word_count', type=int, default=0)
parser.add_argument('-fold_num', type=int, default=5)
parser.add_argument('-epoch', type=int, default=50)
parser.add_argument('-iteration', type=int, default=5)
parser.add_argument('-batch_size', type=int, default=128)
parser.add_argument('-embedding_size', type=int, default=128)
parser.add_argument('-feature_size', type=int, default=128)
parser.add_argument('-kernel_size', type=str, default='2,3,4')
parser.add_argument('-dropout', type=float, default=0.4)
parser.add_argument('-no_cuda', action='store_true')
parser.add_argument('-CUDA_VISIBLE_DEVICES', type=str, default='1')
parser.add_argument('-data', type=str, default='')
parser.add_argument('-log', default=None)
parser.add_argument('-save_model', default='model/')
parser.add_argument('-save_model_mode', type=str, choices=['all', 'step'], default='step')
parser.add_argument('-save_model_epoch_step', type=int, default=5)
parser.add_argument('-save_pred_result', default='cluster_data/')
parser.add_argument('--reassign', type=float, default=1.,
help="""how many epochs of training between two consecutive
reassignments of clusters (default: 1)""")
opt = parser.parse_args()
strat=time.time()
main(opt)
print(time.time()-strat)
| [
"noreply@github.com"
] | noreply@github.com |
6d1512c5e0a4678f22341438fe96e88589d84131 | 7a7ee079d24f501d7966bd5be6a04293a49e8ef8 | /AvesProyecto/urls.py | b4cc6882b59947481e7dbc440b8b12a5d3535047 | [] | no_license | zacur13/proyecto-aves | 498c7e9abef716b278a606324c8a207cffadc46b | 62134e3f5867e713a8bbd0a69cfa01a56fe63fad | refs/heads/master | 2021-08-23T00:16:44.263871 | 2017-12-01T21:04:42 | 2017-12-01T21:04:42 | 112,787,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | """AvesProyecto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"matheo-chantera@hotmail.com"
] | matheo-chantera@hotmail.com |
867a465d139fd1c55cdc38f9b43be2ff95796c18 | f8666599b83d34c861651861cc7db5b3c434fc87 | /plotly/graph_objs/scatterternary/__init__.py | 661912d2d40112f9b5bd08423f0728686cc78db3 | [
"MIT"
] | permissive | mode/plotly.py | 8b66806e88c9f1820d478bab726f0bea81884432 | c5a9ac386a40df2816e6c13264dadf14299401e4 | refs/heads/master | 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 | MIT | 2019-11-13T23:03:22 | 2016-06-03T19:34:55 | Python | UTF-8 | Python | false | false | 434 | py | from ._unselected import Unselected
from plotly.graph_objs.scatterternary import unselected
from ._textfont import Textfont
from ._stream import Stream
from ._selected import Selected
from plotly.graph_objs.scatterternary import selected
from ._marker import Marker
from plotly.graph_objs.scatterternary import marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.scatterternary import hoverlabel
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
595087eb3b63686a2d60c00914f72ffeb145cc4e | 31c682e52ae4a947c4b284bcc58adb56e71ef1d9 | /src/model/neural.py | 38ea174df6bc3f634626b69c7e6f834114bb6368 | [
"MIT"
] | permissive | dkfrankandersen/ITU-BSc-Thesis | 5052fb1644953e651d3c452fa28633189f17da6c | d86dab2050966a65e8b81cd57dfcc0508e280543 | refs/heads/master | 2022-12-13T14:43:18.924220 | 2020-09-01T11:44:15 | 2020-09-01T11:44:15 | 291,978,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,417 | py | """
-------------------------------
neural: Neural Network wrapper.
-------------------------------
"""
import numpy as np
import tensorflow as tf
from keras import losses
from keras.backend.tensorflow_backend import set_session, clear_session, get_session
from keras.layers import Dense, Conv2D, BatchNormalization, Input, Flatten
from keras.layers.core import Activation
from keras.optimizers import SGD
from keras.models import Model, clone_model
from keras.initializers import random_uniform, random_normal
from keras.regularizers import l2
from keras.utils import get_custom_objects
from keras.utils.vis_utils import plot_model
from keras.layers import LeakyReLU
from model.residual import Residual
from config import Config
def softmax_cross_entropy_with_logits(y_bool, y_pred):
zeros = tf.zeros(shape=(tf.shape(y_bool)), dtype=tf.float32)
where_true = tf.equal(y_bool, zeros)
where_false = tf.fill(tf.shape(y_bool), -100.0)
pred = tf.where(where_true, where_false, y_pred)
return tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_bool, logits=pred)
def set_nn_config():
# Clean up from previous TF graphs.
tf.reset_default_graph()
clear_session()
get_custom_objects().update({"softmax_cross_entropy_with_logits": softmax_cross_entropy_with_logits})
# Config options, to stop TF from eating all GPU memory.
nn_config = tf.ConfigProto()
nn_config.gpu_options.per_process_gpu_memory_fraction = Config.MAX_GPU_FRACTION
nn_config.gpu_options.allow_growth = True
set_session(tf.Session(config=nn_config))
class NeuralNetwork:
"""
The dual policy network, which guides,
and is trained by, the MCTS algorithm.
"""
input_stacks = 0
def __init__(self, game, model=None):
self.game = game
if model:
self.input_layer(game)
self.model = model
self.model._make_predict_function()
if not self.model._is_compiled:
self.compile_model(self.model, game)
return
set_nn_config()
inp = self.input_layer(game)
# -=-=-=-=-=- Network 'body'. -=-=-=-=-=-
# First convolutional layer.
out = self.conv_layer(inp, Config.CONV_FILTERS, 3)
# Residual layers.
for _ in range(Config.RES_LAYERS):
out = Residual(Config.CONV_FILTERS, Config.CONV_FILTERS, out)
# -=-=-=-=-=- Policy 'head'. -=-=-=-=-=-
policy = self.policy_head(game, out)
# -=-=-=-=-=- Value 'head'. -=-=-=-=-=-
value = self.value_head(out)
self.model = Model(inputs=inp, outputs=[policy, value])
self.compile_model(self.model, game)
self.model._make_predict_function()
def conv_layer(self, inp, filters, kernel_size):
"""
Construct a 2D convolutional, rectified, batchnormalized
layer with the given input, filters and kernel size.
"""
out = Conv2D(filters, kernel_size=(kernel_size, kernel_size), strides=1, padding="same",
use_bias=Config.USE_BIAS,
kernel_regularizer=l2(Config.REGULARIZER_CONST))(inp)
out = BatchNormalization()(out)
out = LeakyReLU()(out)
return out
def get_initializer(self, min_val, max_val, inputs=10):
if Config.WEIGHT_INITIALIZER == "uniform":
return random_uniform(min_val, max_val)
if Config.WEIGHT_INITIALIZER == "normal":
return random_normal(min_val, 1/np.sqrt(inputs)) # Stddev = 1/sqrt(inputs)
def compile_model(self, model, game):
"""
Create relevant loss functions, weights and
optimizer, and compile the neural network model.
"""
game_name = type(game).__name__
# Policy head loss weights & loss function.
loss_weights = [0.5, 0.5]
loss_funcs = [softmax_cross_entropy_with_logits]
if game_name == "Latrunculi":
loss_funcs.append(losses.binary_crossentropy)
# Value head loss weights & loss function.
loss_funcs.append(losses.mean_squared_error)
# Stochastic Gradient Descent optimizer with momentum.
model.compile(optimizer=SGD(lr=Config.LEARNING_RATE,
decay=Config.WEIGHT_DECAY,
momentum=Config.MOMENTUM),
loss_weights=loss_weights,
loss=loss_funcs,
metrics=[softmax_cross_entropy_with_logits, "accuracy"])
def input_layer(self, game):
game_type = type(game).__name__
input_depth = 1
if game_type == "Latrunculi":
input_depth = 4
else:
input_depth = 2
self.input_stacks = input_depth
return Input((input_depth, game.size, game.size))
def policy_head(self, game, prev):
policy = self.conv_layer(prev, 2, 1)
game_type = type(game).__name__
if game_type == "Latrunculi":
# Split into...
# ...move policies.
policy_moves = Conv2D(4, kernel_size=3, strides=1, padding="same",
use_bias=Config.USE_BIAS, name="policy_head")
# ...delete captured pieces policy.
policy_delete = Conv2D(1, kernel_size=3, strides=1, padding="same",
use_bias=Config.USE_BIAS, name="policy_head2")(policy)
return policy_moves
else:
# Vector of probabilities for all squares.
policy = Flatten()(policy)
policy = Dense(game.size*game.size,
kernel_regularizer=l2(Config.REGULARIZER_CONST),
use_bias=Config.USE_BIAS, name="policy_head")(policy)
return policy
def value_head(self, prev):
value = self.conv_layer(prev, 1, 1)
# Flatten into linear layer.
value = Flatten()(value)
value = Dense(Config.CONV_FILTERS, use_bias=Config.USE_BIAS,
kernel_regularizer=l2(Config.REGULARIZER_CONST))(value)
value = LeakyReLU()(value)
# Final value layer. Linar layer with one output neuron.
value = Dense(1,
kernel_regularizer=l2(Config.REGULARIZER_CONST),
use_bias=Config.USE_BIAS)(value)
# Tanh activation, outputs probability of win/loss/draw as scalar value between -1 and 1.
value = Activation("tanh", name="value_head")(value)
return value
def save_as_image(self):
plot_model(self.model, to_file='../resources/model_graph.png', show_shapes=True)
def shape_input(self, inp):
reshaped = inp
if len(inp.shape) < 4:
size = self.game.size
reshaped = np.array([inp]).reshape((-1, self.input_stacks, size, size))
return reshaped
def copy_model(self, game):
model_copy = clone_model(self.model)
model_copy.build(self.input_layer(game))
self.compile_model(model_copy, game)
model_copy.set_weights(self.model.get_weights())
return model_copy
def log_flops(self):
"""
Function for testing FLOPS (Floating Operations Per Second)
for a specific network model.
Curtesy of: https://stackoverflow.com/a/47561171
"""
run_meta = tf.RunMetadata()
opts = tf.profiler.ProfileOptionBuilder.float_operation()
flops = tf.profiler.profile(get_session().graph, run_meta=run_meta, cmd='op', options=opts)
opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
params = tf.profiler.profile(get_session().graph, run_meta=run_meta, cmd='op', options=opts)
if flops is not None:
with open("../resources/flops_summary.txt", "w") as file:
file.write("FLOPS SUMMARY:\n")
file.write(str(flops.total_float_ops)+" FLOPS\n")
file.write(str(params.total_parameters) + " params.")
def evaluate(self, inp):
"""
Evaluate a given state 'image' using the network.
@param inp - Image/structured data for a state.
@returns (p, z)
- p: A 4D array of probabilities for each
available action in state. These values help
guide the MCTS search.
- z: A value indicating the expected outcome of
the game from the given state.
"""
shaped_input = self.shape_input(inp)
output = self.model.predict(shaped_input)
game_type = type(self.game).__name__
policy_moves = output[0][:]
if False:
self.log_flops()
if game_type == "Latrunculi":
policy_delete = output[1][:]
return ((policy_moves, policy_delete), output[2][:])
return policy_moves, output[1][:]
def train(self, inputs, expected_out):
"""
Train the network on a batch of data.
@param inputs - Numpy array of game 'images', i.e: game states.
@param expected_out - Numpy array of tuples with (terminal values
of inputted states, action/move probability distribution of inputted states).
"""
result = self.model.fit(inputs, expected_out, batch_size=Config.BATCH_SIZE, verbose=0,
epochs=Config.EPOCHS_PER_BATCH, validation_split=Config.VALIDATION_SPLIT)
return result.history
| [
"fand@itu.dk"
] | fand@itu.dk |
3b65b388d53c466d7a621dfd9a085f080b406564 | 05546a7729d0cbf6f4ae697bad7aec235d3d9504 | /www/judge/languages/rust.py | 306d1e8eff9b57faaf4f7ef5e2594fc95089451c | [] | no_license | riceluxs1t/algospot | 60c7b3ca6c1fa8bbdf5220b78496c0bf9969174f | 557bedd0031ff3e726578fbd899fa71435abc31a | refs/heads/master | 2021-01-19T03:02:20.714594 | 2016-12-25T04:26:09 | 2016-12-25T04:26:09 | 79,389,643 | 0 | 1 | null | 2017-01-18T22:08:06 | 2017-01-18T22:08:06 | null | UTF-8 | Python | false | false | 1,425 | py | import subprocess
from django.conf import settings
def system(cmd):
return subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
COMPILE_MEMORY_LIMIT = settings.JUDGE_SETTINGS['MINMEMORYSIZE']
LANGUAGE = "Rust"
EXT = "rs"
VERSION = system(["rustc", "--version"])[0].split("\n")[0]
ADDITIONAL_FILES = []
def setup(sandbox, source_code):
sandbox.write_file(source_code, "submission.rs")
compiled = sandbox.run("rustc -O submission.rs -o a.out",
stdout=".stdout",
stderr=".stderr",
time_limit=10,
memory_limit=COMPILE_MEMORY_LIMIT)
if compiled.split()[0] != "OK":
return {"status": "error",
"message": sandbox.read_file(".stderr")}
#sandbox.run("rm submission.cpp .stdin .stderr")
return {"status": "ok"}
def run(sandbox, input_file, time_limit, memory_limit):
result = sandbox.run("./a.out", stdin=input_file,
time_limit=time_limit,
memory_limit=memory_limit,
stdout=".stdout",
stderr=".stderr")
toks = result.split()
if toks[0] != "OK":
return {"status": "fail", "message": result, "verdict": toks[0] }
return {"status": "ok", "time": toks[1], "memory": toks[2], "output": ".stdout"}
| [
"wookayin@gmail.com"
] | wookayin@gmail.com |
193a32436711b86caa6109b48777c171a89f9862 | a1b0e86979f5cc7f00c9a65073a79c88fcddbb0f | /LiveProject/companyNameScrape.py | 2bacaa2730d32349d3374e74bc36e9bcd9fc02eb | [] | no_license | georgialoper/techAcademyCourseWork | 07025f804027473c12911cc0865e8f872cd8551c | 42eedcfd054fd574d1b55ed299fefc00968ac9b7 | refs/heads/master | 2021-06-20T22:10:54.645793 | 2017-04-06T20:25:05 | 2017-04-06T20:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import requests
from BeautifulSoup import BeautifulSoup
url = 'http://portlandtech.org/'
response = requests.get(url)
html = response.content
soup = BeautifulSoup(html)
table = soup.find('div', attrs={'id': 'fulllist'})
for row in table.findAll('li'):
for link in row.findAll('a'):
print link.text | [
"noreply@github.com"
] | noreply@github.com |
a118a1e83e9def0da9db511d4c9133740f9a5b18 | 221cada2354556fbb969f25ddd3079542904ef5d | /Leetcode/794.py | 3dbb0c842ec25e6a2dc1adf25ee07a5470c2690e | [] | no_license | syzdemonhunter/Coding_Exercises | 4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d | ca71572677d2b2a2aed94bb60d6ec88cc486a7f3 | refs/heads/master | 2020-05-24T11:19:35.019543 | 2019-11-22T20:08:32 | 2019-11-22T20:08:32 | 187,245,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | # https://leetcode.com/problems/valid-tic-tac-toe-state/
# T: O(n)
# S: O(1)
class Solution:
def isWin(self, board, c):
for i in range(3): # Row check
if board[i] == c*3:
return True
for i in range(3): # Column check
if board[0][i] == c and board[1][i] == c and board[2][i] == c:
return True
if board[0][0] == c and board[1][1] == c and board[2][2] == c or \
board[0][2] == c and board[1][1] == c and board[2][0] == c: # Diagonal check
return True
return False
def validTicTacToe(self, board: List[str]) -> bool:
count_X = count_O = 0
for i in range(3):
for j in range(3):
count_X += 1 if board[i][j] == 'X' else 0
count_O += 1 if board[i][j] == 'O' else 0
if count_O > count_X or count_X > count_O + 1:
return False
if count_O == count_X and self.isWin(board, 'X') or \
count_X == count_O + 1 and self.isWin(board, 'O'):
return False
return True
| [
"syzuser60@gmail.com"
] | syzuser60@gmail.com |
07dd7e37fa0fb096b32c2870025f984da525a821 | b68aa412b36a13df9b08ff7d736e1a0803afa3d9 | /astrobject/instruments/catalogues.py | 2fdde997d218a6b8c77dec062a4ed5f05da139bb | [] | no_license | ufeindt/astrobject | 320b7837e1ae29e562f6f58e8287dcea9df4581a | f60824913a3bfe7fdedb68794dfb7fc69e28569b | refs/heads/master | 2021-01-18T02:55:28.487364 | 2016-12-02T15:28:11 | 2016-12-02T15:28:11 | 46,552,676 | 0 | 0 | null | 2015-11-20T09:33:58 | 2015-11-20T09:33:58 | null | UTF-8 | Python | false | false | 15,103 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from sncosmo import get_bandpass
from .baseinstrument import Catalogue, coordinates,units
# -- here load all the object that could be parsed
from ..utils.tools import kwargs_update
from ..utils.decorators import _autogen_docstring_inheritance, make_method
# ============================= #
# #
# Quick Catalogue Study #
# #
# ============================= #
@make_method(Catalogue)
def stellar_density(catalogue, mask=None,
                    angdist=0.1*units.degree):
    """ Measure a local stellar density for the catalogue entries.

    Parameters
    ----------
    catalogue: [Catalogue]
        the catalogue whose stellar density you want.

    mask: [bool-array] -optional-
        boolean array, for instance generated by the `get_mask`
        catalogue method. If None (default) the stars-only mask is
        used. A catmag_mask could for instance be a great idea.

    angdist: [astropy angle Quantity] -optional-
        angular radius within which neighbors are counted
        (default 0.1 degree).

    Return
    ------
    array (per selected entry, the number of selected entries lying
    within `angdist` of it; each entry matches itself, so values >= 1)
    """
    if mask is None:
        mask = catalogue.get_mask(stars_only=True)
    star_ra, star_dec = catalogue.get(["ra", "dec"], mask=mask)
    coords = coordinates.SkyCoord(ra=star_ra, dec=star_dec, unit="deg")
    # search_around_sky(self, self) returns (idx_self, idx_match, ...);
    # counting occurrences of the first index gives neighbors per entry.
    neighbor_idx = coords.search_around_sky(coords, angdist)[0]
    return np.bincount(neighbor_idx)
#################################
# #
# All Sky GAIA: Catalogue #
# #
#################################
def fetch_gaia_catalogue(center, radius, extracolumns=[], column_filters={}, **kwargs):
    """ query the gaia catalogue through Vizier (I/337, DR1) using astroquery.
    This function requires an internet connection.

    Parameters
    ----------
    center: [string] 'ra dec'
        position of the center of the catalogue to query.

    radius: [string] 'value unit'
        radius of the region to query. For instance '1d' means a
        1 degree radius

    extracolumns: [list-of-string] -optional-
        Add extra column from the I/337 catalogue that will be added to
        the basic query (default: position, ID, magnitudes)

    column_filters: [dict] -optional-
        Selection criterium for the queried catalogue.

    **kwargs goes to astroquery.vizier.Vizier

    Returns
    -------
    GAIACatalogue (child of Catalogue)
    """
    try:
        from astroquery import vizier
    except ImportError:
        # astroquery is an optional dependency, only needed for online queries
        raise ImportError("install astroquery. (pip install astroquery)")

    # Basic Info
    # --------------
    # position, id, duplicate flag, G-band flux/magnitude and variability
    columns = ["RA_ICRS","DE_ICRS","e_RA_ICRS","e_DE_ICRS","Source","Dup",
               "o_<Gmag>","<FG>","e_<FG>","<Gmag>","Var"]
    columns = columns+extracolumns

    column_quality = {} # No default quality cut yet
    c = vizier.Vizier(catalog="I/337/gaia", columns=columns,
                      column_filters=kwargs_update(column_quality,**column_filters),
                      **kwargs)
    c.ROW_LIMIT = "unlimited"
    # first (only) table of the query result
    t = c.query_region(center,radius=radius).values()[0]

    cat = GAIACatalogue(empty=True)
    cat.create(t.columns ,None,
               key_ra="RA_ICRS",key_dec="DE_ICRS")
    return cat
class GAIACatalogue( Catalogue ):
    """ Catalogue of Gaia (DR1, Vizier I/337) point sources. """
    source_name = "Gaia"

    def __init__(self, catalogue_file=None, empty=False,
                 key_mag="__Gmag_", key_magerr="__e_Gmag_",
                 key_ra=None, key_dec=None, **kwargs):
        """ Build the Gaia catalogue, optionally loading it from file.

        Parameters
        ----------
        catalogue_file: [string or None] -optional-
            file to load the catalogue from (None means no loading).

        empty: [bool] -optional-
            if True, only the structure is built; nothing is loaded.

        key_mag, key_magerr: [string] -optional-
            data keys of the magnitude and its error
            (defaults presumably map to Vizier's '<Gmag>' column after
            name sanitization -- TODO confirm).

        key_ra, key_dec: [string or None] -optional-
            data keys of the coordinates.

        **kwargs goes to self.load()
        """
        self.__build__(data_index=0,key_mag=key_mag,
                       key_magerr=key_magerr,key_id="Source",
                       key_ra=key_ra,key_dec=key_dec)
        if empty:
            return
        if catalogue_file is not None:
            self.load(catalogue_file,**kwargs)

    @_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
    def set_mag_keys(self,key_mag,key_magerr):
        #
        # add lbda def
        #
        super(GAIACatalogue,self).set_mag_keys(key_mag,key_magerr)
        # Guard against key_mag=None (consistent with SDSSCatalogue);
        # previously `"G" in None` raised a TypeError.
        if key_mag is not None and "G" in key_mag:
            # effective wavelength of the Gaia G band, in Angstrom
            self.lbda = 6730
#################################
# #
# BASIC SDSS: Catalogue #
# #
#################################
def fetch_sdss_catalogue(center, radius, extracolumns=[],column_filters={"rmag":"5..25"},**kwargs):
    """ query online sdss-catalogue in Vizier (V/139, DR9) using astroquery.
    This function requires an internet connection.

    Parameters
    ----------
    center: [string] 'ra dec'
        position of the center of the catalogue to query.

    radius: [string] 'value unit'
        radius of the region to query. For instance '1d' means a
        1 degree radius

    extracolumns: [list-of-string] -optional-
        Add extra column from the V/139 catalogue that will be added to
        the basic query (default: position, ID, object-type, magnitudes)

    column_filters: [dict] -optional-
        Selection criterium for the queried catalogue.

    **kwargs goes to astroquery.vizier.Vizier

    Returns
    -------
    SDSSCatalogue (child of Catalogue)
    """
    from .sdss import SDSS_INFO
    try:
        from astroquery import vizier
    except ImportError:
        # astroquery is an optional dependency, only needed for online queries
        raise ImportError("install astroquery. (pip install astroquery)")

    # -----------
    # - DL info
    # object class, id, position -- magnitude columns added per band below
    columns = ["cl","objID",#"SDSS9",
               "RAJ2000","e_RAJ2000","DEJ2000","e_DEJ2000",
               #"ObsDate","Q"#"mode",
               ]
    for band in SDSS_INFO["bands"]:
        columns.append("%smag"%band)
        columns.append("e_%smag"%band)
    columns = columns+extracolumns
    # default quality cuts: primary detections ('mode') with good quality 'Q'
    column_quality = {"mode":"1","Q":"2.3"}
    # - WARNING: some band magnitudes were found missing when too many columns are requested
    c = vizier.Vizier(catalog="V/139", columns=columns,
                      column_filters=kwargs_update(column_quality,**column_filters),
                      **kwargs)
    c.ROW_LIMIT = "unlimited"
    # first (only) table of the query result
    t = c.query_region(center,radius=radius).values()[0]

    cat = SDSSCatalogue(empty=True)
    cat.create(t.columns,None,
               key_class="cl",value_star=6,key_id="objID",
               key_ra="RAJ2000",key_dec="DEJ2000")
    return cat
# ------------------- #
# - SDSS CATALOGUE - #
# ------------------- #
class SDSSCatalogue( Catalogue ):
    """ Catalogue of SDSS (DR9, Vizier V/139) sources. """
    source_name = "SDSS"

    def __init__(self, catalogue_file=None,empty=False,
                 value_star=6,key_mag=None,key_magerr=None,
                 key_ra=None,key_dec=None,**kwargs):
        """ Build the SDSS catalogue, optionally loading it from file.

        Parameters
        ----------
        catalogue_file: [string or None] -optional-
            file to load the catalogue from (None means no loading).

        empty: [bool] -optional-
            if True, only the structure is built; nothing is loaded
            and no star-id is set.

        value_star: [int] -optional-
            value of the 'cl' key flagging stars (6 = star in SDSS).

        key_mag, key_magerr: [string or None] -optional-
            data keys of the magnitude and its error.

        key_ra, key_dec: [string or None] -optional-
            data keys of the coordinates.

        **kwargs goes to self.load()
        """
        self.__build__(data_index=2,key_mag=key_mag,
                       key_magerr=key_magerr,key_id="objID",
                       key_ra=key_ra,key_dec=key_dec)
        if empty:
            return
        if catalogue_file is not None:
            self.load(catalogue_file,**kwargs)
        # bugfix: use the `value_star` parameter (it used to be
        # ignored, the value 6 being hard-coded here).
        self.set_starsid("cl",value_star)

    @_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
    def set_mag_keys(self,key_mag,key_magerr):
        #
        # add lbda def
        #
        super(SDSSCatalogue,self).set_mag_keys(key_mag,key_magerr)
        if key_mag is not None:
            # key_mag like 'rmag' -> sncosmo bandpass 'sdssr'
            bandpass = get_bandpass("sdss%s"%key_mag[0])
            self.lbda = bandpass.wave_eff
#################################
# #
# BASIC 2MASS: Catalogue #
# #
#################################
def fetch_2mass_catalogue(center,radius,extracolumns=[],
                          column_filters={"Jmag":"5..30"},**kwargs):
    """ query online 2mass-catalogue in Vizier (II/246) using astroquery.
    This function requires an internet connection.

    Parameters
    ----------
    center: [string] 'ra dec'
        position of the center of the catalogue to query.

    radius: [string] 'value unit'
        radius of the region to query. For instance '1d' means a
        1 degree radius

    extracolumns: [list-of-string] -optional-
        Add extra column from the II/246 catalogue that will be added to
        the basic query (default: position, ID, magnitudes)

    column_filters: [dict] -optional-
        Selection criterium for the queried catalogue.

    **kwargs goes to astroquery.vizier.Vizier

    Returns
    -------
    MASSCatalogue (child of Catalogue)
    """
    try:
        from astroquery import vizier
    except ImportError:
        # astroquery is an optional dependency, only needed for online queries
        raise ImportError("install astroquery. (pip install astroquery)")

    # -----------
    # - DL info
    # id, position -- magnitude columns added per band below
    columns = ["2MASS",
               "RAJ2000","DEJ2000",
               ]
    for band in ["J","H","K"]:
        columns.append("%smag"%band)
        columns.append("e_%smag"%band)
    columns = columns+extracolumns
    # - WARNING: some band magnitudes were found missing when too many columns are requested
    c = vizier.Vizier(catalog="II/246", columns=columns, column_filters=column_filters,
                      **kwargs)
    # NOTE(review): the other fetchers use ROW_LIMIT = "unlimited";
    # kept at 1e5 here to preserve the existing behavior.
    c.ROW_LIMIT = 100000
    try:
        t = c.query_region(center,radius=radius).values()[0]
    except Exception:
        # narrowed from a bare 'except:' (which also caught SystemExit
        # and KeyboardInterrupt); typically a network failure.
        raise IOError("Error while querying the given coords. You might not have an internet connection")

    cat = MASSCatalogue(empty=True)
    cat.create(t.columns,None,
               key_class="PointSource",value_star=None,
               key_ra="RAJ2000",key_dec="DEJ2000")
    return cat
# ------------------- #
# - 2MASS CATALOGUE - #
# ------------------- #
class MASSCatalogue( Catalogue ):
"""
"""
source_name = "2MASS"
def __init__(self, catalogue_file=None,empty=False,
key_mag=None,key_magerr=None,key_ra=None,key_dec=None,**kwargs):
"""
"""
self.__build__(data_index=2,key_mag=key_mag,
key_magerr=key_magerr,
key_ra=key_ra,key_dec=key_dec)
if empty:
return
if catalogue_file is not None:
self.load(catalogue_file,**kwargs)
@_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
def set_mag_keys(self,key_mag,key_magerr):
#
# add lbda def
#
super(MASSCatalogue,self).set_mag_keys(key_mag,key_magerr)
if key_mag is not None:
if key_mag == "Jmag":
self.lbda = 12350
elif key_mag == "Hmag":
self.lbda = 16620
elif key_mag == "Kmag":
self.lbda = 21590
else:
raise ValueError("'%s' is not a recognized 2MASS band")
# ----------------------- #
# - CATALOGUE HACK - #
# ----------------------- #
@property
def mag(self):
if not self._is_keymag_set_(verbose=False):
print "No 'key_mag' defined. J band used by default. -> To change: set_mag_keys() "
self.set_mag_keys("Jmag","e_Jmag")
return super(MASSCatalogue,self).mag
# ------------------------------
# - All points are Point Sources
@property
def _objecttype(self):
print "All Loaded data are %s"%self._build_properties["key_class"]
return np.ones(self.nobjects)
@property
def starmask(self):
""" This will tell which of the datapoints is a star
Remark, you need to have defined key_class and value_star
in the __build_properties to be able to have access to this mask
==> In 2MASS PointSource catalogue, all data are stars
"""
return np.ones(self.nobjects_in_fov,dtype="bool") #not self.fovmask already in objecttype
#################################
# #
# BASIC WISE: Catalogue #
# #
#################################
def fetch_wise_catalogue(center,radius,extracolumns=[],column_filters={"Jmag":"5..30"}):
""" query online wise-catalogue in Vizier (II/328) using astroquery.
This function requieres an internet connection.
Parameters
----------
center: [string] 'ra dec'
position of the center of the catalogue to query.
radius: [string] 'value unit'
radius of the region to query. For instance '1d' means a
1 degree raduis
extracolumns: [list-of-string] -optional-
Add extra column from the II/328 catalogue that will be added to
the basic query (default: position, ID, magnitudes)
column_filters: [dict] -optional-
Selection criterium for the queried catalogue.
**kwargs goes to astroquery.vizier.Vizier
Returns
-------
WISECatalogue (child of Catalogue)
"""
try:
from astroquery import vizier
except:
raise ImportError("install astroquery. (pip install astroquery)")
# -----------
# - DL info
columns = ["AllWISE","ID",
"RAJ2000","DEJ2000",
]
for band in ["J","H","K","W1","W2","W3","W4"]:
columns.append("%smag"%band)
columns.append("e_%smag"%band)
columns = columns+extracolumns
# - WARNING if discovered that some of the bandmag were missing if too many colums requested
c = vizier.Vizier(catalog="II/328", columns=columns, column_filters=column_filters,
**kwargs)
c.ROW_LIMIT = 100000
try:
t = c.query_region(center,radius=radius).values()[0]
except:
raise IOError("Error while querying the given coords. You might not have an internet connection")
cat = WISECatalogue(empty=True)
cat.create(t.columns,None,
key_class="ToBeDone",value_star=None,
key_ra="RAJ2000",key_dec="DEJ2000")
return cat
# ------------------- #
# - WISE CATALOGUE - #
# ------------------- #
class WISECatalogue( Catalogue ):
"""
"""
source_name = "WISE"
def __init__(self, catalogue_file=None,empty=False,
key_mag=None,key_magerr=None,key_ra=None,key_dec=None,**kwargs):
"""
"""
print "STAR vs. GALAXY PARSING NOT READY YET"
self.__build__(data_index=2,key_mag=key_mag,
key_magerr=key_magerr,
key_ra=key_ra,key_dec=key_dec)
if empty:
return
if catalogue_file is not None:
self.load(catalogue_file,**kwargs)
@_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
def set_mag_keys(self,key_mag,key_magerr):
#
# add lbda def
#
super(WISECatalogue,self).set_mag_keys(key_mag,key_magerr)
if key_mag is not None:
self.lbda = "TO BE DEFINED"
@property
def mag(self):
if not self._is_keymag_set_(verbose=False):
print "No 'key_mag' defined. W1 band used by default. -> To change: set_mag_keys() "
self.set_mag_keys("W1mag","e_W1mag")
return super(WISECatalogue,self).mag
| [
"mick.rigault@gmail.com"
] | mick.rigault@gmail.com |
fa813f9e8dbe06ff9eaa964817ab6545a98fec69 | b6964dfe5c0d49711d406319d9c093733b6abf1c | /code/src/NGLib/python_out/bake_me.py | 359b214ae20c947fd4bb9998aa63b286cef981d6 | [] | no_license | evertheylen/MachinesEnBerekenbaarheid | 73d948a1c0023211dd3c55a9132a4caca884924b | 5cd5104b7433a30480dc7b87b02dfa10ec2cdf33 | refs/heads/master | 2021-01-11T05:06:44.033969 | 2016-02-05T13:00:57 | 2016-02-05T13:00:57 | 44,000,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py |
# separate file because this might get complex
import platform
dependencies["headers"] = [
"NGLib/outputter>>headers",
"libs/tinyxml>>headers",
]
gcc_config = {
"pre_extra": "-Wno-deprecated -I%s -Ilibs/boost/b2stuff/include/"%project_config["python_include_dir"],
#"post_extra": "-pthread -Wl,-Bdynamic -lutil -ldl -lm -Wl,-Bstatic -Llibs/boost/b2stuff/lib/ -lboost_python3 -Wl,-Bdynamic -l%s"%project_config["python_lib"]
}
| [
"evertheylen@gmail.com"
] | evertheylen@gmail.com |
39b66afc5455a6ee774e865e8558fee80c0280a5 | 6a7d1a090a05e42eed759ca033a7a53809ff8456 | /image/test/test_image_api.py | 54af50e89111ec0161e8420baf539ce85b65d306 | [] | no_license | edetec/image-gallery-be | bec69372a2804ff4666acfeebd6802a8add7b5f4 | f25ec557e179b6d67098ddc05a7b4c9ab57c4b6b | refs/heads/master | 2022-12-06T11:39:24.352354 | 2020-08-24T02:24:32 | 2020-08-24T02:24:32 | 289,008,221 | 0 | 0 | null | 2020-08-24T02:24:33 | 2020-08-20T13:04:16 | Python | UTF-8 | Python | false | false | 4,648 | py | import uuid
from http import HTTPStatus
from pathlib import Path
import pytest
from image.models import ImageModel
@pytest.yield_fixture()
def images(session):
image1 = ImageModel(
file_path="file1.jpg",
thumbnail_path="thumbnail/file1.jpg",
description="description file 1",
format="jpg",
dimensions="800x600")
image2 = ImageModel(
file_path="file2.jpg",
thumbnail_path="thumbnail/file2.jpg",
description="description file 2",
format="jpg",
dimensions="800x600")
image3 = ImageModel(
file_path="file3.jpg",
thumbnail_path="thumbnail/file3.jpg",
description="description file 3",
format="jpg",
dimensions="900x600")
session.add(image1)
session.add(image2)
session.add(image3)
yield image1
for image in ImageModel.query.all():
session.delete(image)
@pytest.yield_fixture
def photo1():
file_name = 'photo 1.png'
file_path = Path(__file__).parent.joinpath('assets', file_name)
with open(file_path, 'rb') as file:
yield file, file_name
@pytest.yield_fixture
def photo2():
file_name = 'photo 1.png'
file_path = Path(__file__).parent.joinpath('assets', file_name)
with open(file_path, 'rb') as file:
yield file, file_name
@pytest.yield_fixture
def tmp_app_dir(test_client, tmpdir):
config = test_client.application.config
ROOT_DIR = config['ROOT_DIR']
config['ROOT_DIR'] = tmpdir
Path(tmpdir).joinpath(config['IMAGES_PATH']).mkdir(parents=True)
yield tmpdir
config['ROOT_DIR'] = ROOT_DIR
def test_get_all_images(test_client, images, data_regression):
response = test_client.get("/api/v1/image/")
assert response.status_code == HTTPStatus.OK
data_regression.check(response.json)
def test_get_images_with_filter_param(test_client, images, data_regression):
filter_param = "file 1"
response = test_client.get(f"/api/v1/image/?description={filter_param}")
assert response.status_code == HTTPStatus.OK
assert len(response.json) == 1
data_regression.check(response.json)
def test_get_all_image_dimensions(test_client, images, data_regression):
response = test_client.get(f"/api/v1/image/dimensions/")
assert response.status_code == HTTPStatus.OK
data_regression.check(response.json)
def test_image_upload(test_client, photo1, tmp_app_dir, monkeypatch, data_regression):
monkeypatch.setattr(uuid, "uuid4", lambda: 'unique-file-name')
response = test_client.post('/api/v1/image/', buffered=True,
content_type='multipart/form-data',
data={
'description': 'Photo 1 description',
'file': photo1
})
assert response.status_code == HTTPStatus.CREATED
image_dict = response.json
assert Path(tmp_app_dir).joinpath(image_dict['file_path']).exists()
assert Path(tmp_app_dir).joinpath(image_dict['thumbnail_path']).exists()
data_regression.check(image_dict)
def test_image_update(test_client, photo1, photo2, tmp_app_dir, monkeypatch, data_regression):
response = test_client.post('/api/v1/image/', buffered=True,
content_type='multipart/form-data',
data={
'description': 'Photo 2 description',
'file': photo1
})
assert response.status_code == HTTPStatus.CREATED
image1_dict = response.json
assert Path(tmp_app_dir).joinpath(image1_dict['file_path']).exists()
assert Path(tmp_app_dir).joinpath(image1_dict['thumbnail_path']).exists()
image1_id = image1_dict['id']
monkeypatch.setattr(uuid, "uuid4", lambda: 'unique-file-name')
response = test_client.put(f'/api/v1/image/{image1_id}', buffered=True,
content_type='multipart/form-data',
data={
'description': 'Photo 2 description',
'file': photo2
})
assert response.status_code == HTTPStatus.OK
image_dict = response.json
assert Path(tmp_app_dir).joinpath(image_dict['file_path']).exists()
assert Path(tmp_app_dir).joinpath(image_dict['thumbnail_path']).exists()
assert not Path(tmp_app_dir).joinpath(image1_dict['file_path']).exists()
assert not Path(tmp_app_dir).joinpath(image1_dict['thumbnail_path']).exists()
data_regression.check(image_dict)
| [
"eder@esss.co"
] | eder@esss.co |
04eafc67675909eccd0f244b0a096ccc4e5420a6 | 035b1e04b06ef90023cf224de3b0bfa55dbdde74 | /Class4/venv/Scripts/easy_install-3.5-script.py | 7d41db120019363710078e864b427c2e562d7c82 | [] | no_license | jlmoldan/pythonclass | 47845aa0e53c33dd817324d0a8731e96bcd8475a | 58db089e4084c1659aae632f5e5ebe8774d558c8 | refs/heads/master | 2020-03-15T13:01:25.893839 | 2018-05-11T20:27:30 | 2018-05-11T20:27:30 | 132,156,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!C:\Users\jlmoldan\PycharmProjects\Class4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.5')()
)
| [
"jlmoldan@stthomas.edu"
] | jlmoldan@stthomas.edu |
238ac565cdf12992f80ce83da24f806d520de596 | e8202ff4594f8d4f9834d6be165b39a8fc3d437d | /week8/codingbat/5_logic1/4.py | 836839f334d36ffc780055952acbcbe68817f006 | [] | no_license | as1k/webdev-labs | 93e939999f38520fc701e245341fee13dfdd8cb2 | 393bf4ffdae8d60640a54efabce62db4ba76eeb8 | refs/heads/master | 2022-04-14T15:38:15.033878 | 2020-04-16T22:12:17 | 2020-04-16T22:12:17 | 237,159,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | def caught_speeding(speed, is_birthday):
if is_birthday==False:
if speed<=60:
return 0
if speed>=61 and speed<=80:
return 1
if speed>=81:
return 2
else:
if speed<=65:
return 0
if speed>=66 and speed<=85:
return 1
if speed>=86:
return 2
| [
"nurmukhambet.ak@gmail.com"
] | nurmukhambet.ak@gmail.com |
ce7e45ab87739c913d655d6123914c9bd55130fd | 578082323428bed9016bbf3931f53034b20aff80 | /users/migrations/0001_initial.py | 586d3b48a44c4dbc24b932a7002b5ac5f0d8ff8e | [] | no_license | bazhe1897/blog_project | 34dbe3608404d536d2def3515a080fb6e4c657a1 | 394e37925b9a5e6ca5817b7d7d847ac45fbe9c28 | refs/heads/main | 2023-08-30T09:23:43.768828 | 2021-10-14T15:51:18 | 2021-10-14T15:51:18 | 409,693,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | # Generated by Django 3.2.6 on 2021-09-22 19:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"blagojaangov@gmail.com"
] | blagojaangov@gmail.com |
bc5f511362b6c2b31b8f889d0bc798730eb19ee9 | 18f069d9a20ed3db981b04f44db99af39cd2664d | /build_index.py | 7d8a67702c2585e1584bb268ba26a31f83ba5f9a | [] | no_license | WeAreWizards/pypidata | 511ccac1ad2d9da630424935381e650afe4967ed | 48071c15d8294ad340a352cdff7a208fe6abe336 | refs/heads/master | 2016-09-07T11:38:59.547812 | 2014-12-14T10:11:11 | 2014-12-14T10:11:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | """Transform the scraped pypi data into a pandas DataFrame for later
consumption.
$ nix-shell -p pythonPackages.nose pythonPackages.protobuf \
pythonPackages.setuptools pythonPackages.pandas \
pythonPackages.tables pythonPackages.ipython
"""
import raw_data_pb2
import pandas
import glob
import json
def emit_json_data():
for path in glob.glob('./raw/*'):
with open(path) as f:
rd = raw_data_pb2.RawData()
rd.ParseFromString(f.read())
if '<h1>Not Found</h1>' in rd.package_json:
print "Not found: {}".format(rd.package_name)
continue
try:
info = json.loads(rd.package_json)
except ValueError:
print "Could not decode", rd.package_json
yield rd, info
def emit_package_records():
for rd, info in emit_json_data():
license = info['info'].get('license', 'n/a')
# name can be different from package identifier
name = info['info'].get('name', 'n/a')
yield (
rd.retrieved_timestamp,
rd.package_name,
license,
name,
)
def emit_version_records():
for rd, info in emit_json_data():
for version, releases in info['releases'].items():
for release in releases:
yield (
rd.retrieved_timestamp,
rd.package_name,
version,
release.get('downloads'),
release.get('md5_digest'),
release.get('packagetype'),
release.get('size'),
)
def main():
package_df = pandas.DataFrame.from_records(
emit_package_records(),
columns=['timestamp', 'pypi_name', 'license', 'name'],
)
version_df = pandas.DataFrame.from_records(
emit_version_records(),
columns=['timestamp', 'pypi_name', 'version',
'downloads', 'md5_digest',
'packagetype', 'size'],
)
h5 = pandas.HDFStore('./pypi.h5', mode='w')
h5['package_df'] = package_df
h5['version_df'] = version_df
h5.close()
if __name__ == '__main__':
main()
| [
"tehunger@gmail.com"
] | tehunger@gmail.com |
9d4c4c45c5a09a003e9693ea88c39e532c229da9 | ffb373415060a3f943d30e624710df3972ca0bcf | /manage.py | 0530080fc5f643fdc8e396b5ccdea3ca6800e179 | [] | no_license | LexLongcoding/pop_print | 55acf5fface31ae9b0d8126d5ef8eb554d7abcc9 | 612cf41d7afb6db675d6abba2d91c1589acdc339 | refs/heads/master | 2023-04-02T02:38:05.081230 | 2021-04-07T20:24:03 | 2021-04-07T20:24:03 | 352,515,446 | 0 | 1 | null | 2021-04-07T18:33:30 | 2021-03-29T04:26:56 | HTML | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Pop_Print.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"DevLexLong@gmail.com"
] | DevLexLong@gmail.com |
0e758e5dd750f1d26d9a8be05715980a3cd69a75 | 2983821358fff7ec26e8a88565f0a380ab8eac66 | /manage.py | 393219102e836343fbb63885fe31660f00bf3c24 | [] | no_license | Camarbro/POS | c8733ee17f2db2bc3a4f46f3819aaef7dbad4836 | 09dbd021afad0ccd2158a6e5b233e4b7d8d0a04f | refs/heads/master | 2021-01-17T16:41:41.305330 | 2016-08-10T06:07:27 | 2016-08-10T06:07:27 | 65,355,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Susanas.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"Postmaster@automovilismoonline.com"
] | Postmaster@automovilismoonline.com |
43677b98b8f8f7e0e4283394cf75e03f9aa196b2 | 1358257d86019a9232dba7571fedbfe938352f9f | /LibraryManagement/apps.py | 9d4149239887f6e7f3e0ea534744cfb8d7c6cb98 | [] | no_license | adeelehsan/LibraryManagementSystem | e7de727defe1d00c9332254bb0ef64d28a7fb2d3 | 68e5be7fb5a26607eed62dd67a9c38bc3b91bf97 | refs/heads/master | 2021-01-01T04:09:08.085846 | 2017-07-16T13:20:13 | 2017-07-16T13:20:13 | 97,133,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class LibrarymanagementConfig(AppConfig):
name = 'LibraryManagement'
| [
"adeel.ehsan@arbisoft.com"
] | adeel.ehsan@arbisoft.com |
5b9f15b5a722739f3678256572c434c7ca5c4d66 | 51f36a1d97c6520cd614267d301665c9d0909ebf | /Keras_1_fitGenerate/predict.py | 3883e89fc39b23a0038ae571f486555f7eca6175 | [
"MIT"
] | permissive | sunwoo76/Polyp-Segmentation | f7d2f0378af013da9dc72bc667a9704ff66499aa | 764a62dd8de134e462939aafb0c1c24d67dfb564 | refs/heads/master | 2022-04-09T17:37:56.618034 | 2020-03-06T18:44:29 | 2020-03-06T18:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,934 | py | import os
"""
# If you have multi-gpu, designate the number of GPU to use.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
"""
import argparse
import logging
import cv2
from skimage import io
import skimage.transform as transform
import numpy as np
import segmentation_models as sm
from eval import get_dice
def predict_img(net,
full_img,
resizing=384,
out_threshold=0.5):
img = transform.resize(full_img, (resizing,resizing), anti_aliasing=True)
img = np.expand_dims(img, 0)
output = net.predict(img)
output = output.squeeze()
return output > out_threshold
def get_args():
parser = argparse.ArgumentParser(description='Predict masks from input images',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', '-m', default='efficientnetb1', # resnet50, densnet169,... possible.
metavar='FILE',
help="backcone name")
parser.add_argument('--weight', '-w', default='./EffiB1U_checkpoints/weight_name...', # put in the 'h5'file name
metavar='FILE',
help="Specify the file in which the weight stored")
parser.add_argument('--input', '-i', metavar='INPUT', nargs='+',
help='filenames of input images', default= "../data/test2/imgs/CVC_Original")
#"../data/test2/imgs/CVC_Original" , "../data/test1/imgs/ETIS_imgs_png"
#"../data/test2/masks/CVC_Ground Truth", "../data/test1/masks/ETIS_Ground Truth_png"
parser.add_argument('--target', '-tar', metavar='TARGET', nargs='+',
help='filenames of input masks', default= "./data/test2/masks/CVC_Ground Truth")
parser.add_argument('--output', '-o', metavar='OUTPUT', nargs='+', default="./results/U_Dens169_result/test2",
help='Filenames of ouput images')
parser.add_argument('--no-save', '-n', action='store_true',
help="Do not save the output masks",
default=False)
parser.add_argument('--mask-threshold', '-t', type=float,
help="Minimum probability value to consider a mask pixel white",
default=0.5)
parser.add_argument('--resizing', '-s', type=float,
help="Scale factor for the input images",
default=384)
return parser.parse_args()
def get_output_filenames(args):
in_files = os.listdir(args.input)
in_files.sort()
out_files = []
for f in in_files:
pathsplit = os.path.splitext(f)
out_files.append(args.output + "/{}_OUT{}".format(pathsplit[0], pathsplit[1]))
if len(in_files) != len(out_files):
logging.error("Input files and output files are not of the same length")
raise SystemExit()
return out_files
def mask_to_image(mask):
return (mask * 255).astype(np.uint8)
if __name__ == "__main__":
args = get_args()
in_files = list()
in_files_name = os.listdir(args.input)
in_files_name.sort()
target_files = list()
target_names = os.listdir(args.target)
target_names.sort()
for i, in_file in enumerate(in_files_name):
in_file_path = os.path.join(args.input, in_file)
in_files.append(in_file_path)
target_file_path = os.path.join(args.target, target_names[i])
target_files.append(target_file_path)
out_files = get_output_filenames(args)
model = sm.Unet(args.model)
model.load_weights(args.weight)
# We need to copile model. but it is not important because we will predict not train. It is formulaic
model.compile(
"Adam",
loss=sm.losses.bce_dice_loss, # sm.losses.bce_jaccard_loss, # sm.losses.binary_crossentropy,
metrics=[sm.metrics.iou_score],
)
logging.info("Model loaded !")
total_num = len(in_files)
sum_dice = 0.0
for i, fn in enumerate(in_files):
logging.info("\nPredicting +image {} ...".format(fn))
img = io.imread(fn)
mask = predict_img(model, img, resizing=args.resizing)
if args.no_save:
out_fn = out_files[i]
result = mask_to_image(mask)
cv2.imwrite(out_files[i], result)
logging.info("Mask saved to {}".format(out_files[i]))
target = io.imread(target_files[i])
target = transform.resize(target, (args.resizing, args.resizing), anti_aliasing=True)
mask = np.expand_dims(mask, 0) # for calculate dice coefficient
target = np.expand_dims(target, 0) # for calculate dice coefficient
dice_score = get_dice(mask, target)
sum_dice += dice_score
print("Img : {0} - dice coeff : {1}".format(fn.split("/")[-1], dice_score))
print("Total average dice score : {}".format(sum_dice/total_num)) | [
"38250239+sunshower76@users.noreply.github.com"
] | 38250239+sunshower76@users.noreply.github.com |
93656dddb0943a181bcf8693aa865c1d33bc8aab | 5f32ae0e0c8fe1fd8b7fc500876c687209f22dce | /2019_11_01 Vorläufer bis dahin/2019_10_16 GWST Syn V1_0-R3.py | 20b0b1db493d78c0cf9c7d1870833786d788043d | [] | no_license | UlrichNielsen/gsw | 802399850b0ad04d2122d82819d90ad187d2c53d | ae4e3bc3531378af35f245fac973eb854860b55d | refs/heads/master | 2020-09-20T07:07:38.946058 | 2019-11-28T09:06:52 | 2019-11-28T09:06:52 | 224,407,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,224 | py | # Ziel ist es ein einfaches Zählprogramm auf Interrupt-Basis
# für den Gas-,Strom- und Wasserverbrauch
# Mess- und Speicherintervall == 6 min
# Die Daten werden auf den NAS-Server (Synology) gespeichert
# jeweils unter Tages-Datum von 6:00 bis 5:54 des Folgetages
# Endlosspeicherung auskommentiert (mit ## )
# Um die Interrupts nutzen zu können,
# ist eine aktuelle Version des Moduls RPi.GPIO notwendig.
import RPi.GPIO as GPIO
import time, _thread
import os, sys, csv
#datapath=/home/pi/shareSynol/IR-Lese/
#cp -v /dev/ttyUSB0 ${datapath}serialin.txt
def main():
# Peneral Purpose I/O behandeln
global channel_in # Eingang (Gas)
global LS_in # Line sense modul als Input-geber (Wasser)
global channel_out_r # Ausgang rote LED (Gas)
global channel_out_gr # Ausgang grüne LED (Gas)
global channel_out_b # Ausgang blaue LED (Wasser)
global channel_out_ge # Ausgang gelbe LED (Wasser)
global channel_out_we # Ausgang weisse LED (Lebenszeichen)
global channel_out_ro # Ausgang rote LED: Werte lesen (Temperatur)
global channel_out_ws # Ausgang weisse LED: Ruhephase (Temperatur)
global Counter_min # lfd. Zähler während der 6 min
global Counter_up # lfd. Zähler Flanke-up (Gas, Magnet-"Puls" bei 0 Durchgang)
global Counter_down # lfd. Zähler Flanke-down (Gas, Magnet-"Puls" bei 0 Durchgang)
global Cnt_6m # Zähler mit Ergebnis des jeweiligen 6 min-Takts
global Cnt_h # Zähler der Pulse i.d. Stunde (Summierung der angefallenen 6 min-Zähler
global Cnt_d # dasselbe für den Tag
global Cnt_W # dasselbe für die Woche
global Cnt_M # dasselbe für den Monat
global Cnt_Y # dasselbe für das Jahr
# Strom
global Strom_6m # Strom aus IR-Zähler
global Strom_6m_diff
global Strom_6m_vor
# Wasser
global Counter_W
global Counter_W_minus
global Cnt_W_6m
global Cnt_W_h
global Cnt_W_d
global Cnt_W_W
global Cnt_W_M
global Cnt_W_Y
global LfdDir, TagWe, Quelle
global Jahr, lfdJahr
global TdJ, lfdTdJ
global MdJ, lfdMdJ
global TdM, lfdTdM
global WdJ, lfdWdJ
global TdW, lfdTdW
global Std, lfdStd
global Min
global Voll # Muster für den "Füllgrad" des Intervalls (1 = noch nicht "voll")
global Filename
global FileWoWe
global File6bis554
global Flag_Anf6 # Flag für 6:00 Anfang
global d, f, l, s, f6 # fileidentifier
global Flag_up, Flag_down, Flanke_w_up, Flanke_w_down
# Global für vorhandene Temperatursensoren
global tempSensorBezeichnung
global tempSensorAnzahl
global tempSensorWert
global realSensorBezeichnung
# Initialisieren
# Gas
Counter_up = 0
Counter_down = 0
Counter_min = 0
# Wasser
Counter_W = 0 # lfd. Wasser-Zähler während der 6 min
# Strom
Strom_6m_vor = 0
# Maske für h, W, Monat, Jahr usw.
Voll = 11111
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False) # Keine Ausgabe von Warnungen
channel_in = 24 # GPIO-PIN 24 als Eingang Hall-Sonde (Gas)
LS_in = 17 # GPIO-PIN 17 als Eingang LS-Sonde (Wasser)
channel_out_r = 23 # GPIO-PIN 23 als Ausgang rote LED (Gas)
channel_out_gr = 18 # GPIO-PIN 18 als Ausgang grüne LED (Gas)
channel_out_b = 27 # GPIO-PIN 27 als Ausgang blaue LED (Wasser)
channel_out_ge = 22 # GPIO-PIN 22 als Ausgang gelbe LED (Wasser)
channel_out_we = 25 # Lebenszeichen
channel_out_ro = 16 # GPIO-PIN 16 als Ausgang rote LED (Temperatur)
channel_out_ws = 20 # GPIO-PIN 20 als Ausgang weisse LED (Temperatur)
GPIO.cleanup()
GPIO.setup(channel_out_r, GPIO.OUT) # Definition Output PIN rote LED (Gas)
GPIO.setup(channel_out_gr, GPIO.OUT) # Definition Output PIN grüne LED (Gas)
GPIO.setup(channel_out_b, GPIO.OUT) # Definition Output PIN blaue LED (Wasser)
GPIO.setup(channel_out_ge, GPIO.OUT) # Definition Output PIN gelbe LED (Wasser)
GPIO.setup(channel_out_we, GPIO.OUT) # Definition Output PIN weisse LED (Lebenszeichen)
GPIO.setup(channel_out_ro, GPIO.OUT) # Definition Output PIN rote LED (Temperatur)
GPIO.setup(channel_out_ws, GPIO.OUT) # Definition Output PIN weisse LED (Temperatur)
# Gas-Input: Pull-Down Widerstand aktivieren, um einen festen Pegel zu definieren
# Aussenwiderstand und 0,1 µF über Eingang
# (Widerstand gegen 3,3 V funktioniert nicht)
GPIO.setup(channel_in, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# Wasser LS-pin to input, use internal pull-up-resistor
GPIO.setup(LS_in,GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Erkennen einer Flankenänderung bei Gas
GPIO.add_event_detect(channel_in, GPIO.BOTH, Interrupt_H_L, bouncetime = 100)
# 100 msec Prellzeit
# Erkennen einer Flankenänderung bei Wasser
GPIO.add_event_detect(LS_in, GPIO.BOTH, Wasser_Interr, bouncetime=25)
# 25 msec Prellzeit
# Flanken Flip-Flops für Prell-Unterdrückung in ISR
Flag_up =0
Flag_down =0
Flanke_w_up =0
Flanke_w_down =0
id = _thread.get_ident()
# Gas
# initial Rot ausschalten (kein 8er Durchgang)
GPIO.output(channel_out_r, GPIO.LOW)
# initial Grün einschalten (warten auf 8er Durchgang)
GPIO.output(channel_out_gr, GPIO.HIGH)
# Wasser
# initial Blau ausschalten (kein Durchgang)
GPIO.output(channel_out_b, GPIO.LOW)
# initial Gelb einschalten (warten auf Durchgang)
GPIO.output(channel_out_ge, GPIO.HIGH)
# Temperatur
tempSensorBezeichnung = [] #Liste mit den einzelnen Sensoren-Kennungen
tempSensorAnzahl = 0 #INT für die Anzahl der gelesenen Sensoren
tempSensorWert = [] #Liste mit den einzelnen Sensor-Werten
realSensorBezeichnung = []
# initial weiß anschalten (neben Heizkreisverteiler) Ruhepause
GPIO.output(channel_out_we, GPIO.HIGH)
# initial rot ausschalten (neben Heizkreisverteiler) keine Messung
GPIO.output(channel_out_r, GPIO.LOW)
# Anzahl und Bezeichnungen der vorhandenen Temperatursensoren einlesen
ds1820einlesen() #Anzahl und Bezeichnungen der vorhandenen Temperatursensoren einlesen
# Lebenszeichen
#Initial Weiss einschalten
GPIO.output(channel_out_we, GPIO.HIGH)
#*********************************************************************
#
# Dateien erzeugen bzw. öffnen in eigenem Folder nicht auf NAS
# ausgeblendet mit #DS
# Daten-Struktur
# auch für csv-DB mit Strom
#
# 1. Zeitangaben
# - weitere Zeitangaben für die Darstellung aus time.strftime("%Y.%j.%m.%d.%H.%M.%V.%u")
# - Jahr Monat Tag time.strftime("%x")
# - Stunde Min Sec time.strftime("%X")
#
# 2. Werte in m³ (jedesmal)
# - Durchläufe (Counter_min) in den 6 min, d.h. counter_min*0,01 m³ Gasverbrauch
# - zugehöriger saldierter h-Wert (auf 0 gesetzt bei Beginn einer h)
# - Strom-Zählerwert alle 6 min, 4 Stellen hinter dem Komma und als Differenzwert
# - Wasserwerte
# - zugehöriges Pattern für Vollständigkeit (Voll in Reihenfolge 0, sonst 1)
#
#---------------------------------------------------------------------------
# Zeiten und Intervallwerte initialisieren
Ticks = time.time()
###
# Werte und Zeiten initialisieren
###
Jahr = int(time.strftime("%Y"))
lfdJahr = Jahr
TdJ = int(time.strftime("%j")) # Tag des Jahres
lfdTdJ = TdJ
MdJ = int(time.strftime("%m")) # Monat des Jahres
lfdMdJ = MdJ
TdM = int(time.strftime("%d")) # Tag des Monats
lfdTdM = TdM
WdJ = int(time.strftime("%V")) # Woche des Jahres
lfdWdJ = WdJ
TdW = int(time.strftime("%u")) # Tag der Woche
lfdTdW = TdW
Std = int(time.strftime("%H")) # Stunde des Tages
lfdStd = Std
Min = int(time.strftime("%M")) # Minute der Stunde
# Gas
Cnt_6m = 0
Cnt_h = 0
Cnt_d = 0
Cnt_W = 0
Cnt_M = 0
Cnt_Y = 0
# Strom
Strom_6m = 0
# Wasser
Counter_W = 0
Cnt_W_6m = 0
Cnt_W_h = 0
Cnt_W_d = 0
Cnt_W_W = 0
Cnt_W_M = 0
Cnt_W_Y = 0
Voll = 11111
# Path setzen
#DS path="/home/pi/shareSynol/"
path="/home/pi/LokaleErg/"
os.chdir(path)
# Öffnen einer "neuen" DB (Löschen alter Inhalte und Überschriften schreiben)
# Name : Jahr und Tag des laufenden Jahres und Gas&Strom
LfdDir = str ("lfdProgrammErg/") # Ergebnis-Folder
LfdY = str (time.strftime("%Y_")) # Lfd Jahr
lfdM = str (time.strftime("%m_")) # Monat des Jahres
lfdT = str (time.strftime("%d")) # Tag des Monats, hier für die nicht-Tagesfiles
Quelle = str (" GSW.csv")
WoWe = str (" Wochenwerte.csv")
TagWe = str ("_ab 6h")
### Filenamen nach üblicher-Konvention z.B. 2018_03_12 Gas etc.)
## Filename = LfdDir + LfdY + lfdM + lfdT + Quelle
## d = open(Filename,"w")
## writer = csv.writer(d, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
### Schreiben der Liste als CSV-Datensatz ergibt "Überschrift"
## writer.writerow(["Datum", "Zeit", "Strom_6m_dif", "Gas_6m", "Cnt_W_6m", "G_Cnt_h", "G_Cnt_d", "g_Cnt_W", "G_Cnt_M", "G_Cnt_Y", "Strom_6m","Cnt_W_h", "Cnt_W_d", "Cnt_W_W", "Cnt_W_M", "Voll"])
# und schliessen
## d.close ()
# und dasselbe für die Tageswerte von 6:00 bis 5:54 initial, wird nicht vollständig sein da belieber Start des Programms
File6bis554 = LfdDir + LfdY + lfdM + lfdT + TagWe + Quelle
f6 = open(File6bis554,"w")
f6riter = csv.writer(f6, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
# Schreiben der Liste als CSV-Datensatz ergibt "Überschrift"
f6riter.writerow(["Datum", "Zeit", "Gas_6m", "Strom_6m_diff","Wasser_6m", "Schlafz.", "Dach/Mitte"])
# und schliessen
#
f6.close ()
Flag_Anf6 = 1 # Flag für 6:00 Anfang setzen
# und dasselbe für den Wochenwert So 16:00
FileWoWe = LfdDir + LfdY + lfdM + lfdT + WoWe
f = open(FileWoWe,"w")
friter = csv.writer(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
# Schreiben der Liste als CSV-Datensatz ergibt "Überschrift"
friter.writerow(["Datum", "Gas", "Strom", "Wasser"])
f.close
#***************************************************************
_thread.start_new_thread(Zeitfenster6min,())
for i in range (1576800): #sollte bei 6 sec sein: 26280h = 1095d = 3y sein
time.sleep(3) # 3 sec
GPIO.output(channel_out_we, GPIO.LOW) # weiss aus
time.sleep(3) # 3 sec
GPIO.output(channel_out_we, GPIO.HIGH) # weiss an
# time.sleep(125000) # beliebig
def ds1820einlesen():
    """Discover DS1820 1-wire temperature sensors.

    Scans /sys/bus/w1/devices for entries whose family-code prefix is
    "28" or "10" (DS18B20 / DS18S20), appends each device name to the
    global list ``tempSensorBezeichnung`` and increments the global
    counter ``tempSensorAnzahl``.  On a directory-access failure the
    global ``programmStatus`` is set to 0.
    """
    global tempSensorBezeichnung, tempSensorAnzahl, programmStatus, realSensorBezeichnung
    try:
        for entry in os.listdir("/sys/bus/w1/devices"):
            # The family code is the part of the device name before the first "-".
            if entry.split("-")[0] in ("28", "10"):
                tempSensorBezeichnung.append(entry)
                tempSensorAnzahl += 1
    except OSError:
        # Narrowed from a bare "except:"; only directory-access errors are
        # expected here — a bare except would also have hidden coding errors.
        print ("Der Verzeichnisinhalt konnte nicht ausgelesen werden.")
        programmStatus = 0
def ds1820auslesen():
    """Read the current temperature from every discovered DS1820 sensor.

    For each device name in ``tempSensorBezeichnung`` (up to the global
    count ``tempSensorAnzahl``) the corresponding ``w1_slave`` sysfs file
    is parsed; the raw milli-degree value is converted to degrees Celsius,
    formatted with two decimals and inserted into ``tempSensorWert``.
    On any read or parse error ``programmStatus`` is set to 0.
    """
    global tempSensorBezeichnung, tempSensorAnzahl, tempSensorWert, programmStatus, realSensorBezeichnung
    index = 0
    try:
        # Read one 1-wire slave file per previously discovered sensor.
        while index < tempSensorAnzahl:
            dateiName = "/sys/bus/w1/devices/" + tempSensorBezeichnung[index] + "/w1_slave"
            # "with" guarantees the handle is closed even if parsing below
            # raises; the original leaked the handle on a parse error.
            with open(dateiName) as sensorfile:
                filecontent = sensorfile.read()
            # Second line of w1_slave ends in "t=<milli-degrees>"; token 9 holds it.
            stringvalue = filecontent.split("\n")[1].split(" ")[9]
            sensorwert = float(stringvalue[2:]) / 1000
            temperatur = '%6.2f' % sensorwert  # width 6, two decimal places
            # NOTE(review): insert() prepends a new entry on every call instead
            # of overwriting the previous reading; kept as-is to preserve the
            # original behavior — confirm whether assignment was intended.
            tempSensorWert.insert(index, temperatur)
            index = index + 1
    except (IOError, OSError, IndexError, ValueError):
        # Narrowed from a bare "except:": only I/O and parse errors expected.
        print ("Die Auslesung der DS1820 Sensoren war nicht möglich.")
        programmStatus = 0
# Declare the ISR for the gas-meter input channel.
def Interrupt_H_L(channel_in):
    """GPIO edge callback: count gas-meter pulses and drive status LEDs.

    A rising edge turns the red LED on and the green LED off; a falling
    edge does the opposite.  The Flag_up/Flag_down pair debounces the
    signal so that only the *first* edge of each polarity increments a
    counter (Counter_up on rising, Counter_min on falling); repeated
    same-polarity edges only bump the corresponding flag.
    """
    global Counter_min, Counter_up, Counter_down
    global Flag_up, Flag_down
    if GPIO.input(channel_in) == 1:  # rising edge (0 → 1)
        GPIO.output(channel_out_r, GPIO.HIGH)  # red LED on
        GPIO.output(channel_out_gr, GPIO.LOW)  # green LED off
        if Flag_up == 0:
            # First rising edge after a falling edge: count one pulse.
            Flag_up = 1
            Flag_down = 0
            Counter_up = Counter_up + 1
        else:
            # Repeated rising edge (bounce/noise): record it but don't count.
            Flag_up = Flag_up + 1
    else:  # falling edge (1 → 0)
        GPIO.output(channel_out_r, GPIO.LOW)  # red LED off
        GPIO.output(channel_out_gr, GPIO.HIGH)  # green LED on
        if Flag_down == 0:
            # First falling edge after a rising edge: count one pulse.
            Flag_down = 1
            Flag_up = 0
            Counter_min = Counter_min + 1
        else:
            # Repeated falling edge (bounce/noise): record it but don't count.
            Flag_down = Flag_down + 1
def Wasser_Interr(LS_in):
    """GPIO edge callback: count water-meter pulses via the light sensor.

    A rising edge (optical dark→light transition) increments the running
    6-minute water counter ``Counter_W`` and switches the blue LED on /
    the yellow LED off; a falling edge switches them back.  As in
    Interrupt_H_L, the Flanke_w_up/Flanke_w_down flags debounce the
    signal so only the first edge of each polarity counts.
    """
    global Counter_W  # running water counter within the 6-minute window
    global Flanke_w_up, Flanke_w_down, Counter_W_minus
    if GPIO.input(LS_in) == 1:  # rising edge (0 → 1)
        if Flanke_w_up == 0:
            # First rising edge since the last falling edge: count one pulse.
            Flanke_w_down = 0
            Flanke_w_up = 1
            Counter_W = Counter_W + 1
            GPIO.output(channel_out_b, GPIO.HIGH)  # blue LED on
            GPIO.output(channel_out_ge, GPIO.LOW)  # yellow LED off
        else:
            # Repeated rising edge: record but don't count.
            Flanke_w_up = Flanke_w_up + 1
    else:
        if Flanke_w_down == 0:
            # First falling edge since the last rising edge.
            Flanke_w_up = 0
            Flanke_w_down = 1
            GPIO.output(channel_out_b, GPIO.LOW)  # blue LED off
            GPIO.output(channel_out_ge, GPIO.HIGH)  # yellow LED on
        else:
            # Repeated falling edge: record but don't count.
            Flanke_w_down = Flanke_w_down + 1
# Timer via Threads setzen
def Zeitfenster6min():
id = _thread.get_ident()
# Gas
global Counter_up, Counter_min, Cnt_6m, Cnt_h, Cnt_d, Cnt_W, Cnt_M, Cnt_Y
# Strom
global Strom_6m, Strom_6m_diff, Strom_6m_vor
# Wasser
global Counter_W, Counter_W_minus, Cnt_W_6m, Cnt_W_h, Cnt_W_d, Cnt_W_W, Cnt_W_M, Cnt_W_Y
global LfdDir, TagWe, Quelle
global Jahr, lfdJahr
global TdJ, lfdTdJ
global MdJ, lfdMdJ
global TdM, lfdTdM
global WdJ, lfdWdJ
global TdW, lfdTdW
global Std, lfdStd
global Min
global Voll
global Filename
global FileWoWe
global File6bis554
global Flag_Anf6 # Flag für 6:00 Anfang
global d, f, l, s, f6
# Wochenwert zu Sonntag 15:00 setzen
WoWert_Gas = 0
WoWert_Wasser = 0
# 6 min Intervalle
min_interv = 6 #6 min
min_interv_sec = min_interv * 60 #6 min in sec
jetztmin = int(time.strftime("%M"))
zuviel_sec = int(time.strftime("%S"))
# Wartezeitberechnung
rest_zeit = min_interv - (jetztmin % min_interv) # in min
rest_zeit_sec = rest_zeit * 60 - zuviel_sec # als sec korrigiert
time.sleep(rest_zeit_sec)
# Durchlaufzähler
# 6 min- und Durchlauf- Zähler auf 0 setzen vor erstem vollen Intervall, Flanken up für Test auf Richtig
Counter_up = 0
Counter_min = 0
Counter_W = 0
Durchl = 0
# Zeiten setzen
zeitAktuell = int(time.strftime("%d")) # Test für Tagelang
zeitEnde = zeitAktuell + 365 #erst einnmal 365d
# Ende erreicht?
while zeitAktuell < zeitEnde:
# Datenbank aufmachen zum Schreiben
## d = open(Filename,"a+")
f6 = open(File6bis554,"a+")
# Zeit genau auf min-Anfang der 6-min Periode setzen, Schlupf durch CPU-Leistung
zuviel_sec = int(time.strftime("%S"))
time.sleep((min_interv_sec-zuviel_sec))
### test incl. ausdruck #########################################
# Zeit innerhalb der Stunde als Minute und wievieltes Intervall
jetztmin = int(time.strftime("%M"))
jetzt_intv = int(jetztmin/min_interv)
# print("--6min-- jetztmin", jetztmin, jetzt_intv)
#================================================================
# Zeit und Impulsanzahl in Datenbank speichern
#------------------------------------------------------------------------
# laufende Zeitwerte gewinnen
#
Ticks = time.time()
lfdJahr = int(time.strftime("%Y"))
lfdTdJ = int(time.strftime("%j")) # Tag des Jahres
lfdMdJ = int(time.strftime("%m")) # Monat des Jahres
lfdTdM = int(time.strftime("%d")) # Tag des Monats
lfdWdJ = int(time.strftime("%V")) # Woche des Jahres
lfdTdW = int(time.strftime("%u")) # Tag der Woche
lfdStd = int(time.strftime("%H")) # Stunde des Tages
lfdMin = int(time.strftime("%M")) # Minute der Stunde
# Werte ermitteln
#
# laufenden 6 min umspeichern: Counter_min zu Cnt_6m
Cnt_6m = Counter_min
Cnt_W_6m = Counter_W
# 6 min Zähler anschließend auf 0 setzen für neues Intervall
Counter_min = 0
Counter_W = 0
# laufenden h-Wert bearbeiten
Cnt_h = Cnt_h + Cnt_6m
Cnt_W_h = Cnt_W_h + Cnt_W_6m
# laufenden d-Wert bearbeiten
Cnt_d = Cnt_d + Cnt_6m
Cnt_W_d = Cnt_W_d + Cnt_W_6m
# laufenden Wochen-Wert bearbeiten
Cnt_W = Cnt_W + Cnt_6m
Cnt_W_W = Cnt_W_W + Cnt_W_6m
# laufenden Monats-Wert bearbeiten
Cnt_M = Cnt_M + Cnt_6m
Cnt_W_M = Cnt_W_M + Cnt_W_6m
# laufenden Jahres-Wert bearbeiten
Cnt_Y = Cnt_Y + Cnt_6m
# Wochenwert für Gas setzen bis alles stimmt
WoWert_Gas = WoWert_Gas +Cnt_6m
WoWert_Wasser = WoWert_Wasser +Cnt_W_6m
# Stromwert ermitteln aus Zählerauslesung
# von ShareSynol auf Lokale Erg geändert 29.10.2018
# Öffnen csv-Quelle
lese_pfad = str ("/home/pi/LokaleErg/IR-Lese/")
lfile = str ("serialin.txt")
lquelle = lese_pfad + lfile
l = open(lquelle,"rb")
# Test-Ausdruck
allezeilen = l.read()
#print(allezeilen)
# Stop-Pattern bei Suche auf Stromwert (entspr.b'52 ff 59')
byte_patter = b"\x1eR\xffY"
# print("byte_patter: ", byte_patter)
seek_index = 10
byte_index = 4 #ist der richtige Wert
erg_index = 8
while seek_index < 400: # sollte reichen
l.seek(seek_index)
bytesuch=l.read(byte_index)
if bytesuch == byte_patter:
# Testausdruck
#print("Treffer", seek_index)
seek_index= seek_index + byte_index
l.seek(seek_index)
# Testausdruck
#print ("Pointer auf Wert: ", seek_index)
byte_erg = l.read(erg_index)
# Testausdruck
#print("Wert: ", byte_erg)
Strom_6m =(int.from_bytes(byte_erg, byteorder='big'))
# einfügen in DB
# Testausdruck
# print(Strom_6m)
if Strom_6m > 0:
Strom_6m_diff = Strom_6m - Strom_6m_vor
# print(Strom_6m_diff, Strom_6m_vor)
if Strom_6m_diff > 5999: # erster Wert vor Diff-Bildung
Strom_6m_diff = 0
Strom_6m_vor = Strom_6m
break
seek_index = seek_index + 1
# Schliessen der Strom-Dateien
l.close ()
# Temperaturwerte einlesen und einen speichern im allg. Datensartz
# für Test IO20
realSensorBezeichnung = ["Sensor #11 ","Dach/Garten ",
"Dach/ Mitte ","Wz/Mitte ",
"Bad ","Wz/Fenster ",
"Wz/Küche ","Schlafzimmer",
"Dach/Strasse","Sensor #12 ",
"Windfang/WC ","Arbeitsz. "] # Liste der zug. Sensorbezeichnungen
x = 0
ds1820auslesen()
GPIO.output(channel_out_r, GPIO.HIGH) # rote LED an
GPIO.output(channel_out_we, GPIO.LOW) # weisse LED aus
#+ print ("Zeit und Sensorbezeichnung und Temperaturwert:")
#+ while x < tempSensorAnzahl:
#* print (time.strftime("%H:%M:%S") , " " ,realSensorBezeichnung[x] , " " , tempSensorBezeichnung[x] , " " , tempSensorWert[x] , " °C")
#+ x = x + 1
GPIO.output(channel_out_ro, GPIO.LOW) # rote LED aus
GPIO.output(channel_out_ws, GPIO.HIGH) # weisse LED an
# Datensatz für 6-5:54 erzeugen
lfdDatum = time.strftime("%x")
lfdUhr = time.strftime("%X")
if Flag_Anf6 >= 0:
f6.write (str (lfdDatum) + ";"
+ str (lfdUhr) + ";"
+ str (Cnt_6m) + ";"
+ str (Strom_6m_diff) + ";"
+ str (Cnt_W_6m) + ";"
+ str (tempSensorWert[7]) + ";"
+ str (tempSensorWert[2]) + ";"
+ "\n")
Counter_up = 0
#
# Zeitübergänge berücksichtigen
# Wochenwert Sonntag 16:00 setzen
#
if lfdTdW == 7:
if lfdStd ==16:
if lfdMin < 6:
lfdDatum = time.strftime("%c")
f = open(FileWoWe,"a+")
f.write(str(lfdDatum) + ";"
+ str (WoWert_Gas) + ";"
+ str (Strom_6m) + ";"
+ str (WoWert_Wasser) + ";"
+ "\n")
f.close ()
WoWert_Gas = 0
WoWert_Wasser = 0
#
if lfdStd != Std: # Volle Stunde
Std = lfdStd
if Voll == 11111:
Voll = 1111
if lfdStd == 6:
if lfdMin < 10:
Flag_Anf6 = 0
# altes Tagesfile schliessen
f6.close()
# und neues File für die Tageswerte von 6:00 bis 5:54 aufmachen
LfdY = str (time.strftime("%Y_")) # Lfd Jahr, hier für die Tagesfiles
lfdM = str (time.strftime("%m_")) # Monat des Jahres, hier für die Tagesfiles
lfdT = str (time.strftime("%d")) # Tag des Monats, hier für die Tagesfiles
File6bis554 = LfdDir + LfdY + lfdM + lfdT + TagWe + Quelle
f6 = open(File6bis554,"w")
f6riter = csv.writer(f6, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
# Schreiben der Liste als CSV-Datensatz ergibt "Überschrift"
f6riter.writerow(["Datum", "Zeit", "Gas_6m", "Strom_6m_dif", "Cnt_W_6m", "Schlafz.", "Dach/Mitte"])
# und schliessen "Überschrift"
f6.close()
# und neu aufmachen
f6 = open(File6bis554,"a+")
Cnt_h = 0
Cnt_W_h = 0
# print("--6min-- Std, Cnt_h, Voll:" ,Std, Cnt_h, Voll)
if lfdTdW != TdW: # voller Tag Mitternacht 24h
TdW = lfdTdW
if Voll == 1111:
Voll = 111
Cnt_d = 0
Cnt_W_d = 0
# print("--6min-- TdW, Cnt_d, Voll:" ,TdW, Cnt_d, Voll)
if lfdWdJ != WdJ: # volle Woche
WdJ = lfdWdJ
if Voll == 111:
Voll = 11
Cnt_W = 0
Cnt_W_W = 0
# print("--6min-- WdJ, Cnt_W, Voll:" ,WdJ, Cnt_W, Voll)
if lfdMdJ != MdJ: # voller Monat
MdJ = lfdMdJ
if Voll == 11:
Voll = 1
Cnt_M = 0
Cnt_W_M = 0
# print("--6min-- MdJ, Cnt_M, Voll:" ,MdJ, Cnt_M, Voll)
if lfdJahr != Jahr: # volles Jahr
Jahr = lfdJahr
Voll = 00000
Cnt_Y = 0
# print("--6min-- Jahr, Cnt_Y, Voll:" ,Jahr, Cnt_Y, Voll)
#================================================================
Durchl = Durchl + 1
### test ausdruck ###############################################
# print("--6min-- Durchlauf Nr.:", Durchl)
### test ausdruck ###############################################
## d.close ()
#
# f6.close ()
# Zeit fuer Schleife ermitteln
zeitAktuell = int(time.strftime("%d"))
#######################################
return
# Hauptprogramm
main() # Invoke the main function
try:
while True:
pass
# Quit on Ctrl-c
except KeyboardInterrupt:
print ("Ctrl-C - quit")
# Alle ausschalten
GPIO.output(channel_out_r, GPIO.LOW) # Rot aus
GPIO.output(channel_out_ge, GPIO.LOW) # Gelb aus
GPIO.output(channel_out_b, GPIO.LOW) # Blau aus
GPIO.output(channel_out_gr, GPIO.LOW) # Grün aus
# Cleanup GPIO
finally:
GPIO.cleanup()
| [
"ulrich.nielsen@googlemail.com"
] | ulrich.nielsen@googlemail.com |
ce40acb6d2dfad691c156238fd8bcf18ef9b465b | 6578999fe06b9d8e1280853ab6fc0bbcc68b51a7 | /models/__init__.py | 8a7ff22b0f416fc33f41cb0900a70697a0cf035c | [] | no_license | tangyuhao/transfer-tensorflow | 3abfef3474771a53ba24c8a47b03745079ab5af8 | 3fbbb228006def636c908fd95a45ded3a27ac2fc | refs/heads/master | 2021-08-30T02:10:55.359575 | 2017-12-15T16:44:44 | 2017-12-15T16:44:44 | 111,703,441 | 4 | 0 | null | 2017-11-22T15:47:21 | 2017-11-22T15:47:21 | null | UTF-8 | Python | false | false | 210 | py | from .caffe_alexnet import *
from .caffe_mean_file import *
# Map from model name to (constructor, output feature numbers of fc, mean file)
base_models = {
'alexnet': (alexnet, (9216, 4096, 4096, 1000))
}
| [
"sunziping2016@gmail.com"
] | sunziping2016@gmail.com |
cd7cf82c026b80ab657e904cee8cfe89525b9563 | d1a31b2558ffa51546facba6d9e2dc47ff88d396 | /GenericViews/settings.py | cb1e219360a7a80d0f819e587dd5e2d55fc3fbb2 | [] | no_license | syedarfa459/DjangoCBVs | d65ac91143c2cdb406963a32c6bf854a1277edad | 03133f18a138fc21853cb0e73c0531035ea8e49c | refs/heads/master | 2023-01-07T19:53:50.436757 | 2020-11-06T16:20:10 | 2020-11-06T16:20:10 | 310,645,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | """
Django settings for GenericViews project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p=rtuk#3h5e_^i8w5m@c@a9rl-_vqhm3mp0&z2g5@p)slr_h-f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'CreateViewDemo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GenericViews.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GenericViews.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"arfashahji@gmail.com"
] | arfashahji@gmail.com |
20faba5fd718594721809afc32054e86bb5736ff | 51899c24fe2ab1ee1a0dd40b58867532f5428004 | /web/myapp/app/SearchModule/t_search.py | 0c49a8cf702bba4aab97f0a0e18d46295987a90a | [] | no_license | 763272955/python | a2d07ee37d31c919d0fecb0a798a87bc024b6970 | b1952ee702227d5624a76410e365dfc38e9beb5c | refs/heads/master | 2021-05-11T11:33:55.246035 | 2018-01-16T06:20:28 | 2018-01-16T06:20:28 | 117,638,934 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | #!/opt/python27/bin/python
#coding:utf-8
import json
import urllib2
from ..Config import config
t_behaviors = { 'is_danger': '具有典型行为',
'is_network': '具有网络行为',
'is_process': '具有进程操作行为',
'is_file': '具有文件操作行为',
'is_registry': '具有注册表操作行为',
'is_other':'具有其他行为'
}
def t_search(hash):
    '''
    Query the T system for the static and dynamic analysis results of a
    sample identified by its hash.

    Args:
        hash: sample hash, appended to the configured T-system endpoints
            (config.t_static_url / config.t_dynamic_url).

    Returns:
        dict with the T-system result fields: verdict, virus name,
        processing time, file format, static/dynamic completion status,
        and '-'-joined analysis steps and behavior descriptions.

    Raises:
        Any error from urllib2.urlopen / json.loads is propagated to the
        caller (network failure, invalid JSON, ...).
    '''
    t_result = {
        'sign': 't', 'virus_name': '', 'is_virus': '', 'cast_time': '',
        'format_name': '', 'static_status': '未完成', 'dynamic_status': '未完成',
        'analysis_steps': ['预处理', '多引擎扫描', '静态分析'], 'behaviors': []
    }
    # Static analysis result. The original wrapped this in
    # "except Exception as e: raise e", which is a no-op; removed.
    t_static_url = config.t_static_url + hash
    t_static_result = json.loads(urllib2.urlopen(t_static_url).read())
    t_result['static_status'] = "已完成"
    if t_static_result["basic_info"]:
        t_result['virus_name'] = t_static_result['basic_info']['virus_name']
        t_result['cast_time'] = t_static_result['basic_info']['cp_time']
        t_result['format_name'] = t_static_result['basic_info']['file_format']
    # BUG FIX: the verdict must be derived *after* virus_name has been filled
    # in from basic_info; previously this line ran before the assignment and
    # therefore always produced '非恶意'.
    t_result['is_virus'] = '恶意' if t_result['virus_name'] else '非恶意'
    # Dynamic analysis result.
    t_dynamic_url = config.t_dynamic_url + hash
    t_dynamic_result = json.loads(urllib2.urlopen(t_dynamic_url).read())
    if t_dynamic_result['succeed']:
        t_result['dynamic_status'] = '已完成'
        t_result['analysis_steps'].append('动态分析')
        # Keep only the behavior descriptions whose flag is set in the report.
        t_result['behaviors'] = [t_behaviors[key] for key in t_behaviors
                                 if t_dynamic_result["content"][key]]
    t_result["analysis_steps"] = '-'.join(t_result["analysis_steps"])
    t_result["behaviors"] = '-'.join(t_result["behaviors"])
    return t_result
"33079717+763272955@users.noreply.github.com"
] | 33079717+763272955@users.noreply.github.com |
66c1143ccb54ec8f526cda648875317d8f080c01 | e1f54e2c4e1601dfb400694ce66f86394afcbf29 | /backtowork.py | b2a3d0712cb6637d0c565fca87fe1db63fb0dd3e | [] | no_license | svhossen/vinted | e173265aa61e3ede33a8add96e0e7997a4faf697 | 33af6b4c013c26c2aacf10a3d09b3d0e839db7aa | refs/heads/master | 2022-11-06T17:16:26.157483 | 2020-06-23T15:29:23 | 2020-06-23T15:29:23 | 273,799,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 7 15:40:18 2020
@author: hossenbuxs
"""
from timeit import default_timer as timer
import scrapy
import os
os.chdir(r'U:\lavspec\Startup\backtowork')
import sys
sys.path.append(r'U:\Lavspec\Startup\backtowork\backtowork')
from items import NewItem
from urllib.parse import urlparse
import pandas as pd
from scrapy.http import Request
import pandas as pd
import datetime
#%%
class backtowork(scrapy.Spider):
    """Scrapy spider that scrapes equity-crowdfunding campaign cards from
    backtowork24.com into a single NewItem of parallel lists.
    """
    # The spider name must exist before launching the crawl
    # ("scrapy crawl backtowork" refers to it).
    name = "backtowork"
    allowed_domains = ['www.backtowork24.com']
    start_urls = ['https://www.backtowork24.com/equity-crowdfunding?gclid=EAIaIQobChMI-6SMr9K_5wIVRIXVCh1aKAa3EAAYAiAAEgIPvfD_BwE']

    def parse(self, response):
        """Extract campaign fields from the listing page.

        Each field is a list built by probing grid positions 0..99 with a
        formatted XPath; positions with no matching node contribute None
        (presumably pages never have more than 100 cards — TODO confirm).
        Returns one NewItem holding all the parallel lists plus a scrape
        timestamp and the page URL.
        """
        item = NewItem()
        # Campaign name, per grid cell.
        item['Name'] = [response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[1]/span[5]/h4/text()'.format(i)).get() for i in range(100)]
        # Number of subscriptions ("Adesioni").
        item['Adesioni']= [response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[2]/div[4]/div/div/div[1]/h4/text()'.format(i)).get() for i in range(100)]
        # Offered equity percentage.
        item['Equity']= [response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[2]/div[4]/div/div/div[2]/h4/text()'.format(i)).get() for i in range(100)]
        # Business sector ("Settore").
        item['Settore']=[response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[2]/div[4]/div/div/div[3]/h4/text()'.format(i)).get() for i in range(100)]
        # Short campaign description.
        item['description']= [response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[2]/div[2]/p/text()'.format(i)).get() for i in range(100)]
        # Campaign status text.
        item['status']=[response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[2]/div[3]/div/div/p/text()'.format(i)).get() for i in range(100)]
        # Percentage of funding target raised so far.
        item['percent_raised'] = [response.xpath('//*[@id="grid-campaign"]/div/div/div[{}]/a/div/div[2]/div[3]/div/div/div[1]/text()'.format(i)).get() for i in range(100)]
        item['date'] = datetime.datetime.now()  # scrape timestamp
        item['base_url'] = response.url
        return item
#%%
def main():
    """Launch the 'backtowork' spider via the Scrapy command line,
    exporting the result to backtowork_03_19_20.json.
    """
    import scrapy
    from scrapy import cmdline
    # Equivalent to running "scrapy crawl backtowork -o <name>.json -t json"
    # from a shell; cmdline.execute expects the argv-style token list.
    cmdline.execute("scrapy crawl backtowork -o {}.json -t json".format('backtowork_03_19_20').split())
    #cmdline.execute("scrapy crawl backtowork".split())
if __name__ =="__main__":
main()
#%%
import json
# Post-processing: load the first record set of the spider's JSON export
# and re-save it as CSV via pandas.
with open('backtowork_03_19_20.json', 'r') as f:
    distros_dict = json.load(f)
dataframe = pd.DataFrame(distros_dict[0])  # first exported item only
dataframe.to_csv('backtowork_03_19_20.csv')
| [
"noreply@github.com"
] | noreply@github.com |
c8a6531bad1d22622b253c30712ab63535b7ba14 | b254f030cefdddbabf6868b1d5d9a784aba88b2c | /tutorial/porting-multi-modules/mypreprocessor2.py | 1f03bc84da1e1046a8b8cc39dd23c9ed4510df0f | [
"Apache-2.0"
] | permissive | gnes-ai/hub | 84220c37eea388fd57c914e86007469cd126d371 | 94cff9011ff6447ce1af51c5307813ab6fbbb156 | refs/heads/master | 2020-07-05T13:23:59.573400 | 2019-10-24T05:10:12 | 2019-10-24T05:10:12 | 202,658,837 | 38 | 11 | NOASSERTION | 2019-10-24T05:10:13 | 2019-08-16T04:33:52 | Python | UTF-8 | Python | false | false | 375 | py | from gnes.preprocessor.text.base import BaseTextPreprocessor
class MyPreprocessor2(BaseTextPreprocessor):
def __init__(self, bar, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bar = bar
def apply(self, doc: 'gnes_pb2.Document') -> None:
super().apply(doc)
doc.raw_text += self.bar
self.logger.info(doc.raw_text)
| [
"hanhxiao@tencent.com"
] | hanhxiao@tencent.com |
7a6a0b91917a46a5de11415981074fe5b760823a | f24985cd3f0fec0d4343bec4981cd0c190328046 | /train/app/model.py | aa47bb4d9c43c5faf45ec639f3e4d56b46819b63 | [] | no_license | LinLL/scrapy_pic | e69f0b3fcde516eb758072e826a46b9fd4af971e | 4852d43be55d962445d0202ec03ce0a7e59ab1c3 | refs/heads/master | 2020-12-25T15:07:50.547392 | 2016-10-17T02:15:54 | 2016-10-17T02:15:54 | 67,203,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Beautys(db.Model):
    """SQLAlchemy model for a scraped picture: its URL, the page it was
    found on, and a 'loves' vote counter.
    """
    id = db.Column(db.Integer, primary_key=True)   # surrogate primary key
    url = db.Column(db.String(70), unique=True)    # picture URL, unique per row
    loves = db.Column(db.Integer)                  # number of votes received
    page = db.Column(db.Integer)                   # source page number

    def __init__(self, url, page, loves=0):
        """Create a record; new pictures start with zero loves by default."""
        self.url = url
        self.loves = loves
        self.page = page

    def __repr__(self):
        return "<Beautys , url:{}, loves:{}, page:{}>".format( self.url, self.loves ,self.page)
| [
"luolin.work@gmail.com"
] | luolin.work@gmail.com |
a738fb0d91c7cefa3fc82be2a129ddfd5b83fae8 | 3d1a0d198f2ab85dd65e56ed7a4a60250e9decaa | /2020/day02/aafrecct/day02.py | 9663fd3d4d81af83d4d1f729cd057d5c918143f9 | [] | no_license | petacreepers23/AdventCode | 82235e96a65826abeafa0bdc35b413d539385a90 | 8f52b043e37b4f276f34de972ca04d743b7c1f47 | refs/heads/master | 2023-02-03T15:16:52.877745 | 2020-12-17T23:33:03 | 2020-12-17T23:33:03 | 317,660,628 | 0 | 0 | null | 2020-12-17T22:04:06 | 2020-12-01T20:33:04 | Python | UTF-8 | Python | false | false | 825 | py | def next_line():
with open('day02.input', 'r') as f:
while(l := f.readline()) != '':
yield l
def parse_line(line):
    """Parse a password-policy line such as '1-3 a: abcde\\n' into the
    structure [[low, high], letter, password].
    """
    fields = line.split(' ')
    low, _, high = fields[0].partition('-')
    fields[0] = [int(low), int(high)]
    fields[1] = fields[1].replace(':', '')
    fields[2] = fields[2].replace('\n', '')
    return fields
def check_star_1(parsed_line):
    """Part-1 policy: the letter's occurrence count lies within [low, high]."""
    (low, high), letter, password = parsed_line
    occurrences = password.count(letter)
    return low <= occurrences and occurrences <= high
def check_star_2(parsed_line):
    """Part-2 policy: exactly one of the two 1-based positions holds the
    letter (positions beyond the end of the password never match).
    """
    positions, letter, password = parsed_line
    matches = sum(
        1
        for pos in positions
        if pos <= len(password) and password[pos - 1] == letter
    )
    return matches == 1
star1 = 0
star2 = 0
for l in next_line():
l = parse_line(l)
star1 += 1 if check_star_1(l) else 0
star2 += 1 if check_star_2(l) else 0
print(star1)
print(star2)
| [
"aafrected@gmail.com"
] | aafrected@gmail.com |
c01f46251c7d9ca4cdbc9e0920e810fc569ffbfd | 292be3c6499c690899ff2506d22e315de040c766 | /animation_opt_data.py | 86214672181ec47f859a5122d83e1204b5f5ba77 | [] | no_license | dawnknight/Kproject | fa1d2cf96a8d7c6fcfb8aabb868a68997440cfb4 | 27f076357b5913c6e4f3c92596839ee209671529 | refs/heads/master | 2020-07-16T08:51:27.627550 | 2016-12-18T04:57:32 | 2016-12-18T04:57:32 | 73,948,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 02 13:04:16 2016
@author: liuqi
"""
import cPickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
data_all = cPickle.load(file('mocapdata1128_array.pkl','r'))
keys = data_all['keys']
NUM_LABELS = len(keys) # total number of the labels
NUM_FRAMES = len(data_all['rcpos'][0]) #total number of the frames
print 'The total frames: ', NUM_FRAMES
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# data_orig = {} # dictionary
# data_interp = {}
ax.set_xlabel('X axis')
ax.set_ylabel('Z axis')
ax.set_zlabel('Y axis')
for frame_no in range(NUM_FRAMES):
plt.cla()
xs = []
ys = []
zs = []
for i in xrange( NUM_LABELS ):
xs.append(data_all['rcpos'][i*3][frame_no])
ys.append(data_all['rcpos'][i*3+1][frame_no])
zs.append(-1*data_all['rcpos'][i*3+2][frame_no])
ax.scatter(xs, zs, ys)
ax.set_xlim(-0.5,1.5)
ax.set_ylim(-0.2,1.9)
ax.set_zlim(1,2)
ax.set_title(frame_no)
plt.draw()
plt.pause(1.0/120)
| [
"atc327@nyu.edu"
] | atc327@nyu.edu |
a0c4e362e2865879a5638f88f353f7b7f24fd3f9 | 21537bd6ff388baf6653f8299b467dd187112fd0 | /example/wugui.py | 5caabced41c3bf0b13fb258f5b60fd4356d60181 | [] | no_license | z313608886/PythonStudy | 4c874979dcb8a0423d9d9a2cc9c2065fa964cbf5 | 4bb022d72e149932c6f6c8c8618beb5988178a1d | refs/heads/master | 2020-05-19T11:56:19.614962 | 2019-08-28T03:16:13 | 2019-08-28T03:16:13 | 185,004,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import turtle
bob = turtle.Turtle()
print(bob)
'''bob.fd(100)
bob.lt(90)
bob.fd(100)
bob.lt(90)
bob.fd(100)
bob.lt(90)
bob.fd(100)'''
app=4
for i in range(app):
bob.fd(100)
bob.lt(90)
turtle.mainloop()
| [
"313608886@qq.com"
] | 313608886@qq.com |
3fe70926213c2c43854b814f3fa2a772fcbc0115 | 877f89c4d1c352f452f1911cea7fa0888b642492 | /2-by-2-grid.py | a9e70b8f3be3708cf13302e329bf0645ba3eea3f | [] | no_license | sSKDs/Think-Python-2E | 55bfeca3fac01699f47058eb6b9c7debd0ef2382 | 63d580fbb033f228601036904a77986b00cfc70d | refs/heads/master | 2020-12-02T07:48:39.092615 | 2017-07-10T09:14:53 | 2017-07-10T09:14:53 | 96,729,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | def grid():
a = '+' + ' -' * 4
b = '+'
c = '/' + " " * 8
d = '/'
def print_1():
print(a, a, b)
def print_2():
print(c, c, d)
def do_twice(f):
f()
f()
def do_four(f):
do_twice(f)
do_twice(f)
def print_grid():
print_1()
do_four(print_2)
do_twice(print_grid)
print_1()
| [
"noreply@github.com"
] | noreply@github.com |
46df32664759646298fd335369f412281b58466f | eebe06df6c21b64f46c4054767c6013a294f9f6e | /src/graph_def_editor/rewrite.py | aff4b34b73df6de57ad08ad7016ed7ecb7b4b5a1 | [] | no_license | Hari-Nagarajan/gpt2-twitter | 86ed98cd55e6c59c420c23fb8fd4aac40c5c6c4e | db8a4d40cbad24a8f51c889147d2e9b9af9648e1 | refs/heads/master | 2023-02-21T04:52:34.101164 | 2021-07-13T15:58:00 | 2021-07-13T15:58:00 | 231,670,820 | 6 | 3 | null | 2023-02-16T00:37:21 | 2020-01-03T21:47:04 | Python | UTF-8 | Python | false | false | 25,758 | py | # Copyright 2019 IBM. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
rewrite.py
Graph rewrites that ship with the GraphDef Editor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import sys
if sys.version >= '3':
from typing import Tuple, Dict, Iterable, Union, Callable, Any
from graph_def_editor import graph, node, reroute, tensor, util
# Shorten select.TreeExpr to make patterns easier to read
from graph_def_editor.select import TreeExpr
__all__ = [
"change_batch_size",
"fold_batch_norms",
]
def change_batch_size(
    g,  # type: graph.Graph
    new_size,  # type: int
    inputs  # type: Iterable[Union[node.Node, tensor.Tensor]]
):
  # type: (...) -> None
  """
  Rewrite a model's batch size in place.

  Overwrites the leading dimension of every input placeholder's shape and
  then re-runs shape/dtype inference so the new size propagates through
  the rest of the graph.

  Modifies the graph in place. If the rewrite fails, the graph may be left
  in an inconsistent state.

  Args:
    g: The graph on which to modify the batch size. Modified in place.
    new_size: New batch size to apply on the input(s) to the graph.
      Can be `None` to indicate dynamic batch size.
    inputs: Placeholder nodes that are the input to the graph, either
      the `Node`s themselves or as their output `Tensor`s
  """
  # Normalize: callers may pass either Nodes or their output Tensors.
  placeholders = []
  for item in inputs:
    placeholders.append(item.node if isinstance(item, tensor.Tensor) else item)

  # Validate everything before mutating anything.
  for ph in placeholders:
    if ph.op_type != "Placeholder":
      raise ValueError("Input node {} is not a Placeholder".format(ph))
    if ph.graph is not g:
      raise ValueError("Input node {} is not in graph {}".format(ph, g))

  # Rewrite the leading (batch) dimension of each input's shape attribute.
  for ph in placeholders:
    dims = list(ph.get_attr("shape").dims)
    dims[0] = new_size
    ph.replace_attr("shape", tf.TensorShape(dims))

  # Re-run inference so the new batch size propagates throughout the graph.
  g.infer_shapes_and_dtypes()
def _fixed_point_apply(
    pattern,  # type: TreeExpr
    action,  # type: Callable[[graph.Graph, Dict[str, node.Node]],bool]
    g  # type: graph.Graph
):
  # type: (...) -> None
  """
  Repeatedly apply a pattern-action rule until the graph stops changing
  (i.e. run the rewrite to a fixed point).

  Args:
    pattern: Expression that selects a portion of the graph for modification
    action: Rule (as a Callable) that optionally modifies the graph. Returns
      True if modifications occurred and False otherwise.
    g: graph to rewrite in place
  """
  keep_going = True
  while keep_going:
    keep_going = False
    # Each iteration walks through all the nodes of the graph to avoid O(n^2)
    # behavior. Snapshot the node list up front because the action may add or
    # remove nodes while we iterate.
    nodes_before = g.nodes
    for n in nodes_before:
      if n.graph is None:
        # Node has been removed from the graph by an earlier action.
        continue
      match_info = pattern.eval_from(n)
      if match_info is not None:
        # Found a structural match rooted at the current node. Perform action.
        change_happened = action(g, match_info)
        if change_happened:
          # Something changed; rescan the whole graph once more.
          keep_going = True
def _scale_weights(weights_node,  # type: node.Node
                   scale,  # type: np.ndarray
                   dims  # type: Tuple[int]
                   ):
  # type: (...) -> None
  """
  Multiply each row/column/dimension of a set of constant weights by a
  scaling factor, in place.

  Args:
    weights_node: Const node containing weights
    scale: Array where each entry contains a scale factor for a slice of
      the weights tensor in weights_node
    dims: Dimensions of the weights along which the scale factor should be
      applied.

  Raises:
    ValueError: if scale's rank doesn't match len(dims), or the scale and
      weight extents disagree along the targeted dimensions
    TypeError: if weights_node is not a Const op
  """
  if len(dims) != len(scale.shape):
    raise ValueError("Target dimensions {} not compatible with shape of "
                     "scale array {}".format(dims, scale))
  if weights_node.op_type != "Const":
    raise TypeError("Unexpected op type {} for weights_node".format(
      weights_node.op_type))
  weights = weights_node.get_attr("value")
  # Each scale dimension must line up with the corresponding weights dimension.
  for i in range(len(dims)):
    if scale.shape[i] != weights.shape[dims[i]]:
      raise ValueError("Scale vector of shape {} can't be applied along "
                       "dimensions {} of a weights vector of shape "
                       "{}".format(scale.shape, dims, weights.shape))

  def compute_target_multi_index(scale_multi_index):
    # Build an index into `weights` selecting the full slice that corresponds
    # to one element of `scale`: slice(None) everywhere except the targeted
    # dims, which are pinned to the scale element's coordinates.
    ret = [slice(None)] * len(weights.shape)
    for d in range(len(dims)):
      ret[dims[d]] = scale_multi_index[d]
    return tuple(ret)

  # Do the multiply in float64 to minimize rounding error, casting back below.
  scaled_weights = np.float64(weights)
  itr = np.nditer(scale, flags=["multi_index"])
  while not itr.finished:
    scale_factor = np.float64(itr[0])
    target_coords = compute_target_multi_index(itr.multi_index)
    scaled_weights[target_coords] *= scale_factor
    itr.iternext()
  # Cast down to the original precision
  scaled_weights = scaled_weights.astype(weights.dtype)
  # Modify the node in place
  weights_node.replace_attr("value", scaled_weights)
def _add_scale_to_conv_weights(conv_node,  # type: node.Node
                               weights_node,  # type: node.Node
                               scale  # type: np.ndarray
                               ):
  # type: (...) -> None
  """
  Subroutine of fold_batch_norms() and fold_old_batch_norms().

  Extract the weights from a Conv2D, DepthwiseConv2D, or MatMul op, multiply by
  scaling factors, and put the resulting scaled weights in place.

  Args:
    conv_node: Conv2D/MatMul node to be rewritten
    weights_node: Const node containing weights that parametrize the
      transformation that conv_node performs.
    scale: Array where each entry contains a scale factor for the
      corresponding output column of conv_node
  """
  op_type = conv_node.op_type
  if op_type == "DepthwiseConv2dNative":
    # Filter dimensions 2 and 3 hold input-channel and channel-multiplier
    # index respectively, so the flat per-output-channel scale must be folded
    # back into a 2-D grid before it can be applied.
    weights_shape = weights_node.output(0).shape
    grid = scale.reshape([weights_shape[2], weights_shape[3]])
    _scale_weights(weights_node, grid, [2, 3])
  elif op_type == "Conv2D":
    # Output channels live along filter dimension 3.
    _scale_weights(weights_node, scale, [3])
  elif op_type == "MatMul":
    # Output columns live along weights dimension 1.
    _scale_weights(weights_node, scale, [1])
  else:
    raise ValueError("Unexpected op type {} for conv_node".format(
      conv_node.op_type))
def fold_batch_norms(g):
  # type: (graph.Graph) -> None
  """
  Python port of the Graph Transform Tool rewrite by the same name.

  Identifies instances of the pattern `Conv2D => Mul` and folds the
  multiplication into the convolution's filter coefficients. This pattern
  occurs as a result of `Conv2D => BatchNorm` turning into
  `Conv2D => Mul => Add` when a multi-op batch normalization is used.
  Also covers the related cases when the `Conv2D` is replaced with a `MatMul`
  or a `DepthwiseConv2D`.

  Args:
    g: Graph to rewrite in place.
  """
  # Match Mul(conv-like-op(<anything>, Const weights), Const multiplier).
  pattern = TreeExpr(op="Mul", alias="mul", inputs=(
      TreeExpr(op="Conv2D|MatMul|DepthwiseConv2dNative", alias="conv", inputs=(
          TreeExpr(),
          TreeExpr(op="Const", alias="weights")
      )),
      TreeExpr(op="Const", alias="mul_values")))

  def action(_, match_info):
    # type: (Any, Dict[str, node.Node]) -> bool
    mul_node = match_info["mul"]
    conv_node = match_info["conv"]
    weights_node = match_info["weights"]
    mul_values_node = match_info["mul_values"]
    # Cast to 64-bit float to avoid losing precision
    scale = np.float64(mul_values_node.get_attr("value"))
    # If there is another direct consumer of the output of the convolution,
    # skip the rewrite.
    if len(conv_node.outputs[0].consumers()) > 1:
      return False
    _add_scale_to_conv_weights(conv_node, weights_node, scale)
    # Cut the Mul node out of the graph
    reroute.reroute_ts(mul_node.inputs[0], mul_node.outputs[0])
    g.remove_node_by_name(mul_node.name, False)
    # Const might still be in use; check before removing it.
    if len(mul_values_node.outputs[0].consumers()) == 0:
      g.remove_node_by_name(mul_values_node.name, False)
    # Original rewrite gave the name of the Mul node to the Conv2D. Recreate
    # that behavior here, including putting the node in the collections that
    # the Mul node was a member of.
    g.rename_node(conv_node.name, mul_node.name)
    conv_node.remove_from_collections()
    for collection_name in mul_node.collection_names:
      conv_node.add_to_collection(collection_name)
    return True

  _fixed_point_apply(pattern, action, g)
def _get_batch_norm_params(
batch_norm_node # type: node.Node
):
# type: (...) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, Any, bool]
"""
Delve into the inputs of a fused batch normalization node and fetch the
constant values for the descriptive statistics that define the
normalization.
Args:
batch_norm_node: The fused batch normalization op. The caller is
responsible for ensuring that the variable inputs to this op have been
converted to consts.
Returns:
The following values:
mean, variance, beta, gamma, variance_epsilon,
scale_after_normalization
The first four return values contain descriptive stats, cast to float64,
while the last is a Boolean value.
"""
def get_const_values():
return (np.float64(batch_norm_node.inputs[ix].node.get_attr("value"))
for ix in range(1, 5) # Inputs 1-4 are the normalization params.
)
# Compensate for different input orders and attribute names
if "BatchNormWithGlobalNormalization" == batch_norm_node.op_type:
mean, variance, beta, gamma = get_const_values()
variance_epsilon = np.float64(batch_norm_node.get_attr(
"variance_epsilon"))
scale_after_normalization = bool(batch_norm_node.get_attr(
"scale_after_normalization"))
elif "FusedBatchNorm" == batch_norm_node.op_type:
gamma, beta, mean, variance = get_const_values()
variance_epsilon = np.float64(batch_norm_node.get_attr("epsilon"))
scale_after_normalization = True
else:
raise TypeError("Unexpected op type {} for fused batch norm".format(
batch_norm_node.op_type))
return (mean, variance, beta, gamma, variance_epsilon,
scale_after_normalization)
def _get_scale_and_offset(match_info):
  # type: (Dict[str, node.Node]) -> Tuple[np.ndarray, np.ndarray]
  # BUG FIX: the type comment previously claimed `-> None` although the
  # function returns a (scale, offset) pair.
  """
  Dig the batch normalization parameters out of a subgraph match and
  compute scale and offset vectors that the normalization applies at
  inference time.

  Args:
    match_info: Should contain ops under the following keys:
       "batch_norm" ==> fused batch normalization op. Caller should have
         verified that all inputs to this op are Consts

  Returns:
    scale, offset: Two Numpy arrays containing the scale and offset vectors
    of 64-bit floating point numbers
  """
  batch_norm_node = match_info["batch_norm"]
  (mean, variance, beta, gamma, variance_epsilon,
   scale_after_normalization) = _get_batch_norm_params(batch_norm_node)
  # Sanity check: Everything should have the same 1-D shape
  mean_shape = mean.shape
  if len(mean_shape) != 1:
    raise ValueError("Shape of mean ({}) is not a vector".format(mean_shape))
  if (variance.shape != mean_shape or beta.shape != mean_shape or
      gamma.shape != mean_shape):
    raise ValueError("Shapes {}, {}, {}, and {} for mean, variance, beta, "
                     "and gamma don't all match"
                     "".format(mean.shape, variance.shape, beta.shape,
                               gamma.shape))
  # Now we have everything we need to compute scale and offset values:
  #   scale  = gamma / sqrt(variance + eps)   (gamma omitted if not scaling)
  #   offset = beta - mean * scale
  if scale_after_normalization:
    scale = (1.0 / np.sqrt(variance + variance_epsilon)) * gamma
  else:
    scale = (1.0 / np.sqrt(variance + variance_epsilon))
  offset = (-mean * scale) + beta
  return scale, offset
def _replace_batch_norm_with_bias_add(
    g,  # type: graph.Graph
    match_info,  # type: Dict[str, node.Node]
    offset  # type: np.ndarray
):
  # type: (...) -> None
  """
  Replace the fused batch normalization node in the graph with a BiasAdd
  node that applies the offset from the original normalization.
  Then remove the batch normalization node and its input constants.

  Args:
    g: Graph to rewrite in place.
    match_info: Should contain ops under the following keys:
       "batch_norm" ==> fused batch normalization op
       "conv" ==> Convolution or matmul op that feeds into the batch
                  normalization
       "mean", "variance", "beta", "gamma" ==> Const nodes containing
                                               normalization parameters
    offset: Offset that the batch norm node applies at inference time
  """
  batch_norm_node = match_info["batch_norm"]
  # Capture the inputs before any rewiring; they are needed for the const
  # cleanup at the end.
  orig_inputs = batch_norm_node.inputs
  # Pass 3 of fold_old_batch_norms stores the conv under the key "conv0".
  conv_node = match_info["conv"] if "conv" in match_info else match_info[
    "conv0"]
  # Propagate the conv's data_format (if any) to the new BiasAdd.
  data_format = conv_node.get_attr("data_format") if conv_node.has_attr(
    "data_format") else None
  # TODO(frreiss): Support non-32-bit offsets
  bias_offset_node = util.make_const(g, batch_norm_node.name + "_offset",
                                     np.float32(offset), uniquify_name=True)
  bias_add_node = g.add_node(batch_norm_node.name + "_bias_add", "BiasAdd",
                             uniquify_name=True)
  if data_format is not None:
    bias_add_node.add_attr("data_format", data_format)
  bias_add_node.add_attr("T", batch_norm_node.get_attr("T"))
  bias_add_node.set_inputs([batch_norm_node.inputs[0], bias_offset_node])
  bias_add_node.set_outputs_from_pairs([(batch_norm_node.output(0).dtype,
                                         batch_norm_node.output(0).shape)])
  # Splice the batch norm op out of the graph and replace with a newly
  # created BiasAdd node.
  # Note that the batch norm node has a bunch of other outputs that aren't
  # used in inference.
  reroute.reroute_ts(bias_add_node.output(0), batch_norm_node.output(0))
  g.remove_node_by_name(batch_norm_node.name, False)
  # Original rewrite gave the name of the batch norm node to the BiasAdd.
  # Recreate that behavior here, including putting the node in the
  # collections that the original node was a member of.
  g.rename_node(bias_add_node.name, batch_norm_node.name)
  for collection_name in batch_norm_node.collection_names:
    bias_add_node.add_to_collection(collection_name)
  # Remove the input constants if they are no longer used.
  # Input 0 is the value to be normalized, and inputs 1-4 are the consts that
  # hold normalization parameters.
  for ix in range(1, 5):
    in_tensor = orig_inputs[ix]
    if len(in_tensor.consumers()) == 0:
      g.remove_node_by_name(in_tensor.node.name, False)
def fold_old_batch_norms(g):
  # type: (graph.Graph) -> None
  """
  Python port of the Graph Transform Tool rewrite by the same name.

  This rewrite looks for instances of the pattern `Conv2D => [batch norm]`,
  where [batch norm] is a fused batch normalization operator.
  The rewrite also covers instances of `DepthwiseConv2D => [batch norm]` when
  the channel multiplier of the DepthwiseConv2D op is 1.

  The TF documentation says that this rewrite is only for graphs produced by
  legacy code, but this is not true. As of January 2019, the most recent
  version of TensorFlow produces fused batch normalization operators by default.
  Specifically, legacy code uses the `BatchNormWithGlobalNormalization` op,
  while new code uses the `FusedBatchNorm` op.

  In addition to covering the basic `Conv2D => [batch norm]` pattern,
  the rewrite also covers the cases where some postprocessing nodes exist
  between the `Conv2D` and the `[batch norm]` parts. As a result, the rewrite
  proceeds in three passes.

  Args:
    g: Graph to rewrite in place.
  """
  # Perform three passes to cover three different types of subgraph.
  # PASS 1: Simple Conv2D => [batch norm] pattern.
  pattern_1 = TreeExpr(
    op="BatchNormWithGlobalNormalization|FusedBatchNorm",
    alias="batch_norm", inputs=(
      TreeExpr(op="Conv2D|DepthwiseConv2dNative", alias="conv", inputs=(
        TreeExpr(),
        TreeExpr(op="Const", alias="weights")
      )),
      # Inputs 1-4 of the batch norm are the normalization constants.
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
    ))

  def action_1(_, match_info):
    # type: (Any, Dict[str, node.Node]) -> bool
    conv_node = match_info["conv"]
    weights_node = match_info["weights"]
    # If there is another direct consumer of the output of the convolution,
    # skip the rewrite.
    if len(conv_node.outputs[0].consumers()) > 1:
      return False
    # Fold the multiplicative part of the normalization into the conv
    # weights and replace the batch norm with a BiasAdd for the offset.
    scale, offset = _get_scale_and_offset(match_info)
    _add_scale_to_conv_weights(conv_node, weights_node, scale)
    _replace_batch_norm_with_bias_add(g, match_info, offset)
    return True

  _fixed_point_apply(pattern_1, action_1, g)

  # PASS 2: Conv2D|DepthwiseConv2D => BatchToSpaceND => [batch norm]
  pattern_2 = TreeExpr(
    op="BatchNormWithGlobalNormalization|FusedBatchNorm",
    alias="batch_norm", inputs=(
      TreeExpr(op="BatchToSpaceND", alias="batch_to_space", inputs=(
        TreeExpr(op="Conv2D|DepthwiseConv2dNative", alias="conv", inputs=(
          TreeExpr(),
          TreeExpr(op="Const", alias="weights")
        )))),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
    ))

  def action_2(_, match_info):
    # type: (Any, Dict[str, node.Node]) -> bool
    conv_node = match_info["conv"]
    weights_node = match_info["weights"]
    # If there is another direct consumer of the output of the convolution,
    # the BatchToSpaceND, or the convolution weights, skip the rewrite
    for n in (conv_node, weights_node, match_info["batch_to_space"]):
      if len(n.output(0).consumers()) > 1:
        return False
    scale, offset = _get_scale_and_offset(match_info)
    _add_scale_to_conv_weights(conv_node, weights_node, scale)
    _replace_batch_norm_with_bias_add(g, match_info, offset)
    return True

  _fixed_point_apply(pattern_2, action_2, g)

  # PASS 3: Two Conv2D's -> Concat -> [batch norm]
  pattern_3 = TreeExpr(
    op="BatchNormWithGlobalNormalization|FusedBatchNorm",
    alias="batch_norm", inputs=(
      TreeExpr(op="ConcatV2|Concat", alias="concat", inputs=(
        TreeExpr(op="Conv2D", alias="conv0", inputs=(
          TreeExpr(),
          TreeExpr(op="Const", alias="weights0")
        )),
        TreeExpr(op="Conv2D", alias="conv1", inputs=(
          TreeExpr(),
          TreeExpr(op="Const", alias="weights1")
        )),
        TreeExpr(op="Const", alias="axis")
      )),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
      TreeExpr(op="Const"),
    ))

  def action_3(_, match_info):
    # type: (Any, Dict[str, node.Node]) -> bool
    # If there is another direct consumer of anything between a conv and the
    # final output, skip the rewrite
    if len(match_info["conv0"].outputs[0].consumers()) > 1:
      return False
    if len(match_info["conv1"].outputs[0].consumers()) > 1:
      return False
    if len(match_info["concat"].outputs[0].consumers()) > 1:
      return False
    conv0_node = match_info["conv0"]
    conv1_node = match_info["conv1"]
    weights0_node = match_info["weights0"]
    weights1_node = match_info["weights1"]
    scale, offset = _get_scale_and_offset(match_info)
    axis = match_info["axis"].get_attr("value")
    if axis == 3:
      # Concatenating along channel axis ==> Need to split scale and offset
      split_cols = weights0_node.get_attr("value").shape[3]
      scale_0, offset_0 = scale[:split_cols], offset[:split_cols]
      scale_1, offset_1 = scale[split_cols:], offset[split_cols:]
    else:
      # Concatenating along axis other than channel ==> Scale every channel
      scale_0, offset_0 = scale, offset
      scale_1, offset_1 = scale, offset
    _add_scale_to_conv_weights(conv0_node, weights0_node, scale_0)
    _add_scale_to_conv_weights(conv1_node, weights1_node, scale_1)
    # NOTE(review): the full (unsplit) offset is handed to the BiasAdd
    # rewrite here; this mirrors the original Graph Transform Tool, but the
    # split offset_0/offset_1 values above are otherwise unused.
    _replace_batch_norm_with_bias_add(g, match_info, offset)
    return True

  _fixed_point_apply(pattern_3, action_3, g)
def fold_batch_norms_up(g):
  # type: (graph.Graph) -> None
  """
  Identifies instances of the pattern
  ```
  Mul => Add => (optional ReLU/ReLU6) => [Conv2D|MatMul|DepthwiseConv2d]
  ```
  and the equivalent pattern
  ```
  FusedBatchNorm => (optional ReLU/ReLU6) => [Conv2D|MatMul|DepthwiseConv2d]
  ```
  Then fuses the multiplication into the convolution's filter coefficients
  and applies a correction to the Add op to compensate for add happening
  before multiply.

  If the nonlinearity is a ReLU6, replaces it with
  ```
  ReLU => Min(6 / multiplier from batch norm)
  ```

  Args:
    g: Graph to rewrite in place.
  """
  def compute_input_dim(n  # type: node.Node
                        ):
    # Weights dimension that corresponds to the op's *input* channels; this
    # is where an upstream (pre-conv) scale must be folded in.
    if n.op_type == "Conv2D" or n.op_type == "DepthwiseConv2dNative":
      return 2
    elif n.op_type == "MatMul":
      return 0
    else:
      raise ValueError("Unexpected op type {}".format(n.op_type))

  # PASS 1: the batch norm is expressed as explicit Mul => Add ops.
  pattern_1 = (
    TreeExpr(op="Conv2D|MatMul|DepthwiseConv2dNative", alias="conv", inputs=(
      TreeExpr(op="Relu|Relu6", alias="relu", optional=True, inputs=(
        TreeExpr(op="Add", alias="add", inputs=(
          TreeExpr(op="Mul", alias="mul", inputs=(
            TreeExpr(),
            TreeExpr(op="Const", alias="mul_values")
          )),
          TreeExpr(op="Const", alias="add_values")
        ))
      )),
      TreeExpr(op="Const", alias="weights")))
  )

  def handle_relu6(relu6_op, scale):
    # type: (node.Node, np.ndarray) -> None
    """
    Additional rewrite logic that replaces a ReLU6 op with a ReLU plus scaled
    minimum.

    Args:
      relu6_op: Original Relu6
      scale: Scale factor pulled from the batch normalization
    """
    # ReLU6 op: min(max(features, 0), 6). Add min() component to graph.
    target_np_type = relu6_op.output(0).dtype.as_numpy_dtype
    min_values = (6. / scale).astype(target_np_type)
    min_node = util.make_simple_binary_op(
      g, relu6_op.name + "/min", "Minimum", relu6_op.output(0),
      util.make_const(g, relu6_op.name + "/min/const", min_values).output(0))
    reroute.reroute_ts(min_node.output(0), relu6_op.output(0),
                       cannot_modify=[min_node])
    # Downgrade the original op to a plain ReLU; the Minimum added above now
    # provides the (rescaled) upper clamp.
    relu6_op.change_op_type("Relu")

  def action_1(_, match_info):
    # type: (Any, Dict[str, node.Node]) -> bool
    conv_node = match_info["conv"]
    add_node = match_info["add"]
    mul_node = match_info["mul"]
    weights_node = match_info["weights"]
    mul_values_node = match_info["mul_values"]
    add_values_node = match_info["add_values"]
    # If there is another direct consumer of anything we're about to
    # modify, skip the rewrite.
    for n in (add_node, mul_node, weights_node, add_values_node):
      if len(n.output(0).consumers()) > 1:
        return False
    # Scale the weights to compensate for unscaled inputs.
    scale = np.float64(mul_values_node.get_attr("value"))
    _scale_weights(weights_node, scale, [compute_input_dim(conv_node)])
    # Divide the additive factor to compensate for the multiplication being
    # pulled above the Add.
    add_values = add_values_node.get_attr("value")
    new_add_values = add_values.astype(np.float64) / scale
    add_values_node.replace_attr("value", new_add_values.astype(
      add_values.dtype))
    # Cut the Mul node out of the graph
    reroute.reroute_ts(mul_node.inputs[0], mul_node.outputs[0])
    g.remove_node_by_name(mul_node.name, False)
    # Const might still be in use; check before removing it.
    if len(mul_values_node.outputs[0].consumers()) == 0:
      g.remove_node_by_name(mul_values_node.name, False)
    if "relu" in match_info and match_info["relu"].op_type == "Relu6":
      handle_relu6(match_info["relu"], scale)
    return True

  _fixed_point_apply(pattern_1, action_1, g)

  # PASS 2: same rewrite, with the normalization expressed as FusedBatchNorm.
  pattern_2 = (
    TreeExpr(op="Conv2D|MatMul|DepthwiseConv2dNative", alias="conv", inputs=(
      TreeExpr(op="Relu|Relu6", alias="relu", optional=True, inputs=(
        TreeExpr(op="FusedBatchNorm", alias="batch_norm", inputs=(
          TreeExpr(),
          TreeExpr(op="Const"),
          TreeExpr(op="Const"),
          TreeExpr(op="Const"),
          TreeExpr(op="Const")
        )),
      )),
      TreeExpr(op="Const", alias="weights")))
  )

  def action_2(_, match_info):
    # type: (Any, Dict[str, node.Node]) -> bool
    conv_node = match_info["conv"]
    batch_norm_node = match_info["batch_norm"]
    weights_node = match_info["weights"]
    # If there is another direct consumer of anything we're about to
    # modify, skip the rewrite.
    for n in (batch_norm_node, weights_node):
      if len(n.output(0).consumers()) > 1:
        return False
    scale, offset = _get_scale_and_offset(match_info)
    # Scale the weights to compensate for unscaled inputs.
    _scale_weights(weights_node, scale, [compute_input_dim(conv_node)])
    # Divide the additive factor to compensate for the multiplication being
    # pulled above the fused batch norm's embedded addition.
    offset /= scale
    _replace_batch_norm_with_bias_add(g, match_info, offset)
    if "relu" in match_info and match_info["relu"].op_type == "Relu6":
      handle_relu6(match_info["relu"], scale)
    return True

  _fixed_point_apply(pattern_2, action_2, g)
| [
"hari@nagarajan.io"
] | hari@nagarajan.io |
b26bc6da235636368ae07cbc90981a25521e6737 | bde8e24b07bb3a403fa40a3c2aabe3f8d4466272 | /question90-99/question94.py | d4c4fe4224ccf09a7489f19f4a51854c02394b5f | [] | no_license | refine-P/NLP100Knock | fda6680b6d72faae9d8805829fa7d9cb9ab379d6 | ed29a3a3d80820ef074247f79253c7ef97500b55 | refs/heads/master | 2021-07-06T15:55:29.512827 | 2019-04-07T16:37:34 | 2019-04-07T16:37:34 | 179,993,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #coding:utf-8
#Windowsだと警告が出るが特に問題がないらしいのでこれで握りつぶす
#参考(http://stackoverflow.com/questions/41658568/chunkize-warning-while-installing-gensim)
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models.word2vec import Word2Vec
if __name__ == "__main__":
    model = Word2Vec.load("word2vec.model")
    # Skip the header line of combined.tab; each remaining line holds a word
    # pair (and a human similarity score, which is ignored here).
    with open("combined.tab", "r", encoding='utf-8') as fr, \
            open("353_result.txt", "w", encoding='utf-8') as fw:
        for line in fr.readlines()[1:]:
            words = line.split()
            try:
                sim = model.similarity(words[0], words[1])
                result = "%s\t%f\n" % (" ".join(words[0:2]), sim)
            except KeyError:
                # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit. Only an out-of-vocabulary word (KeyError)
                # should fall back to the -1 sentinel.
                result = "%s\t-1\n" % " ".join(words[0:2])
            fw.write(result)
| [
"32488002+refine-P@users.noreply.github.com"
] | 32488002+refine-P@users.noreply.github.com |
def solution(a):
    """Return the maximum subarray sum of `a` (Kadane's algorithm).

    The empty subarray is allowed, so the result is never negative: an
    all-negative (or empty) input yields 0, matching the original behavior.
    """
    # BUG-PRONE NAMES FIXED: the original used `max` and `sum` as locals,
    # shadowing the builtins; it also indexed with range(len(a)).
    best = 0
    running = 0
    for value in a:
        # Extend the current run, or restart at `value` when the run so far
        # is non-positive.
        running = running + value if running > 0 else value
        if running > best:
            best = running
    return best
# Read the element count, then that many integers (one per line).
n = int(input())
a = [int(input()) for _ in range(n)]
print(solution(a)) | [
"pakorn@ampostech.com"
] | pakorn@ampostech.com |
3e5f8c81073da737f0ad411e4dbce8d06e3374c0 | 899f955df7737c8d08a43e1a9acab0f46c504000 | /bin/calc_word_bool.py | 4dc469cd8e56e3bcdb5c344c2fe0b38b3d519a93 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | aziele/alfpy | 89f91d829cb9dc07e3e2af152263ea9586e72529 | 25545be14affa7d7e89e5b5ebcfe4f3e688108b7 | refs/heads/master | 2022-06-23T23:38:46.846729 | 2022-06-14T06:28:08 | 2022-06-14T06:28:08 | 72,731,037 | 22 | 6 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | #! /usr/bin/env python
# Copyright (c) 2016 Zielezinski A, combio.pl
import argparse
import sys
from alfpy import word_bool_distance
from alfpy import word_pattern
from alfpy import word_vector
from alfpy.utils import distmatrix
from alfpy.utils import seqrecords
from alfpy.version import __version__
def get_parser():
    """Build the command-line parser for calc_word_bool.py.

    Option registration order is kept as-is because it determines the help
    layout. When invoked with no arguments, prints the usage line and exits
    via ``parser.exit()``.
    """
    parser = argparse.ArgumentParser(
        description='''Calculate distances between DNA/protein sequences based
        on boolean 1-D vectors of word counting occurrences.''',
        add_help=False, prog='calc_word_bool.py'
    )
    group = parser.add_argument_group('REQUIRED ARGUMENTS')
    group.add_argument('--fasta', '-f',
                       help='input FASTA sequence filename', required=True,
                       type=argparse.FileType('r'), metavar="FILE")

    # --word_size and --word_pattern are mutually exclusive; the "at least
    # one of them" requirement is enforced later by validate_args().
    group = parser.add_argument_group(' Choose between the two options')
    g1 = group.add_mutually_exclusive_group()
    g1.add_argument('--word_size', '-s', metavar="N",
                    help='word size for creating word patterns',
                    type=int)
    g1.add_argument('--word_pattern', '-w',
                    help='input filename w/ pre-computed word patterns',
                    type=argparse.FileType('r'), metavar="FILE")

    group = parser.add_argument_group('OPTIONAL ARGUMENTS')
    distlist = word_bool_distance.Distance.get_disttypes()
    group.add_argument('--distance', '-d', choices=distlist,
                       help='choose from: {} [DEFAULT: %(default)s]'.format(
                           ", ".join(distlist)),
                       metavar='', default="jaccard")

    group = parser.add_argument_group('OUTPUT ARGUMENTS')
    group.add_argument('--out', '-o', help="output filename",
                       metavar="FILE")
    group.add_argument('--outfmt', choices=['phylip', 'pairwise'],
                       default='phylip',
                       help='distances output format [DEFAULT: %(default)s]')

    group = parser.add_argument_group("OTHER OPTIONS")
    group.add_argument("-h", "--help", action="help",
                       help="show this help message and exit")
    group.add_argument('--version', action='version',
                       version='%(prog)s {}'.format(__version__))

    if len(sys.argv[1:]) == 0:
        # parser.print_help()
        parser.print_usage()  # for just the usage line
        parser.exit()
    return parser
def validate_args(parser):
    """Parse the command line and enforce cross-option constraints.

    Exactly one of --word_size / --word_pattern must be supplied, and a
    given word size must be positive; violations are reported through
    ``parser.error``.
    """
    args = parser.parse_args()
    if args.word_size:
        if args.word_size < 1:
            parser.error('Word size must be >= 1.')
    elif not args.word_pattern:
        parser.error("Specify either: --word_size or --word_pattern.")
    return args
def main():
    """Command-line entry point.

    Reads the FASTA input, builds (or loads) the word patterns, computes
    pairwise boolean word distances, and writes or displays the resulting
    distance matrix.
    """
    parser = get_parser()
    args = validate_args(parser)
    seq_records = seqrecords.read_fasta(args.fasta)
    if args.word_size:
        p = word_pattern.create(seq_records.seq_list, args.word_size)
    else:
        p = word_pattern.read(args.word_pattern)
    bools = word_vector.Bools(seq_records.length_list, p)
    dist = word_bool_distance.Distance(bools, args.distance)
    matrix = distmatrix.create(seq_records.id_list, dist)
    if args.out:
        # Context manager replaces the original manual open()/close() pair,
        # guaranteeing the handle is closed even if writing fails.
        with open(args.out, 'w') as oh:
            matrix.write_to_file(oh, args.outfmt)
    else:
        matrix.display(args.outfmt)


if __name__ == '__main__':
    main()
| [
"a.zielezinski@gmail.com"
] | a.zielezinski@gmail.com |
6fd7fd37669a3e861a981401bd51e05255ebdd8d | ea28fd0a5c918e48705d7c104fe24513e1be2267 | /git_commands_file.py | 0b402ddd28a28d91f2e6d213aaa91f4ffddbfe37 | [] | no_license | sharmila-gudla/blue_team_for- | f5b8c0c3c77dc9fb68a35576000223b735e4817d | 2df51e68bed49c49e227c6f6bbc98b2a68ea1f23 | refs/heads/master | 2022-10-08T23:45:25.734524 | 2020-06-11T10:25:29 | 2020-06-11T10:25:29 | 271,329,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | Hello world!!!!!!!!!!!!!!!!
hiiiiii
helllo | [
"gudlasharmila1234@gmail.com"
] | gudlasharmila1234@gmail.com |
8be4b31d5b6d0932f1f3501c51ac72f4c982f1d9 | b0764d16090acc1fa93661c6673c8dfdd6b240e0 | /client/service/enterprise_bl.py | 683a1f2cb7f915468ee566e042884de25839f113 | [] | no_license | blshao84/zzbj | 3903321869ce0efd934291f220ca31a1be6fd66f | b82b2cfad69d615fe73f134ed073f71d44229ed5 | refs/heads/master | 2023-02-13T21:31:27.529948 | 2021-01-08T04:34:09 | 2021-01-08T04:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,666 | py | # -*- coding: utf-8 -*-
# @Author : dongjianghai
# @File : enterprise_bl.py
# Introduction :
import datetime
import logging
import pandas as pd
from service import utils as utils
_date_format = '%Y-%m-%d'
_date_format_s = '%Y-%m-%d %H:%M:%S'
def get_base_elements(code):
    """Query the base-element search endpoint and return rows as dicts.

    Args:
        code: Element code forwarded to the server as a search parameter.

    Returns:
        list[dict]: One dict per record. The boolean ``status`` field is
        converted to '1'/'0' strings and every column name is converted
        from camelCase to snake_case.

    Raises:
        RuntimeError: If the server reports an error or the request fails.
    """
    data = {'code': code}
    url = 'http://{ip}:{port}/base-element/search'.format(ip=utils.ip(), port=utils.port())
    params = {
        "id": None,
        "params": data
    }
    try:
        resp = utils.post(url=url, params=params)
        if 'error' in resp:
            raise RuntimeError('获取服务器excel配置未知异常')
        # Normalize the boolean status flag to the '1'/'0' strings callers
        # expect. (Loop variable renamed from `re`, which shadowed the
        # common module name.)
        for record in resp:
            record['status'] = '1' if record.get('status') else '0'
        df1 = pd.DataFrame(resp)
        # pandas accepts a callable column mapper; this replaces the
        # original rename-one-column-per-iteration loop.
        df1.rename(columns=hump2Underline, inplace=True)
        return df1.to_dict(orient='records')
    except Exception as e:
        logging.error(repr(e))
        # Chain the original exception so the root cause is not lost.
        raise RuntimeError('获取服务器excel配置未知异常') from e
def hump2Underline(text):
    """Convert a camelCase identifier to snake_case.

    Every uppercase letter except the first character gets an underscore
    prefix, then the whole string is lowercased.
    """
    pieces = ("_" + ch if idx and ch.isupper() else ch
              for idx, ch in enumerate(text))
    return "".join(pieces).lower()
def convert_data(row, datas, columns):
    """Convert one result row into a dict of display strings and append it
    to `datas`.

    Dates/datetimes — and strings recognized as such by
    check_time_format() — are normalized to '%Y-%m-%d'; every other
    non-None value is passed through str(); None becomes ''.

    Args:
        row: Mapping-like row (must support .get).
        datas: List that the converted dict is appended to (mutated).
        columns: Column names to extract from `row`.
    """
    node = {}
    for col in columns:
        value = row.get(col)
        if value is None:
            node[col] = ''
            continue
        if isinstance(value, datetime.datetime):
            value = value.strftime(_date_format)
        elif isinstance(value, datetime.date):
            value = value.strftime(_date_format)
        else:
            # Classify once instead of calling check_time_format twice.
            fmt = check_time_format(value)
            if fmt == '1':
                # str to date, then date formatting
                value = datetime.datetime.strptime(value, _date_format_s)
                value = value.strftime(_date_format)
            elif fmt == '2':
                # BUG FIX: the original called value.strftime(...) on a str,
                # which raised AttributeError. Parse first, then format.
                value = datetime.datetime.strptime(value, _date_format)
                value = value.strftime(_date_format)
        node[col] = str(value)
    datas.append(node)
def extract_data(df_data, cols):
    """Flatten a DataFrame into a list of per-row dicts via convert_data()."""
    rows = []
    # convert_data appends one converted dict per DataFrame row.
    df_data.apply(lambda row: convert_data(row, rows, cols), axis=1)
    return rows
def check_time_format(date):
    """Classify a value's timestamp format.

    Returns:
        '1' if `date` is a string matching '%Y-%m-%d %H:%M:%S',
        '2' if it matches '%Y-%m-%d',
        '0' if it is not a string or matches neither format.
    """
    try:
        if ":" in date:
            datetime.datetime.strptime(date, _date_format_s)
            return '1'
        # BUG FIX: the original called datetime.datetime.strptime.strptime,
        # which always raised AttributeError, so every plain date string was
        # misclassified as '0'.
        datetime.datetime.strptime(date, _date_format)
        return '2'
    except (TypeError, ValueError):
        # TypeError: non-string input; ValueError: unparseable string.
        # (Narrowed from the original bare `except`.)
        return '0'
| [
"sinorockie@gmail.com"
] | sinorockie@gmail.com |
7d650679d38d65b2e05abc53a001d57e0cd69d52 | 5e990a70ece0c40f6e675a59495192342682f1a8 | /model_2_mutimodal_attention_iiit_dataaug.py | 8bd964c183d9a10c028b87832d4c7d704949752a | [] | no_license | hardik-uppal/Hardik_AIIM | d293f2cd30c85928a0fce241010e66594d347bb5 | 1aaae3a06f90649156b966e5acd7ebc91ff38831 | refs/heads/master | 2021-08-06T04:23:17.737607 | 2020-08-27T18:48:38 | 2020-08-27T18:48:38 | 214,052,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,457 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 00:10:40 2019
@author: hardi
"""
import gc
import numpy as np
from keras import layers, models, optimizers
from keras import backend as K
from keras.utils import to_categorical
import matplotlib.pyplot as plt
#from utils import combine_images
from PIL import Image
#from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
#from keras_vggface.vggface import VGGFace
from model_vgg_face import VGG16
import keras
import pickle
from attention_module import cbam_block
#import config
from keras.models import Model
from keras.layers import Dense, Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D, Activation, Dropout, BatchNormalization
from keras import backend as K
from triplet_loss_semi_hard import triplet_loss_adapted_from_tf
from keras.backend.tensorflow_backend import set_session
from keras.backend.tensorflow_backend import clear_session
from keras.backend.tensorflow_backend import get_session
import tensorflow
import cv2
import imgaug.augmenters as iaa
K.set_image_data_format('channels_last')
def reset_keras(model):
    """Tear down the current Keras/TensorFlow session and start a fresh one.

    Intended to release GPU memory between training runs. Note that
    ``del model`` only drops the *local* reference passed into this
    function; callers must also drop their own references for the model's
    memory to actually become collectable.

    Args:
        model: Model object to drop before the new session is created.
    """
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()
    try:
        del model  # this is from global space - change this as you need
    except:
        pass
    # A nonzero count means the collector actually freed something.
    print(gc.collect())  # if it's done something you should see a number being outputted

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
def normalized(image):
    """Chromaticity-normalize a batch of 224x224 3-channel images.

    Each pixel's channels are divided by the channel sum (plus a tiny
    epsilon to avoid division by zero), factoring out overall intensity.

    Args:
        image: Iterable of (224, 224, 3) arrays (channels named b/g/r in
            the original code — presumably OpenCV BGR order; confirm).

    Returns:
        np.ndarray: Batch of (224, 224, 3) uint8 images.

    NOTE(review): cv2.convertScaleAbs on float values in [0, 1] rounds to
    0/1; confirm this quantization is intended before reusing elsewhere.
    """
    norm_batch = []
    epsilon = 0.00000000000001
    for img in image:
        norm = np.zeros((224, 224, 3), np.float32)
        b = img[:, :, 0]
        g = img[:, :, 1]
        r = img[:, :, 2]
        # `denom` replaces the original local `sum`, which shadowed the
        # builtin; the unused `norm_rgb` preallocation was removed.
        denom = b + g + r + epsilon
        norm[:, :, 0] = b / denom
        norm[:, :, 1] = g / denom
        norm[:, :, 2] = r / denom
        norm_batch.append(cv2.convertScaleAbs(norm))
    return np.asarray(norm_batch)
def VGGFace_multimodal(input_shape, n_class):
    """
    Two-stream RGB + depth face recognition network.

    Each modality runs through its own VGG16 backbone (loaded with
    'vggface' weights); the two feature maps are concatenated channel-wise,
    re-weighted by a CBAM attention block, and classified by a
    fc(2048)-fc(1024)-fc(512)-softmax head with batch norm and dropout
    after each FC layer.

    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :return: Keras Model used for training
    """
    # RGB MODALITY BRANCH OF CNN
    inputs_rgb = layers.Input(shape=input_shape)
    # include_top=False keeps only the convolutional feature extractor.
    vgg_model_rgb = VGG16(include_top=False, weights='vggface', input_tensor=None, input_shape=input_shape, pooling=None, type_name='rgb')
    conv_model_rgb = vgg_model_rgb(inputs_rgb)
    # DEPTH MODALITY BRANCH (same architecture, separate weights).
    inputs_depth = layers.Input(shape=input_shape)
    vgg_model_depth = VGG16(include_top=False, weights='vggface', input_tensor=None, input_shape=input_shape, pooling=None, type_name='depth')
    conv_model_depth = vgg_model_depth(inputs_depth)
    # CONCATENATE the ends of RGB & DEPTH, then let CBAM attention
    # re-weight the fused feature maps.
    merge_rgb_depth = layers.concatenate([conv_model_rgb,conv_model_depth], axis=-1)
    attention_features = cbam_block(merge_rgb_depth)
    # Classification head: 3 FC layers, each followed by BN + dropout(0.5).
    flat_model = layers.Flatten(name='flatten')(attention_features)
    fc6 = layers.Dense(2048, activation='relu', name='fc6')(flat_model)
    bn_1 = BatchNormalization(name='1_bn')(fc6)
    dropout_1 = layers.Dropout(0.5)(bn_1)
    fc7 = layers.Dense(1024, activation='relu', name='fc7')(dropout_1)
    bn_2 = BatchNormalization(name='2_bn')(fc7)
    dropout_2 = layers.Dropout(0.5)(bn_2)
    fc8 = layers.Dense(512, activation='relu', name='fc8')(dropout_2)
    bn_3 = BatchNormalization(name='3_bn')(fc8)
    dropout_3 = layers.Dropout(0.5)(bn_3)
    # VECTORIZING OUTPUT: per-class softmax probabilities.
    output = layers.Dense(n_class, activation='softmax', name='output')(dropout_3)
    # MODAL [INPUTS , OUTPUTS]
    train_model = models.Model(inputs=[inputs_rgb, inputs_depth], outputs=[output])
    # weights_path = 'CurtinFaces/vgg_multimodal_dropout-0.5_3fc-512/weights-25.h5'
    # train_model.load_weights(weights_path)
    train_model.summary()
    # Freeze all but the last 26 layers — presumably the attention block and
    # classifier head remain trainable; TODO confirm the layer count against
    # the built model's summary.
    for layer in train_model.layers[:-26]:
        layer.trainable = False
    return train_model
#def margin_loss(y_true, y_pred):
# """
# Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.
# :param y_true: [None, n_classes]
# :param y_pred: [None, num_capsule]
# :return: a scalar loss value.
# """
# L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
# 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
#
# return K.mean(K.sum(L, 1))
def train(model, args):
    """
    Compile and train the two-stream (RGB + depth) model with on-the-fly
    data augmentation.

    :param model: Keras model with two image inputs (RGB, depth) and one
                  softmax output
    :param args: parsed CLI arguments; uses save_dir, batch_size, lr,
                 lr_decay, epochs, debug
    :return: the trained model
    """
    # --- callbacks --------------------------------------------------------
    log = callbacks.CSVLogger(args.save_dir + '/log.csv')
    es_cb = callbacks.EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='auto')
    tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
                               batch_size=args.batch_size, histogram_freq=int(args.debug))
    checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-best.h5', monitor='val_acc',
                                           save_best_only=True, save_weights_only=True, verbose=1)
    # Exponential LR decay: lr * lr_decay**epoch.
    lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))

    # --- compile ----------------------------------------------------------
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=['categorical_crossentropy'],  # triplet_loss_adapted_from_tf,
                  metrics=['accuracy'])

    def train_generator(batch_size, val_train):
        """Yield paired [[rgb, depth], labels] batches.

        val_train='train' -> augmented batches (each disk batch expanded 5x);
        val_train='val'   -> un-augmented, rescale-only batches.
        """
        fold = 1
        rgb_train_dir = 'D:/RGB_D_Dataset_new/fold{}/train/RGB/'.format(fold)  # D:/CurtinFaces_crop/RGB/train/ --Curtin
        depth_train_dir = 'D:/RGB_D_Dataset_new/fold{}/train/depth/'.format(fold)  # D:/CurtinFaces_crop/normalized/DEPTH/train/
        rgb_val_dir = 'D:/RGB_D_Dataset_new/fold{}/test/RGB/'.format(fold)  # D:/CurtinFaces_crop/RGB/test1/
        depth_val_dir = 'D:/RGB_D_Dataset_new/fold{}/test/depth/'.format(fold)  # D:/CurtinFaces_crop/normalized/DEPTH/test1/

        if val_train == 'train':
            # Each source batch is expanded 5x (original + 4 augmentations)
            # below, so read batch_size/5 images from disk per step.
            batch_size = int(batch_size / 5)
            train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                                               zoom_range=0.2, horizontal_flip=True)
            # NOTE(review): shuffle=True with the same seed is relied on to
            # keep RGB and depth streams in the same order -- this assumes
            # both directories have identical file layouts; TODO confirm.
            generator_rgb = train_datagen.flow_from_directory(
                directory=rgb_train_dir, target_size=(224, 224), color_mode="rgb",
                batch_size=batch_size, class_mode="categorical", shuffle=True, seed=42)
            generator_depth = train_datagen.flow_from_directory(
                directory=depth_train_dir, target_size=(224, 224), color_mode="rgb",
                batch_size=batch_size, class_mode="categorical", shuffle=True, seed=42)
            # Geometric augmenters; each is made deterministic per batch so
            # the SAME transform is applied to the RGB and depth images,
            # keeping the two modalities pixel-aligned (previously each
            # modality drew independent random parameters).
            augmenters = [iaa.Fliplr(1),
                          iaa.Affine(rotate=(-30, 30)),
                          iaa.Affine(shear=(-16, 16)),
                          iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)})]
            while 1:
                x_batch_rgb, y_batch_rgb = generator_rgb.next()
                x_batch_depth, y_batch_depth = generator_depth.next()
                rgb_views = [x_batch_rgb]
                depth_views = [x_batch_depth]
                for aug in augmenters:
                    det = aug.to_deterministic()  # freeze random params for this batch
                    rgb_views.append(det(images=x_batch_rgb))
                    depth_views.append(det(images=x_batch_depth))
                x_batch_rgb_final = np.concatenate(rgb_views, axis=0)
                x_batch_depth_final = np.concatenate(depth_views, axis=0)
                # Labels are identical for all 5 views of each sample.
                y_batch_rgb_final = np.tile(y_batch_rgb, (5, 1))
                yield [[x_batch_rgb_final, x_batch_depth_final], y_batch_rgb_final]
        elif val_train == 'val':
            # BUG FIX: validation must not be augmented (rescale only) and
            # must use the full batch_size -- validation_steps below is
            # computed with the undivided batch size.
            val_datagen = ImageDataGenerator(rescale=1. / 255)
            generator_rgb_val = val_datagen.flow_from_directory(
                directory=rgb_val_dir, target_size=(224, 224), color_mode="rgb",
                batch_size=batch_size, class_mode="categorical", shuffle=True, seed=42)
            generator_depth_val = val_datagen.flow_from_directory(
                directory=depth_val_dir, target_size=(224, 224), color_mode="rgb",
                batch_size=batch_size, class_mode="categorical", shuffle=True, seed=42)
            while 1:
                x_batch_rgb, y_batch_rgb = generator_rgb_val.next()
                x_batch_depth, y_batch_depth = generator_depth_val.next()
                yield [[x_batch_rgb, x_batch_depth], y_batch_rgb]

    # Training with data augmentation. If shift_fraction=0., also no augmentation.
    model.fit_generator(generator=train_generator(args.batch_size, 'train'),
                        steps_per_epoch=int(424 / int(args.batch_size / 5)),  # 936 curtin faces / 424 fold1 iiitd
                        epochs=args.epochs,
                        validation_data=train_generator(args.batch_size, 'val'),
                        validation_steps=int(4181 / int(args.batch_size)),  # 4108 curtin faces / 4181 fold1 iiitd
                        callbacks=[log, tb, checkpoint, lr_decay, es_cb])
    # End: Training with data augmentation ---------------------------------
    # model.save_weights(args.save_dir + '/trained_model.h5')
    from utils import plot_log
    plot_log(args.save_dir + '/log.csv', show=True)
    return model
#
#def test(model, args):
#
#
# model.compile(optimizer=optimizers.Adam(lr=args.lr),
# loss=['categorical_crossentropy'],
#
# metrics=['accuracy'])
# model.load_weights('./CurtinFaces/dataaug_vgg_multimodal_dropout-0.5_3fc_batch30/weights-best.h5')
#
#
# def test_generator(batch_size=1):
# train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
# generator_rgb = train_datagen.flow_from_directory(directory="D:/CurtinFaces_crop/RGB/test1/", target_size=(224, 224), color_mode="rgb",
# batch_size=1, class_mode="categorical", shuffle=True, seed=42)
# generator_depth = train_datagen.flow_from_directory(directory="D:/CurtinFaces_crop/normalized/DEPTH/test1/", target_size=(224, 224), color_mode="rgb",
# batch_size=1, class_mode="categorical", shuffle=True, seed=42)
#
# while 1:
# x_batch_rgb, y_batch_rgb = generator_rgb.next()
# x_batch_depth, y_batch_depth = generator_depth.next()
# yield [[x_batch_rgb, x_batch_depth], y_batch_rgb]
#
# scores = model.evaluate_generator(generator=test_generator(1),steps = 260)###test1 2028 ###test2 1560##test3 260
# print('Test loss: {} ; Accuracy on Test: {}'.format(scores[0],scores[1]))
# import csv
# test_log = args.save_dir + '/log_test.csv'
# with open(test_log, 'w', newline='') as csvfile:
# spamwriter = csv.writer(csvfile)
# spamwriter.writerow(['Test loss: {} ; Accuracy on Test: {}'.format(scores[0],scores[1])])
if __name__ == "__main__":
    # Script entry point: parse CLI hyper-parameters, build the two-stream
    # RGB-D model, train it, then tear down the Keras/TF session.
    import os
    import argparse
    from keras.preprocessing.image import ImageDataGenerator
    from keras import callbacks
    # setting the hyper parameters
    parser = argparse.ArgumentParser(description="RGB-D network")
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--batch_size', default=30, type=int)## only divisible by 5
    parser.add_argument('--lr', default=0.01, type=float,
                        help="Initial learning rate")
    parser.add_argument('--lr_decay', default=0.9, type=float,
                        help="The value multiplied by lr at each epoch. Set a larger value for larger epochs")
    # NOTE(review): the arguments below (lam_recon, routings, shift_fraction,
    # digit, weights, testing) are parsed but not used in the visible
    # training path -- likely left over from a capsule-network variant.
    parser.add_argument('--lam_recon', default=0.392, type=float,
                        help="The coefficient for the loss of decoder")
    parser.add_argument('-r', '--routings', default=3, type=int,
                        help="Number of iterations used in routing algorithm. should > 0")
    parser.add_argument('--shift_fraction', default=0.1, type=float,
                        help="Fraction of pixels to shift at most in each direction.")
    parser.add_argument('--debug', action='store_true',
                        help="Save weights by TensorBoard")
    parser.add_argument('--save_dir', default='./IIIT_D/icip_test')
    parser.add_argument('-t', '--testing', action='store_true',
                        help="Test the trained model on testing dataset")
    parser.add_argument('--digit', default=5, type=int,
                        help="Digit to manipulate")
    parser.add_argument('-w', '--weights', default=None,
                        help="The path of the saved weights. Should be specified when testing")
    args = parser.parse_args()
    print(args)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    # define model
    # n_class=106 matches the IIIT-D identity count used by save_dir above;
    # CurtinFaces runs used n_class=52 (see commented line below).
    model = VGGFace_multimodal(input_shape=(224,224,3), n_class=106)
    model.summary()
    model_trained = train(model=model, args=args)
    # model_trained = VGGFace_multimodal(input_shape=(224,224,3), n_class=52)
    # test(model=model, args=args)
    # reset_keras is defined elsewhere in this file; presumably clears the
    # TF session to free GPU memory after training -- TODO confirm.
    reset_keras(model=model_trained)
    # reset_keras(model=model)
    # as long as weights are given, will run testing
# as long as weights are given, will run testing
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.